
(FPCore (x y) :precision binary64 (+ x (/ (fabs (- y x)) 2.0)))
/* Starting program: x plus half the absolute gap |y - x|, in binary64. */
double code(double x, double y) {
    double half_gap = fabs(y - x) / 2.0;
    return x + half_gap;
}
! Starting program: x plus half the absolute gap |y - x|, in binary64.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x + (abs(y - x) / 2.0d0)
end function code
/** Starting program: x plus half the absolute difference |y - x|. */
public static double code(double x, double y) {
    final double halfGap = Math.abs(y - x) / 2.0;
    return x + halfGap;
}
def code(x, y):
    """Return x plus half of |y - x| (binary64 starting program)."""
    half_gap = math.fabs(y - x) / 2.0
    return x + half_gap
# Starting program: x plus half of |y - x|, with explicit Float64 rounding steps.
function code(x, y)
    gap = Float64(y - x)
    half = Float64(abs(gap) / 2.0)
    return Float64(x + half)
end
% Starting program: x plus half of the absolute difference between y and x.
function tmp = code(x, y)
    tmp = x + (abs(y - x) / 2.0);
end
(* Starting program: x + |y - x| / 2, evaluated at $MachinePrecision at each step. *)
code[x_, y_] := N[(x + N[(N[Abs[N[(y - x), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{\left|y - x\right|}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (+ x (/ (fabs (- y x)) 2.0)))
/* Alternative 1 (duplicate of the starting program): x + |y - x| / 2. */
double code(double x, double y) {
return x + (fabs((y - x)) / 2.0);
}
! Alternative 1 (duplicate of the starting program): x + |y - x| / 2.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x + (abs((y - x)) / 2.0d0)
end function
// Alternative 1 (duplicate of the starting program): x + |y - x| / 2.
public static double code(double x, double y) {
return x + (Math.abs((y - x)) / 2.0);
}
# Alternative 1 (duplicate of the starting program): x + |y - x| / 2.
def code(x, y): return x + (math.fabs((y - x)) / 2.0)
# Alternative 1 (duplicate of the starting program): x + |y - x| / 2.
function code(x, y) return Float64(x + Float64(abs(Float64(y - x)) / 2.0)) end
% Alternative 1 (duplicate of the starting program): x + |y - x| / 2.
function tmp = code(x, y) tmp = x + (abs((y - x)) / 2.0); end
(* Alternative 1 (duplicate of the starting program): x + |y - x| / 2. *)
code[x_, y_] := N[(x + N[(N[Abs[N[(y - x), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{\left|y - x\right|}{2}
\end{array}
(FPCore (x y) :precision binary64 (fma (fabs (- x y)) 0.5 x))
double code(double x, double y) {
return fma(fabs((x - y)), 0.5, x);
}
# fma form: |x - y| * 0.5 + x with a single rounding.
function code(x, y) return fma(abs(Float64(x - y)), 0.5, x) end
(* fma-style form: |x - y| * 0.5 + x (no fused operation in Mathematica; plain product-plus-add). *)
code[x_, y_] := N[(N[Abs[N[(x - y), $MachinePrecision]], $MachinePrecision] * 0.5 + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left|x - y\right|, 0.5, x\right)
\end{array}
Initial program: 99.9%
+-commutative: N/A
div-inv: N/A
accelerator-lowering-fma.f64: N/A
fabs-lowering-fabs.f64: N/A
--lowering--.f64: N/A
metadata-eval: 99.9
Applied egg-rr: 99.9%
Final simplification: 99.9%
(FPCore (x y) :precision binary64 (if (<= (+ x (/ (fabs (- x y)) 2.0)) -1e-238) x (* 0.5 (fabs y))))
/* Branch variant: keep x when x + |x - y|/2 is deeply negative; otherwise |y| / 2. */
double code(double x, double y) {
    const double probe = x + fabs(x - y) / 2.0;
    if (probe <= -1e-238) {
        return x;
    }
    return 0.5 * fabs(y);
}
! Branch variant: keep x when x + |x - y|/2 is deeply negative; otherwise |y| / 2.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    real(8) :: tmp

    if ((x + (abs(x - y) / 2.0d0)) <= (-1d-238)) then
        tmp = x
    else
        tmp = 0.5d0 * abs(y)
    end if
    code = tmp
end function code
/** Branch variant: keeps x when x + |x - y|/2 is deeply negative; otherwise |y| / 2. */
public static double code(double x, double y) {
    final double probe = x + Math.abs(x - y) / 2.0;
    return (probe <= -1e-238) ? x : 0.5 * Math.abs(y);
}
def code(x, y):
    """Branch variant: x when x + |x - y|/2 is deeply negative, else |y| / 2.

    The generated report collapsed the ``if``/``else`` statements onto the
    ``def`` line, which is invalid Python syntax; this reflows it into proper
    statements with the same logic as the C/Java/Julia versions of this
    alternative.
    """
    tmp = 0
    if (x + (math.fabs(x - y) / 2.0)) <= -1e-238:
        tmp = x
    else:
        tmp = 0.5 * math.fabs(y)
    return tmp
# Branch variant: x when x + |x - y|/2 is deeply negative, else |y| / 2.
function code(x, y) tmp = 0.0 if (Float64(x + Float64(abs(Float64(x - y)) / 2.0)) <= -1e-238) tmp = x; else tmp = Float64(0.5 * abs(y)); end return tmp end
% Branch variant: x when x + |x - y|/2 is deeply negative, else |y| / 2.
function tmp_2 = code(x, y) tmp = 0.0; if ((x + (abs((x - y)) / 2.0)) <= -1e-238) tmp = x; else tmp = 0.5 * abs(y); end tmp_2 = tmp; end
(* Branch variant: x when x + |x - y|/2 <= -1e-238, else 0.5 * |y|. *)
code[x_, y_] := If[LessEqual[N[(x + N[(N[Abs[N[(x - y), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision], -1e-238], x, N[(0.5 * N[Abs[y], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x + \frac{\left|x - y\right|}{2} \leq -1 \cdot 10^{-238}:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;0.5 \cdot \left|y\right|\\
\end{array}
\end{array}
if (+.f64 x (/.f64 (fabs.f64 (-.f64 y x)) #s(literal 2 binary64))) < -9.9999999999999999e-239:
Initial program: 100.0%
Taylor expanded in x around inf
Simplified: 18.8%
if -9.9999999999999999e-239 < (+.f64 x (/.f64 (fabs.f64 (-.f64 y x)) #s(literal 2 binary64))) Initial program 99.8%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.8
Applied egg-rr99.8%
Taylor expanded in y around inf
Simplified70.7%
Taylor expanded in x around 0
*-lowering-*.f64N/A
fabs-lowering-fabs.f6465.5
Simplified65.5%
Final simplification53.7%
(FPCore (x y) :precision binary64 (if (<= y -1.35e-75) (fma (- x y) 0.5 x) (if (<= y 8.3e-128) (fma (fabs x) 0.5 x) (fma (- y x) 0.5 x))))
double code(double x, double y) {
double tmp;
if (y <= -1.35e-75) {
tmp = fma((x - y), 0.5, x);
} else if (y <= 8.3e-128) {
tmp = fma(fabs(x), 0.5, x);
} else {
tmp = fma((y - x), 0.5, x);
}
return tmp;
}
# Three-regime split on y; each regime is a single fma.
function code(x, y) tmp = 0.0 if (y <= -1.35e-75) tmp = fma(Float64(x - y), 0.5, x); elseif (y <= 8.3e-128) tmp = fma(abs(x), 0.5, x); else tmp = fma(Float64(y - x), 0.5, x); end return tmp end
(* Three-regime split on y: (x-y)*0.5+x, |x|*0.5+x, or (y-x)*0.5+x. *)
code[x_, y_] := If[LessEqual[y, -1.35e-75], N[(N[(x - y), $MachinePrecision] * 0.5 + x), $MachinePrecision], If[LessEqual[y, 8.3e-128], N[(N[Abs[x], $MachinePrecision] * 0.5 + x), $MachinePrecision], N[(N[(y - x), $MachinePrecision] * 0.5 + x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.35 \cdot 10^{-75}:\\
\;\;\;\;\mathsf{fma}\left(x - y, 0.5, x\right)\\
\mathbf{elif}\;y \leq 8.3 \cdot 10^{-128}:\\
\;\;\;\;\mathsf{fma}\left(\left|x\right|, 0.5, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(y - x, 0.5, x\right)\\
\end{array}
\end{array}
if y < -1.3499999999999999e-75Initial program 99.9%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
flip--N/A
div-invN/A
*-commutativeN/A
fabs-mulN/A
inv-powN/A
sqr-powN/A
fabs-sqrN/A
sqr-powN/A
inv-powN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
fabs-lowering-fabs.f64N/A
difference-of-squaresN/A
*-commutativeN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
+-lowering-+.f647.5
Applied egg-rr7.5%
Applied egg-rr85.1%
if -1.3499999999999999e-75 < y < 8.30000000000000015e-128Initial program 99.8%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.8
Applied egg-rr99.8%
Taylor expanded in y around 0
mul-1-negN/A
neg-lowering-neg.f6487.8
Simplified87.8%
if 8.30000000000000015e-128 < y Initial program 99.9%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
flip--N/A
div-invN/A
*-commutativeN/A
fabs-mulN/A
inv-powN/A
sqr-powN/A
fabs-sqrN/A
sqr-powN/A
inv-powN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
fabs-lowering-fabs.f64N/A
difference-of-squaresN/A
*-commutativeN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
+-lowering-+.f6447.9
Applied egg-rr47.9%
Applied egg-rr92.6%
Final simplification88.3%
(FPCore (x y) :precision binary64 (if (<= x -3.5e-156) (fma (- y x) 0.5 x) (if (<= x 3.25e-124) (fma (fabs y) 0.5 x) (fma (- x y) 0.5 x))))
double code(double x, double y) {
double tmp;
if (x <= -3.5e-156) {
tmp = fma((y - x), 0.5, x);
} else if (x <= 3.25e-124) {
tmp = fma(fabs(y), 0.5, x);
} else {
tmp = fma((x - y), 0.5, x);
}
return tmp;
}
# Three-regime split on x; each regime is a single fma.
function code(x, y) tmp = 0.0 if (x <= -3.5e-156) tmp = fma(Float64(y - x), 0.5, x); elseif (x <= 3.25e-124) tmp = fma(abs(y), 0.5, x); else tmp = fma(Float64(x - y), 0.5, x); end return tmp end
(* Three-regime split on x: (y-x)*0.5+x, |y|*0.5+x, or (x-y)*0.5+x. *)
code[x_, y_] := If[LessEqual[x, -3.5e-156], N[(N[(y - x), $MachinePrecision] * 0.5 + x), $MachinePrecision], If[LessEqual[x, 3.25e-124], N[(N[Abs[y], $MachinePrecision] * 0.5 + x), $MachinePrecision], N[(N[(x - y), $MachinePrecision] * 0.5 + x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.5 \cdot 10^{-156}:\\
\;\;\;\;\mathsf{fma}\left(y - x, 0.5, x\right)\\
\mathbf{elif}\;x \leq 3.25 \cdot 10^{-124}:\\
\;\;\;\;\mathsf{fma}\left(\left|y\right|, 0.5, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x - y, 0.5, x\right)\\
\end{array}
\end{array}
if x < -3.4999999999999999e-156Initial program 100.0%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
flip--N/A
div-invN/A
*-commutativeN/A
fabs-mulN/A
inv-powN/A
sqr-powN/A
fabs-sqrN/A
sqr-powN/A
inv-powN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
fabs-lowering-fabs.f64N/A
difference-of-squaresN/A
*-commutativeN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
+-lowering-+.f6418.9
Applied egg-rr18.9%
Applied egg-rr88.3%
if -3.4999999999999999e-156 < x < 3.24999999999999994e-124Initial program 99.9%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in y around inf
Simplified91.7%
if 3.24999999999999994e-124 < x Initial program 99.7%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.7
Applied egg-rr99.7%
flip--N/A
div-invN/A
*-commutativeN/A
fabs-mulN/A
inv-powN/A
sqr-powN/A
fabs-sqrN/A
sqr-powN/A
inv-powN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
fabs-lowering-fabs.f64N/A
difference-of-squaresN/A
*-commutativeN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
+-lowering-+.f6441.9
Applied egg-rr41.9%
Applied egg-rr84.3%
(FPCore (x y) :precision binary64 (if (<= y 2.25e-128) (fma (- x y) 0.5 x) (fma (fabs y) 0.5 x)))
double code(double x, double y) {
double tmp;
if (y <= 2.25e-128) {
tmp = fma((x - y), 0.5, x);
} else {
tmp = fma(fabs(y), 0.5, x);
}
return tmp;
}
# Two-regime split on y; both regimes are a single fma.
function code(x, y) tmp = 0.0 if (y <= 2.25e-128) tmp = fma(Float64(x - y), 0.5, x); else tmp = fma(abs(y), 0.5, x); end return tmp end
(* Two-regime split on y: (x-y)*0.5+x or |y|*0.5+x. *)
code[x_, y_] := If[LessEqual[y, 2.25e-128], N[(N[(x - y), $MachinePrecision] * 0.5 + x), $MachinePrecision], N[(N[Abs[y], $MachinePrecision] * 0.5 + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 2.25 \cdot 10^{-128}:\\
\;\;\;\;\mathsf{fma}\left(x - y, 0.5, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left|y\right|, 0.5, x\right)\\
\end{array}
\end{array}
if y < 2.25e-128Initial program 99.8%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.8
Applied egg-rr99.8%
flip--N/A
div-invN/A
*-commutativeN/A
fabs-mulN/A
inv-powN/A
sqr-powN/A
fabs-sqrN/A
sqr-powN/A
inv-powN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
fabs-lowering-fabs.f64N/A
difference-of-squaresN/A
*-commutativeN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
+-lowering-+.f6421.0
Applied egg-rr21.0%
Applied egg-rr72.8%
if 2.25e-128 < y Initial program 99.9%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in y around inf
Simplified79.8%
(FPCore (x y) :precision binary64 (fma (fabs y) 0.5 x))
double code(double x, double y) {
return fma(fabs(y), 0.5, x);
}
# Single-fma approximation: |y| * 0.5 + x.
function code(x, y) return fma(abs(y), 0.5, x) end
(* Single-term approximation: |y| * 0.5 + x. *)
code[x_, y_] := N[(N[Abs[y], $MachinePrecision] * 0.5 + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left|y\right|, 0.5, x\right)
\end{array}
Initial program 99.9%
+-commutativeN/A
div-invN/A
accelerator-lowering-fma.f64N/A
fabs-lowering-fabs.f64N/A
--lowering--.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in y around inf
Simplified57.5%
(FPCore (x y) :precision binary64 x)
/* Degenerate approximation: return x unchanged; y is intentionally unused. */
double code(double x, double y) {
    (void) y; /* silence unused-parameter warnings */
    return x;
}
! Degenerate approximation: returns x unchanged; y is intentionally ignored.
real(8) function code(x, y)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    code = x
end function code
// Degenerate approximation: returns x unchanged; y is intentionally ignored.
public static double code(double x, double y) {
return x;
}
# Degenerate approximation: returns x unchanged (y is ignored).
def code(x, y): return x
# Degenerate approximation: returns x unchanged (y is ignored).
function code(x, y) return x end
% Degenerate approximation: returns x unchanged (y is ignored).
function tmp = code(x, y) tmp = x; end
(* Degenerate approximation: returns x unchanged (y is ignored). *)
code[x_, y_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 99.9%
Taylor expanded in x around inf
Simplified11.3%
herbie shell --seed 2024204
; Original input program submitted to the Herbie shell (seed above).
(FPCore (x y)
:name "Graphics.Rendering.Chart.Plot.AreaSpots:renderSpotLegend from Chart-1.5.3"
:precision binary64
(+ x (/ (fabs (- y x)) 2.0)))