
; Original expression: x + |y - x| / 2 in double precision
; (the midpoint of [x, y] when y >= x).
(FPCore (x y) :precision binary64 (+ x (/ (fabs (- y x)) 2.0)))
/* Returns x plus half the absolute difference |y - x|.
 * When y >= x this is the midpoint of [x, y]. */
double code(double x, double y) {
    double half_gap = fabs(y - x) / 2.0;
    return x + half_gap;
}
! Returns x plus half the absolute distance |y - x| (the midpoint of
! [x, y] when y >= x), evaluated in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
! abs(y - x) is the gap between the inputs; half of it is added to x.
code = x + (abs((y - x)) / 2.0d0)
end function
/** Returns x plus half the absolute difference |y - x|. */
public static double code(double x, double y) {
    double halfGap = Math.abs(y - x) / 2.0;
    return x + halfGap;
}
def code(x, y):
    """Return x plus half the absolute difference |y - x|."""
    half_gap = math.fabs(y - x) / 2.0
    return x + half_gap
# x plus half the absolute difference |y - x|, computed in Float64.
function code(x, y)
    half_gap = Float64(abs(Float64(y - x)) / 2.0)
    return Float64(x + half_gap)
end
% Returns x plus half the absolute difference |y - x|.
function tmp = code(x, y) tmp = x + (abs((y - x)) / 2.0); end
(* x + |y - x| / 2, with each intermediate rounded to $MachinePrecision. *)
code[x_, y_] := N[(x + N[(N[Abs[N[(y - x), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{\left|y - x\right|}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the input, x + |y - x| / 2.
(FPCore (x y) :precision binary64 (+ x (/ (fabs (- y x)) 2.0)))
/* x offset by half of |y - x| (midpoint of [x, y] when y >= x). */
double code(double x, double y) {
    double gap = fabs(y - x);
    return x + gap / 2.0;
}
! Returns x plus half the absolute distance |y - x| in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x + (abs((y - x)) / 2.0d0)
end function
/** x offset by half of the absolute gap |y - x|. */
public static double code(double x, double y) {
    double gap = Math.abs(y - x);
    return x + gap / 2.0;
}
def code(x, y):
    """x offset by half of |y - x| (midpoint of [x, y] when y >= x)."""
    gap = math.fabs(y - x)
    return x + gap / 2.0
# x offset by half of |y - x|, Float64 throughout.
function code(x, y)
    gap = abs(Float64(y - x))
    return Float64(x + Float64(gap / 2.0))
end
% Returns x plus half the absolute difference |y - x|.
function tmp = code(x, y) tmp = x + (abs((y - x)) / 2.0); end
(* x + |y - x| / 2, rounded to $MachinePrecision at each step. *)
code[x_, y_] := N[(x + N[(N[Abs[N[(y - x), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \frac{\left|y - x\right|}{2}
\end{array}
; Alternative 2: same quantity via a fused multiply-add,
; |y - x| * 0.5 + x with a single rounding.
(FPCore (x y) :precision binary64 (fma (fabs (- y x)) 0.5 x))
double code(double x, double y) {
return fma(fabs((y - x)), 0.5, x);
}
# Same quantity via a fused multiply-add: |y - x| * 0.5 + x.
function code(x, y)
    gap = abs(Float64(y - x))
    return fma(gap, 0.5, x)
end
(* |y - x| * 0.5 + x, rounded to $MachinePrecision at each step. *)
code[x_, y_] := N[(N[Abs[N[(y - x), $MachinePrecision]], $MachinePrecision] * 0.5 + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left|y - x\right|, 0.5, x\right)
\end{array}
Initial program 99.9%
lift--.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
+-commutativeN/A
lift-/.f64N/A
div-invN/A
lower-fma.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
; Alternative 3: piecewise form; below the -5e-284 threshold it uses
; fma(|x|, 0.5, x), otherwise |y - x| * 0.5.
(FPCore (x y) :precision binary64 (let* ((t_0 (fabs (- y x)))) (if (<= (+ x (/ t_0 2.0)) -5e-284) (fma (fabs x) 0.5 x) (* t_0 0.5))))
double code(double x, double y) {
double t_0 = fabs((y - x));
double tmp;
if ((x + (t_0 / 2.0)) <= -5e-284) {
tmp = fma(fabs(x), 0.5, x);
} else {
tmp = t_0 * 0.5;
}
return tmp;
}
# Piecewise alternative: fma(|x|, 0.5, x) when x + |y - x|/2 <= -5e-284,
# otherwise |y - x| * 0.5.
function code(x, y)
    gap = abs(Float64(y - x))
    out = 0.0
    if Float64(x + Float64(gap / 2.0)) <= -5e-284
        out = fma(abs(x), 0.5, x)
    else
        out = Float64(gap * 0.5)
    end
    return out
end
(* Piecewise form: fma branch below the -5e-284 threshold, t_0 * 0.5 otherwise. *)
code[x_, y_] := Block[{t$95$0 = N[Abs[N[(y - x), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(x + N[(t$95$0 / 2.0), $MachinePrecision]), $MachinePrecision], -5e-284], N[(N[Abs[x], $MachinePrecision] * 0.5 + x), $MachinePrecision], N[(t$95$0 * 0.5), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left|y - x\right|\\
\mathbf{if}\;x + \frac{t_0}{2} \leq -5 \cdot 10^{-284}:\\
\;\;\;\;\mathsf{fma}\left(\left|x\right|, 0.5, x\right)\\
\mathbf{else}:\\
\;\;\;\;t_0 \cdot 0.5\\
\end{array}
\end{array}
if (+.f64 x (/.f64 (fabs.f64 (-.f64 y x)) #s(literal 2 binary64))) < -4.99999999999999973e-284
Initial program 100.0%
lift--.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
+-commutativeN/A
lift-/.f64N/A
div-invN/A
lower-fma.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in y around 0
mul-1-negN/A
lower-neg.f6498.6
Simplified98.6%
fabs-negN/A
lower-fabs.f6498.6
Applied egg-rr98.6%
if -4.99999999999999973e-284 < (+.f64 x (/.f64 (fabs.f64 (-.f64 y x)) #s(literal 2 binary64)))
Initial program 99.9%
Taylor expanded in x around 0
sub-negN/A
mul-1-negN/A
lower-*.f64N/A
mul-1-negN/A
remove-double-negN/A
mul-1-negN/A
distribute-neg-inN/A
+-commutativeN/A
lower-fabs.f64N/A
+-commutativeN/A
distribute-neg-inN/A
mul-1-negN/A
remove-double-negN/A
sub-negN/A
lower--.f6473.8
Simplified73.8%
Final simplification80.5%
; Alternative 4: Taylor-simplified form fma(|x|, 0.5, x); y is unused.
(FPCore (x y) :precision binary64 (fma (fabs x) 0.5 x))
double code(double x, double y) {
return fma(fabs(x), 0.5, x);
}
# Simplified alternative: fma(|x|, 0.5, x); the y argument is unused.
function code(x, y)
    return fma(abs(x), 0.5, x)
end
(* |x| * 0.5 + x, rounded to $MachinePrecision; y is unused. *)
code[x_, y_] := N[(N[Abs[x], $MachinePrecision] * 0.5 + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left|x\right|, 0.5, x\right)
\end{array}
Initial program 99.9%
lift--.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
+-commutativeN/A
lift-/.f64N/A
div-invN/A
lower-fma.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in y around 0
mul-1-negN/A
lower-neg.f6450.8
Simplified50.8%
fabs-negN/A
lower-fabs.f6450.8
Applied egg-rr50.8%
; Alternative 5: fully simplified to x * 0.75; y is unused.
(FPCore (x y) :precision binary64 (* x 0.75))
/* Fully simplified alternative: 0.75 * x; the y argument is unused. */
double code(double x, double y) {
    return 0.75 * x;
}
! Fully simplified alternative: x * 0.75; the y argument is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x * 0.75d0
end function
/** Fully simplified alternative: 0.75 * x; the y argument is unused. */
public static double code(double x, double y) {
    return 0.75 * x;
}
def code(x, y):
    """Fully simplified alternative: 0.75 * x; y is unused."""
    return 0.75 * x
# Fully simplified alternative: 0.75 * x; y is unused.
function code(x, y)
    return Float64(0.75 * x)
end
% Fully simplified alternative: x * 0.75; y is unused.
function tmp = code(x, y) tmp = x * 0.75; end
(* x * 0.75, rounded to $MachinePrecision; y is unused. *)
code[x_, y_] := N[(x * 0.75), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 0.75
\end{array}
Initial program 99.9%
lift--.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
flip-+N/A
lower-/.f64N/A
Applied egg-rr57.7%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f6411.4
Simplified11.4%
herbie shell --seed 2024207
; Reproduction input: the original expression with its source name.
(FPCore (x y)
:name "Graphics.Rendering.Chart.Plot.AreaSpots:renderSpotLegend from Chart-1.5.3"
:precision binary64
(+ x (/ (fabs (- y x)) 2.0)))