
(FPCore (a b) :precision binary64 (/ (fabs (- a b)) 2.0))
/* Returns half of the absolute difference between a and b: |a - b| / 2. */
double code(double a, double b) {
    double delta = a - b;
    return fabs(delta) / 2.0;
}
real(8) function code(a, b)
    ! Half the absolute difference: |a - b| / 2, in binary64.
    ! Fix: added implicit none so no variable can be implicitly typed.
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = abs((a - b)) / 2.0d0
end function
/** Returns half of the absolute difference between a and b. */
public static double code(double a, double b) {
    final double delta = a - b;
    return Math.abs(delta) / 2.0;
}
def code(a, b):
    """Return half the absolute difference of a and b as a float."""
    delta = a - b
    return math.fabs(delta) / 2.0
# Half the absolute difference |a - b| / 2, computed in Float64.
function code(a, b)
    delta = Float64(a - b)
    return Float64(abs(delta) / 2.0)
end
function tmp = code(a, b)
    % Half the absolute difference of a and b.
    delta = a - b;
    tmp = abs(delta) / 2.0;
end
(* Half the absolute difference |a - b| / 2; each subexpression is rounded to $MachinePrecision. *)
code[a_, b_] := N[(N[Abs[N[(a - b), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|a - b\right|}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b) :precision binary64 (/ (fabs (- a b)) 2.0))
/* |a - b| / 2, computed in binary64. */
double code(double a, double b) {
    const double gap = fabs(a - b);
    return gap / 2.0;
}
real(8) function code(a, b)
    ! Half the absolute difference: |a - b| / 2, in binary64.
    ! Fix: added implicit none so no variable can be implicitly typed.
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = abs((a - b)) / 2.0d0
end function
/** Computes |a - b| / 2 in double precision. */
public static double code(double a, double b) {
    double gap = Math.abs(a - b);
    return gap / 2.0;
}
def code(a, b):
    """Return |a - b| / 2 as a float."""
    gap = math.fabs(a - b)
    return gap / 2.0
# |a - b| / 2 evaluated in Float64.
function code(a, b)
    gap = abs(Float64(a - b))
    return Float64(gap / 2.0)
end
function tmp = code(a, b)
    % |a - b| / 2
    gap = abs(a - b);
    tmp = gap / 2.0;
end
(* Half the absolute difference |a - b| / 2; each subexpression is rounded to $MachinePrecision. *)
code[a_, b_] := N[(N[Abs[N[(a - b), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|a - b\right|}{2}
\end{array}
(FPCore (a b) :precision binary64 (* 0.5 (fabs (- a b))))
/* Half the absolute difference, expressed as 0.5 * |a - b|. */
double code(double a, double b) {
    const double magnitude = fabs(a - b);
    return 0.5 * magnitude;
}
real(8) function code(a, b)
    ! Half the absolute difference, expressed as 0.5 * |a - b|.
    ! Fix: added implicit none so no variable can be implicitly typed.
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = 0.5d0 * abs((a - b))
end function
/** Half the absolute difference, expressed as 0.5 * |a - b|. */
public static double code(double a, double b) {
    double magnitude = Math.abs(a - b);
    return 0.5 * magnitude;
}
def code(a, b):
    """Return 0.5 * |a - b|."""
    magnitude = math.fabs(a - b)
    return 0.5 * magnitude
# 0.5 * |a - b| evaluated in Float64.
function code(a, b)
    magnitude = abs(Float64(a - b))
    return Float64(0.5 * magnitude)
end
function tmp = code(a, b)
    % 0.5 * |a - b|
    magnitude = abs(a - b);
    tmp = 0.5 * magnitude;
end
(* 0.5 * |a - b|; each subexpression is rounded to $MachinePrecision. *)
code[a_, b_] := N[(0.5 * N[Abs[N[(a - b), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \left|a - b\right|
\end{array}
Initial program 100.0%
lift-/.f64 N/A
div-inv N/A
lower-*.f64 N/A
lift-fabs.f64 N/A
neg-fabs N/A
lower-fabs.f64 N/A
lift--.f64 N/A
sub-neg N/A
+-commutative N/A
distribute-neg-in N/A
remove-double-neg N/A
sub-neg N/A
lower--.f64 N/A
metadata-eval 100.0
Applied rewrites 100.0%
Final simplification 100.0%
(FPCore (a b) :precision binary64 (* (fabs a) 0.5))
/* Approximation around b = 0: returns |a| * 0.5 (b is intentionally unused). */
double code(double a, double b) {
    (void) b; /* dropped by the Taylor expansion in b */
    return fabs(a) * 0.5;
}
real(8) function code(a, b)
    ! Approximation around b = 0: |a| * 0.5 (b is intentionally unused;
    ! the signature is kept for interface compatibility).
    ! Fix: added implicit none so no variable can be implicitly typed.
    implicit none
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = abs(a) * 0.5d0
end function
/** Approximation around b = 0: returns |a| * 0.5 (b is intentionally unused). */
public static double code(double a, double b) {
    final double halfMagnitude = Math.abs(a) * 0.5;
    return halfMagnitude;
}
def code(a, b):
    """Approximation around b = 0: return |a| * 0.5 (b is intentionally unused)."""
    half_magnitude = math.fabs(a) * 0.5
    return half_magnitude
# Approximation around b = 0: |a| * 0.5 (b is intentionally unused).
function code(a, b)
    half_magnitude = abs(a) * 0.5
    return Float64(half_magnitude)
end
function tmp = code(a, b)
    % Approximation around b = 0: |a| * 0.5 (b is intentionally unused).
    tmp = abs(a) * 0.5;
end
(* Approximation around b = 0: |a| * 0.5, rounded to $MachinePrecision; b is intentionally unused. *)
code[a_, b_] := N[(N[Abs[a], $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left|a\right| \cdot 0.5
\end{array}
Initial program 100.0%
lift-/.f64 N/A
div-inv N/A
lower-*.f64 N/A
lift-fabs.f64 N/A
neg-fabs N/A
lower-fabs.f64 N/A
lift--.f64 N/A
sub-neg N/A
+-commutative N/A
distribute-neg-in N/A
remove-double-neg N/A
sub-neg N/A
lower--.f64 N/A
metadata-eval 100.0
Applied rewrites 100.0%
Taylor expanded in b around 0
mul-1-neg N/A
lower-neg.f64 49.4
Applied rewrites 49.4%
Applied rewrites 49.4%
herbie shell --seed 2024242
(FPCore (a b)
:name "fabs fraction 2"
:precision binary64
(/ (fabs (- a b)) 2.0))