
(FPCore (a b)
 :precision binary64
 (/ (fabs (- a b)) 2.0))
double code(double a, double b) {
    /* Half the absolute difference of a and b. */
    const double delta = a - b;
    return fabs(delta) / 2.0;
}
real(8) function code(a, b)
    ! Half the absolute difference of a and b.
    implicit none
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8) :: delta
    delta = a - b
    code = abs(delta) / 2.0d0
end function code
public static double code(double a, double b) {
    // Half of |a - b|.
    double halfSpan = Math.abs(a - b) / 2.0;
    return halfSpan;
}
def code(a, b):
    """Return half the absolute difference of a and b."""
    return math.fabs(a - b) / 2.0
function code(a, b)
    # Half the absolute difference of a and b, in Float64.
    delta = Float64(a - b)
    return Float64(abs(delta) / 2.0)
end
function tmp = code(a, b)
    % Half the absolute difference of a and b.
    delta = a - b;
    tmp = abs(delta) / 2.0;
end
(* Half of |a - b|; each intermediate is numericized at $MachinePrecision. *)
code[a_, b_] := N[(N[Abs[N[(a - b), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|a - b\right|}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b)
 :precision binary64
 (/ (fabs (- a b)) 2.0))
double code(double a, double b) {
    /* Half of |a - b|. */
    double gap = fabs(a - b);
    return gap / 2.0;
}
real(8) function code(a, b)
    ! Half of |a - b|.
    implicit none
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    code = abs(a - b) / 2.0d0
end function code
public static double code(double a, double b) {
    // Half of |a - b|.
    final double gap = Math.abs(a - b);
    return gap / 2.0;
}
def code(a, b):
    """Half of |a - b|."""
    gap = math.fabs(a - b)
    return gap / 2.0
function code(a, b)
    # Half of |a - b|, in Float64.
    gap = abs(Float64(a - b))
    return Float64(gap / 2.0)
end
function tmp = code(a, b)
    % Half of |a - b|.
    gap = abs(a - b);
    tmp = gap / 2.0;
end
(* Half of |a - b|; each intermediate is numericized at $MachinePrecision. *)
code[a_, b_] := N[(N[Abs[N[(a - b), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|a - b\right|}{2}
\end{array}
(FPCore (a b)
 :precision binary64
 (* (fabs (- a b)) 0.5))
double code(double a, double b) {
    /* |a - b| scaled by one half. */
    const double magnitude = fabs(a - b);
    return magnitude * 0.5;
}
real(8) function code(a, b)
    ! |a - b| scaled by one half.
    implicit none
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8) :: magnitude
    magnitude = abs(a - b)
    code = magnitude * 0.5d0
end function code
public static double code(double a, double b) {
    // |a - b| scaled by one half.
    final double magnitude = Math.abs(a - b);
    return magnitude * 0.5;
}
def code(a, b):
    """Return |a - b| scaled by 0.5."""
    magnitude = math.fabs(a - b)
    return magnitude * 0.5
function code(a, b)
    # |a - b| scaled by one half, in Float64.
    magnitude = abs(Float64(a - b))
    return Float64(magnitude * 0.5)
end
function tmp = code(a, b)
    % |a - b| scaled by one half.
    magnitude = abs(a - b);
    tmp = magnitude * 0.5;
end
(* |a - b| scaled by 0.5; each intermediate is numericized at $MachinePrecision. *)
code[a_, b_] := N[(N[Abs[N[(a - b), $MachinePrecision]], $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left|a - b\right| \cdot 0.5
\end{array}
Initial program 100.0%
lift-/.f64 N/A
div-inv N/A
lower-*.f64 N/A
metadata-eval 100.0
Applied rewrites 100.0%
(FPCore (a b)
 :precision binary64
 (* 0.5 (fabs b)))
double code(double a, double b) {
    /* Herbie alternative: a is unused by design; result is 0.5 * |b|. */
    (void)a;
    return 0.5 * fabs(b);
}
real(8) function code(a, b)
    ! Herbie alternative: a is unused by design; result is 0.5 * |b|.
    implicit none
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    code = 0.5d0 * abs(b)
end function code
public static double code(double a, double b) {
    // Herbie alternative: a is unused by design; result is 0.5 * |b|.
    final double halfAbsB = 0.5 * Math.abs(b);
    return halfAbsB;
}
def code(a, b):
    """Herbie alternative: a is unused by design; returns 0.5 * |b|."""
    return 0.5 * math.fabs(b)
function code(a, b)
    # Herbie alternative: a is unused by design; returns 0.5 * |b|.
    half_abs_b = 0.5 * abs(b)
    return Float64(half_abs_b)
end
function tmp = code(a, b)
    % Herbie alternative: a is unused by design; result is 0.5 * |b|.
    tmp = 0.5 * abs(b);
end
(* Herbie alternative: a is unused by design; result is 0.5 * |b| at $MachinePrecision. *)
code[a_, b_] := N[(0.5 * N[Abs[b], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \left|b\right|
\end{array}
Initial program 100.0%
lift-/.f64 N/A
div-inv N/A
lower-*.f64 N/A
metadata-eval 100.0
Applied rewrites 100.0%
Taylor expanded in a around 0
mul-1-neg N/A
lower-neg.f64 48.5
Applied rewrites 48.5%
Applied rewrites 48.5%
Final simplification 48.5%
herbie shell --seed 2024234
(FPCore (a b) :name "fabs fraction 2" :precision binary64 (/ (fabs (- a b)) 2.0))