
(FPCore (x) :precision binary64 (/ (- (* x x) 3.0) 6.0))
// Evaluates (x*x - 3) / 6 in IEEE binary64, the direct translation of the
// FPCore above. Operation order matches the FPCore exactly (multiply,
// subtract, divide), so rounding behavior is reproducible.
double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
! Evaluates (x*x - 3) / 6 in double precision (real(8)).
! The d0 suffixes keep the literals in double precision so no
! single-precision intermediate is introduced.
real(8) function code(x)
real(8), intent (in) :: x
code = ((x * x) - 3.0d0) / 6.0d0
end function
// Evaluates (x*x - 3) / 6 in IEEE binary64; Java doubles follow the same
// rounding as the FPCore specification above.
public static double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
def code(x):
    # Evaluate (x*x - 3) / 6 in double precision.
    # Same operation order as the FPCore (multiply, subtract, divide),
    # so every intermediate rounds identically to the one-line original.
    square = x * x
    return (square - 3.0) / 6.0
# Evaluates (x*x - 3) / 6; each intermediate is wrapped in Float64 to pin
# the computation to binary64 regardless of the input's numeric type.
function code(x) return Float64(Float64(Float64(x * x) - 3.0) / 6.0) end
% Evaluates (x*x - 3) / 6 in MATLAB's default double precision.
function tmp = code(x) tmp = ((x * x) - 3.0) / 6.0; end
(* Evaluates (x*x - 3) / 6; each step is forced to $MachinePrecision with N
   so the symbolic engine mimics binary64 evaluation. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] - 3.0), $MachinePrecision] / 6.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot x - 3}{6}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- (* x x) 3.0) 6.0))
// Alternative 1: identical to the initial program — (x*x - 3) / 6 in
// binary64, translated directly from the FPCore above.
double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
! Alternative 1: (x*x - 3) / 6 in double precision; d0-suffixed literals
! keep all arithmetic in real(8).
real(8) function code(x)
real(8), intent (in) :: x
code = ((x * x) - 3.0d0) / 6.0d0
end function
// Alternative 1: (x*x - 3) / 6 evaluated in binary64 doubles.
public static double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
def code(x):
    # Alternative 1: direct translation of (/ (- (* x x) 3.0) 6.0).
    # Numerator is built first, then scaled — identical rounding to the
    # original single-expression form.
    numerator = (x * x) - 3.0
    return numerator / 6.0
# Alternative 1: (x*x - 3) / 6 with every intermediate pinned to Float64.
function code(x) return Float64(Float64(Float64(x * x) - 3.0) / 6.0) end
% Alternative 1: (x*x - 3) / 6 in double precision.
function tmp = code(x) tmp = ((x * x) - 3.0) / 6.0; end
(* Alternative 1: (x*x - 3) / 6, each step forced to $MachinePrecision. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] - 3.0), $MachinePrecision] / 6.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot x - 3}{6}
\end{array}
(FPCore (x) :precision binary64 (/ (fma x x -3.0) 6.0))
// Alternative 2: fma(x, x, -3.0) computes x*x - 3 with a single rounding
// (fused multiply-add), then divides by 6. Requires <math.h> and a
// platform fma; matches the FPCore (/ (fma x x -3.0) 6.0) above.
double code(double x) {
return fma(x, x, -3.0) / 6.0;
}
# Alternative 2: fused multiply-add gives x*x - 3 in one rounding, then
# the quotient is pinned to Float64.
function code(x) return Float64(fma(x, x, -3.0) / 6.0) end
(* Alternative 2: x*x + (-3) evaluated as one N[...] step (standing in for
   the fused multiply-add), then divided by 6 at $MachinePrecision. *)
code[x_] := N[(N[(x * x + -3.0), $MachinePrecision] / 6.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x, x, -3\right)}{6}
\end{array}
Initial program 99.9%
sub-neg N/A
accelerator-lowering-fma.f64 N/A
metadata-eval 99.9%
Applied egg-rr 99.9%
(FPCore (x) :precision binary64 (fma (* x 0.16666666666666666) x -0.5))
// Alternative 3: the division by 6 is folded into a multiplication by
// 0.16666666666666666 (the binary64 value nearest 1/6), and the whole
// expression becomes one fma: (x/6)*x - 0.5. Requires <math.h>.
double code(double x) {
return fma((x * 0.16666666666666666), x, -0.5);
}
# Alternative 3: one fma computing (x * (1/6)) * x - 0.5; the inner
# product is pinned to Float64 before the fused step.
function code(x) return fma(Float64(x * 0.16666666666666666), x, -0.5) end
(* Alternative 3: (x * 1/6) * x - 0.5 with the inner product rounded at
   $MachinePrecision, mirroring the fma form above. *)
code[x_] := N[(N[(x * 0.16666666666666666), $MachinePrecision] * x + -0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot 0.16666666666666666, x, -0.5\right)
\end{array}
Initial program 99.9%
div-sub N/A
sub-neg N/A
associate-/l* N/A
*-commutative N/A
accelerator-lowering-fma.f64 N/A
div-inv N/A
*-lowering-*.f64 N/A
metadata-eval N/A
metadata-eval N/A
metadata-eval 99.9%
Applied egg-rr 99.9%
(FPCore (x) :precision binary64 -0.5)
// Alternative 4: constant approximation. Taylor expansion of
// (x*x - 3)/6 around x = 0 keeps only the constant term -1/2, so the
// input is intentionally unused. Accurate only near x = 0 (51.3% overall
// per the report below).
double code(double x) {
return -0.5;
}
! Alternative 4: constant approximation -1/2 from the Taylor expansion of
! (x*x - 3)/6 around x = 0; the argument x is intentionally unused.
real(8) function code(x)
real(8), intent (in) :: x
code = -0.5d0
end function
// Alternative 4: constant approximation -1/2 (Taylor expansion around 0);
// the parameter x is intentionally ignored.
public static double code(double x) {
return -0.5;
}
def code(x):
    # Alternative 4: the Taylor expansion of (x*x - 3)/6 around x = 0
    # collapses to the constant -1/2, so the argument is deliberately
    # ignored. Only accurate for x near 0.
    return -0.5
# Alternative 4: constant -1/2 (Taylor expansion around 0); x is unused.
function code(x) return -0.5 end
% Alternative 4: constant -1/2 (Taylor expansion around 0); x is unused.
function tmp = code(x) tmp = -0.5; end
(* Alternative 4: constant -1/2 (Taylor expansion around 0); x is unused. *)
code[x_] := -0.5
\begin{array}{l}
\\
-0.5
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
Simplified 51.3%
herbie shell --seed 2024193
(FPCore (x)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, H"
:precision binary64
(/ (- (* x x) 3.0) 6.0))