
(FPCore (x) :precision binary64 (/ (- (* x x) 3.0) 6.0))
/* Initial program: evaluate (x*x - 3) / 6 in binary64. */
double code(double x) {
    const double x_squared = x * x;
    return (x_squared - 3.0) / 6.0;
}
! Initial program: (x*x - 3) / 6 evaluated in double precision.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: x_squared
x_squared = x * x
code = (x_squared - 3.0d0) / 6.0d0
end function
/** Initial program: (x*x - 3) / 6 in double precision. */
public static double code(double x) {
    final double squared = x * x;
    return (squared - 3.0) / 6.0;
}
def code(x):
    """Return (x*x - 3.0) / 6.0, the binary64 initial program."""
    numerator = (x * x) - 3.0
    return numerator / 6.0
# Initial program: (x*x - 3) / 6 with every step rounded to Float64.
function code(x)
    squared = Float64(x * x)
    shifted = Float64(squared - 3.0)
    return Float64(shifted / 6.0)
end
function tmp = code(x)
    % Initial program: (x*x - 3) / 6.
    tmp = ((x * x) - 3.0) / 6.0;
end
(* Initial program: (x*x - 3) / 6, each step rounded via N[..., $MachinePrecision]. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] - 3.0), $MachinePrecision] / 6.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot x - 3}{6}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- (* x x) 3.0) 6.0))
/* Duplicate listing of the initial program: (x*x - 3) / 6 in binary64. */
double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
! Duplicate listing of the initial program: (x*x - 3) / 6, double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = ((x * x) - 3.0d0) / 6.0d0
end function
/** Duplicate listing of the initial program: (x*x - 3) / 6. */
public static double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
# Duplicate listing of the initial program: (x*x - 3.0) / 6.0.
def code(x): return ((x * x) - 3.0) / 6.0
# Duplicate listing of the initial program, rounded stepwise to Float64.
function code(x) return Float64(Float64(Float64(x * x) - 3.0) / 6.0) end
% Duplicate listing of the initial program: (x*x - 3) / 6.
function tmp = code(x) tmp = ((x * x) - 3.0) / 6.0; end
(* Duplicate listing of the initial program with machine-precision rounding. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] - 3.0), $MachinePrecision] / 6.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot x - 3}{6}
\end{array}
(FPCore (x) :precision binary64 (/ (- (* x x) 3.0) 6.0))
/* Duplicate listing of the initial program: (x*x - 3) / 6 in binary64. */
double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
! Duplicate listing of the initial program: (x*x - 3) / 6, double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = ((x * x) - 3.0d0) / 6.0d0
end function
/** Duplicate listing of the initial program: (x*x - 3) / 6. */
public static double code(double x) {
return ((x * x) - 3.0) / 6.0;
}
# Duplicate listing of the initial program: (x*x - 3.0) / 6.0.
def code(x): return ((x * x) - 3.0) / 6.0
# Duplicate listing of the initial program, rounded stepwise to Float64.
function code(x) return Float64(Float64(Float64(x * x) - 3.0) / 6.0) end
% Duplicate listing of the initial program: (x*x - 3) / 6.
function tmp = code(x) tmp = ((x * x) - 3.0) / 6.0; end
(* Duplicate listing of the initial program with machine-precision rounding. *)
code[x_] := N[(N[(N[(x * x), $MachinePrecision] - 3.0), $MachinePrecision] / 6.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot x - 3}{6}
\end{array}
Initial program 99.9%
(FPCore (x) :precision binary64 (if (<= (* x x) 3.0) -0.5 (* 0.16666666666666666 (* x x))))
/* Herbie alternative: constant -0.5 when x*x <= 3, else x*x / 6
 * computed as 0.16666666666666666 * x*x. */
double code(double x) {
    const double x_squared = x * x;
    return (x_squared <= 3.0) ? -0.5
                              : 0.16666666666666666 * x_squared;
}
! Herbie alternative: -0.5 when x*x <= 3, else 0.16666666666666666 * x*x.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: x_squared
x_squared = x * x
if (x_squared <= 3.0d0) then
code = -0.5d0
else
code = 0.16666666666666666d0 * x_squared
end if
end function
/** Herbie alternative: -0.5 below the crossover x*x <= 3, scaled square above. */
public static double code(double x) {
    final double squared = x * x;
    return (squared <= 3.0) ? -0.5 : 0.16666666666666666 * squared;
}
def code(x):
    """Piecewise Herbie alternative: -0.5 when x*x <= 3.0, else
    0.16666666666666666 * (x*x).

    Fix: the original was an invalid single-line collapse of an if/else
    block (a SyntaxError in Python); reformatted into valid code with
    identical branch logic and the same return variable.
    """
    tmp = 0
    if (x * x) <= 3.0:
        tmp = -0.5
    else:
        tmp = 0.16666666666666666 * (x * x)
    return tmp
# Piecewise Herbie alternative: -0.5 when x*x <= 3, else 0.16666666666666666 * x*x.
function code(x)
    x_squared = Float64(x * x)
    if x_squared <= 3.0
        return -0.5
    end
    return Float64(0.16666666666666666 * x_squared)
end
function tmp_2 = code(x)
    % Piecewise Herbie alternative: -0.5 when x*x <= 3, else 0.16666666666666666 * x*x.
    tmp = 0.0;
    if ((x * x) <= 3.0)
        tmp = -0.5;
    else
        tmp = 0.16666666666666666 * (x * x);
    end
    tmp_2 = tmp;
end
(* Piecewise Herbie alternative: -0.5 when x*x <= 3, else 0.16666666666666666 * x*x. *)
code[x_] := If[LessEqual[N[(x * x), $MachinePrecision], 3.0], -0.5, N[(0.16666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \cdot x \leq 3:\\
\;\;\;\;-0.5\\
\mathbf{else}:\\
\;\;\;\;0.16666666666666666 \cdot \left(x \cdot x\right)\\
\end{array}
\end{array}
if (*.f64 x x) < 3
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites99.2%
if 3 < (*.f64 x x)
Initial program 99.8%
Taylor expanded in x around inf
lower-*.f64N/A
unpow2N/A
lower-*.f64 98.8
Applied rewrites98.8%
(FPCore (x) :precision binary64 (fma (/ x 6.0) x -0.5))
double code(double x) {
return fma((x / 6.0), x, -0.5);
}
# Herbie alternative: fused multiply-add of (x/6) * x - 0.5.
function code(x)
    sixth_of_x = Float64(x / 6.0)
    return fma(sixth_of_x, x, -0.5)
end
(* Herbie alternative: (x/6) * x - 0.5 with machine-precision rounding. *)
code[x_] := N[(N[(x / 6.0), $MachinePrecision] * x + -0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{x}{6}, x, -0.5\right)
\end{array}
Initial program 99.9%
lift-/.f64N/A
lift--.f64N/A
div-subN/A
sub-negN/A
lift-*.f64N/A
associate-/l*N/A
*-commutativeN/A
lower-fma.f64N/A
div-invN/A
lower-*.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-eval99.9
Applied rewrites99.9%
lift-*.f64N/A
metadata-evalN/A
div-invN/A
lower-/.f64 99.9
Applied rewrites99.9%
(FPCore (x) :precision binary64 (fma (* 0.16666666666666666 x) x -0.5))
double code(double x) {
return fma((0.16666666666666666 * x), x, -0.5);
}
# Herbie alternative: fma of (0.16666666666666666 * x) * x - 0.5.
function code(x)
    scaled_x = Float64(0.16666666666666666 * x)
    return fma(scaled_x, x, -0.5)
end
(* Herbie alternative: (0.16666666666666666 * x) * x - 0.5 with machine-precision rounding. *)
code[x_] := N[(N[(0.16666666666666666 * x), $MachinePrecision] * x + -0.5), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(0.16666666666666666 \cdot x, x, -0.5\right)
\end{array}
Initial program 99.9%
lift-/.f64N/A
lift--.f64N/A
div-subN/A
sub-negN/A
lift-*.f64N/A
associate-/l*N/A
*-commutativeN/A
lower-fma.f64N/A
div-invN/A
lower-*.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-eval99.9
Applied rewrites99.9%
Final simplification99.9%
(FPCore (x) :precision binary64 -0.5)
/* Degenerate Herbie alternative: constant -0.5 regardless of x
 * (Taylor expansion around 0, accuracy drops to 45.8%). */
double code(double x) {
    (void)x; /* input intentionally unused */
    return -0.5;
}
! Degenerate Herbie alternative: returns the constant -0.5, ignoring x.
real(8) function code(x)
real(8), intent (in) :: x
code = -0.5d0
end function
/** Degenerate Herbie alternative: returns the constant -0.5, ignoring x. */
public static double code(double x) {
return -0.5;
}
def code(x):
    """Degenerate Herbie alternative: constant -0.5, ignoring x."""
    return -0.5
# Degenerate Herbie alternative: returns the constant -0.5, ignoring x.
function code(x) return -0.5 end
% Degenerate Herbie alternative: returns the constant -0.5, ignoring x.
function tmp = code(x) tmp = -0.5; end
(* Degenerate Herbie alternative: returns the constant -0.5, ignoring x. *)
code[x_] := -0.5
\begin{array}{l}
\\
-0.5
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
Applied rewrites45.8%
herbie shell --seed 2024242
(FPCore (x)
:name "Numeric.SpecFunctions:invIncompleteBetaWorker from math-functions-0.1.5.2, H"
:precision binary64
(/ (- (* x x) 3.0) 6.0))