
(FPCore (x y z) :precision binary64 (+ x (* (* y z) z)))
/* Evaluate x + (y*z)*z in binary64; association order matters for accuracy. */
double code(double x, double y, double z) {
    const double yz = y * z;
    return x + yz * z;
}
!> Evaluate x + (y*z)*z in double precision (Herbie-generated kernel).
!> All arguments are read-only; association order is kept as generated.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y * z) * z)
end function
// Computes x + (y*z)*z in double precision.
public static double code(double x, double y, double z) {
    double squareTerm = (y * z) * z;
    return x + squareTerm;
}
def code(x, y, z):
    """Evaluate x + (y*z)*z, preserving the original association order."""
    yz = y * z
    return x + yz * z
# Evaluate x + (y*z)*z with explicit Float64 rounding at each step.
function code(x, y, z)
    yz = Float64(y * z)
    return Float64(x + Float64(yz * z))
end
% Computes x + (y*z)*z.
function tmp = code(x, y, z)
    tmp = x + ((y * z) * z);
end
(* Herbie translation of x + (y*z)*z; each intermediate is rounded via N[..., $MachinePrecision]. *)
code[x_, y_, z_] := N[(x + N[(N[(y * z), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot z
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z) :precision binary64 (+ x (* (* y z) z)))
/* Computes x + (y*z)*z via an in-place accumulator. */
double code(double x, double y, double z) {
    double acc = y * z;
    acc *= z;
    return x + acc;
}
!> Evaluate x + (y*z)*z in double precision (Herbie-generated kernel).
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y * z) * z)
end function
// Computes x + (y*z)*z in double precision.
public static double code(double x, double y, double z) {
    final double yz = y * z;
    return x + yz * z;
}
def code(x, y, z):
    """Return x + (y*z)*z (original association order preserved)."""
    product = (y * z) * z
    return x + product
# Evaluate x + (y*z)*z; Float64 wraps force binary64 rounding per operation.
function code(x, y, z)
    inner = Float64(y * z)
    outer = Float64(inner * z)
    return Float64(x + outer)
end
% Computes x + (y*z)*z.
function tmp = code(x, y, z)
    yz = y * z;
    tmp = x + (yz * z);
end
(* Herbie translation of x + (y*z)*z with per-step rounding to $MachinePrecision. *)
code[x_, y_, z_] := N[(x + N[(N[(y * z), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot z
\end{array}
(FPCore (x y z) :precision binary64 (fma (* z y) z x))
double code(double x, double y, double z) {
return fma((z * y), z, x);
}
# Fused variant: fma rounds (z*y)*z + x only once at the end.
function code(x, y, z)
    zy = Float64(z * y)
    return fma(zy, z, x)
end
(* Mathematica rendering of the fma alternative: (z*y)*z + x; NOTE(review): no fused op here, just N[...] rounding. *)
code[x_, y_, z_] := N[(N[(z * y), $MachinePrecision] * z + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(z \cdot y, z, x\right)
\end{array}
Initial program 99.9%
lift-+.f64 — N/A
+-commutative — N/A
lift-*.f64 — N/A
lower-fma.f64 — 99.9
lift-*.f64 — N/A
*-commutative — N/A
lower-*.f64 — 99.9
Applied rewrites99.9%
(FPCore (x y z) :precision binary64 (* (* z y) z))
/* Herbie alternative: (z*y)*z — the x term was dropped by Taylor expansion,
 * so x is intentionally unused. */
double code(double x, double y, double z) {
    const double zy = z * y;
    return zy * z;
}
!> Herbie alternative: code = (z*y)*z. The x argument is kept for interface
!> compatibility but is intentionally unused (dropped by Taylor expansion).
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = (z * y) * z
end function
// Herbie alternative: (z*y)*z; x is intentionally unused (Taylor-dropped).
public static double code(double x, double y, double z) {
    double zy = z * y;
    return zy * z;
}
def code(x, y, z):
    """Return (z*y)*z; x is intentionally ignored (Taylor-dropped term)."""
    zy = z * y
    return zy * z
# Herbie alternative: (z*y)*z with per-step Float64 rounding; x is unused.
function code(x, y, z)
    zy = Float64(z * y)
    return Float64(zy * z)
end
% Herbie alternative: (z*y)*z; x is intentionally unused.
function tmp = code(x, y, z)
    tmp = (z * y) * z;
end
(* Herbie alternative: (z*y)*z at $MachinePrecision; the x argument is unused. *)
code[x_, y_, z_] := N[(N[(z * y), $MachinePrecision] * z), $MachinePrecision]
\begin{array}{l}
\\
\left(z \cdot y\right) \cdot z
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
*-commutative — N/A
lower-*.f64 — N/A
unpow2 — N/A
lower-*.f64 — 45.9
Applied rewrites45.9%
Applied rewrites51.4%
herbie shell --seed 2024313
(FPCore (x y z)
:name "Statistics.Sample:robustSumVarWeighted from math-functions-0.1.5.2"
:precision binary64
(+ x (* (* y z) z)))