
; Herbie input: x + (y*z)*z, evaluated in IEEE binary64.
(FPCore (x y z) :precision binary64 (+ x (* (* y z) z)))
/* Evaluate x + (y * z) * z in double precision,
 * keeping the original left-to-right rounding order. */
double code(double x, double y, double z) {
    const double yz = y * z;
    return x + yz * z;
}
! Compute x + (y*z)*z in binary64, preserving the evaluation order.
! implicit none added: every program unit should forbid implicit typing.
real(8) function code(x, y, z)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = x + ((y * z) * z)
end function
/** Returns x + (y * z) * z, evaluated left-to-right in double precision. */
public static double code(double x, double y, double z) {
    final double squaredTerm = (y * z) * z;
    return x + squaredTerm;
}
def code(x, y, z):
    """Return x + (y * z) * z, preserving the original evaluation order."""
    squared_term = (y * z) * z
    return x + squared_term
# Evaluate x + (y*z)*z, rounding to Float64 after each operation.
function code(x, y, z)
    yz = Float64(y * z)
    return Float64(x + Float64(yz * z))
end
% Evaluate x + (y*z)*z in double precision.
function tmp = code(x, y, z)
    tmp = x + ((y * z) * z);
end
(* Evaluate x + (y*z)*z, forcing machine-precision rounding after each operation to mimic binary64. *)
code[x_, y_, z_] := N[(x + N[(N[(y * z), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot z
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (initial program unchanged): x + (y*z)*z in binary64.
(FPCore (x y z) :precision binary64 (+ x (* (* y z) z)))
/* Evaluate x + (y * z) * z in double precision (original rounding order). */
double code(double x, double y, double z) {
    const double t = (y * z) * z;
    return x + t;
}
! Compute x + (y*z)*z in binary64.
! implicit none added: program units should forbid implicit typing.
real(8) function code(x, y, z)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = x + ((y * z) * z)
end function
/** Computes x + (y * z) * z with left-to-right double rounding. */
public static double code(double x, double y, double z) {
    final double prod = (y * z) * z;
    return x + prod;
}
def code(x, y, z):
    """Compute x + (y * z) * z with the original left-to-right grouping."""
    inner = y * z
    return x + inner * z
# x + (y*z)*z with explicit Float64 rounding at each step.
function code(x, y, z)
    inner = Float64(y * z)
    Float64(x + Float64(inner * z))
end
% Compute x + (y*z)*z.
function tmp = code(x, y, z)
    inner = (y * z) * z;
    tmp = x + inner;
end
(* x + (y*z)*z with machine-precision rounding after each operation. *)
code[x_, y_, z_] := N[(x + N[(N[(y * z), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot z
\end{array}
; Alternative 2: fused multiply-add — one rounding for the final multiply-add.
(FPCore (x y z) :precision binary64 (fma (* y z) z x))
double code(double x, double y, double z) {
return fma((y * z), z, x);
}
# Fused multiply-add form: fma(y*z, z, x), one rounding on the add.
function code(x, y, z)
    yz = Float64(y * z)
    return fma(yz, z, x)
end
(* fma alternative expressed as (y*z)*z + x with machine-precision rounding; Mathematica has no direct fma here. *)
code[x_, y_, z_] := N[(N[(y * z), $MachinePrecision] * z + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y \cdot z, z, x\right)
\end{array}
Initial program 99.9%
lift-*.f64 N/A
lift-*.f64 N/A
+-commutative N/A
lift-*.f64 N/A
lower-fma.f64 99.9
Applied egg-rr 99.9%
; Alternative 3: drop the x term (Taylor expansion around x = 0) — z * (y*z).
(FPCore (x y z) :precision binary64 (* z (* y z)))
/* Simplified alternative: z * (y * z); x is intentionally unused. */
double code(double x, double y, double z) {
    const double yz = y * z;
    return z * yz;
}
! Simplified alternative z*(y*z); x is intentionally unused (kept for interface compatibility).
! implicit none added: program units should forbid implicit typing.
real(8) function code(x, y, z)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = z * (y * z)
end function
/** Returns z * (y * z); x is intentionally unused in this alternative. */
public static double code(double x, double y, double z) {
    final double yz = y * z;
    return z * yz;
}
def code(x, y, z):
    """Return z * (y * z); ``x`` is intentionally unused in this alternative."""
    yz = y * z
    return z * yz
# z * (y*z) with explicit Float64 rounding; x is intentionally unused.
function code(x, y, z)
    yz = Float64(y * z)
    return Float64(z * yz)
end
% z * (y*z); x is intentionally unused in this alternative.
function tmp = code(x, y, z)
    tmp = z * (y * z);
end
(* z * (y*z) with machine-precision rounding; x is intentionally unused. *)
code[x_, y_, z_] := N[(z * N[(y * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
z \cdot \left(y \cdot z\right)
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 50.3
Simplified 50.3%
associate-*r* N/A
lift-*.f64 N/A
lower-*.f64 54.0
Applied egg-rr 54.0%
Final simplification 54.0%
; Alternative 4: drop the x term (Taylor expansion around x = 0) — y * (z*z).
(FPCore (x y z) :precision binary64 (* y (* z z)))
/* Simplified alternative: y * (z * z); x is intentionally unused. */
double code(double x, double y, double z) {
    const double zz = z * z;
    return y * zz;
}
! Simplified alternative y*(z*z); x is intentionally unused (kept for interface compatibility).
! implicit none added: program units should forbid implicit typing.
real(8) function code(x, y, z)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    code = y * (z * z)
end function
/** Returns y * (z * z); x is intentionally unused in this alternative. */
public static double code(double x, double y, double z) {
    final double zz = z * z;
    return y * zz;
}
def code(x, y, z):
    """Return y * (z * z); ``x`` is intentionally unused in this alternative."""
    zz = z * z
    return y * zz
# y * (z*z) with explicit Float64 rounding; x is intentionally unused.
function code(x, y, z)
    zz = Float64(z * z)
    return Float64(y * zz)
end
% y * (z*z); x is intentionally unused in this alternative.
function tmp = code(x, y, z)
    tmp = y * (z * z);
end
(* y * (z*z) with machine-precision rounding; x is intentionally unused. *)
code[x_, y_, z_] := N[(y * N[(z * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
y \cdot \left(z \cdot z\right)
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 50.3
Simplified 50.3%
herbie shell --seed 2024207
; Reproduction input: the named FPCore expression fed to Herbie (see the shell command above).
(FPCore (x y z)
:name "Statistics.Sample:robustSumVarWeighted from math-functions-0.1.5.2"
:precision binary64
(+ x (* (* y z) z)))