
(FPCore (x y z) :precision binary64 (+ x (* (* y z) z)))
double code(double x, double y, double z) {
    /* Evaluate (y * z) first, then multiply by z, then add x —
       the exact rounding order of the generated expression. */
    const double yz = y * z;
    return x + yz * z;
}
!> Computes x + ((y * z) * z) in binary64, preserving the exact
!> rounding order of the generated expression.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y * z) * z)
end function
public static double code(double x, double y, double z) {
    // Keep the generated rounding order: (y * z) rounds first, then * z.
    final double yz = y * z;
    return x + yz * z;
}
def code(x, y, z): return x + ((y * z) * z)
function code(x, y, z) return Float64(x + Float64(Float64(y * z) * z)) end
function tmp = code(x, y, z) tmp = x + ((y * z) * z); end
code[x_, y_, z_] := N[(x + N[(N[(y * z), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot z
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z) :precision binary64 (+ x (* (* y z) z)))
double code(double x, double y, double z) {
    /* Evaluate (y * z) first, then multiply by z, then add x —
       the exact rounding order of the generated expression. */
    const double yz = y * z;
    return x + yz * z;
}
!> Computes x + ((y * z) * z) in binary64, preserving the exact
!> rounding order of the generated expression.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + ((y * z) * z)
end function
public static double code(double x, double y, double z) {
    // Keep the generated rounding order: (y * z) rounds first, then * z.
    final double yz = y * z;
    return x + yz * z;
}
def code(x, y, z): return x + ((y * z) * z)
function code(x, y, z) return Float64(x + Float64(Float64(y * z) * z)) end
function tmp = code(x, y, z) tmp = x + ((y * z) * z); end
code[x_, y_, z_] := N[(x + N[(N[(y * z), $MachinePrecision] * z), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot z
\end{array}
(FPCore (x y z) :precision binary64 (+ x (* z (* y z))))
double code(double x, double y, double z) {
    /* Evaluate (y * z) first, then multiply by z on the left, then add x —
       the exact rounding order of the generated expression. */
    const double yz = y * z;
    return x + z * yz;
}
!> Computes x + (z * (y * z)) in binary64, preserving the exact
!> rounding order of the generated expression.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + (z * (y * z))
end function
public static double code(double x, double y, double z) {
    // Keep the generated rounding order: (y * z) rounds first, then z * (...).
    final double yz = y * z;
    return x + z * yz;
}
def code(x, y, z): return x + (z * (y * z))
function code(x, y, z) return Float64(x + Float64(z * Float64(y * z))) end
function tmp = code(x, y, z) tmp = x + (z * (y * z)); end
code[x_, y_, z_] := N[(x + N[(z * N[(y * z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + z \cdot \left(y \cdot z\right)
\end{array}
Initial program 99.9%
Final simplification 99.9%
(FPCore (x y z) :precision binary64 (+ x (* y (* z z))))
double code(double x, double y, double z) {
    /* Evaluate (z * z) first, then multiply by y, then add x —
       the exact rounding order of the generated expression. */
    const double zz = z * z;
    return x + y * zz;
}
!> Computes x + (y * (z * z)) in binary64, preserving the exact
!> rounding order of the generated expression.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x + (y * (z * z))
end function
public static double code(double x, double y, double z) {
    // Keep the generated rounding order: (z * z) rounds first, then y * (...).
    final double zz = z * z;
    return x + y * zz;
}
def code(x, y, z): return x + (y * (z * z))
function code(x, y, z) return Float64(x + Float64(y * Float64(z * z))) end
function tmp = code(x, y, z) tmp = x + (y * (z * z)); end
code[x_, y_, z_] := N[(x + N[(y * N[(z * z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + y \cdot \left(z \cdot z\right)
\end{array}
Initial program 99.9%
associate-*l* 91.5%
Simplified 91.5%
Final simplification 91.5%
(FPCore (x y z) :precision binary64 x)
/* Simplified alternative: the (y*z)*z term is dropped entirely
   (Taylor expansion around y = 0); y and z are unused. */
double code(double x, double y, double z) {
return x;
}
!> Simplified alternative: the (y*z)*z term is dropped entirely
!> (Taylor expansion around y = 0); y and z are unused.
real(8) function code(x, y, z)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
code = x
end function
// Simplified alternative: the (y*z)*z term is dropped entirely
// (Taylor expansion around y = 0); y and z are unused.
public static double code(double x, double y, double z) {
return x;
}
def code(x, y, z): return x
function code(x, y, z) return x end
function tmp = code(x, y, z) tmp = x; end
code[x_, y_, z_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 99.9%
+-commutative 99.9%
associate-*l* 91.5%
fma-def 91.5%
Simplified 91.5%
Taylor expanded in y around 0 48.1%
Final simplification 48.1%
herbie shell --seed 2023196
(FPCore (x y z)
:name "Statistics.Sample:robustSumVarWeighted from math-functions-0.1.5.2"
:precision binary64
(+ x (* (* y z) z)))