
; Herbie alternative: the whole expression collapsed to a single binary64 constant
; (x and y are accepted but unused).
(FPCore (x y) :precision binary64 -0.8273960599468214)
/* Herbie alternative: the expression reduced to a fixed binary64 constant.
 * The parameters x and y are accepted for interface compatibility but unused. */
double code(double x, double y) {
    const double RESULT = -0.8273960599468214;
    (void)x;
    (void)y;
    return RESULT;
}
! Herbie alternative: returns a fixed binary64 constant.
! The arguments x and y are unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -0.8273960599468214d0
end function
/**
 * Herbie alternative: the expression reduced to a fixed binary64 constant.
 * The parameters are accepted for interface compatibility but unused.
 */
public static double code(double x, double y) {
    final double result = -0.8273960599468214;
    return result;
}
def code(x, y):
    """Herbie alternative: a fixed binary64 constant; x and y are ignored."""
    value = -0.8273960599468214
    return value
# Herbie alternative: returns a fixed Float64 constant; x and y are unused.
function code(x, y) return -0.8273960599468214 end
% Herbie alternative: returns a fixed double constant; x and y are unused.
function tmp = code(x, y) tmp = -0.8273960599468214; end
(* Herbie alternative: a fixed machine-precision constant; x and y are unused. *)
code[x_, y_] := -0.8273960599468214
\begin{array}{l}
\\
-0.8273960599468214
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Rump's expression in FPCore form (binary64).
; NOTE(review): the grouping below is significant -- reassociating the
; additions/subtractions changes the binary64 result.
(FPCore (x y)
:precision binary64
(+
(+
(+
(* 333.75 (pow y 6.0))
(*
(* x x)
(-
(- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0)))
2.0)))
(* 5.5 (pow y 8.0)))
(/ x (* 2.0 y))))
/* Direct binary64 evaluation of Rump's expression.
 * NOTE(review): the parenthesization mirrors the FPCore form exactly;
 * do not reassociate -- the floating-point result depends on this order. */
double code(double x, double y) {
return (((333.75 * pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - pow(y, 6.0)) - (121.0 * pow(y, 4.0))) - 2.0))) + (5.5 * pow(y, 8.0))) + (x / (2.0 * y));
}
! Direct binary64 evaluation of Rump's expression.
! NOTE(review): the parenthesization mirrors the FPCore form exactly;
! do not reassociate -- the floating-point result depends on this order.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (((333.75d0 * (y ** 6.0d0)) + ((x * x) * (((((((11.0d0 * x) * x) * y) * y) - (y ** 6.0d0)) - (121.0d0 * (y ** 4.0d0))) - 2.0d0))) + (5.5d0 * (y ** 8.0d0))) + (x / (2.0d0 * y))
end function
// Direct binary64 evaluation of Rump's expression.
// NOTE(review): the parenthesization mirrors the FPCore form exactly;
// do not reassociate -- the floating-point result depends on this order.
public static double code(double x, double y) {
return (((333.75 * Math.pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - Math.pow(y, 6.0)) - (121.0 * Math.pow(y, 4.0))) - 2.0))) + (5.5 * Math.pow(y, 8.0))) + (x / (2.0 * y));
}
def code(x, y):
    """Direct binary64 evaluation of Rump's expression.

    The operation sequence is kept exactly as in the FPCore form: each
    intermediate below performs the same operations in the same order as the
    original one-line expression, so the floating-point result is identical.
    """
    term_pow6 = 333.75 * math.pow(y, 6.0)
    x_sq = x * x
    # Inner polynomial: ((11*x*x*y*y - y^6) - 121*y^4), then subtract 2.
    inner = (((((11.0 * x) * x) * y) * y) - math.pow(y, 6.0)) - (121.0 * math.pow(y, 4.0))
    term_mid = x_sq * (inner - 2.0)
    term_pow8 = 5.5 * math.pow(y, 8.0)
    term_div = x / (2.0 * y)
    return ((term_pow6 + term_mid) + term_pow8) + term_div
# Direct binary64 evaluation of Rump's expression.
# Each Float64(...) wrapper converts the intermediate to Float64, forcing
# binary64 rounding at every step; do not reassociate the terms.
function code(x, y) return Float64(Float64(Float64(Float64(333.75 * (y ^ 6.0)) + Float64(Float64(x * x) * Float64(Float64(Float64(Float64(Float64(Float64(Float64(11.0 * x) * x) * y) * y) - (y ^ 6.0)) - Float64(121.0 * (y ^ 4.0))) - 2.0))) + Float64(5.5 * (y ^ 8.0))) + Float64(x / Float64(2.0 * y))) end
% Direct binary64 evaluation of Rump's expression; the parenthesization
% mirrors the FPCore form and must not be reassociated.
function tmp = code(x, y) tmp = (((333.75 * (y ^ 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - (y ^ 6.0)) - (121.0 * (y ^ 4.0))) - 2.0))) + (5.5 * (y ^ 8.0))) + (x / (2.0 * y)); end
(* Direct evaluation of Rump's expression; each N[..., $MachinePrecision]
   rounds the intermediate to machine precision to emulate binary64 steps. *)
code[x_, y_] := N[(N[(N[(N[(333.75 * N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] + N[(N[(x * x), $MachinePrecision] * N[(N[(N[(N[(N[(N[(N[(11.0 * x), $MachinePrecision] * x), $MachinePrecision] * y), $MachinePrecision] * y), $MachinePrecision] - N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] - N[(121.0 * N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(5.5 * N[Power[y, 8.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x / N[(2.0 * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y}
\end{array}
; Herbie alternative using fused multiply-add: a rational form whose numerator
; is built with nested fma and whose denominator squares t_0.
; NOTE(review): operation order is significant for the binary64 result.
(FPCore (x y)
:precision binary64
(let* ((t_0 (- (* 0.5 (/ x y)) (* -2.0 (* x x)))))
(/
(/
(fma
(fma (* (pow x 5.0) y) -2.0 (* (pow x 4.0) 0.5))
y
(* (pow x 3.0) 0.125))
(pow y 3.0))
(* t_0 t_0))))
/* Herbie alternative built on fused multiply-add (C99 fma from <math.h>).
 * t_0 = 0.5*(x/y) - (-2.0)*(x*x); the result divides a nested-fma numerator
 * by pow(y,3) and then by t_0 squared.
 * NOTE(review): do not reorder operations -- binary64 rounding depends on it. */
double code(double x, double y) {
double t_0 = (0.5 * (x / y)) - (-2.0 * (x * x));
return (fma(fma((pow(x, 5.0) * y), -2.0, (pow(x, 4.0) * 0.5)), y, (pow(x, 3.0) * 0.125)) / pow(y, 3.0)) / (t_0 * t_0);
}
# Herbie alternative using fma; Float64(...) wrappers force binary64 rounding
# at each intermediate step. Do not reorder operations.
function code(x, y) t_0 = Float64(Float64(0.5 * Float64(x / y)) - Float64(-2.0 * Float64(x * x))) return Float64(Float64(fma(fma(Float64((x ^ 5.0) * y), -2.0, Float64((x ^ 4.0) * 0.5)), y, Float64((x ^ 3.0) * 0.125)) / (y ^ 3.0)) / Float64(t_0 * t_0)) end
(* Herbie alternative with local t$95$0 (t_0); fma is expanded to a*b + c and
   each intermediate is rounded via N[..., $MachinePrecision]. *)
code[x_, y_] := Block[{t$95$0 = N[(N[(0.5 * N[(x / y), $MachinePrecision]), $MachinePrecision] - N[(-2.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[Power[x, 5.0], $MachinePrecision] * y), $MachinePrecision] * -2.0 + N[(N[Power[x, 4.0], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] * y + N[(N[Power[x, 3.0], $MachinePrecision] * 0.125), $MachinePrecision]), $MachinePrecision] / N[Power[y, 3.0], $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\\
\frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot 0.5\right), y, {x}^{3} \cdot 0.125\right)}{{y}^{3}}}{t\_0 \cdot t\_0}
\end{array}
\end{array}
Initial program 9.2%
Taylor expanded in y around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 10.8
Applied rewrites 10.8%
lift-+.f64 N/A
+-commutative N/A
lift-/.f64 N/A
div-inv N/A
associate-*r/ N/A
lift-*.f64 N/A
*-commutative N/A
times-frac N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-/.f64 10.8
Applied rewrites 10.8%
lift-fma.f64 N/A
flip-+ N/A
Applied rewrites 10.8%
Taylor expanded in y around 0
lower-/.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
*-commutative N/A
lower-fma.f64 N/A
lower-*.f64 N/A
lower-pow.f64 N/A
*-commutative N/A
lower-*.f64 N/A
lower-pow.f64 N/A
*-commutative N/A
lower-*.f64 N/A
lower-pow.f64 N/A
lower-pow.f64 20.2
Applied rewrites 20.2%
; Herbie alternative: a single fused multiply-add, fma(x/y, 0.5, -2*x*x).
(FPCore (x y) :precision binary64 (fma (/ x y) 0.5 (* (* x x) -2.0)))
double code(double x, double y) {
return fma((x / y), 0.5, ((x * x) * -2.0));
}
# Herbie alternative: a single fused multiply-add, fma(x/y, 0.5, -2*x*x);
# Float64(...) wrappers force binary64 rounding of each intermediate.
function code(x, y) return fma(Float64(x / y), 0.5, Float64(Float64(x * x) * -2.0)) end
(* Herbie alternative: fma expanded to (x/y)*0.5 + (x*x)*(-2.0), with
   N[..., $MachinePrecision] rounding each intermediate. *)
code[x_, y_] := N[(N[(x / y), $MachinePrecision] * 0.5 + N[(N[(x * x), $MachinePrecision] * -2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{x}{y}, 0.5, \left(x \cdot x\right) \cdot -2\right)
\end{array}
Initial program 9.2%
Taylor expanded in y around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 10.8
Applied rewrites 10.8%
lift-+.f64 N/A
+-commutative N/A
lift-/.f64 N/A
div-inv N/A
associate-*r/ N/A
lift-*.f64 N/A
*-commutative N/A
times-frac N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-/.f64 10.8
Applied rewrites 10.8%
; Herbie alternative: the expression reduced to (0.5/y) * x.
(FPCore (x y) :precision binary64 (* (/ 0.5 y) x))
/* Herbie alternative: x/(2y) computed as (0.5 / y) * x.
 * Same two operations in the same order as the original one-liner. */
double code(double x, double y) {
    const double half_over_y = 0.5 / y;
    return half_over_y * x;
}
! Herbie alternative: x/(2y) computed as (0.5 / y) * x.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (0.5d0 / y) * x
end function
/**
 * Herbie alternative: x/(2y) computed as (0.5 / y) * x.
 * Same two operations in the same order as the original one-liner.
 */
public static double code(double x, double y) {
    final double halfOverY = 0.5 / y;
    return halfOverY * x;
}
def code(x, y):
    """Herbie alternative: x/(2y) computed as (0.5 / y) * x.

    Performs the same two binary64 operations, in the same order, as the
    original one-line form.
    """
    half_over_y = 0.5 / y
    return half_over_y * x
# Herbie alternative: x/(2y) computed as (0.5 / y) * x, with Float64(...)
# forcing binary64 rounding of each intermediate.
function code(x, y) return Float64(Float64(0.5 / y) * x) end
% Herbie alternative: x/(2y) computed as (0.5 / y) * x.
function tmp = code(x, y) tmp = (0.5 / y) * x; end
(* Herbie alternative: (0.5 / y) * x with machine-precision rounding. *)
code[x_, y_] := N[(N[(0.5 / y), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{y} \cdot x
\end{array}
Initial program 9.2%
Taylor expanded in y around 0
associate-*r/ N/A
associate-*l/ N/A
metadata-eval N/A
associate-*r/ N/A
lower-*.f64 N/A
associate-*r/ N/A
metadata-eval N/A
lower-/.f64 1.6
Applied rewrites 1.6%
herbie shell --seed 2024313
; Input program given to Herbie: Rump's ill-conditioned expression, with the
; precondition pinning the classic evaluation point x = 77617, y = 33096.
(FPCore (x y)
:name "Rump's expression from Stadtherr's award speech"
:precision binary64
:pre (and (== x 77617.0) (== y 33096.0))
(+ (+ (+ (* 333.75 (pow y 6.0)) (* (* x x) (- (- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0))) 2.0))) (* 5.5 (pow y 8.0))) (/ x (* 2.0 y))))