
; Constant-valued FPCore: the expression folds to a single binary64
; number; the parameters x and y are unused.
(FPCore (x y) :precision binary64 -0.8273960599468214)
double code(double x, double y) {
    /* The expression folds to a single binary64 constant; the
       arguments x and y are unused. */
    const double result = -0.8273960599468214;
    return result;
}
! Returns a fixed binary64 constant; the inputs x and y are unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -0.8273960599468214d0
end function
public static double code(double x, double y) {
    // The expression folds to one binary64 constant; x and y are ignored.
    final double result = -0.8273960599468214;
    return result;
}
def code(x, y):
    """Return the binary64 constant the expression folds to.

    Both parameters are unused; they are kept for interface parity
    with the other generated variants.
    """
    result = -0.8273960599468214
    return result
-- Returns a fixed constant; the parameters x and y are unused.
function code(x, y) return -0.8273960599468214 end
% Returns a fixed constant; the inputs x and y are unused.
function tmp = code(x, y) tmp = -0.8273960599468214; end
(* Constant-valued definition; the patterns x_ and y_ are unused. *)
code[x_, y_] := -0.8273960599468214
\begin{array}{l}
\\
-0.8273960599468214
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: the same terms as the original Rump expression,
; re-associated (generated form); binary64 throughout.
(FPCore (x y)
:precision binary64
(+
(+
(+
(* 333.75 (pow y 6.0))
(*
(* x x)
(-
(- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0)))
2.0)))
(* 5.5 (pow y 8.0)))
(/ x (* 2.0 y))))
// Evaluates 333.75*y^6 + x^2*(11*x^2*y^2 - y^6 - 121*y^4 - 2)
// + 5.5*y^8 + x/(2*y) in binary64.
// NOTE(review): generated code — the association/evaluation order is
// deliberate; re-grouping the arithmetic changes the rounded result.
double code(double x, double y) {
return (((333.75 * pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - pow(y, 6.0)) - (121.0 * pow(y, 4.0))) - 2.0))) + (5.5 * pow(y, 8.0))) + (x / (2.0 * y));
}
! Evaluates 333.75*y**6 + x**2*(11*x**2*y**2 - y**6 - 121*y**4 - 2)
! + 5.5*y**8 + x/(2*y) in double precision.
! NOTE: generated code — do not re-associate; the operation order
! determines the rounded result.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (((333.75d0 * (y ** 6.0d0)) + ((x * x) * (((((((11.0d0 * x) * x) * y) * y) - (y ** 6.0d0)) - (121.0d0 * (y ** 4.0d0))) - 2.0d0))) + (5.5d0 * (y ** 8.0d0))) + (x / (2.0d0 * y))
end function
// Evaluates 333.75*y^6 + x^2*(11*x^2*y^2 - y^6 - 121*y^4 - 2)
// + 5.5*y^8 + x/(2*y) in binary64.
// NOTE: generated code — the parenthesization is deliberate; changing
// it changes the floating-point result.
public static double code(double x, double y) {
return (((333.75 * Math.pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - Math.pow(y, 6.0)) - (121.0 * Math.pow(y, 4.0))) - 2.0))) + (5.5 * Math.pow(y, 8.0))) + (x / (2.0 * y));
}
# Evaluates 333.75*y**6 + x**2*(11*x**2*y**2 - y**6 - 121*y**4 - 2)
# + 5.5*y**8 + x/(2*y) with this exact association order.
# Requires `math` to be imported at module level (uses math.pow).
def code(x, y): return (((333.75 * math.pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - math.pow(y, 6.0)) - (121.0 * math.pow(y, 4.0))) - 2.0))) + (5.5 * math.pow(y, 8.0))) + (x / (2.0 * y))
# Same expression as the other variants; each intermediate is forced to
# Float64 so the rounding matches binary64 evaluation exactly.
function code(x, y) return Float64(Float64(Float64(Float64(333.75 * (y ^ 6.0)) + Float64(Float64(x * x) * Float64(Float64(Float64(Float64(Float64(Float64(Float64(11.0 * x) * x) * y) * y) - (y ^ 6.0)) - Float64(121.0 * (y ^ 4.0))) - 2.0))) + Float64(5.5 * (y ^ 8.0))) + Float64(x / Float64(2.0 * y))) end
% Evaluates the Rump-style polynomial plus x/(2*y); generated code —
% keep the parenthesization as written.
function tmp = code(x, y) tmp = (((333.75 * (y ^ 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - (y ^ 6.0)) - (121.0 * (y ^ 4.0))) - 2.0))) + (5.5 * (y ^ 8.0))) + (x / (2.0 * y)); end
(* Same expression; every intermediate is wrapped in N[..., $MachinePrecision]
   to reproduce machine-precision rounding at each step. *)
code[x_, y_] := N[(N[(N[(N[(333.75 * N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] + N[(N[(x * x), $MachinePrecision] * N[(N[(N[(N[(N[(N[(N[(11.0 * x), $MachinePrecision] * x), $MachinePrecision] * y), $MachinePrecision] * y), $MachinePrecision] - N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] - N[(121.0 * N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(5.5 * N[Power[y, 8.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x / N[(2.0 * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y}
\end{array}
; Alternative 2: fma-based rewrite. t_0 = 0.5*(x/y) + 2*x^2 is squared
; in the final denominator.
(FPCore (x y)
:precision binary64
(let* ((t_0 (- (* 0.5 (/ x y)) (* x (* x -2.0)))))
(/
(/
(fma
y
(fma y (* -2.0 (pow x 5.0)) (* 0.5 (* x (* x (* x x)))))
(* x (* (* x x) 0.125)))
(* y (* y y)))
(* t_0 t_0))))
// fma-based alternative. t_0 = 0.5*(x/y) - x*(x*-2.0); its square is
// the outer denominator. NOTE: generated code — the nested fma calls
// and grouping are deliberate; do not re-associate.
double code(double x, double y) {
double t_0 = (0.5 * (x / y)) - (x * (x * -2.0));
return (fma(y, fma(y, (-2.0 * pow(x, 5.0)), (0.5 * (x * (x * (x * x))))), (x * ((x * x) * 0.125))) / (y * (y * y))) / (t_0 * t_0);
}
# fma-based alternative; Float64 wrappers force binary64 rounding at
# each intermediate step.
function code(x, y) t_0 = Float64(Float64(0.5 * Float64(x / y)) - Float64(x * Float64(x * -2.0))) return Float64(Float64(fma(y, fma(y, Float64(-2.0 * (x ^ 5.0)), Float64(0.5 * Float64(x * Float64(x * Float64(x * x))))), Float64(x * Float64(Float64(x * x) * 0.125))) / Float64(y * Float64(y * y))) / Float64(t_0 * t_0)) end
(* fma-based alternative; Block-local t$95$0 mirrors the FPCore let
   binding t_0, and N[..., $MachinePrecision] forces per-step rounding. *)
code[x_, y_] := Block[{t$95$0 = N[(N[(0.5 * N[(x / y), $MachinePrecision]), $MachinePrecision] - N[(x * N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(y * N[(y * N[(-2.0 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision] + N[(0.5 * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * N[(N[(x * x), $MachinePrecision] * 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(y * N[(y * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.5 \cdot \frac{x}{y} - x \cdot \left(x \cdot -2\right)\\
\frac{\frac{\mathsf{fma}\left(y, \mathsf{fma}\left(y, -2 \cdot {x}^{5}, 0.5 \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right), x \cdot \left(\left(x \cdot x\right) \cdot 0.125\right)\right)}{y \cdot \left(y \cdot y\right)}}{t_0 \cdot t_0}
\end{array}
\end{array}
Initial program: 9.2% accuracy
Taylor expanded in y around 0
  lower-*.f64 — N/A
  unpow2 — N/A
  lower-*.f64 — 10.8
Applied rewrites: 10.8%
  lift-+.f64 — N/A
  +-commutative — N/A
  lift-/.f64 — N/A
  lift-*.f64 — N/A
  associate-/r* — N/A
  div-inv — N/A
  metadata-eval — N/A
  associate-*l/ — N/A
  lift-/.f64 — N/A
  lower-fma.f64 — 10.8
Applied rewrites: 10.8%
  lift-fma.f64 — N/A
  flip-+ — N/A
Applied rewrites: 10.8%
Taylor expanded in y around 0
  lower-/.f64 — N/A
Applied rewrites: 20.2%
Final simplification: 20.2%
; Alternative: single fused multiply-add, fma(x/y, 0.5, -2*x^2).
(FPCore (x y) :precision binary64 (fma (/ x y) 0.5 (* x (* x -2.0))))
double code(double x, double y) {
return fma((x / y), 0.5, (x * (x * -2.0)));
}
# Single fused multiply-add: fma(x/y, 0.5, -2*x^2) in binary64.
function code(x, y) return fma(Float64(x / y), 0.5, Float64(x * Float64(x * -2.0))) end
(* fma emulated as multiply-then-add under N[..., $MachinePrecision];
   NOTE(review): unlike a true fma this rounds the product separately. *)
code[x_, y_] := N[(N[(x / y), $MachinePrecision] * 0.5 + N[(x * N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{x}{y}, 0.5, x \cdot \left(x \cdot -2\right)\right)
\end{array}
Initial program: 9.2% accuracy
Taylor expanded in y around 0
  lower-*.f64 — N/A
  unpow2 — N/A
  lower-*.f64 — 10.8
Applied rewrites: 10.8%
  lift-+.f64 — N/A
  +-commutative — N/A
  lift-/.f64 — N/A
  lift-*.f64 — N/A
  associate-/r* — N/A
  div-inv — N/A
  metadata-eval — N/A
  associate-*l/ — N/A
  lift-/.f64 — N/A
  lower-fma.f64 — 10.8
Applied rewrites: 10.8%
; Simplest alternative: keep only the 0.5 * (x / y) term.
(FPCore (x y) :precision binary64 (* 0.5 (/ x y)))
double code(double x, double y) {
    /* Half of the quotient x / y. */
    double quotient = x / y;
    return 0.5 * quotient;
}
! Half of the quotient x / y, in double precision.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.5d0 * (x / y)
end function
public static double code(double x, double y) {
    // Half of the quotient x / y.
    final double quotient = x / y;
    return 0.5 * quotient;
}
def code(x, y):
    """Return half of the quotient x / y."""
    quotient = x / y
    return 0.5 * quotient
# Half of the quotient x / y, rounded to Float64 at each step.
function code(x, y) return Float64(0.5 * Float64(x / y)) end
% Half of the quotient x / y.
function tmp = code(x, y) tmp = 0.5 * (x / y); end
(* Half of the quotient x / y at machine precision. *)
code[x_, y_] := N[(0.5 * N[(x / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.5 \cdot \frac{x}{y}
\end{array}
Initial program: 9.2% accuracy
Taylor expanded in y around 0
  lower-*.f64 — N/A
  lower-/.f64 — 1.6
Applied rewrites: 1.6%
herbie shell --seed 2024222
; Original input program: Rump's ill-conditioned expression; the
; precondition pins the evaluation point to x = 77617, y = 33096.
(FPCore (x y)
:name "Rump's expression from Stadtherr's award speech"
:precision binary64
:pre (and (== x 77617.0) (== y 33096.0))
(+ (+ (+ (* 333.75 (pow y 6.0)) (* (* x x) (- (- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0))) 2.0))) (* 5.5 (pow y 8.0))) (/ x (* 2.0 y))))