
; Herbie: over the sampled binary64 inputs the whole expression is approximated by this constant.
(FPCore (x y) :precision binary64 -0.8273960599468214)
double code(double x, double y) {
    /* Herbie output: over the sampled inputs the expression is replaced by a
       single binary64 constant.  The (x, y) parameters are unused but kept so
       this variant has the same interface as the others. */
    (void)x;
    (void)y;
    return -0.8273960599468214;
}
real(8) function code(x, y)
    !> Herbie output: over the sampled inputs the expression is approximated
    !> by a single binary64 constant; x and y are unused but keep the
    !> interface identical to the other variants.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = -0.8273960599468214d0
end function
public static double code(double x, double y) {
    // Herbie output: the sampled expression collapses to one binary64 constant.
    // Parameters are unused but preserve the shared signature.
    final double HERBIE_CONSTANT = -0.8273960599468214;
    return HERBIE_CONSTANT;
}
def code(x, y):
    """Herbie output: the sampled expression reduces to one binary64 constant.

    ``x`` and ``y`` are unused; the signature matches the other variants.
    """
    return -0.8273960599468214
function code(x, y)
    -- Herbie output: the sampled expression reduces to one binary64 constant.
    local result = -0.8273960599468214
    return result
end
function tmp = code(x, y)
  % Herbie output: the sampled expression reduces to one binary64 constant.
  tmp = -0.8273960599468214;
end
(* Herbie output: the sampled expression reduces to one machine constant. *)
code[x_, y_] := -0.8273960599468214
\begin{array}{l}
\\
-0.8273960599468214
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Rump's expression (the original program), spelled out in FPCore.
(FPCore (x y)
:precision binary64
(+
(+
(+
(* 333.75 (pow y 6.0))
(*
(* x x)
(-
(- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0)))
2.0)))
(* 5.5 (pow y 8.0)))
(/ x (* 2.0 y))))
double code(double x, double y) {
return (((333.75 * pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - pow(y, 6.0)) - (121.0 * pow(y, 4.0))) - 2.0))) + (5.5 * pow(y, 8.0))) + (x / (2.0 * y));
}
real(8) function code(x, y)
    !> Rump's expression (original program) evaluated directly in binary64.
    !> Operation order is identical to the FPCore; the statement is merely
    !> wrapped with continuations to respect the 132-column free-form limit.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (((333.75d0 * (y ** 6.0d0)) + ((x * x) * (((((((11.0d0 * x) * x) * y) * y) &
           - (y ** 6.0d0)) - (121.0d0 * (y ** 4.0d0))) - 2.0d0))) &
           + (5.5d0 * (y ** 8.0d0))) + (x / (2.0d0 * y))
end function
public static double code(double x, double y) {
    // Rump's expression (original program), evaluated directly in binary64.
    // Same operation DAG as the generated one-liner; intermediates named.
    final double y6 = Math.pow(y, 6.0);
    final double y4 = Math.pow(y, 4.0);
    final double y8 = Math.pow(y, 8.0);
    double inner = (((11.0 * x) * x) * y) * y;   // 11 x^2 y^2
    inner = ((inner - y6) - 121.0 * y4) - 2.0;
    return ((333.75 * y6 + (x * x) * inner) + 5.5 * y8) + x / (2.0 * y);
}
def code(x, y):
    """Rump's expression (original program), evaluated directly in binary64.

    Same floating-point operation order as the generated one-liner;
    intermediates are only named for readability.
    """
    y6 = math.pow(y, 6.0)
    y4 = math.pow(y, 4.0)
    y8 = math.pow(y, 8.0)
    inner = (((11.0 * x) * x) * y) * y          # 11 * x**2 * y**2
    inner = ((inner - y6) - 121.0 * y4) - 2.0
    return ((333.75 * y6 + (x * x) * inner) + 5.5 * y8) + x / (2.0 * y)
# Rump's expression (original program). The Float64(...) wrappers pin binary64
# rounding after every operation (this matters if called with non-Float64 args).
function code(x, y) return Float64(Float64(Float64(Float64(333.75 * (y ^ 6.0)) + Float64(Float64(x * x) * Float64(Float64(Float64(Float64(Float64(Float64(Float64(11.0 * x) * x) * y) * y) - (y ^ 6.0)) - Float64(121.0 * (y ^ 4.0))) - 2.0))) + Float64(5.5 * (y ^ 8.0))) + Float64(x / Float64(2.0 * y))) end
function tmp = code(x, y)
  % Rump's expression (original program), evaluated directly in binary64.
  % Same operation order as the generated one-liner; intermediates named.
  y6 = y ^ 6.0;
  y4 = y ^ 4.0;
  y8 = y ^ 8.0;
  inner = (((((11.0 * x) * x) * y) * y - y6) - 121.0 * y4) - 2.0;
  tmp = ((333.75 * y6 + (x * x) * inner) + 5.5 * y8) + x / (2.0 * y);
end
(* Rump's expression (original program); each N[..., $MachinePrecision] rounds the intermediate to machine precision, mirroring binary64 evaluation. *)
code[x_, y_] := N[(N[(N[(N[(333.75 * N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] + N[(N[(x * x), $MachinePrecision] * N[(N[(N[(N[(N[(N[(N[(11.0 * x), $MachinePrecision] * x), $MachinePrecision] * y), $MachinePrecision] * y), $MachinePrecision] - N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] - N[(121.0 * N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(5.5 * N[Power[y, 8.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x / N[(2.0 * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y}
\end{array}
; Herbie alternative: rational-function rearrangement using fma; t_0 = x/(2y), t_1 = -2x^2 - t_0.
(FPCore (x y) :precision binary64 (let* ((t_0 (/ x (* 2.0 y))) (t_1 (- (* (* x x) -2.0) t_0))) (/ (fma t_0 (* t_0 t_1) (* -2.0 (/ (pow x 5.0) y))) (* t_1 t_1))))
double code(double x, double y) {
double t_0 = x / (2.0 * y);
double t_1 = ((x * x) * -2.0) - t_0;
return fma(t_0, (t_0 * t_1), (-2.0 * (pow(x, 5.0) / y))) / (t_1 * t_1);
}
# Herbie alternative using fma; Float64(...) wrappers pin binary64 rounding per step.
function code(x, y) t_0 = Float64(x / Float64(2.0 * y)) t_1 = Float64(Float64(Float64(x * x) * -2.0) - t_0) return Float64(fma(t_0, Float64(t_0 * t_1), Float64(-2.0 * Float64((x ^ 5.0) / y))) / Float64(t_1 * t_1)) end
(* Herbie alternative; nested Blocks bind t_0 and t_1, with machine-precision rounding after every operation. *)
code[x_, y_] := Block[{t$95$0 = N[(x / N[(2.0 * y), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(x * x), $MachinePrecision] * -2.0), $MachinePrecision] - t$95$0), $MachinePrecision]}, N[(N[(t$95$0 * N[(t$95$0 * t$95$1), $MachinePrecision] + N[(-2.0 * N[(N[Power[x, 5.0], $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{2 \cdot y}\\
t_1 := \left(x \cdot x\right) \cdot -2 - t\_0\\
\frac{\mathsf{fma}\left(t\_0, t\_0 \cdot t\_1, -2 \cdot \frac{{x}^{5}}{y}\right)}{t\_1 \cdot t\_1}
\end{array}
\end{array}
Initial program 9.2%
Taylor expanded in y around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 10.8
Applied rewrites 10.8%
lift-+.f64 N/A
flip-+ N/A
Applied rewrites 10.8%
Taylor expanded in y around 0
*-commutative N/A
lower-*.f64 N/A
lower-/.f64 N/A
lower-pow.f64 20.2
Applied rewrites 20.2%
Applied rewrites 20.2%
; Herbie alternative: rational form with t_0 = -2x^2 and mirrored denominators t_1, t_2.
(FPCore (x y)
:precision binary64
(let* ((t_0 (* (* x x) -2.0))
(t_1 (- t_0 (/ x (* 2.0 y))))
(t_2 (- t_0 (/ x (* -2.0 y)))))
(/ (fma (* t_2 t_0) t_0 (* (* (/ 0.25 y) (/ (* x x) y)) t_2)) (* t_1 t_1))))
double code(double x, double y) {
double t_0 = (x * x) * -2.0;
double t_1 = t_0 - (x / (2.0 * y));
double t_2 = t_0 - (x / (-2.0 * y));
return fma((t_2 * t_0), t_0, (((0.25 / y) * ((x * x) / y)) * t_2)) / (t_1 * t_1);
}
# Herbie alternative with mirrored denominators; Float64(...) wrappers pin binary64 rounding per step.
function code(x, y) t_0 = Float64(Float64(x * x) * -2.0) t_1 = Float64(t_0 - Float64(x / Float64(2.0 * y))) t_2 = Float64(t_0 - Float64(x / Float64(-2.0 * y))) return Float64(fma(Float64(t_2 * t_0), t_0, Float64(Float64(Float64(0.25 / y) * Float64(Float64(x * x) / y)) * t_2)) / Float64(t_1 * t_1)) end
(* Herbie alternative; nested Blocks bind t_0, t_1, t_2 with machine-precision rounding after every operation. *)
code[x_, y_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * -2.0), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 - N[(x / N[(2.0 * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$0 - N[(x / N[(-2.0 * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0 + N[(N[(N[(0.25 / y), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] / y), $MachinePrecision]), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot -2\\
t_1 := t\_0 - \frac{x}{2 \cdot y}\\
t_2 := t\_0 - \frac{x}{-2 \cdot y}\\
\frac{\mathsf{fma}\left(t\_2 \cdot t\_0, t\_0, \left(\frac{0.25}{y} \cdot \frac{x \cdot x}{y}\right) \cdot t\_2\right)}{t\_1 \cdot t\_1}
\end{array}
\end{array}
Initial program 9.2%
Taylor expanded in y around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 10.8
Applied rewrites 10.8%
lift-+.f64 N/A
flip-+ N/A
Applied rewrites 10.8%
Applied rewrites 10.8%
Taylor expanded in x around 0
associate-*r/ N/A
unpow2 N/A
times-frac N/A
lower-*.f64 N/A
lower-/.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 10.8
Applied rewrites 10.8%
; Herbie alternative: truncated series -2x^2 + x/(2y), with 2y computed as y + y.
(FPCore (x y) :precision binary64 (+ (* -2.0 (* x x)) (/ x (+ y y))))
double code(double x, double y) {
    /* Herbie alternative: -2 x^2 + x / (2y), with the doubling done as y + y.
       Same operation order as the generated one-liner. */
    const double quadratic = -2.0 * (x * x);
    return quadratic + x / (y + y);
}
real(8) function code(x, y)
    !> Herbie alternative: -2 x**2 + x / (2y), with the doubling done as y + y.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = ((-2.0d0) * (x * x)) + (x / (y + y))
end function
public static double code(double x, double y) {
    // Herbie alternative: -2 x^2 + x / (2y), with the doubling done as y + y.
    final double quadratic = -2.0 * (x * x);
    return quadratic + x / (y + y);
}
def code(x, y):
    """Herbie alternative: ``-2*x**2 + x/(2*y)``, with 2y computed as y + y.

    Same floating-point operation order as the generated one-liner.
    """
    doubled_y = y + y
    return -2.0 * (x * x) + x / doubled_y
# Herbie alternative -2x^2 + x/(y+y); Float64(...) wrappers pin binary64 rounding per step.
function code(x, y) return Float64(Float64(-2.0 * Float64(x * x)) + Float64(x / Float64(y + y))) end
function tmp = code(x, y)
  % Herbie alternative: -2*x^2 + x/(2*y), with the doubling done as y + y.
  tmp = -2.0 * (x * x) + x / (y + y);
end
(* Herbie alternative -2x^2 + x/(y+y), rounded to machine precision after every operation. *)
code[x_, y_] := N[(N[(-2.0 * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(x / N[(y + y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \left(x \cdot x\right) + \frac{x}{y + y}
\end{array}
Initial program 9.2%
Taylor expanded in y around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 10.8
Applied rewrites 10.8%
lift-*.f64 N/A
count-2-rev N/A
lower-+.f64 10.8
Applied rewrites 10.8%
; Herbie alternative: keep only the x/(2y) term, computed as (0.5/y) * x.
(FPCore (x y) :precision binary64 (* (/ 0.5 y) x))
double code(double x, double y) {
    /* Herbie alternative: x / (2y) computed as (0.5 / y) * x. */
    const double half_over_y = 0.5 / y;
    return half_over_y * x;
}
real(8) function code(x, y)
    !> Herbie alternative: x / (2y) computed as (0.5 / y) * x.
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (0.5d0 / y) * x
end function
public static double code(double x, double y) {
    // Herbie alternative: x / (2y) computed as (0.5 / y) * x.
    final double halfOverY = 0.5 / y;
    return halfOverY * x;
}
def code(x, y):
    """Herbie alternative: ``x / (2*y)`` computed as ``(0.5 / y) * x``."""
    half_over_y = 0.5 / y
    return half_over_y * x
# Herbie alternative (0.5/y)*x; Float64(...) wrappers pin binary64 rounding per step.
function code(x, y) return Float64(Float64(0.5 / y) * x) end
function tmp = code(x, y)
  % Herbie alternative: x / (2*y) computed as (0.5 / y) * x.
  half_over_y = 0.5 / y;
  tmp = half_over_y * x;
end
(* Herbie alternative (0.5/y)*x, rounded to machine precision after each operation. *)
code[x_, y_] := N[(N[(0.5 / y), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{y} \cdot x
\end{array}
Initial program 9.2%
Taylor expanded in y around 0
associate-*r/ N/A
associate-*l/ N/A
metadata-eval N/A
associate-*r/ N/A
lower-*.f64 N/A
associate-*r/ N/A
metadata-eval N/A
lower-/.f64 1.6
Applied rewrites 1.6%
herbie shell --seed 2024327
; Input program: Rump's expression from Stadtherr's award speech (classic ill-conditioned benchmark).
(FPCore (x y)
:name "Rump's expression from Stadtherr's award speech"
:precision binary64
:pre (and (== x 77617.0) (== y 33096.0))
(+ (+ (+ (* 333.75 (pow y 6.0)) (* (* x x) (- (- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0))) 2.0))) (* 5.5 (pow y 8.0)) ) (/ x (* 2.0 y))))