Rump's expression from Stadtherr's award speech

Percentage Accurate: 9.2% → 20.2%
Time: 6.1s
Alternatives: 3
Speedup: 17.3×

Specification

?
\[x = 77617 \land y = 33096\]
\[-0.8273960599468214\]
; Target: the binary64 value of Rump's expression at the preconditioned inputs (x = 77617, y = 33096).
(FPCore (x y) :precision binary64 -0.8273960599468214)
/* Rump's expression reduced to its value at the preconditioned inputs
 * (x = 77617, y = 33096); both arguments are ignored. */
double code(double x, double y) {
	(void) x;
	(void) y;
	const double result = -0.8273960599468214;
	return result;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    ! Value of Rump's expression at the preconditioned inputs
    ! (x = 77617, y = 33096); the arguments are unused.
    code = -0.8273960599468214d0
end function
// Value of Rump's expression at the preconditioned inputs
// (x = 77617, y = 33096); both arguments are unused.
public static double code(double x, double y) {
	return -0.8273960599468214;
}
def code(x, y):
	# Rump's expression reduced to its value at the preconditioned
	# inputs (x = 77617, y = 33096); x and y are unused.
	result = -0.8273960599468214
	return result
# Value of Rump's expression at the preconditioned inputs; x and y are unused.
function code(x, y)
	return -0.8273960599468214
end
% Value of Rump's expression at the preconditioned inputs; x and y are unused.
function tmp = code(x, y)
	tmp = -0.8273960599468214;
end
(* Value of Rump's expression at the preconditioned inputs; x and y are unused. *)
code[x_, y_] := -0.8273960599468214
\begin{array}{l}

\\
-0.8273960599468214
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 3 alternatives:

Alternative  Accuracy  Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 9.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y} \end{array} \]
; Initial program (9.2% accurate per the report): Rump's ill-conditioned
; polynomial plus x/(2*y), evaluated directly in binary64.
(FPCore (x y)
 :precision binary64
 (+
  (+
   (+
    (* 333.75 (pow y 6.0))
    (*
     (* x x)
     (-
      (- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0)))
      2.0)))
   (* 5.5 (pow y 8.0)))
  (/ x (* 2.0 y))))
double code(double x, double y) {
	return (((333.75 * pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - pow(y, 6.0)) - (121.0 * pow(y, 4.0))) - 2.0))) + (5.5 * pow(y, 8.0))) + (x / (2.0 * y));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    ! Direct double-precision evaluation of Rump's polynomial plus x/(2*y);
    ! only ~9.2% accurate per the report's sampling.
    code = (((333.75d0 * (y ** 6.0d0)) + ((x * x) * (((((((11.0d0 * x) * x) * y) * y) - (y ** 6.0d0)) - (121.0d0 * (y ** 4.0d0))) - 2.0d0))) + (5.5d0 * (y ** 8.0d0))) + (x / (2.0d0 * y))
end function
// Direct binary64 evaluation of Rump's polynomial plus x/(2*y);
// only ~9.2% accurate per the report's sampling.
public static double code(double x, double y) {
	return (((333.75 * Math.pow(y, 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - Math.pow(y, 6.0)) - (121.0 * Math.pow(y, 4.0))) - 2.0))) + (5.5 * Math.pow(y, 8.0))) + (x / (2.0 * y));
}
def code(x, y):
	# Direct binary64 evaluation of Rump's polynomial plus x / (2*y).
	# Intermediates are named for readability; the floating-point
	# operation order matches the original one-line expression exactly.
	y4 = math.pow(y, 4.0)
	y6 = math.pow(y, 6.0)
	y8 = math.pow(y, 8.0)
	# (((11*x)*x*y*y - y^6) - 121*y^4) - 2
	poly = ((((((11.0 * x) * x) * y) * y) - y6) - (121.0 * y4)) - 2.0
	total = (333.75 * y6) + ((x * x) * poly)
	total = total + (5.5 * y8)
	return total + (x / (2.0 * y))
# Direct evaluation of Rump's polynomial plus x/(2y); every intermediate is
# forced to Float64. Only ~9.2% accurate per the report's sampling.
function code(x, y)
	return Float64(Float64(Float64(Float64(333.75 * (y ^ 6.0)) + Float64(Float64(x * x) * Float64(Float64(Float64(Float64(Float64(Float64(Float64(11.0 * x) * x) * y) * y) - (y ^ 6.0)) - Float64(121.0 * (y ^ 4.0))) - 2.0))) + Float64(5.5 * (y ^ 8.0))) + Float64(x / Float64(2.0 * y)))
end
% Direct double-precision evaluation of Rump's polynomial plus x/(2*y).
function tmp = code(x, y)
	tmp = (((333.75 * (y ^ 6.0)) + ((x * x) * (((((((11.0 * x) * x) * y) * y) - (y ^ 6.0)) - (121.0 * (y ^ 4.0))) - 2.0))) + (5.5 * (y ^ 8.0))) + (x / (2.0 * y));
end
(* Direct evaluation of Rump's polynomial plus x/(2 y), rounding every intermediate with N[..., $MachinePrecision] to mimic binary64 arithmetic. *)
code[x_, y_] := N[(N[(N[(N[(333.75 * N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] + N[(N[(x * x), $MachinePrecision] * N[(N[(N[(N[(N[(N[(N[(11.0 * x), $MachinePrecision] * x), $MachinePrecision] * y), $MachinePrecision] * y), $MachinePrecision] - N[Power[y, 6.0], $MachinePrecision]), $MachinePrecision] - N[(121.0 * N[Power[y, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(5.5 * N[Power[y, 8.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x / N[(2.0 * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y}
\end{array}

Alternative 1: 20.2% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := 0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\\ \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot 0.5\right), y, {x}^{3} \cdot 0.125\right)}{{y}^{3}}}{t\_0 \cdot t\_0} \end{array} \end{array} \]
; Alternative 1 (20.2% accurate): Taylor expansion in y around 0, restructured
; with nested fma calls; t_0 = 0.5*x/y + 2*x^2 is squared in the denominator.
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (- (* 0.5 (/ x y)) (* -2.0 (* x x)))))
   (/
    (/
     (fma
      (fma (* (pow x 5.0) y) -2.0 (* (pow x 4.0) 0.5))
      y
      (* (pow x 3.0) 0.125))
     (pow y 3.0))
    (* t_0 t_0))))
double code(double x, double y) {
	double t_0 = (0.5 * (x / y)) - (-2.0 * (x * x));
	return (fma(fma((pow(x, 5.0) * y), -2.0, (pow(x, 4.0) * 0.5)), y, (pow(x, 3.0) * 0.125)) / pow(y, 3.0)) / (t_0 * t_0);
}
# Alternative 1: fma-based Taylor form; t_0 = 0.5*x/y - (-2*x*x) is squared
# in the denominator. Every intermediate is forced to Float64.
function code(x, y)
	t_0 = Float64(Float64(0.5 * Float64(x / y)) - Float64(-2.0 * Float64(x * x)))
	return Float64(Float64(fma(fma(Float64((x ^ 5.0) * y), -2.0, Float64((x ^ 4.0) * 0.5)), y, Float64((x ^ 3.0) * 0.125)) / (y ^ 3.0)) / Float64(t_0 * t_0))
end
(* Alternative 1 with fma written out as multiply-add; t$95$0 corresponds to t_0 in the report. *)
code[x_, y_] := Block[{t$95$0 = N[(N[(0.5 * N[(x / y), $MachinePrecision]), $MachinePrecision] - N[(-2.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[Power[x, 5.0], $MachinePrecision] * y), $MachinePrecision] * -2.0 + N[(N[Power[x, 4.0], $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] * y + N[(N[Power[x, 3.0], $MachinePrecision] * 0.125), $MachinePrecision]), $MachinePrecision] / N[Power[y, 3.0], $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := 0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\\
\frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot 0.5\right), y, {x}^{3} \cdot 0.125\right)}{{y}^{3}}}{t\_0 \cdot t\_0}
\end{array}
\end{array}
Derivation
  1. Initial program 9.2%

    \[\left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y} \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{-2 \cdot {x}^{2}} + \frac{x}{2 \cdot y} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{-2 \cdot {x}^{2}} + \frac{x}{2 \cdot y} \]
    2. unpow2N/A

      \[\leadsto -2 \cdot \color{blue}{\left(x \cdot x\right)} + \frac{x}{2 \cdot y} \]
    3. lower-*.f6410.8

      \[\leadsto -2 \cdot \color{blue}{\left(x \cdot x\right)} + \frac{x}{2 \cdot y} \]
  5. Applied rewrites10.8%

    \[\leadsto \color{blue}{-2 \cdot \left(x \cdot x\right)} + \frac{x}{2 \cdot y} \]
  6. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \color{blue}{-2 \cdot \left(x \cdot x\right) + \frac{x}{2 \cdot y}} \]
    2. +-commutativeN/A

      \[\leadsto \color{blue}{\frac{x}{2 \cdot y} + -2 \cdot \left(x \cdot x\right)} \]
    3. lift-/.f64N/A

      \[\leadsto \color{blue}{\frac{x}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    4. div-invN/A

      \[\leadsto \color{blue}{x \cdot \frac{1}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    5. associate-*r/N/A

      \[\leadsto \color{blue}{\frac{x \cdot 1}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    6. lift-*.f64N/A

      \[\leadsto \frac{x \cdot 1}{\color{blue}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    7. *-commutativeN/A

      \[\leadsto \frac{x \cdot 1}{\color{blue}{y \cdot 2}} + -2 \cdot \left(x \cdot x\right) \]
    8. times-fracN/A

      \[\leadsto \color{blue}{\frac{x}{y} \cdot \frac{1}{2}} + -2 \cdot \left(x \cdot x\right) \]
    9. metadata-evalN/A

      \[\leadsto \frac{x}{y} \cdot \color{blue}{\frac{1}{2}} + -2 \cdot \left(x \cdot x\right) \]
    10. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{x}{y}, \frac{1}{2}, -2 \cdot \left(x \cdot x\right)\right)} \]
    11. lower-/.f6410.8

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{x}{y}}, 0.5, -2 \cdot \left(x \cdot x\right)\right) \]
  7. Applied rewrites10.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{x}{y}, 0.5, \left(x \cdot x\right) \cdot -2\right)} \]
  8. Step-by-step derivation
    1. lift-fma.f64N/A

      \[\leadsto \color{blue}{\frac{x}{y} \cdot \frac{1}{2} + \left(x \cdot x\right) \cdot -2} \]
    2. flip-+N/A

      \[\leadsto \color{blue}{\frac{\left(\frac{x}{y} \cdot \frac{1}{2}\right) \cdot \left(\frac{x}{y} \cdot \frac{1}{2}\right) - \left(\left(x \cdot x\right) \cdot -2\right) \cdot \left(\left(x \cdot x\right) \cdot -2\right)}{\frac{x}{y} \cdot \frac{1}{2} - \left(x \cdot x\right) \cdot -2}} \]
  9. Applied rewrites10.8%

    \[\leadsto \color{blue}{\frac{{\left(0.5 \cdot \frac{x}{y}\right)}^{2} \cdot \left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) - \left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot {\left(-2 \cdot \left(x \cdot x\right)\right)}^{2}}{\left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)}} \]
  10. Taylor expanded in y around 0

    \[\leadsto \frac{\color{blue}{\frac{\frac{1}{8} \cdot {x}^{3} + y \cdot \left(-2 \cdot \left({x}^{5} \cdot y\right) + \frac{1}{2} \cdot {x}^{4}\right)}{{y}^{3}}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
  11. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \frac{\color{blue}{\frac{\frac{1}{8} \cdot {x}^{3} + y \cdot \left(-2 \cdot \left({x}^{5} \cdot y\right) + \frac{1}{2} \cdot {x}^{4}\right)}{{y}^{3}}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto \frac{\frac{\color{blue}{y \cdot \left(-2 \cdot \left({x}^{5} \cdot y\right) + \frac{1}{2} \cdot {x}^{4}\right) + \frac{1}{8} \cdot {x}^{3}}}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    3. *-commutativeN/A

      \[\leadsto \frac{\frac{\color{blue}{\left(-2 \cdot \left({x}^{5} \cdot y\right) + \frac{1}{2} \cdot {x}^{4}\right) \cdot y} + \frac{1}{8} \cdot {x}^{3}}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    4. lower-fma.f64N/A

      \[\leadsto \frac{\frac{\color{blue}{\mathsf{fma}\left(-2 \cdot \left({x}^{5} \cdot y\right) + \frac{1}{2} \cdot {x}^{4}, y, \frac{1}{8} \cdot {x}^{3}\right)}}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    5. *-commutativeN/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\color{blue}{\left({x}^{5} \cdot y\right) \cdot -2} + \frac{1}{2} \cdot {x}^{4}, y, \frac{1}{8} \cdot {x}^{3}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    6. lower-fma.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left({x}^{5} \cdot y, -2, \frac{1}{2} \cdot {x}^{4}\right)}, y, \frac{1}{8} \cdot {x}^{3}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    7. lower-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{{x}^{5} \cdot y}, -2, \frac{1}{2} \cdot {x}^{4}\right), y, \frac{1}{8} \cdot {x}^{3}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    8. lower-pow.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{{x}^{5}} \cdot y, -2, \frac{1}{2} \cdot {x}^{4}\right), y, \frac{1}{8} \cdot {x}^{3}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    9. *-commutativeN/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, \color{blue}{{x}^{4} \cdot \frac{1}{2}}\right), y, \frac{1}{8} \cdot {x}^{3}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    10. lower-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, \color{blue}{{x}^{4} \cdot \frac{1}{2}}\right), y, \frac{1}{8} \cdot {x}^{3}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    11. lower-pow.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, \color{blue}{{x}^{4}} \cdot \frac{1}{2}\right), y, \frac{1}{8} \cdot {x}^{3}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    12. *-commutativeN/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot \frac{1}{2}\right), y, \color{blue}{{x}^{3} \cdot \frac{1}{8}}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    13. lower-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot \frac{1}{2}\right), y, \color{blue}{{x}^{3} \cdot \frac{1}{8}}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    14. lower-pow.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot \frac{1}{2}\right), y, \color{blue}{{x}^{3}} \cdot \frac{1}{8}\right)}{{y}^{3}}}{\left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(\frac{1}{2} \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
    15. lower-pow.f6420.2

      \[\leadsto \frac{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot 0.5\right), y, {x}^{3} \cdot 0.125\right)}{\color{blue}{{y}^{3}}}}{\left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
  12. Applied rewrites20.2%

    \[\leadsto \frac{\color{blue}{\frac{\mathsf{fma}\left(\mathsf{fma}\left({x}^{5} \cdot y, -2, {x}^{4} \cdot 0.5\right), y, {x}^{3} \cdot 0.125\right)}{{y}^{3}}}}{\left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right) \cdot \left(0.5 \cdot \frac{x}{y} - -2 \cdot \left(x \cdot x\right)\right)} \]
  13. Add Preprocessing

Alternative 2: 10.8% accurate, 17.3× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(\frac{x}{y}, 0.5, \left(x \cdot x\right) \cdot -2\right) \end{array} \]
; Alternative 2 (10.8% accurate, 17.3x speedup): x/(2y) - 2*x^2 as a single fma.
(FPCore (x y) :precision binary64 (fma (/ x y) 0.5 (* (* x x) -2.0)))
double code(double x, double y) {
	return fma((x / y), 0.5, ((x * x) * -2.0));
}
# Alternative 2: x/(2y) - 2*x^2 computed as a single fma.
function code(x, y)
	return fma(Float64(x / y), 0.5, Float64(Float64(x * x) * -2.0))
end
(* Alternative 2: x/(2 y) - 2 x^2, with the fma written out as multiply-add. *)
code[x_, y_] := N[(N[(x / y), $MachinePrecision] * 0.5 + N[(N[(x * x), $MachinePrecision] * -2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(\frac{x}{y}, 0.5, \left(x \cdot x\right) \cdot -2\right)
\end{array}
Derivation
  1. Initial program 9.2%

    \[\left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y} \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{-2 \cdot {x}^{2}} + \frac{x}{2 \cdot y} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{-2 \cdot {x}^{2}} + \frac{x}{2 \cdot y} \]
    2. unpow2N/A

      \[\leadsto -2 \cdot \color{blue}{\left(x \cdot x\right)} + \frac{x}{2 \cdot y} \]
    3. lower-*.f6410.8

      \[\leadsto -2 \cdot \color{blue}{\left(x \cdot x\right)} + \frac{x}{2 \cdot y} \]
  5. Applied rewrites10.8%

    \[\leadsto \color{blue}{-2 \cdot \left(x \cdot x\right)} + \frac{x}{2 \cdot y} \]
  6. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \color{blue}{-2 \cdot \left(x \cdot x\right) + \frac{x}{2 \cdot y}} \]
    2. +-commutativeN/A

      \[\leadsto \color{blue}{\frac{x}{2 \cdot y} + -2 \cdot \left(x \cdot x\right)} \]
    3. lift-/.f64N/A

      \[\leadsto \color{blue}{\frac{x}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    4. div-invN/A

      \[\leadsto \color{blue}{x \cdot \frac{1}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    5. associate-*r/N/A

      \[\leadsto \color{blue}{\frac{x \cdot 1}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    6. lift-*.f64N/A

      \[\leadsto \frac{x \cdot 1}{\color{blue}{2 \cdot y}} + -2 \cdot \left(x \cdot x\right) \]
    7. *-commutativeN/A

      \[\leadsto \frac{x \cdot 1}{\color{blue}{y \cdot 2}} + -2 \cdot \left(x \cdot x\right) \]
    8. times-fracN/A

      \[\leadsto \color{blue}{\frac{x}{y} \cdot \frac{1}{2}} + -2 \cdot \left(x \cdot x\right) \]
    9. metadata-evalN/A

      \[\leadsto \frac{x}{y} \cdot \color{blue}{\frac{1}{2}} + -2 \cdot \left(x \cdot x\right) \]
    10. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{x}{y}, \frac{1}{2}, -2 \cdot \left(x \cdot x\right)\right)} \]
    11. lower-/.f6410.8

      \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{x}{y}}, 0.5, -2 \cdot \left(x \cdot x\right)\right) \]
  7. Applied rewrites10.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{x}{y}, 0.5, \left(x \cdot x\right) \cdot -2\right)} \]
  8. Add Preprocessing

Alternative 3: 1.6% accurate, 28.5× speedup?

\[\begin{array}{l} \\ \frac{0.5}{y} \cdot x \end{array} \]
; Alternative 3 (1.6% accurate, 28.5x speedup): only the x/(2y) term survives.
(FPCore (x y) :precision binary64 (* (/ 0.5 y) x))
/* Alternative 3 (fastest): only the x/(2*y) term, computed as (0.5/y)*x. */
double code(double x, double y) {
	double half_over_y = 0.5 / y;
	return half_over_y * x;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    ! Alternative 3 (fastest): only the x/(2*y) term, computed as (0.5/y)*x.
    code = (0.5d0 / y) * x
end function
// Alternative 3 (fastest): only the x/(2*y) term, computed as (0.5/y)*x.
public static double code(double x, double y) {
	return (0.5 / y) * x;
}
def code(x, y):
	# Alternative 3 (fastest): only the x/(2*y) term survives Herbie's
	# simplification, computed as (0.5/y)*x.
	half_over_y = 0.5 / y
	return half_over_y * x
# Alternative 3 (fastest): only the x/(2y) term, computed as (0.5/y)*x.
function code(x, y)
	return Float64(Float64(0.5 / y) * x)
end
% Alternative 3 (fastest): only the x/(2*y) term, computed as (0.5/y)*x.
function tmp = code(x, y)
	tmp = (0.5 / y) * x;
end
(* Alternative 3 (fastest): only the x/(2 y) term, computed as (0.5/y) x. *)
code[x_, y_] := N[(N[(0.5 / y), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}

\\
\frac{0.5}{y} \cdot x
\end{array}
Derivation
  1. Initial program 9.2%

    \[\left(\left(333.75 \cdot {y}^{6} + \left(x \cdot x\right) \cdot \left(\left(\left(\left(\left(\left(11 \cdot x\right) \cdot x\right) \cdot y\right) \cdot y - {y}^{6}\right) - 121 \cdot {y}^{4}\right) - 2\right)\right) + 5.5 \cdot {y}^{8}\right) + \frac{x}{2 \cdot y} \]
  2. Add Preprocessing
  3. Taylor expanded in y around 0

    \[\leadsto \color{blue}{\frac{1}{2} \cdot \frac{x}{y}} \]
  4. Step-by-step derivation
    1. associate-*r/N/A

      \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot x}{y}} \]
    2. associate-*l/N/A

      \[\leadsto \color{blue}{\frac{\frac{1}{2}}{y} \cdot x} \]
    3. metadata-evalN/A

      \[\leadsto \frac{\color{blue}{\frac{1}{2} \cdot 1}}{y} \cdot x \]
    4. associate-*r/N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \frac{1}{y}\right)} \cdot x \]
    5. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\frac{1}{2} \cdot \frac{1}{y}\right) \cdot x} \]
    6. associate-*r/N/A

      \[\leadsto \color{blue}{\frac{\frac{1}{2} \cdot 1}{y}} \cdot x \]
    7. metadata-evalN/A

      \[\leadsto \frac{\color{blue}{\frac{1}{2}}}{y} \cdot x \]
    8. lower-/.f641.6

      \[\leadsto \color{blue}{\frac{0.5}{y}} \cdot x \]
  5. Applied rewrites1.6%

    \[\leadsto \color{blue}{\frac{0.5}{y} \cdot x} \]
  6. Add Preprocessing

Reproduce

?
herbie shell --seed 2024313 
; Paste into `herbie shell --seed 2024313` to reproduce this report.
(FPCore (x y)
  :name "Rump's expression from Stadtherr's award speech"
  :precision binary64
  :pre (and (== x 77617.0) (== y 33096.0))
  (+ (+ (+ (* 333.75 (pow y 6.0)) (* (* x x) (- (- (- (* (* (* (* 11.0 x) x) y) y) (pow y 6.0)) (* 121.0 (pow y 4.0))) 2.0))) (* 5.5 (pow y 8.0))) (/ x (* 2.0 y))))