Quadratic roots, medium range

Percentage Accurate: 31.3% → 95.5%
Time: 19.5s
Alternatives: 9
Speedup: 29.0×

Specification

?
\[\left(\left(1.1102230246251565 \cdot 10^{-16} < a \land a < 9007199254740992\right) \land \left(1.1102230246251565 \cdot 10^{-16} < b \land b < 9007199254740992\right)\right) \land \left(1.1102230246251565 \cdot 10^{-16} < c \land c < 9007199254740992\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Input program: quadratic root (-b + sqrt(b*b - 4ac)) / (2a) in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic root (-b + sqrt(b*b - 4ac)) / (2a), double precision.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Root (-b + sqrt(b*b - 4ac)) / (2a) of a*x^2 + b*x + c, evaluated directly in binary64.
public static double code(double a, double b, double c) {
	final double discriminant = b * b - 4.0 * a * c;
	return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
	# Root (-b + sqrt(b*b - 4ac)) / (2a) of a*x^2 + b*x + c, evaluated directly.
	discriminant = (b * b) - ((4.0 * a) * c)
	return (math.sqrt(discriminant) - b) / (2.0 * a)
# Quadratic root (-b + sqrt(b*b - 4ac)) / (2a); Float64() forces binary64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Quadratic root (-b + sqrt(b*b - 4ac)) / (2a).
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 9 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 31.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Initial program: naive quadratic root (-b + sqrt(b*b - 4ac)) / (2a) in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// Initial program: naive quadratic root (-b + sqrt(b*b - 4ac)) / (2a).
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program: naive quadratic root (-b + sqrt(b*b - 4ac)) / (2a), double precision.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program: naive quadratic root (-b + sqrt(b*b - 4ac)) / (2a).
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Initial program: naive quadratic root (-b + sqrt(b*b - 4ac)) / (2a).
def code(a, b, c):
	return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Initial program: naive quadratic root; Float64() forces binary64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Initial program: naive quadratic root (-b + sqrt(b*b - 4ac)) / (2a).
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 95.5% accurate, 0.1× speedup?

\[\begin{array}{l} \\ \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \frac{{\left(c \cdot a\right)}^{4} \cdot \left(20 \cdot {b}^{-6}\right)}{a} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \end{array} \]
; Herbie alternative 1 (95.5% accurate, 0.1x speedup): series expansion of the
; root in b around infinity; avoids the sqrt entirely.
(FPCore (a b c)
 :precision binary64
 (/
  (fma
   -2.0
   (/ (* (pow c 3.0) (pow a 2.0)) (pow b 4.0))
   (-
    (-
     (* -0.25 (/ (* (pow (* c a) 4.0) (* 20.0 (pow b -6.0))) a))
     (/ (* a (pow c 2.0)) (pow b 2.0)))
    c))
  b))
// Herbie alternative 1: series expansion in b around infinity (95.5% accurate per the report).
double code(double a, double b, double c) {
	return fma(-2.0, ((pow(c, 3.0) * pow(a, 2.0)) / pow(b, 4.0)), (((-0.25 * ((pow((c * a), 4.0) * (20.0 * pow(b, -6.0))) / a)) - ((a * pow(c, 2.0)) / pow(b, 2.0))) - c)) / b;
}
# Herbie alternative 1: series expansion in b around infinity (95.5% accurate per the report).
function code(a, b, c)
	return Float64(fma(-2.0, Float64(Float64((c ^ 3.0) * (a ^ 2.0)) / (b ^ 4.0)), Float64(Float64(Float64(-0.25 * Float64(Float64((Float64(c * a) ^ 4.0) * Float64(20.0 * (b ^ -6.0))) / a)) - Float64(Float64(a * (c ^ 2.0)) / (b ^ 2.0))) - c)) / b)
end
code[a_, b_, c_] := N[(N[(-2.0 * N[(N[(N[Power[c, 3.0], $MachinePrecision] * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision] + N[(N[(N[(-0.25 * N[(N[(N[Power[N[(c * a), $MachinePrecision], 4.0], $MachinePrecision] * N[(20.0 * N[Power[b, -6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision] - N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \frac{{\left(c \cdot a\right)}^{4} \cdot \left(20 \cdot {b}^{-6}\right)}{a} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b}
\end{array}
Derivation
  1. Initial program 33.1%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative33.1%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified33.1%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 95.2%

    \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}} + -0.25 \cdot \frac{4 \cdot \left({a}^{4} \cdot {c}^{4}\right) + 16 \cdot \left({a}^{4} \cdot {c}^{4}\right)}{a \cdot {b}^{6}}\right)\right)}{b}} \]
  6. Step-by-step derivation
    1. Simplified95.2%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \left(\frac{{a}^{4} \cdot {c}^{4}}{a} \cdot \frac{20}{{b}^{6}}\right) - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b}} \]
    2. Step-by-step derivation
      1. associate-*l/95.2%

        \[\leadsto \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \color{blue}{\frac{\left({a}^{4} \cdot {c}^{4}\right) \cdot \frac{20}{{b}^{6}}}{a}} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \]
      2. pow-prod-down95.2%

        \[\leadsto \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \frac{\color{blue}{{\left(a \cdot c\right)}^{4}} \cdot \frac{20}{{b}^{6}}}{a} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \]
      3. div-inv95.2%

        \[\leadsto \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \frac{{\left(a \cdot c\right)}^{4} \cdot \color{blue}{\left(20 \cdot \frac{1}{{b}^{6}}\right)}}{a} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \]
      4. pow-flip95.2%

        \[\leadsto \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \frac{{\left(a \cdot c\right)}^{4} \cdot \left(20 \cdot \color{blue}{{b}^{\left(-6\right)}}\right)}{a} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \]
      5. metadata-eval95.2%

        \[\leadsto \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \frac{{\left(a \cdot c\right)}^{4} \cdot \left(20 \cdot {b}^{\color{blue}{-6}}\right)}{a} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \]
    3. Applied egg-rr95.2%

      \[\leadsto \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \color{blue}{\frac{{\left(a \cdot c\right)}^{4} \cdot \left(20 \cdot {b}^{-6}\right)}{a}} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \]
    4. Final simplification95.2%

      \[\leadsto \frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}}, \left(-0.25 \cdot \frac{{\left(c \cdot a\right)}^{4} \cdot \left(20 \cdot {b}^{-6}\right)}{a} - \frac{a \cdot {c}^{2}}{{b}^{2}}\right) - c\right)}{b} \]
    5. Add Preprocessing

    Alternative 2: 95.5% accurate, 0.2× speedup?

    \[\begin{array}{l} \\ a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, -0.25 \cdot \left(\left(a \cdot 20\right) \cdot \frac{{c}^{4}}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \end{array} \]
    ; Herbie alternative 2 (95.5% accurate, 0.2x speedup): series expansion in a
    ; around 0, evaluated in nested (Horner-like) form.
    (FPCore (a b c)
     :precision binary64
     (-
      (*
       a
       (-
        (*
         a
         (fma
          -2.0
          (/ (pow c 3.0) (pow b 5.0))
          (* -0.25 (* (* a 20.0) (/ (pow c 4.0) (pow b 7.0))))))
        (/ (pow c 2.0) (pow b 3.0))))
      (/ c b)))
    // Herbie alternative 2: series expansion in a around 0 (95.5% accurate per the report).
    double code(double a, double b, double c) {
    	return (a * ((a * fma(-2.0, (pow(c, 3.0) / pow(b, 5.0)), (-0.25 * ((a * 20.0) * (pow(c, 4.0) / pow(b, 7.0)))))) - (pow(c, 2.0) / pow(b, 3.0)))) - (c / b);
    }
    
    # Herbie alternative 2: series expansion in a around 0 (95.5% accurate per the report).
    function code(a, b, c)
    	return Float64(Float64(a * Float64(Float64(a * fma(-2.0, Float64((c ^ 3.0) / (b ^ 5.0)), Float64(-0.25 * Float64(Float64(a * 20.0) * Float64((c ^ 4.0) / (b ^ 7.0)))))) - Float64((c ^ 2.0) / (b ^ 3.0)))) - Float64(c / b))
    end
    
    code[a_, b_, c_] := N[(N[(a * N[(N[(a * N[(-2.0 * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] + N[(-0.25 * N[(N[(a * 20.0), $MachinePrecision] * N[(N[Power[c, 4.0], $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, -0.25 \cdot \left(\left(a \cdot 20\right) \cdot \frac{{c}^{4}}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in a around 0 95.1%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
    6. Step-by-step derivation
      1. +-commutative95.1%

        \[\leadsto \color{blue}{a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right) + -1 \cdot \frac{c}{b}} \]
      2. mul-1-neg95.1%

        \[\leadsto a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right) + \color{blue}{\left(-\frac{c}{b}\right)} \]
      3. unsub-neg95.1%

        \[\leadsto \color{blue}{a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right) - \frac{c}{b}} \]
    7. Simplified95.1%

      \[\leadsto \color{blue}{a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, \left(a \cdot \left(\frac{\frac{{c}^{4}}{{b}^{6}}}{b} \cdot 20\right)\right) \cdot -0.25\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}} \]
    8. Taylor expanded in a around 0 95.1%

      \[\leadsto a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, \color{blue}{\left(20 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)} \cdot -0.25\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
    9. Step-by-step derivation
      1. associate-/l*95.1%

        \[\leadsto a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, \left(20 \cdot \color{blue}{\left(a \cdot \frac{{c}^{4}}{{b}^{7}}\right)}\right) \cdot -0.25\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
      2. associate-*r*95.1%

        \[\leadsto a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, \color{blue}{\left(\left(20 \cdot a\right) \cdot \frac{{c}^{4}}{{b}^{7}}\right)} \cdot -0.25\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
    10. Simplified95.1%

      \[\leadsto a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, \color{blue}{\left(\left(20 \cdot a\right) \cdot \frac{{c}^{4}}{{b}^{7}}\right)} \cdot -0.25\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
    11. Final simplification95.1%

      \[\leadsto a \cdot \left(a \cdot \mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}}, -0.25 \cdot \left(\left(a \cdot 20\right) \cdot \frac{{c}^{4}}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
    12. Add Preprocessing

    Alternative 3: 95.2% accurate, 0.2× speedup?

    \[\begin{array}{l} \\ \left(a \cdot \left(a \cdot \left(a \cdot \left(0.5 \cdot \left(a \cdot \frac{20 \cdot \frac{{c}^{4}}{{b}^{6}}}{b}\right) + 4 \cdot \frac{{c}^{3}}{{b}^{5}}\right) + 2 \cdot \frac{{c}^{2}}{{b}^{3}}\right) + 2 \cdot \frac{c}{b}\right)\right) \cdot \frac{1}{-2 \cdot a} \end{array} \]
    ; Herbie alternative 3 (95.2% accurate, 0.2x speedup): series expansion in a
    ; around 0, with the common factor 1/(-2a) pulled out.
    (FPCore (a b c)
     :precision binary64
     (*
      (*
       a
       (+
        (*
         a
         (+
          (*
           a
           (+
            (* 0.5 (* a (/ (* 20.0 (/ (pow c 4.0) (pow b 6.0))) b)))
            (* 4.0 (/ (pow c 3.0) (pow b 5.0)))))
          (* 2.0 (/ (pow c 2.0) (pow b 3.0)))))
        (* 2.0 (/ c b))))
      (/ 1.0 (* -2.0 a))))
    // Herbie alternative 3: series in a around 0, scaled by 1/(-2a) (95.2% accurate per the report).
    double code(double a, double b, double c) {
    	return (a * ((a * ((a * ((0.5 * (a * ((20.0 * (pow(c, 4.0) / pow(b, 6.0))) / b))) + (4.0 * (pow(c, 3.0) / pow(b, 5.0))))) + (2.0 * (pow(c, 2.0) / pow(b, 3.0))))) + (2.0 * (c / b)))) * (1.0 / (-2.0 * a));
    }
    
    ! Herbie alternative 3: series in a around 0, scaled by 1/(-2a) (95.2% accurate per the report).
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = (a * ((a * ((a * ((0.5d0 * (a * ((20.0d0 * ((c ** 4.0d0) / (b ** 6.0d0))) / b))) + (4.0d0 * ((c ** 3.0d0) / (b ** 5.0d0))))) + (2.0d0 * ((c ** 2.0d0) / (b ** 3.0d0))))) + (2.0d0 * (c / b)))) * (1.0d0 / ((-2.0d0) * a))
    end function
    
    // Herbie alternative 3: series in a around 0, scaled by 1/(-2a) (95.2% accurate per the report).
    public static double code(double a, double b, double c) {
    	return (a * ((a * ((a * ((0.5 * (a * ((20.0 * (Math.pow(c, 4.0) / Math.pow(b, 6.0))) / b))) + (4.0 * (Math.pow(c, 3.0) / Math.pow(b, 5.0))))) + (2.0 * (Math.pow(c, 2.0) / Math.pow(b, 3.0))))) + (2.0 * (c / b)))) * (1.0 / (-2.0 * a));
    }
    
    # Herbie alternative 3: series in a around 0, scaled by 1/(-2a) (95.2% accurate per the report).
    def code(a, b, c):
    	return (a * ((a * ((a * ((0.5 * (a * ((20.0 * (math.pow(c, 4.0) / math.pow(b, 6.0))) / b))) + (4.0 * (math.pow(c, 3.0) / math.pow(b, 5.0))))) + (2.0 * (math.pow(c, 2.0) / math.pow(b, 3.0))))) + (2.0 * (c / b)))) * (1.0 / (-2.0 * a))
    
    # Herbie alternative 3: series in a around 0, scaled by 1/(-2a) (95.2% accurate per the report).
    function code(a, b, c)
    	return Float64(Float64(a * Float64(Float64(a * Float64(Float64(a * Float64(Float64(0.5 * Float64(a * Float64(Float64(20.0 * Float64((c ^ 4.0) / (b ^ 6.0))) / b))) + Float64(4.0 * Float64((c ^ 3.0) / (b ^ 5.0))))) + Float64(2.0 * Float64((c ^ 2.0) / (b ^ 3.0))))) + Float64(2.0 * Float64(c / b)))) * Float64(1.0 / Float64(-2.0 * a)))
    end
    
    % Herbie alternative 3: series in a around 0, scaled by 1/(-2a) (95.2% accurate per the report).
    function tmp = code(a, b, c)
    	tmp = (a * ((a * ((a * ((0.5 * (a * ((20.0 * ((c ^ 4.0) / (b ^ 6.0))) / b))) + (4.0 * ((c ^ 3.0) / (b ^ 5.0))))) + (2.0 * ((c ^ 2.0) / (b ^ 3.0))))) + (2.0 * (c / b)))) * (1.0 / (-2.0 * a));
    end
    
    code[a_, b_, c_] := N[(N[(a * N[(N[(a * N[(N[(a * N[(N[(0.5 * N[(a * N[(N[(20.0 * N[(N[Power[c, 4.0], $MachinePrecision] / N[Power[b, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(4.0 * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 * N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(-2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(a \cdot \left(a \cdot \left(a \cdot \left(0.5 \cdot \left(a \cdot \frac{20 \cdot \frac{{c}^{4}}{{b}^{6}}}{b}\right) + 4 \cdot \frac{{c}^{3}}{{b}^{5}}\right) + 2 \cdot \frac{{c}^{2}}{{b}^{3}}\right) + 2 \cdot \frac{c}{b}\right)\right) \cdot \frac{1}{-2 \cdot a}
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\sqrt{\mathsf{fma}\left(a, c \cdot -4, b \cdot b\right)} - b}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Step-by-step derivation
      1. frac-2neg33.1%

        \[\leadsto \color{blue}{\frac{-\left(\sqrt{\mathsf{fma}\left(a, c \cdot -4, b \cdot b\right)} - b\right)}{-a \cdot 2}} \]
      2. div-inv33.1%

        \[\leadsto \color{blue}{\left(-\left(\sqrt{\mathsf{fma}\left(a, c \cdot -4, b \cdot b\right)} - b\right)\right) \cdot \frac{1}{-a \cdot 2}} \]
      3. sub-neg33.1%

        \[\leadsto \left(-\color{blue}{\left(\sqrt{\mathsf{fma}\left(a, c \cdot -4, b \cdot b\right)} + \left(-b\right)\right)}\right) \cdot \frac{1}{-a \cdot 2} \]
      4. distribute-neg-in33.1%

        \[\leadsto \color{blue}{\left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, b \cdot b\right)}\right) + \left(-\left(-b\right)\right)\right)} \cdot \frac{1}{-a \cdot 2} \]
      5. pow233.1%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, \color{blue}{{b}^{2}}\right)}\right) + \left(-\left(-b\right)\right)\right) \cdot \frac{1}{-a \cdot 2} \]
      6. add-sqr-sqrt0.0%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \left(-\color{blue}{\sqrt{-b} \cdot \sqrt{-b}}\right)\right) \cdot \frac{1}{-a \cdot 2} \]
      7. sqrt-unprod1.6%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \left(-\color{blue}{\sqrt{\left(-b\right) \cdot \left(-b\right)}}\right)\right) \cdot \frac{1}{-a \cdot 2} \]
      8. sqr-neg1.6%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \left(-\sqrt{\color{blue}{b \cdot b}}\right)\right) \cdot \frac{1}{-a \cdot 2} \]
      9. sqrt-prod1.6%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \left(-\color{blue}{\sqrt{b} \cdot \sqrt{b}}\right)\right) \cdot \frac{1}{-a \cdot 2} \]
      10. add-sqr-sqrt1.6%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \left(-\color{blue}{b}\right)\right) \cdot \frac{1}{-a \cdot 2} \]
      11. add-sqr-sqrt0.0%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \color{blue}{\sqrt{-b} \cdot \sqrt{-b}}\right) \cdot \frac{1}{-a \cdot 2} \]
      12. sqrt-unprod33.1%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \color{blue}{\sqrt{\left(-b\right) \cdot \left(-b\right)}}\right) \cdot \frac{1}{-a \cdot 2} \]
      13. sqr-neg33.1%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \sqrt{\color{blue}{b \cdot b}}\right) \cdot \frac{1}{-a \cdot 2} \]
      14. sqrt-prod33.0%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \color{blue}{\sqrt{b} \cdot \sqrt{b}}\right) \cdot \frac{1}{-a \cdot 2} \]
      15. add-sqr-sqrt33.1%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + \color{blue}{b}\right) \cdot \frac{1}{-a \cdot 2} \]
      16. distribute-rgt-neg-in33.1%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + b\right) \cdot \frac{1}{\color{blue}{a \cdot \left(-2\right)}} \]
      17. metadata-eval33.1%

        \[\leadsto \left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + b\right) \cdot \frac{1}{a \cdot \color{blue}{-2}} \]
    6. Applied egg-rr33.1%

      \[\leadsto \color{blue}{\left(\left(-\sqrt{\mathsf{fma}\left(a, c \cdot -4, {b}^{2}\right)}\right) + b\right) \cdot \frac{1}{a \cdot -2}} \]
    7. Taylor expanded in a around 0 94.9%

      \[\leadsto \color{blue}{\left(a \cdot \left(a \cdot \left(a \cdot \left(0.5 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b} - -4 \cdot \frac{{c}^{3}}{{b}^{5}}\right) - -2 \cdot \frac{{c}^{2}}{{b}^{3}}\right) - -2 \cdot \frac{c}{b}\right)\right)} \cdot \frac{1}{a \cdot -2} \]
    8. Step-by-step derivation
      1. cancel-sign-sub-inv94.9%

        \[\leadsto \left(a \cdot \color{blue}{\left(a \cdot \left(a \cdot \left(0.5 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b} - -4 \cdot \frac{{c}^{3}}{{b}^{5}}\right) - -2 \cdot \frac{{c}^{2}}{{b}^{3}}\right) + \left(--2\right) \cdot \frac{c}{b}\right)}\right) \cdot \frac{1}{a \cdot -2} \]
    9. Simplified94.9%

      \[\leadsto \color{blue}{\left(a \cdot \left(a \cdot \left(a \cdot \left(0.5 \cdot \left(a \cdot \frac{\frac{{c}^{4}}{{b}^{6}} \cdot 20}{b}\right) + 4 \cdot \frac{{c}^{3}}{{b}^{5}}\right) + 2 \cdot \frac{{c}^{2}}{{b}^{3}}\right) + 2 \cdot \frac{c}{b}\right)\right)} \cdot \frac{1}{a \cdot -2} \]
    10. Final simplification94.9%

      \[\leadsto \left(a \cdot \left(a \cdot \left(a \cdot \left(0.5 \cdot \left(a \cdot \frac{20 \cdot \frac{{c}^{4}}{{b}^{6}}}{b}\right) + 4 \cdot \frac{{c}^{3}}{{b}^{5}}\right) + 2 \cdot \frac{{c}^{2}}{{b}^{3}}\right) + 2 \cdot \frac{c}{b}\right)\right) \cdot \frac{1}{-2 \cdot a} \]
    11. Add Preprocessing

    Alternative 4: 93.9% accurate, 0.2× speedup?

    \[\begin{array}{l} \\ \frac{-2 \cdot \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}} - \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b} \end{array} \]
    ; Herbie alternative 4 (93.9% accurate, 0.2x speedup): shorter series
    ; expansion in b around infinity.
    (FPCore (a b c)
     :precision binary64
     (/
      (-
       (* -2.0 (/ (* (pow c 3.0) (pow a 2.0)) (pow b 4.0)))
       (+ c (/ (* a (pow c 2.0)) (pow b 2.0))))
      b))
    // Herbie alternative 4: truncated series in b around infinity (93.9% accurate per the report).
    double code(double a, double b, double c) {
    	return ((-2.0 * ((pow(c, 3.0) * pow(a, 2.0)) / pow(b, 4.0))) - (c + ((a * pow(c, 2.0)) / pow(b, 2.0)))) / b;
    }
    
    ! Herbie alternative 4: truncated series in b around infinity (93.9% accurate per the report).
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = (((-2.0d0) * (((c ** 3.0d0) * (a ** 2.0d0)) / (b ** 4.0d0))) - (c + ((a * (c ** 2.0d0)) / (b ** 2.0d0)))) / b
    end function
    
    // Herbie alternative 4: truncated series in b around infinity (93.9% accurate per the report).
    public static double code(double a, double b, double c) {
    	return ((-2.0 * ((Math.pow(c, 3.0) * Math.pow(a, 2.0)) / Math.pow(b, 4.0))) - (c + ((a * Math.pow(c, 2.0)) / Math.pow(b, 2.0)))) / b;
    }
    
    # Herbie alternative 4: truncated series in b around infinity (93.9% accurate per the report).
    def code(a, b, c):
    	return ((-2.0 * ((math.pow(c, 3.0) * math.pow(a, 2.0)) / math.pow(b, 4.0))) - (c + ((a * math.pow(c, 2.0)) / math.pow(b, 2.0)))) / b
    
    # Herbie alternative 4: truncated series in b around infinity (93.9% accurate per the report).
    function code(a, b, c)
    	return Float64(Float64(Float64(-2.0 * Float64(Float64((c ^ 3.0) * (a ^ 2.0)) / (b ^ 4.0))) - Float64(c + Float64(Float64(a * (c ^ 2.0)) / (b ^ 2.0)))) / b)
    end
    
    % Herbie alternative 4: truncated series in b around infinity (93.9% accurate per the report).
    function tmp = code(a, b, c)
    	tmp = ((-2.0 * (((c ^ 3.0) * (a ^ 2.0)) / (b ^ 4.0))) - (c + ((a * (c ^ 2.0)) / (b ^ 2.0)))) / b;
    end
    
    code[a_, b_, c_] := N[(N[(N[(-2.0 * N[(N[(N[Power[c, 3.0], $MachinePrecision] * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c + N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{-2 \cdot \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}} - \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in b around inf 93.5%

      \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
    6. Final simplification93.5%

      \[\leadsto \frac{-2 \cdot \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}} - \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b} \]
    7. Add Preprocessing

    Alternative 5: 93.9% accurate, 0.3× speedup?

    \[\begin{array}{l} \\ a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \end{array} \]
    ; Herbie alternative 5 (93.9% accurate, 0.3x speedup): shorter series
    ; expansion in a around 0.
    (FPCore (a b c)
     :precision binary64
     (-
      (*
       a
       (- (* -2.0 (* a (/ (pow c 3.0) (pow b 5.0)))) (/ (pow c 2.0) (pow b 3.0))))
      (/ c b)))
    // Herbie alternative 5: truncated series in a around 0 (93.9% accurate per the report).
    double code(double a, double b, double c) {
    	return (a * ((-2.0 * (a * (pow(c, 3.0) / pow(b, 5.0)))) - (pow(c, 2.0) / pow(b, 3.0)))) - (c / b);
    }
    
    ! Herbie alternative 5: truncated series in a around 0 (93.9% accurate per the report).
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = (a * (((-2.0d0) * (a * ((c ** 3.0d0) / (b ** 5.0d0)))) - ((c ** 2.0d0) / (b ** 3.0d0)))) - (c / b)
    end function
    
    // Herbie alternative 5: truncated series in a around 0 (93.9% accurate per the report).
    public static double code(double a, double b, double c) {
    	return (a * ((-2.0 * (a * (Math.pow(c, 3.0) / Math.pow(b, 5.0)))) - (Math.pow(c, 2.0) / Math.pow(b, 3.0)))) - (c / b);
    }
    
    # Herbie alternative 5: truncated series in a around 0 (93.9% accurate per the report).
    def code(a, b, c):
    	return (a * ((-2.0 * (a * (math.pow(c, 3.0) / math.pow(b, 5.0)))) - (math.pow(c, 2.0) / math.pow(b, 3.0)))) - (c / b)
    
    # Herbie alternative 5: truncated series in a around 0 (93.9% accurate per the report).
    function code(a, b, c)
    	return Float64(Float64(a * Float64(Float64(-2.0 * Float64(a * Float64((c ^ 3.0) / (b ^ 5.0)))) - Float64((c ^ 2.0) / (b ^ 3.0)))) - Float64(c / b))
    end
    
    % Herbie alternative 5: truncated series in a around 0 (93.9% accurate per the report).
    function tmp = code(a, b, c)
    	tmp = (a * ((-2.0 * (a * ((c ^ 3.0) / (b ^ 5.0)))) - ((c ^ 2.0) / (b ^ 3.0)))) - (c / b);
    end
    
    code[a_, b_, c_] := N[(N[(a * N[(N[(-2.0 * N[(a * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in a around 0 93.5%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{{c}^{2}}{{b}^{3}}\right)} \]
    6. Step-by-step derivation
      1. +-commutative93.5%

        \[\leadsto \color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{{c}^{2}}{{b}^{3}}\right) + -1 \cdot \frac{c}{b}} \]
      2. mul-1-neg93.5%

        \[\leadsto a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{{c}^{2}}{{b}^{3}}\right) + \color{blue}{\left(-\frac{c}{b}\right)} \]
      3. unsub-neg93.5%

        \[\leadsto \color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}} \]
      4. mul-1-neg93.5%

        \[\leadsto a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + \color{blue}{\left(-\frac{{c}^{2}}{{b}^{3}}\right)}\right) - \frac{c}{b} \]
      5. unsub-neg93.5%

        \[\leadsto a \cdot \color{blue}{\left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} - \frac{{c}^{2}}{{b}^{3}}\right)} - \frac{c}{b} \]
      6. associate-/l*93.5%

        \[\leadsto a \cdot \left(-2 \cdot \color{blue}{\left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right)} - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
    7. Simplified93.5%

      \[\leadsto \color{blue}{a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}} \]
    8. Final simplification93.5%

      \[\leadsto a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
    9. Add Preprocessing

    Alternative 6: 93.6% accurate, 0.4× speedup?

    \[\begin{array}{l} \\ c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right) \end{array} \]
    ; Herbie alternative 6 (93.6% accurate, 0.4x speedup): series expansion in c
    ; around 0.
    (FPCore (a b c)
     :precision binary64
     (*
      c
      (+
       (* c (- (* -2.0 (/ (* c (pow a 2.0)) (pow b 5.0))) (/ a (pow b 3.0))))
       (/ -1.0 b))))
    // Herbie alternative 6: series expansion in c around 0 (93.6% accurate per the report).
    double code(double a, double b, double c) {
    	return c * ((c * ((-2.0 * ((c * pow(a, 2.0)) / pow(b, 5.0))) - (a / pow(b, 3.0)))) + (-1.0 / b));
    }
    
    ! Herbie alternative 6: series expansion in c around 0 (93.6% accurate per the report).
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = c * ((c * (((-2.0d0) * ((c * (a ** 2.0d0)) / (b ** 5.0d0))) - (a / (b ** 3.0d0)))) + ((-1.0d0) / b))
    end function
    
    // Herbie alternative 6: series expansion in c around 0 (93.6% accurate per the report).
    public static double code(double a, double b, double c) {
    	return c * ((c * ((-2.0 * ((c * Math.pow(a, 2.0)) / Math.pow(b, 5.0))) - (a / Math.pow(b, 3.0)))) + (-1.0 / b));
    }
    
    # Herbie alternative 6: series expansion in c around 0 (93.6% accurate per the report).
    def code(a, b, c):
    	return c * ((c * ((-2.0 * ((c * math.pow(a, 2.0)) / math.pow(b, 5.0))) - (a / math.pow(b, 3.0)))) + (-1.0 / b))
    
    # Herbie alternative 6: series expansion in c around 0 (93.6% accurate per the report).
    function code(a, b, c)
    	return Float64(c * Float64(Float64(c * Float64(Float64(-2.0 * Float64(Float64(c * (a ^ 2.0)) / (b ^ 5.0))) - Float64(a / (b ^ 3.0)))) + Float64(-1.0 / b)))
    end
    
    % Herbie alternative 6: Taylor expansion (in c around 0) of the
    % quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
    function tmp = code(a, b, c)
    	tmp = c * ((c * ((-2.0 * ((c * (a ^ 2.0)) / (b ^ 5.0))) - (a / (b ^ 3.0)))) + (-1.0 / b));
    end
    
    (* Herbie alternative 6: Taylor expansion (in c around 0) of the quadratic
       root; N[..., $MachinePrecision] forces machine-precision rounding at
       each intermediate step to mirror binary64 arithmetic. *)
    code[a_, b_, c_] := N[(c * N[(N[(c * N[(N[(-2.0 * N[(N[(c * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right)
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in c around 0 93.3%

      \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-2 \cdot \frac{{a}^{2} \cdot c}{{b}^{5}} + -1 \cdot \frac{a}{{b}^{3}}\right) - \frac{1}{b}\right)} \]
    6. Final simplification (93.3%)

      \[\leadsto c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right) \]
    7. Add Preprocessing

    Alternative 7: 90.7% accurate, 0.6× speedup?

    \[\begin{array}{l} \\ \frac{\mathsf{fma}\left(a, {\left(\frac{c}{-b}\right)}^{2}, c\right)}{-b} \end{array} \]
    ; Herbie alternative 7 (90.7% accurate): fma-based series form of the
    ; quadratic root, Taylor expanded in b around infinity.
    (FPCore (a b c) :precision binary64 (/ (fma a (pow (/ c (- b)) 2.0) c) (- b)))
    double code(double a, double b, double c) {
    	return fma(a, pow((c / -b), 2.0), c) / -b;
    }
    
    # Herbie alternative 7: fma(a, (c/-b)^2, c) / -b — series form of the
    # quadratic root; fma performs the multiply-add with a single rounding.
    function code(a, b, c)
    	return Float64(fma(a, (Float64(c / Float64(-b)) ^ 2.0), c) / Float64(-b))
    end
    
    (* Herbie alternative 7: (a*(c/-b)^2 + c) / -b at machine precision.
       Note the fma of the FPCore form is rendered as a plain multiply-add. *)
    code[a_, b_, c_] := N[(N[(a * N[Power[N[(c / (-b)), $MachinePrecision], 2.0], $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{\mathsf{fma}\left(a, {\left(\frac{c}{-b}\right)}^{2}, c\right)}{-b}
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in b around inf 90.3%

      \[\leadsto \color{blue}{\frac{-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    6. Step-by-step derivation
      1. mul-1-neg90.3%

        \[\leadsto \frac{-1 \cdot c + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{2}}\right)}}{b} \]
      2. unsub-neg90.3%

        \[\leadsto \frac{\color{blue}{-1 \cdot c - \frac{a \cdot {c}^{2}}{{b}^{2}}}}{b} \]
      3. mul-1-neg90.3%

        \[\leadsto \frac{\color{blue}{\left(-c\right)} - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b} \]
    7. Simplified90.3%

      \[\leadsto \color{blue}{\frac{\left(-c\right) - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    8. Taylor expanded in c around inf 89.9%

      \[\leadsto \color{blue}{{c}^{2} \cdot \left(-1 \cdot \frac{a}{{b}^{3}} - \frac{1}{b \cdot c}\right)} \]
    9. Step-by-step derivation
      1. mul-1-neg89.9%

        \[\leadsto {c}^{2} \cdot \left(\color{blue}{\left(-\frac{a}{{b}^{3}}\right)} - \frac{1}{b \cdot c}\right) \]
      2. associate-/r*89.9%

        \[\leadsto {c}^{2} \cdot \left(\left(-\frac{a}{{b}^{3}}\right) - \color{blue}{\frac{\frac{1}{b}}{c}}\right) \]
    10. Simplified89.9%

      \[\leadsto \color{blue}{{c}^{2} \cdot \left(\left(-\frac{a}{{b}^{3}}\right) - \frac{\frac{1}{b}}{c}\right)} \]
    11. Taylor expanded in a around 0 90.3%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    12. Step-by-step derivation
      1. mul-1-neg90.3%

        \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
      2. unsub-neg90.3%

        \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
      3. associate-*r/90.3%

        \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
      4. mul-1-neg90.3%

        \[\leadsto \frac{\color{blue}{-c}}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
      5. associate-/l*90.3%

        \[\leadsto \frac{-c}{b} - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
    13. Simplified90.3%

      \[\leadsto \color{blue}{\frac{-c}{b} - a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
    14. Taylor expanded in b around inf 90.3%

      \[\leadsto \color{blue}{\frac{-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    15. Step-by-step derivation
      1. distribute-lft-out90.3%

        \[\leadsto \frac{\color{blue}{-1 \cdot \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}}{b} \]
      2. associate-*r/90.3%

        \[\leadsto \color{blue}{-1 \cdot \frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
      3. mul-1-neg90.3%

        \[\leadsto \color{blue}{-\frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
      4. distribute-neg-frac290.3%

        \[\leadsto \color{blue}{\frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{-b}} \]
      5. +-commutative90.3%

        \[\leadsto \frac{\color{blue}{\frac{a \cdot {c}^{2}}{{b}^{2}} + c}}{-b} \]
      6. associate-/l*90.3%

        \[\leadsto \frac{\color{blue}{a \cdot \frac{{c}^{2}}{{b}^{2}}} + c}{-b} \]
      7. fma-define90.3%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(a, \frac{{c}^{2}}{{b}^{2}}, c\right)}}{-b} \]
      8. unpow290.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, \frac{\color{blue}{c \cdot c}}{{b}^{2}}, c\right)}{-b} \]
      9. unpow290.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, \frac{c \cdot c}{\color{blue}{b \cdot b}}, c\right)}{-b} \]
      10. times-frac90.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, \color{blue}{\frac{c}{b} \cdot \frac{c}{b}}, c\right)}{-b} \]
      11. sqr-neg90.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, \color{blue}{\left(-\frac{c}{b}\right) \cdot \left(-\frac{c}{b}\right)}, c\right)}{-b} \]
      12. distribute-frac-neg90.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, \color{blue}{\frac{-c}{b}} \cdot \left(-\frac{c}{b}\right), c\right)}{-b} \]
      13. distribute-frac-neg90.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, \frac{-c}{b} \cdot \color{blue}{\frac{-c}{b}}, c\right)}{-b} \]
      14. unpow290.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, \color{blue}{{\left(\frac{-c}{b}\right)}^{2}}, c\right)}{-b} \]
      15. distribute-frac-neg90.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, {\color{blue}{\left(-\frac{c}{b}\right)}}^{2}, c\right)}{-b} \]
      16. distribute-neg-frac290.3%

        \[\leadsto \frac{\mathsf{fma}\left(a, {\color{blue}{\left(\frac{c}{-b}\right)}}^{2}, c\right)}{-b} \]
    16. Simplified90.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(a, {\left(\frac{c}{-b}\right)}^{2}, c\right)}{-b}} \]
    17. Final simplification90.3%

      \[\leadsto \frac{\mathsf{fma}\left(a, {\left(\frac{c}{-b}\right)}^{2}, c\right)}{-b} \]
    18. Add Preprocessing

    Alternative 8: 90.4% accurate, 1.0× speedup?

    \[\begin{array}{l} \\ c \cdot \left(\frac{-1}{b} - \frac{c \cdot a}{{b}^{3}}\right) \end{array} \]
    ; Herbie alternative 8 (90.4% accurate): quadratic-in-c truncation
    ; c * (-1/b - a*c/b^3) of the small-root series.
    (FPCore (a b c)
     :precision binary64
     (* c (- (/ -1.0 b) (/ (* c a) (pow b 3.0)))))
    double code(double a, double b, double c) {
    	return c * ((-1.0 / b) - ((c * a) / pow(b, 3.0)));
    }
    
    ! Herbie alternative 8: quadratic-in-c truncation of the small-root
    ! series, c * (-1/b - a*c/b**3).
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = c * (((-1.0d0) / b) - ((c * a) / (b ** 3.0d0)))
    end function
    
    // Quadratic-in-c truncation of the small-root series:
    // c * (-1/b - a*c/b^3). Same evaluation order as the one-line form.
    public static double code(double a, double b, double c) {
    	final double leading = -1.0 / b;
    	final double correction = (c * a) / Math.pow(b, 3.0);
    	return c * (leading - correction);
    }
    
    def code(a, b, c):
    	"""Quadratic-in-c truncation of the small-root series: c*(-1/b - a*c/b**3).

    	Floating-point operation order matches the generated one-liner.
    	"""
    	leading = -1.0 / b
    	correction = (c * a) / math.pow(b, 3.0)
    	return c * (leading - correction)
    
    # Herbie alternative 8: c * (-1/b - a*c/b^3); Float64() wrappers pin
    # binary64 rounding at each intermediate operation.
    function code(a, b, c)
    	return Float64(c * Float64(Float64(-1.0 / b) - Float64(Float64(c * a) / (b ^ 3.0))))
    end
    
    % Herbie alternative 8: quadratic-in-c truncation of the small-root
    % series, c * (-1/b - a*c/b^3).
    function tmp = code(a, b, c)
    	tmp = c * ((-1.0 / b) - ((c * a) / (b ^ 3.0)));
    end
    
    (* Herbie alternative 8: c * (-1/b - c*a/b^3) at machine precision. *)
    code[a_, b_, c_] := N[(c * N[(N[(-1.0 / b), $MachinePrecision] - N[(N[(c * a), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    c \cdot \left(\frac{-1}{b} - \frac{c \cdot a}{{b}^{3}}\right)
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in c around 0 90.1%

      \[\leadsto \color{blue}{c \cdot \left(-1 \cdot \frac{a \cdot c}{{b}^{3}} - \frac{1}{b}\right)} \]
    6. Step-by-step derivation
      1. associate-*r/90.1%

        \[\leadsto c \cdot \left(\color{blue}{\frac{-1 \cdot \left(a \cdot c\right)}{{b}^{3}}} - \frac{1}{b}\right) \]
      2. neg-mul-190.1%

        \[\leadsto c \cdot \left(\frac{\color{blue}{-a \cdot c}}{{b}^{3}} - \frac{1}{b}\right) \]
      3. distribute-rgt-neg-in90.1%

        \[\leadsto c \cdot \left(\frac{\color{blue}{a \cdot \left(-c\right)}}{{b}^{3}} - \frac{1}{b}\right) \]
    7. Simplified90.1%

      \[\leadsto \color{blue}{c \cdot \left(\frac{a \cdot \left(-c\right)}{{b}^{3}} - \frac{1}{b}\right)} \]
    8. Final simplification90.1%

      \[\leadsto c \cdot \left(\frac{-1}{b} - \frac{c \cdot a}{{b}^{3}}\right) \]
    9. Add Preprocessing

    Alternative 9: 81.3% accurate, 29.0× speedup?

    \[\begin{array}{l} \\ \frac{c}{-b} \end{array} \]
    ; Herbie alternative 9 (81.3% accurate, 29x speedup): leading-order
    ; root c/-b; a drops out of the truncated series entirely.
    (FPCore (a b c) :precision binary64 (/ c (- b)))
    /* Leading-order root c/-b; a drops out of the truncated series
     * (the parameter is kept so the signature matches the other forms). */
    double code(double a, double b, double c) {
    	const double neg_b = -b;
    	return c / neg_b;
    }
    
    ! Herbie alternative 9: leading-order root c / -b; a drops out of the
    ! truncated series (parameter kept for a uniform interface).
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = c / -b
    end function
    
    // Leading-order root c/-b; a drops out of the truncated series
    // (the parameter is kept so the signature matches the other forms).
    public static double code(double a, double b, double c) {
    	final double negB = -b;
    	return c / negB;
    }
    
    def code(a, b, c):
    	"""Leading-order root c / -b; a drops out of the truncated series."""
    	denominator = -b
    	return c / denominator
    
    # Herbie alternative 9: leading-order root c / -b; a drops out of the
    # truncated series (parameter kept for a uniform interface).
    function code(a, b, c)
    	return Float64(c / Float64(-b))
    end
    
    % Herbie alternative 9: leading-order root c / -b; a drops out of the
    % truncated series (parameter kept for a uniform interface).
    function tmp = code(a, b, c)
    	tmp = c / -b;
    end
    
    (* Herbie alternative 9: leading-order root c / -b at machine precision. *)
    code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{c}{-b}
    \end{array}
    
    Derivation
    1. Initial program 33.1%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative33.1%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified33.1%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in b around inf 80.2%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
    6. Step-by-step derivation
      1. associate-*r/80.2%

        \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} \]
      2. mul-1-neg80.2%

        \[\leadsto \frac{\color{blue}{-c}}{b} \]
    7. Simplified80.2%

      \[\leadsto \color{blue}{\frac{-c}{b}} \]
    8. Final simplification80.2%

      \[\leadsto \frac{c}{-b} \]
    9. Add Preprocessing

    Reproduce

    ?
    herbie shell --seed 2024080 
    ; Original input program for "Quadratic roots, medium range"; feed this
    ; to the herbie shell command above to reproduce the report.
    (FPCore (a b c)
      :name "Quadratic roots, medium range"
      :precision binary64
      :pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
      (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))