Quadratic roots, wide range

Percentage Accurate: 17.7% → 97.7%
Time: 14.1s
Alternatives: 9
Speedup: 29.0×

Specification

?
\[\left(\left(4.930380657631324 \cdot 10^{-32} < a \land a < 2.028240960365167 \cdot 10^{+31}\right) \land \left(4.930380657631324 \cdot 10^{-32} < b \land b < 2.028240960365167 \cdot 10^{+31}\right)\right) \land \left(4.930380657631324 \cdot 10^{-32} < c \land c < 2.028240960365167 \cdot 10^{+31}\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 9 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 17.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 97.7% accurate, 0.2× speedup?

\[\begin{array}{l} \\ a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left(a \cdot {c}^{4}\right)}{{b}^{7}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \end{array} \]
(FPCore (a b c)
 :precision binary64
 (-
  (*
   a
   (-
    (*
     a
     (+
      (* -2.0 (/ (pow c 3.0) (pow b 5.0)))
      (/ (* -5.0 (* a (pow c 4.0))) (pow b 7.0))))
    (/ (pow c 2.0) (pow b 3.0))))
  (/ c b)))
double code(double a, double b, double c) {
	return (a * ((a * ((-2.0 * (pow(c, 3.0) / pow(b, 5.0))) + ((-5.0 * (a * pow(c, 4.0))) / pow(b, 7.0)))) - (pow(c, 2.0) / pow(b, 3.0)))) - (c / b);
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (a * ((a * (((-2.0d0) * ((c ** 3.0d0) / (b ** 5.0d0))) + (((-5.0d0) * (a * (c ** 4.0d0))) / (b ** 7.0d0)))) - ((c ** 2.0d0) / (b ** 3.0d0)))) - (c / b)
end function
public static double code(double a, double b, double c) {
	return (a * ((a * ((-2.0 * (Math.pow(c, 3.0) / Math.pow(b, 5.0))) + ((-5.0 * (a * Math.pow(c, 4.0))) / Math.pow(b, 7.0)))) - (Math.pow(c, 2.0) / Math.pow(b, 3.0)))) - (c / b);
}
def code(a, b, c):
	return (a * ((a * ((-2.0 * (math.pow(c, 3.0) / math.pow(b, 5.0))) + ((-5.0 * (a * math.pow(c, 4.0))) / math.pow(b, 7.0)))) - (math.pow(c, 2.0) / math.pow(b, 3.0)))) - (c / b)
function code(a, b, c)
	return Float64(Float64(a * Float64(Float64(a * Float64(Float64(-2.0 * Float64((c ^ 3.0) / (b ^ 5.0))) + Float64(Float64(-5.0 * Float64(a * (c ^ 4.0))) / (b ^ 7.0)))) - Float64((c ^ 2.0) / (b ^ 3.0)))) - Float64(c / b))
end
function tmp = code(a, b, c)
	tmp = (a * ((a * ((-2.0 * ((c ^ 3.0) / (b ^ 5.0))) + ((-5.0 * (a * (c ^ 4.0))) / (b ^ 7.0)))) - ((c ^ 2.0) / (b ^ 3.0)))) - (c / b);
end
code[a_, b_, c_] := N[(N[(a * N[(N[(a * N[(N[(-2.0 * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-5.0 * N[(a * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left(a \cdot {c}^{4}\right)}{{b}^{7}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}
\end{array}
Derivation
  1. Initial program 20.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative20.7%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified20.7%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 96.8%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  6. Taylor expanded in c around 0 96.8%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \color{blue}{-5 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}}\right)\right) \]
  7. Step-by-step derivation
    1. associate-*r/96.8%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \color{blue}{\frac{-5 \cdot \left(a \cdot {c}^{4}\right)}{{b}^{7}}}\right)\right) \]
    2. *-commutative96.8%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \color{blue}{\left({c}^{4} \cdot a\right)}}{{b}^{7}}\right)\right) \]
  8. Simplified96.8%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \color{blue}{\frac{-5 \cdot \left({c}^{4} \cdot a\right)}{{b}^{7}}}\right)\right) \]
  9. Step-by-step derivation
    1. associate-*r/96.8%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\frac{-1 \cdot {c}^{2}}{{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left({c}^{4} \cdot a\right)}{{b}^{7}}\right)\right) \]
  10. Applied egg-rr96.8%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\frac{-1 \cdot {c}^{2}}{{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left({c}^{4} \cdot a\right)}{{b}^{7}}\right)\right) \]
  11. Step-by-step derivation
    1. mul-1-neg96.8%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\frac{\color{blue}{-{c}^{2}}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left({c}^{4} \cdot a\right)}{{b}^{7}}\right)\right) \]
  12. Simplified96.8%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\frac{-{c}^{2}}{{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left({c}^{4} \cdot a\right)}{{b}^{7}}\right)\right) \]
  13. Step-by-step derivation
    1. mul-1-neg96.8%

      \[\leadsto \color{blue}{\left(-\frac{c}{b}\right)} + a \cdot \left(\frac{-{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left({c}^{4} \cdot a\right)}{{b}^{7}}\right)\right) \]
  14. Applied egg-rr96.8%

    \[\leadsto \color{blue}{\left(-\frac{c}{b}\right)} + a \cdot \left(\frac{-{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left({c}^{4} \cdot a\right)}{{b}^{7}}\right)\right) \]
  15. Final simplification96.8%

    \[\leadsto a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-5 \cdot \left(a \cdot {c}^{4}\right)}{{b}^{7}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
  16. Add Preprocessing

Alternative 2: 97.4% accurate, 0.3× speedup?

\[\begin{array}{l} \\ c \cdot \left(c \cdot \left(a \cdot \left(a \cdot \left(-5 \cdot \frac{a \cdot {c}^{2}}{{b}^{7}} + -2 \cdot \frac{c}{{b}^{5}}\right) + \frac{-1}{{b}^{3}}\right)\right) + \frac{-1}{b}\right) \end{array} \]
(FPCore (a b c)
 :precision binary64
 (*
  c
  (+
   (*
    c
    (*
     a
     (+
      (*
       a
       (+
        (* -5.0 (/ (* a (pow c 2.0)) (pow b 7.0)))
        (* -2.0 (/ c (pow b 5.0)))))
      (/ -1.0 (pow b 3.0)))))
   (/ -1.0 b))))
double code(double a, double b, double c) {
	return c * ((c * (a * ((a * ((-5.0 * ((a * pow(c, 2.0)) / pow(b, 7.0))) + (-2.0 * (c / pow(b, 5.0))))) + (-1.0 / pow(b, 3.0))))) + (-1.0 / b));
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c * ((c * (a * ((a * (((-5.0d0) * ((a * (c ** 2.0d0)) / (b ** 7.0d0))) + ((-2.0d0) * (c / (b ** 5.0d0))))) + ((-1.0d0) / (b ** 3.0d0))))) + ((-1.0d0) / b))
end function
public static double code(double a, double b, double c) {
	return c * ((c * (a * ((a * ((-5.0 * ((a * Math.pow(c, 2.0)) / Math.pow(b, 7.0))) + (-2.0 * (c / Math.pow(b, 5.0))))) + (-1.0 / Math.pow(b, 3.0))))) + (-1.0 / b));
}
def code(a, b, c):
	return c * ((c * (a * ((a * ((-5.0 * ((a * math.pow(c, 2.0)) / math.pow(b, 7.0))) + (-2.0 * (c / math.pow(b, 5.0))))) + (-1.0 / math.pow(b, 3.0))))) + (-1.0 / b))
function code(a, b, c)
	return Float64(c * Float64(Float64(c * Float64(a * Float64(Float64(a * Float64(Float64(-5.0 * Float64(Float64(a * (c ^ 2.0)) / (b ^ 7.0))) + Float64(-2.0 * Float64(c / (b ^ 5.0))))) + Float64(-1.0 / (b ^ 3.0))))) + Float64(-1.0 / b)))
end
function tmp = code(a, b, c)
	tmp = c * ((c * (a * ((a * ((-5.0 * ((a * (c ^ 2.0)) / (b ^ 7.0))) + (-2.0 * (c / (b ^ 5.0))))) + (-1.0 / (b ^ 3.0))))) + (-1.0 / b));
end
code[a_, b_, c_] := N[(c * N[(N[(c * N[(a * N[(N[(a * N[(N[(-5.0 * N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-2.0 * N[(c / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(c \cdot \left(a \cdot \left(a \cdot \left(-5 \cdot \frac{a \cdot {c}^{2}}{{b}^{7}} + -2 \cdot \frac{c}{{b}^{5}}\right) + \frac{-1}{{b}^{3}}\right)\right) + \frac{-1}{b}\right)
\end{array}
Derivation
  1. Initial program 20.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative20.7%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified20.7%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 96.4%

    \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-1 \cdot \frac{a}{{b}^{3}} + c \cdot \left(-2 \cdot \frac{{a}^{2}}{{b}^{5}} + -0.25 \cdot \frac{c \cdot \left(4 \cdot \frac{{a}^{4}}{{b}^{6}} + 16 \cdot \frac{{a}^{4}}{{b}^{6}}\right)}{a \cdot b}\right)\right) - \frac{1}{b}\right)} \]
  6. Step-by-step derivation
    1. Simplified96.4%

      \[\leadsto \color{blue}{c \cdot \left(c \cdot \mathsf{fma}\left(-1, \frac{a}{{b}^{3}}, c \cdot \mathsf{fma}\left(-2, \frac{{a}^{2}}{{b}^{5}}, -0.25 \cdot \left(c \cdot \frac{\frac{{a}^{4}}{{b}^{6}} \cdot 20}{a \cdot b}\right)\right)\right) - \frac{1}{b}\right)} \]
    2. Taylor expanded in a around 0 96.4%

      \[\leadsto c \cdot \left(c \cdot \color{blue}{\left(a \cdot \left(a \cdot \left(-5 \cdot \frac{a \cdot {c}^{2}}{{b}^{7}} + -2 \cdot \frac{c}{{b}^{5}}\right) - \frac{1}{{b}^{3}}\right)\right)} - \frac{1}{b}\right) \]
    3. Final simplification96.4%

      \[\leadsto c \cdot \left(c \cdot \left(a \cdot \left(a \cdot \left(-5 \cdot \frac{a \cdot {c}^{2}}{{b}^{7}} + -2 \cdot \frac{c}{{b}^{5}}\right) + \frac{-1}{{b}^{3}}\right)\right) + \frac{-1}{b}\right) \]
    4. Add Preprocessing

    Alternative 3: 96.9% accurate, 0.4× speedup?

    \[\begin{array}{l} \\ \frac{a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{4}}\right) - {\left(\frac{c}{-b}\right)}^{2}\right)}{b} - \frac{c}{b} \end{array} \]
    (FPCore (a b c)
     :precision binary64
     (-
      (/
       (* a (- (* -2.0 (* a (/ (pow c 3.0) (pow b 4.0)))) (pow (/ c (- b)) 2.0)))
       b)
      (/ c b)))
    double code(double a, double b, double c) {
    	return ((a * ((-2.0 * (a * (pow(c, 3.0) / pow(b, 4.0)))) - pow((c / -b), 2.0))) / b) - (c / b);
    }
    
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = ((a * (((-2.0d0) * (a * ((c ** 3.0d0) / (b ** 4.0d0)))) - ((c / -b) ** 2.0d0))) / b) - (c / b)
    end function
    
    public static double code(double a, double b, double c) {
    	return ((a * ((-2.0 * (a * (Math.pow(c, 3.0) / Math.pow(b, 4.0)))) - Math.pow((c / -b), 2.0))) / b) - (c / b);
    }
    
    def code(a, b, c):
    	return ((a * ((-2.0 * (a * (math.pow(c, 3.0) / math.pow(b, 4.0)))) - math.pow((c / -b), 2.0))) / b) - (c / b)
    
    function code(a, b, c)
    	return Float64(Float64(Float64(a * Float64(Float64(-2.0 * Float64(a * Float64((c ^ 3.0) / (b ^ 4.0)))) - (Float64(c / Float64(-b)) ^ 2.0))) / b) - Float64(c / b))
    end
    
    function tmp = code(a, b, c)
    	tmp = ((a * ((-2.0 * (a * ((c ^ 3.0) / (b ^ 4.0)))) - ((c / -b) ^ 2.0))) / b) - (c / b);
    end
    
    code[a_, b_, c_] := N[(N[(N[(a * N[(N[(-2.0 * N[(a * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Power[N[(c / (-b)), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{4}}\right) - {\left(\frac{c}{-b}\right)}^{2}\right)}{b} - \frac{c}{b}
    \end{array}
    
    Derivation
    1. Initial program 20.7%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative20.7%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified20.7%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in b around inf 95.6%

      \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
    6. Taylor expanded in a around 0 95.6%

      \[\leadsto \frac{\color{blue}{-1 \cdot c + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right)}}{b} \]
    7. Step-by-step derivation
      1. neg-mul-195.6%

        \[\leadsto \frac{\color{blue}{\left(-c\right)} + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right)}{b} \]
      2. +-commutative95.6%

        \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right) + \left(-c\right)}}{b} \]
      3. unsub-neg95.6%

        \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right) - c}}{b} \]
      4. mul-1-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + \color{blue}{\left(-\frac{{c}^{2}}{{b}^{2}}\right)}\right) - c}{b} \]
      5. unsub-neg95.6%

        \[\leadsto \frac{a \cdot \color{blue}{\left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{{c}^{2}}{{b}^{2}}\right)} - c}{b} \]
      6. unpow295.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{\color{blue}{c \cdot c}}{{b}^{2}}\right) - c}{b} \]
      7. unpow295.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{c \cdot c}{\color{blue}{b \cdot b}}\right) - c}{b} \]
      8. times-frac95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{c}{b} \cdot \frac{c}{b}}\right) - c}{b} \]
      9. sqr-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\left(-\frac{c}{b}\right) \cdot \left(-\frac{c}{b}\right)}\right) - c}{b} \]
      10. distribute-frac-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{-c}{b}} \cdot \left(-\frac{c}{b}\right)\right) - c}{b} \]
      11. distribute-frac-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{-c}{b} \cdot \color{blue}{\frac{-c}{b}}\right) - c}{b} \]
      12. unpow295.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{{\left(\frac{-c}{b}\right)}^{2}}\right) - c}{b} \]
    8. Simplified95.6%

      \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - {\left(\frac{-c}{b}\right)}^{2}\right) - c}}{b} \]
    9. Step-by-step derivation
      1. div-sub95.6%

        \[\leadsto \color{blue}{\frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - {\left(\frac{-c}{b}\right)}^{2}\right)}{b} - \frac{c}{b}} \]
      2. *-commutative95.6%

        \[\leadsto \frac{a \cdot \left(\color{blue}{\frac{a \cdot {c}^{3}}{{b}^{4}} \cdot -2} - {\left(\frac{-c}{b}\right)}^{2}\right)}{b} - \frac{c}{b} \]
      3. associate-/l*95.6%

        \[\leadsto \frac{a \cdot \left(\color{blue}{\left(a \cdot \frac{{c}^{3}}{{b}^{4}}\right)} \cdot -2 - {\left(\frac{-c}{b}\right)}^{2}\right)}{b} - \frac{c}{b} \]
    10. Applied egg-rr95.6%

      \[\leadsto \color{blue}{\frac{a \cdot \left(\left(a \cdot \frac{{c}^{3}}{{b}^{4}}\right) \cdot -2 - {\left(\frac{-c}{b}\right)}^{2}\right)}{b} - \frac{c}{b}} \]
    11. Final simplification95.6%

      \[\leadsto \frac{a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{4}}\right) - {\left(\frac{c}{-b}\right)}^{2}\right)}{b} - \frac{c}{b} \]
    12. Add Preprocessing

    Alternative 4: 96.9% accurate, 0.5× speedup?

    \[\begin{array}{l} \\ \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{c}{b} \cdot \frac{c}{b}\right) - c}{b} \end{array} \]
    (FPCore (a b c)
     :precision binary64
     (/
      (-
       (* a (- (* -2.0 (/ (* a (pow c 3.0)) (pow b 4.0))) (* (/ c b) (/ c b))))
       c)
      b))
    double code(double a, double b, double c) {
    	return ((a * ((-2.0 * ((a * pow(c, 3.0)) / pow(b, 4.0))) - ((c / b) * (c / b)))) - c) / b;
    }
    
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = ((a * (((-2.0d0) * ((a * (c ** 3.0d0)) / (b ** 4.0d0))) - ((c / b) * (c / b)))) - c) / b
    end function
    
    public static double code(double a, double b, double c) {
    	return ((a * ((-2.0 * ((a * Math.pow(c, 3.0)) / Math.pow(b, 4.0))) - ((c / b) * (c / b)))) - c) / b;
    }
    
    def code(a, b, c):
    	return ((a * ((-2.0 * ((a * math.pow(c, 3.0)) / math.pow(b, 4.0))) - ((c / b) * (c / b)))) - c) / b
    
    function code(a, b, c)
    	return Float64(Float64(Float64(a * Float64(Float64(-2.0 * Float64(Float64(a * (c ^ 3.0)) / (b ^ 4.0))) - Float64(Float64(c / b) * Float64(c / b)))) - c) / b)
    end
    
    function tmp = code(a, b, c)
    	tmp = ((a * ((-2.0 * ((a * (c ^ 3.0)) / (b ^ 4.0))) - ((c / b) * (c / b)))) - c) / b;
    end
    
    code[a_, b_, c_] := N[(N[(N[(a * N[(N[(-2.0 * N[(N[(a * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(c / b), $MachinePrecision] * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{c}{b} \cdot \frac{c}{b}\right) - c}{b}
    \end{array}
    
    Derivation
    1. Initial program 20.7%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative20.7%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified20.7%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in b around inf 95.6%

      \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
    6. Taylor expanded in a around 0 95.6%

      \[\leadsto \frac{\color{blue}{-1 \cdot c + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right)}}{b} \]
    7. Step-by-step derivation
      1. neg-mul-195.6%

        \[\leadsto \frac{\color{blue}{\left(-c\right)} + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right)}{b} \]
      2. +-commutative95.6%

        \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right) + \left(-c\right)}}{b} \]
      3. unsub-neg95.6%

        \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right) - c}}{b} \]
      4. mul-1-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + \color{blue}{\left(-\frac{{c}^{2}}{{b}^{2}}\right)}\right) - c}{b} \]
      5. unsub-neg95.6%

        \[\leadsto \frac{a \cdot \color{blue}{\left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{{c}^{2}}{{b}^{2}}\right)} - c}{b} \]
      6. unpow295.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{\color{blue}{c \cdot c}}{{b}^{2}}\right) - c}{b} \]
      7. unpow295.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{c \cdot c}{\color{blue}{b \cdot b}}\right) - c}{b} \]
      8. times-frac95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{c}{b} \cdot \frac{c}{b}}\right) - c}{b} \]
      9. sqr-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\left(-\frac{c}{b}\right) \cdot \left(-\frac{c}{b}\right)}\right) - c}{b} \]
      10. distribute-frac-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{-c}{b}} \cdot \left(-\frac{c}{b}\right)\right) - c}{b} \]
      11. distribute-frac-neg95.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{-c}{b} \cdot \color{blue}{\frac{-c}{b}}\right) - c}{b} \]
      12. unpow295.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{{\left(\frac{-c}{b}\right)}^{2}}\right) - c}{b} \]
    8. Simplified95.6%

      \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - {\left(\frac{-c}{b}\right)}^{2}\right) - c}}{b} \]
    9. Step-by-step derivation
      1. unpow295.6%

        \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{-c}{b} \cdot \frac{-c}{b}}\right) - c}{b} \]
    10. Applied egg-rr95.6%

      \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{-c}{b} \cdot \frac{-c}{b}}\right) - c}{b} \]
    11. Final simplification95.6%

      \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{c}{b} \cdot \frac{c}{b}\right) - c}{b} \]
    12. Add Preprocessing

    Alternative 5: 96.6% accurate, 0.5× speedup?

    \[\begin{array}{l} \\ c \cdot \left(c \cdot \left(a \cdot \left(-2 \cdot \frac{c \cdot a}{{b}^{5}} + \frac{-1}{{b}^{3}}\right)\right) + \frac{-1}{b}\right) \end{array} \]
    (FPCore (a b c)
     :precision binary64
     (*
      c
      (+
       (* c (* a (+ (* -2.0 (/ (* c a) (pow b 5.0))) (/ -1.0 (pow b 3.0)))))
       (/ -1.0 b))))
    double code(double a, double b, double c) {
    	return c * ((c * (a * ((-2.0 * ((c * a) / pow(b, 5.0))) + (-1.0 / pow(b, 3.0))))) + (-1.0 / b));
    }
    
    real(8) function code(a, b, c)
        real(8), intent (in) :: a
        real(8), intent (in) :: b
        real(8), intent (in) :: c
        code = c * ((c * (a * (((-2.0d0) * ((c * a) / (b ** 5.0d0))) + ((-1.0d0) / (b ** 3.0d0))))) + ((-1.0d0) / b))
    end function
    
    public static double code(double a, double b, double c) {
    	return c * ((c * (a * ((-2.0 * ((c * a) / Math.pow(b, 5.0))) + (-1.0 / Math.pow(b, 3.0))))) + (-1.0 / b));
    }
    
    def code(a, b, c):
    	return c * ((c * (a * ((-2.0 * ((c * a) / math.pow(b, 5.0))) + (-1.0 / math.pow(b, 3.0))))) + (-1.0 / b))
    
    function code(a, b, c)
    	return Float64(c * Float64(Float64(c * Float64(a * Float64(Float64(-2.0 * Float64(Float64(c * a) / (b ^ 5.0))) + Float64(-1.0 / (b ^ 3.0))))) + Float64(-1.0 / b)))
    end
    
    function tmp = code(a, b, c)
    	tmp = c * ((c * (a * ((-2.0 * ((c * a) / (b ^ 5.0))) + (-1.0 / (b ^ 3.0))))) + (-1.0 / b));
    end
    
    code[a_, b_, c_] := N[(c * N[(N[(c * N[(a * N[(N[(-2.0 * N[(N[(c * a), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    c \cdot \left(c \cdot \left(a \cdot \left(-2 \cdot \frac{c \cdot a}{{b}^{5}} + \frac{-1}{{b}^{3}}\right)\right) + \frac{-1}{b}\right)
    \end{array}
    
    Derivation
    1. Initial program 20.7%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
    2. Step-by-step derivation
      1. *-commutative20.7%

        \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
    3. Simplified20.7%

      \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
    4. Add Preprocessing
    5. Taylor expanded in c around 0 96.4%

      \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-1 \cdot \frac{a}{{b}^{3}} + c \cdot \left(-2 \cdot \frac{{a}^{2}}{{b}^{5}} + -0.25 \cdot \frac{c \cdot \left(4 \cdot \frac{{a}^{4}}{{b}^{6}} + 16 \cdot \frac{{a}^{4}}{{b}^{6}}\right)}{a \cdot b}\right)\right) - \frac{1}{b}\right)} \]
    6. Step-by-step derivation
      1. Simplified96.4%

        \[\leadsto \color{blue}{c \cdot \left(c \cdot \mathsf{fma}\left(-1, \frac{a}{{b}^{3}}, c \cdot \mathsf{fma}\left(-2, \frac{{a}^{2}}{{b}^{5}}, -0.25 \cdot \left(c \cdot \frac{\frac{{a}^{4}}{{b}^{6}} \cdot 20}{a \cdot b}\right)\right)\right) - \frac{1}{b}\right)} \]
      2. Taylor expanded in a around 0 95.3%

        \[\leadsto c \cdot \left(c \cdot \color{blue}{\left(a \cdot \left(-2 \cdot \frac{a \cdot c}{{b}^{5}} - \frac{1}{{b}^{3}}\right)\right)} - \frac{1}{b}\right) \]
      3. Step-by-step derivation
        1. *-commutative95.3%

          \[\leadsto c \cdot \left(c \cdot \left(a \cdot \left(-2 \cdot \frac{\color{blue}{c \cdot a}}{{b}^{5}} - \frac{1}{{b}^{3}}\right)\right) - \frac{1}{b}\right) \]
      4. Simplified95.3%

        \[\leadsto c \cdot \left(c \cdot \color{blue}{\left(a \cdot \left(-2 \cdot \frac{c \cdot a}{{b}^{5}} - \frac{1}{{b}^{3}}\right)\right)} - \frac{1}{b}\right) \]
      5. Final simplification95.3%

        \[\leadsto c \cdot \left(c \cdot \left(a \cdot \left(-2 \cdot \frac{c \cdot a}{{b}^{5}} + \frac{-1}{{b}^{3}}\right)\right) + \frac{-1}{b}\right) \]
      6. Add Preprocessing

      Alternative 6: 95.3% accurate, 0.5× speedup?

      \[\begin{array}{l} \\ a \cdot \frac{{c}^{2}}{-{b}^{3}} - \frac{c}{b} \end{array} \]
      (FPCore (a b c)
       :precision binary64
       (- (* a (/ (pow c 2.0) (- (pow b 3.0)))) (/ c b)))
      double code(double a, double b, double c) {
      	return (a * (pow(c, 2.0) / -pow(b, 3.0))) - (c / b);
      }
      
      real(8) function code(a, b, c)
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          real(8), intent (in) :: c
          code = (a * ((c ** 2.0d0) / -(b ** 3.0d0))) - (c / b)
      end function
      
      public static double code(double a, double b, double c) {
      	return (a * (Math.pow(c, 2.0) / -Math.pow(b, 3.0))) - (c / b);
      }
      
      def code(a, b, c):
      	return (a * (math.pow(c, 2.0) / -math.pow(b, 3.0))) - (c / b)
      
      function code(a, b, c)
      	return Float64(Float64(a * Float64((c ^ 2.0) / Float64(-(b ^ 3.0)))) - Float64(c / b))
      end
      
      function tmp = code(a, b, c)
      	tmp = (a * ((c ^ 2.0) / -(b ^ 3.0))) - (c / b);
      end
      
      code[a_, b_, c_] := N[(N[(a * N[(N[Power[c, 2.0], $MachinePrecision] / (-N[Power[b, 3.0], $MachinePrecision])), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      a \cdot \frac{{c}^{2}}{-{b}^{3}} - \frac{c}{b}
      \end{array}
      
      Derivation
      1. Initial program 20.7%

        \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
      2. Step-by-step derivation
        1. *-commutative20.7%

          \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
      3. Simplified20.7%

        \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
      4. Add Preprocessing
      5. Taylor expanded in a around 0 93.7%

        \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
      6. Step-by-step derivation
        1. mul-1-neg93.7%

          \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
        2. unsub-neg93.7%

          \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
        3. mul-1-neg93.7%

          \[\leadsto \color{blue}{\left(-\frac{c}{b}\right)} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
        4. distribute-neg-frac293.7%

          \[\leadsto \color{blue}{\frac{c}{-b}} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
        5. associate-/l*93.7%

          \[\leadsto \frac{c}{-b} - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
      7. Simplified93.7%

        \[\leadsto \color{blue}{\frac{c}{-b} - a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
      8. Final simplification93.7%

        \[\leadsto a \cdot \frac{{c}^{2}}{-{b}^{3}} - \frac{c}{b} \]
      9. Add Preprocessing

      Alternative 7: 95.3% accurate, 1.0× speedup

      \[\begin{array}{l} \\ \frac{c + a \cdot {\left(\frac{c}{b}\right)}^{2}}{-b} \end{array} \]
      (FPCore (a b c) :precision binary64 (/ (+ c (* a (pow (/ c b) 2.0))) (- b)))
      /* Herbie alternative 7 (95.3% accurate, 1.0x speedup):
         (c + a*(c/b)^2) / (-b), obtained by Taylor expansion of the
         quadratic-root expression in b around inf and in a around 0. */
      double code(double a, double b, double c) {
      	return (c + (a * pow((c / b), 2.0))) / -b;
      }
      
      ! Herbie alternative 7 (95.3% accurate, 1.0x speedup):
      ! (c + a*(c/b)**2) / (-b), obtained by Taylor expansion of the
      ! quadratic-root expression in b around inf and in a around 0.
      real(8) function code(a, b, c)
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          real(8), intent (in) :: c
          code = (c + (a * ((c / b) ** 2.0d0))) / -b
      end function
      
      // Herbie alternative 7 (95.3% accurate, 1.0x speedup):
      // (c + a*(c/b)^2) / (-b), obtained by Taylor expansion of the
      // quadratic-root expression in b around inf and in a around 0.
      public static double code(double a, double b, double c) {
      	return (c + (a * Math.pow((c / b), 2.0))) / -b;
      }
      
      # Herbie alternative 7 (95.3% accurate, 1.0x speedup):
      # (c + a*(c/b)^2) / (-b), obtained by Taylor expansion of the
      # quadratic-root expression in b around inf and in a around 0.
      # Requires `import math`.
      def code(a, b, c):
      	return (c + (a * math.pow((c / b), 2.0))) / -b
      
      # Herbie alternative 7 (95.3% accurate, 1.0x speedup):
      # (c + a*(c/b)^2) / (-b); explicit Float64() wraps pin each
      # intermediate to binary64, matching the FPCore :precision.
      function code(a, b, c)
      	return Float64(Float64(c + Float64(a * (Float64(c / b) ^ 2.0))) / Float64(-b))
      end
      
      % Herbie alternative 7 (95.3% accurate, 1.0x speedup):
      % (c + a*(c/b)^2) / (-b) approximation of the quadratic-root expression.
      function tmp = code(a, b, c)
      	tmp = (c + (a * ((c / b) ^ 2.0))) / -b;
      end
      
      code[a_, b_, c_] := N[(N[(c + N[(a * N[Power[N[(c / b), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / (-b)), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \frac{c + a \cdot {\left(\frac{c}{b}\right)}^{2}}{-b}
      \end{array}
      
      Derivation
      1. Initial program 20.7%

        \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
      2. Step-by-step derivation
        1. *-commutative20.7%

          \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
      3. Simplified20.7%

        \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
      4. Add Preprocessing
      5. Taylor expanded in b around inf 95.6%

        \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
      6. Taylor expanded in a around 0 95.6%

        \[\leadsto \frac{\color{blue}{-1 \cdot c + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right)}}{b} \]
      7. Step-by-step derivation
        1. neg-mul-195.6%

          \[\leadsto \frac{\color{blue}{\left(-c\right)} + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right)}{b} \]
        2. +-commutative95.6%

          \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right) + \left(-c\right)}}{b} \]
        3. unsub-neg95.6%

          \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}\right) - c}}{b} \]
        4. mul-1-neg95.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} + \color{blue}{\left(-\frac{{c}^{2}}{{b}^{2}}\right)}\right) - c}{b} \]
        5. unsub-neg95.6%

          \[\leadsto \frac{a \cdot \color{blue}{\left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{{c}^{2}}{{b}^{2}}\right)} - c}{b} \]
        6. unpow295.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{\color{blue}{c \cdot c}}{{b}^{2}}\right) - c}{b} \]
        7. unpow295.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{c \cdot c}{\color{blue}{b \cdot b}}\right) - c}{b} \]
        8. times-frac95.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{c}{b} \cdot \frac{c}{b}}\right) - c}{b} \]
        9. sqr-neg95.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\left(-\frac{c}{b}\right) \cdot \left(-\frac{c}{b}\right)}\right) - c}{b} \]
        10. distribute-frac-neg95.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{\frac{-c}{b}} \cdot \left(-\frac{c}{b}\right)\right) - c}{b} \]
        11. distribute-frac-neg95.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \frac{-c}{b} \cdot \color{blue}{\frac{-c}{b}}\right) - c}{b} \]
        12. unpow295.6%

          \[\leadsto \frac{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - \color{blue}{{\left(\frac{-c}{b}\right)}^{2}}\right) - c}{b} \]
      8. Simplified95.6%

        \[\leadsto \frac{\color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{4}} - {\left(\frac{-c}{b}\right)}^{2}\right) - c}}{b} \]
      9. Taylor expanded in a around 0 93.7%

        \[\leadsto \frac{\color{blue}{-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}} - c}{b} \]
      10. Step-by-step derivation
        1. mul-1-neg93.7%

          \[\leadsto \frac{\color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{2}}\right)} - c}{b} \]
        2. associate-/l*93.7%

          \[\leadsto \frac{\left(-\color{blue}{a \cdot \frac{{c}^{2}}{{b}^{2}}}\right) - c}{b} \]
        3. unpow293.7%

          \[\leadsto \frac{\left(-a \cdot \frac{\color{blue}{c \cdot c}}{{b}^{2}}\right) - c}{b} \]
        4. unpow293.7%

          \[\leadsto \frac{\left(-a \cdot \frac{c \cdot c}{\color{blue}{b \cdot b}}\right) - c}{b} \]
        5. times-frac93.7%

          \[\leadsto \frac{\left(-a \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)}\right) - c}{b} \]
        6. sqr-neg93.7%

          \[\leadsto \frac{\left(-a \cdot \color{blue}{\left(\left(-\frac{c}{b}\right) \cdot \left(-\frac{c}{b}\right)\right)}\right) - c}{b} \]
        7. distribute-frac-neg93.7%

          \[\leadsto \frac{\left(-a \cdot \left(\color{blue}{\frac{-c}{b}} \cdot \left(-\frac{c}{b}\right)\right)\right) - c}{b} \]
        8. distribute-frac-neg93.7%

          \[\leadsto \frac{\left(-a \cdot \left(\frac{-c}{b} \cdot \color{blue}{\frac{-c}{b}}\right)\right) - c}{b} \]
        9. unpow293.7%

          \[\leadsto \frac{\left(-a \cdot \color{blue}{{\left(\frac{-c}{b}\right)}^{2}}\right) - c}{b} \]
        10. distribute-lft-neg-in93.7%

          \[\leadsto \frac{\color{blue}{\left(-a\right) \cdot {\left(\frac{-c}{b}\right)}^{2}} - c}{b} \]
        11. unpow293.7%

          \[\leadsto \frac{\left(-a\right) \cdot \color{blue}{\left(\frac{-c}{b} \cdot \frac{-c}{b}\right)} - c}{b} \]
        12. distribute-frac-neg93.7%

          \[\leadsto \frac{\left(-a\right) \cdot \left(\color{blue}{\left(-\frac{c}{b}\right)} \cdot \frac{-c}{b}\right) - c}{b} \]
        13. distribute-frac-neg93.7%

          \[\leadsto \frac{\left(-a\right) \cdot \left(\left(-\frac{c}{b}\right) \cdot \color{blue}{\left(-\frac{c}{b}\right)}\right) - c}{b} \]
        14. sqr-neg93.7%

          \[\leadsto \frac{\left(-a\right) \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)} - c}{b} \]
        15. unpow293.7%

          \[\leadsto \frac{\left(-a\right) \cdot \color{blue}{{\left(\frac{c}{b}\right)}^{2}} - c}{b} \]
      11. Simplified93.7%

        \[\leadsto \frac{\color{blue}{\left(-a\right) \cdot {\left(\frac{c}{b}\right)}^{2}} - c}{b} \]
      12. Final simplification93.7%

        \[\leadsto \frac{c + a \cdot {\left(\frac{c}{b}\right)}^{2}}{-b} \]
      13. Add Preprocessing

      Alternative 8: 90.5% accurate, 29.0× speedup

      \[\begin{array}{l} \\ \frac{c}{-b} \end{array} \]
      (FPCore (a b c) :precision binary64 (/ c (- b)))
      /* Herbie alternative 8 (90.5% accurate, 29.0x speedup):
         first-order approximation c/(-b) of the quadratic-root expression,
         from Taylor expansion in b around inf; parameter a is unused. */
      double code(double a, double b, double c) {
      	return c / -b;
      }
      
      ! Herbie alternative 8 (90.5% accurate, 29.0x speedup):
      ! first-order approximation c/(-b) of the quadratic-root expression,
      ! from Taylor expansion in b around inf; argument a is unused.
      real(8) function code(a, b, c)
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          real(8), intent (in) :: c
          code = c / -b
      end function
      
      // Herbie alternative 8 (90.5% accurate, 29.0x speedup):
      // first-order approximation c/(-b) of the quadratic-root expression,
      // from Taylor expansion in b around inf; parameter a is unused.
      public static double code(double a, double b, double c) {
      	return c / -b;
      }
      
      # Herbie alternative 8 (90.5% accurate, 29.0x speedup):
      # first-order approximation c/(-b) of the quadratic-root expression,
      # from Taylor expansion in b around inf; parameter a is unused.
      def code(a, b, c):
      	return c / -b
      
      # Herbie alternative 8 (90.5% accurate, 29.0x speedup):
      # first-order approximation c/(-b); parameter a is unused.
      function code(a, b, c)
      	return Float64(c / Float64(-b))
      end
      
      % Herbie alternative 8 (90.5% accurate, 29.0x speedup):
      % first-order approximation c/(-b); parameter a is unused.
      function tmp = code(a, b, c)
      	tmp = c / -b;
      end
      
      code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \frac{c}{-b}
      \end{array}
      
      Derivation
      1. Initial program 20.7%

        \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
      2. Step-by-step derivation
        1. *-commutative20.7%

          \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
      3. Simplified20.7%

        \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
      4. Add Preprocessing
      5. Taylor expanded in b around inf 88.3%

        \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
      6. Step-by-step derivation
        1. associate-*r/88.3%

          \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} \]
        2. mul-1-neg88.3%

          \[\leadsto \frac{\color{blue}{-c}}{b} \]
      7. Simplified88.3%

        \[\leadsto \color{blue}{\frac{-c}{b}} \]
      8. Final simplification88.3%

        \[\leadsto \frac{c}{-b} \]
      9. Add Preprocessing

      Alternative 9: 1.7% accurate, 38.7× speedup

      \[\begin{array}{l} \\ \frac{c}{b} \end{array} \]
      (FPCore (a b c) :precision binary64 (/ c b))
      /* Herbie alternative 9 (only 1.7% accurate, 38.7x speedup):
         c/b — note the sign is opposite alternative 8's c/(-b);
         listed only as the fastest point on the speed/accuracy tradeoff. */
      double code(double a, double b, double c) {
      	return c / b;
      }
      
      ! Herbie alternative 9 (only 1.7% accurate, 38.7x speedup):
      ! c/b — note the sign is opposite alternative 8's c/(-b);
      ! listed only as the fastest point on the speed/accuracy tradeoff.
      real(8) function code(a, b, c)
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          real(8), intent (in) :: c
          code = c / b
      end function
      
      // Herbie alternative 9 (only 1.7% accurate, 38.7x speedup):
      // c/b — note the sign is opposite alternative 8's c/(-b);
      // listed only as the fastest point on the speed/accuracy tradeoff.
      public static double code(double a, double b, double c) {
      	return c / b;
      }
      
      # Herbie alternative 9 (only 1.7% accurate, 38.7x speedup):
      # c/b — note the sign is opposite alternative 8's c/(-b);
      # listed only as the fastest point on the speed/accuracy tradeoff.
      def code(a, b, c):
      	return c / b
      
      # Herbie alternative 9 (only 1.7% accurate, 38.7x speedup):
      # c/b — sign is opposite alternative 8's c/(-b).
      function code(a, b, c)
      	return Float64(c / b)
      end
      
      % Herbie alternative 9 (only 1.7% accurate, 38.7x speedup):
      % c/b — sign is opposite alternative 8's c/(-b).
      function tmp = code(a, b, c)
      	tmp = c / b;
      end
      
      code[a_, b_, c_] := N[(c / b), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \frac{c}{b}
      \end{array}
      
      Derivation
      1. Initial program 20.7%

        \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
      2. Step-by-step derivation
        1. *-commutative20.7%

          \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
      3. Simplified20.7%

        \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
      4. Add Preprocessing
      5. Taylor expanded in b around -inf 8.9%

        \[\leadsto \color{blue}{-1 \cdot \left(b \cdot \left(-1 \cdot \frac{c}{{b}^{2}} + \frac{1}{a}\right)\right)} \]
      6. Step-by-step derivation
        1. mul-1-neg8.9%

          \[\leadsto \color{blue}{-b \cdot \left(-1 \cdot \frac{c}{{b}^{2}} + \frac{1}{a}\right)} \]
        2. *-commutative8.9%

          \[\leadsto -\color{blue}{\left(-1 \cdot \frac{c}{{b}^{2}} + \frac{1}{a}\right) \cdot b} \]
        3. distribute-rgt-neg-in8.9%

          \[\leadsto \color{blue}{\left(-1 \cdot \frac{c}{{b}^{2}} + \frac{1}{a}\right) \cdot \left(-b\right)} \]
        4. +-commutative8.9%

          \[\leadsto \color{blue}{\left(\frac{1}{a} + -1 \cdot \frac{c}{{b}^{2}}\right)} \cdot \left(-b\right) \]
        5. mul-1-neg8.9%

          \[\leadsto \left(\frac{1}{a} + \color{blue}{\left(-\frac{c}{{b}^{2}}\right)}\right) \cdot \left(-b\right) \]
        6. unsub-neg8.9%

          \[\leadsto \color{blue}{\left(\frac{1}{a} - \frac{c}{{b}^{2}}\right)} \cdot \left(-b\right) \]
      7. Simplified8.9%

        \[\leadsto \color{blue}{\left(\frac{1}{a} - \frac{c}{{b}^{2}}\right) \cdot \left(-b\right)} \]
      8. Taylor expanded in a around inf 1.7%

        \[\leadsto \color{blue}{\frac{c}{b}} \]
      9. Add Preprocessing

      Reproduce

      herbie shell --seed 2024131
      ;; Original "Quadratic roots, wide range" benchmark as fed to Herbie:
      ;; the '+' root of the quadratic formula, (-b + sqrt(b*b - 4ac)) / (2a),
      ;; in binary64, with each of a, b, c constrained by :pre to the open
      ;; interval (4.930380657631324e-32, 2.028240960365167e+31).
      (FPCore (a b c)
        :name "Quadratic roots, wide range"
        :precision binary64
        :pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
        (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))