Quadratic roots, wide range

Percentage Accurate: 18.2% → 97.8%
Time: 13.8s
Alternatives: 8
Speedup: 29.0×

Specification

\[\left(\left(4.930380657631324 \cdot 10^{-32} < a \land a < 2.028240960365167 \cdot 10^{+31}\right) \land \left(4.930380657631324 \cdot 10^{-32} < b \land b < 2.028240960365167 \cdot 10^{+31}\right)\right) \land \left(4.930380657631324 \cdot 10^{-32} < c \land c < 2.028240960365167 \cdot 10^{+31}\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Specification: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	// Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a); binary64.
	// NOTE(review): cancellation-prone when 4*a*c << b*b (report: 18.2% accurate).
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a); binary64.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
	// Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a); binary64.
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	# Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a); binary64.
	# NOTE(review): cancellation-prone when 4*a*c << b*b (report: 18.2% accurate).
	disc = (b * b) - ((4.0 * a) * c)
	return (-b + math.sqrt(disc)) / (2.0 * a)
function code(a, b, c)
	# Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a); binary64.
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	% Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a); binary64.
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed

Herbie found 8 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 18.2% accurate, 1.0× speedup

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	// Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
	// Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	# Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	disc = (b * b) - ((4.0 * a) * c)
	return (-b + math.sqrt(disc)) / (2.0 * a)
function code(a, b, c)
	# Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	% Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 97.8% accurate, 0.2× speedup

\[\begin{array}{l} \\ a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(a \cdot -5\right) \cdot {c}^{4}}{{b}^{7}}\right) - \frac{c \cdot c}{{b}^{3}}\right) - \frac{c}{b} \end{array} \]
; Alternative 1: Taylor expansion of the root in a about 0 (sqrt-free).
(FPCore (a b c)
 :precision binary64
 (-
  (*
   a
   (-
    (*
     a
     (+
      (* -2.0 (/ (pow c 3.0) (pow b 5.0)))
      (/ (* (* a -5.0) (pow c 4.0)) (pow b 7.0))))
    (/ (* c c) (pow b 3.0))))
  (/ c b)))
double code(double a, double b, double c) {
	// Alternative 1 (97.8% accurate): Taylor expansion of the root in a about 0.
	return (a * ((a * ((-2.0 * (pow(c, 3.0) / pow(b, 5.0))) + (((a * -5.0) * pow(c, 4.0)) / pow(b, 7.0)))) - ((c * c) / pow(b, 3.0)))) - (c / b);
}
real(8) function code(a, b, c)
    ! Alternative 1 (97.8% accurate): Taylor expansion of the root in a about 0.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (a * ((a * (((-2.0d0) * ((c ** 3.0d0) / (b ** 5.0d0))) + (((a * (-5.0d0)) * (c ** 4.0d0)) / (b ** 7.0d0)))) - ((c * c) / (b ** 3.0d0)))) - (c / b)
end function
public static double code(double a, double b, double c) {
	// Alternative 1 (97.8% accurate): Taylor expansion of the root in a about 0.
	return (a * ((a * ((-2.0 * (Math.pow(c, 3.0) / Math.pow(b, 5.0))) + (((a * -5.0) * Math.pow(c, 4.0)) / Math.pow(b, 7.0)))) - ((c * c) / Math.pow(b, 3.0)))) - (c / b);
}
def code(a, b, c):
	# Alternative 1 (97.8% accurate): Taylor expansion of the root in a about 0.
	t3 = -2.0 * (math.pow(c, 3.0) / math.pow(b, 5.0))
	t4 = ((a * -5.0) * math.pow(c, 4.0)) / math.pow(b, 7.0)
	t2 = (c * c) / math.pow(b, 3.0)
	return (a * ((a * (t3 + t4)) - t2)) - (c / b)
function code(a, b, c)
	# Alternative 1 (97.8% accurate): Taylor expansion of the root in a about 0.
	return Float64(Float64(a * Float64(Float64(a * Float64(Float64(-2.0 * Float64((c ^ 3.0) / (b ^ 5.0))) + Float64(Float64(Float64(a * -5.0) * (c ^ 4.0)) / (b ^ 7.0)))) - Float64(Float64(c * c) / (b ^ 3.0)))) - Float64(c / b))
end
function tmp = code(a, b, c)
	% Alternative 1 (97.8% accurate): Taylor expansion of the root in a about 0.
	tmp = (a * ((a * ((-2.0 * ((c ^ 3.0) / (b ^ 5.0))) + (((a * -5.0) * (c ^ 4.0)) / (b ^ 7.0)))) - ((c * c) / (b ^ 3.0)))) - (c / b);
end
code[a_, b_, c_] := N[(N[(a * N[(N[(a * N[(N[(-2.0 * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(a * -5.0), $MachinePrecision] * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(c * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(a \cdot -5\right) \cdot {c}^{4}}{{b}^{7}}\right) - \frac{c \cdot c}{{b}^{3}}\right) - \frac{c}{b}
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 98.4%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  6. Taylor expanded in c around 0 98.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \color{blue}{-5 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}}\right)\right) \]
  7. Step-by-step derivation
    1. associate-*r/98.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \color{blue}{\frac{-5 \cdot \left(a \cdot {c}^{4}\right)}{{b}^{7}}}\right)\right) \]
    2. associate-*r*98.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\color{blue}{\left(-5 \cdot a\right) \cdot {c}^{4}}}{{b}^{7}}\right)\right) \]
  8. Simplified98.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \color{blue}{\frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}}\right)\right) \]
  9. Step-by-step derivation
    1. associate-*r/98.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\frac{-1 \cdot {c}^{2}}{{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
  10. Applied egg-rr98.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\frac{-1 \cdot {c}^{2}}{{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
  11. Step-by-step derivation
    1. associate-*r/98.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{-1 \cdot \frac{{c}^{2}}{{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
    2. mul-1-neg98.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\left(-\frac{{c}^{2}}{{b}^{3}}\right)} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
    3. distribute-neg-frac298.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\frac{{c}^{2}}{-{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
  12. Simplified98.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\color{blue}{\frac{{c}^{2}}{-{b}^{3}}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
  13. Step-by-step derivation
    1. unpow298.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\frac{\color{blue}{c \cdot c}}{-{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
  14. Applied egg-rr98.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(\frac{\color{blue}{c \cdot c}}{-{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(-5 \cdot a\right) \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
  15. Final simplification98.4%

    \[\leadsto a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{\left(a \cdot -5\right) \cdot {c}^{4}}{{b}^{7}}\right) - \frac{c \cdot c}{{b}^{3}}\right) - \frac{c}{b} \]
  16. Add Preprocessing

Alternative 2: 97.0% accurate, 0.2× speedup

\[\begin{array}{l} \\ \frac{-2 \cdot \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}} - \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b} \end{array} \]
; Alternative 2: Taylor expansion of the root in b about infinity.
(FPCore (a b c)
 :precision binary64
 (/
  (-
   (* -2.0 (/ (* (pow c 3.0) (pow a 2.0)) (pow b 4.0)))
   (+ c (/ (* a (pow c 2.0)) (pow b 2.0))))
  b))
double code(double a, double b, double c) {
	// Alternative 2 (97.0% accurate): Taylor expansion of the root in b about infinity.
	return ((-2.0 * ((pow(c, 3.0) * pow(a, 2.0)) / pow(b, 4.0))) - (c + ((a * pow(c, 2.0)) / pow(b, 2.0)))) / b;
}
real(8) function code(a, b, c)
    ! Alternative 2 (97.0% accurate): Taylor expansion of the root in b about infinity.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (((-2.0d0) * (((c ** 3.0d0) * (a ** 2.0d0)) / (b ** 4.0d0))) - (c + ((a * (c ** 2.0d0)) / (b ** 2.0d0)))) / b
end function
public static double code(double a, double b, double c) {
	// Alternative 2 (97.0% accurate): Taylor expansion of the root in b about infinity.
	return ((-2.0 * ((Math.pow(c, 3.0) * Math.pow(a, 2.0)) / Math.pow(b, 4.0))) - (c + ((a * Math.pow(c, 2.0)) / Math.pow(b, 2.0)))) / b;
}
def code(a, b, c):
	# Alternative 2 (97.0% accurate): Taylor expansion of the root in b about infinity.
	high = -2.0 * ((math.pow(c, 3.0) * math.pow(a, 2.0)) / math.pow(b, 4.0))
	low = c + ((a * math.pow(c, 2.0)) / math.pow(b, 2.0))
	return (high - low) / b
function code(a, b, c)
	# Alternative 2 (97.0% accurate): Taylor expansion of the root in b about infinity.
	return Float64(Float64(Float64(-2.0 * Float64(Float64((c ^ 3.0) * (a ^ 2.0)) / (b ^ 4.0))) - Float64(c + Float64(Float64(a * (c ^ 2.0)) / (b ^ 2.0)))) / b)
end
function tmp = code(a, b, c)
	% Alternative 2 (97.0% accurate): Taylor expansion of the root in b about infinity.
	tmp = ((-2.0 * (((c ^ 3.0) * (a ^ 2.0)) / (b ^ 4.0))) - (c + ((a * (c ^ 2.0)) / (b ^ 2.0)))) / b;
end
code[a_, b_, c_] := N[(N[(N[(-2.0 * N[(N[(N[Power[c, 3.0], $MachinePrecision] * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c + N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{-2 \cdot \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}} - \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 97.8%

    \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
  6. Final simplification97.8%

    \[\leadsto \frac{-2 \cdot \frac{{c}^{3} \cdot {a}^{2}}{{b}^{4}} - \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b} \]
  7. Add Preprocessing

Alternative 3: 97.0% accurate, 0.4× speedup

\[\begin{array}{l} \\ a \cdot \left({c}^{3} \cdot \left(\frac{a \cdot -2}{{b}^{5}} + \frac{-1}{c \cdot {b}^{3}}\right)\right) - \frac{c}{b} \end{array} \]
; Alternative 3: expansion in a about 0, then in c about infinity.
(FPCore (a b c)
 :precision binary64
 (-
  (*
   a
   (* (pow c 3.0) (+ (/ (* a -2.0) (pow b 5.0)) (/ -1.0 (* c (pow b 3.0))))))
  (/ c b)))
double code(double a, double b, double c) {
	// Alternative 3 (97.0% accurate): expansion in a about 0, then in c about infinity.
	return (a * (pow(c, 3.0) * (((a * -2.0) / pow(b, 5.0)) + (-1.0 / (c * pow(b, 3.0)))))) - (c / b);
}
real(8) function code(a, b, c)
    ! Alternative 3 (97.0% accurate): expansion in a about 0, then in c about infinity.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (a * ((c ** 3.0d0) * (((a * (-2.0d0)) / (b ** 5.0d0)) + ((-1.0d0) / (c * (b ** 3.0d0)))))) - (c / b)
end function
public static double code(double a, double b, double c) {
	// Alternative 3 (97.0% accurate): expansion in a about 0, then in c about infinity.
	return (a * (Math.pow(c, 3.0) * (((a * -2.0) / Math.pow(b, 5.0)) + (-1.0 / (c * Math.pow(b, 3.0)))))) - (c / b);
}
def code(a, b, c):
	# Alternative 3 (97.0% accurate): expansion in a about 0, then in c about infinity.
	inner = ((a * -2.0) / math.pow(b, 5.0)) + (-1.0 / (c * math.pow(b, 3.0)))
	return (a * (math.pow(c, 3.0) * inner)) - (c / b)
function code(a, b, c)
	# Alternative 3 (97.0% accurate): expansion in a about 0, then in c about infinity.
	return Float64(Float64(a * Float64((c ^ 3.0) * Float64(Float64(Float64(a * -2.0) / (b ^ 5.0)) + Float64(-1.0 / Float64(c * (b ^ 3.0)))))) - Float64(c / b))
end
function tmp = code(a, b, c)
	% Alternative 3 (97.0% accurate): expansion in a about 0, then in c about infinity.
	tmp = (a * ((c ^ 3.0) * (((a * -2.0) / (b ^ 5.0)) + (-1.0 / (c * (b ^ 3.0)))))) - (c / b);
end
code[a_, b_, c_] := N[(N[(a * N[(N[Power[c, 3.0], $MachinePrecision] * N[(N[(N[(a * -2.0), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[(c * N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a \cdot \left({c}^{3} \cdot \left(\frac{a \cdot -2}{{b}^{5}} + \frac{-1}{c \cdot {b}^{3}}\right)\right) - \frac{c}{b}
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 97.8%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{{c}^{2}}{{b}^{3}}\right)} \]
  6. Taylor expanded in c around inf 97.8%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \color{blue}{\left({c}^{3} \cdot \left(-2 \cdot \frac{a}{{b}^{5}} - \frac{1}{{b}^{3} \cdot c}\right)\right)} \]
  7. Step-by-step derivation
    1. associate-*r/97.8%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left({c}^{3} \cdot \left(\color{blue}{\frac{-2 \cdot a}{{b}^{5}}} - \frac{1}{{b}^{3} \cdot c}\right)\right) \]
    2. *-commutative97.8%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left({c}^{3} \cdot \left(\frac{-2 \cdot a}{{b}^{5}} - \frac{1}{\color{blue}{c \cdot {b}^{3}}}\right)\right) \]
  8. Simplified97.8%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \color{blue}{\left({c}^{3} \cdot \left(\frac{-2 \cdot a}{{b}^{5}} - \frac{1}{c \cdot {b}^{3}}\right)\right)} \]
  9. Final simplification97.8%

    \[\leadsto a \cdot \left({c}^{3} \cdot \left(\frac{a \cdot -2}{{b}^{5}} + \frac{-1}{c \cdot {b}^{3}}\right)\right) - \frac{c}{b} \]
  10. Add Preprocessing

Alternative 4: 96.7% accurate, 0.4× speedup

\[\begin{array}{l} \\ c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right) \end{array} \]
; Alternative 4: Horner-style Taylor expansion of the root in c about 0.
(FPCore (a b c)
 :precision binary64
 (*
  c
  (+
   (* c (- (* -2.0 (/ (* c (pow a 2.0)) (pow b 5.0))) (/ a (pow b 3.0))))
   (/ -1.0 b))))
double code(double a, double b, double c) {
	// Alternative 4 (96.7% accurate): Horner-style Taylor expansion of the root in c about 0.
	return c * ((c * ((-2.0 * ((c * pow(a, 2.0)) / pow(b, 5.0))) - (a / pow(b, 3.0)))) + (-1.0 / b));
}
real(8) function code(a, b, c)
    ! Alternative 4 (96.7% accurate): Horner-style Taylor expansion of the root in c about 0.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c * ((c * (((-2.0d0) * ((c * (a ** 2.0d0)) / (b ** 5.0d0))) - (a / (b ** 3.0d0)))) + ((-1.0d0) / b))
end function
public static double code(double a, double b, double c) {
	// Alternative 4 (96.7% accurate): Horner-style Taylor expansion of the root in c about 0.
	return c * ((c * ((-2.0 * ((c * Math.pow(a, 2.0)) / Math.pow(b, 5.0))) - (a / Math.pow(b, 3.0)))) + (-1.0 / b));
}
def code(a, b, c):
	# Alternative 4 (96.7% accurate): Horner-style Taylor expansion of the root in c about 0.
	slope = (-2.0 * ((c * math.pow(a, 2.0)) / math.pow(b, 5.0))) - (a / math.pow(b, 3.0))
	return c * ((c * slope) + (-1.0 / b))
function code(a, b, c)
	# Alternative 4 (96.7% accurate): Horner-style Taylor expansion of the root in c about 0.
	return Float64(c * Float64(Float64(c * Float64(Float64(-2.0 * Float64(Float64(c * (a ^ 2.0)) / (b ^ 5.0))) - Float64(a / (b ^ 3.0)))) + Float64(-1.0 / b)))
end
function tmp = code(a, b, c)
	% Alternative 4 (96.7% accurate): Horner-style Taylor expansion of the root in c about 0.
	tmp = c * ((c * ((-2.0 * ((c * (a ^ 2.0)) / (b ^ 5.0))) - (a / (b ^ 3.0)))) + (-1.0 / b));
end
code[a_, b_, c_] := N[(c * N[(N[(c * N[(N[(-2.0 * N[(N[(c * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right)
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 97.5%

    \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-2 \cdot \frac{{a}^{2} \cdot c}{{b}^{5}} + -1 \cdot \frac{a}{{b}^{3}}\right) - \frac{1}{b}\right)} \]
  6. Final simplification97.5%

    \[\leadsto c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right) \]
  7. Add Preprocessing

Alternative 5: 96.7% accurate, 0.5× speedup

\[\begin{array}{l} \\ c \cdot \left(\frac{-2 \cdot {\left(c \cdot \frac{a}{b}\right)}^{2} - c \cdot a}{{b}^{3}} + \frac{-1}{b}\right) \end{array} \]
; Alternative 5: expansion in c about 0, then in b about infinity, regrouped over b^3.
(FPCore (a b c)
 :precision binary64
 (*
  c
  (+ (/ (- (* -2.0 (pow (* c (/ a b)) 2.0)) (* c a)) (pow b 3.0)) (/ -1.0 b))))
double code(double a, double b, double c) {
	// Alternative 5 (96.7% accurate): expansion in c about 0, regrouped over b^3.
	return c * ((((-2.0 * pow((c * (a / b)), 2.0)) - (c * a)) / pow(b, 3.0)) + (-1.0 / b));
}
real(8) function code(a, b, c)
    ! Alternative 5 (96.7% accurate): expansion in c about 0, regrouped over b^3.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c * (((((-2.0d0) * ((c * (a / b)) ** 2.0d0)) - (c * a)) / (b ** 3.0d0)) + ((-1.0d0) / b))
end function
public static double code(double a, double b, double c) {
	// Alternative 5 (96.7% accurate): expansion in c about 0, regrouped over b^3.
	return c * ((((-2.0 * Math.pow((c * (a / b)), 2.0)) - (c * a)) / Math.pow(b, 3.0)) + (-1.0 / b));
}
def code(a, b, c):
	# Alternative 5 (96.7% accurate): expansion in c about 0, regrouped over b**3.
	num = (-2.0 * math.pow((c * (a / b)), 2.0)) - (c * a)
	return c * ((num / math.pow(b, 3.0)) + (-1.0 / b))
function code(a, b, c)
	# Alternative 5 (96.7% accurate): expansion in c about 0, regrouped over b^3.
	return Float64(c * Float64(Float64(Float64(Float64(-2.0 * (Float64(c * Float64(a / b)) ^ 2.0)) - Float64(c * a)) / (b ^ 3.0)) + Float64(-1.0 / b)))
end
function tmp = code(a, b, c)
	% Alternative 5 (96.7% accurate): expansion in c about 0, regrouped over b^3.
	tmp = c * ((((-2.0 * ((c * (a / b)) ^ 2.0)) - (c * a)) / (b ^ 3.0)) + (-1.0 / b));
end
code[a_, b_, c_] := N[(c * N[(N[(N[(N[(-2.0 * N[Power[N[(c * N[(a / b), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] - N[(c * a), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(\frac{-2 \cdot {\left(c \cdot \frac{a}{b}\right)}^{2} - c \cdot a}{{b}^{3}} + \frac{-1}{b}\right)
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 97.5%

    \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-2 \cdot \frac{{a}^{2} \cdot c}{{b}^{5}} + -1 \cdot \frac{a}{{b}^{3}}\right) - \frac{1}{b}\right)} \]
  6. Taylor expanded in b around inf 97.5%

    \[\leadsto c \cdot \left(\color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}} + -1 \cdot \left(a \cdot c\right)}{{b}^{3}}} - \frac{1}{b}\right) \]
  7. Step-by-step derivation
    1. mul-1-neg97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}} + \color{blue}{\left(-a \cdot c\right)}}{{b}^{3}} - \frac{1}{b}\right) \]
    2. unsub-neg97.5%

      \[\leadsto c \cdot \left(\frac{\color{blue}{-2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}} - a \cdot c}}{{b}^{3}} - \frac{1}{b}\right) \]
    3. associate-/l*97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \color{blue}{\left({a}^{2} \cdot \frac{{c}^{2}}{{b}^{2}}\right)} - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    4. unpow297.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \left(\color{blue}{\left(a \cdot a\right)} \cdot \frac{{c}^{2}}{{b}^{2}}\right) - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    5. unpow297.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \left(\left(a \cdot a\right) \cdot \frac{\color{blue}{c \cdot c}}{{b}^{2}}\right) - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    6. unpow297.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \left(\left(a \cdot a\right) \cdot \frac{c \cdot c}{\color{blue}{b \cdot b}}\right) - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    7. times-frac97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \left(\left(a \cdot a\right) \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)}\right) - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    8. swap-sqr97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \color{blue}{\left(\left(a \cdot \frac{c}{b}\right) \cdot \left(a \cdot \frac{c}{b}\right)\right)} - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    9. unpow297.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot \color{blue}{{\left(a \cdot \frac{c}{b}\right)}^{2}} - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    10. associate-*r/97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot {\color{blue}{\left(\frac{a \cdot c}{b}\right)}}^{2} - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    11. *-commutative97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot {\left(\frac{\color{blue}{c \cdot a}}{b}\right)}^{2} - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    12. associate-/l*97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot {\color{blue}{\left(c \cdot \frac{a}{b}\right)}}^{2} - a \cdot c}{{b}^{3}} - \frac{1}{b}\right) \]
    13. *-commutative97.5%

      \[\leadsto c \cdot \left(\frac{-2 \cdot {\left(c \cdot \frac{a}{b}\right)}^{2} - \color{blue}{c \cdot a}}{{b}^{3}} - \frac{1}{b}\right) \]
  8. Simplified97.5%

    \[\leadsto c \cdot \left(\color{blue}{\frac{-2 \cdot {\left(c \cdot \frac{a}{b}\right)}^{2} - c \cdot a}{{b}^{3}}} - \frac{1}{b}\right) \]
  9. Final simplification97.5%

    \[\leadsto c \cdot \left(\frac{-2 \cdot {\left(c \cdot \frac{a}{b}\right)}^{2} - c \cdot a}{{b}^{3}} + \frac{-1}{b}\right) \]
  10. Add Preprocessing

Alternative 6: 95.3% accurate, 1.0× speedup

\[\begin{array}{l} \\ \frac{-c}{b} - \frac{a \cdot {\left(\frac{c}{b}\right)}^{2}}{b} \end{array} \]
; Alternative 6: two-term expansion of the root in b about infinity.
(FPCore (a b c)
 :precision binary64
 (- (/ (- c) b) (/ (* a (pow (/ c b) 2.0)) b)))
double code(double a, double b, double c) {
	// Alternative 6 (95.3% accurate): two-term expansion of the root in b about infinity.
	return (-c / b) - ((a * pow((c / b), 2.0)) / b);
}
real(8) function code(a, b, c)
    ! Alternative 6 (95.3% accurate): two-term expansion of the root in b about infinity.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-c / b) - ((a * ((c / b) ** 2.0d0)) / b)
end function
public static double code(double a, double b, double c) {
	// Alternative 6 (95.3% accurate): two-term expansion of the root in b about infinity.
	return (-c / b) - ((a * Math.pow((c / b), 2.0)) / b);
}
def code(a, b, c):
	# Alternative 6 (95.3% accurate): two-term expansion of the root in b about infinity.
	ratio = c / b
	return (-c / b) - ((a * math.pow(ratio, 2.0)) / b)
function code(a, b, c)
	# Alternative 6 (95.3% accurate): two-term expansion of the root in b about infinity.
	return Float64(Float64(Float64(-c) / b) - Float64(Float64(a * (Float64(c / b) ^ 2.0)) / b))
end
function tmp = code(a, b, c)
	% Alternative 6 (95.3% accurate): two-term expansion of the root in b about infinity.
	tmp = (-c / b) - ((a * ((c / b) ^ 2.0)) / b);
end
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(N[(a * N[Power[N[(c / b), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{-c}{b} - \frac{a \cdot {\left(\frac{c}{b}\right)}^{2}}{b}
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 96.6%

    \[\leadsto \color{blue}{\frac{-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  6. Step-by-step derivation
    1. mul-1-neg96.6%

      \[\leadsto \frac{-1 \cdot c + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{2}}\right)}}{b} \]
    2. unsub-neg96.6%

      \[\leadsto \frac{\color{blue}{-1 \cdot c - \frac{a \cdot {c}^{2}}{{b}^{2}}}}{b} \]
    3. mul-1-neg96.6%

      \[\leadsto \frac{\color{blue}{\left(-c\right)} - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b} \]
    4. associate-/l*96.6%

      \[\leadsto \frac{\left(-c\right) - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{2}}}}{b} \]
  7. Simplified96.6%

    \[\leadsto \color{blue}{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}} \]
  8. Step-by-step derivation
    1. add-cube-cbrt94.5%

      \[\leadsto \color{blue}{\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}} \cdot \sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}}\right) \cdot \sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}}} \]
    2. pow394.5%

      \[\leadsto \color{blue}{{\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}}\right)}^{3}} \]
    3. div-inv94.5%

      \[\leadsto {\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \color{blue}{\left({c}^{2} \cdot \frac{1}{{b}^{2}}\right)}}{b}}\right)}^{3} \]
    4. pow-flip94.5%

      \[\leadsto {\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot \color{blue}{{b}^{\left(-2\right)}}\right)}{b}}\right)}^{3} \]
    5. metadata-eval94.5%

      \[\leadsto {\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot {b}^{\color{blue}{-2}}\right)}{b}}\right)}^{3} \]
  9. Applied egg-rr94.5%

    \[\leadsto \color{blue}{{\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b}}\right)}^{3}} \]
  10. Step-by-step derivation
    1. rem-cube-cbrt96.6%

      \[\leadsto \color{blue}{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b}} \]
    2. div-sub96.6%

      \[\leadsto \color{blue}{\frac{-c}{b} - \frac{a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b}} \]
    3. un-div-inv96.3%

      \[\leadsto \color{blue}{\left(-c\right) \cdot \frac{1}{b}} - \frac{a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b} \]
    4. distribute-lft-neg-out96.3%

      \[\leadsto \color{blue}{\left(-c \cdot \frac{1}{b}\right)} - \frac{a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b} \]
    5. div-inv96.6%

      \[\leadsto \left(-\color{blue}{\frac{c}{b}}\right) - \frac{a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b} \]
    6. add-sqr-sqrt96.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot \color{blue}{\left(\sqrt{{c}^{2} \cdot {b}^{-2}} \cdot \sqrt{{c}^{2} \cdot {b}^{-2}}\right)}}{b} \]
    7. pow296.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot \color{blue}{{\left(\sqrt{{c}^{2} \cdot {b}^{-2}}\right)}^{2}}}{b} \]
    8. sqrt-prod96.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\color{blue}{\left(\sqrt{{c}^{2}} \cdot \sqrt{{b}^{-2}}\right)}}^{2}}{b} \]
    9. sqrt-pow196.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\left(\color{blue}{{c}^{\left(\frac{2}{2}\right)}} \cdot \sqrt{{b}^{-2}}\right)}^{2}}{b} \]
    10. metadata-eval96.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\left({c}^{\color{blue}{1}} \cdot \sqrt{{b}^{-2}}\right)}^{2}}{b} \]
    11. pow196.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\left(\color{blue}{c} \cdot \sqrt{{b}^{-2}}\right)}^{2}}{b} \]
    12. sqrt-pow196.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\left(c \cdot \color{blue}{{b}^{\left(\frac{-2}{2}\right)}}\right)}^{2}}{b} \]
    13. metadata-eval96.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\left(c \cdot {b}^{\color{blue}{-1}}\right)}^{2}}{b} \]
    14. inv-pow96.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\left(c \cdot \color{blue}{\frac{1}{b}}\right)}^{2}}{b} \]
    15. div-inv96.6%

      \[\leadsto \left(-\frac{c}{b}\right) - \frac{a \cdot {\color{blue}{\left(\frac{c}{b}\right)}}^{2}}{b} \]
  11. Applied egg-rr96.6%

    \[\leadsto \color{blue}{\left(-\frac{c}{b}\right) - \frac{a \cdot {\left(\frac{c}{b}\right)}^{2}}{b}} \]
  12. Final simplification96.6%

    \[\leadsto \frac{-c}{b} - \frac{a \cdot {\left(\frac{c}{b}\right)}^{2}}{b} \]
  13. Add Preprocessing

Alternative 7: 95.3% accurate, 8.3× speedup

\[\begin{array}{l} \\ \frac{\left(-c\right) - a \cdot \left(\frac{c}{b} \cdot \frac{c}{b}\right)}{b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ (- (- c) (* a (* (/ c b) (/ c b)))) b))
double code(double a, double b, double c) {
	// Alternative 7 (95.3% accurate): two-term expansion in b about infinity, no pow.
	return (-c - (a * ((c / b) * (c / b)))) / b;
}
real(8) function code(a, b, c)
    ! Alternative 7 (95.3% accurate): two-term expansion in b about infinity, no pow.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-c - (a * ((c / b) * (c / b)))) / b
end function
public static double code(double a, double b, double c) {
	// Alternative 7 (95.3% accurate): two-term expansion in b about infinity, no pow.
	return (-c - (a * ((c / b) * (c / b)))) / b;
}
def code(a, b, c):
	# Alternative 7 (95.3% accurate): two-term expansion in b about infinity, no pow.
	r = c / b
	return (-c - (a * (r * r))) / b
function code(a, b, c)
	# Alternative 7 (95.3% accurate): two-term expansion in b about infinity, no pow.
	return Float64(Float64(Float64(-c) - Float64(a * Float64(Float64(c / b) * Float64(c / b)))) / b)
end
function tmp = code(a, b, c)
	% Large-b series for the quadratic root: (-c - a*(c/b)^2) / b.
	tmp = (-c - (a * ((c / b) * (c / b)))) / b;
end
code[a_, b_, c_] := N[(N[((-c) - N[(a * N[(N[(c / b), $MachinePrecision] * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-c\right) - a \cdot \left(\frac{c}{b} \cdot \frac{c}{b}\right)}{b}
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative 15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified 15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 96.6%

    \[\leadsto \color{blue}{\frac{-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  6. Step-by-step derivation
    1. mul-1-neg 96.6%

      \[\leadsto \frac{-1 \cdot c + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{2}}\right)}}{b} \]
    2. unsub-neg 96.6%

      \[\leadsto \frac{\color{blue}{-1 \cdot c - \frac{a \cdot {c}^{2}}{{b}^{2}}}}{b} \]
    3. mul-1-neg 96.6%

      \[\leadsto \frac{\color{blue}{\left(-c\right)} - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b} \]
    4. associate-/l* 96.6%

      \[\leadsto \frac{\left(-c\right) - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{2}}}}{b} \]
  7. Simplified 96.6%

    \[\leadsto \color{blue}{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}} \]
  8. Step-by-step derivation
    1. add-cube-cbrt 94.5%

      \[\leadsto \color{blue}{\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}} \cdot \sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}}\right) \cdot \sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}}} \]
    2. pow3 94.5%

      \[\leadsto \color{blue}{{\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}}\right)}^{3}} \]
    3. div-inv 94.5%

      \[\leadsto {\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \color{blue}{\left({c}^{2} \cdot \frac{1}{{b}^{2}}\right)}}{b}}\right)}^{3} \]
    4. pow-flip 94.5%

      \[\leadsto {\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot \color{blue}{{b}^{\left(-2\right)}}\right)}{b}}\right)}^{3} \]
    5. metadata-eval 94.5%

      \[\leadsto {\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot {b}^{\color{blue}{-2}}\right)}{b}}\right)}^{3} \]
  9. Applied egg-rr 94.5%

    \[\leadsto \color{blue}{{\left(\sqrt[3]{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b}}\right)}^{3}} \]
  10. Step-by-step derivation
    1. rem-cube-cbrt 96.6%

      \[\leadsto \color{blue}{\frac{\left(-c\right) - a \cdot \left({c}^{2} \cdot {b}^{-2}\right)}{b}} \]
    2. add-sqr-sqrt 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot \color{blue}{\left(\sqrt{{c}^{2} \cdot {b}^{-2}} \cdot \sqrt{{c}^{2} \cdot {b}^{-2}}\right)}}{b} \]
    3. pow2 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot \color{blue}{{\left(\sqrt{{c}^{2} \cdot {b}^{-2}}\right)}^{2}}}{b} \]
    4. sqrt-prod 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\color{blue}{\left(\sqrt{{c}^{2}} \cdot \sqrt{{b}^{-2}}\right)}}^{2}}{b} \]
    5. sqrt-pow1 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\left(\color{blue}{{c}^{\left(\frac{2}{2}\right)}} \cdot \sqrt{{b}^{-2}}\right)}^{2}}{b} \]
    6. metadata-eval 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\left({c}^{\color{blue}{1}} \cdot \sqrt{{b}^{-2}}\right)}^{2}}{b} \]
    7. pow1 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\left(\color{blue}{c} \cdot \sqrt{{b}^{-2}}\right)}^{2}}{b} \]
    8. sqrt-pow1 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\left(c \cdot \color{blue}{{b}^{\left(\frac{-2}{2}\right)}}\right)}^{2}}{b} \]
    9. metadata-eval 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\left(c \cdot {b}^{\color{blue}{-1}}\right)}^{2}}{b} \]
    10. inv-pow 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\left(c \cdot \color{blue}{\frac{1}{b}}\right)}^{2}}{b} \]
    11. div-inv 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot {\color{blue}{\left(\frac{c}{b}\right)}}^{2}}{b} \]
  11. Applied egg-rr 96.6%

    \[\leadsto \color{blue}{\frac{\left(-c\right) - a \cdot {\left(\frac{c}{b}\right)}^{2}}{b}} \]
  12. Step-by-step derivation
    1. unpow2 96.6%

      \[\leadsto \frac{\left(-c\right) - a \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)}}{b} \]
  13. Applied egg-rr 96.6%

    \[\leadsto \frac{\left(-c\right) - a \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)}}{b} \]
  14. Add Preprocessing

Alternative 8: 90.2% accurate, 29.0× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ c (- b)))
double code(double a, double b, double c) {
	/* Leading-order root for large b: x = -c/b.  a is unused here. */
	(void) a;
	return -(c / b);
}
real(8) function code(a, b, c)
    ! Leading-order root of a*x**2 + b*x + c for large b: x = -c/b.
    ! (a contributes only to higher-order terms and is unused here.)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    ! Parenthesize the negation: "c / -b" puts two operators in a row,
    ! which standard Fortran rejects (gfortran reports a syntax error).
    code = c / (-b)
end function
public static double code(double a, double b, double c) {
	// Leading-order root for large b: x = -c/b; a does not appear.
	return -(c / b);
}
def code(a, b, c):
	# Leading-order root for large b: x = -c/b (a is unused).
	return -(c / b)
function code(a, b, c)
	# Leading-order root for large b: x = c / (-b); a is unused.
	# Float64() wrappers pin intermediates to binary64 per the FPCore spec.
	return Float64(c / Float64(-b))
end
function tmp = code(a, b, c)
	% Leading-order root for large b: x = c / (-b); a is unused.
	tmp = c / -b;
end
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b}
\end{array}
Derivation
  1. Initial program 15.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative 15.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified 15.6%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 92.2%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
  6. Step-by-step derivation
    1. associate-*r/ 92.2%

      \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} \]
    2. mul-1-neg 92.2%

      \[\leadsto \frac{\color{blue}{-c}}{b} \]
  7. Simplified 92.2%

    \[\leadsto \color{blue}{\frac{-c}{b}} \]
  8. Final simplification 92.2%

    \[\leadsto \frac{c}{-b} \]
  9. Add Preprocessing

Reproduce

?
herbie shell --seed 2024139 
(FPCore (a b c)
  :name "Quadratic roots, wide range"
  :precision binary64
  :pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))