Quadratic roots, medium range

Percentage Accurate: 30.9% → 95.8%
Time: 16.8s
Alternatives: 8
Speedup: 29.0×

Specification

?
\[\left(\left(1.1102230246251565 \cdot 10^{-16} < a \land a < 9007199254740992\right) \land \left(1.1102230246251565 \cdot 10^{-16} < b \land b < 9007199254740992\right)\right) \land \left(1.1102230246251565 \cdot 10^{-16} < c \land c < 9007199254740992\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
;; Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated in binary64. */
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated in binary64.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated in binary64. */
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	# Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a); same FP operation order
	# as the original single-expression form, so rounding is identical.
	discriminant = (b * b) - ((4.0 * a) * c)
	return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), with every step rounded to Float64.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated in double precision.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
(* Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a); each step rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 30.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
;; Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in binary64. */
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in binary64.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in binary64. */
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	# Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	# Intermediate names only; the FP operations and their order are unchanged.
	radicand = (b * b) - ((4.0 * a) * c)
	numerator = -b + math.sqrt(radicand)
	return numerator / (2.0 * a)
# Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), Float64 at every step.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
(* Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 95.8% accurate, 0.2× speedup?

\[\begin{array}{l} \\ a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\left(20 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \end{array} \]
;; Alternative 1: Taylor-series approximation (in a, then c) of the quadratic root.
(FPCore (a b c)
 :precision binary64
 (-
  (*
   a
   (-
    (*
     a
     (+
      (* -2.0 (/ (pow c 3.0) (pow b 5.0)))
      (* -0.25 (* (* 20.0 (pow c 4.0)) (/ a (pow b 7.0))))))
    (/ (pow c 2.0) (pow b 3.0))))
  (/ c b)))
/* Alternative 1: Taylor-series approximation (in a, then c) of the quadratic root. */
double code(double a, double b, double c) {
	return (a * ((a * ((-2.0 * (pow(c, 3.0) / pow(b, 5.0))) + (-0.25 * ((20.0 * pow(c, 4.0)) * (a / pow(b, 7.0)))))) - (pow(c, 2.0) / pow(b, 3.0)))) - (c / b);
}
! Alternative 1: Taylor-series approximation (in a, then c) of the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (a * ((a * (((-2.0d0) * ((c ** 3.0d0) / (b ** 5.0d0))) + ((-0.25d0) * ((20.0d0 * (c ** 4.0d0)) * (a / (b ** 7.0d0)))))) - ((c ** 2.0d0) / (b ** 3.0d0)))) - (c / b)
end function
/** Alternative 1: Taylor-series approximation (in a, then c) of the quadratic root. */
public static double code(double a, double b, double c) {
	return (a * ((a * ((-2.0 * (Math.pow(c, 3.0) / Math.pow(b, 5.0))) + (-0.25 * ((20.0 * Math.pow(c, 4.0)) * (a / Math.pow(b, 7.0)))))) - (Math.pow(c, 2.0) / Math.pow(b, 3.0)))) - (c / b);
}
def code(a, b, c):
	# Alternative 1: Taylor-series approximation (in a, then c) of the quadratic
	# root. Named intermediates only; FP operations and order are unchanged.
	cubic_term = -2.0 * (math.pow(c, 3.0) / math.pow(b, 5.0))
	quartic_term = -0.25 * ((20.0 * math.pow(c, 4.0)) * (a / math.pow(b, 7.0)))
	inner = (a * (cubic_term + quartic_term)) - (math.pow(c, 2.0) / math.pow(b, 3.0))
	return (a * inner) - (c / b)
# Alternative 1: Taylor-series approximation (in a, then c) of the quadratic root.
function code(a, b, c)
	return Float64(Float64(a * Float64(Float64(a * Float64(Float64(-2.0 * Float64((c ^ 3.0) / (b ^ 5.0))) + Float64(-0.25 * Float64(Float64(20.0 * (c ^ 4.0)) * Float64(a / (b ^ 7.0)))))) - Float64((c ^ 2.0) / (b ^ 3.0)))) - Float64(c / b))
end
% Alternative 1: Taylor-series approximation (in a, then c) of the quadratic root.
function tmp = code(a, b, c)
	tmp = (a * ((a * ((-2.0 * ((c ^ 3.0) / (b ^ 5.0))) + (-0.25 * ((20.0 * (c ^ 4.0)) * (a / (b ^ 7.0)))))) - ((c ^ 2.0) / (b ^ 3.0)))) - (c / b);
end
(* Alternative 1: Taylor-series approximation (in a, then c) of the quadratic root. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(a * N[(N[(-2.0 * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.25 * N[(N[(20.0 * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision] * N[(a / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\left(20 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 95.4%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  6. Taylor expanded in c around 0 95.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(20 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)}\right)\right) \]
  7. Step-by-step derivation
    1. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\frac{a \cdot {c}^{4}}{{b}^{7}} \cdot 20\right)}\right)\right) \]
    2. associate-*l/95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\frac{\left(a \cdot {c}^{4}\right) \cdot 20}{{b}^{7}}}\right)\right) \]
    3. associate-*r*95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{\color{blue}{a \cdot \left({c}^{4} \cdot 20\right)}}{{b}^{7}}\right)\right) \]
    4. metadata-eval95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left({c}^{4} \cdot \color{blue}{\left(4 + 16\right)}\right)}{{b}^{7}}\right)\right) \]
    5. distribute-rgt-out95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \color{blue}{\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right)}}{{b}^{7}}\right)\right) \]
    6. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{\color{blue}{\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right) \cdot a}}{{b}^{7}}\right)\right) \]
    7. associate-/l*95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)}\right)\right) \]
    8. distribute-rgt-out95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\color{blue}{\left({c}^{4} \cdot \left(4 + 16\right)\right)} \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
    9. metadata-eval95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\left({c}^{4} \cdot \color{blue}{20}\right) \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
    10. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\color{blue}{\left(20 \cdot {c}^{4}\right)} \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
  8. Simplified95.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\left(20 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)}\right)\right) \]
  9. Final simplification95.4%

    \[\leadsto a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\left(20 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
  10. Add Preprocessing

Alternative 2: 95.8% accurate, 0.2× speedup?

\[\begin{array}{l} \\ {c}^{4} \cdot \left(-5 \cdot \frac{{a}^{3}}{{b}^{7}} - \frac{2 \cdot \frac{{a}^{2}}{{b}^{5}} + \frac{a}{c \cdot {b}^{3}}}{c}\right) - \frac{c}{b} \end{array} \]
;; Alternative 2: series approximation (re-expanded in c) of the quadratic root.
(FPCore (a b c)
 :precision binary64
 (-
  (*
   (pow c 4.0)
   (-
    (* -5.0 (/ (pow a 3.0) (pow b 7.0)))
    (/ (+ (* 2.0 (/ (pow a 2.0) (pow b 5.0))) (/ a (* c (pow b 3.0)))) c)))
  (/ c b)))
/* Alternative 2: series approximation (re-expanded in c) of the quadratic root. */
double code(double a, double b, double c) {
	return (pow(c, 4.0) * ((-5.0 * (pow(a, 3.0) / pow(b, 7.0))) - (((2.0 * (pow(a, 2.0) / pow(b, 5.0))) + (a / (c * pow(b, 3.0)))) / c))) - (c / b);
}
! Alternative 2: series approximation (re-expanded in c) of the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((c ** 4.0d0) * (((-5.0d0) * ((a ** 3.0d0) / (b ** 7.0d0))) - (((2.0d0 * ((a ** 2.0d0) / (b ** 5.0d0))) + (a / (c * (b ** 3.0d0)))) / c))) - (c / b)
end function
/** Alternative 2: series approximation (re-expanded in c) of the quadratic root. */
public static double code(double a, double b, double c) {
	return (Math.pow(c, 4.0) * ((-5.0 * (Math.pow(a, 3.0) / Math.pow(b, 7.0))) - (((2.0 * (Math.pow(a, 2.0) / Math.pow(b, 5.0))) + (a / (c * Math.pow(b, 3.0)))) / c))) - (c / b);
}
def code(a, b, c):
	# Alternative 2: series approximation (re-expanded in c) of the quadratic
	# root. Named intermediates only; FP operations and order are unchanged.
	leading = -5.0 * (math.pow(a, 3.0) / math.pow(b, 7.0))
	correction = ((2.0 * (math.pow(a, 2.0) / math.pow(b, 5.0))) + (a / (c * math.pow(b, 3.0)))) / c
	return (math.pow(c, 4.0) * (leading - correction)) - (c / b)
# Alternative 2: series approximation (re-expanded in c) of the quadratic root.
function code(a, b, c)
	return Float64(Float64((c ^ 4.0) * Float64(Float64(-5.0 * Float64((a ^ 3.0) / (b ^ 7.0))) - Float64(Float64(Float64(2.0 * Float64((a ^ 2.0) / (b ^ 5.0))) + Float64(a / Float64(c * (b ^ 3.0)))) / c))) - Float64(c / b))
end
% Alternative 2: series approximation (re-expanded in c) of the quadratic root.
function tmp = code(a, b, c)
	tmp = ((c ^ 4.0) * ((-5.0 * ((a ^ 3.0) / (b ^ 7.0))) - (((2.0 * ((a ^ 2.0) / (b ^ 5.0))) + (a / (c * (b ^ 3.0)))) / c))) - (c / b);
end
(* Alternative 2: series approximation (re-expanded in c) of the quadratic root. *)
code[a_, b_, c_] := N[(N[(N[Power[c, 4.0], $MachinePrecision] * N[(N[(-5.0 * N[(N[Power[a, 3.0], $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(2.0 * N[(N[Power[a, 2.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(a / N[(c * N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
{c}^{4} \cdot \left(-5 \cdot \frac{{a}^{3}}{{b}^{7}} - \frac{2 \cdot \frac{{a}^{2}}{{b}^{5}} + \frac{a}{c \cdot {b}^{3}}}{c}\right) - \frac{c}{b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 95.4%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  6. Taylor expanded in c around 0 95.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(20 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)}\right)\right) \]
  7. Step-by-step derivation
    1. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\frac{a \cdot {c}^{4}}{{b}^{7}} \cdot 20\right)}\right)\right) \]
    2. associate-*l/95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\frac{\left(a \cdot {c}^{4}\right) \cdot 20}{{b}^{7}}}\right)\right) \]
    3. associate-*r*95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{\color{blue}{a \cdot \left({c}^{4} \cdot 20\right)}}{{b}^{7}}\right)\right) \]
    4. metadata-eval95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left({c}^{4} \cdot \color{blue}{\left(4 + 16\right)}\right)}{{b}^{7}}\right)\right) \]
    5. distribute-rgt-out95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \color{blue}{\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right)}}{{b}^{7}}\right)\right) \]
    6. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{\color{blue}{\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right) \cdot a}}{{b}^{7}}\right)\right) \]
    7. associate-/l*95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)}\right)\right) \]
    8. distribute-rgt-out95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\color{blue}{\left({c}^{4} \cdot \left(4 + 16\right)\right)} \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
    9. metadata-eval95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\left({c}^{4} \cdot \color{blue}{20}\right) \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
    10. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\color{blue}{\left(20 \cdot {c}^{4}\right)} \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
  8. Simplified95.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\left(20 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)}\right)\right) \]
  9. Taylor expanded in c around -inf 95.4%

    \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{{c}^{4} \cdot \left(-5 \cdot \frac{{a}^{3}}{{b}^{7}} + -1 \cdot \frac{2 \cdot \frac{{a}^{2}}{{b}^{5}} + \frac{a}{{b}^{3} \cdot c}}{c}\right)} \]
  10. Final simplification95.4%

    \[\leadsto {c}^{4} \cdot \left(-5 \cdot \frac{{a}^{3}}{{b}^{7}} - \frac{2 \cdot \frac{{a}^{2}}{{b}^{5}} + \frac{a}{c \cdot {b}^{3}}}{c}\right) - \frac{c}{b} \]
  11. Add Preprocessing

Alternative 3: 94.3% accurate, 0.3× speedup?

\[\begin{array}{l} \\ {c}^{2} \cdot \left(c \cdot \frac{-2 \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) - \frac{c}{b} \end{array} \]
;; Alternative 3: lower-order truncated series approximation of the quadratic root.
(FPCore (a b c)
 :precision binary64
 (-
  (*
   (pow c 2.0)
   (- (* c (/ (* -2.0 (pow a 2.0)) (pow b 5.0))) (/ a (pow b 3.0))))
  (/ c b)))
/* Alternative 3: lower-order truncated series approximation of the quadratic root. */
double code(double a, double b, double c) {
	return (pow(c, 2.0) * ((c * ((-2.0 * pow(a, 2.0)) / pow(b, 5.0))) - (a / pow(b, 3.0)))) - (c / b);
}
! Alternative 3: lower-order truncated series approximation of the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((c ** 2.0d0) * ((c * (((-2.0d0) * (a ** 2.0d0)) / (b ** 5.0d0))) - (a / (b ** 3.0d0)))) - (c / b)
end function
/** Alternative 3: lower-order truncated series approximation of the quadratic root. */
public static double code(double a, double b, double c) {
	return (Math.pow(c, 2.0) * ((c * ((-2.0 * Math.pow(a, 2.0)) / Math.pow(b, 5.0))) - (a / Math.pow(b, 3.0)))) - (c / b);
}
def code(a, b, c):
	# Alternative 3: lower-order truncated series approximation of the quadratic
	# root. Named intermediate only; FP operations and order are unchanged.
	slope = (c * ((-2.0 * math.pow(a, 2.0)) / math.pow(b, 5.0))) - (a / math.pow(b, 3.0))
	return (math.pow(c, 2.0) * slope) - (c / b)
# Alternative 3: lower-order truncated series approximation of the quadratic root.
function code(a, b, c)
	return Float64(Float64((c ^ 2.0) * Float64(Float64(c * Float64(Float64(-2.0 * (a ^ 2.0)) / (b ^ 5.0))) - Float64(a / (b ^ 3.0)))) - Float64(c / b))
end
% Alternative 3: lower-order truncated series approximation of the quadratic root.
function tmp = code(a, b, c)
	tmp = ((c ^ 2.0) * ((c * ((-2.0 * (a ^ 2.0)) / (b ^ 5.0))) - (a / (b ^ 3.0)))) - (c / b);
end
(* Alternative 3: lower-order truncated series approximation of the quadratic root. *)
code[a_, b_, c_] := N[(N[(N[Power[c, 2.0], $MachinePrecision] * N[(N[(c * N[(N[(-2.0 * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
{c}^{2} \cdot \left(c \cdot \frac{-2 \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) - \frac{c}{b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 95.4%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  6. Taylor expanded in c around 0 95.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(20 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)}\right)\right) \]
  7. Step-by-step derivation
    1. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\frac{a \cdot {c}^{4}}{{b}^{7}} \cdot 20\right)}\right)\right) \]
    2. associate-*l/95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\frac{\left(a \cdot {c}^{4}\right) \cdot 20}{{b}^{7}}}\right)\right) \]
    3. associate-*r*95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{\color{blue}{a \cdot \left({c}^{4} \cdot 20\right)}}{{b}^{7}}\right)\right) \]
    4. metadata-eval95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left({c}^{4} \cdot \color{blue}{\left(4 + 16\right)}\right)}{{b}^{7}}\right)\right) \]
    5. distribute-rgt-out95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \color{blue}{\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right)}}{{b}^{7}}\right)\right) \]
    6. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{\color{blue}{\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right) \cdot a}}{{b}^{7}}\right)\right) \]
    7. associate-/l*95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)}\right)\right) \]
    8. distribute-rgt-out95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\color{blue}{\left({c}^{4} \cdot \left(4 + 16\right)\right)} \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
    9. metadata-eval95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\left({c}^{4} \cdot \color{blue}{20}\right) \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
    10. *-commutative95.4%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(\color{blue}{\left(20 \cdot {c}^{4}\right)} \cdot \frac{a}{{b}^{7}}\right)\right)\right) \]
  8. Simplified95.4%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\left(20 \cdot {c}^{4}\right) \cdot \frac{a}{{b}^{7}}\right)}\right)\right) \]
  9. Taylor expanded in a around 0 93.8%

    \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{a \cdot \left(-2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{{c}^{2}}{{b}^{3}}\right)} \]
  10. Simplified93.8%

    \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{{c}^{2} \cdot \left(c \cdot \frac{-2 \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right)} \]
  11. Final simplification93.8%

    \[\leadsto {c}^{2} \cdot \left(c \cdot \frac{-2 \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) - \frac{c}{b} \]
  12. Add Preprocessing

Alternative 4: 94.1% accurate, 0.4× speedup?

\[\begin{array}{l} \\ c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right) \end{array} \]
;; Alternative 4: Horner-style series in c for the quadratic root.
(FPCore (a b c)
 :precision binary64
 (*
  c
  (+
   (* c (- (* -2.0 (/ (* c (pow a 2.0)) (pow b 5.0))) (/ a (pow b 3.0))))
   (/ -1.0 b))))
/* Alternative 4: Horner-style series in c for the quadratic root. */
double code(double a, double b, double c) {
	return c * ((c * ((-2.0 * ((c * pow(a, 2.0)) / pow(b, 5.0))) - (a / pow(b, 3.0)))) + (-1.0 / b));
}
! Alternative 4: Horner-style series in c for the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c * ((c * (((-2.0d0) * ((c * (a ** 2.0d0)) / (b ** 5.0d0))) - (a / (b ** 3.0d0)))) + ((-1.0d0) / b))
end function
/** Alternative 4: Horner-style series in c for the quadratic root. */
public static double code(double a, double b, double c) {
	return c * ((c * ((-2.0 * ((c * Math.pow(a, 2.0)) / Math.pow(b, 5.0))) - (a / Math.pow(b, 3.0)))) + (-1.0 / b));
}
def code(a, b, c):
	# Alternative 4: Horner-style series in c for the quadratic root.
	# Named intermediate only; FP operations and order are unchanged.
	inner = (-2.0 * ((c * math.pow(a, 2.0)) / math.pow(b, 5.0))) - (a / math.pow(b, 3.0))
	return c * ((c * inner) + (-1.0 / b))
# Alternative 4: Horner-style series in c for the quadratic root.
function code(a, b, c)
	return Float64(c * Float64(Float64(c * Float64(Float64(-2.0 * Float64(Float64(c * (a ^ 2.0)) / (b ^ 5.0))) - Float64(a / (b ^ 3.0)))) + Float64(-1.0 / b)))
end
% Alternative 4: Horner-style series in c for the quadratic root.
function tmp = code(a, b, c)
	tmp = c * ((c * ((-2.0 * ((c * (a ^ 2.0)) / (b ^ 5.0))) - (a / (b ^ 3.0)))) + (-1.0 / b));
end
(* Alternative 4: Horner-style series in c for the quadratic root. *)
code[a_, b_, c_] := N[(c * N[(N[(c * N[(N[(-2.0 * N[(N[(c * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right)
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 93.5%

    \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-2 \cdot \frac{{a}^{2} \cdot c}{{b}^{5}} + -1 \cdot \frac{a}{{b}^{3}}\right) - \frac{1}{b}\right)} \]
  6. Final simplification93.5%

    \[\leadsto c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right) \]
  7. Add Preprocessing

Alternative 5: 91.3% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} - a \cdot \frac{{c}^{2}}{{b}^{3}} \end{array} \]
;; Alternative 5: two-term series approximation of the quadratic root.
(FPCore (a b c)
 :precision binary64
 (- (/ c (- b)) (* a (/ (pow c 2.0) (pow b 3.0)))))
/* Alternative 5: two-term series approximation of the quadratic root. */
double code(double a, double b, double c) {
	return (c / -b) - (a * (pow(c, 2.0) / pow(b, 3.0)));
}
! Alternative 5: two-term series approximation of the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (c / -b) - (a * ((c ** 2.0d0) / (b ** 3.0d0)))
end function
/** Alternative 5: two-term series approximation of the quadratic root. */
public static double code(double a, double b, double c) {
	return (c / -b) - (a * (Math.pow(c, 2.0) / Math.pow(b, 3.0)));
}
def code(a, b, c):
	# Alternative 5: two-term series approximation of the quadratic root.
	# Named intermediates only; FP operations and order are unchanged.
	linear = c / -b
	quadratic = a * (math.pow(c, 2.0) / math.pow(b, 3.0))
	return linear - quadratic
# Alternative 5: two-term series approximation of the quadratic root.
function code(a, b, c)
	return Float64(Float64(c / Float64(-b)) - Float64(a * Float64((c ^ 2.0) / (b ^ 3.0))))
end
% Alternative 5: two-term series approximation of the quadratic root.
function tmp = code(a, b, c)
	tmp = (c / -b) - (a * ((c ^ 2.0) / (b ^ 3.0)));
end
(* Alternative 5: two-term series approximation of the quadratic root. *)
code[a_, b_, c_] := N[(N[(c / (-b)), $MachinePrecision] - N[(a * N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b} - a \cdot \frac{{c}^{2}}{{b}^{3}}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 90.6%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
  6. Step-by-step derivation
    1. mul-1-neg90.6%

      \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
    2. unsub-neg90.6%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    3. mul-1-neg90.6%

      \[\leadsto \color{blue}{\left(-\frac{c}{b}\right)} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    4. distribute-neg-frac290.6%

      \[\leadsto \color{blue}{\frac{c}{-b}} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    5. associate-/l*90.6%

      \[\leadsto \frac{c}{-b} - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  7. Simplified90.6%

    \[\leadsto \color{blue}{\frac{c}{-b} - a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  8. Final simplification90.6%

    \[\leadsto \frac{c}{-b} - a \cdot \frac{{c}^{2}}{{b}^{3}} \]
  9. Add Preprocessing

Alternative 6: 91.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{c + a \cdot {\left(\frac{c}{b}\right)}^{2}}{-b} \end{array} \]
;; Alternative 6: compact series form (c + a*(c/b)^2) / (-b) of the quadratic root.
(FPCore (a b c) :precision binary64 (/ (+ c (* a (pow (/ c b) 2.0))) (- b)))
/* Alternative 6: compact series form (c + a*(c/b)^2) / (-b) of the quadratic root. */
double code(double a, double b, double c) {
	return (c + (a * pow((c / b), 2.0))) / -b;
}
! Alternative 6: compact series form (c + a*(c/b)^2) / (-b) of the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (c + (a * ((c / b) ** 2.0d0))) / -b
end function
/** Alternative 6: compact series form (c + a*(c/b)^2) / (-b) of the quadratic root. */
public static double code(double a, double b, double c) {
	return (c + (a * Math.pow((c / b), 2.0))) / -b;
}
def code(a, b, c):
	# Alternative 6: compact series form (c + a*(c/b)^2) / (-b) of the quadratic
	# root. Named intermediate only; FP operations and order are unchanged.
	ratio = c / b
	return (c + (a * math.pow(ratio, 2.0))) / -b
# Alternative 6: compact series form (c + a*(c/b)^2) / (-b) of the quadratic root.
function code(a, b, c)
	return Float64(Float64(c + Float64(a * (Float64(c / b) ^ 2.0))) / Float64(-b))
end
function tmp = code(a, b, c)
	% Herbie alternative 6: evaluates (c + a*(c/b)^2) / (-b) in double precision.
	tmp = (c + (a * ((c / b) ^ 2.0))) / -b;
end
code[a_, b_, c_] := N[(N[(c + N[(a * N[Power[N[(c / b), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}

\\
\frac{c + a \cdot {\left(\frac{c}{b}\right)}^{2}}{-b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified 32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 90.4%

    \[\leadsto \color{blue}{c \cdot \left(-1 \cdot \frac{a \cdot c}{{b}^{3}} - \frac{1}{b}\right)} \]
  6. Step-by-step derivation
    1. associate-*r/90.4%

      \[\leadsto c \cdot \left(\color{blue}{\frac{-1 \cdot \left(a \cdot c\right)}{{b}^{3}}} - \frac{1}{b}\right) \]
    2. neg-mul-190.4%

      \[\leadsto c \cdot \left(\frac{\color{blue}{-a \cdot c}}{{b}^{3}} - \frac{1}{b}\right) \]
    3. distribute-rgt-neg-in90.4%

      \[\leadsto c \cdot \left(\frac{\color{blue}{a \cdot \left(-c\right)}}{{b}^{3}} - \frac{1}{b}\right) \]
  7. Simplified90.4%

    \[\leadsto \color{blue}{c \cdot \left(\frac{a \cdot \left(-c\right)}{{b}^{3}} - \frac{1}{b}\right)} \]
  8. Taylor expanded in a around inf 90.2%

    \[\leadsto c \cdot \color{blue}{\left(a \cdot \left(-1 \cdot \frac{c}{{b}^{3}} - \frac{1}{a \cdot b}\right)\right)} \]
  9. Step-by-step derivation
    1. mul-1-neg90.2%

      \[\leadsto c \cdot \left(a \cdot \left(\color{blue}{\left(-\frac{c}{{b}^{3}}\right)} - \frac{1}{a \cdot b}\right)\right) \]
    2. distribute-frac-neg90.2%

      \[\leadsto c \cdot \left(a \cdot \left(\color{blue}{\frac{-c}{{b}^{3}}} - \frac{1}{a \cdot b}\right)\right) \]
    3. associate-/r*90.2%

      \[\leadsto c \cdot \left(a \cdot \left(\frac{-c}{{b}^{3}} - \color{blue}{\frac{\frac{1}{a}}{b}}\right)\right) \]
  10. Simplified90.2%

    \[\leadsto c \cdot \color{blue}{\left(a \cdot \left(\frac{-c}{{b}^{3}} - \frac{\frac{1}{a}}{b}\right)\right)} \]
  11. Taylor expanded in b around inf 90.6%

    \[\leadsto \color{blue}{\frac{-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  12. Step-by-step derivation
    1. distribute-lft-out90.6%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}}{b} \]
    2. associate-*r/90.6%

      \[\leadsto \color{blue}{-1 \cdot \frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    3. mul-1-neg90.6%

      \[\leadsto \color{blue}{-\frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    4. distribute-neg-frac290.6%

      \[\leadsto \color{blue}{\frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{-b}} \]
    5. associate-/l*90.6%

      \[\leadsto \frac{c + \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{2}}}}{-b} \]
    6. unpow290.6%

      \[\leadsto \frac{c + a \cdot \frac{\color{blue}{c \cdot c}}{{b}^{2}}}{-b} \]
    7. unpow290.6%

      \[\leadsto \frac{c + a \cdot \frac{c \cdot c}{\color{blue}{b \cdot b}}}{-b} \]
    8. times-frac90.6%

      \[\leadsto \frac{c + a \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)}}{-b} \]
    9. unpow290.6%

      \[\leadsto \frac{c + a \cdot \color{blue}{{\left(\frac{c}{b}\right)}^{2}}}{-b} \]
  13. Simplified90.6%

    \[\leadsto \color{blue}{\frac{c + a \cdot {\left(\frac{c}{b}\right)}^{2}}{-b}} \]
  14. Final simplification90.6%

    \[\leadsto \frac{c + a \cdot {\left(\frac{c}{b}\right)}^{2}}{-b} \]
  15. Add Preprocessing

Alternative 7: 81.8% accurate, 29.0× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ c (- b)))
/* Herbie alternative 7: first-order approximation of the root, c / (-b).
   The coefficient a does not appear in this approximation. */
double code(double a, double b, double c) {
	(void)a;  /* unused in this approximation */
	const double neg_b = -b;
	return c / neg_b;
}
real(8) function code(a, b, c)
    ! Herbie alternative 7: approximate the root as c / (-b); a is unused.
    ! Note: "-b" must be parenthesized; standard Fortran forbids two
    ! consecutive operators, so the generated "/ -b" does not compile.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c / (-b)
end function
// Herbie alternative 7: first-order approximation of the root, c / (-b).
// The coefficient a does not appear in this approximation.
public static double code(double a, double b, double c) {
	double negB = -b;
	return c / negB;
}
def code(a, b, c):
	# Herbie alternative 7: first-order approximation of the root, c / (-b).
	# The coefficient a does not appear in this approximation.
	denominator = -b
	return c / denominator
function code(a, b, c)
	# Herbie alternative 7: approximate the root as c / (-b); a is unused.
	return Float64(c / Float64(-b))
end
function tmp = code(a, b, c)
	% Herbie alternative 7: approximate the root as c / (-b); a is unused.
	tmp = c / -b;
end
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified 32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 80.8%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
  6. Step-by-step derivation
    1. associate-*r/80.8%

      \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} \]
    2. mul-1-neg80.8%

      \[\leadsto \frac{\color{blue}{-c}}{b} \]
  7. Simplified80.8%

    \[\leadsto \color{blue}{\frac{-c}{b}} \]
  8. Final simplification 80.8%

    \[\leadsto \frac{c}{-b} \]
  9. Add Preprocessing

Alternative 8: 3.2% accurate, 116.0× speedup?

\[\begin{array}{l} \\ 0 \end{array} \]
(FPCore (a b c) :precision binary64 0.0)
/* Herbie alternative 8: degenerate constant approximation; always returns 0.
   All three arguments are ignored (3.2% accurate, maximal speedup). */
double code(double a, double b, double c) {
	return 0.0;
}
real(8) function code(a, b, c)
    ! Herbie alternative 8: degenerate constant approximation; always 0.
    ! All three arguments are ignored.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = 0.0d0
end function
// Herbie alternative 8: degenerate constant approximation; always returns 0.
// All three arguments are ignored.
public static double code(double a, double b, double c) {
	return 0.0;
}
def code(a, b, c):
	# Herbie alternative 8: degenerate constant approximation; always 0.0.
	return 0.0
function code(a, b, c)
	# Herbie alternative 8: degenerate constant approximation; always 0.0.
	return 0.0
end
function tmp = code(a, b, c)
	% Herbie alternative 8: degenerate constant approximation; always 0.0.
	tmp = 0.0;
end
code[a_, b_, c_] := 0.0
\begin{array}{l}

\\
0
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative32.0%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified 32.0%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 90.4%

    \[\leadsto \color{blue}{c \cdot \left(-1 \cdot \frac{a \cdot c}{{b}^{3}} - \frac{1}{b}\right)} \]
  6. Step-by-step derivation
    1. associate-*r/90.4%

      \[\leadsto c \cdot \left(\color{blue}{\frac{-1 \cdot \left(a \cdot c\right)}{{b}^{3}}} - \frac{1}{b}\right) \]
    2. neg-mul-190.4%

      \[\leadsto c \cdot \left(\frac{\color{blue}{-a \cdot c}}{{b}^{3}} - \frac{1}{b}\right) \]
    3. distribute-rgt-neg-in90.4%

      \[\leadsto c \cdot \left(\frac{\color{blue}{a \cdot \left(-c\right)}}{{b}^{3}} - \frac{1}{b}\right) \]
  7. Simplified90.4%

    \[\leadsto \color{blue}{c \cdot \left(\frac{a \cdot \left(-c\right)}{{b}^{3}} - \frac{1}{b}\right)} \]
  8. Taylor expanded in a around 0 80.6%

    \[\leadsto c \cdot \color{blue}{\frac{-1}{b}} \]
  9. Step-by-step derivation
    1. expm1-log1p-u69.0%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(c \cdot \frac{-1}{b}\right)\right)} \]
    2. expm1-undefine30.6%

      \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(c \cdot \frac{-1}{b}\right)} - 1} \]
    3. associate-*r/30.6%

      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{\frac{c \cdot -1}{b}}\right)} - 1 \]
  10. Applied egg-rr30.6%

    \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\frac{c \cdot -1}{b}\right)} - 1} \]
  11. Step-by-step derivation
    1. sub-neg30.6%

      \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\frac{c \cdot -1}{b}\right)} + \left(-1\right)} \]
    2. metadata-eval30.6%

      \[\leadsto e^{\mathsf{log1p}\left(\frac{c \cdot -1}{b}\right)} + \color{blue}{-1} \]
    3. +-commutative30.6%

      \[\leadsto \color{blue}{-1 + e^{\mathsf{log1p}\left(\frac{c \cdot -1}{b}\right)}} \]
    4. log1p-undefine30.6%

      \[\leadsto -1 + e^{\color{blue}{\log \left(1 + \frac{c \cdot -1}{b}\right)}} \]
    5. rem-exp-log42.2%

      \[\leadsto -1 + \color{blue}{\left(1 + \frac{c \cdot -1}{b}\right)} \]
    6. *-commutative42.2%

      \[\leadsto -1 + \left(1 + \frac{\color{blue}{-1 \cdot c}}{b}\right) \]
    7. associate-*r/42.2%

      \[\leadsto -1 + \left(1 + \color{blue}{-1 \cdot \frac{c}{b}}\right) \]
    8. mul-1-neg42.2%

      \[\leadsto -1 + \left(1 + \color{blue}{\left(-\frac{c}{b}\right)}\right) \]
    9. unsub-neg42.2%

      \[\leadsto -1 + \color{blue}{\left(1 - \frac{c}{b}\right)} \]
  12. Simplified42.2%

    \[\leadsto \color{blue}{-1 + \left(1 - \frac{c}{b}\right)} \]
  13. Taylor expanded in c around 0 3.2%

    \[\leadsto -1 + \color{blue}{1} \]
  14. Final simplification3.2%

    \[\leadsto 0 \]
  15. Add Preprocessing

Reproduce

?
herbie shell --seed 2024074 
(FPCore (a b c)
  :name "Quadratic roots, medium range"
  :precision binary64
  :pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))