Quadratic roots, wide range

Percentage Accurate: 17.8% → 97.7%
Time: 14.0s
Alternatives: 9
Speedup: 29.0×

Specification

?
\[\left(\left(4.930380657631324 \cdot 10^{-32} < a \land a < 2.028240960365167 \cdot 10^{+31}\right) \land \left(4.930380657631324 \cdot 10^{-32} < b \land b < 2.028240960365167 \cdot 10^{+31}\right)\right) \land \left(4.930380657631324 \cdot 10^{-32} < c \land c < 2.028240960365167 \cdot 10^{+31}\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in real(8) precision.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    """Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), with explicit Float64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
(* Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), rounded to $MachinePrecision at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 9 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 17.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in real(8) precision.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    """Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a)."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), Float64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
(* Initial program: quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 97.7% accurate, 0.2× speedup?

\[\begin{array}{l} \\ a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(a \cdot \frac{20 \cdot {c}^{4}}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \end{array} \]
; Alternative 1: Taylor expansion of the quadratic root in a around 0 (see derivation below).
(FPCore (a b c)
 :precision binary64
 (-
  (*
   a
   (-
    (*
     a
     (+
      (* -2.0 (/ (pow c 3.0) (pow b 5.0)))
      (* -0.25 (* a (/ (* 20.0 (pow c 4.0)) (pow b 7.0))))))
    (/ (pow c 2.0) (pow b 3.0))))
  (/ c b)))
double code(double a, double b, double c) {
	return (a * ((a * ((-2.0 * (pow(c, 3.0) / pow(b, 5.0))) + (-0.25 * (a * ((20.0 * pow(c, 4.0)) / pow(b, 7.0)))))) - (pow(c, 2.0) / pow(b, 3.0)))) - (c / b);
}
! Alternative 1: Taylor expansion of the quadratic root in a around 0.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (a * ((a * (((-2.0d0) * ((c ** 3.0d0) / (b ** 5.0d0))) + ((-0.25d0) * (a * ((20.0d0 * (c ** 4.0d0)) / (b ** 7.0d0)))))) - ((c ** 2.0d0) / (b ** 3.0d0)))) - (c / b)
end function
// Alternative 1: Taylor expansion of the quadratic root in a around 0.
public static double code(double a, double b, double c) {
	return (a * ((a * ((-2.0 * (Math.pow(c, 3.0) / Math.pow(b, 5.0))) + (-0.25 * (a * ((20.0 * Math.pow(c, 4.0)) / Math.pow(b, 7.0)))))) - (Math.pow(c, 2.0) / Math.pow(b, 3.0)))) - (c / b);
}
def code(a, b, c):
    """Alternative 1: Taylor expansion of the quadratic root in a around 0."""
    cubic_term = -2.0 * (math.pow(c, 3.0) / math.pow(b, 5.0))
    quartic_term = -0.25 * (a * ((20.0 * math.pow(c, 4.0)) / math.pow(b, 7.0)))
    quadratic_term = math.pow(c, 2.0) / math.pow(b, 3.0)
    return (a * ((a * (cubic_term + quartic_term)) - quadratic_term)) - (c / b)
# Alternative 1: Taylor expansion of the quadratic root in a around 0, Float64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(a * Float64(Float64(a * Float64(Float64(-2.0 * Float64((c ^ 3.0) / (b ^ 5.0))) + Float64(-0.25 * Float64(a * Float64(Float64(20.0 * (c ^ 4.0)) / (b ^ 7.0)))))) - Float64((c ^ 2.0) / (b ^ 3.0)))) - Float64(c / b))
end
% Alternative 1: Taylor expansion of the quadratic root in a around 0.
function tmp = code(a, b, c)
	tmp = (a * ((a * ((-2.0 * ((c ^ 3.0) / (b ^ 5.0))) + (-0.25 * (a * ((20.0 * (c ^ 4.0)) / (b ^ 7.0)))))) - ((c ^ 2.0) / (b ^ 3.0)))) - (c / b);
end
(* Alternative 1: Taylor expansion of the quadratic root in a around 0, rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(a * N[(N[(-2.0 * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.25 * N[(a * N[(N[(20.0 * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(a \cdot \frac{20 \cdot {c}^{4}}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 99.0%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  6. Taylor expanded in c around 0 99.0%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(20 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)}\right)\right) \]
  7. Step-by-step derivation
    1. *-commutative99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(\frac{a \cdot {c}^{4}}{{b}^{7}} \cdot 20\right)}\right)\right) \]
    2. associate-*l/99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\frac{\left(a \cdot {c}^{4}\right) \cdot 20}{{b}^{7}}}\right)\right) \]
    3. associate-*r*99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{\color{blue}{a \cdot \left({c}^{4} \cdot 20\right)}}{{b}^{7}}\right)\right) \]
    4. metadata-eval99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \left({c}^{4} \cdot \color{blue}{\left(4 + 16\right)}\right)}{{b}^{7}}\right)\right) \]
    5. distribute-rgt-out99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \frac{a \cdot \color{blue}{\left(4 \cdot {c}^{4} + 16 \cdot {c}^{4}\right)}}{{b}^{7}}\right)\right) \]
    6. associate-/l*99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(a \cdot \frac{4 \cdot {c}^{4} + 16 \cdot {c}^{4}}{{b}^{7}}\right)}\right)\right) \]
    7. distribute-rgt-out99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(a \cdot \frac{\color{blue}{{c}^{4} \cdot \left(4 + 16\right)}}{{b}^{7}}\right)\right)\right) \]
    8. metadata-eval99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(a \cdot \frac{{c}^{4} \cdot \color{blue}{20}}{{b}^{7}}\right)\right)\right) \]
    9. *-commutative99.0%

      \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(a \cdot \frac{\color{blue}{20 \cdot {c}^{4}}}{{b}^{7}}\right)\right)\right) \]
  8. Simplified99.0%

    \[\leadsto -1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \color{blue}{\left(a \cdot \frac{20 \cdot {c}^{4}}{{b}^{7}}\right)}\right)\right) \]
  9. Final simplification99.0%

    \[\leadsto a \cdot \left(a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.25 \cdot \left(a \cdot \frac{20 \cdot {c}^{4}}{{b}^{7}}\right)\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b} \]
  10. Add Preprocessing

Alternative 2: 96.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot {\left(\frac{c}{-b}\right)}^{2}}{b} \end{array} \]
; Alternative 2: Taylor expansion of the quadratic root in b around infinity.
(FPCore (a b c)
 :precision binary64
 (/
  (-
   (- (* (* -2.0 (pow a 2.0)) (/ (pow c 3.0) (pow b 4.0))) c)
   (* a (pow (/ c (- b)) 2.0)))
  b))
double code(double a, double b, double c) {
	return ((((-2.0 * pow(a, 2.0)) * (pow(c, 3.0) / pow(b, 4.0))) - c) - (a * pow((c / -b), 2.0))) / b;
}
! Alternative 2: Taylor expansion of the quadratic root in b around infinity.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (((((-2.0d0) * (a ** 2.0d0)) * ((c ** 3.0d0) / (b ** 4.0d0))) - c) - (a * ((c / -b) ** 2.0d0))) / b
end function
// Alternative 2: Taylor expansion of the quadratic root in b around infinity.
public static double code(double a, double b, double c) {
	return ((((-2.0 * Math.pow(a, 2.0)) * (Math.pow(c, 3.0) / Math.pow(b, 4.0))) - c) - (a * Math.pow((c / -b), 2.0))) / b;
}
def code(a, b, c):
    """Alternative 2: Taylor expansion of the quadratic root in b around infinity."""
    leading = (-2.0 * math.pow(a, 2.0)) * (math.pow(c, 3.0) / math.pow(b, 4.0))
    squared = a * math.pow(c / -b, 2.0)
    return ((leading - c) - squared) / b
# Alternative 2: Taylor expansion of the quadratic root in b around infinity, Float64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(Float64(Float64(Float64(-2.0 * (a ^ 2.0)) * Float64((c ^ 3.0) / (b ^ 4.0))) - c) - Float64(a * (Float64(c / Float64(-b)) ^ 2.0))) / b)
end
% Alternative 2: Taylor expansion of the quadratic root in b around infinity.
function tmp = code(a, b, c)
	tmp = ((((-2.0 * (a ^ 2.0)) * ((c ^ 3.0) / (b ^ 4.0))) - c) - (a * ((c / -b) ^ 2.0))) / b;
end
(* Alternative 2: Taylor expansion of the quadratic root in b around infinity, rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[(-2.0 * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] - N[(a * N[Power[N[(c / (-b)), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot {\left(\frac{c}{-b}\right)}^{2}}{b}
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 98.5%

    \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
  6. Step-by-step derivation
    1. associate-+r+98.5%

      \[\leadsto \frac{\color{blue}{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + -1 \cdot c\right) + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}}{b} \]
    2. mul-1-neg98.5%

      \[\leadsto \frac{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + -1 \cdot c\right) + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{2}}\right)}}{b} \]
    3. unsub-neg98.5%

      \[\leadsto \frac{\color{blue}{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + -1 \cdot c\right) - \frac{a \cdot {c}^{2}}{{b}^{2}}}}{b} \]
    4. mul-1-neg98.5%

      \[\leadsto \frac{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \color{blue}{\left(-c\right)}\right) - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b} \]
    5. unsub-neg98.5%

      \[\leadsto \frac{\color{blue}{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} - c\right)} - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b} \]
    6. associate-/l*98.5%

      \[\leadsto \frac{\left(-2 \cdot \color{blue}{\left({a}^{2} \cdot \frac{{c}^{3}}{{b}^{4}}\right)} - c\right) - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b} \]
    7. associate-*r*98.5%

      \[\leadsto \frac{\left(\color{blue}{\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}}} - c\right) - \frac{a \cdot {c}^{2}}{{b}^{2}}}{b} \]
    8. associate-/l*98.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{2}}}}{b} \]
  7. Simplified98.5%

    \[\leadsto \color{blue}{\frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \frac{{c}^{2}}{{b}^{2}}}{b}} \]
  8. Taylor expanded in a around 0 98.5%

    \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - \color{blue}{\frac{a \cdot {c}^{2}}{{b}^{2}}}}{b} \]
  9. Step-by-step derivation
    1. associate-/l*98.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{2}}}}{b} \]
    2. unpow298.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \frac{\color{blue}{c \cdot c}}{{b}^{2}}}{b} \]
    3. unpow298.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \frac{c \cdot c}{\color{blue}{b \cdot b}}}{b} \]
    4. times-frac98.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)}}{b} \]
    5. sqr-neg98.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \color{blue}{\left(\left(-\frac{c}{b}\right) \cdot \left(-\frac{c}{b}\right)\right)}}{b} \]
    6. distribute-frac-neg98.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \left(\color{blue}{\frac{-c}{b}} \cdot \left(-\frac{c}{b}\right)\right)}{b} \]
    7. distribute-frac-neg98.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \left(\frac{-c}{b} \cdot \color{blue}{\frac{-c}{b}}\right)}{b} \]
    8. unpow298.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot \color{blue}{{\left(\frac{-c}{b}\right)}^{2}}}{b} \]
    9. distribute-frac-neg98.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot {\color{blue}{\left(-\frac{c}{b}\right)}}^{2}}{b} \]
    10. distribute-neg-frac298.5%

      \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot {\color{blue}{\left(\frac{c}{-b}\right)}}^{2}}{b} \]
  10. Simplified98.5%

    \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - \color{blue}{a \cdot {\left(\frac{c}{-b}\right)}^{2}}}{b} \]
  11. Final simplification98.5%

    \[\leadsto \frac{\left(\left(-2 \cdot {a}^{2}\right) \cdot \frac{{c}^{3}}{{b}^{4}} - c\right) - a \cdot {\left(\frac{c}{-b}\right)}^{2}}{b} \]
  12. Add Preprocessing

Alternative 3: 96.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} - a \cdot \left(\frac{{c}^{2}}{{b}^{3}} + 2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}}\right) \end{array} \]
; Alternative 3: Taylor expansion of the quadratic root in c around 0.
(FPCore (a b c)
 :precision binary64
 (-
  (/ c (- b))
  (*
   a
   (+ (/ (pow c 2.0) (pow b 3.0)) (* 2.0 (/ (* a (pow c 3.0)) (pow b 5.0)))))))
double code(double a, double b, double c) {
	return (c / -b) - (a * ((pow(c, 2.0) / pow(b, 3.0)) + (2.0 * ((a * pow(c, 3.0)) / pow(b, 5.0)))));
}
! Alternative 3: Taylor expansion of the quadratic root in c around 0.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (c / -b) - (a * (((c ** 2.0d0) / (b ** 3.0d0)) + (2.0d0 * ((a * (c ** 3.0d0)) / (b ** 5.0d0)))))
end function
// Alternative 3: Taylor expansion of the quadratic root in c around 0.
public static double code(double a, double b, double c) {
	return (c / -b) - (a * ((Math.pow(c, 2.0) / Math.pow(b, 3.0)) + (2.0 * ((a * Math.pow(c, 3.0)) / Math.pow(b, 5.0)))));
}
def code(a, b, c):
    """Alternative 3: Taylor expansion of the quadratic root in c around 0."""
    first_order = math.pow(c, 2.0) / math.pow(b, 3.0)
    second_order = 2.0 * ((a * math.pow(c, 3.0)) / math.pow(b, 5.0))
    return (c / -b) - (a * (first_order + second_order))
# Alternative 3: Taylor expansion of the quadratic root in c around 0, Float64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(c / Float64(-b)) - Float64(a * Float64(Float64((c ^ 2.0) / (b ^ 3.0)) + Float64(2.0 * Float64(Float64(a * (c ^ 3.0)) / (b ^ 5.0))))))
end
% Alternative 3: Taylor expansion of the quadratic root in c around 0.
function tmp = code(a, b, c)
	tmp = (c / -b) - (a * (((c ^ 2.0) / (b ^ 3.0)) + (2.0 * ((a * (c ^ 3.0)) / (b ^ 5.0)))));
end
(* Alternative 3: Taylor expansion of the quadratic root in c around 0, rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(c / (-b)), $MachinePrecision] - N[(a * N[(N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] + N[(2.0 * N[(N[(a * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b} - a \cdot \left(\frac{{c}^{2}}{{b}^{3}} + 2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}}\right)
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 98.2%

    \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-2 \cdot \frac{{a}^{2} \cdot c}{{b}^{5}} + -1 \cdot \frac{a}{{b}^{3}}\right) - \frac{1}{b}\right)} \]
  6. Taylor expanded in c around -inf 97.9%

    \[\leadsto \color{blue}{-1 \cdot \left({c}^{3} \cdot \left(-1 \cdot \frac{-1 \cdot \frac{a}{{b}^{3}} - \frac{1}{b \cdot c}}{c} + 2 \cdot \frac{{a}^{2}}{{b}^{5}}\right)\right)} \]
  7. Taylor expanded in a around 0 98.5%

    \[\leadsto -1 \cdot \color{blue}{\left(a \cdot \left(2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + \frac{{c}^{2}}{{b}^{3}}\right) + \frac{c}{b}\right)} \]
  8. Final simplification98.5%

    \[\leadsto \frac{c}{-b} - a \cdot \left(\frac{{c}^{2}}{{b}^{3}} + 2 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}}\right) \]
  9. Add Preprocessing

Alternative 4: 96.6% accurate, 0.4× speedup?

\[\begin{array}{l} \\ c \cdot \left(\frac{-1}{b} - \frac{\mathsf{fma}\left(2, {\left(c \cdot \frac{a}{b}\right)}^{2}, c \cdot a\right)}{{b}^{3}}\right) \end{array} \]
; Alternative 4: series rewrite of the quadratic root using fused multiply-add.
(FPCore (a b c)
 :precision binary64
 (*
  c
  (- (/ -1.0 b) (/ (fma 2.0 (pow (* c (/ a b)) 2.0) (* c a)) (pow b 3.0)))))
double code(double a, double b, double c) {
	return c * ((-1.0 / b) - (fma(2.0, pow((c * (a / b)), 2.0), (c * a)) / pow(b, 3.0)));
}
# Alternative 4: series rewrite of the quadratic root using fma, Float64 rounding at each step.
function code(a, b, c)
	return Float64(c * Float64(Float64(-1.0 / b) - Float64(fma(2.0, (Float64(c * Float64(a / b)) ^ 2.0), Float64(c * a)) / (b ^ 3.0))))
end
(* Alternative 4: series rewrite of the quadratic root (fma expanded), rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(c * N[(N[(-1.0 / b), $MachinePrecision] - N[(N[(2.0 * N[Power[N[(c * N[(a / b), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision] + N[(c * a), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(\frac{-1}{b} - \frac{\mathsf{fma}\left(2, {\left(c \cdot \frac{a}{b}\right)}^{2}, c \cdot a\right)}{{b}^{3}}\right)
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 98.2%

    \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-2 \cdot \frac{{a}^{2} \cdot c}{{b}^{5}} + -1 \cdot \frac{a}{{b}^{3}}\right) - \frac{1}{b}\right)} \]
  6. Taylor expanded in b around -inf 98.2%

    \[\leadsto c \cdot \left(\color{blue}{-1 \cdot \frac{2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}} + a \cdot c}{{b}^{3}}} - \frac{1}{b}\right) \]
  7. Step-by-step derivation
    1. mul-1-neg98.2%

      \[\leadsto c \cdot \left(\color{blue}{\left(-\frac{2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}} + a \cdot c}{{b}^{3}}\right)} - \frac{1}{b}\right) \]
    2. distribute-neg-frac298.2%

      \[\leadsto c \cdot \left(\color{blue}{\frac{2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}} + a \cdot c}{-{b}^{3}}} - \frac{1}{b}\right) \]
    3. fma-define98.2%

      \[\leadsto c \cdot \left(\frac{\color{blue}{\mathsf{fma}\left(2, \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}}, a \cdot c\right)}}{-{b}^{3}} - \frac{1}{b}\right) \]
    4. associate-/l*98.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, \color{blue}{{a}^{2} \cdot \frac{{c}^{2}}{{b}^{2}}}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    5. unpow298.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, \color{blue}{\left(a \cdot a\right)} \cdot \frac{{c}^{2}}{{b}^{2}}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    6. unpow298.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, \left(a \cdot a\right) \cdot \frac{\color{blue}{c \cdot c}}{{b}^{2}}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    7. unpow298.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, \left(a \cdot a\right) \cdot \frac{c \cdot c}{\color{blue}{b \cdot b}}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    8. times-frac98.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, \left(a \cdot a\right) \cdot \color{blue}{\left(\frac{c}{b} \cdot \frac{c}{b}\right)}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    9. swap-sqr98.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, \color{blue}{\left(a \cdot \frac{c}{b}\right) \cdot \left(a \cdot \frac{c}{b}\right)}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    10. unpow298.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, \color{blue}{{\left(a \cdot \frac{c}{b}\right)}^{2}}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    11. associate-*r/98.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, {\color{blue}{\left(\frac{a \cdot c}{b}\right)}}^{2}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    12. *-commutative98.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, {\left(\frac{\color{blue}{c \cdot a}}{b}\right)}^{2}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    13. associate-/l*98.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, {\color{blue}{\left(c \cdot \frac{a}{b}\right)}}^{2}, a \cdot c\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
    14. *-commutative98.2%

      \[\leadsto c \cdot \left(\frac{\mathsf{fma}\left(2, {\left(c \cdot \frac{a}{b}\right)}^{2}, \color{blue}{c \cdot a}\right)}{-{b}^{3}} - \frac{1}{b}\right) \]
  8. Simplified98.2%

    \[\leadsto c \cdot \left(\color{blue}{\frac{\mathsf{fma}\left(2, {\left(c \cdot \frac{a}{b}\right)}^{2}, c \cdot a\right)}{-{b}^{3}}} - \frac{1}{b}\right) \]
  9. Final simplification98.2%

    \[\leadsto c \cdot \left(\frac{-1}{b} - \frac{\mathsf{fma}\left(2, {\left(c \cdot \frac{a}{b}\right)}^{2}, c \cdot a\right)}{{b}^{3}}\right) \]
  10. Add Preprocessing

Alternative 5: 94.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ a \cdot \frac{\frac{c}{a} + {\left(\frac{c}{b}\right)}^{2}}{-b} \end{array} \]
; Alternative 5: series rewrite a * ((c/a + (c/b)^2) / -b) of the quadratic root.
(FPCore (a b c)
 :precision binary64
 (* a (/ (+ (/ c a) (pow (/ c b) 2.0)) (- b))))
double code(double a, double b, double c) {
	return a * (((c / a) + pow((c / b), 2.0)) / -b);
}
! Alternative 5: series rewrite a * ((c/a + (c/b)**2) / -b) of the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = a * (((c / a) + ((c / b) ** 2.0d0)) / -b)
end function
// Alternative 5: series rewrite a * ((c/a + (c/b)^2) / -b) of the quadratic root.
public static double code(double a, double b, double c) {
	return a * (((c / a) + Math.pow((c / b), 2.0)) / -b);
}
def code(a, b, c):
    """Alternative 5: series rewrite a * ((c/a + (c/b)**2) / -b) of the quadratic root."""
    numerator = (c / a) + math.pow(c / b, 2.0)
    return a * (numerator / -b)
# Alternative 5: series rewrite a * ((c/a + (c/b)^2) / -b), Float64 rounding at each step.
function code(a, b, c)
	return Float64(a * Float64(Float64(Float64(c / a) + (Float64(c / b) ^ 2.0)) / Float64(-b)))
end
% Alternative 5: series rewrite a * ((c/a + (c/b)^2) / -b) of the quadratic root.
function tmp = code(a, b, c)
	tmp = a * (((c / a) + ((c / b) ^ 2.0)) / -b);
end
(* Alternative 5: series rewrite a * ((c/a + (c/b)^2) / -b), rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(a * N[(N[(N[(c / a), $MachinePrecision] + N[Power[N[(c / b), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / (-b)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a \cdot \frac{\frac{c}{a} + {\left(\frac{c}{b}\right)}^{2}}{-b}
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 97.1%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
  6. Step-by-step derivation
    1. mul-1-neg97.1%

      \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
    2. unsub-neg97.1%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    3. associate-*r/97.1%

      \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    4. mul-1-neg97.1%

      \[\leadsto \frac{\color{blue}{-c}}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    5. associate-/l*97.1%

      \[\leadsto \frac{-c}{b} - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  7. Simplified97.1%

    \[\leadsto \color{blue}{\frac{-c}{b} - a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  8. Taylor expanded in a around inf 96.7%

    \[\leadsto \color{blue}{a \cdot \left(-1 \cdot \frac{c}{a \cdot b} - \frac{{c}^{2}}{{b}^{3}}\right)} \]
  9. Step-by-step derivation
    1. associate-*r/96.7%

      \[\leadsto a \cdot \left(\color{blue}{\frac{-1 \cdot c}{a \cdot b}} - \frac{{c}^{2}}{{b}^{3}}\right) \]
    2. neg-mul-196.7%

      \[\leadsto a \cdot \left(\frac{\color{blue}{-c}}{a \cdot b} - \frac{{c}^{2}}{{b}^{3}}\right) \]
  10. Simplified96.7%

    \[\leadsto \color{blue}{a \cdot \left(\frac{-c}{a \cdot b} - \frac{{c}^{2}}{{b}^{3}}\right)} \]
  11. Taylor expanded in b around inf 96.7%

    \[\leadsto a \cdot \color{blue}{\frac{-1 \cdot \frac{c}{a} + -1 \cdot \frac{{c}^{2}}{{b}^{2}}}{b}} \]
  12. Step-by-step derivation
    1. distribute-lft-out96.7%

      \[\leadsto a \cdot \frac{\color{blue}{-1 \cdot \left(\frac{c}{a} + \frac{{c}^{2}}{{b}^{2}}\right)}}{b} \]
    2. associate-*r/96.7%

      \[\leadsto a \cdot \color{blue}{\left(-1 \cdot \frac{\frac{c}{a} + \frac{{c}^{2}}{{b}^{2}}}{b}\right)} \]
    3. mul-1-neg96.7%

      \[\leadsto a \cdot \color{blue}{\left(-\frac{\frac{c}{a} + \frac{{c}^{2}}{{b}^{2}}}{b}\right)} \]
    4. distribute-neg-frac296.7%

      \[\leadsto a \cdot \color{blue}{\frac{\frac{c}{a} + \frac{{c}^{2}}{{b}^{2}}}{-b}} \]
    5. unpow296.7%

      \[\leadsto a \cdot \frac{\frac{c}{a} + \frac{\color{blue}{c \cdot c}}{{b}^{2}}}{-b} \]
    6. unpow296.7%

      \[\leadsto a \cdot \frac{\frac{c}{a} + \frac{c \cdot c}{\color{blue}{b \cdot b}}}{-b} \]
    7. times-frac96.7%

      \[\leadsto a \cdot \frac{\frac{c}{a} + \color{blue}{\frac{c}{b} \cdot \frac{c}{b}}}{-b} \]
    8. unpow296.7%

      \[\leadsto a \cdot \frac{\frac{c}{a} + \color{blue}{{\left(\frac{c}{b}\right)}^{2}}}{-b} \]
  13. Simplified96.7%

    \[\leadsto a \cdot \color{blue}{\frac{\frac{c}{a} + {\left(\frac{c}{b}\right)}^{2}}{-b}} \]
  14. Final simplification96.7%

    \[\leadsto a \cdot \frac{\frac{c}{a} + {\left(\frac{c}{b}\right)}^{2}}{-b} \]
  15. Add Preprocessing

Alternative 6: 95.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} - a \cdot \frac{c \cdot c}{{b}^{3}} \end{array} \]
; Alternative 6: two-term series c/(-b) - a*(c*c)/b^3 of the quadratic root.
(FPCore (a b c)
 :precision binary64
 (- (/ c (- b)) (* a (/ (* c c) (pow b 3.0)))))
double code(double a, double b, double c) {
	return (c / -b) - (a * ((c * c) / pow(b, 3.0)));
}
! Alternative 6: two-term series c/(-b) - a*(c*c)/b**3 of the quadratic root.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (c / -b) - (a * ((c * c) / (b ** 3.0d0)))
end function
// Alternative 6: two-term series c/(-b) - a*(c*c)/b^3 of the quadratic root.
public static double code(double a, double b, double c) {
	return (c / -b) - (a * ((c * c) / Math.pow(b, 3.0)));
}
def code(a, b, c):
    """Alternative 6: two-term series c/(-b) - a*(c*c)/b**3 of the quadratic root."""
    correction = a * ((c * c) / math.pow(b, 3.0))
    return (c / -b) - correction
# Alternative 6: two-term series c/(-b) - a*(c*c)/b^3, Float64 rounding at each step.
function code(a, b, c)
	return Float64(Float64(c / Float64(-b)) - Float64(a * Float64(Float64(c * c) / (b ^ 3.0))))
end
function tmp = code(a, b, c)
	% First-order series root: c/(-b) minus the a*c^2/b^3 correction term.
	linear = c / -b;
	correction = a * ((c * c) / (b ^ 3.0));
	tmp = linear - correction;
end
(* First-order series root c/(-b) - a*c^2/b^3, each step at machine precision. *)
code[a_, b_, c_] := N[(N[(c / (-b)), $MachinePrecision] - N[(a * N[(N[(c * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b} - a \cdot \frac{c \cdot c}{{b}^{3}}
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative (14.9%)

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 97.1%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
  6. Step-by-step derivation
    1. mul-1-neg97.1%

      \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
    2. unsub-neg97.1%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    3. associate-*r/97.1%

      \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    4. mul-1-neg97.1%

      \[\leadsto \frac{\color{blue}{-c}}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    5. associate-/l*97.1%

      \[\leadsto \frac{-c}{b} - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  7. Simplified97.1%

    \[\leadsto \color{blue}{\frac{-c}{b} - a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  8. Step-by-step derivation
    1. unpow297.1%

      \[\leadsto \frac{-c}{b} - a \cdot \frac{\color{blue}{c \cdot c}}{{b}^{3}} \]
  9. Applied egg-rr97.1%

    \[\leadsto \frac{-c}{b} - a \cdot \frac{\color{blue}{c \cdot c}}{{b}^{3}} \]
  10. Final simplification97.1%

    \[\leadsto \frac{c}{-b} - a \cdot \frac{c \cdot c}{{b}^{3}} \]
  11. Add Preprocessing

Alternative 7: 94.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ c \cdot \left(\frac{-1}{b} - \frac{c \cdot a}{{b}^{3}}\right) \end{array} \]
; Alternative 7: factored series root c * (-1/b - a*c/b^3) (94.9% accurate).
(FPCore (a b c)
 :precision binary64
 (* c (- (/ -1.0 b) (/ (* c a) (pow b 3.0)))))
double code(double a, double b, double c) {
	return c * ((-1.0 / b) - ((c * a) / pow(b, 3.0)));
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8) :: inv_term, quad_term
    ! Factored series root: c * (-1/b - a*c/b**3).
    inv_term = (-1.0d0) / b
    quad_term = (c * a) / (b ** 3.0d0)
    code = c * (inv_term - quad_term)
end function
public static double code(double a, double b, double c) {
	// Factored series root: c * (-1/b - a*c/b^3).
	double invTerm = -1.0 / b;
	double quadTerm = (c * a) / Math.pow(b, 3.0);
	return c * (invTerm - quadTerm);
}
def code(a, b, c):
	# Factored series root: c * (-1/b - a*c/b^3).
	inv_term = -1.0 / b
	quad_term = (c * a) / math.pow(b, 3.0)
	return c * (inv_term - quad_term)
function code(a, b, c)
	# Factored series root: c * (-1/b - a*c/b^3).
	inv_term = Float64(-1.0 / b)
	quad_term = Float64(Float64(c * a) / (b ^ 3.0))
	return Float64(c * Float64(inv_term - quad_term))
end
function tmp = code(a, b, c)
	% Factored series root: c * (-1/b - a*c/b^3).
	inv_term = -1.0 / b;
	quad_term = (c * a) / (b ^ 3.0);
	tmp = c * (inv_term - quad_term);
end
(* Factored series root c * (-1/b - a*c/b^3), each step at machine precision. *)
code[a_, b_, c_] := N[(c * N[(N[(-1.0 / b), $MachinePrecision] - N[(N[(c * a), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(\frac{-1}{b} - \frac{c \cdot a}{{b}^{3}}\right)
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in c around 0 96.8%

    \[\leadsto \color{blue}{c \cdot \left(-1 \cdot \frac{a \cdot c}{{b}^{3}} - \frac{1}{b}\right)} \]
  6. Step-by-step derivation
    1. associate-*r/96.8%

      \[\leadsto c \cdot \left(\color{blue}{\frac{-1 \cdot \left(a \cdot c\right)}{{b}^{3}}} - \frac{1}{b}\right) \]
    2. neg-mul-196.8%

      \[\leadsto c \cdot \left(\frac{\color{blue}{-a \cdot c}}{{b}^{3}} - \frac{1}{b}\right) \]
    3. distribute-rgt-neg-in96.8%

      \[\leadsto c \cdot \left(\frac{\color{blue}{a \cdot \left(-c\right)}}{{b}^{3}} - \frac{1}{b}\right) \]
  7. Simplified96.8%

    \[\leadsto \color{blue}{c \cdot \left(\frac{a \cdot \left(-c\right)}{{b}^{3}} - \frac{1}{b}\right)} \]
  8. Final simplification96.8%

    \[\leadsto c \cdot \left(\frac{-1}{b} - \frac{c \cdot a}{{b}^{3}}\right) \]
  9. Add Preprocessing

Alternative 8: 90.5% accurate, 29.0× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} \end{array} \]
; Alternative 8: dominant-term approximation c/(-b) of the root (90.5% accurate; a unused).
(FPCore (a b c) :precision binary64 (/ c (- b)))
double code(double a, double b, double c) {
	/* Dominant-term approximation of the root: c / (-b). Parameter a is unused. */
	double neg_b = -b;
	return c / neg_b;
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8) :: neg_b
    ! Dominant-term approximation of the root: c / (-b). Argument a is unused.
    neg_b = -b
    code = c / neg_b
end function
public static double code(double a, double b, double c) {
	// Dominant-term approximation of the root: c / (-b). Parameter a is unused.
	double negB = -b;
	return c / negB;
}
def code(a, b, c):
	# Dominant-term approximation of the root: c / (-b). Parameter a is unused.
	neg_b = -b
	return c / neg_b
function code(a, b, c)
	# Dominant-term approximation of the root: c / (-b). Argument a is unused.
	neg_b = Float64(-b)
	return Float64(c / neg_b)
end
function tmp = code(a, b, c)
	% Dominant-term approximation of the root: c / (-b). Argument a is unused.
	neg_b = -b;
	tmp = c / neg_b;
end
(* Dominant-term approximation of the root: c / (-b); a is unused. *)
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b}
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in b around inf 92.5%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
  6. Step-by-step derivation
    1. associate-*r/92.5%

      \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} \]
    2. mul-1-neg92.5%

      \[\leadsto \frac{\color{blue}{-c}}{b} \]
  7. Simplified92.5%

    \[\leadsto \color{blue}{\frac{-c}{b}} \]
  8. Final simplification92.5%

    \[\leadsto \frac{c}{-b} \]
  9. Add Preprocessing

Alternative 9: 3.3% accurate, 116.0× speedup?

\[\begin{array}{l} \\ 0 \end{array} \]
; Alternative 9: degenerate constant 0 after full Taylor truncation (3.3% accurate).
(FPCore (a b c) :precision binary64 0.0)
double code(double a, double b, double c) {
	/* Degenerate constant alternative: every term Taylor-truncated away; all arguments unused. */
	return 0.0;
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    ! Degenerate constant alternative: every term Taylor-truncated away; all arguments unused.
    code = 0.0d0
end function
public static double code(double a, double b, double c) {
	// Degenerate constant alternative: every term Taylor-truncated away; all arguments unused.
	return 0.0;
}
def code(a, b, c):
	# Degenerate constant alternative: every term Taylor-truncated away; all arguments unused.
	return 0.0
function code(a, b, c)
	# Degenerate constant alternative: every term Taylor-truncated away; all arguments unused.
	return 0.0
end
function tmp = code(a, b, c)
	% Degenerate constant alternative: every term Taylor-truncated away; all arguments unused.
	tmp = 0.0;
end
(* Degenerate constant alternative: always 0.0; all arguments unused. *)
code[a_, b_, c_] := 0.0
\begin{array}{l}

\\
0
\end{array}
Derivation
  1. Initial program 14.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative14.9%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified14.9%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 97.1%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
  6. Step-by-step derivation
    1. mul-1-neg97.1%

      \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
    2. unsub-neg97.1%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    3. associate-*r/97.1%

      \[\leadsto \color{blue}{\frac{-1 \cdot c}{b}} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    4. mul-1-neg97.1%

      \[\leadsto \frac{\color{blue}{-c}}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    5. associate-/l*97.1%

      \[\leadsto \frac{-c}{b} - \color{blue}{a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  7. Simplified97.1%

    \[\leadsto \color{blue}{\frac{-c}{b} - a \cdot \frac{{c}^{2}}{{b}^{3}}} \]
  8. Step-by-step derivation
    1. expm1-log1p-u84.6%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\frac{-c}{b} - a \cdot \frac{{c}^{2}}{{b}^{3}}\right)\right)} \]
    2. *-commutative84.6%

      \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\frac{-c}{b} - \color{blue}{\frac{{c}^{2}}{{b}^{3}} \cdot a}\right)\right) \]
    3. div-inv84.6%

      \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\frac{-c}{b} - \color{blue}{\left({c}^{2} \cdot \frac{1}{{b}^{3}}\right)} \cdot a\right)\right) \]
    4. pow-flip84.6%

      \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\frac{-c}{b} - \left({c}^{2} \cdot \color{blue}{{b}^{\left(-3\right)}}\right) \cdot a\right)\right) \]
    5. metadata-eval84.6%

      \[\leadsto \mathsf{expm1}\left(\mathsf{log1p}\left(\frac{-c}{b} - \left({c}^{2} \cdot {b}^{\color{blue}{-3}}\right) \cdot a\right)\right) \]
  9. Applied egg-rr84.6%

    \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\frac{-c}{b} - \left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)\right)} \]
  10. Step-by-step derivation
    1. expm1-undefine21.3%

      \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\frac{-c}{b} - \left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)} - 1} \]
    2. sub-neg21.3%

      \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\frac{-c}{b} - \left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)} + \left(-1\right)} \]
    3. log1p-undefine21.3%

      \[\leadsto e^{\color{blue}{\log \left(1 + \left(\frac{-c}{b} - \left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)\right)}} + \left(-1\right) \]
    4. rem-exp-log33.9%

      \[\leadsto \color{blue}{\left(1 + \left(\frac{-c}{b} - \left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)\right)} + \left(-1\right) \]
    5. sub-neg33.9%

      \[\leadsto \left(1 + \color{blue}{\left(\frac{-c}{b} + \left(-\left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)\right)}\right) + \left(-1\right) \]
    6. distribute-frac-neg33.9%

      \[\leadsto \left(1 + \left(\color{blue}{\left(-\frac{c}{b}\right)} + \left(-\left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)\right)\right) + \left(-1\right) \]
    7. distribute-neg-out33.9%

      \[\leadsto \left(1 + \color{blue}{\left(-\left(\frac{c}{b} + \left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)\right)}\right) + \left(-1\right) \]
    8. unsub-neg33.9%

      \[\leadsto \color{blue}{\left(1 - \left(\frac{c}{b} + \left({c}^{2} \cdot {b}^{-3}\right) \cdot a\right)\right)} + \left(-1\right) \]
    9. *-commutative33.9%

      \[\leadsto \left(1 - \left(\frac{c}{b} + \color{blue}{a \cdot \left({c}^{2} \cdot {b}^{-3}\right)}\right)\right) + \left(-1\right) \]
    10. metadata-eval33.9%

      \[\leadsto \left(1 - \left(\frac{c}{b} + a \cdot \left({c}^{2} \cdot {b}^{-3}\right)\right)\right) + \color{blue}{-1} \]
  11. Simplified33.9%

    \[\leadsto \color{blue}{\left(1 - \left(\frac{c}{b} + a \cdot \left({c}^{2} \cdot {b}^{-3}\right)\right)\right) + -1} \]
  12. Taylor expanded in c around 0 31.8%

    \[\leadsto \left(1 - \color{blue}{\frac{c}{b}}\right) + -1 \]
  13. Taylor expanded in c around 0 3.3%

    \[\leadsto \color{blue}{1} + -1 \]
  14. Final simplification3.3%

    \[\leadsto 0 \]
  15. Add Preprocessing

Reproduce

?
herbie shell --seed 2024085 
; Original Herbie input: quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a),
; with all three coefficients constrained to roughly [2^-104, 2^104].
(FPCore (a b c)
  :name "Quadratic roots, wide range"
  :precision binary64
  :pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))