Quadratic roots, wide range

Percentage Accurate: 18.2% → 97.6%
Time: 12.0s
Alternatives: 5
Speedup: 29.0×

Specification

?
\[\left(\left(4.930380657631324 \cdot 10^{-32} < a \land a < 2.028240960365167 \cdot 10^{+31}\right) \land \left(4.930380657631324 \cdot 10^{-32} < b \land b < 2.028240960365167 \cdot 10^{+31}\right)\right) \land \left(4.930380657631324 \cdot 10^{-32} < c \land c < 2.028240960365167 \cdot 10^{+31}\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Specification: the "+" quadratic root (-b + sqrt(b*b - 4ac)) / (2a) in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! The "+" quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), in double precision.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// The "+" quadratic root (-b + sqrt(b^2 - 4ac)) / (2a), evaluated in binary64.
public static double code(double a, double b, double c) {
	double discriminant = b * b - (4.0 * a) * c;
	return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
	# The "+" quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a).
	discriminant = (b * b) - ((4.0 * a) * c)
	return (math.sqrt(discriminant) - b) / (2.0 * a)
# The "+" quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a);
# Float64() wrappers pin every intermediate to binary64.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	% The "+" quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), in double precision.
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 5 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 18.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Initial program (18.2% accurate per the report header): direct quadratic formula.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// Initial program: the "+" quadratic root evaluated directly in binary64.
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program: the "+" quadratic root evaluated directly in double precision.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program: the "+" quadratic root evaluated directly in binary64.
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	# Initial program: the "+" quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Initial program: direct quadratic formula; Float64() pins intermediates to binary64.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	% Initial program: the "+" quadratic root evaluated directly in double precision.
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 97.6% accurate, 0.1× speedup?

\[\begin{array}{l} \\ -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(\left(-0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + 4 \cdot {\left(a \cdot c\right)}^{4}}{a \cdot {b}^{7}} - \frac{a \cdot {c}^{2}}{{b}^{3}}\right) - \frac{c}{b}\right) \end{array} \]
; Alternative 1 (97.6% accurate, 0.1x speedup): Taylor expansion of the root
; in b around infinity, per the derivation below.
(FPCore (a b c)
 :precision binary64
 (+
  (* -2.0 (/ (* (pow a 2.0) (pow c 3.0)) (pow b 5.0)))
  (-
   (-
    (*
     -0.25
     (/
      (+ (* 16.0 (* (pow a 4.0) (pow c 4.0))) (* 4.0 (pow (* a c) 4.0)))
      (* a (pow b 7.0))))
    (/ (* a (pow c 2.0)) (pow b 3.0)))
   (/ c b))))
// Alternative 1 (97.6% accurate, 0.1x speedup): series expansion of the root in b
// around infinity. Evaluation order is deliberate; do not re-associate.
double code(double a, double b, double c) {
	return (-2.0 * ((pow(a, 2.0) * pow(c, 3.0)) / pow(b, 5.0))) + (((-0.25 * (((16.0 * (pow(a, 4.0) * pow(c, 4.0))) + (4.0 * pow((a * c), 4.0))) / (a * pow(b, 7.0)))) - ((a * pow(c, 2.0)) / pow(b, 3.0))) - (c / b));
}
! Alternative 1 (97.6% accurate): series expansion of the root in b around infinity.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((-2.0d0) * (((a ** 2.0d0) * (c ** 3.0d0)) / (b ** 5.0d0))) + ((((-0.25d0) * (((16.0d0 * ((a ** 4.0d0) * (c ** 4.0d0))) + (4.0d0 * ((a * c) ** 4.0d0))) / (a * (b ** 7.0d0)))) - ((a * (c ** 2.0d0)) / (b ** 3.0d0))) - (c / b))
end function
// Alternative 1 (97.6% accurate): series expansion of the root in b around infinity.
public static double code(double a, double b, double c) {
	return (-2.0 * ((Math.pow(a, 2.0) * Math.pow(c, 3.0)) / Math.pow(b, 5.0))) + (((-0.25 * (((16.0 * (Math.pow(a, 4.0) * Math.pow(c, 4.0))) + (4.0 * Math.pow((a * c), 4.0))) / (a * Math.pow(b, 7.0)))) - ((a * Math.pow(c, 2.0)) / Math.pow(b, 3.0))) - (c / b));
}
def code(a, b, c):
	# Alternative 1 (97.6% accurate): Taylor expansion of the quadratic root in b
	# around infinity. Each named term preserves the original evaluation order
	# exactly, so the rounding behaviour is unchanged.
	fifth_order = -2.0 * ((math.pow(a, 2.0) * math.pow(c, 3.0)) / math.pow(b, 5.0))
	numerator = (16.0 * (math.pow(a, 4.0) * math.pow(c, 4.0))) + (4.0 * math.pow((a * c), 4.0))
	seventh_order = -0.25 * (numerator / (a * math.pow(b, 7.0)))
	third_order = (a * math.pow(c, 2.0)) / math.pow(b, 3.0)
	return fifth_order + ((seventh_order - third_order) - (c / b))
# Alternative 1 (97.6% accurate): series expansion in b around infinity;
# Float64() wrappers pin every intermediate to binary64.
function code(a, b, c)
	return Float64(Float64(-2.0 * Float64(Float64((a ^ 2.0) * (c ^ 3.0)) / (b ^ 5.0))) + Float64(Float64(Float64(-0.25 * Float64(Float64(Float64(16.0 * Float64((a ^ 4.0) * (c ^ 4.0))) + Float64(4.0 * (Float64(a * c) ^ 4.0))) / Float64(a * (b ^ 7.0)))) - Float64(Float64(a * (c ^ 2.0)) / (b ^ 3.0))) - Float64(c / b)))
end
function tmp = code(a, b, c)
	% Alternative 1 (97.6% accurate): series expansion of the root in b around infinity.
	tmp = (-2.0 * (((a ^ 2.0) * (c ^ 3.0)) / (b ^ 5.0))) + (((-0.25 * (((16.0 * ((a ^ 4.0) * (c ^ 4.0))) + (4.0 * ((a * c) ^ 4.0))) / (a * (b ^ 7.0)))) - ((a * (c ^ 2.0)) / (b ^ 3.0))) - (c / b));
end
code[a_, b_, c_] := N[(N[(-2.0 * N[(N[(N[Power[a, 2.0], $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(-0.25 * N[(N[(N[(16.0 * N[(N[Power[a, 4.0], $MachinePrecision] * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(4.0 * N[Power[N[(a * c), $MachinePrecision], 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(\left(-0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + 4 \cdot {\left(a \cdot c\right)}^{4}}{a \cdot {b}^{7}} - \frac{a \cdot {c}^{2}}{{b}^{3}}\right) - \frac{c}{b}\right)
\end{array}
Derivation
  1. Initial program 15.5%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.5%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.5%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Taylor expanded in b around inf 98.0%

    \[\leadsto \color{blue}{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + {\left(-2 \cdot \left({a}^{2} \cdot {c}^{2}\right)\right)}^{2}}{a \cdot {b}^{7}}\right)\right)} \]
  5. Step-by-step derivation
    1. *-commutative98.0%

      \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + {\color{blue}{\left(\left({a}^{2} \cdot {c}^{2}\right) \cdot -2\right)}}^{2}}{a \cdot {b}^{7}}\right)\right) \]
    2. unpow-prod-down98.0%

      \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + \color{blue}{{\left({a}^{2} \cdot {c}^{2}\right)}^{2} \cdot {-2}^{2}}}{a \cdot {b}^{7}}\right)\right) \]
    3. pow-prod-down98.0%

      \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + {\color{blue}{\left({\left(a \cdot c\right)}^{2}\right)}}^{2} \cdot {-2}^{2}}{a \cdot {b}^{7}}\right)\right) \]
    4. pow-pow98.0%

      \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + \color{blue}{{\left(a \cdot c\right)}^{\left(2 \cdot 2\right)}} \cdot {-2}^{2}}{a \cdot {b}^{7}}\right)\right) \]
    5. metadata-eval98.0%

      \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + {\left(a \cdot c\right)}^{\color{blue}{4}} \cdot {-2}^{2}}{a \cdot {b}^{7}}\right)\right) \]
    6. metadata-eval98.0%

      \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + {\left(a \cdot c\right)}^{4} \cdot \color{blue}{4}}{a \cdot {b}^{7}}\right)\right) \]
  6. Applied egg-rr98.0%

    \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + -0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + \color{blue}{{\left(a \cdot c\right)}^{4} \cdot 4}}{a \cdot {b}^{7}}\right)\right) \]
  7. Final simplification98.0%

    \[\leadsto -2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(\left(-0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + 4 \cdot {\left(a \cdot c\right)}^{4}}{a \cdot {b}^{7}} - \frac{a \cdot {c}^{2}}{{b}^{3}}\right) - \frac{c}{b}\right) \]

Alternative 2: 97.1% accurate, 0.1× speedup?

\[\begin{array}{l} \\ \frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, c \cdot \frac{a}{b} + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\frac{\frac{{b}^{7}}{{\left(a \cdot c\right)}^{4}}}{20}}\right)\right)}{a \cdot 2} \end{array} \]
; Alternative 2 (97.1% accurate, 0.1x speedup): series expansion rewritten with
; fused multiply-adds, per the derivation below.
(FPCore (a b c)
 :precision binary64
 (/
  (fma
   -4.0
   (/ (pow (* a c) 3.0) (pow b 5.0))
   (fma
    -2.0
    (+ (* c (/ a b)) (/ (pow a 2.0) (/ (pow b 3.0) (pow c 2.0))))
    (/ -0.5 (/ (/ (pow b 7.0) (pow (* a c) 4.0)) 20.0))))
  (* a 2.0)))
// Alternative 2 (97.1% accurate, 0.1x speedup): series expansion using C99 fma()
// from <math.h>. Evaluation order is deliberate; do not re-associate.
double code(double a, double b, double c) {
	return fma(-4.0, (pow((a * c), 3.0) / pow(b, 5.0)), fma(-2.0, ((c * (a / b)) + (pow(a, 2.0) / (pow(b, 3.0) / pow(c, 2.0)))), (-0.5 / ((pow(b, 7.0) / pow((a * c), 4.0)) / 20.0)))) / (a * 2.0);
}
# Alternative 2 (97.1% accurate): series expansion using fused multiply-add (fma);
# Float64() wrappers pin every intermediate to binary64.
function code(a, b, c)
	return Float64(fma(-4.0, Float64((Float64(a * c) ^ 3.0) / (b ^ 5.0)), fma(-2.0, Float64(Float64(c * Float64(a / b)) + Float64((a ^ 2.0) / Float64((b ^ 3.0) / (c ^ 2.0)))), Float64(-0.5 / Float64(Float64((b ^ 7.0) / (Float64(a * c) ^ 4.0)) / 20.0)))) / Float64(a * 2.0))
end
code[a_, b_, c_] := N[(N[(-4.0 * N[(N[Power[N[(a * c), $MachinePrecision], 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] + N[(-2.0 * N[(N[(c * N[(a / b), $MachinePrecision]), $MachinePrecision] + N[(N[Power[a, 2.0], $MachinePrecision] / N[(N[Power[b, 3.0], $MachinePrecision] / N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 / N[(N[(N[Power[b, 7.0], $MachinePrecision] / N[Power[N[(a * c), $MachinePrecision], 4.0], $MachinePrecision]), $MachinePrecision] / 20.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, c \cdot \frac{a}{b} + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\frac{\frac{{b}^{7}}{{\left(a \cdot c\right)}^{4}}}{20}}\right)\right)}{a \cdot 2}
\end{array}
Derivation
  1. Initial program 15.5%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.5%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.5%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Taylor expanded in b around inf 97.6%

    \[\leadsto \frac{\color{blue}{-4 \cdot \frac{{a}^{3} \cdot {c}^{3}}{{b}^{5}} + \left(-2 \cdot \frac{a \cdot c}{b} + \left(-2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{3}} + -0.5 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + {\left(-2 \cdot \left({a}^{2} \cdot {c}^{2}\right)\right)}^{2}}{{b}^{7}}\right)\right)}}{a \cdot 2} \]
  5. Simplified97.6%

    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, \frac{a}{b} \cdot c + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\frac{{b}^{7}}{\mathsf{fma}\left(16, {a}^{4} \cdot {c}^{4}, 4 \cdot \left({a}^{4} \cdot {c}^{4}\right)\right)}}\right)\right)}}{a \cdot 2} \]
  6. Taylor expanded in b around 0 97.6%

    \[\leadsto \frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, \frac{a}{b} \cdot c + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\color{blue}{\frac{{b}^{7}}{4 \cdot \left({a}^{4} \cdot {c}^{4}\right) + 16 \cdot \left({a}^{4} \cdot {c}^{4}\right)}}}\right)\right)}{a \cdot 2} \]
  7. Step-by-step derivation
    1. distribute-rgt-out97.6%

      \[\leadsto \frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, \frac{a}{b} \cdot c + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\frac{{b}^{7}}{\color{blue}{\left({a}^{4} \cdot {c}^{4}\right) \cdot \left(4 + 16\right)}}}\right)\right)}{a \cdot 2} \]
    2. associate-/r*97.6%

      \[\leadsto \frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, \frac{a}{b} \cdot c + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\color{blue}{\frac{\frac{{b}^{7}}{{a}^{4} \cdot {c}^{4}}}{4 + 16}}}\right)\right)}{a \cdot 2} \]
  8. Simplified97.6%

    \[\leadsto \frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, \frac{a}{b} \cdot c + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\color{blue}{\frac{\frac{{b}^{7}}{{\left(a \cdot c\right)}^{4}}}{20}}}\right)\right)}{a \cdot 2} \]
  9. Final simplification97.6%

    \[\leadsto \frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, \mathsf{fma}\left(-2, c \cdot \frac{a}{b} + \frac{{a}^{2}}{\frac{{b}^{3}}{{c}^{2}}}, \frac{-0.5}{\frac{\frac{{b}^{7}}{{\left(a \cdot c\right)}^{4}}}{20}}\right)\right)}{a \cdot 2} \]

Alternative 3: 96.8% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \left(\frac{-2 \cdot \left({a}^{2} \cdot {c}^{3}\right)}{{b}^{5}} - \frac{c}{b}\right) - \frac{a}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}} \end{array} \]
; Alternative 3 (96.8% accurate, 0.3x speedup): three-term series expansion in b
; around infinity, per the derivation below.
(FPCore (a b c)
 :precision binary64
 (-
  (- (/ (* -2.0 (* (pow a 2.0) (pow c 3.0))) (pow b 5.0)) (/ c b))
  (/ a (* (/ (pow b 2.0) c) (/ b c)))))
// Alternative 3 (96.8% accurate, 0.3x speedup): three-term series expansion in b
// around infinity. Evaluation order is deliberate; do not re-associate.
double code(double a, double b, double c) {
	return (((-2.0 * (pow(a, 2.0) * pow(c, 3.0))) / pow(b, 5.0)) - (c / b)) - (a / ((pow(b, 2.0) / c) * (b / c)));
}
! Alternative 3 (96.8% accurate): three-term series expansion in b around infinity.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((((-2.0d0) * ((a ** 2.0d0) * (c ** 3.0d0))) / (b ** 5.0d0)) - (c / b)) - (a / (((b ** 2.0d0) / c) * (b / c)))
end function
// Alternative 3 (96.8% accurate): three-term series expansion in b around infinity.
public static double code(double a, double b, double c) {
	return (((-2.0 * (Math.pow(a, 2.0) * Math.pow(c, 3.0))) / Math.pow(b, 5.0)) - (c / b)) - (a / ((Math.pow(b, 2.0) / c) * (b / c)));
}
def code(a, b, c):
	# Alternative 3 (96.8% accurate): three-term series expansion of the quadratic
	# root in b around infinity. Named terms preserve the original evaluation
	# order exactly, so the rounding behaviour is unchanged.
	fifth_order = (-2.0 * (math.pow(a, 2.0) * math.pow(c, 3.0))) / math.pow(b, 5.0)
	first_order = c / b
	third_order = a / ((math.pow(b, 2.0) / c) * (b / c))
	return (fifth_order - first_order) - third_order
# Alternative 3 (96.8% accurate): three-term series expansion in b around infinity;
# Float64() wrappers pin every intermediate to binary64.
function code(a, b, c)
	return Float64(Float64(Float64(Float64(-2.0 * Float64((a ^ 2.0) * (c ^ 3.0))) / (b ^ 5.0)) - Float64(c / b)) - Float64(a / Float64(Float64((b ^ 2.0) / c) * Float64(b / c))))
end
function tmp = code(a, b, c)
	% Alternative 3 (96.8% accurate): three-term series expansion in b around infinity.
	tmp = (((-2.0 * ((a ^ 2.0) * (c ^ 3.0))) / (b ^ 5.0)) - (c / b)) - (a / (((b ^ 2.0) / c) * (b / c)));
end
code[a_, b_, c_] := N[(N[(N[(N[(-2.0 * N[(N[Power[a, 2.0], $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(a / N[(N[(N[Power[b, 2.0], $MachinePrecision] / c), $MachinePrecision] * N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(\frac{-2 \cdot \left({a}^{2} \cdot {c}^{3}\right)}{{b}^{5}} - \frac{c}{b}\right) - \frac{a}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}}
\end{array}
Derivation
  1. Initial program 15.5%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.5%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.5%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Taylor expanded in b around inf 97.4%

    \[\leadsto \color{blue}{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
  5. Step-by-step derivation
    1. associate-+r+97.4%

      \[\leadsto \color{blue}{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{c}{b}\right) + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    2. mul-1-neg97.4%

      \[\leadsto \left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{c}{b}\right) + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
    3. unsub-neg97.4%

      \[\leadsto \color{blue}{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + -1 \cdot \frac{c}{b}\right) - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    4. mul-1-neg97.4%

      \[\leadsto \left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \color{blue}{\left(-\frac{c}{b}\right)}\right) - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    5. unsub-neg97.4%

      \[\leadsto \color{blue}{\left(-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} - \frac{c}{b}\right)} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    6. associate-*r/97.4%

      \[\leadsto \left(\color{blue}{\frac{-2 \cdot \left({a}^{2} \cdot {c}^{3}\right)}{{b}^{5}}} - \frac{c}{b}\right) - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    7. *-commutative97.4%

      \[\leadsto \left(\frac{-2 \cdot \color{blue}{\left({c}^{3} \cdot {a}^{2}\right)}}{{b}^{5}} - \frac{c}{b}\right) - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    8. associate-/l*97.4%

      \[\leadsto \left(\frac{-2 \cdot \left({c}^{3} \cdot {a}^{2}\right)}{{b}^{5}} - \frac{c}{b}\right) - \color{blue}{\frac{a}{\frac{{b}^{3}}{{c}^{2}}}} \]
  6. Simplified97.4%

    \[\leadsto \color{blue}{\left(\frac{-2 \cdot \left({c}^{3} \cdot {a}^{2}\right)}{{b}^{5}} - \frac{c}{b}\right) - \frac{a}{\frac{{b}^{3}}{{c}^{2}}}} \]
  7. Step-by-step derivation
    1. unpow396.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\frac{\color{blue}{\left(b \cdot b\right) \cdot b}}{{c}^{2}}} \]
    2. unpow296.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\frac{\left(b \cdot b\right) \cdot b}{\color{blue}{c \cdot c}}} \]
    3. times-frac96.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\color{blue}{\frac{b \cdot b}{c} \cdot \frac{b}{c}}} \]
    4. pow296.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\frac{\color{blue}{{b}^{2}}}{c} \cdot \frac{b}{c}} \]
  8. Applied egg-rr97.4%

    \[\leadsto \left(\frac{-2 \cdot \left({c}^{3} \cdot {a}^{2}\right)}{{b}^{5}} - \frac{c}{b}\right) - \frac{a}{\color{blue}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}}} \]
  9. Final simplification97.4%

    \[\leadsto \left(\frac{-2 \cdot \left({a}^{2} \cdot {c}^{3}\right)}{{b}^{5}} - \frac{c}{b}\right) - \frac{a}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}} \]

Alternative 4: 95.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{-c}{b} - \frac{a}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}} \end{array} \]
; Alternative 4 (95.1% accurate, 1.0x speedup): two-term series expansion in b
; around infinity, per the derivation below.
(FPCore (a b c)
 :precision binary64
 (- (/ (- c) b) (/ a (* (/ (pow b 2.0) c) (/ b c)))))
// Alternative 4 (95.1% accurate, 1.0x speedup): two-term series expansion
// -c/b - a*c^2/b^3 with the second term factored to avoid overflow in b^3.
double code(double a, double b, double c) {
	return (-c / b) - (a / ((pow(b, 2.0) / c) * (b / c)));
}
! Alternative 4 (95.1% accurate): two-term series expansion in b around infinity.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-c / b) - (a / (((b ** 2.0d0) / c) * (b / c)))
end function
// Alternative 4 (95.1% accurate): two-term series expansion in b around infinity.
public static double code(double a, double b, double c) {
	return (-c / b) - (a / ((Math.pow(b, 2.0) / c) * (b / c)));
}
def code(a, b, c):
	# Alternative 4 (95.1% accurate): two-term series expansion of the quadratic
	# root in b around infinity. Operation order matches the original exactly.
	leading_term = -c / b
	correction = a / ((math.pow(b, 2.0) / c) * (b / c))
	return leading_term - correction
# Alternative 4 (95.1% accurate): two-term series expansion in b around infinity;
# Float64() wrappers pin every intermediate to binary64.
function code(a, b, c)
	return Float64(Float64(Float64(-c) / b) - Float64(a / Float64(Float64((b ^ 2.0) / c) * Float64(b / c))))
end
function tmp = code(a, b, c)
	% Alternative 4 (95.1% accurate): two-term series expansion in b around infinity.
	tmp = (-c / b) - (a / (((b ^ 2.0) / c) * (b / c)));
end
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(a / N[(N[(N[Power[b, 2.0], $MachinePrecision] / c), $MachinePrecision] * N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{-c}{b} - \frac{a}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}}
\end{array}
Derivation
  1. Initial program 15.5%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.5%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.5%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Taylor expanded in b around inf 96.1%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
  5. Step-by-step derivation
    1. mul-1-neg96.1%

      \[\leadsto -1 \cdot \frac{c}{b} + \color{blue}{\left(-\frac{a \cdot {c}^{2}}{{b}^{3}}\right)} \]
    2. unsub-neg96.1%

      \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} - \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
    3. mul-1-neg96.1%

      \[\leadsto \color{blue}{\left(-\frac{c}{b}\right)} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    4. distribute-neg-frac96.1%

      \[\leadsto \color{blue}{\frac{-c}{b}} - \frac{a \cdot {c}^{2}}{{b}^{3}} \]
    5. associate-/l*96.1%

      \[\leadsto \frac{-c}{b} - \color{blue}{\frac{a}{\frac{{b}^{3}}{{c}^{2}}}} \]
  6. Simplified96.1%

    \[\leadsto \color{blue}{\frac{-c}{b} - \frac{a}{\frac{{b}^{3}}{{c}^{2}}}} \]
  7. Step-by-step derivation
    1. unpow396.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\frac{\color{blue}{\left(b \cdot b\right) \cdot b}}{{c}^{2}}} \]
    2. unpow296.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\frac{\left(b \cdot b\right) \cdot b}{\color{blue}{c \cdot c}}} \]
    3. times-frac96.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\color{blue}{\frac{b \cdot b}{c} \cdot \frac{b}{c}}} \]
    4. pow296.1%

      \[\leadsto \frac{-c}{b} - \frac{a}{\frac{\color{blue}{{b}^{2}}}{c} \cdot \frac{b}{c}} \]
  8. Applied egg-rr96.1%

    \[\leadsto \frac{-c}{b} - \frac{a}{\color{blue}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}}} \]
  9. Final simplification96.1%

    \[\leadsto \frac{-c}{b} - \frac{a}{\frac{{b}^{2}}{c} \cdot \frac{b}{c}} \]

Alternative 5: 90.1% accurate, 29.0× speedup?

\[\begin{array}{l} \\ \frac{-c}{b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ (- c) b))
// Alternative 5 (90.1% accurate, 29.0x speedup): the series expansion in b around
// infinity truncated to its leading term -c/b; parameter a is unused.
double code(double a, double b, double c) {
	(void) a;  // intentionally unused in this truncated approximation
	return -(c / b);
}
! Alternative 5 (90.1% accurate, 29.0x speedup): leading series term -c/b; a is unused.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = -c / b
end function
// Alternative 5 (90.1% accurate, 29.0x speedup): leading series term -c/b; a is unused.
public static double code(double a, double b, double c) {
	return -c / b;
}
def code(a, b, c):
	# Alternative 5 (90.1% accurate, 29.0x speedup): the series expansion in b
	# around infinity truncated to its leading term -c/b; `a` is unused.
	return -(c / b)
# Alternative 5 (90.1% accurate, 29.0x speedup): leading series term -c/b; a is unused.
function code(a, b, c)
	return Float64(Float64(-c) / b)
end
function tmp = code(a, b, c)
	% Alternative 5 (90.1% accurate, 29.0x speedup): leading series term -c/b; a is unused.
	tmp = -c / b;
end
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{-c}{b}
\end{array}
Derivation
  1. Initial program 15.5%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Step-by-step derivation
    1. *-commutative15.5%

      \[\leadsto \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{\color{blue}{a \cdot 2}} \]
  3. Simplified15.5%

    \[\leadsto \color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{a \cdot 2}} \]
  4. Taylor expanded in b around inf 91.9%

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
  5. Step-by-step derivation
    1. mul-1-neg91.9%

      \[\leadsto \color{blue}{-\frac{c}{b}} \]
    2. distribute-neg-frac91.9%

      \[\leadsto \color{blue}{\frac{-c}{b}} \]
  6. Simplified91.9%

    \[\leadsto \color{blue}{\frac{-c}{b}} \]
  7. Final simplification91.9%

    \[\leadsto \frac{-c}{b} \]

Reproduce

?
herbie shell --seed 2023334 
; Reproduction input: feed this FPCore to `herbie shell --seed 2023334`.
; :pre restricts a, b, c to (4.93e-32, 2.03e+31), the sampled wide range.
(FPCore (a b c)
  :name "Quadratic roots, wide range"
  :precision binary64
  :pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))