Quadratic roots, wide range

Percentage Accurate: 18.3% → 97.6%
Time: 14.2s
Alternatives: 6
Speedup: 3.6×

Specification

?
\[\left(\left(4.930380657631324 \cdot 10^{-32} < a \land a < 2.028240960365167 \cdot 10^{+31}\right) \land \left(4.930380657631324 \cdot 10^{-32} < b \land b < 2.028240960365167 \cdot 10^{+31}\right)\right) \land \left(4.930380657631324 \cdot 10^{-32} < c \land c < 2.028240960365167 \cdot 10^{+31}\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
;; Larger root of a*x^2 + b*x + c via the textbook formula
;; (-b + sqrt(b^2 - 4ac)) / (2a), evaluated in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in real(8).
    ! Reported accuracy for this direct form is low (see report header).
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/**
 * Larger root of a*x^2 + b*x + c via the textbook quadratic formula
 * (binary64). The report measures low accuracy for this direct form.
 */
public static double code(double a, double b, double c) {
	final double discriminant = (b * b) - ((4.0 * a) * c);
	return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
	"""Larger root of a*x**2 + b*x + c via the textbook quadratic formula.

	Evaluated in binary64; the report measures low accuracy for this form
	because sqrt(discriminant) - b cancels when 4*a*c << b*b.
	"""
	discriminant = (b * b) - ((4.0 * a) * c)
	return (math.sqrt(discriminant) - b) / (2.0 * a)
# Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in Float64.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	% Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 6 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 18.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
;; Initial program: larger root (-b + sqrt(b^2 - 4ac)) / (2a) in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Initial program: larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Initial program: larger quadratic root via the textbook formula (binary64). */
public static double code(double a, double b, double c) {
	final double discriminant = (b * b) - ((4.0 * a) * c);
	return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
	"""Initial program: larger quadratic root via the textbook formula."""
	discriminant = (b * b) - ((4.0 * a) * c)
	return (math.sqrt(discriminant) - b) / (2.0 * a)
# Initial program: larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in Float64.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
	% Initial program: larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 97.6% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := b \cdot \left(b \cdot b\right)\\ \mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot \left(20 \cdot a\right)}{t\_0 \cdot \left(b \cdot t\_0\right)}, -0.25, \frac{c \cdot \left(c \cdot \left(c \cdot -2\right)\right)}{\left(b \cdot b\right) \cdot t\_0}\right), a \cdot a, \frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}\right) \end{array} \end{array} \]
;; Alternative 1: Taylor expansion of the root in `a` around 0, evaluated
;; with fused multiply-adds (97.6% accurate per the report).
(FPCore (a b c)
 :precision binary64
 (let* ((t_0 (* b (* b b))))
   (fma
    (fma
     (/ (* (* c (* c (* c c))) (* 20.0 a)) (* t_0 (* b t_0)))
     -0.25
     (/ (* c (* c (* c -2.0))) (* (* b b) t_0)))
    (* a a)
    (/ (fma (* c c) (/ a (* b b)) c) (- b)))))
/* Alternative 1: fma-based Taylor expansion of the root in `a` around 0
   (97.6% accurate per the report). The operation order mirrors the derived
   expression exactly; do not re-associate. t_0 = b^3. */
double code(double a, double b, double c) {
	double t_0 = b * (b * b);
	return fma(fma((((c * (c * (c * c))) * (20.0 * a)) / (t_0 * (b * t_0))), -0.25, ((c * (c * (c * -2.0))) / ((b * b) * t_0))), (a * a), (fma((c * c), (a / (b * b)), c) / -b));
}
# Alternative 1: fma-based Taylor expansion of the root in `a` around 0
# (97.6% accurate per the report). t_0 = b^3.
function code(a, b, c)
	t_0 = Float64(b * Float64(b * b))
	return fma(fma(Float64(Float64(Float64(c * Float64(c * Float64(c * c))) * Float64(20.0 * a)) / Float64(t_0 * Float64(b * t_0))), -0.25, Float64(Float64(c * Float64(c * Float64(c * -2.0))) / Float64(Float64(b * b) * t_0))), Float64(a * a), Float64(fma(Float64(c * c), Float64(a / Float64(b * b)), c) / Float64(-b)))
end
code[a_, b_, c_] := Block[{t$95$0 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(c * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(20.0 * a), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * N[(b * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.25 + N[(N[(c * N[(c * N[(c * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(b * b), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(a * a), $MachinePrecision] + N[(N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := b \cdot \left(b \cdot b\right)\\
\mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot \left(20 \cdot a\right)}{t\_0 \cdot \left(b \cdot t\_0\right)}, -0.25, \frac{c \cdot \left(c \cdot \left(c \cdot -2\right)\right)}{\left(b \cdot b\right) \cdot t\_0}\right), a \cdot a, \frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}\right)
\end{array}
\end{array}
Derivation
  1. Initial program 17.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{4} \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Applied rewrites98.4%

    \[\leadsto \color{blue}{\left(-\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b}\right) + \left(a \cdot a\right) \cdot \mathsf{fma}\left(\frac{{c}^{4} \cdot 20}{{b}^{6}} \cdot \frac{a}{b}, -0.25, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -2}{{b}^{5}}\right)} \]
  5. Applied rewrites98.4%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot \left(20 \cdot a\right)}{\left(b \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)}, -0.25, \frac{c \cdot \left(c \cdot \left(c \cdot -2\right)\right)}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}\right), a \cdot a, \frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}\right)} \]
  6. Add Preprocessing

Alternative 2: 96.7% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \frac{\left(-2 \cdot \left(a \cdot a\right)\right) \cdot \frac{c \cdot \left(c \cdot c\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)} - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b} \end{array} \]
;; Alternative 2: Taylor expansion of the root in `b` around infinity
;; (96.7% accurate per the report).
(FPCore (a b c)
 :precision binary64
 (/
  (-
   (* (* -2.0 (* a a)) (/ (* c (* c c)) (* (* b b) (* b b))))
   (fma (* c c) (/ a (* b b)) c))
  b))
/* Alternative 2: Taylor expansion of the root in b around infinity
   (96.7% accurate per the report). The operation order mirrors the
   derived expression exactly; do not re-associate. */
double code(double a, double b, double c) {
	return (((-2.0 * (a * a)) * ((c * (c * c)) / ((b * b) * (b * b)))) - fma((c * c), (a / (b * b)), c)) / b;
}
# Alternative 2: Taylor expansion of the root in b around infinity
# (96.7% accurate per the report).
function code(a, b, c)
	return Float64(Float64(Float64(Float64(-2.0 * Float64(a * a)) * Float64(Float64(c * Float64(c * c)) / Float64(Float64(b * b) * Float64(b * b)))) - fma(Float64(c * c), Float64(a / Float64(b * b)), c)) / b)
end
code[a_, b_, c_] := N[(N[(N[(N[(-2.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * N[(N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(N[(b * b), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-2 \cdot \left(a \cdot a\right)\right) \cdot \frac{c \cdot \left(c \cdot c\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)} - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b}
\end{array}
Derivation
  1. Initial program 17.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
  4. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
  5. Applied rewrites97.7%

    \[\leadsto \color{blue}{\frac{\left(-2 \cdot \left(a \cdot a\right)\right) \cdot \frac{c \cdot \left(c \cdot c\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)} - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b}} \]
  6. Add Preprocessing

Alternative 3: 95.1% accurate, 1.1× speedup?

\[\begin{array}{l} \\ -\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right) \end{array} \]
;; Alternative 3: first-order series -fma(a, c^2/b^3, c/b)
;; (95.1% accurate per the report).
(FPCore (a b c)
 :precision binary64
 (- (fma a (/ (* c c) (* b (* b b))) (/ c b))))
double code(double a, double b, double c) {
	return -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
# Alternative 3: first-order series -fma(a, c^2/b^3, c/b)
# (95.1% accurate per the report).
function code(a, b, c)
	return Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b)))
end
code[a_, b_, c_] := (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}

\\
-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)
\end{array}
Derivation
  1. Initial program 17.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b} + a \cdot \left(-1 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-2 \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{4} \cdot \frac{a \cdot \left(4 \cdot \frac{{c}^{4}}{{b}^{6}} + 16 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Applied rewrites98.4%

    \[\leadsto \color{blue}{\left(-\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b}\right) + \left(a \cdot a\right) \cdot \mathsf{fma}\left(\frac{{c}^{4} \cdot 20}{{b}^{6}} \cdot \frac{a}{b}, -0.25, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -2}{{b}^{5}}\right)} \]
  5. Taylor expanded in a around 0

    \[\leadsto \color{blue}{-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} - \frac{c}{b}} \]
  6. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \color{blue}{-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} + \left(\mathsf{neg}\left(\frac{c}{b}\right)\right)} \]
    2. mul-1-negN/A

      \[\leadsto \color{blue}{\left(\mathsf{neg}\left(\frac{a \cdot {c}^{2}}{{b}^{3}}\right)\right)} + \left(\mathsf{neg}\left(\frac{c}{b}\right)\right) \]
    3. distribute-neg-outN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\left(\frac{a \cdot {c}^{2}}{{b}^{3}} + \frac{c}{b}\right)\right)} \]
    4. +-commutativeN/A

      \[\leadsto \mathsf{neg}\left(\color{blue}{\left(\frac{c}{b} + \frac{a \cdot {c}^{2}}{{b}^{3}}\right)}\right) \]
    5. lower-neg.f64N/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\left(\frac{c}{b} + \frac{a \cdot {c}^{2}}{{b}^{3}}\right)\right)} \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{neg}\left(\color{blue}{\left(\frac{a \cdot {c}^{2}}{{b}^{3}} + \frac{c}{b}\right)}\right) \]
    7. associate-/l*N/A

      \[\leadsto \mathsf{neg}\left(\left(\color{blue}{a \cdot \frac{{c}^{2}}{{b}^{3}}} + \frac{c}{b}\right)\right) \]
    8. lower-fma.f64N/A

      \[\leadsto \mathsf{neg}\left(\color{blue}{\mathsf{fma}\left(a, \frac{{c}^{2}}{{b}^{3}}, \frac{c}{b}\right)}\right) \]
    9. lower-/.f64N/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \color{blue}{\frac{{c}^{2}}{{b}^{3}}}, \frac{c}{b}\right)\right) \]
    10. unpow2N/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \frac{\color{blue}{c \cdot c}}{{b}^{3}}, \frac{c}{b}\right)\right) \]
    11. lower-*.f64N/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \frac{\color{blue}{c \cdot c}}{{b}^{3}}, \frac{c}{b}\right)\right) \]
    12. cube-multN/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \frac{c \cdot c}{\color{blue}{b \cdot \left(b \cdot b\right)}}, \frac{c}{b}\right)\right) \]
    13. unpow2N/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \color{blue}{{b}^{2}}}, \frac{c}{b}\right)\right) \]
    14. lower-*.f64N/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \frac{c \cdot c}{\color{blue}{b \cdot {b}^{2}}}, \frac{c}{b}\right)\right) \]
    15. unpow2N/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \color{blue}{\left(b \cdot b\right)}}, \frac{c}{b}\right)\right) \]
    16. lower-*.f64N/A

      \[\leadsto \mathsf{neg}\left(\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \color{blue}{\left(b \cdot b\right)}}, \frac{c}{b}\right)\right) \]
    17. lower-/.f6496.1

      \[\leadsto -\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \color{blue}{\frac{c}{b}}\right) \]
  7. Applied rewrites96.1%

    \[\leadsto \color{blue}{-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)} \]
  8. Add Preprocessing

Alternative 4: 95.1% accurate, 1.2× speedup?

\[\begin{array}{l} \\ \frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ (fma (* c c) (/ a (* b b)) c) (- b)))
double code(double a, double b, double c) {
	return fma((c * c), (a / (b * b)), c) / -b;
}
# Alternative 4: series form fma(c^2, a/b^2, c) / (-b)
# (95.1% accurate per the report).
function code(a, b, c)
	return Float64(fma(Float64(c * c), Float64(a / Float64(b * b)), c) / Float64(-b))
end
code[a_, b_, c_] := N[(N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}

\\
\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}
\end{array}
Derivation
  1. Initial program 17.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{-1 \cdot c + -1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  4. Step-by-step derivation
    1. distribute-lft-outN/A

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(c + \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}}{b} \]
    2. associate-/l*N/A

      \[\leadsto \color{blue}{-1 \cdot \frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    3. mul-1-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}\right)} \]
    4. lower-neg.f64N/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}\right)} \]
    5. lower-/.f64N/A

      \[\leadsto \mathsf{neg}\left(\color{blue}{\frac{c + \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}}\right) \]
    6. +-commutativeN/A

      \[\leadsto \mathsf{neg}\left(\frac{\color{blue}{\frac{a \cdot {c}^{2}}{{b}^{2}} + c}}{b}\right) \]
    7. *-commutativeN/A

      \[\leadsto \mathsf{neg}\left(\frac{\frac{\color{blue}{{c}^{2} \cdot a}}{{b}^{2}} + c}{b}\right) \]
    8. associate-/l*N/A

      \[\leadsto \mathsf{neg}\left(\frac{\color{blue}{{c}^{2} \cdot \frac{a}{{b}^{2}}} + c}{b}\right) \]
    9. lower-fma.f64N/A

      \[\leadsto \mathsf{neg}\left(\frac{\color{blue}{\mathsf{fma}\left({c}^{2}, \frac{a}{{b}^{2}}, c\right)}}{b}\right) \]
    10. unpow2N/A

      \[\leadsto \mathsf{neg}\left(\frac{\mathsf{fma}\left(\color{blue}{c \cdot c}, \frac{a}{{b}^{2}}, c\right)}{b}\right) \]
    11. lower-*.f64N/A

      \[\leadsto \mathsf{neg}\left(\frac{\mathsf{fma}\left(\color{blue}{c \cdot c}, \frac{a}{{b}^{2}}, c\right)}{b}\right) \]
    12. lower-/.f64N/A

      \[\leadsto \mathsf{neg}\left(\frac{\mathsf{fma}\left(c \cdot c, \color{blue}{\frac{a}{{b}^{2}}}, c\right)}{b}\right) \]
    13. unpow2N/A

      \[\leadsto \mathsf{neg}\left(\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{\color{blue}{b \cdot b}}, c\right)}{b}\right) \]
    14. lower-*.f6496.1

      \[\leadsto -\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{\color{blue}{b \cdot b}}, c\right)}{b} \]
  5. Applied rewrites96.1%

    \[\leadsto \color{blue}{-\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b}} \]
  6. Final simplification96.1%

    \[\leadsto \frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b} \]
  7. Add Preprocessing

Alternative 5: 94.9% accurate, 1.4× speedup?

\[\begin{array}{l} \\ \frac{1}{\frac{a}{b} - \frac{b}{c}} \end{array} \]
(FPCore (a b c) :precision binary64 (/ 1.0 (- (/ a b) (/ b c))))
/* Alternative 5: reciprocal form 1 / (a/b - b/c)
   (94.9% accurate per the report). */
double code(double a, double b, double c) {
	double denominator = (a / b) - (b / c);
	return 1.0 / denominator;
}
real(8) function code(a, b, c)
    ! Alternative 5: reciprocal form 1 / (a/b - b/c) (94.9% accurate per the report).
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = 1.0d0 / ((a / b) - (b / c))
end function
/** Alternative 5: reciprocal form 1 / (a/b - b/c) (94.9% accurate per the report). */
public static double code(double a, double b, double c) {
	final double denominator = (a / b) - (b / c);
	return 1.0 / denominator;
}
def code(a, b, c):
	"""Alternative 5: reciprocal form 1 / (a/b - b/c) (94.9% accurate per report)."""
	denominator = (a / b) - (b / c)
	return 1.0 / denominator
# Alternative 5: reciprocal form 1 / (a/b - b/c) (94.9% accurate per the report).
function code(a, b, c)
	return Float64(1.0 / Float64(Float64(a / b) - Float64(b / c)))
end
function tmp = code(a, b, c)
	% Alternative 5: reciprocal form 1 / (a/b - b/c) (94.9% accurate per the report).
	tmp = 1.0 / ((a / b) - (b / c));
end
code[a_, b_, c_] := N[(1.0 / N[(N[(a / b), $MachinePrecision] - N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{\frac{a}{b} - \frac{b}{c}}
\end{array}
Derivation
  1. Initial program 17.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in c around 0

    \[\leadsto \frac{\color{blue}{c \cdot \left(-2 \cdot \frac{a}{b} + -2 \cdot \frac{{a}^{2} \cdot c}{{b}^{3}}\right)}}{2 \cdot a} \]
  4. Step-by-step derivation
    1. distribute-lft-outN/A

      \[\leadsto \frac{c \cdot \color{blue}{\left(-2 \cdot \left(\frac{a}{b} + \frac{{a}^{2} \cdot c}{{b}^{3}}\right)\right)}}{2 \cdot a} \]
    2. associate-*r*N/A

      \[\leadsto \frac{\color{blue}{\left(c \cdot -2\right) \cdot \left(\frac{a}{b} + \frac{{a}^{2} \cdot c}{{b}^{3}}\right)}}{2 \cdot a} \]
    3. *-commutativeN/A

      \[\leadsto \frac{\color{blue}{\left(-2 \cdot c\right)} \cdot \left(\frac{a}{b} + \frac{{a}^{2} \cdot c}{{b}^{3}}\right)}{2 \cdot a} \]
    4. lower-*.f64N/A

      \[\leadsto \frac{\color{blue}{\left(-2 \cdot c\right) \cdot \left(\frac{a}{b} + \frac{{a}^{2} \cdot c}{{b}^{3}}\right)}}{2 \cdot a} \]
    5. *-commutativeN/A

      \[\leadsto \frac{\color{blue}{\left(c \cdot -2\right)} \cdot \left(\frac{a}{b} + \frac{{a}^{2} \cdot c}{{b}^{3}}\right)}{2 \cdot a} \]
    6. lower-*.f64N/A

      \[\leadsto \frac{\color{blue}{\left(c \cdot -2\right)} \cdot \left(\frac{a}{b} + \frac{{a}^{2} \cdot c}{{b}^{3}}\right)}{2 \cdot a} \]
    7. +-commutativeN/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \color{blue}{\left(\frac{{a}^{2} \cdot c}{{b}^{3}} + \frac{a}{b}\right)}}{2 \cdot a} \]
    8. associate-/l*N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(\color{blue}{{a}^{2} \cdot \frac{c}{{b}^{3}}} + \frac{a}{b}\right)}{2 \cdot a} \]
    9. unpow2N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(\color{blue}{\left(a \cdot a\right)} \cdot \frac{c}{{b}^{3}} + \frac{a}{b}\right)}{2 \cdot a} \]
    10. associate-*l*N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(\color{blue}{a \cdot \left(a \cdot \frac{c}{{b}^{3}}\right)} + \frac{a}{b}\right)}{2 \cdot a} \]
    11. associate-/l*N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(a \cdot \color{blue}{\frac{a \cdot c}{{b}^{3}}} + \frac{a}{b}\right)}{2 \cdot a} \]
    12. lower-fma.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \color{blue}{\mathsf{fma}\left(a, \frac{a \cdot c}{{b}^{3}}, \frac{a}{b}\right)}}{2 \cdot a} \]
    13. lower-/.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \color{blue}{\frac{a \cdot c}{{b}^{3}}}, \frac{a}{b}\right)}{2 \cdot a} \]
    14. lower-*.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{\color{blue}{a \cdot c}}{{b}^{3}}, \frac{a}{b}\right)}{2 \cdot a} \]
    15. cube-multN/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{\color{blue}{b \cdot \left(b \cdot b\right)}}, \frac{a}{b}\right)}{2 \cdot a} \]
    16. unpow2N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \color{blue}{{b}^{2}}}, \frac{a}{b}\right)}{2 \cdot a} \]
    17. lower-*.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{\color{blue}{b \cdot {b}^{2}}}, \frac{a}{b}\right)}{2 \cdot a} \]
    18. unpow2N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \color{blue}{\left(b \cdot b\right)}}, \frac{a}{b}\right)}{2 \cdot a} \]
    19. lower-*.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \color{blue}{\left(b \cdot b\right)}}, \frac{a}{b}\right)}{2 \cdot a} \]
    20. lower-/.f6495.8

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \color{blue}{\frac{a}{b}}\right)}{2 \cdot a} \]
  5. Applied rewrites95.8%

    \[\leadsto \frac{\color{blue}{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right)}}{2 \cdot a} \]
  6. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \frac{\color{blue}{\left(c \cdot -2\right)} \cdot \left(a \cdot \frac{a \cdot c}{b \cdot \left(b \cdot b\right)} + \frac{a}{b}\right)}{2 \cdot a} \]
    2. lift-*.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(a \cdot \frac{\color{blue}{a \cdot c}}{b \cdot \left(b \cdot b\right)} + \frac{a}{b}\right)}{2 \cdot a} \]
    3. lift-*.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(a \cdot \frac{a \cdot c}{b \cdot \color{blue}{\left(b \cdot b\right)}} + \frac{a}{b}\right)}{2 \cdot a} \]
    4. lift-*.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(a \cdot \frac{a \cdot c}{\color{blue}{b \cdot \left(b \cdot b\right)}} + \frac{a}{b}\right)}{2 \cdot a} \]
    5. lift-/.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(a \cdot \color{blue}{\frac{a \cdot c}{b \cdot \left(b \cdot b\right)}} + \frac{a}{b}\right)}{2 \cdot a} \]
    6. lift-/.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \left(a \cdot \frac{a \cdot c}{b \cdot \left(b \cdot b\right)} + \color{blue}{\frac{a}{b}}\right)}{2 \cdot a} \]
    7. lift-fma.f64N/A

      \[\leadsto \frac{\left(c \cdot -2\right) \cdot \color{blue}{\mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right)}}{2 \cdot a} \]
    8. lift-*.f64N/A

      \[\leadsto \frac{\color{blue}{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right)}}{2 \cdot a} \]
    9. associate-/r*N/A

      \[\leadsto \color{blue}{\frac{\frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right)}{2}}{a}} \]
    10. clear-numN/A

      \[\leadsto \color{blue}{\frac{1}{\frac{a}{\frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right)}{2}}}} \]
    11. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{a}{\frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right)}{2}}}} \]
    12. lower-/.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{a}{\frac{\left(c \cdot -2\right) \cdot \mathsf{fma}\left(a, \frac{a \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right)}{2}}}} \]
  7. Applied rewrites95.6%

    \[\leadsto \color{blue}{\frac{1}{\frac{a}{\mathsf{fma}\left(a, \frac{c \cdot a}{b \cdot \left(b \cdot b\right)}, \frac{a}{b}\right) \cdot \left(c \cdot -1\right)}}} \]
  8. Taylor expanded in a around 0

    \[\leadsto \frac{1}{\color{blue}{-1 \cdot \frac{b}{c} + \frac{a}{b}}} \]
  9. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \frac{1}{\color{blue}{\frac{a}{b} + -1 \cdot \frac{b}{c}}} \]
    2. mul-1-negN/A

      \[\leadsto \frac{1}{\frac{a}{b} + \color{blue}{\left(\mathsf{neg}\left(\frac{b}{c}\right)\right)}} \]
    3. unsub-negN/A

      \[\leadsto \frac{1}{\color{blue}{\frac{a}{b} - \frac{b}{c}}} \]
    4. lower--.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{a}{b} - \frac{b}{c}}} \]
    5. lower-/.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{a}{b}} - \frac{b}{c}} \]
    6. lower-/.f6495.9

      \[\leadsto \frac{1}{\frac{a}{b} - \color{blue}{\frac{b}{c}}} \]
  10. Applied rewrites95.9%

    \[\leadsto \frac{1}{\color{blue}{\frac{a}{b} - \frac{b}{c}}} \]
  11. Add Preprocessing

Alternative 6: 90.0% accurate, 3.6× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ c (- b)))
/* Alternative 6: leading-order approximation c / (-b); `a` is
   intentionally unused (90.0% accurate per the report). */
double code(double a, double b, double c) {
	double negated_b = -b;
	return c / negated_b;
}
real(8) function code(a, b, c)
    ! Alternative 6: leading-order approximation c / (-b); a is intentionally unused.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c / -b
end function
/** Alternative 6: leading-order approximation c / (-b); a is intentionally unused. */
public static double code(double a, double b, double c) {
	final double negatedB = -b;
	return c / negatedB;
}
def code(a, b, c):
	"""Alternative 6: leading-order approximation c / (-b); `a` is unused."""
	negated_b = -b
	return c / negated_b
# Alternative 6: leading-order approximation c / (-b); a is intentionally unused.
function code(a, b, c)
	return Float64(c / Float64(-b))
end
function tmp = code(a, b, c)
	% Alternative 6: leading-order approximation c / (-b); a is intentionally unused.
	tmp = c / -b;
end
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b}
\end{array}
Derivation
  1. Initial program 17.7%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\frac{c}{b}\right)} \]
    2. distribute-neg-frac2N/A

      \[\leadsto \color{blue}{\frac{c}{\mathsf{neg}\left(b\right)}} \]
    3. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{c}{\mathsf{neg}\left(b\right)}} \]
    4. lower-neg.f6490.8

      \[\leadsto \frac{c}{\color{blue}{-b}} \]
  5. Applied rewrites90.8%

    \[\leadsto \color{blue}{\frac{c}{-b}} \]
  6. Add Preprocessing

Reproduce

?
herbie shell --seed 2024216 
;; Input for `herbie shell --seed 2024216` to reproduce this report.
(FPCore (a b c)
  :name "Quadratic roots, wide range"
  :precision binary64
  :pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))