Quadratic roots, wide range

Percentage Accurate: 17.8% → 97.7%
Time: 14.9s
Alternatives: 5
Speedup: 3.6×

Specification

?
\[\left(\left(4.930380657631324 \cdot 10^{-32} < a \land a < 2.028240960365167 \cdot 10^{+31}\right) \land \left(4.930380657631324 \cdot 10^{-32} < b \land b < 2.028240960365167 \cdot 10^{+31}\right)\right) \land \left(4.930380657631324 \cdot 10^{-32} < c \land c < 2.028240960365167 \cdot 10^{+31}\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Specification: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a), binary64.
; 17.8% average accuracy on the sampled inputs (see report header).
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// Larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy on the sampled inputs.
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy on the sampled inputs.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy on the sampled inputs.
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	"""Larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); needs `import math`. 17.8% avg. accuracy on the sampled inputs."""
	return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy on the sampled inputs.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy on the sampled inputs.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 5 alternatives:

Alternative · Accuracy · Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 17.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \end{array} \]
; Initial program: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a), binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// Initial program: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy.
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy.
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
	"""Initial program: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); needs `import math`."""
	return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Initial program: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a))
end
% Initial program: larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); 17.8% avg. accuracy.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}

Alternative 1: 97.7% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := a \cdot \left(c \cdot a\right)\\ \frac{\mathsf{fma}\left(\frac{\left(c \cdot t\_0\right) \cdot \left(t\_0 \cdot -5\right)}{b \cdot \left(\left(a \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)}, c, \frac{\left(c \cdot c\right) \cdot \left(\frac{c \cdot \left(a \cdot \left(a \cdot -2\right)\right)}{b \cdot b} - a\right)}{b \cdot b}\right) - c}{b} \end{array} \end{array} \]
; Herbie alternative 1 (97.7% accurate, 0.3x speedup): Taylor expansion of the
; root in b around inf, evaluated with fma (see derivation steps 3-7).
(FPCore (a b c)
 :precision binary64
 (let* ((t_0 (* a (* c a))))
   (/
    (-
     (fma
      (/ (* (* c t_0) (* t_0 -5.0)) (* b (* (* a (* b b)) (* b (* b b)))))
      c
      (/ (* (* c c) (- (/ (* c (* a (* a -2.0))) (* b b)) a)) (* b b)))
     c)
    b)))
// Herbie alternative 1 (97.7% accurate): Taylor expansion of the root in b around inf, using fma.
double code(double a, double b, double c) {
	double t_0 = a * (c * a);
	return (fma((((c * t_0) * (t_0 * -5.0)) / (b * ((a * (b * b)) * (b * (b * b))))), c, (((c * c) * (((c * (a * (a * -2.0))) / (b * b)) - a)) / (b * b))) - c) / b;
}
# Herbie alternative 1 (97.7% accurate): Taylor expansion of the root in b around inf, using fma.
function code(a, b, c)
	t_0 = Float64(a * Float64(c * a))
	return Float64(Float64(fma(Float64(Float64(Float64(c * t_0) * Float64(t_0 * -5.0)) / Float64(b * Float64(Float64(a * Float64(b * b)) * Float64(b * Float64(b * b))))), c, Float64(Float64(Float64(c * c) * Float64(Float64(Float64(c * Float64(a * Float64(a * -2.0))) / Float64(b * b)) - a)) / Float64(b * b))) - c) / b)
end
code[a_, b_, c_] := Block[{t$95$0 = N[(a * N[(c * a), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(c * t$95$0), $MachinePrecision] * N[(t$95$0 * -5.0), $MachinePrecision]), $MachinePrecision] / N[(b * N[(N[(a * N[(b * b), $MachinePrecision]), $MachinePrecision] * N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * c + N[(N[(N[(c * c), $MachinePrecision] * N[(N[(N[(c * N[(a * N[(a * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] - a), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := a \cdot \left(c \cdot a\right)\\
\frac{\mathsf{fma}\left(\frac{\left(c \cdot t\_0\right) \cdot \left(t\_0 \cdot -5\right)}{b \cdot \left(\left(a \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)}, c, \frac{\left(c \cdot c\right) \cdot \left(\frac{c \cdot \left(a \cdot \left(a \cdot -2\right)\right)}{b \cdot b} - a\right)}{b \cdot b}\right) - c}{b}
\end{array}
\end{array}
Derivation
  1. Initial program 17.8%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}} + \frac{-1}{4} \cdot \frac{4 \cdot \left({a}^{4} \cdot {c}^{4}\right) + 16 \cdot \left({a}^{4} \cdot {c}^{4}\right)}{a \cdot {b}^{6}}\right)\right)}{b}} \]
  4. Simplified — 98.0% accurate

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(-2, \frac{c \cdot \left(a \cdot \left(a \cdot \left(c \cdot c\right)\right)\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)}, -0.25 \cdot \left(\left({a}^{4} \cdot {c}^{4}\right) \cdot \frac{20}{a \cdot {b}^{6}}\right) - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)\right)}{b}} \]
  5. Applied egg-rr — 98.0% accurate

    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\left(c \cdot c\right) \cdot \left(\left(c \cdot c\right) \cdot \left(\left(a \cdot a\right) \cdot \left(a \cdot a\right)\right)\right), \frac{20}{a \cdot \left(\left(b \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)} \cdot -0.25, \frac{\left(-2 \cdot \left(c \cdot a\right)\right) \cdot \left(c \cdot \left(c \cdot a\right)\right)}{b \cdot \left(b \cdot \left(b \cdot b\right)\right)}\right) - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}}{b} \]
  6. Applied egg-rr — 98.0% accurate

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(c, \left(\left(\left(c \cdot \left(c \cdot a\right)\right) \cdot a\right) \cdot \left(c \cdot \left(a \cdot a\right)\right)\right) \cdot \frac{-5}{b \cdot \left(\left(b \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right) \cdot a\right)}, \frac{\frac{\left(c \cdot c\right) \cdot \left(a \cdot \left(c \cdot \left(a \cdot -2\right)\right)\right)}{b \cdot b} - c \cdot \left(c \cdot a\right)}{b \cdot b}\right) - c}{b}} \]
  7. Applied egg-rr — 98.0% accurate

    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\frac{\left(c \cdot \left(a \cdot \left(c \cdot a\right)\right)\right) \cdot \left(\left(a \cdot \left(c \cdot a\right)\right) \cdot -5\right)}{b \cdot \left(\left(a \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)}, c, \frac{\left(c \cdot c\right) \cdot \left(\frac{c \cdot \left(a \cdot \left(a \cdot -2\right)\right)}{b \cdot b} - a\right)}{b \cdot b}\right)} - c}{b} \]
  8. Add Preprocessing

Alternative 2: 97.0% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, -a \cdot \left(c \cdot c\right)\right)}{b \cdot b} - c}{b} \end{array} \]
; Herbie alternative 2 (97.0% accurate, 0.6x speedup): shorter Taylor series in b
; around inf: -(c/b) - a*c^2/b^3 - 2*a^2*c^3/b^5, evaluated with fma.
(FPCore (a b c)
 :precision binary64
 (/
  (-
   (/
    (fma -2.0 (/ (* (* a a) (* c (* c c))) (* b b)) (- (* a (* c c))))
    (* b b))
   c)
  b))
// Herbie alternative 2 (97.0% accurate): truncated Taylor series in b around inf, using fma.
double code(double a, double b, double c) {
	return ((fma(-2.0, (((a * a) * (c * (c * c))) / (b * b)), -(a * (c * c))) / (b * b)) - c) / b;
}
# Herbie alternative 2 (97.0% accurate): truncated Taylor series in b around inf, using fma.
function code(a, b, c)
	return Float64(Float64(Float64(fma(-2.0, Float64(Float64(Float64(a * a) * Float64(c * Float64(c * c))) / Float64(b * b)), Float64(-Float64(a * Float64(c * c)))) / Float64(b * b)) - c) / b)
end
code[a_, b_, c_] := N[(N[(N[(N[(-2.0 * N[(N[(N[(a * a), $MachinePrecision] * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + (-N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision])), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, -a \cdot \left(c \cdot c\right)\right)}{b \cdot b} - c}{b}
\end{array}
Derivation
  1. Initial program 17.8%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(-1 \cdot c + \left(-1 \cdot \frac{a \cdot {c}^{2}}{{b}^{2}} + \frac{-1}{4} \cdot \frac{4 \cdot \left({a}^{4} \cdot {c}^{4}\right) + 16 \cdot \left({a}^{4} \cdot {c}^{4}\right)}{a \cdot {b}^{6}}\right)\right)}{b}} \]
  4. Simplified — 98.0% accurate

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(-2, \frac{c \cdot \left(a \cdot \left(a \cdot \left(c \cdot c\right)\right)\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)}, -0.25 \cdot \left(\left({a}^{4} \cdot {c}^{4}\right) \cdot \frac{20}{a \cdot {b}^{6}}\right) - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)\right)}{b}} \]
  5. Applied egg-rr — 98.0% accurate

    \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(\left(c \cdot c\right) \cdot \left(\left(c \cdot c\right) \cdot \left(\left(a \cdot a\right) \cdot \left(a \cdot a\right)\right)\right), \frac{20}{a \cdot \left(\left(b \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)} \cdot -0.25, \frac{\left(-2 \cdot \left(c \cdot a\right)\right) \cdot \left(c \cdot \left(c \cdot a\right)\right)}{b \cdot \left(b \cdot \left(b \cdot b\right)\right)}\right) - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}}{b} \]
  6. Applied egg-rr — 98.0% accurate

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(c, \left(\left(\left(c \cdot \left(c \cdot a\right)\right) \cdot a\right) \cdot \left(c \cdot \left(a \cdot a\right)\right)\right) \cdot \frac{-5}{b \cdot \left(\left(b \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right) \cdot a\right)}, \frac{\frac{\left(c \cdot c\right) \cdot \left(a \cdot \left(c \cdot \left(a \cdot -2\right)\right)\right)}{b \cdot b} - c \cdot \left(c \cdot a\right)}{b \cdot b}\right) - c}{b}} \]
  7. Taylor expanded in b around inf

    \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{2}} + -1 \cdot \left(a \cdot {c}^{2}\right)}{{b}^{2}}} - c}{b} \]
  8. Step-by-step derivation
    1. /-lowering-/.f64N/A

      \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{2}} + -1 \cdot \left(a \cdot {c}^{2}\right)}{{b}^{2}}} - c}{b} \]
    2. accelerator-lowering-fma.f64N/A

      \[\leadsto \frac{\frac{\color{blue}{\mathsf{fma}\left(-2, \frac{{a}^{2} \cdot {c}^{3}}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}}{{b}^{2}} - c}{b} \]
    3. /-lowering-/.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \color{blue}{\frac{{a}^{2} \cdot {c}^{3}}{{b}^{2}}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    4. *-lowering-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\color{blue}{{a}^{2} \cdot {c}^{3}}}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    5. unpow2N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\color{blue}{\left(a \cdot a\right)} \cdot {c}^{3}}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    6. *-lowering-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\color{blue}{\left(a \cdot a\right)} \cdot {c}^{3}}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    7. cube-multN/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \color{blue}{\left(c \cdot \left(c \cdot c\right)\right)}}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    8. unpow2N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \color{blue}{{c}^{2}}\right)}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    9. *-lowering-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \color{blue}{\left(c \cdot {c}^{2}\right)}}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    10. unpow2N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \color{blue}{\left(c \cdot c\right)}\right)}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    11. *-lowering-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \color{blue}{\left(c \cdot c\right)}\right)}{{b}^{2}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    12. unpow2N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{\color{blue}{b \cdot b}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    13. *-lowering-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{\color{blue}{b \cdot b}}, -1 \cdot \left(a \cdot {c}^{2}\right)\right)}{{b}^{2}} - c}{b} \]
    14. mul-1-negN/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, \color{blue}{\mathsf{neg}\left(a \cdot {c}^{2}\right)}\right)}{{b}^{2}} - c}{b} \]
    15. neg-lowering-neg.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, \color{blue}{\mathsf{neg}\left(a \cdot {c}^{2}\right)}\right)}{{b}^{2}} - c}{b} \]
    16. *-lowering-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, \mathsf{neg}\left(\color{blue}{a \cdot {c}^{2}}\right)\right)}{{b}^{2}} - c}{b} \]
    17. unpow2N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, \mathsf{neg}\left(a \cdot \color{blue}{\left(c \cdot c\right)}\right)\right)}{{b}^{2}} - c}{b} \]
    18. *-lowering-*.f64N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, \mathsf{neg}\left(a \cdot \color{blue}{\left(c \cdot c\right)}\right)\right)}{{b}^{2}} - c}{b} \]
    19. unpow2N/A

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, \mathsf{neg}\left(a \cdot \left(c \cdot c\right)\right)\right)}{\color{blue}{b \cdot b}} - c}{b} \]
    20. *-lowering-*.f64 — 97.1% accurate

      \[\leadsto \frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, -a \cdot \left(c \cdot c\right)\right)}{\color{blue}{b \cdot b}} - c}{b} \]
  9. Simplified — 97.1% accurate

    \[\leadsto \frac{\color{blue}{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, -a \cdot \left(c \cdot c\right)\right)}{b \cdot b}} - c}{b} \]
  10. Add Preprocessing

Alternative 3: 95.6% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \frac{c}{\mathsf{fma}\left(c, \frac{a}{b}, -b\right)} \end{array} \]
(FPCore (a b c) :precision binary64 (/ c (fma c (/ a b) (- b))))
// Herbie alternative 3 (95.6% accurate): rationalized form c / (c*(a/b) - b), using fma.
double code(double a, double b, double c) {
	return c / fma(c, (a / b), -b);
}
# Herbie alternative 3 (95.6% accurate): rationalized form c / (c*(a/b) - b), using fma.
function code(a, b, c)
	return Float64(c / fma(c, Float64(a / b), Float64(-b)))
end
code[a_, b_, c_] := N[(c / N[(c * N[(a / b), $MachinePrecision] + (-b)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{\mathsf{fma}\left(c, \frac{a}{b}, -b\right)}
\end{array}
Derivation
  1. Initial program 17.8%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \left(a \cdot c\right) + -2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}}}{b}}}{2 \cdot a} \]
  4. Step-by-step derivation
    1. /-lowering-/.f64N/A

      \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \left(a \cdot c\right) + -2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}}}{b}}}{2 \cdot a} \]
  5. Simplified — 95.2% accurate

    \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \mathsf{fma}\left(a, \frac{a \cdot \left(c \cdot c\right)}{b \cdot b}, a \cdot c\right)}{b}}}{2 \cdot a} \]
  6. Step-by-step derivation
    1. clear-numN/A

      \[\leadsto \color{blue}{\frac{1}{\frac{2 \cdot a}{\frac{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)}{b}}}} \]
    2. /-lowering-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{2 \cdot a}{\frac{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)}{b}}}} \]
    3. associate-/r/N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{2 \cdot a}{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)} \cdot b}} \]
    4. *-lowering-*.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{2 \cdot a}{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)} \cdot b}} \]
  7. Applied egg-rr — 95.0% accurate

    \[\leadsto \color{blue}{\frac{1}{\frac{a \cdot 2}{\left(-2 \cdot a\right) \cdot \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)} \cdot b}} \]
  8. Taylor expanded in c around 0

    \[\leadsto \frac{1}{\color{blue}{\frac{-1 \cdot b + \frac{a \cdot c}{b}}{c}}} \]
  9. Step-by-step derivation
    1. /-lowering-/.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{-1 \cdot b + \frac{a \cdot c}{b}}{c}}} \]
    2. +-commutativeN/A

      \[\leadsto \frac{1}{\frac{\color{blue}{\frac{a \cdot c}{b} + -1 \cdot b}}{c}} \]
    3. associate-/l*N/A

      \[\leadsto \frac{1}{\frac{\color{blue}{a \cdot \frac{c}{b}} + -1 \cdot b}{c}} \]
    4. accelerator-lowering-fma.f64N/A

      \[\leadsto \frac{1}{\frac{\color{blue}{\mathsf{fma}\left(a, \frac{c}{b}, -1 \cdot b\right)}}{c}} \]
    5. /-lowering-/.f64N/A

      \[\leadsto \frac{1}{\frac{\mathsf{fma}\left(a, \color{blue}{\frac{c}{b}}, -1 \cdot b\right)}{c}} \]
    6. mul-1-negN/A

      \[\leadsto \frac{1}{\frac{\mathsf{fma}\left(a, \frac{c}{b}, \color{blue}{\mathsf{neg}\left(b\right)}\right)}{c}} \]
    7. neg-lowering-neg.f64 — 95.5% accurate

      \[\leadsto \frac{1}{\frac{\mathsf{fma}\left(a, \frac{c}{b}, \color{blue}{-b}\right)}{c}} \]
  10. Simplified — 95.5% accurate

    \[\leadsto \frac{1}{\color{blue}{\frac{\mathsf{fma}\left(a, \frac{c}{b}, -b\right)}{c}}} \]
  11. Step-by-step derivation
    1. clear-numN/A

      \[\leadsto \color{blue}{\frac{c}{a \cdot \frac{c}{b} + \left(\mathsf{neg}\left(b\right)\right)}} \]
    2. /-lowering-/.f64N/A

      \[\leadsto \color{blue}{\frac{c}{a \cdot \frac{c}{b} + \left(\mathsf{neg}\left(b\right)\right)}} \]
    3. associate-*r/N/A

      \[\leadsto \frac{c}{\color{blue}{\frac{a \cdot c}{b}} + \left(\mathsf{neg}\left(b\right)\right)} \]
    4. *-commutativeN/A

      \[\leadsto \frac{c}{\frac{\color{blue}{c \cdot a}}{b} + \left(\mathsf{neg}\left(b\right)\right)} \]
    5. associate-/l*N/A

      \[\leadsto \frac{c}{\color{blue}{c \cdot \frac{a}{b}} + \left(\mathsf{neg}\left(b\right)\right)} \]
    6. accelerator-lowering-fma.f64N/A

      \[\leadsto \frac{c}{\color{blue}{\mathsf{fma}\left(c, \frac{a}{b}, \mathsf{neg}\left(b\right)\right)}} \]
    7. /-lowering-/.f64N/A

      \[\leadsto \frac{c}{\mathsf{fma}\left(c, \color{blue}{\frac{a}{b}}, \mathsf{neg}\left(b\right)\right)} \]
    8. neg-lowering-neg.f64 — 95.7% accurate

      \[\leadsto \frac{c}{\mathsf{fma}\left(c, \frac{a}{b}, \color{blue}{-b}\right)} \]
  12. Applied egg-rr — 95.7% accurate

    \[\leadsto \color{blue}{\frac{c}{\mathsf{fma}\left(c, \frac{a}{b}, -b\right)}} \]
  13. Add Preprocessing

Alternative 4: 90.5% accurate, 3.6× speedup?

\[\begin{array}{l} \\ \frac{c}{-b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ c (- b)))
// Herbie alternative 4 (90.5% accurate): leading series term c / -b.
double code(double a, double b, double c) {
	return c / -b;
}
! Herbie alternative 4 (90.5% accurate): leading series term c / -b.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c / -b
end function
// Herbie alternative 4 (90.5% accurate): leading series term c / -b.
public static double code(double a, double b, double c) {
	return c / -b;
}
def code(a, b, c):
	"""Herbie alternative 4 (90.5% accurate): leading series term c / -b."""
	return c / -b
# Herbie alternative 4 (90.5% accurate): leading series term c / -b.
function code(a, b, c)
	return Float64(c / Float64(-b))
end
% Herbie alternative 4 (90.5% accurate): leading series term c / -b.
function tmp = code(a, b, c)
	tmp = c / -b;
end
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}

\\
\frac{c}{-b}
\end{array}
Derivation
  1. Initial program 17.8%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{-1 \cdot \frac{c}{b}} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \color{blue}{\mathsf{neg}\left(\frac{c}{b}\right)} \]
    2. distribute-neg-frac2N/A

      \[\leadsto \color{blue}{\frac{c}{\mathsf{neg}\left(b\right)}} \]
    3. /-lowering-/.f64N/A

      \[\leadsto \color{blue}{\frac{c}{\mathsf{neg}\left(b\right)}} \]
    4. neg-lowering-neg.f64 — 90.4% accurate

      \[\leadsto \frac{c}{\color{blue}{-b}} \]
  5. Simplified — 90.4% accurate

    \[\leadsto \color{blue}{\frac{c}{-b}} \]
  6. Add Preprocessing

Alternative 5: 1.6% accurate, 4.2× speedup?

\[\begin{array}{l} \\ \frac{b}{a} \end{array} \]
(FPCore (a b c) :precision binary64 (/ b a))
// Herbie alternative 5 (1.6% accurate): degenerate expansion b / a.
double code(double a, double b, double c) {
	return b / a;
}
! Herbie alternative 5 (1.6% accurate): degenerate expansion b / a.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = b / a
end function
// Herbie alternative 5 (1.6% accurate): degenerate expansion b / a.
public static double code(double a, double b, double c) {
	return b / a;
}
def code(a, b, c):
	"""Herbie alternative 5 (1.6% accurate): degenerate expansion b / a."""
	return b / a
# Herbie alternative 5 (1.6% accurate): degenerate expansion b / a.
function code(a, b, c)
	return Float64(b / a)
end
% Herbie alternative 5 (1.6% accurate): degenerate expansion b / a.
function tmp = code(a, b, c)
	tmp = b / a;
end
code[a_, b_, c_] := N[(b / a), $MachinePrecision]
\begin{array}{l}

\\
\frac{b}{a}
\end{array}
Derivation
  1. Initial program 17.8%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \left(a \cdot c\right) + -2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}}}{b}}}{2 \cdot a} \]
  4. Step-by-step derivation
    1. /-lowering-/.f64N/A

      \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \left(a \cdot c\right) + -2 \cdot \frac{{a}^{2} \cdot {c}^{2}}{{b}^{2}}}{b}}}{2 \cdot a} \]
  5. Simplified — 95.2% accurate

    \[\leadsto \frac{\color{blue}{\frac{-2 \cdot \mathsf{fma}\left(a, \frac{a \cdot \left(c \cdot c\right)}{b \cdot b}, a \cdot c\right)}{b}}}{2 \cdot a} \]
  6. Step-by-step derivation
    1. clear-numN/A

      \[\leadsto \color{blue}{\frac{1}{\frac{2 \cdot a}{\frac{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)}{b}}}} \]
    2. /-lowering-/.f64N/A

      \[\leadsto \color{blue}{\frac{1}{\frac{2 \cdot a}{\frac{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)}{b}}}} \]
    3. associate-/r/N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{2 \cdot a}{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)} \cdot b}} \]
    4. *-lowering-*.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{2 \cdot a}{-2 \cdot \left(a \cdot \frac{a \cdot \left(c \cdot c\right)}{b \cdot b} + a \cdot c\right)} \cdot b}} \]
  7. Applied egg-rr — 95.0% accurate

    \[\leadsto \color{blue}{\frac{1}{\frac{a \cdot 2}{\left(-2 \cdot a\right) \cdot \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)} \cdot b}} \]
  8. Taylor expanded in c around 0

    \[\leadsto \frac{1}{\color{blue}{\frac{-1 \cdot b + \frac{a \cdot c}{b}}{c}}} \]
  9. Step-by-step derivation
    1. /-lowering-/.f64N/A

      \[\leadsto \frac{1}{\color{blue}{\frac{-1 \cdot b + \frac{a \cdot c}{b}}{c}}} \]
    2. +-commutativeN/A

      \[\leadsto \frac{1}{\frac{\color{blue}{\frac{a \cdot c}{b} + -1 \cdot b}}{c}} \]
    3. associate-/l*N/A

      \[\leadsto \frac{1}{\frac{\color{blue}{a \cdot \frac{c}{b}} + -1 \cdot b}{c}} \]
    4. accelerator-lowering-fma.f64N/A

      \[\leadsto \frac{1}{\frac{\color{blue}{\mathsf{fma}\left(a, \frac{c}{b}, -1 \cdot b\right)}}{c}} \]
    5. /-lowering-/.f64N/A

      \[\leadsto \frac{1}{\frac{\mathsf{fma}\left(a, \color{blue}{\frac{c}{b}}, -1 \cdot b\right)}{c}} \]
    6. mul-1-negN/A

      \[\leadsto \frac{1}{\frac{\mathsf{fma}\left(a, \frac{c}{b}, \color{blue}{\mathsf{neg}\left(b\right)}\right)}{c}} \]
    7. neg-lowering-neg.f64 — 95.5% accurate

      \[\leadsto \frac{1}{\frac{\mathsf{fma}\left(a, \frac{c}{b}, \color{blue}{-b}\right)}{c}} \]
  10. Simplified — 95.5% accurate

    \[\leadsto \frac{1}{\color{blue}{\frac{\mathsf{fma}\left(a, \frac{c}{b}, -b\right)}{c}}} \]
  11. Taylor expanded in a around inf

    \[\leadsto \color{blue}{\frac{b}{a}} \]
  12. Step-by-step derivation
    1. /-lowering-/.f64 — 1.6% accurate

      \[\leadsto \color{blue}{\frac{b}{a}} \]
  13. Simplified — 1.6% accurate

    \[\leadsto \color{blue}{\frac{b}{a}} \]
  14. Add Preprocessing

Reproduce

?
herbie shell --seed 2024205 
; Reproduction input for `herbie shell` (seed 2024205): the original program with
; its input preconditions (each of a, b, c in (4.93e-32, 2.03e+31)).
(FPCore (a b c)
  :name "Quadratic roots, wide range"
  :precision binary64
  :pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))