Cubic critical, medium range

Percentage Accurate: 31.5% → 95.4%
Time: 14.8s
Alternatives: 7
Speedup: 2.9×

Specification

?
\[\left(\left(1.1102230246251565 \cdot 10^{-16} < a \land a < 9007199254740992\right) \land \left(1.1102230246251565 \cdot 10^{-16} < b \land b < 9007199254740992\right)\right) \land \left(1.1102230246251565 \cdot 10^{-16} < c \land c < 9007199254740992\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \end{array} \]
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a), evaluated
! naively in double precision (binary64).
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
// Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a) in binary64.
public static double code(double a, double b, double c) {
	final double threeA = 3.0 * a;
	final double disc = (b * b) - (threeA * c);
	return (Math.sqrt(disc) - b) / threeA;
}
def code(a, b, c):
	# Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a) in binary64.
	three_a = 3.0 * a
	discriminant = (b * b) - (three_a * c)
	return (math.sqrt(discriminant) - b) / three_a
# Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a) in binary64.
function code(a, b, c)
	three_a = Float64(3.0 * a)
	disc = Float64(Float64(b * b) - Float64(three_a * c))
	return Float64(Float64(sqrt(disc) - b) / three_a)
end
% Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a), naive evaluation.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 7 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 31.5% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \end{array} \]
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
/* Initial program (31.5% accurate, 1.0x speedup): naive binary64
 * evaluation of (-b + sqrt(b*b - 3*a*c)) / (3*a). */
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c):
	return (-b + math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a)
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a))
end
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}

Alternative 1: 95.4% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := b \cdot \left(b \cdot b\right)\\ t_1 := \left(b \cdot b\right) \cdot t\_0\\ t_2 := c \cdot \left(c \cdot c\right)\\ \mathsf{fma}\left(a, \mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot t\_2\right) \cdot \left(a \cdot 6.328125\right)}{b \cdot \left(b \cdot t\_1\right)}, -0.16666666666666666, \frac{t\_2 \cdot -0.5625}{t\_1}\right), a, \frac{\left(c \cdot c\right) \cdot -0.375}{t\_0}\right), -0.5 \cdot \frac{c}{b}\right) \end{array} \end{array} \]
(FPCore (a b c)
 :precision binary64
 (let* ((t_0 (* b (* b b))) (t_1 (* (* b b) t_0)) (t_2 (* c (* c c))))
   (fma
    a
    (fma
     (fma
      (/ (* (* c t_2) (* a 6.328125)) (* b (* b t_1)))
      -0.16666666666666666
      (/ (* t_2 -0.5625) t_1))
     a
     (/ (* (* c c) -0.375) t_0))
    (* -0.5 (/ c b)))))
/* Alternative 1 (95.4% accurate, 0.3x speedup): degree-3 Taylor
 * expansion of the initial program in a around 0, evaluated with
 * fused multiply-adds.  Do not reassociate: the exact operation
 * order is what delivers the reported accuracy. */
double code(double a, double b, double c) {
	double t_0 = b * (b * b);       /* b^3 */
	double t_1 = (b * b) * t_0;     /* b^5 */
	double t_2 = c * (c * c);       /* c^3 */
	return fma(a, fma(fma((((c * t_2) * (a * 6.328125)) / (b * (b * t_1))), -0.16666666666666666, ((t_2 * -0.5625) / t_1)), a, (((c * c) * -0.375) / t_0)), (-0.5 * (c / b)));
}
function code(a, b, c)
	t_0 = Float64(b * Float64(b * b))
	t_1 = Float64(Float64(b * b) * t_0)
	t_2 = Float64(c * Float64(c * c))
	return fma(a, fma(fma(Float64(Float64(Float64(c * t_2) * Float64(a * 6.328125)) / Float64(b * Float64(b * t_1))), -0.16666666666666666, Float64(Float64(t_2 * -0.5625) / t_1)), a, Float64(Float64(Float64(c * c) * -0.375) / t_0)), Float64(-0.5 * Float64(c / b)))
end
code[a_, b_, c_] := Block[{t$95$0 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(b * b), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]}, N[(a * N[(N[(N[(N[(N[(c * t$95$2), $MachinePrecision] * N[(a * 6.328125), $MachinePrecision]), $MachinePrecision] / N[(b * N[(b * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.16666666666666666 + N[(N[(t$95$2 * -0.5625), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision] * a + N[(N[(N[(c * c), $MachinePrecision] * -0.375), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := b \cdot \left(b \cdot b\right)\\
t_1 := \left(b \cdot b\right) \cdot t\_0\\
t_2 := c \cdot \left(c \cdot c\right)\\
\mathsf{fma}\left(a, \mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot t\_2\right) \cdot \left(a \cdot 6.328125\right)}{b \cdot \left(b \cdot t\_1\right)}, -0.16666666666666666, \frac{t\_2 \cdot -0.5625}{t\_1}\right), a, \frac{\left(c \cdot c\right) \cdot -0.375}{t\_0}\right), -0.5 \cdot \frac{c}{b}\right)
\end{array}
\end{array}
Derivation
  1. Initial program 30.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b} + a \cdot \left(\frac{-3}{8} \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(\frac{-9}{16} \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{6} \cdot \frac{a \cdot \left(\frac{81}{64} \cdot \frac{{c}^{4}}{{b}^{6}} + \frac{81}{16} \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Applied rewrites95.5%

    \[\leadsto \color{blue}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, \mathsf{fma}\left(\frac{\frac{{c}^{4}}{{b}^{6}} \cdot \left(6.328125 \cdot a\right)}{b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{{b}^{5}}\right), \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right)} \]
  5. Applied rewrites95.5%

    \[\leadsto \mathsf{fma}\left(a, \mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot \left(6.328125 \cdot a\right)}{\left(b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right) \cdot b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}\right), \color{blue}{a}, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right) \]
  6. Final simplification95.5%

    \[\leadsto \mathsf{fma}\left(a, \mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot \left(a \cdot 6.328125\right)}{b \cdot \left(b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right)}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}\right), a, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right) \]
  7. Add Preprocessing

Alternative 2: 95.1% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := b \cdot \left(b \cdot b\right)\\ t_1 := b \cdot t\_0\\ \mathsf{fma}\left(\mathsf{fma}\left(a, \frac{c \cdot -0.375}{t\_0}, \frac{-0.5}{b}\right), c, \mathsf{fma}\left(c, \frac{\left(c \cdot c\right) \cdot -0.5625}{b \cdot t\_1}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot -1.0546875\right)}{b \cdot \left(\left(b \cdot b\right) \cdot t\_1\right)}\right) \cdot \left(a \cdot a\right)\right) \end{array} \end{array} \]
(FPCore (a b c)
 :precision binary64
 (let* ((t_0 (* b (* b b))) (t_1 (* b t_0)))
   (fma
    (fma a (/ (* c -0.375) t_0) (/ -0.5 b))
    c
    (*
     (fma
      c
      (/ (* (* c c) -0.5625) (* b t_1))
      (/ (* a (* (* c (* c (* c c))) -1.0546875)) (* b (* (* b b) t_1))))
     (* a a)))))
/* Alternative 2 (95.1% accurate, 0.3x speedup): Taylor expansion in a
 * around 0, regrouped so the constant and linear terms share one fma.
 * Operation order is deliberate; do not reassociate. */
double code(double a, double b, double c) {
	double t_0 = b * (b * b);       /* b^3 */
	double t_1 = b * t_0;           /* b^4 */
	return fma(fma(a, ((c * -0.375) / t_0), (-0.5 / b)), c, (fma(c, (((c * c) * -0.5625) / (b * t_1)), ((a * ((c * (c * (c * c))) * -1.0546875)) / (b * ((b * b) * t_1)))) * (a * a)));
}
function code(a, b, c)
	t_0 = Float64(b * Float64(b * b))
	t_1 = Float64(b * t_0)
	return fma(fma(a, Float64(Float64(c * -0.375) / t_0), Float64(-0.5 / b)), c, Float64(fma(c, Float64(Float64(Float64(c * c) * -0.5625) / Float64(b * t_1)), Float64(Float64(a * Float64(Float64(c * Float64(c * Float64(c * c))) * -1.0546875)) / Float64(b * Float64(Float64(b * b) * t_1)))) * Float64(a * a)))
end
code[a_, b_, c_] := Block[{t$95$0 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(b * t$95$0), $MachinePrecision]}, N[(N[(a * N[(N[(c * -0.375), $MachinePrecision] / t$95$0), $MachinePrecision] + N[(-0.5 / b), $MachinePrecision]), $MachinePrecision] * c + N[(N[(c * N[(N[(N[(c * c), $MachinePrecision] * -0.5625), $MachinePrecision] / N[(b * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(a * N[(N[(c * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -1.0546875), $MachinePrecision]), $MachinePrecision] / N[(b * N[(N[(b * b), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := b \cdot \left(b \cdot b\right)\\
t_1 := b \cdot t\_0\\
\mathsf{fma}\left(\mathsf{fma}\left(a, \frac{c \cdot -0.375}{t\_0}, \frac{-0.5}{b}\right), c, \mathsf{fma}\left(c, \frac{\left(c \cdot c\right) \cdot -0.5625}{b \cdot t\_1}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot -1.0546875\right)}{b \cdot \left(\left(b \cdot b\right) \cdot t\_1\right)}\right) \cdot \left(a \cdot a\right)\right)
\end{array}
\end{array}
Derivation
  1. Initial program 30.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b} + a \cdot \left(\frac{-3}{8} \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(\frac{-9}{16} \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{6} \cdot \frac{a \cdot \left(\frac{81}{64} \cdot \frac{{c}^{4}}{{b}^{6}} + \frac{81}{16} \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Applied rewrites95.5%

    \[\leadsto \color{blue}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, \mathsf{fma}\left(\frac{\frac{{c}^{4}}{{b}^{6}} \cdot \left(6.328125 \cdot a\right)}{b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{{b}^{5}}\right), \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right)} \]
  5. Applied rewrites95.5%

    \[\leadsto \mathsf{fma}\left(a, \mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot \left(6.328125 \cdot a\right)}{\left(b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right) \cdot b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}\right), \color{blue}{a}, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right) \]
  6. Applied rewrites95.2%

    \[\leadsto \mathsf{fma}\left(c, \frac{-0.5}{b}, \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}, \frac{\left(a \cdot \left(c \cdot \left(\left(c \cdot \left(c \cdot c\right)\right) \cdot 6.328125\right)\right)\right) \cdot -0.16666666666666666}{b \cdot \left(b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right)}\right) \cdot \left(a \cdot a\right)\right) + \color{blue}{\frac{a \cdot \left(\left(c \cdot c\right) \cdot -0.375\right)}{b \cdot \left(b \cdot b\right)}} \]
  7. Applied rewrites95.2%

    \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(a, \frac{c \cdot -0.375}{b \cdot \left(b \cdot b\right)}, \frac{-0.5}{b}\right), \color{blue}{c}, \mathsf{fma}\left(c, \frac{\left(c \cdot c\right) \cdot -0.5625}{b \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot -1.0546875\right)}{b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right)}\right) \cdot \left(a \cdot a\right)\right) \]
  8. Add Preprocessing

Alternative 3: 93.8% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(a, \frac{\mathsf{fma}\left(-0.375, c \cdot c, \frac{-0.5625 \cdot \left(a \cdot \left(c \cdot \left(c \cdot c\right)\right)\right)}{b \cdot b}\right)}{b \cdot \left(b \cdot b\right)}, -0.5 \cdot \frac{c}{b}\right) \end{array} \]
(FPCore (a b c)
 :precision binary64
 (fma
  a
  (/
   (fma -0.375 (* c c) (/ (* -0.5625 (* a (* c (* c c)))) (* b b)))
   (* b (* b b)))
  (* -0.5 (/ c b))))
/* Alternative 3 (93.8% accurate, 0.5x speedup): Taylor expansion in a
 * around 0 re-expanded in b around infinity, evaluated with fma. */
double code(double a, double b, double c) {
	return fma(a, (fma(-0.375, (c * c), ((-0.5625 * (a * (c * (c * c)))) / (b * b))) / (b * (b * b))), (-0.5 * (c / b)));
}
function code(a, b, c)
	return fma(a, Float64(fma(-0.375, Float64(c * c), Float64(Float64(-0.5625 * Float64(a * Float64(c * Float64(c * c)))) / Float64(b * b))) / Float64(b * Float64(b * b))), Float64(-0.5 * Float64(c / b)))
end
code[a_, b_, c_] := N[(a * N[(N[(-0.375 * N[(c * c), $MachinePrecision] + N[(N[(-0.5625 * N[(a * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(a, \frac{\mathsf{fma}\left(-0.375, c \cdot c, \frac{-0.5625 \cdot \left(a \cdot \left(c \cdot \left(c \cdot c\right)\right)\right)}{b \cdot b}\right)}{b \cdot \left(b \cdot b\right)}, -0.5 \cdot \frac{c}{b}\right)
\end{array}
Derivation
  1. Initial program 30.9%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b} + a \cdot \left(\frac{-3}{8} \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(\frac{-9}{16} \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{6} \cdot \frac{a \cdot \left(\frac{81}{64} \cdot \frac{{c}^{4}}{{b}^{6}} + \frac{81}{16} \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Applied rewrites95.5%

    \[\leadsto \color{blue}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, \mathsf{fma}\left(\frac{\frac{{c}^{4}}{{b}^{6}} \cdot \left(6.328125 \cdot a\right)}{b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{{b}^{5}}\right), \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right)} \]
  5. Taylor expanded in b around inf

    \[\leadsto \mathsf{fma}\left(a, \frac{\frac{-9}{16} \cdot \frac{a \cdot {c}^{3}}{{b}^{2}} + \frac{-3}{8} \cdot {c}^{2}}{\color{blue}{{b}^{3}}}, \frac{-1}{2} \cdot \frac{c}{b}\right) \]
  6. Step-by-step derivation
    1. Applied rewrites94.3%

      \[\leadsto \mathsf{fma}\left(a, \frac{\mathsf{fma}\left(-0.375, c \cdot c, \frac{-0.5625 \cdot \left(a \cdot \left(c \cdot \left(c \cdot c\right)\right)\right)}{b \cdot b}\right)}{\color{blue}{b \cdot \left(b \cdot b\right)}}, -0.5 \cdot \frac{c}{b}\right) \]
    2. Add Preprocessing

    Alternative 4: 90.7% accurate, 1.0× speedup?

    \[\begin{array}{l} \\ \frac{\mathsf{fma}\left(a, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot b}, c \cdot -0.5\right)}{b} \end{array} \]
    (FPCore (a b c)
     :precision binary64
     (/ (fma a (/ (* (* c c) -0.375) (* b b)) (* c -0.5)) b))
    /* Alternative 4 (90.7% accurate, 1.0x speedup): two-term Taylor
     * expansion in b around infinity, one fused multiply-add. */
    double code(double a, double b, double c) {
    	return fma(a, (((c * c) * -0.375) / (b * b)), (c * -0.5)) / b;
    }
    
    function code(a, b, c)
    	return Float64(fma(a, Float64(Float64(Float64(c * c) * -0.375) / Float64(b * b)), Float64(c * -0.5)) / b)
    end
    
    code[a_, b_, c_] := N[(N[(a * N[(N[(N[(c * c), $MachinePrecision] * -0.375), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + N[(c * -0.5), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{\mathsf{fma}\left(a, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot b}, c \cdot -0.5\right)}{b}
    \end{array}
    
    Derivation
    1. Initial program 30.9%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
    2. Add Preprocessing
    3. Taylor expanded in b around inf

      \[\leadsto \color{blue}{\frac{\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    4. Step-by-step derivation
      1. lower-/.f64N/A

        \[\leadsto \color{blue}{\frac{\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
    5. Applied rewrites91.6%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(a, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot b}, c \cdot -0.5\right)}{b}} \]
    6. Add Preprocessing

    Alternative 5: 90.5% accurate, 1.0× speedup?

    \[\begin{array}{l} \\ c \cdot \mathsf{fma}\left(a, -0.375 \cdot \frac{c}{b \cdot \left(b \cdot b\right)}, \frac{-0.5}{b}\right) \end{array} \]
    (FPCore (a b c)
     :precision binary64
     (* c (fma a (* -0.375 (/ c (* b (* b b)))) (/ -0.5 b))))
    /* Alternative 5 (90.5% accurate, 1.0x speedup): Taylor expansion
     * in c around 0, with the common factor c pulled out front. */
    double code(double a, double b, double c) {
    	return c * fma(a, (-0.375 * (c / (b * (b * b)))), (-0.5 / b));
    }
    
    function code(a, b, c)
    	return Float64(c * fma(a, Float64(-0.375 * Float64(c / Float64(b * Float64(b * b)))), Float64(-0.5 / b)))
    end
    
    code[a_, b_, c_] := N[(c * N[(a * N[(-0.375 * N[(c / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    c \cdot \mathsf{fma}\left(a, -0.375 \cdot \frac{c}{b \cdot \left(b \cdot b\right)}, \frac{-0.5}{b}\right)
    \end{array}
    
    Derivation
    1. Initial program 30.9%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
    2. Add Preprocessing
    3. Taylor expanded in c around 0

      \[\leadsto \color{blue}{c \cdot \left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}} - \frac{1}{2} \cdot \frac{1}{b}\right)} \]
    4. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto c \cdot \color{blue}{\left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right)} \]
      2. distribute-lft-inN/A

        \[\leadsto \color{blue}{c \cdot \left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}}\right) + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)} \]
      3. associate-*r/N/A

        \[\leadsto c \cdot \color{blue}{\frac{\frac{-3}{8} \cdot \left(a \cdot c\right)}{{b}^{3}}} + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      4. associate-*r*N/A

        \[\leadsto c \cdot \frac{\color{blue}{\left(\frac{-3}{8} \cdot a\right) \cdot c}}{{b}^{3}} + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      5. associate-*l/N/A

        \[\leadsto c \cdot \color{blue}{\left(\frac{\frac{-3}{8} \cdot a}{{b}^{3}} \cdot c\right)} + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      6. associate-*r/N/A

        \[\leadsto c \cdot \left(\color{blue}{\left(\frac{-3}{8} \cdot \frac{a}{{b}^{3}}\right)} \cdot c\right) + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      7. distribute-lft-inN/A

        \[\leadsto \color{blue}{c \cdot \left(\left(\frac{-3}{8} \cdot \frac{a}{{b}^{3}}\right) \cdot c + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right)} \]
      8. lower-*.f64N/A

        \[\leadsto \color{blue}{c \cdot \left(\left(\frac{-3}{8} \cdot \frac{a}{{b}^{3}}\right) \cdot c + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right)} \]
      9. associate-*r/N/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{\frac{-3}{8} \cdot a}{{b}^{3}}} \cdot c + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      10. associate-*l/N/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{\left(\frac{-3}{8} \cdot a\right) \cdot c}{{b}^{3}}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      11. associate-*r*N/A

        \[\leadsto c \cdot \left(\frac{\color{blue}{\frac{-3}{8} \cdot \left(a \cdot c\right)}}{{b}^{3}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      12. associate-*r/N/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      13. *-commutativeN/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{a \cdot c}{{b}^{3}} \cdot \frac{-3}{8}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
    5. Applied rewrites91.3%

      \[\leadsto \color{blue}{c \cdot \mathsf{fma}\left(a, \frac{c}{b \cdot \left(b \cdot b\right)} \cdot -0.375, \frac{-0.5}{b}\right)} \]
    6. Final simplification91.3%

      \[\leadsto c \cdot \mathsf{fma}\left(a, -0.375 \cdot \frac{c}{b \cdot \left(b \cdot b\right)}, \frac{-0.5}{b}\right) \]
    7. Add Preprocessing

    Alternative 6: 90.4% accurate, 1.1× speedup?

    \[\begin{array}{l} \\ c \cdot \frac{\mathsf{fma}\left(-0.375, a \cdot \frac{c}{b \cdot b}, -0.5\right)}{b} \end{array} \]
    (FPCore (a b c)
     :precision binary64
     (* c (/ (fma -0.375 (* a (/ c (* b b))) -0.5) b)))
    /* Alternative 6 (90.4% accurate, 1.1x speedup): Taylor expansion
     * in c around 0 then in b around infinity; single fma, one final
     * division by b. */
    double code(double a, double b, double c) {
    	return c * (fma(-0.375, (a * (c / (b * b))), -0.5) / b);
    }
    
    function code(a, b, c)
    	return Float64(c * Float64(fma(-0.375, Float64(a * Float64(c / Float64(b * b))), -0.5) / b))
    end
    
    code[a_, b_, c_] := N[(c * N[(N[(-0.375 * N[(a * N[(c / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision] / b), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    c \cdot \frac{\mathsf{fma}\left(-0.375, a \cdot \frac{c}{b \cdot b}, -0.5\right)}{b}
    \end{array}
    
    Derivation
    1. Initial program 30.9%

      \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
    2. Add Preprocessing
    3. Taylor expanded in c around 0

      \[\leadsto \color{blue}{c \cdot \left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}} - \frac{1}{2} \cdot \frac{1}{b}\right)} \]
    4. Step-by-step derivation
      1. sub-negN/A

        \[\leadsto c \cdot \color{blue}{\left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right)} \]
      2. distribute-lft-inN/A

        \[\leadsto \color{blue}{c \cdot \left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}}\right) + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)} \]
      3. associate-*r/N/A

        \[\leadsto c \cdot \color{blue}{\frac{\frac{-3}{8} \cdot \left(a \cdot c\right)}{{b}^{3}}} + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      4. associate-*r*N/A

        \[\leadsto c \cdot \frac{\color{blue}{\left(\frac{-3}{8} \cdot a\right) \cdot c}}{{b}^{3}} + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      5. associate-*l/N/A

        \[\leadsto c \cdot \color{blue}{\left(\frac{\frac{-3}{8} \cdot a}{{b}^{3}} \cdot c\right)} + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      6. associate-*r/N/A

        \[\leadsto c \cdot \left(\color{blue}{\left(\frac{-3}{8} \cdot \frac{a}{{b}^{3}}\right)} \cdot c\right) + c \cdot \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right) \]
      7. distribute-lft-inN/A

        \[\leadsto \color{blue}{c \cdot \left(\left(\frac{-3}{8} \cdot \frac{a}{{b}^{3}}\right) \cdot c + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right)} \]
      8. lower-*.f64N/A

        \[\leadsto \color{blue}{c \cdot \left(\left(\frac{-3}{8} \cdot \frac{a}{{b}^{3}}\right) \cdot c + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right)} \]
      9. associate-*r/N/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{\frac{-3}{8} \cdot a}{{b}^{3}}} \cdot c + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      10. associate-*l/N/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{\left(\frac{-3}{8} \cdot a\right) \cdot c}{{b}^{3}}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      11. associate-*r*N/A

        \[\leadsto c \cdot \left(\frac{\color{blue}{\frac{-3}{8} \cdot \left(a \cdot c\right)}}{{b}^{3}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      12. associate-*r/N/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{3}}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
      13. *-commutativeN/A

        \[\leadsto c \cdot \left(\color{blue}{\frac{a \cdot c}{{b}^{3}} \cdot \frac{-3}{8}} + \left(\mathsf{neg}\left(\frac{1}{2} \cdot \frac{1}{b}\right)\right)\right) \]
    5. Applied rewrites91.3%

      \[\leadsto \color{blue}{c \cdot \mathsf{fma}\left(a, \frac{c}{b \cdot \left(b \cdot b\right)} \cdot -0.375, \frac{-0.5}{b}\right)} \]
    6. Taylor expanded in b around inf

      \[\leadsto c \cdot \frac{\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{2}} - \frac{1}{2}}{\color{blue}{b}} \]
    7. Step-by-step derivation
      1. Applied rewrites91.3%

        \[\leadsto c \cdot \frac{\mathsf{fma}\left(-0.375, a \cdot \frac{c}{b \cdot b}, -0.5\right)}{\color{blue}{b}} \]
      2. Add Preprocessing

      Alternative 7: 81.2% accurate, 2.9× speedup?

      \[\begin{array}{l} \\ -0.5 \cdot \frac{c}{b} \end{array} \]
      (FPCore (a b c) :precision binary64 (* -0.5 (/ c b)))
      /* Alternative 7 (81.2% accurate, 2.9x speedup): keep only the
       * leading Taylor term in b around infinity, -c / (2*b).
       * Note: a is unused by design in this approximation. */
      double code(double a, double b, double c) {
      	double ratio = c / b;
      	return ratio * -0.5;
      }
      
      real(8) function code(a, b, c)
          real(8), intent (in) :: a
          real(8), intent (in) :: b
          real(8), intent (in) :: c
          code = (-0.5d0) * (c / b)
      end function
      
      // Alternative 7: leading Taylor term only, -c / (2*b); a is unused.
      public static double code(double a, double b, double c) {
      	final double ratio = c / b;
      	return ratio * -0.5;
      }
      
      # Alternative 7: leading Taylor term only, -c / (2*b); a is unused.
      def code(a, b, c):
      	return -0.5 * (c / b)
      
      function code(a, b, c)
      	return Float64(-0.5 * Float64(c / b))
      end
      
      function tmp = code(a, b, c)
      	tmp = -0.5 * (c / b);
      end
      
      code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      -0.5 \cdot \frac{c}{b}
      \end{array}
      
      Derivation
      1. Initial program 30.9%

        \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
      2. Add Preprocessing
      3. Taylor expanded in b around inf

        \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b}} \]
      4. Step-by-step derivation
        1. lower-*.f64N/A

          \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b}} \]
        2. lower-/.f6482.1

          \[\leadsto -0.5 \cdot \color{blue}{\frac{c}{b}} \]
      5. Applied rewrites82.1%

        \[\leadsto \color{blue}{-0.5 \cdot \frac{c}{b}} \]
      6. Add Preprocessing

      Reproduce

      ?
      herbie shell --seed 2024233 
      (FPCore (a b c)
        :name "Cubic critical, medium range"
        :precision binary64
        :pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
        (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))