Cubic critical, medium range

Percentage Accurate: 31.6% → 95.5%
Time: 13.5s
Alternatives: 7
Speedup: 2.9×

Specification

?
\[\left(\left(1.1102230246251565 \cdot 10^{-16} < a \land a < 9007199254740992\right) \land \left(1.1102230246251565 \cdot 10^{-16} < b \land b < 9007199254740992\right)\right) \land \left(1.1102230246251565 \cdot 10^{-16} < c \land c < 9007199254740992\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \end{array} \]
;; Specification: (-b + sqrt(b*b - 3*a*c)) / (3*a), evaluated in binary64.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
/* Computes (-b + sqrt(b*b - 3*a*c)) / (3*a) directly in binary64.
   Operation order matches the FPCore specification exactly. */
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Computes (-b + sqrt(b*b - 3*a*c)) / (3*a) in double precision (real(8)).
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** Returns (-b + sqrt(b*b - 3*a*c)) / (3*a), computed in IEEE-754 double precision. */
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c):
	"""Return (-b + sqrt(b*b - 3*a*c)) / (3*a), evaluated in binary64.

	Intermediate names are introduced for readability only; the sequence
	of floating-point operations is identical to the original expression.
	"""
	three_a = 3.0 * a
	discriminant = (b * b) - (three_a * c)
	return (-b + math.sqrt(discriminant)) / three_a
# Computes (-b + sqrt(b*b - 3a*c)) / (3a); the explicit Float64() conversions
# pin every intermediate result to binary64 rounding.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a))
end
% Computes (-b + sqrt(b*b - 3*a*c)) / (3*a) in double precision.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 7 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 31.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \end{array} \]
;; Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a) in binary64 (31.6% accurate).
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
/* Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a), evaluated directly
   in binary64 with the specification's operation order. */
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a) in double precision (real(8)).
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a) in IEEE-754 double precision. */
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c):
	"""Return (-b + sqrt(b*b - 3*a*c)) / (3*a) in binary64.

	Named intermediates only; the floating-point operation sequence is
	unchanged from the original one-line form.
	"""
	scaled_a = 3.0 * a
	under_root = (b * b) - (scaled_a * c)
	return (-b + math.sqrt(under_root)) / scaled_a
# Initial program: (-b + sqrt(b*b - 3a*c)) / (3a); Float64() conversions pin
# each intermediate to binary64 rounding.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a))
end
% Initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a) in double precision.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}

Alternative 1: 95.5% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := b \cdot \left(b \cdot b\right)\\ \mathsf{fma}\left(\mathsf{fma}\left(c, c \cdot \frac{-0.375}{t\_0}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot t\_0}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right)}{b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot t\_0\right)\right)} \cdot -0.16666666666666666\right)\right), a, \frac{c \cdot -0.5}{b}\right) \end{array} \end{array} \]
;; Alternative 1: Taylor expansion of the original expression in a around 0
;; (see derivation steps 3-6), regrouped with fused multiply-adds; binary64.
(FPCore (a b c)
 :precision binary64
 (let* ((t_0 (* b (* b b))))
   (fma
    (fma
     c
     (* c (/ -0.375 t_0))
     (*
      a
      (fma
       c
       (* (* c c) (/ -0.5625 (* (* b b) t_0)))
       (*
        (/ (* a (* (* c (* c (* c c))) 6.328125)) (* b (* (* b b) (* b t_0))))
        -0.16666666666666666))))
    a
    (/ (* c -0.5) b))))
/* Alternative 1: Taylor series of the original expression in a (about 0),
   evaluated with fused multiply-adds; t_0 = b^3. Requires C99 fma from <math.h>. */
double code(double a, double b, double c) {
	double t_0 = b * (b * b);
	return fma(fma(c, (c * (-0.375 / t_0)), (a * fma(c, ((c * c) * (-0.5625 / ((b * b) * t_0))), (((a * ((c * (c * (c * c))) * 6.328125)) / (b * ((b * b) * (b * t_0)))) * -0.16666666666666666)))), a, ((c * -0.5) / b));
}
# Alternative 1: Taylor series in a (about 0) of the original expression,
# using Base.fma; t_0 = b^3. Float64() conversions pin binary64 rounding.
function code(a, b, c)
	t_0 = Float64(b * Float64(b * b))
	return fma(fma(c, Float64(c * Float64(-0.375 / t_0)), Float64(a * fma(c, Float64(Float64(c * c) * Float64(-0.5625 / Float64(Float64(b * b) * t_0))), Float64(Float64(Float64(a * Float64(Float64(c * Float64(c * Float64(c * c))) * 6.328125)) / Float64(b * Float64(Float64(b * b) * Float64(b * t_0)))) * -0.16666666666666666)))), a, Float64(Float64(c * -0.5) / b))
end
code[a_, b_, c_] := Block[{t$95$0 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, N[(N[(c * N[(c * N[(-0.375 / t$95$0), $MachinePrecision]), $MachinePrecision] + N[(a * N[(c * N[(N[(c * c), $MachinePrecision] * N[(-0.5625 / N[(N[(b * b), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(a * N[(N[(c * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 6.328125), $MachinePrecision]), $MachinePrecision] / N[(b * N[(N[(b * b), $MachinePrecision] * N[(b * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * a + N[(N[(c * -0.5), $MachinePrecision] / b), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := b \cdot \left(b \cdot b\right)\\
\mathsf{fma}\left(\mathsf{fma}\left(c, c \cdot \frac{-0.375}{t\_0}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot t\_0}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right)}{b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot t\_0\right)\right)} \cdot -0.16666666666666666\right)\right), a, \frac{c \cdot -0.5}{b}\right)
\end{array}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b} + a \cdot \left(\frac{-3}{8} \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(\frac{-9}{16} \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{6} \cdot \frac{a \cdot \left(\frac{81}{64} \cdot \frac{{c}^{4}}{{b}^{6}} + \frac{81}{16} \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Simplified95.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, \mathsf{fma}\left(\frac{\frac{{c}^{4}}{{b}^{6}} \cdot \left(6.328125 \cdot a\right)}{b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{{b}^{5}}\right), \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right)} \]
  5. Applied egg-rr95.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(c, c \cdot \frac{-0.375}{b \cdot \left(b \cdot b\right)}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}, \frac{\left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right) \cdot a}{\left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right) \cdot b} \cdot -0.16666666666666666\right)\right), a, \frac{c \cdot -0.5}{b}\right)} \]
  6. Final simplification95.8%

    \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(c, c \cdot \frac{-0.375}{b \cdot \left(b \cdot b\right)}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right)}{b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right)} \cdot -0.16666666666666666\right)\right), a, \frac{c \cdot -0.5}{b}\right) \]
  7. Add Preprocessing

Alternative 2: 95.2% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := b \cdot \left(b \cdot b\right)\\ \mathsf{fma}\left(c, \frac{-0.5}{b}, a \cdot \mathsf{fma}\left(c, c \cdot \frac{-0.375}{t\_0}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot t\_0}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right)}{b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot t\_0\right)\right)} \cdot -0.16666666666666666\right)\right)\right) \end{array} \end{array} \]
;; Alternative 2: same Taylor series in a (about 0) as Alternative 1, but with
;; the leading -c/(2b) term carried in the outermost fma; binary64.
(FPCore (a b c)
 :precision binary64
 (let* ((t_0 (* b (* b b))))
   (fma
    c
    (/ -0.5 b)
    (*
     a
     (fma
      c
      (* c (/ -0.375 t_0))
      (*
       a
       (fma
        c
        (* (* c c) (/ -0.5625 (* (* b b) t_0)))
        (*
         (/ (* a (* (* c (* c (* c c))) 6.328125)) (* b (* (* b b) (* b t_0))))
         -0.16666666666666666))))))))
/* Alternative 2: Taylor series in a (about 0), Horner-style nesting with the
   -c/(2b) leading term in the outer fma; t_0 = b^3. Requires C99 <math.h>. */
double code(double a, double b, double c) {
	double t_0 = b * (b * b);
	return fma(c, (-0.5 / b), (a * fma(c, (c * (-0.375 / t_0)), (a * fma(c, ((c * c) * (-0.5625 / ((b * b) * t_0))), (((a * ((c * (c * (c * c))) * 6.328125)) / (b * ((b * b) * (b * t_0)))) * -0.16666666666666666))))));
}
# Alternative 2: Taylor series in a (about 0), nested Horner form via Base.fma;
# t_0 = b^3. Float64() conversions pin binary64 rounding.
function code(a, b, c)
	t_0 = Float64(b * Float64(b * b))
	return fma(c, Float64(-0.5 / b), Float64(a * fma(c, Float64(c * Float64(-0.375 / t_0)), Float64(a * fma(c, Float64(Float64(c * c) * Float64(-0.5625 / Float64(Float64(b * b) * t_0))), Float64(Float64(Float64(a * Float64(Float64(c * Float64(c * Float64(c * c))) * 6.328125)) / Float64(b * Float64(Float64(b * b) * Float64(b * t_0)))) * -0.16666666666666666))))))
end
code[a_, b_, c_] := Block[{t$95$0 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, N[(c * N[(-0.5 / b), $MachinePrecision] + N[(a * N[(c * N[(c * N[(-0.375 / t$95$0), $MachinePrecision]), $MachinePrecision] + N[(a * N[(c * N[(N[(c * c), $MachinePrecision] * N[(-0.5625 / N[(N[(b * b), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(a * N[(N[(c * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 6.328125), $MachinePrecision]), $MachinePrecision] / N[(b * N[(N[(b * b), $MachinePrecision] * N[(b * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := b \cdot \left(b \cdot b\right)\\
\mathsf{fma}\left(c, \frac{-0.5}{b}, a \cdot \mathsf{fma}\left(c, c \cdot \frac{-0.375}{t\_0}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot t\_0}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right)}{b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot t\_0\right)\right)} \cdot -0.16666666666666666\right)\right)\right)
\end{array}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b} + a \cdot \left(\frac{-3}{8} \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(\frac{-9}{16} \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{6} \cdot \frac{a \cdot \left(\frac{81}{64} \cdot \frac{{c}^{4}}{{b}^{6}} + \frac{81}{16} \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Simplified95.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, \mathsf{fma}\left(\frac{\frac{{c}^{4}}{{b}^{6}} \cdot \left(6.328125 \cdot a\right)}{b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{{b}^{5}}\right), \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right)} \]
  5. Applied egg-rr95.5%

    \[\leadsto \color{blue}{\mathsf{fma}\left(c, \frac{-0.5}{b}, a \cdot \mathsf{fma}\left(c, c \cdot \frac{-0.375}{b \cdot \left(b \cdot b\right)}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}, \frac{\left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right) \cdot a}{\left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right) \cdot b} \cdot -0.16666666666666666\right)\right)\right)} \]
  6. Final simplification95.5%

    \[\leadsto \mathsf{fma}\left(c, \frac{-0.5}{b}, a \cdot \mathsf{fma}\left(c, c \cdot \frac{-0.375}{b \cdot \left(b \cdot b\right)}, a \cdot \mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \frac{-0.5625}{\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)}, \frac{a \cdot \left(\left(c \cdot \left(c \cdot \left(c \cdot c\right)\right)\right) \cdot 6.328125\right)}{b \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)\right)} \cdot -0.16666666666666666\right)\right)\right) \]
  7. Add Preprocessing

Alternative 3: 93.9% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(a, \frac{\mathsf{fma}\left(c, c \cdot -0.375, \frac{a \cdot \left(c \cdot \left(\left(c \cdot c\right) \cdot -0.5625\right)\right)}{b \cdot b}\right)}{b \cdot \left(b \cdot b\right)}, -0.5 \cdot \frac{c}{b}\right) \end{array} \]
;; Alternative 3: series truncated further (Taylor in a about 0, then in b
;; around infinity — derivation steps 3-7), keeping terms through a^2; binary64.
(FPCore (a b c)
 :precision binary64
 (fma
  a
  (/
   (fma c (* c -0.375) (/ (* a (* c (* (* c c) -0.5625))) (* b b)))
   (* b (* b b)))
  (* -0.5 (/ c b))))
/* Alternative 3: truncated series (Taylor in a about 0, then b -> inf),
   keeping terms through a^2; requires C99 fma from <math.h>. */
double code(double a, double b, double c) {
	return fma(a, (fma(c, (c * -0.375), ((a * (c * ((c * c) * -0.5625))) / (b * b))) / (b * (b * b))), (-0.5 * (c / b)));
}
# Alternative 3: truncated series (Taylor in a about 0, then b -> inf) through
# the a^2 term, using Base.fma; Float64() conversions pin binary64 rounding.
function code(a, b, c)
	return fma(a, Float64(fma(c, Float64(c * -0.375), Float64(Float64(a * Float64(c * Float64(Float64(c * c) * -0.5625))) / Float64(b * b))) / Float64(b * Float64(b * b))), Float64(-0.5 * Float64(c / b)))
end
code[a_, b_, c_] := N[(a * N[(N[(c * N[(c * -0.375), $MachinePrecision] + N[(N[(a * N[(c * N[(N[(c * c), $MachinePrecision] * -0.5625), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(a, \frac{\mathsf{fma}\left(c, c \cdot -0.375, \frac{a \cdot \left(c \cdot \left(\left(c \cdot c\right) \cdot -0.5625\right)\right)}{b \cdot b}\right)}{b \cdot \left(b \cdot b\right)}, -0.5 \cdot \frac{c}{b}\right)
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0

    \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b} + a \cdot \left(\frac{-3}{8} \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(\frac{-9}{16} \cdot \frac{{c}^{3}}{{b}^{5}} + \frac{-1}{6} \cdot \frac{a \cdot \left(\frac{81}{64} \cdot \frac{{c}^{4}}{{b}^{6}} + \frac{81}{16} \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Simplified95.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, \mathsf{fma}\left(\frac{\frac{{c}^{4}}{{b}^{6}} \cdot \left(6.328125 \cdot a\right)}{b}, -0.16666666666666666, \frac{\left(c \cdot \left(c \cdot c\right)\right) \cdot -0.5625}{{b}^{5}}\right), \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot \left(b \cdot b\right)}\right), -0.5 \cdot \frac{c}{b}\right)} \]
  5. Taylor expanded in b around inf

    \[\leadsto \mathsf{fma}\left(a, \color{blue}{\frac{\frac{-9}{16} \cdot \frac{a \cdot {c}^{3}}{{b}^{2}} + \frac{-3}{8} \cdot {c}^{2}}{{b}^{3}}}, \frac{-1}{2} \cdot \frac{c}{b}\right) \]
  6. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \mathsf{fma}\left(a, \color{blue}{\frac{\frac{-9}{16} \cdot \frac{a \cdot {c}^{3}}{{b}^{2}} + \frac{-3}{8} \cdot {c}^{2}}{{b}^{3}}}, \frac{-1}{2} \cdot \frac{c}{b}\right) \]
  7. Simplified94.1%

    \[\leadsto \mathsf{fma}\left(a, \color{blue}{\frac{\mathsf{fma}\left(c, c \cdot -0.375, \frac{a \cdot \left(c \cdot \left(\left(c \cdot c\right) \cdot -0.5625\right)\right)}{b \cdot b}\right)}{b \cdot \left(b \cdot b\right)}}, -0.5 \cdot \frac{c}{b}\right) \]
  8. Add Preprocessing

Alternative 4: 93.8% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \frac{c \cdot \mathsf{fma}\left(c, \mathsf{fma}\left(-0.5625, \frac{a \cdot \left(c \cdot a\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)}, \frac{-0.375 \cdot a}{b \cdot b}\right), -0.5\right)}{b} \end{array} \]
;; Alternative 4: Taylor expansion in b around infinity (derivation step 3),
;; with c factored out of the inner polynomial; binary64.
(FPCore (a b c)
 :precision binary64
 (/
  (*
   c
   (fma
    c
    (fma
     -0.5625
     (/ (* a (* c a)) (* (* b b) (* b b)))
     (/ (* -0.375 a) (* b b)))
    -0.5))
  b))
/* Alternative 4: Taylor expansion in b around infinity, with c factored out
   of the inner polynomial; requires C99 fma from <math.h>. */
double code(double a, double b, double c) {
	return (c * fma(c, fma(-0.5625, ((a * (c * a)) / ((b * b) * (b * b))), ((-0.375 * a) / (b * b))), -0.5)) / b;
}
# Alternative 4: Taylor expansion in b around infinity with c factored out,
# using Base.fma; Float64() conversions pin binary64 rounding.
function code(a, b, c)
	return Float64(Float64(c * fma(c, fma(-0.5625, Float64(Float64(a * Float64(c * a)) / Float64(Float64(b * b) * Float64(b * b))), Float64(Float64(-0.375 * a) / Float64(b * b))), -0.5)) / b)
end
code[a_, b_, c_] := N[(N[(c * N[(c * N[(-0.5625 * N[(N[(a * N[(c * a), $MachinePrecision]), $MachinePrecision] / N[(N[(b * b), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-0.375 * a), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{c \cdot \mathsf{fma}\left(c, \mathsf{fma}\left(-0.5625, \frac{a \cdot \left(c \cdot a\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)}, \frac{-0.375 \cdot a}{b \cdot b}\right), -0.5\right)}{b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{\frac{-9}{16} \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
  4. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{\frac{-9}{16} \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{4}} + \left(\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}\right)}{b}} \]
  5. Simplified94.1%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(\left(a \cdot a\right) \cdot -0.5625, \frac{c \cdot \left(c \cdot c\right)}{{b}^{4}}, \mathsf{fma}\left(a, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot b}, c \cdot -0.5\right)\right)}{b}} \]
  6. Taylor expanded in c around 0

    \[\leadsto \frac{\color{blue}{c \cdot \left(c \cdot \left(\frac{-9}{16} \cdot \frac{{a}^{2} \cdot c}{{b}^{4}} + \frac{-3}{8} \cdot \frac{a}{{b}^{2}}\right) - \frac{1}{2}\right)}}{b} \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \frac{\color{blue}{c \cdot \left(c \cdot \left(\frac{-9}{16} \cdot \frac{{a}^{2} \cdot c}{{b}^{4}} + \frac{-3}{8} \cdot \frac{a}{{b}^{2}}\right) - \frac{1}{2}\right)}}{b} \]
    2. sub-negN/A

      \[\leadsto \frac{c \cdot \color{blue}{\left(c \cdot \left(\frac{-9}{16} \cdot \frac{{a}^{2} \cdot c}{{b}^{4}} + \frac{-3}{8} \cdot \frac{a}{{b}^{2}}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}}{b} \]
    3. metadata-evalN/A

      \[\leadsto \frac{c \cdot \left(c \cdot \left(\frac{-9}{16} \cdot \frac{{a}^{2} \cdot c}{{b}^{4}} + \frac{-3}{8} \cdot \frac{a}{{b}^{2}}\right) + \color{blue}{\frac{-1}{2}}\right)}{b} \]
    4. lower-fma.f64N/A

      \[\leadsto \frac{c \cdot \color{blue}{\mathsf{fma}\left(c, \frac{-9}{16} \cdot \frac{{a}^{2} \cdot c}{{b}^{4}} + \frac{-3}{8} \cdot \frac{a}{{b}^{2}}, \frac{-1}{2}\right)}}{b} \]
  8. Simplified94.0%

    \[\leadsto \frac{\color{blue}{c \cdot \mathsf{fma}\left(c, \mathsf{fma}\left(-0.5625, \frac{a \cdot \left(a \cdot c\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)}, \frac{a \cdot -0.375}{b \cdot b}\right), -0.5\right)}}{b} \]
  9. Final simplification94.0%

    \[\leadsto \frac{c \cdot \mathsf{fma}\left(c, \mathsf{fma}\left(-0.5625, \frac{a \cdot \left(c \cdot a\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)}, \frac{-0.375 \cdot a}{b \cdot b}\right), -0.5\right)}{b} \]
  10. Add Preprocessing

Alternative 5: 90.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\mathsf{fma}\left(a, \frac{-0.375 \cdot \left(c \cdot c\right)}{b \cdot b}, c \cdot -0.5\right)}{b} \end{array} \]
;; Alternative 5: two-term Taylor expansion in b around infinity
;; (derivation step 3): -c/(2b) - (3/8)*a*c^2/b^3; binary64.
(FPCore (a b c)
 :precision binary64
 (/ (fma a (/ (* -0.375 (* c c)) (* b b)) (* c -0.5)) b))
/* Alternative 5: two-term Taylor expansion in b around infinity;
   requires C99 fma from <math.h>. */
double code(double a, double b, double c) {
	return fma(a, ((-0.375 * (c * c)) / (b * b)), (c * -0.5)) / b;
}
# Alternative 5: two-term Taylor expansion in b around infinity, using
# Base.fma; Float64() conversions pin binary64 rounding.
function code(a, b, c)
	return Float64(fma(a, Float64(Float64(-0.375 * Float64(c * c)) / Float64(b * b)), Float64(c * -0.5)) / b)
end
code[a_, b_, c_] := N[(N[(a * N[(N[(-0.375 * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + N[(c * -0.5), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{\mathsf{fma}\left(a, \frac{-0.375 \cdot \left(c \cdot c\right)}{b \cdot b}, c \cdot -0.5\right)}{b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  4. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  5. Simplified90.8%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(a, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot b}, c \cdot -0.5\right)}{b}} \]
  6. Final simplification90.8%

    \[\leadsto \frac{\mathsf{fma}\left(a, \frac{-0.375 \cdot \left(c \cdot c\right)}{b \cdot b}, c \cdot -0.5\right)}{b} \]
  7. Add Preprocessing

Alternative 6: 90.6% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \frac{c \cdot \mathsf{fma}\left(-0.375 \cdot a, \frac{c}{b \cdot b}, -0.5\right)}{b} \end{array} \]
;; Alternative 6: the same two-term series as Alternative 5 with c factored
;; out: (c/b) * (-3a c / (8 b^2) - 1/2); binary64.
(FPCore (a b c)
 :precision binary64
 (/ (* c (fma (* -0.375 a) (/ c (* b b)) -0.5)) b))
/* Alternative 6: two-term Taylor series in b around infinity with c factored
   out of the sum; requires C99 fma from <math.h>. */
double code(double a, double b, double c) {
	return (c * fma((-0.375 * a), (c / (b * b)), -0.5)) / b;
}
# Alternative 6: two-term Taylor series in b around infinity with c factored
# out, using Base.fma; Float64() conversions pin binary64 rounding.
function code(a, b, c)
	return Float64(Float64(c * fma(Float64(-0.375 * a), Float64(c / Float64(b * b)), -0.5)) / b)
end
code[a_, b_, c_] := N[(N[(c * N[(N[(-0.375 * a), $MachinePrecision] * N[(c / N[(b * b), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{c \cdot \mathsf{fma}\left(-0.375 \cdot a, \frac{c}{b \cdot b}, -0.5\right)}{b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  4. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{\frac{-1}{2} \cdot c + \frac{-3}{8} \cdot \frac{a \cdot {c}^{2}}{{b}^{2}}}{b}} \]
  5. Simplified90.8%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(a, \frac{\left(c \cdot c\right) \cdot -0.375}{b \cdot b}, c \cdot -0.5\right)}{b}} \]
  6. Taylor expanded in c around 0

    \[\leadsto \frac{\color{blue}{c \cdot \left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{2}} - \frac{1}{2}\right)}}{b} \]
  7. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \frac{c \cdot \color{blue}{\left(\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{2}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}}{b} \]
    2. associate-*r/N/A

      \[\leadsto \frac{c \cdot \left(\color{blue}{\frac{\frac{-3}{8} \cdot \left(a \cdot c\right)}{{b}^{2}}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}{b} \]
    3. associate-*r*N/A

      \[\leadsto \frac{c \cdot \left(\frac{\color{blue}{\left(\frac{-3}{8} \cdot a\right) \cdot c}}{{b}^{2}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}{b} \]
    4. associate-*l/N/A

      \[\leadsto \frac{c \cdot \left(\color{blue}{\frac{\frac{-3}{8} \cdot a}{{b}^{2}} \cdot c} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}{b} \]
    5. associate-*r/N/A

      \[\leadsto \frac{c \cdot \left(\color{blue}{\left(\frac{-3}{8} \cdot \frac{a}{{b}^{2}}\right)} \cdot c + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}{b} \]
    6. metadata-evalN/A

      \[\leadsto \frac{c \cdot \left(\left(\frac{-3}{8} \cdot \frac{a}{{b}^{2}}\right) \cdot c + \color{blue}{\frac{-1}{2}}\right)}{b} \]
    7. lower-*.f64N/A

      \[\leadsto \frac{\color{blue}{c \cdot \left(\left(\frac{-3}{8} \cdot \frac{a}{{b}^{2}}\right) \cdot c + \frac{-1}{2}\right)}}{b} \]
    8. associate-*r/N/A

      \[\leadsto \frac{c \cdot \left(\color{blue}{\frac{\frac{-3}{8} \cdot a}{{b}^{2}}} \cdot c + \frac{-1}{2}\right)}{b} \]
    9. associate-*l/N/A

      \[\leadsto \frac{c \cdot \left(\color{blue}{\frac{\left(\frac{-3}{8} \cdot a\right) \cdot c}{{b}^{2}}} + \frac{-1}{2}\right)}{b} \]
    10. associate-*r*N/A

      \[\leadsto \frac{c \cdot \left(\frac{\color{blue}{\frac{-3}{8} \cdot \left(a \cdot c\right)}}{{b}^{2}} + \frac{-1}{2}\right)}{b} \]
    11. associate-*r/N/A

      \[\leadsto \frac{c \cdot \left(\color{blue}{\frac{-3}{8} \cdot \frac{a \cdot c}{{b}^{2}}} + \frac{-1}{2}\right)}{b} \]
    12. associate-/l*N/A

      \[\leadsto \frac{c \cdot \left(\frac{-3}{8} \cdot \color{blue}{\left(a \cdot \frac{c}{{b}^{2}}\right)} + \frac{-1}{2}\right)}{b} \]
    13. associate-*r*N/A

      \[\leadsto \frac{c \cdot \left(\color{blue}{\left(\frac{-3}{8} \cdot a\right) \cdot \frac{c}{{b}^{2}}} + \frac{-1}{2}\right)}{b} \]
    14. lower-fma.f64N/A

      \[\leadsto \frac{c \cdot \color{blue}{\mathsf{fma}\left(\frac{-3}{8} \cdot a, \frac{c}{{b}^{2}}, \frac{-1}{2}\right)}}{b} \]
  8. Simplified90.7%

    \[\leadsto \frac{\color{blue}{c \cdot \mathsf{fma}\left(a \cdot -0.375, \frac{c}{b \cdot b}, -0.5\right)}}{b} \]
  9. Final simplification90.7%

    \[\leadsto \frac{c \cdot \mathsf{fma}\left(-0.375 \cdot a, \frac{c}{b \cdot b}, -0.5\right)}{b} \]
  10. Add Preprocessing

Alternative 7: 81.1% accurate, 2.9× speedup?

\[\begin{array}{l} \\ -0.5 \cdot \frac{c}{b} \end{array} \]
(FPCore (a b c) :precision binary64 (* -0.5 (/ c b)))
/* Alternative 7: leading-order approximation -c/(2*b).
   Parameter a is accepted for interface compatibility but unused. */
double code(double a, double b, double c) {
	double ratio = c / b;
	return -0.5 * ratio;
}
! Alternative 7: leading-order approximation -c/(2*b) in double precision;
! the argument a is unused in this approximation.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-0.5d0) * (c / b)
end function
/** Alternative 7: leading-order approximation -0.5 * (c / b); parameter a is unused. */
public static double code(double a, double b, double c) {
	return -0.5 * (c / b);
}
def code(a, b, c):
	"""Return the leading-order approximation -c / (2*b).

	``a`` is accepted for interface compatibility but unused here.
	The operation sequence (divide, then scale by -0.5) matches the
	original exactly.
	"""
	ratio = c / b
	return -0.5 * ratio
# Alternative 7: leading-order approximation -c/(2b); a is unused.
# Float64() conversion pins binary64 rounding.
function code(a, b, c)
	return Float64(-0.5 * Float64(c / b))
end
% Alternative 7: leading-order approximation -c/(2*b); a is unused.
function tmp = code(a, b, c)
	tmp = -0.5 * (c / b);
end
code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
-0.5 \cdot \frac{c}{b}
\end{array}
Derivation
  1. Initial program 32.0%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf

    \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b}} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\frac{-1}{2} \cdot \frac{c}{b}} \]
    2. lower-/.f6480.7

      \[\leadsto -0.5 \cdot \color{blue}{\frac{c}{b}} \]
  5. Simplified80.7%

    \[\leadsto \color{blue}{-0.5 \cdot \frac{c}{b}} \]
  6. Add Preprocessing

Reproduce

?
herbie shell --seed 2024208 
;; Full benchmark specification, including the input-range precondition
;; (each variable in (1.1102230246251565e-16, 9007199254740992)),
;; for reproducing this report with `herbie shell`.
(FPCore (a b c)
  :name "Cubic critical, medium range"
  :precision binary64
  :pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))