Cubic critical, wide range

Percentage Accurate: 17.8% → 97.6%
Time: 15.3s
Alternatives: 8
Speedup: 23.2×

Specification

?
\[\left(\left(4.930380657631324 \cdot 10^{-32} < a \land a < 2.028240960365167 \cdot 10^{+31}\right) \land \left(4.930380657631324 \cdot 10^{-32} < b \land b < 2.028240960365167 \cdot 10^{+31}\right)\right) \land \left(4.930380657631324 \cdot 10^{-32} < c \land c < 2.028240960365167 \cdot 10^{+31}\right)\]
\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \end{array} \]
; Herbie's initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a); the sum -b + sqrt(...) cancels catastrophically when 3*a*c is small relative to b*b.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
// Herbie's initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels catastrophically when 3*a*c is small relative to b*b.
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Herbie's initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels catastrophically when 3*a*c is small relative to b*b.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
// Herbie's initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels catastrophically when 3*a*c is small relative to b*b.
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c):
	# Herbie's initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels when 3*a*c is small relative to b*b.
	return (-b + math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a)
# Herbie's initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels when 3*a*c is small relative to b*b.
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a))
end
% Herbie's initial program: (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels when 3*a*c is small relative to b*b.
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 17.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \end{array} \]
; Herbie's initial program (repeated): (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels when 3*a*c is small relative to b*b.
(FPCore (a b c)
 :precision binary64
 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
// Herbie's initial program (repeated): (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels when 3*a*c is small relative to b*b.
double code(double a, double b, double c) {
	return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Herbie's initial program (repeated): (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels when 3*a*c is small relative to b*b.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
// Herbie's initial program (repeated): (-b + sqrt(b*b - 3*a*c)) / (3*a); -b + sqrt(...) cancels when 3*a*c is small relative to b*b.
public static double code(double a, double b, double c) {
	return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c):
	# Herbie's initial program (repeated): (-b + sqrt(b*b - 3*a*c)) / (3*a).
	return (-b + math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a)
# Herbie's initial program (repeated): (-b + sqrt(b*b - 3*a*c)) / (3*a).
function code(a, b, c)
	return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a))
end
% Herbie's initial program (repeated): (-b + sqrt(b*b - 3*a*c)) / (3*a).
function tmp = code(a, b, c)
	tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}

Alternative 1: 97.6% accurate, 0.2× speedup?

\[\begin{array}{l} \\ -0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-0.5625 \cdot \frac{{c}^{3}}{{b}^{5}} + -1.0546875 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)\right) \end{array} \]
; Herbie alternative 1: degree-3 Horner-form Taylor expansion in a of the original root expression.
(FPCore (a b c)
 :precision binary64
 (+
  (* -0.5 (/ c b))
  (*
   a
   (+
    (* -0.375 (/ (pow c 2.0) (pow b 3.0)))
    (*
     a
     (+
      (* -0.5625 (/ (pow c 3.0) (pow b 5.0)))
      (* -1.0546875 (/ (* a (pow c 4.0)) (pow b 7.0)))))))))
// Herbie alternative 1: degree-3 Horner-form Taylor expansion in a of the original root expression.
double code(double a, double b, double c) {
	return (-0.5 * (c / b)) + (a * ((-0.375 * (pow(c, 2.0) / pow(b, 3.0))) + (a * ((-0.5625 * (pow(c, 3.0) / pow(b, 5.0))) + (-1.0546875 * ((a * pow(c, 4.0)) / pow(b, 7.0)))))));
}
! Herbie alternative 1: degree-3 Horner-form Taylor expansion in a of the original root expression.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((-0.5d0) * (c / b)) + (a * (((-0.375d0) * ((c ** 2.0d0) / (b ** 3.0d0))) + (a * (((-0.5625d0) * ((c ** 3.0d0) / (b ** 5.0d0))) + ((-1.0546875d0) * ((a * (c ** 4.0d0)) / (b ** 7.0d0)))))))
end function
// Herbie alternative 1: degree-3 Horner-form Taylor expansion in a of the original root expression.
public static double code(double a, double b, double c) {
	return (-0.5 * (c / b)) + (a * ((-0.375 * (Math.pow(c, 2.0) / Math.pow(b, 3.0))) + (a * ((-0.5625 * (Math.pow(c, 3.0) / Math.pow(b, 5.0))) + (-1.0546875 * ((a * Math.pow(c, 4.0)) / Math.pow(b, 7.0)))))));
}
def code(a, b, c):
	# Herbie alternative 1: degree-3 Horner-form Taylor expansion in a of the original root expression.
	return (-0.5 * (c / b)) + (a * ((-0.375 * (math.pow(c, 2.0) / math.pow(b, 3.0))) + (a * ((-0.5625 * (math.pow(c, 3.0) / math.pow(b, 5.0))) + (-1.0546875 * ((a * math.pow(c, 4.0)) / math.pow(b, 7.0)))))))
# Herbie alternative 1: degree-3 Horner-form Taylor expansion in a of the original root expression.
function code(a, b, c)
	return Float64(Float64(-0.5 * Float64(c / b)) + Float64(a * Float64(Float64(-0.375 * Float64((c ^ 2.0) / (b ^ 3.0))) + Float64(a * Float64(Float64(-0.5625 * Float64((c ^ 3.0) / (b ^ 5.0))) + Float64(-1.0546875 * Float64(Float64(a * (c ^ 4.0)) / (b ^ 7.0))))))))
end
% Herbie alternative 1: degree-3 Horner-form Taylor expansion in a of the original root expression.
function tmp = code(a, b, c)
	tmp = (-0.5 * (c / b)) + (a * ((-0.375 * ((c ^ 2.0) / (b ^ 3.0))) + (a * ((-0.5625 * ((c ^ 3.0) / (b ^ 5.0))) + (-1.0546875 * ((a * (c ^ 4.0)) / (b ^ 7.0)))))));
end
code[a_, b_, c_] := N[(N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision] + N[(a * N[(N[(-0.375 * N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(a * N[(N[(-0.5625 * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0546875 * N[(N[(a * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
-0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-0.5625 \cdot \frac{{c}^{3}}{{b}^{5}} + -1.0546875 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)\right)
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0 98.8%

    \[\leadsto \color{blue}{-0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-0.5625 \cdot \frac{{c}^{3}}{{b}^{5}} + -0.16666666666666666 \cdot \frac{a \cdot \left(1.265625 \cdot \frac{{c}^{4}}{{b}^{6}} + 5.0625 \cdot \frac{{c}^{4}}{{b}^{6}}\right)}{b}\right)\right)} \]
  4. Taylor expanded in c around 0 98.8%

    \[\leadsto -0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-0.5625 \cdot \frac{{c}^{3}}{{b}^{5}} + \color{blue}{-1.0546875 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}}\right)\right) \]
  5. Final simplification 98.8%

    \[\leadsto -0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + a \cdot \left(-0.5625 \cdot \frac{{c}^{3}}{{b}^{5}} + -1.0546875 \cdot \frac{a \cdot {c}^{4}}{{b}^{7}}\right)\right) \]
  6. Add Preprocessing

Alternative 2: 96.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ -0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + -0.5625 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}}\right) \end{array} \]
; Herbie alternative 2: degree-2 Horner-form Taylor expansion in a of the original root expression.
(FPCore (a b c)
 :precision binary64
 (+
  (* -0.5 (/ c b))
  (*
   a
   (+
    (* -0.375 (/ (pow c 2.0) (pow b 3.0)))
    (* -0.5625 (/ (* a (pow c 3.0)) (pow b 5.0)))))))
// Herbie alternative 2: degree-2 Horner-form Taylor expansion in a of the original root expression.
double code(double a, double b, double c) {
	return (-0.5 * (c / b)) + (a * ((-0.375 * (pow(c, 2.0) / pow(b, 3.0))) + (-0.5625 * ((a * pow(c, 3.0)) / pow(b, 5.0)))));
}
! Herbie alternative 2: degree-2 Horner-form Taylor expansion in a of the original root expression.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((-0.5d0) * (c / b)) + (a * (((-0.375d0) * ((c ** 2.0d0) / (b ** 3.0d0))) + ((-0.5625d0) * ((a * (c ** 3.0d0)) / (b ** 5.0d0)))))
end function
// Herbie alternative 2: degree-2 Horner-form Taylor expansion in a of the original root expression.
public static double code(double a, double b, double c) {
	return (-0.5 * (c / b)) + (a * ((-0.375 * (Math.pow(c, 2.0) / Math.pow(b, 3.0))) + (-0.5625 * ((a * Math.pow(c, 3.0)) / Math.pow(b, 5.0)))));
}
def code(a, b, c):
	# Herbie alternative 2: degree-2 Horner-form Taylor expansion in a of the original root expression.
	return (-0.5 * (c / b)) + (a * ((-0.375 * (math.pow(c, 2.0) / math.pow(b, 3.0))) + (-0.5625 * ((a * math.pow(c, 3.0)) / math.pow(b, 5.0)))))
# Herbie alternative 2: degree-2 Horner-form Taylor expansion in a of the original root expression.
function code(a, b, c)
	return Float64(Float64(-0.5 * Float64(c / b)) + Float64(a * Float64(Float64(-0.375 * Float64((c ^ 2.0) / (b ^ 3.0))) + Float64(-0.5625 * Float64(Float64(a * (c ^ 3.0)) / (b ^ 5.0))))))
end
% Herbie alternative 2: degree-2 Horner-form Taylor expansion in a of the original root expression.
function tmp = code(a, b, c)
	tmp = (-0.5 * (c / b)) + (a * ((-0.375 * ((c ^ 2.0) / (b ^ 3.0))) + (-0.5625 * ((a * (c ^ 3.0)) / (b ^ 5.0)))));
end
code[a_, b_, c_] := N[(N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision] + N[(a * N[(N[(-0.375 * N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5625 * N[(N[(a * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
-0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + -0.5625 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}}\right)
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0 98.1%

    \[\leadsto \color{blue}{-0.5 \cdot \frac{c}{b} + a \cdot \left(-0.5625 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}} + -0.375 \cdot \frac{{c}^{2}}{{b}^{3}}\right)} \]
  4. Final simplification 98.1%

    \[\leadsto -0.5 \cdot \frac{c}{b} + a \cdot \left(-0.375 \cdot \frac{{c}^{2}}{{b}^{3}} + -0.5625 \cdot \frac{a \cdot {c}^{3}}{{b}^{5}}\right) \]
  5. Add Preprocessing

Alternative 3: 96.5% accurate, 0.4× speedup?

\[\begin{array}{l} \\ c \cdot \left(c \cdot \left(-0.5625 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} + -0.375 \cdot \frac{a}{{b}^{3}}\right) + 0.5 \cdot \frac{-1}{b}\right) \end{array} \]
; Herbie alternative 3: Taylor expansion in c of the original root expression, factored with c outermost.
(FPCore (a b c)
 :precision binary64
 (*
  c
  (+
   (*
    c
    (+
     (* -0.5625 (/ (* c (pow a 2.0)) (pow b 5.0)))
     (* -0.375 (/ a (pow b 3.0)))))
   (* 0.5 (/ -1.0 b)))))
// Herbie alternative 3: Taylor expansion in c of the original root expression, factored with c outermost.
double code(double a, double b, double c) {
	return c * ((c * ((-0.5625 * ((c * pow(a, 2.0)) / pow(b, 5.0))) + (-0.375 * (a / pow(b, 3.0))))) + (0.5 * (-1.0 / b)));
}
! Herbie alternative 3: Taylor expansion in c of the original root expression, factored with c outermost.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c * ((c * (((-0.5625d0) * ((c * (a ** 2.0d0)) / (b ** 5.0d0))) + ((-0.375d0) * (a / (b ** 3.0d0))))) + (0.5d0 * ((-1.0d0) / b)))
end function
// Herbie alternative 3: Taylor expansion in c of the original root expression, factored with c outermost.
public static double code(double a, double b, double c) {
	return c * ((c * ((-0.5625 * ((c * Math.pow(a, 2.0)) / Math.pow(b, 5.0))) + (-0.375 * (a / Math.pow(b, 3.0))))) + (0.5 * (-1.0 / b)));
}
def code(a, b, c):
	# Herbie alternative 3: Taylor expansion in c of the original root expression, factored with c outermost.
	return c * ((c * ((-0.5625 * ((c * math.pow(a, 2.0)) / math.pow(b, 5.0))) + (-0.375 * (a / math.pow(b, 3.0))))) + (0.5 * (-1.0 / b)))
# Herbie alternative 3: Taylor expansion in c of the original root expression, factored with c outermost.
function code(a, b, c)
	return Float64(c * Float64(Float64(c * Float64(Float64(-0.5625 * Float64(Float64(c * (a ^ 2.0)) / (b ^ 5.0))) + Float64(-0.375 * Float64(a / (b ^ 3.0))))) + Float64(0.5 * Float64(-1.0 / b))))
end
% Herbie alternative 3: Taylor expansion in c of the original root expression, factored with c outermost.
function tmp = code(a, b, c)
	tmp = c * ((c * ((-0.5625 * ((c * (a ^ 2.0)) / (b ^ 5.0))) + (-0.375 * (a / (b ^ 3.0))))) + (0.5 * (-1.0 / b)));
end
code[a_, b_, c_] := N[(c * N[(N[(c * N[(N[(-0.5625 * N[(N[(c * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.5 * N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(c \cdot \left(-0.5625 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} + -0.375 \cdot \frac{a}{{b}^{3}}\right) + 0.5 \cdot \frac{-1}{b}\right)
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in c around 0 97.8%

    \[\leadsto \color{blue}{c \cdot \left(c \cdot \left(-0.5625 \cdot \frac{{a}^{2} \cdot c}{{b}^{5}} + -0.375 \cdot \frac{a}{{b}^{3}}\right) - 0.5 \cdot \frac{1}{b}\right)} \]
  4. Final simplification 97.8%

    \[\leadsto c \cdot \left(c \cdot \left(-0.5625 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} + -0.375 \cdot \frac{a}{{b}^{3}}\right) + 0.5 \cdot \frac{-1}{b}\right) \]
  5. Add Preprocessing

Alternative 4: 95.3% accurate, 0.5× speedup?

\[\begin{array}{l} \\ -0.5 \cdot \frac{c}{b} + -0.375 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} \end{array} \]
; Herbie alternative 4: two-term Taylor approximation -0.5*c/b - 0.375*a*c^2/b^3.
(FPCore (a b c)
 :precision binary64
 (+ (* -0.5 (/ c b)) (* -0.375 (/ (* a (pow c 2.0)) (pow b 3.0)))))
// Herbie alternative 4: two-term Taylor approximation -0.5*c/b - 0.375*a*c^2/b^3.
double code(double a, double b, double c) {
	return (-0.5 * (c / b)) + (-0.375 * ((a * pow(c, 2.0)) / pow(b, 3.0)));
}
! Herbie alternative 4: two-term Taylor approximation -0.5*c/b - 0.375*a*c^2/b^3.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((-0.5d0) * (c / b)) + ((-0.375d0) * ((a * (c ** 2.0d0)) / (b ** 3.0d0)))
end function
// Herbie alternative 4: two-term Taylor approximation -0.5*c/b - 0.375*a*c^2/b^3.
public static double code(double a, double b, double c) {
	return (-0.5 * (c / b)) + (-0.375 * ((a * Math.pow(c, 2.0)) / Math.pow(b, 3.0)));
}
def code(a, b, c):
	# Herbie alternative 4: two-term Taylor approximation -0.5*c/b - 0.375*a*c^2/b^3.
	return (-0.5 * (c / b)) + (-0.375 * ((a * math.pow(c, 2.0)) / math.pow(b, 3.0)))
# Herbie alternative 4: two-term Taylor approximation -0.5*c/b - 0.375*a*c^2/b^3.
function code(a, b, c)
	return Float64(Float64(-0.5 * Float64(c / b)) + Float64(-0.375 * Float64(Float64(a * (c ^ 2.0)) / (b ^ 3.0))))
end
% Herbie alternative 4: two-term Taylor approximation -0.5*c/b - 0.375*a*c^2/b^3.
function tmp = code(a, b, c)
	tmp = (-0.5 * (c / b)) + (-0.375 * ((a * (c ^ 2.0)) / (b ^ 3.0)));
end
code[a_, b_, c_] := N[(N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
-0.5 \cdot \frac{c}{b} + -0.375 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in a around 0 96.6%

    \[\leadsto \color{blue}{-0.5 \cdot \frac{c}{b} + -0.375 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}}} \]
  4. Final simplification 96.6%

    \[\leadsto -0.5 \cdot \frac{c}{b} + -0.375 \cdot \frac{a \cdot {c}^{2}}{{b}^{3}} \]
  5. Add Preprocessing

Alternative 5: 95.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ c \cdot \left(-0.375 \cdot \left(a \cdot \frac{c}{{b}^{3}}\right) - \frac{0.5}{b}\right) \end{array} \]
; Herbie alternative 5: two-term Taylor approximation factored through c: c*(-0.375*a*c/b^3 - 0.5/b).
(FPCore (a b c)
 :precision binary64
 (* c (- (* -0.375 (* a (/ c (pow b 3.0)))) (/ 0.5 b))))
// Herbie alternative 5: two-term Taylor approximation factored through c: c*(-0.375*a*c/b^3 - 0.5/b).
double code(double a, double b, double c) {
	return c * ((-0.375 * (a * (c / pow(b, 3.0)))) - (0.5 / b));
}
! Herbie alternative 5: two-term Taylor approximation factored through c: c*(-0.375*a*c/b^3 - 0.5/b).
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c * (((-0.375d0) * (a * (c / (b ** 3.0d0)))) - (0.5d0 / b))
end function
// Herbie alternative 5: two-term Taylor approximation factored through c: c*(-0.375*a*c/b^3 - 0.5/b).
public static double code(double a, double b, double c) {
	return c * ((-0.375 * (a * (c / Math.pow(b, 3.0)))) - (0.5 / b));
}
def code(a, b, c):
	# Herbie alternative 5: two-term Taylor approximation factored through c: c*(-0.375*a*c/b^3 - 0.5/b).
	return c * ((-0.375 * (a * (c / math.pow(b, 3.0)))) - (0.5 / b))
# Herbie alternative 5: two-term Taylor approximation factored through c: c*(-0.375*a*c/b^3 - 0.5/b).
function code(a, b, c)
	return Float64(c * Float64(Float64(-0.375 * Float64(a * Float64(c / (b ^ 3.0)))) - Float64(0.5 / b)))
end
% Herbie alternative 5: two-term Taylor approximation factored through c: c*(-0.375*a*c/b^3 - 0.5/b).
function tmp = code(a, b, c)
	tmp = c * ((-0.375 * (a * (c / (b ^ 3.0)))) - (0.5 / b));
end
code[a_, b_, c_] := N[(c * N[(N[(-0.375 * N[(a * N[(c / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(0.5 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \left(-0.375 \cdot \left(a \cdot \frac{c}{{b}^{3}}\right) - \frac{0.5}{b}\right)
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in c around 0 96.0%

    \[\leadsto \frac{\color{blue}{c \cdot \left(-1.5 \cdot \frac{a}{b} + -1.125 \cdot \frac{{a}^{2} \cdot c}{{b}^{3}}\right)}}{3 \cdot a} \]
  4. Taylor expanded in c around 0 96.3%

    \[\leadsto \color{blue}{c \cdot \left(-0.375 \cdot \frac{a \cdot c}{{b}^{3}} - 0.5 \cdot \frac{1}{b}\right)} \]
  5. Step-by-step derivation
    1. associate-/l*96.3%

      \[\leadsto c \cdot \left(-0.375 \cdot \color{blue}{\left(a \cdot \frac{c}{{b}^{3}}\right)} - 0.5 \cdot \frac{1}{b}\right) \]
    2. associate-*r/96.3%

      \[\leadsto c \cdot \left(-0.375 \cdot \left(a \cdot \frac{c}{{b}^{3}}\right) - \color{blue}{\frac{0.5 \cdot 1}{b}}\right) \]
    3. metadata-eval96.3%

      \[\leadsto c \cdot \left(-0.375 \cdot \left(a \cdot \frac{c}{{b}^{3}}\right) - \frac{\color{blue}{0.5}}{b}\right) \]
  6. Simplified 96.3%

    \[\leadsto \color{blue}{c \cdot \left(-0.375 \cdot \left(a \cdot \frac{c}{{b}^{3}}\right) - \frac{0.5}{b}\right)} \]
  7. Final simplification 96.3%

    \[\leadsto c \cdot \left(-0.375 \cdot \left(a \cdot \frac{c}{{b}^{3}}\right) - \frac{0.5}{b}\right) \]
  8. Add Preprocessing

Alternative 6: 90.1% accurate, 23.2× speedup?

\[\begin{array}{l} \\ c \cdot \frac{-0.5}{b} \end{array} \]
(FPCore (a b c) :precision binary64 (* c (/ -0.5 b)))
// Herbie alternative 6: leading-order approximation c * (-0.5 / b).
double code(double a, double b, double c) {
	return c * (-0.5 / b);
}
! Herbie alternative 6: leading-order approximation c * (-0.5 / b).
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = c * ((-0.5d0) / b)
end function
// Herbie alternative 6: leading-order approximation c * (-0.5 / b).
public static double code(double a, double b, double c) {
	return c * (-0.5 / b);
}
def code(a, b, c):
	# Herbie alternative 6: leading-order approximation c * (-0.5 / b); a is unused.
	scale = -0.5 / b
	return c * scale
# Herbie alternative 6: leading-order approximation c * (-0.5 / b).
function code(a, b, c)
	return Float64(c * Float64(-0.5 / b))
end
% Herbie alternative 6: leading-order approximation c * (-0.5 / b).
function tmp = code(a, b, c)
	tmp = c * (-0.5 / b);
end
code[a_, b_, c_] := N[(c * N[(-0.5 / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
c \cdot \frac{-0.5}{b}
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Step-by-step derivation
    1. /-rgt-identity16.6%

      \[\leadsto \frac{\color{blue}{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{1}}}{3 \cdot a} \]
    2. metadata-eval16.6%

      \[\leadsto \frac{\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{\color{blue}{-1 \cdot -1}}}{3 \cdot a} \]
  3. Simplified 16.6%

    \[\leadsto \color{blue}{\frac{\sqrt{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot -3\right)\right)} - b}{3 \cdot a}} \]
  4. Add Preprocessing
  5. Taylor expanded in a around 0 13.0%

    \[\leadsto \frac{\color{blue}{\left(b + -1.5 \cdot \frac{a \cdot c}{b}\right)} - b}{3 \cdot a} \]
  6. Step-by-step derivation
    1. div-sub12.9%

      \[\leadsto \color{blue}{\frac{b + -1.5 \cdot \frac{a \cdot c}{b}}{3 \cdot a} - \frac{b}{3 \cdot a}} \]
    2. +-commutative12.9%

      \[\leadsto \frac{\color{blue}{-1.5 \cdot \frac{a \cdot c}{b} + b}}{3 \cdot a} - \frac{b}{3 \cdot a} \]
    3. fma-define12.9%

      \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right)}}{3 \cdot a} - \frac{b}{3 \cdot a} \]
    4. *-commutative12.9%

      \[\leadsto \frac{\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right)}{\color{blue}{a \cdot 3}} - \frac{b}{3 \cdot a} \]
    5. *-commutative12.9%

      \[\leadsto \frac{\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right)}{a \cdot 3} - \frac{b}{\color{blue}{a \cdot 3}} \]
  7. Applied egg-rr 12.9%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right)}{a \cdot 3} - \frac{b}{a \cdot 3}} \]
  8. Step-by-step derivation
    1. div-sub13.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right) - b}{a \cdot 3}} \]
    2. *-rgt-identity13.0%

      \[\leadsto \frac{\color{blue}{\left(\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right) - b\right) \cdot 1}}{a \cdot 3} \]
    3. associate-*r/13.0%

      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right) - b\right) \cdot \frac{1}{a \cdot 3}} \]
    4. *-lft-identity13.0%

      \[\leadsto \color{blue}{\left(1 \cdot \left(\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right) - b\right)\right)} \cdot \frac{1}{a \cdot 3} \]
    5. *-lft-identity13.0%

      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(-1.5, \frac{a \cdot c}{b}, b\right) - b\right)} \cdot \frac{1}{a \cdot 3} \]
    6. fma-undefine13.0%

      \[\leadsto \left(\color{blue}{\left(-1.5 \cdot \frac{a \cdot c}{b} + b\right)} - b\right) \cdot \frac{1}{a \cdot 3} \]
    7. associate--l+90.7%

      \[\leadsto \color{blue}{\left(-1.5 \cdot \frac{a \cdot c}{b} + \left(b - b\right)\right)} \cdot \frac{1}{a \cdot 3} \]
    8. +-inverses90.7%

      \[\leadsto \left(-1.5 \cdot \frac{a \cdot c}{b} + \color{blue}{0}\right) \cdot \frac{1}{a \cdot 3} \]
    9. +-rgt-identity90.7%

      \[\leadsto \color{blue}{\left(-1.5 \cdot \frac{a \cdot c}{b}\right)} \cdot \frac{1}{a \cdot 3} \]
    10. associate-*r/90.7%

      \[\leadsto \color{blue}{\frac{-1.5 \cdot \left(a \cdot c\right)}{b}} \cdot \frac{1}{a \cdot 3} \]
    11. *-commutative90.7%

      \[\leadsto \frac{\color{blue}{\left(a \cdot c\right) \cdot -1.5}}{b} \cdot \frac{1}{a \cdot 3} \]
    12. associate-/l*90.7%

      \[\leadsto \color{blue}{\left(\left(a \cdot c\right) \cdot \frac{-1.5}{b}\right)} \cdot \frac{1}{a \cdot 3} \]
    13. *-commutative90.7%

      \[\leadsto \left(\left(a \cdot c\right) \cdot \frac{-1.5}{b}\right) \cdot \frac{1}{\color{blue}{3 \cdot a}} \]
    14. associate-/r*90.6%

      \[\leadsto \left(\left(a \cdot c\right) \cdot \frac{-1.5}{b}\right) \cdot \color{blue}{\frac{\frac{1}{3}}{a}} \]
    15. metadata-eval90.6%

      \[\leadsto \left(\left(a \cdot c\right) \cdot \frac{-1.5}{b}\right) \cdot \frac{\color{blue}{0.3333333333333333}}{a} \]
  9. Simplified 90.6%

    \[\leadsto \color{blue}{\left(\left(a \cdot c\right) \cdot \frac{-1.5}{b}\right) \cdot \frac{0.3333333333333333}{a}} \]
  10. Taylor expanded in a around 0 91.3%

    \[\leadsto \color{blue}{-0.5 \cdot \frac{c}{b}} \]
  11. Step-by-step derivation
    1. associate-*r/91.3%

      \[\leadsto \color{blue}{\frac{-0.5 \cdot c}{b}} \]
    2. *-commutative91.3%

      \[\leadsto \frac{\color{blue}{c \cdot -0.5}}{b} \]
    3. associate-/l*91.0%

      \[\leadsto \color{blue}{c \cdot \frac{-0.5}{b}} \]
  12. Simplified 91.0%

    \[\leadsto \color{blue}{c \cdot \frac{-0.5}{b}} \]
  13. Final simplification 91.0%

    \[\leadsto c \cdot \frac{-0.5}{b} \]
  14. Add Preprocessing

Alternative 7: 90.4% accurate, 23.2× speedup?

\[\begin{array}{l} \\ \frac{-0.5 \cdot c}{b} \end{array} \]
(FPCore (a b c) :precision binary64 (/ (* -0.5 c) b))
// Herbie alternative 7: leading-order approximation (-0.5 * c) / b.
double code(double a, double b, double c) {
	return (-0.5 * c) / b;
}
! Herbie alternative 7: leading-order approximation (-0.5 * c) / b.
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = ((-0.5d0) * c) / b
end function
// Herbie alternative 7: leading-order approximation (-0.5 * c) / b.
public static double code(double a, double b, double c) {
	return (-0.5 * c) / b;
}
def code(a, b, c):
	# Herbie alternative 7: leading-order approximation (-0.5 * c) / b; a is unused.
	numerator = -0.5 * c
	return numerator / b
# Herbie alternative 7: leading-order approximation (-0.5 * c) / b.
function code(a, b, c)
	return Float64(Float64(-0.5 * c) / b)
end
% Herbie alternative 7: leading-order approximation (-0.5 * c) / b.
function tmp = code(a, b, c)
	tmp = (-0.5 * c) / b;
end
code[a_, b_, c_] := N[(N[(-0.5 * c), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}

\\
\frac{-0.5 \cdot c}{b}
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Taylor expanded in b around inf 91.3%

    \[\leadsto \color{blue}{-0.5 \cdot \frac{c}{b}} \]
  4. Step-by-step derivation
    1. associate-*r/91.3%

      \[\leadsto \color{blue}{\frac{-0.5 \cdot c}{b}} \]
    2. *-commutative91.3%

      \[\leadsto \frac{\color{blue}{c \cdot -0.5}}{b} \]
  5. Simplified 91.3%

    \[\leadsto \color{blue}{\frac{c \cdot -0.5}{b}} \]
  6. Final simplification 91.3%

    \[\leadsto \frac{-0.5 \cdot c}{b} \]
  7. Add Preprocessing

Alternative 8: 3.3% accurate, 38.7× speedup?

\[\begin{array}{l} \\ \frac{0}{a} \end{array} \]
(FPCore (a b c) :precision binary64 (/ 0.0 a))
// Herbie alternative 8: degenerate approximation 0 / a (0.0 for the positive a in the sampled input range).
double code(double a, double b, double c) {
	return 0.0 / a;
}
! Herbie alternative 8: degenerate approximation 0 / a (0.0 for the positive a in the sampled input range).
real(8) function code(a, b, c)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    code = 0.0d0 / a
end function
// Herbie alternative 8: degenerate approximation 0 / a (0.0 for the positive a in the sampled input range).
public static double code(double a, double b, double c) {
	return 0.0 / a;
}
def code(a, b, c):
	# Herbie alternative 8: degenerate approximation 0 / a; b and c are unused.
	numerator = 0.0
	return numerator / a
# Herbie alternative 8: degenerate approximation 0 / a (0.0 for the positive a in the sampled input range).
function code(a, b, c)
	return Float64(0.0 / a)
end
function tmp = code(a, b, c)
	% Herbie alternative 8: constant-zero approximation 0.0 / a.
	% Inputs b and c are unused but kept for signature parity.
	numerator = 0.0;
	tmp = numerator / a;
end
(* Herbie alternative 8: constant-zero approximation 0/a; b and c are unused. *)
code[a_, b_, c_] := N[(0.0 / a), $MachinePrecision]
\begin{array}{l}

\\
\frac{0}{a}
\end{array}
Derivation
  1. Initial program 16.6%

    \[\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a} \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. flip-- 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\color{blue}{\frac{\left(b \cdot b\right) \cdot \left(b \cdot b\right) - \left(\left(3 \cdot a\right) \cdot c\right) \cdot \left(\left(3 \cdot a\right) \cdot c\right)}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}}{3 \cdot a} \]
    2. div-inv 16.8%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\color{blue}{\left(\left(b \cdot b\right) \cdot \left(b \cdot b\right) - \left(\left(3 \cdot a\right) \cdot c\right) \cdot \left(\left(3 \cdot a\right) \cdot c\right)\right) \cdot \frac{1}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}}{3 \cdot a} \]
    3. pow2 16.8%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left(\color{blue}{{b}^{2}} \cdot \left(b \cdot b\right) - \left(\left(3 \cdot a\right) \cdot c\right) \cdot \left(\left(3 \cdot a\right) \cdot c\right)\right) \cdot \frac{1}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}{3 \cdot a} \]
    4. pow2 16.8%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left({b}^{2} \cdot \color{blue}{{b}^{2}} - \left(\left(3 \cdot a\right) \cdot c\right) \cdot \left(\left(3 \cdot a\right) \cdot c\right)\right) \cdot \frac{1}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}{3 \cdot a} \]
    5. pow-prod-up 16.7%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left(\color{blue}{{b}^{\left(2 + 2\right)}} - \left(\left(3 \cdot a\right) \cdot c\right) \cdot \left(\left(3 \cdot a\right) \cdot c\right)\right) \cdot \frac{1}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}{3 \cdot a} \]
    6. metadata-eval 16.7%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left({b}^{\color{blue}{4}} - \left(\left(3 \cdot a\right) \cdot c\right) \cdot \left(\left(3 \cdot a\right) \cdot c\right)\right) \cdot \frac{1}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}{3 \cdot a} \]
    7. pow2 16.7%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left({b}^{4} - \color{blue}{{\left(\left(3 \cdot a\right) \cdot c\right)}^{2}}\right) \cdot \frac{1}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}{3 \cdot a} \]
    8. associate-*l* 16.7%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left({b}^{4} - {\color{blue}{\left(3 \cdot \left(a \cdot c\right)\right)}}^{2}\right) \cdot \frac{1}{b \cdot b + \left(3 \cdot a\right) \cdot c}}}{3 \cdot a} \]
    9. fma-define 16.8%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left({b}^{4} - {\left(3 \cdot \left(a \cdot c\right)\right)}^{2}\right) \cdot \frac{1}{\color{blue}{\mathsf{fma}\left(b, b, \left(3 \cdot a\right) \cdot c\right)}}}}{3 \cdot a} \]
    10. associate-*l* 16.8%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\left({b}^{4} - {\left(3 \cdot \left(a \cdot c\right)\right)}^{2}\right) \cdot \frac{1}{\mathsf{fma}\left(b, b, \color{blue}{3 \cdot \left(a \cdot c\right)}\right)}}}{3 \cdot a} \]
  4. Applied egg-rr 16.8%

    \[\leadsto \frac{\left(-b\right) + \sqrt{\color{blue}{\left({b}^{4} - {\left(3 \cdot \left(a \cdot c\right)\right)}^{2}\right) \cdot \frac{1}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}}{3 \cdot a} \]
  5. Step-by-step derivation
    1. associate-*r/ 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\color{blue}{\frac{\left({b}^{4} - {\left(3 \cdot \left(a \cdot c\right)\right)}^{2}\right) \cdot 1}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}}{3 \cdot a} \]
    2. *-rgt-identity 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{\color{blue}{{b}^{4} - {\left(3 \cdot \left(a \cdot c\right)\right)}^{2}}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    3. *-commutative 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\color{blue}{\left(\left(a \cdot c\right) \cdot 3\right)}}^{2}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    4. metadata-eval 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(\left(a \cdot c\right) \cdot \color{blue}{\left(--3\right)}\right)}^{2}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    5. distribute-rgt-neg-in 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\color{blue}{\left(-\left(a \cdot c\right) \cdot -3\right)}}^{2}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    6. associate-*r* 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(-\color{blue}{a \cdot \left(c \cdot -3\right)}\right)}^{2}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    7. distribute-rgt-neg-in 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\color{blue}{\left(a \cdot \left(-c \cdot -3\right)\right)}}^{2}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    8. distribute-rgt-neg-in 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \color{blue}{\left(c \cdot \left(--3\right)\right)}\right)}^{2}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    9. metadata-eval 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot \color{blue}{3}\right)\right)}^{2}}{\mathsf{fma}\left(b, b, 3 \cdot \left(a \cdot c\right)\right)}}}{3 \cdot a} \]
    10. *-commutative 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, \color{blue}{\left(a \cdot c\right) \cdot 3}\right)}}}{3 \cdot a} \]
    11. metadata-eval 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, \left(a \cdot c\right) \cdot \color{blue}{\left(--3\right)}\right)}}}{3 \cdot a} \]
    12. distribute-rgt-neg-in 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, \color{blue}{-\left(a \cdot c\right) \cdot -3}\right)}}}{3 \cdot a} \]
    13. associate-*r* 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, -\color{blue}{a \cdot \left(c \cdot -3\right)}\right)}}}{3 \cdot a} \]
    14. distribute-rgt-neg-in 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, \color{blue}{a \cdot \left(-c \cdot -3\right)}\right)}}}{3 \cdot a} \]
    15. distribute-rgt-neg-in 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, a \cdot \color{blue}{\left(c \cdot \left(--3\right)\right)}\right)}}}{3 \cdot a} \]
    16. metadata-eval 16.6%

      \[\leadsto \frac{\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot \color{blue}{3}\right)\right)}}}{3 \cdot a} \]
  6. Simplified 16.6%

    \[\leadsto \frac{\left(-b\right) + \sqrt{\color{blue}{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot 3\right)\right)}}}}{3 \cdot a} \]
  7. Step-by-step derivation
    1. div-inv 16.6%

      \[\leadsto \color{blue}{\left(\left(-b\right) + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot 3\right)\right)}}\right) \cdot \frac{1}{3 \cdot a}} \]
    2. neg-mul-1 16.6%

      \[\leadsto \left(\color{blue}{-1 \cdot b} + \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot 3\right)\right)}}\right) \cdot \frac{1}{3 \cdot a} \]
    3. fma-define 16.6%

      \[\leadsto \color{blue}{\mathsf{fma}\left(-1, b, \sqrt{\frac{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot 3\right)\right)}}\right)} \cdot \frac{1}{3 \cdot a} \]
    4. sqrt-div 16.7%

      \[\leadsto \mathsf{fma}\left(-1, b, \color{blue}{\frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\sqrt{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot 3\right)\right)}}}\right) \cdot \frac{1}{3 \cdot a} \]
    5. fma-undefine 16.6%

      \[\leadsto \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\sqrt{\color{blue}{b \cdot b + a \cdot \left(c \cdot 3\right)}}}\right) \cdot \frac{1}{3 \cdot a} \]
    6. add-sqr-sqrt 16.6%

      \[\leadsto \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\sqrt{b \cdot b + \color{blue}{\sqrt{a \cdot \left(c \cdot 3\right)} \cdot \sqrt{a \cdot \left(c \cdot 3\right)}}}}\right) \cdot \frac{1}{3 \cdot a} \]
    7. hypot-define 16.7%

      \[\leadsto \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\color{blue}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}}\right) \cdot \frac{1}{3 \cdot a} \]
    8. *-commutative 16.7%

      \[\leadsto \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right) \cdot \frac{1}{\color{blue}{a \cdot 3}} \]
  8. Applied egg-rr 16.7%

    \[\leadsto \color{blue}{\mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right) \cdot \frac{1}{a \cdot 3}} \]
  9. Step-by-step derivation
    1. *-commutative 16.7%

      \[\leadsto \color{blue}{\frac{1}{a \cdot 3} \cdot \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right)} \]
    2. *-commutative 16.7%

      \[\leadsto \frac{1}{\color{blue}{3 \cdot a}} \cdot \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right) \]
    3. associate-/r* 16.7%

      \[\leadsto \color{blue}{\frac{\frac{1}{3}}{a}} \cdot \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right) \]
    4. metadata-eval 16.7%

      \[\leadsto \frac{\color{blue}{0.3333333333333333}}{a} \cdot \mathsf{fma}\left(-1, b, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right) \]
    5. fma-undefine 16.7%

      \[\leadsto \frac{0.3333333333333333}{a} \cdot \color{blue}{\left(-1 \cdot b + \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right)} \]
    6. *-commutative 16.7%

      \[\leadsto \frac{0.3333333333333333}{a} \cdot \left(\color{blue}{b \cdot -1} + \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right) \]
    7. fma-define 16.7%

      \[\leadsto \frac{0.3333333333333333}{a} \cdot \color{blue}{\mathsf{fma}\left(b, -1, \frac{\sqrt{{b}^{4} - {\left(a \cdot \left(c \cdot 3\right)\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right)} \]
  10. Simplified 16.7%

    \[\leadsto \color{blue}{\frac{0.3333333333333333}{a} \cdot \mathsf{fma}\left(b, -1, \frac{\sqrt{{b}^{4} - 9 \cdot {\left(a \cdot c\right)}^{2}}}{\mathsf{hypot}\left(b, \sqrt{a \cdot \left(c \cdot 3\right)}\right)}\right)} \]
  11. Taylor expanded in a around 0 3.3%

    \[\leadsto \color{blue}{0.3333333333333333 \cdot \frac{b + -1 \cdot b}{a}} \]
  12. Step-by-step derivation
    1. associate-*r/ 3.3%

      \[\leadsto \color{blue}{\frac{0.3333333333333333 \cdot \left(b + -1 \cdot b\right)}{a}} \]
    2. distribute-rgt1-in 3.3%

      \[\leadsto \frac{0.3333333333333333 \cdot \color{blue}{\left(\left(-1 + 1\right) \cdot b\right)}}{a} \]
    3. metadata-eval 3.3%

      \[\leadsto \frac{0.3333333333333333 \cdot \left(\color{blue}{0} \cdot b\right)}{a} \]
    4. mul0-lft 3.3%

      \[\leadsto \frac{0.3333333333333333 \cdot \color{blue}{0}}{a} \]
    5. metadata-eval 3.3%

      \[\leadsto \frac{\color{blue}{0}}{a} \]
  13. Simplified 3.3%

    \[\leadsto \color{blue}{\frac{0}{a}} \]
  14. Final simplification 3.3%

    \[\leadsto \frac{0}{a} \]
  15. Add Preprocessing

Reproduce

?
herbie shell --seed 2024078 
;; "Cubic critical, wide range": one critical point of a cubic polynomial,
;; (-b + sqrt(b*b - 3*a*c)) / (3*a), with a, b, and c each sampled from
;; the open interval (4.930380657631324e-32, 2.028240960365167e+31).
(FPCore (a b c)
  :name "Cubic critical, wide range"
  :precision binary64
  :pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
  (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))