
;; Input program: (-b + sqrt(b^2 - 3ac)) / (3a) in binary64 -- the critical point of a cubic (see the named job spec at the end of the report).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Critical point of the cubic a*x**3 + b*x**2 + c*x: (-b + sqrt(b**2 - 3*a*c)) / (3*a).
! Identical arithmetic to the report's input expression; only locals and layout differ.
real(8) function code(a, b, c)
  implicit none
  real(8), intent(in) :: a
  real(8), intent(in) :: b
  real(8), intent(in) :: c
  real(8) :: three_a, disc
  three_a = 3.0d0 * a
  disc = (b * b) - (three_a * c)
  ! sqrt(disc) - b is the same IEEE operation sequence as -b + sqrt(disc).
  code = (sqrt(disc) - b) / three_a
end function code
/** Critical point of the cubic a*x^3 + b*x^2 + c*x: (-b + sqrt(b^2 - 3ac)) / (3a), in double precision. */
public static double code(double a, double b, double c) {
    final double threeA = 3.0 * a;
    final double discriminant = (b * b) - (threeA * c);
    // sqrt(d) - b computes exactly the same double as -b + sqrt(d).
    return (Math.sqrt(discriminant) - b) / threeA;
}
def code(a, b, c):
    """Critical point of the cubic a*x^3 + b*x^2 + c*x: (-b + sqrt(b^2 - 3ac)) / (3a)."""
    three_a = 3.0 * a
    disc = b * b - three_a * c
    # sqrt(disc) - b yields the identical float as -b + sqrt(disc).
    return (math.sqrt(disc) - b) / three_a
# Input program (-b + sqrt(b*b - (3a)*c)) / (3a); the Float64() wrappers pin every intermediate to binary64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
% Input program: (-b + sqrt(b*b - (3*a)*c)) / (3*a) in double precision.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
(* Input program (-b + Sqrt[b^2 - 3 a c]) / (3 a); each N[..., $MachinePrecision] forces machine-precision rounding per step, mirroring binary64. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
;; Alternative 0: the report repeats the unmodified input expression here.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
/* Duplicate listing of the input program: (-b + sqrt(b*b - (3*a)*c)) / (3*a). */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Duplicate listing of the input program: (-b + sqrt(b*b - (3*a)*c)) / (3*a) in real(8).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** Duplicate listing of the input program: (-b + sqrt(b*b - (3*a)*c)) / (3*a). */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
# Duplicate listing of the input program (requires `import math` for math.sqrt).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a)
# Duplicate listing of the input program; Float64() wrappers keep every intermediate in binary64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
% Duplicate listing of the input program in double precision.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
(* Duplicate listing of the input program; N[..., $MachinePrecision] models machine rounding per operation. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
;; Herbie alternative: truncated series for the root, fma-chained as
;; -c/(2b) - 0.375*a*c^2/b^3 - 0.5625*a^2*c^3/b^5 - (6.328125/6)*a^3*c^4/b^7.
;; Accurate when |b| dominates (Taylor expansions per the derivation log below).
(FPCore (a b c)
:precision binary64
(fma
-0.16666666666666666
(/ (* (pow c 4.0) 6.328125) (/ (pow b 7.0) (pow a 3.0)))
(fma
-0.5
(/ c b)
(fma
-0.375
(* a (/ c (/ (pow b 3.0) c)))
(* -0.5625 (* (/ (pow c 3.0) (pow b 5.0)) (* a a)))))))
/* Herbie series alternative: fma-chained terms -0.5*c/b - 0.375*a*c^2/b^3
 * - 0.5625*a^2*c^3/b^5 - (6.328125/6)*a^3*c^4/b^7 (same form as the FPCore above). */
double code(double a, double b, double c) {
return fma(-0.16666666666666666, ((pow(c, 4.0) * 6.328125) / (pow(b, 7.0) / pow(a, 3.0))), fma(-0.5, (c / b), fma(-0.375, (a * (c / (pow(b, 3.0) / c))), (-0.5625 * ((pow(c, 3.0) / pow(b, 5.0)) * (a * a))))));
}
# Julia rendering of the series alternative; ^ replaces pow, Float64() pins intermediates.
function code(a, b, c) return fma(-0.16666666666666666, Float64(Float64((c ^ 4.0) * 6.328125) / Float64((b ^ 7.0) / (a ^ 3.0))), fma(-0.5, Float64(c / b), fma(-0.375, Float64(a * Float64(c / Float64((b ^ 3.0) / c))), Float64(-0.5625 * Float64(Float64((c ^ 3.0) / (b ^ 5.0)) * Float64(a * a)))))) end
(* Mathematica rendering of the series alternative; fma becomes plain multiply-add under N[...]. *)
code[a_, b_, c_] := N[(-0.16666666666666666 * N[(N[(N[Power[c, 4.0], $MachinePrecision] * 6.328125), $MachinePrecision] / N[(N[Power[b, 7.0], $MachinePrecision] / N[Power[a, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision] + N[(-0.375 * N[(a * N[(c / N[(N[Power[b, 3.0], $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5625 * N[(N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.16666666666666666, \frac{{c}^{4} \cdot 6.328125}{\frac{{b}^{7}}{{a}^{3}}}, \mathsf{fma}\left(-0.5, \frac{c}{b}, \mathsf{fma}\left(-0.375, a \cdot \frac{c}{\frac{{b}^{3}}{c}}, -0.5625 \cdot \left(\frac{{c}^{3}}{{b}^{5}} \cdot \left(a \cdot a\right)\right)\right)\right)\right)
\end{array}
Initial program 30.7%
Taylor expanded in b around 0 30.7%
div-inv 30.7%
+-commutative 30.7%
unpow2 30.7%
associate-*r* 30.7%
*-commutative 30.7%
*-commutative 30.7%
fma-udef 30.8%
sub-neg 30.8%
*-commutative 30.8%
Applied egg-rr 30.8%
Taylor expanded in a around 0 95.5%
Simplified 95.5%
Taylor expanded in b around 0 95.5%
associate-/l* 95.5%
distribute-rgt-out 95.5%
metadata-eval 95.5%
Simplified 95.5%
Final simplification 95.5%
;; Herbie alternative: series with t_0 = -1.125 * c^2 * a^2 cached; the -1/6 term
;; combines t_0^2 with 5.0625*(c*a)^4 over b^7*a.
(FPCore (a b c)
:precision binary64
(let* ((t_0 (* -1.125 (* c (* c (* a a))))))
(fma
-0.5625
(/ (pow c 3.0) (/ (pow b 5.0) (* a a)))
(fma
-0.16666666666666666
(/ (+ (* t_0 t_0) (* 5.0625 (pow (* c a) 4.0))) (* (pow b 7.0) a))
(fma -0.5 (/ c b) (* -0.375 (/ (* c c) (/ (pow b 3.0) a))))))))
/* Herbie series alternative; t_0 = -1.125*c^2*a^2 is squared inside the -1/6 term. */
double code(double a, double b, double c) {
double t_0 = -1.125 * (c * (c * (a * a)));  /* reused as t_0 * t_0 below */
return fma(-0.5625, (pow(c, 3.0) / (pow(b, 5.0) / (a * a))), fma(-0.16666666666666666, (((t_0 * t_0) + (5.0625 * pow((c * a), 4.0))) / (pow(b, 7.0) * a)), fma(-0.5, (c / b), (-0.375 * ((c * c) / (pow(b, 3.0) / a))))));
}
# Julia rendering of the t_0-cached series alternative (t_0 = -1.125*c^2*a^2).
function code(a, b, c) t_0 = Float64(-1.125 * Float64(c * Float64(c * Float64(a * a)))) return fma(-0.5625, Float64((c ^ 3.0) / Float64((b ^ 5.0) / Float64(a * a))), fma(-0.16666666666666666, Float64(Float64(Float64(t_0 * t_0) + Float64(5.0625 * (Float64(c * a) ^ 4.0))) / Float64((b ^ 7.0) * a)), fma(-0.5, Float64(c / b), Float64(-0.375 * Float64(Float64(c * c) / Float64((b ^ 3.0) / a)))))) end
(* Mathematica rendering of the t_0-cached series alternative; Block localizes t$95$0. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(-1.125 * N[(c * N[(c * N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(-0.5625 * N[(N[Power[c, 3.0], $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.16666666666666666 * N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] + N[(5.0625 * N[Power[N[(c * a), $MachinePrecision], 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Power[b, 7.0], $MachinePrecision] * a), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision] + N[(-0.375 * N[(N[(c * c), $MachinePrecision] / N[(N[Power[b, 3.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -1.125 \cdot \left(c \cdot \left(c \cdot \left(a \cdot a\right)\right)\right)\\
\mathsf{fma}\left(-0.5625, \frac{{c}^{3}}{\frac{{b}^{5}}{a \cdot a}}, \mathsf{fma}\left(-0.16666666666666666, \frac{t_0 \cdot t_0 + 5.0625 \cdot {\left(c \cdot a\right)}^{4}}{{b}^{7} \cdot a}, \mathsf{fma}\left(-0.5, \frac{c}{b}, -0.375 \cdot \frac{c \cdot c}{\frac{{b}^{3}}{a}}\right)\right)\right)
\end{array}
\end{array}
Initial program 30.7%
Taylor expanded in b around inf 95.5%
fma-def 95.5%
associate-/l* 95.5%
unpow2 95.5%
fma-def 95.5%
Simplified 95.5%
expm1-log1p-u 95.5%
expm1-udef 94.8%
pow-prod-down 94.8%
Applied egg-rr 94.8%
expm1-def 95.5%
expm1-log1p 95.5%
Simplified 95.5%
unpow2 95.5%
associate-*l* 95.5%
associate-*l* 95.5%
Applied egg-rr 95.5%
Final simplification 95.5%
;; Herbie alternative: three-term series fma(-0.5625, c^3/(b^5/a^2), fma(-0.5, c/b, -0.375*c^2/(b^3/a))).
(FPCore (a b c) :precision binary64 (fma -0.5625 (/ (pow c 3.0) (/ (pow b 5.0) (* a a))) (fma -0.5 (/ c b) (/ (* -0.375 (* c c)) (/ (pow b 3.0) a)))))
/* Three-term series: -0.5*c/b - 0.375*a*c^2/b^3 - 0.5625*a^2*c^3/b^5, fma-chained. */
double code(double a, double b, double c) {
return fma(-0.5625, (pow(c, 3.0) / (pow(b, 5.0) / (a * a))), fma(-0.5, (c / b), ((-0.375 * (c * c)) / (pow(b, 3.0) / a))));
}
# Julia rendering of the three-term series alternative.
function code(a, b, c) return fma(-0.5625, Float64((c ^ 3.0) / Float64((b ^ 5.0) / Float64(a * a))), fma(-0.5, Float64(c / b), Float64(Float64(-0.375 * Float64(c * c)) / Float64((b ^ 3.0) / a)))) end
(* Mathematica rendering of the three-term series alternative. *)
code[a_, b_, c_] := N[(-0.5625 * N[(N[Power[c, 3.0], $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision] + N[(N[(-0.375 * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(N[Power[b, 3.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5625, \frac{{c}^{3}}{\frac{{b}^{5}}{a \cdot a}}, \mathsf{fma}\left(-0.5, \frac{c}{b}, \frac{-0.375 \cdot \left(c \cdot c\right)}{\frac{{b}^{3}}{a}}\right)\right)
\end{array}
Initial program 30.7%
Taylor expanded in b around inf 93.8%
fma-def 93.8%
associate-/l* 93.8%
unpow2 93.8%
fma-def 93.8%
associate-/l* 93.8%
associate-*r/ 93.8%
unpow2 93.8%
Simplified 93.8%
Final simplification 93.8%
;; Herbie alternative: fma series -0.5*c/b - 0.375*a*c^2/b^3 - 0.5625*a^2*c^3/b^5 (different grouping than alt 3).
(FPCore (a b c) :precision binary64 (fma -0.5 (/ c b) (fma -0.375 (* a (/ c (/ (pow b 3.0) c))) (* -0.5625 (* (/ (pow c 3.0) (pow b 5.0)) (* a a))))))
/* Same three series terms as the previous alternative, with alternate groupings. */
double code(double a, double b, double c) {
return fma(-0.5, (c / b), fma(-0.375, (a * (c / (pow(b, 3.0) / c))), (-0.5625 * ((pow(c, 3.0) / pow(b, 5.0)) * (a * a)))));
}
# Julia rendering of the regrouped three-term series alternative.
function code(a, b, c) return fma(-0.5, Float64(c / b), fma(-0.375, Float64(a * Float64(c / Float64((b ^ 3.0) / c))), Float64(-0.5625 * Float64(Float64((c ^ 3.0) / (b ^ 5.0)) * Float64(a * a))))) end
(* Mathematica rendering of the regrouped three-term series alternative. *)
code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision] + N[(-0.375 * N[(a * N[(c / N[(N[Power[b, 3.0], $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5625 * N[(N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5, \frac{c}{b}, \mathsf{fma}\left(-0.375, a \cdot \frac{c}{\frac{{b}^{3}}{c}}, -0.5625 \cdot \left(\frac{{c}^{3}}{{b}^{5}} \cdot \left(a \cdot a\right)\right)\right)\right)
\end{array}
Initial program 30.7%
Taylor expanded in b around 0 30.7%
div-inv 30.7%
+-commutative 30.7%
unpow2 30.7%
associate-*r* 30.7%
*-commutative 30.7%
*-commutative 30.7%
fma-udef 30.8%
sub-neg 30.8%
*-commutative 30.8%
Applied egg-rr 30.8%
Taylor expanded in b around inf 93.8%
+-commutative 93.8%
associate-+l+ 93.8%
fma-def 93.8%
fma-def 93.8%
associate-/l* 93.8%
associate-/r/ 93.8%
unpow2 93.8%
associate-/l* 93.8%
associate-/l* 93.8%
associate-/r/ 93.8%
unpow2 93.8%
Simplified 93.8%
Final simplification 93.8%
;; Herbie alternative: two-term series -0.5*c/b - 0.375*a*c^2/b^3.
(FPCore (a b c) :precision binary64 (+ (* -0.5 (/ c b)) (/ (* a -0.375) (/ (pow b 3.0) (* c c)))))
/* Two-term series: -0.5*c/b - 0.375*a*c^2/b^3 (second term written as (a*-0.375)/(b^3/c^2)). */
double code(double a, double b, double c) {
return (-0.5 * (c / b)) + ((a * -0.375) / (pow(b, 3.0) / (c * c)));
}
! Two-term series: -0.5*c/b - 0.375*a*c^2/b^3, in real(8).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((-0.5d0) * (c / b)) + ((a * (-0.375d0)) / ((b ** 3.0d0) / (c * c)))
end function
/** Two-term series: -0.5*c/b - 0.375*a*c^2/b^3. */
public static double code(double a, double b, double c) {
return (-0.5 * (c / b)) + ((a * -0.375) / (Math.pow(b, 3.0) / (c * c)));
}
# Two-term series: -0.5*c/b - 0.375*a*c^2/b^3 (requires `import math` for math.pow).
def code(a, b, c): return (-0.5 * (c / b)) + ((a * -0.375) / (math.pow(b, 3.0) / (c * c)))
# Julia rendering of the two-term series alternative.
function code(a, b, c) return Float64(Float64(-0.5 * Float64(c / b)) + Float64(Float64(a * -0.375) / Float64((b ^ 3.0) / Float64(c * c)))) end
% Two-term series: -0.5*c/b - 0.375*a*c^2/b^3.
function tmp = code(a, b, c) tmp = (-0.5 * (c / b)) + ((a * -0.375) / ((b ^ 3.0) / (c * c))); end
(* Mathematica rendering of the two-term series alternative. *)
code[a_, b_, c_] := N[(N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision] + N[(N[(a * -0.375), $MachinePrecision] / N[(N[Power[b, 3.0], $MachinePrecision] / N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \frac{c}{b} + \frac{a \cdot -0.375}{\frac{{b}^{3}}{c \cdot c}}
\end{array}
Initial program 30.7%
Taylor expanded in b around 0 30.7%
Taylor expanded in b around inf 90.6%
fma-def 90.6%
associate-*r/ 90.6%
*-commutative 90.6%
associate-*r* 90.6%
unpow2 90.6%
Simplified 90.6%
fma-udef 90.6%
associate-/l* 90.6%
*-commutative 90.6%
Applied egg-rr 90.6%
Final simplification 90.6%
;; Herbie alternative: leading term only, -c/(2b); a drops out at this order.
(FPCore (a b c) :precision binary64 (* -0.5 (/ c b)))
/* Leading series term only: -c / (2*b); `a` does not contribute at this order. */
double code(double a, double b, double c) {
    (void) a;  /* unused -- signature kept uniform across the report's alternatives */
    double half_ratio = (c / b) * -0.5;
    return half_ratio;
}
! Leading series term only: -c/(2b); argument a is unused at this order.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-0.5d0) * (c / b)
end function
/** Leading series term only: -c/(2b); parameter a is unused at this order. */
public static double code(double a, double b, double c) {
return -0.5 * (c / b);
}
def code(a, b, c):
    """Leading series term only: -c / (2*b); `a` does not contribute at this order."""
    ratio = c / b
    return ratio * -0.5
# Leading series term only: -c/(2b); a is unused.
function code(a, b, c) return Float64(-0.5 * Float64(c / b)) end
% Leading series term only: -c/(2b); a is unused.
function tmp = code(a, b, c) tmp = -0.5 * (c / b); end
(* Leading series term only: -c/(2b); a is unused. *)
code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \frac{c}{b}
\end{array}
Initial program 30.7%
Taylor expanded in b around inf 81.7%
Final simplification 81.7%
herbie shell --seed 2023279
;; Original Herbie job spec: named "Cubic critical, medium range", binary64,
;; with preconditions 2^-53 < a, b, c < 2^53 (1.1102230246251565e-16 and 9007199254740992).
(FPCore (a b c)
:name "Cubic critical, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))