
; Critical point of a cubic: (-b + sqrt(b^2 - 3ac)) / (3a), evaluated in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Critical point of a cubic: (-b + sqrt(b**2 - 3ac)) / (3a).
! Assumes the radicand b**2 - 3ac is non-negative (sqrt of a negative is invalid).
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // Critical point of a cubic: (-b + sqrt(b^2 - 3ac)) / (3a).
    final double radicand = (b * b) - ((3.0 * a) * c);
    final double root = Math.sqrt(radicand);
    return (root - b) / (3.0 * a);
}
def code(a, b, c):
    """Critical point of a cubic: (-b + sqrt(b*b - 3*a*c)) / (3*a)."""
    radicand = (b * b) - ((3.0 * a) * c)
    return (-b + math.sqrt(radicand)) / (3.0 * a)
function code(a, b, c)
    # Critical point of a cubic: (-b + sqrt(b^2 - 3ac)) / (3a).
    # Float64(...) wrappers match the generated binary64 evaluation exactly.
    radicand = Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c))
    numer = Float64(Float64(-b) + sqrt(radicand))
    return Float64(numer / Float64(3.0 * a))
end
function tmp = code(a, b, c)
    % Critical point of a cubic: (-b + sqrt(b^2 - 3*a*c)) / (3*a).
    radicand = (b * b) - ((3.0 * a) * c);
    tmp = (-b + sqrt(radicand)) / (3.0 * a);
end
(* (-b + Sqrt[b^2 - 3 a c]) / (3 a); each intermediate is rounded with N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Critical point of a cubic: (-b + sqrt(b^2 - 3ac)) / (3a), evaluated in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Critical point of a cubic: (-b + sqrt(b**2 - 3ac)) / (3a).
! Assumes the radicand b**2 - 3ac is non-negative (sqrt of a negative is invalid).
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // Critical point of a cubic: (-b + sqrt(b^2 - 3ac)) / (3a).
    final double radicand = (b * b) - ((3.0 * a) * c);
    final double root = Math.sqrt(radicand);
    return (root - b) / (3.0 * a);
}
def code(a, b, c):
    """Critical point of a cubic: (-b + sqrt(b*b - 3*a*c)) / (3*a)."""
    radicand = (b * b) - ((3.0 * a) * c)
    return (-b + math.sqrt(radicand)) / (3.0 * a)
function code(a, b, c)
    # Critical point of a cubic: (-b + sqrt(b^2 - 3ac)) / (3a).
    # Float64(...) wrappers match the generated binary64 evaluation exactly.
    radicand = Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c))
    numer = Float64(Float64(-b) + sqrt(radicand))
    return Float64(numer / Float64(3.0 * a))
end
function tmp = code(a, b, c)
    % Critical point of a cubic: (-b + sqrt(b^2 - 3*a*c)) / (3*a).
    radicand = (b * b) - ((3.0 * a) * c);
    tmp = (-b + sqrt(radicand)) / (3.0 * a);
end
(* (-b + Sqrt[b^2 - 3 a c]) / (3 a); each intermediate is rounded with N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
; Taylor expansion of the cubic critical-point formula in b around inf
; (reported accuracy 97.6% — see the derivation log below).
(FPCore (a b c)
:precision binary64
(fma
-0.5625
(/ (pow c 3.0) (/ (pow b 5.0) (* a a)))
(fma
-0.16666666666666666
(/
(+ (pow (* (* (* a a) (* c c)) -1.125) 2.0) (* 5.0625 (pow (* c a) 4.0)))
(* a (pow b 7.0)))
(fma -0.5 (/ c b) (/ (* -0.375 (* a (* c c))) (pow b 3.0))))))
double code(double a, double b, double c) {
return fma(-0.5625, (pow(c, 3.0) / (pow(b, 5.0) / (a * a))), fma(-0.16666666666666666, ((pow((((a * a) * (c * c)) * -1.125), 2.0) + (5.0625 * pow((c * a), 4.0))) / (a * pow(b, 7.0))), fma(-0.5, (c / b), ((-0.375 * (a * (c * c))) / pow(b, 3.0)))));
}
function code(a, b, c)
    # Series expansion (in 1/b) of the cubic critical-point formula.
    # Float64(...) wrappers and fma calls reproduce the generated binary64 code.
    cubic_term = Float64((c ^ 3.0) / Float64((b ^ 5.0) / Float64(a * a)))
    squared = Float64(Float64(Float64(a * a) * Float64(c * c)) * -1.125) ^ 2.0
    quartic = Float64(5.0625 * (Float64(c * a) ^ 4.0))
    mixed_term = Float64(Float64(squared + quartic) / Float64(a * (b ^ 7.0)))
    quad_term = Float64(Float64(-0.375 * Float64(a * Float64(c * c))) / (b ^ 3.0))
    return fma(-0.5625, cubic_term, fma(-0.16666666666666666, mixed_term, fma(-0.5, Float64(c / b), quad_term)))
end
(* Series expansion (in 1/b) of the cubic critical-point formula; every intermediate rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(-0.5625 * N[(N[Power[c, 3.0], $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.16666666666666666 * N[(N[(N[Power[N[(N[(N[(a * a), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision] * -1.125), $MachinePrecision], 2.0], $MachinePrecision] + N[(5.0625 * N[Power[N[(c * a), $MachinePrecision], 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision] + N[(N[(-0.375 * N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5625, \frac{{c}^{3}}{\frac{{b}^{5}}{a \cdot a}}, \mathsf{fma}\left(-0.16666666666666666, \frac{{\left(\left(\left(a \cdot a\right) \cdot \left(c \cdot c\right)\right) \cdot -1.125\right)}^{2} + 5.0625 \cdot {\left(c \cdot a\right)}^{4}}{a \cdot {b}^{7}}, \mathsf{fma}\left(-0.5, \frac{c}{b}, \frac{-0.375 \cdot \left(a \cdot \left(c \cdot c\right)\right)}{{b}^{3}}\right)\right)\right)
\end{array}
Initial program 18.8%
/-rgt-identity 18.8%
metadata-eval 18.8%
associate-/l* 18.8%
associate-*r/ 18.8%
*-commutative 18.8%
associate-*l/ 18.8%
associate-*r/ 18.8%
metadata-eval 18.8%
metadata-eval 18.8%
times-frac 18.8%
neg-mul-1 18.8%
distribute-rgt-neg-in 18.8%
times-frac 18.8%
metadata-eval 18.8%
neg-mul-1 18.8%
Simplified 18.8%
Taylor expanded in b around inf 97.6%
fma-def 97.6%
associate-/l* 97.6%
unpow2 97.6%
fma-def 97.6%
Simplified 97.6%
pow1 97.6%
pow-prod-down 97.6%
Applied egg-rr 97.6%
unpow1 97.6%
Simplified 97.6%
Final simplification 97.6%
; Taylor expansion in b around inf; quartic coefficient pre-combined into
; 6.328125 * (c*a)^4 (reported accuracy 97.6%).
(FPCore (a b c)
:precision binary64
(fma
-0.16666666666666666
(/ (* (pow (* c a) 4.0) 6.328125) (* a (pow b 7.0)))
(fma
-0.5
(/ c b)
(*
a
(+
(* -0.375 (/ (* c c) (pow b 3.0)))
(* -0.5625 (* a (/ (pow c 3.0) (pow b 5.0)))))))))
double code(double a, double b, double c) {
return fma(-0.16666666666666666, ((pow((c * a), 4.0) * 6.328125) / (a * pow(b, 7.0))), fma(-0.5, (c / b), (a * ((-0.375 * ((c * c) / pow(b, 3.0))) + (-0.5625 * (a * (pow(c, 3.0) / pow(b, 5.0))))))));
}
function code(a, b, c)
    # Series expansion (in 1/b) with inner terms factored Horner-style in a.
    # Float64(...) wrappers and fma calls reproduce the generated binary64 code.
    quartic_term = Float64(Float64((Float64(c * a) ^ 4.0) * 6.328125) / Float64(a * (b ^ 7.0)))
    quadratic = Float64(-0.375 * Float64(Float64(c * c) / (b ^ 3.0)))
    cubic = Float64(-0.5625 * Float64(a * Float64((c ^ 3.0) / (b ^ 5.0))))
    tail = Float64(a * Float64(quadratic + cubic))
    return fma(-0.16666666666666666, quartic_term, fma(-0.5, Float64(c / b), tail))
end
(* Series expansion (in 1/b), inner terms factored in a; every intermediate rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(-0.16666666666666666 * N[(N[(N[Power[N[(c * a), $MachinePrecision], 4.0], $MachinePrecision] * 6.328125), $MachinePrecision] / N[(a * N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision] + N[(a * N[(N[(-0.375 * N[(N[(c * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5625 * N[(a * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.16666666666666666, \frac{{\left(c \cdot a\right)}^{4} \cdot 6.328125}{a \cdot {b}^{7}}, \mathsf{fma}\left(-0.5, \frac{c}{b}, a \cdot \left(-0.375 \cdot \frac{c \cdot c}{{b}^{3}} + -0.5625 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right)\right)\right)\right)
\end{array}
Initial program 18.8%
neg-sub0 18.8%
associate-+l- 18.8%
sub0-neg 18.8%
neg-mul-1 18.8%
associate-*r/ 18.8%
*-commutative 18.8%
metadata-eval 18.8%
metadata-eval 18.8%
times-frac 18.8%
*-commutative 18.8%
times-frac 18.8%
Simplified 18.8%
div-inv 18.8%
Applied egg-rr 18.8%
Taylor expanded in b around inf 97.6%
+-commutative 97.6%
associate-+l+ 97.6%
Simplified 97.6%
Final simplification 97.6%
; Three-term Taylor expansion in b around inf (reported accuracy 96.8%).
(FPCore (a b c) :precision binary64 (fma -0.5625 (/ (pow c 3.0) (/ (pow b 5.0) (* a a))) (fma -0.375 (/ (* c c) (/ (pow b 3.0) a)) (* -0.5 (/ c b)))))
double code(double a, double b, double c) {
return fma(-0.5625, (pow(c, 3.0) / (pow(b, 5.0) / (a * a))), fma(-0.375, ((c * c) / (pow(b, 3.0) / a)), (-0.5 * (c / b))));
}
function code(a, b, c)
    # Three-term series expansion (in 1/b) of the cubic critical-point formula.
    cubic_term = Float64((c ^ 3.0) / Float64((b ^ 5.0) / Float64(a * a)))
    quadratic_term = Float64(Float64(c * c) / Float64((b ^ 3.0) / a))
    linear_term = Float64(-0.5 * Float64(c / b))
    return fma(-0.5625, cubic_term, fma(-0.375, quadratic_term, linear_term))
end
(* Three-term series expansion (in 1/b); every intermediate rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(-0.5625 * N[(N[Power[c, 3.0], $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(N[(c * c), $MachinePrecision] / N[(N[Power[b, 3.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5625, \frac{{c}^{3}}{\frac{{b}^{5}}{a \cdot a}}, \mathsf{fma}\left(-0.375, \frac{c \cdot c}{\frac{{b}^{3}}{a}}, -0.5 \cdot \frac{c}{b}\right)\right)
\end{array}
Initial program 18.8%
/-rgt-identity 18.8%
metadata-eval 18.8%
associate-/l* 18.8%
associate-*r/ 18.8%
*-commutative 18.8%
associate-*l/ 18.8%
associate-*r/ 18.8%
metadata-eval 18.8%
metadata-eval 18.8%
times-frac 18.8%
neg-mul-1 18.8%
distribute-rgt-neg-in 18.8%
times-frac 18.8%
metadata-eval 18.8%
neg-mul-1 18.8%
Simplified 18.8%
Taylor expanded in b around inf 96.8%
fma-def 96.8%
associate-/l* 96.8%
unpow2 96.8%
+-commutative 96.8%
fma-def 96.8%
associate-/l* 96.8%
unpow2 96.8%
Simplified 96.8%
Final simplification 96.8%
; Taylor expansion in b around inf without the quartic term, factored
; Horner-style in a (reported accuracy 96.8%).
(FPCore (a b c)
:precision binary64
(fma
-0.5
(/ c b)
(*
a
(+
(* -0.375 (/ (* c c) (pow b 3.0)))
(* -0.5625 (* a (/ (pow c 3.0) (pow b 5.0))))))))
double code(double a, double b, double c) {
return fma(-0.5, (c / b), (a * ((-0.375 * ((c * c) / pow(b, 3.0))) + (-0.5625 * (a * (pow(c, 3.0) / pow(b, 5.0)))))));
}
function code(a, b, c)
    # Series expansion (in 1/b), factored Horner-style in a.
    quadratic = Float64(-0.375 * Float64(Float64(c * c) / (b ^ 3.0)))
    cubic = Float64(-0.5625 * Float64(a * Float64((c ^ 3.0) / (b ^ 5.0))))
    tail = Float64(a * Float64(quadratic + cubic))
    return fma(-0.5, Float64(c / b), tail)
end
(* Series expansion (in 1/b), factored in a; every intermediate rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision] + N[(a * N[(N[(-0.375 * N[(N[(c * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.5625 * N[(a * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5, \frac{c}{b}, a \cdot \left(-0.375 \cdot \frac{c \cdot c}{{b}^{3}} + -0.5625 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right)\right)\right)
\end{array}
Initial program 18.8%
neg-sub0 18.8%
associate-+l- 18.8%
sub0-neg 18.8%
neg-mul-1 18.8%
associate-*r/ 18.8%
*-commutative 18.8%
metadata-eval 18.8%
metadata-eval 18.8%
times-frac 18.8%
*-commutative 18.8%
times-frac 18.8%
Simplified 18.8%
div-inv 18.8%
Applied egg-rr 18.8%
Taylor expanded in b around inf 96.8%
+-commutative 96.8%
associate-+l+ 96.8%
+-commutative 96.8%
fma-def 96.8%
+-commutative 96.8%
associate-*l/ 96.8%
associate-*r* 96.8%
associate-*l/ 96.8%
unpow2 96.8%
associate-*r* 96.8%
associate-*r* 96.8%
Simplified 96.8%
Final simplification 96.8%
; Two-term Taylor expansion in b around inf: -c/(2b) - 3ac^2/(8b^3) (reported accuracy 95.3%).
(FPCore (a b c) :precision binary64 (+ (* -0.5 (/ c b)) (* -0.375 (* a (/ (* c c) (pow b 3.0))))))
double code(double a, double b, double c) {
return (-0.5 * (c / b)) + (-0.375 * (a * ((c * c) / pow(b, 3.0))));
}
! Two-term series expansion: -c/(2b) - 3*a*c**2/(8*b**3).
! NOTE(review): b ** 3.0d0 uses a real exponent, which is invalid for b < 0;
! the report's precondition restricts b > 0, so the expression is kept as generated.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((-0.5d0) * (c / b)) + ((-0.375d0) * (a * ((c * c) / (b ** 3.0d0))))
end function
public static double code(double a, double b, double c) {
    // Two-term series: -c/(2b) - 3*a*c^2/(8*b^3).
    final double linear = -0.5 * (c / b);
    final double quadratic = -0.375 * (a * ((c * c) / Math.pow(b, 3.0)));
    return linear + quadratic;
}
def code(a, b, c):
    """Two-term series: -c/(2b) - 3*a*c**2/(8*b**3)."""
    linear = -0.5 * (c / b)
    quadratic = -0.375 * (a * ((c * c) / math.pow(b, 3.0)))
    return linear + quadratic
function code(a, b, c)
    # Two-term series: -c/(2b) - 3*a*c^2/(8*b^3).
    linear = Float64(-0.5 * Float64(c / b))
    quadratic = Float64(-0.375 * Float64(a * Float64(Float64(c * c) / (b ^ 3.0))))
    return Float64(linear + quadratic)
end
function tmp = code(a, b, c)
    % Two-term series: -c/(2b) - 3*a*c^2/(8*b^3).
    linear = -0.5 * (c / b);
    quadratic = -0.375 * (a * ((c * c) / (b ^ 3.0)));
    tmp = linear + quadratic;
end
(* Two-term series: -c/(2b) - 3 a c^2/(8 b^3); intermediates rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision] + N[(-0.375 * N[(a * N[(N[(c * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \frac{c}{b} + -0.375 \cdot \left(a \cdot \frac{c \cdot c}{{b}^{3}}\right)
\end{array}
Initial program 18.8%
/-rgt-identity 18.8%
metadata-eval 18.8%
associate-/l* 18.8%
associate-*r/ 18.8%
*-commutative 18.8%
associate-*l/ 18.8%
associate-*r/ 18.8%
metadata-eval 18.8%
metadata-eval 18.8%
times-frac 18.8%
neg-mul-1 18.8%
distribute-rgt-neg-in 18.8%
times-frac 18.8%
metadata-eval 18.8%
neg-mul-1 18.8%
Simplified 18.8%
Taylor expanded in b around inf 95.3%
+-commutative 95.3%
fma-def 95.3%
associate-/l* 95.3%
unpow2 95.3%
Simplified 95.3%
fma-udef 95.3%
associate-/r/ 95.3%
Applied egg-rr 95.3%
Final simplification 95.3%
; Leading-order term of the series: -c/(2b); a drops out (reported accuracy 89.8%).
(FPCore (a b c) :precision binary64 (* -0.5 (/ c b)))
double code(double a, double b, double c) {
    /* Leading-order series term: -c/(2b); a is unused in this approximation. */
    double ratio = c / b;
    return -0.5 * ratio;
}
! Leading-order series term: -c/(2b); a is unused in this approximation.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-0.5d0) * (c / b)
end function
public static double code(double a, double b, double c) {
    // Leading-order series term: -c/(2b); a is unused in this approximation.
    final double ratio = c / b;
    return -0.5 * ratio;
}
def code(a, b, c):
    """Leading-order series term: -c/(2b); `a` is unused in this approximation."""
    ratio = c / b
    return -0.5 * ratio
function code(a, b, c)
    # Leading-order series term: -c/(2b); a is unused in this approximation.
    ratio = Float64(c / b)
    return Float64(-0.5 * ratio)
end
function tmp = code(a, b, c)
    % Leading-order series term: -c/(2b); a is unused in this approximation.
    ratio = c / b;
    tmp = -0.5 * ratio;
end
(* Leading-order series term: -c/(2b); rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \frac{c}{b}
\end{array}
Initial program 18.8%
/-rgt-identity 18.8%
metadata-eval 18.8%
associate-/l* 18.8%
associate-*r/ 18.8%
*-commutative 18.8%
associate-*l/ 18.8%
associate-*r/ 18.8%
metadata-eval 18.8%
metadata-eval 18.8%
times-frac 18.8%
neg-mul-1 18.8%
distribute-rgt-neg-in 18.8%
times-frac 18.8%
metadata-eval 18.8%
neg-mul-1 18.8%
Simplified 18.8%
Taylor expanded in b around inf 89.8%
Final simplification 89.8%
herbie shell --seed 2023174
; Input specification for this Herbie run: cubic critical-point formula with
; a, b, c each restricted to a wide positive range by the :pre condition.
(FPCore (a b c)
:name "Cubic critical, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))