
; Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a) in binary64 (initial program).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a), all operations in real(8).
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Evaluates (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision. */
public static double code(double a, double b, double c) {
    final double discriminant = (b * b) - ((4.0 * a) * c);
    final double numerator = -b + Math.sqrt(discriminant);
    return numerator / (2.0 * a);
}
def code(a, b, c):
    """Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a) using float arithmetic."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a), forcing every intermediate to Float64.
function code(a, b, c)
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    num = Float64(Float64(-b) + sqrt(disc))
    return Float64(num / Float64(2.0 * a))
end
% Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a).
function tmp = code(a, b, c)
    disc = (b * b) - ((4.0 * a) * c);
    tmp = (-b + sqrt(disc)) / (2.0 * a);
end
(* Quadratic-formula root; each intermediate is rounded to $MachinePrecision via N to mirror binary64 evaluation. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a) in binary64 (initial program, repeated).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a), all operations in real(8).
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Evaluates (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision. */
public static double code(double a, double b, double c) {
    final double disc = (b * b) - ((4.0 * a) * c);
    return (-b + Math.sqrt(disc)) / (2.0 * a);
}
def code(a, b, c):
    """Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a) using float arithmetic."""
    disc = (b * b) - ((4.0 * a) * c)
    numerator = -b + math.sqrt(disc)
    return numerator / (2.0 * a)
# Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a), forcing every intermediate to Float64.
function code(a, b, c)
    discriminant = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    numerator = Float64(Float64(-b) + sqrt(discriminant))
    return Float64(numerator / Float64(2.0 * a))
end
% Evaluate (-b + sqrt(b*b - 4*a*c)) / (2*a).
function tmp = code(a, b, c)
    discriminant = (b * b) - ((4.0 * a) * c);
    tmp = (-b + sqrt(discriminant)) / (2.0 * a);
end
(* Quadratic-formula root; each intermediate is rounded to $MachinePrecision via N to mirror binary64 evaluation. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
; Herbie alternative: series-expansion approximation of the quadratic root
; (avoids the catastrophic cancellation of the direct formula), using fma.
(FPCore (a b c)
:precision binary64
(-
(-
(fma
-2.0
(* (/ (pow c 3.0) (pow b 5.0)) (* a a))
(* -5.0 (/ (* (pow c 4.0) (pow a 3.0)) (pow b 7.0))))
(/ c b))
(* a (/ c (/ (pow b 3.0) c)))))
double code(double a, double b, double c) {
return (fma(-2.0, ((pow(c, 3.0) / pow(b, 5.0)) * (a * a)), (-5.0 * ((pow(c, 4.0) * pow(a, 3.0)) / pow(b, 7.0)))) - (c / b)) - (a * (c / (pow(b, 3.0) / c)));
}
# Herbie alternative: fma-based series approximation, every intermediate in Float64.
function code(a, b, c)
    t1 = Float64(Float64((c ^ 3.0) / (b ^ 5.0)) * Float64(a * a))
    t2 = Float64(-5.0 * Float64(Float64((c ^ 4.0) * (a ^ 3.0)) / (b ^ 7.0)))
    head = Float64(fma(-2.0, t1, t2) - Float64(c / b))
    return Float64(head - Float64(a * Float64(c / Float64((b ^ 3.0) / c))))
end
(* Herbie alternative: fma-style series approximation; each intermediate rounded to $MachinePrecision via N. *)
code[a_, b_, c_] := N[(N[(N[(-2.0 * N[(N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * N[(a * a), $MachinePrecision]), $MachinePrecision] + N[(-5.0 * N[(N[(N[Power[c, 4.0], $MachinePrecision] * N[Power[a, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(a * N[(c / N[(N[Power[b, 3.0], $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(-2, \frac{{c}^{3}}{{b}^{5}} \cdot \left(a \cdot a\right), -5 \cdot \frac{{c}^{4} \cdot {a}^{3}}{{b}^{7}}\right) - \frac{c}{b}\right) - a \cdot \frac{c}{\frac{{b}^{3}}{c}}
\end{array}
Initial program 28.9%
Taylor expanded in a around 0 96.5%
+-commutative 96.5%
mul-1-neg 96.5%
unsub-neg 96.5%
Simplified 96.5%
Taylor expanded in c around 0 96.5%
Final simplification 96.5%
; Herbie alternative: fma-based series approximation with a fourth-order correction term.
(FPCore (a b c) :precision binary64 (- (fma -0.25 (* (/ (pow (* c a) 4.0) a) (/ 20.0 (pow b 7.0))) (- (/ (* (pow c 3.0) (* a (* -2.0 a))) (pow b 5.0)) (/ c b))) (* c (/ c (/ (pow b 3.0) a)))))
double code(double a, double b, double c) {
return fma(-0.25, ((pow((c * a), 4.0) / a) * (20.0 / pow(b, 7.0))), (((pow(c, 3.0) * (a * (-2.0 * a))) / pow(b, 5.0)) - (c / b))) - (c * (c / (pow(b, 3.0) / a)));
}
# Herbie alternative: fma-based series approximation, every intermediate in Float64.
function code(a, b, c)
    quartic = Float64(Float64((Float64(c * a) ^ 4.0) / a) * Float64(20.0 / (b ^ 7.0)))
    cubic = Float64(Float64(Float64((c ^ 3.0) * Float64(a * Float64(-2.0 * a))) / (b ^ 5.0)) - Float64(c / b))
    return Float64(fma(-0.25, quartic, cubic) - Float64(c * Float64(c / Float64((b ^ 3.0) / a))))
end
(* Herbie alternative with fourth-order correction; each intermediate rounded to $MachinePrecision via N. *)
code[a_, b_, c_] := N[(N[(-0.25 * N[(N[(N[Power[N[(c * a), $MachinePrecision], 4.0], $MachinePrecision] / a), $MachinePrecision] * N[(20.0 / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(N[Power[c, 3.0], $MachinePrecision] * N[(a * N[(-2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c * N[(c / N[(N[Power[b, 3.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.25, \frac{{\left(c \cdot a\right)}^{4}}{a} \cdot \frac{20}{{b}^{7}}, \frac{{c}^{3} \cdot \left(a \cdot \left(-2 \cdot a\right)\right)}{{b}^{5}} - \frac{c}{b}\right) - c \cdot \frac{c}{\frac{{b}^{3}}{a}}
\end{array}
Initial program 28.9%
sub-neg 28.9%
flip-+ 28.7%
pow2 28.7%
pow2 28.7%
pow-prod-up 28.9%
metadata-eval 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
metadata-eval 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
metadata-eval 28.9%
Applied egg-rr 28.9%
log1p-expm1-u 22.2%
pow2 22.2%
*-commutative 22.2%
Applied egg-rr 22.2%
Taylor expanded in b around inf 96.5%
+-commutative 96.5%
mul-1-neg 96.5%
unsub-neg 96.5%
Simplified 96.5%
Taylor expanded in c around 0 96.5%
distribute-rgt-out 96.5%
associate-*r* 96.5%
times-frac 96.5%
Simplified 96.5%
Final simplification 96.5%
; Herbie alternative: third-order series approximation of the quadratic root (no fma).
(FPCore (a b c) :precision binary64 (- (- (/ (* -2.0 (pow c 3.0)) (/ (pow b 5.0) (* a a))) (/ c b)) (* a (/ c (/ (pow b 3.0) c)))))
double code(double a, double b, double c) {
return (((-2.0 * pow(c, 3.0)) / (pow(b, 5.0) / (a * a))) - (c / b)) - (a * (c / (pow(b, 3.0) / c)));
}
! Herbie alternative: third-order series approximation of the quadratic root.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((((-2.0d0) * (c ** 3.0d0)) / ((b ** 5.0d0) / (a * a))) - (c / b)) - (a * (c / ((b ** 3.0d0) / c)))
end function
/** Herbie alternative: third-order series approximation of the quadratic root. */
public static double code(double a, double b, double c) {
    final double lead = (-2.0 * Math.pow(c, 3.0)) / (Math.pow(b, 5.0) / (a * a));
    final double tail = a * (c / (Math.pow(b, 3.0) / c));
    return (lead - (c / b)) - tail;
}
def code(a, b, c):
    """Herbie alternative: third-order series approximation of the quadratic root."""
    lead = (-2.0 * math.pow(c, 3.0)) / (math.pow(b, 5.0) / (a * a))
    tail = a * (c / (math.pow(b, 3.0) / c))
    return (lead - (c / b)) - tail
# Herbie alternative: third-order series approximation, every intermediate in Float64.
function code(a, b, c)
    lead = Float64(Float64(-2.0 * (c ^ 3.0)) / Float64((b ^ 5.0) / Float64(a * a)))
    tail = Float64(a * Float64(c / Float64((b ^ 3.0) / c)))
    return Float64(Float64(lead - Float64(c / b)) - tail)
end
% Herbie alternative: third-order series approximation of the quadratic root.
function tmp = code(a, b, c)
    lead = (-2.0 * (c ^ 3.0)) / ((b ^ 5.0) / (a * a));
    tail = a * (c / ((b ^ 3.0) / c));
    tmp = (lead - (c / b)) - tail;
end
(* Herbie alternative: third-order series approximation; each intermediate rounded to $MachinePrecision via N. *)
code[a_, b_, c_] := N[(N[(N[(N[(-2.0 * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(a * N[(c / N[(N[Power[b, 3.0], $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{-2 \cdot {c}^{3}}{\frac{{b}^{5}}{a \cdot a}} - \frac{c}{b}\right) - a \cdot \frac{c}{\frac{{b}^{3}}{c}}
\end{array}
Initial program 28.9%
Taylor expanded in b around inf 95.0%
+-commutative 95.0%
mul-1-neg 95.0%
unsub-neg 95.0%
+-commutative 95.0%
mul-1-neg 95.0%
unsub-neg 95.0%
associate-/l* 95.0%
associate-*r/ 95.0%
unpow2 95.0%
associate-/l* 95.0%
associate-/r/ 95.0%
Simplified 95.0%
Final simplification 95.0%
; Herbie alternative: third-order series approximation, algebraically rearranged variant.
(FPCore (a b c) :precision binary64 (- (- (/ (* (pow c 3.0) (* a (* -2.0 a))) (pow b 5.0)) (/ c b)) (* c (/ c (/ (pow b 3.0) a)))))
double code(double a, double b, double c) {
return (((pow(c, 3.0) * (a * (-2.0 * a))) / pow(b, 5.0)) - (c / b)) - (c * (c / (pow(b, 3.0) / a)));
}
! Herbie alternative: third-order series approximation (rearranged variant).
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((((c ** 3.0d0) * (a * ((-2.0d0) * a))) / (b ** 5.0d0)) - (c / b)) - (c * (c / ((b ** 3.0d0) / a)))
end function
/** Herbie alternative: third-order series approximation (rearranged variant). */
public static double code(double a, double b, double c) {
    final double cubic = (Math.pow(c, 3.0) * (a * (-2.0 * a))) / Math.pow(b, 5.0);
    final double tail = c * (c / (Math.pow(b, 3.0) / a));
    return (cubic - (c / b)) - tail;
}
def code(a, b, c):
    """Herbie alternative: third-order series approximation (rearranged variant)."""
    cubic = (math.pow(c, 3.0) * (a * (-2.0 * a))) / math.pow(b, 5.0)
    tail = c * (c / (math.pow(b, 3.0) / a))
    return (cubic - (c / b)) - tail
# Herbie alternative: third-order series approximation, every intermediate in Float64.
function code(a, b, c)
    cubic = Float64(Float64((c ^ 3.0) * Float64(a * Float64(-2.0 * a))) / (b ^ 5.0))
    tail = Float64(c * Float64(c / Float64((b ^ 3.0) / a)))
    return Float64(Float64(cubic - Float64(c / b)) - tail)
end
% Herbie alternative: third-order series approximation (rearranged variant).
function tmp = code(a, b, c)
    cubic = ((c ^ 3.0) * (a * (-2.0 * a))) / (b ^ 5.0);
    tail = c * (c / ((b ^ 3.0) / a));
    tmp = (cubic - (c / b)) - tail;
end
(* Herbie alternative: third-order series approximation (rearranged); each intermediate rounded to $MachinePrecision via N. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[Power[c, 3.0], $MachinePrecision] * N[(a * N[(-2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(c * N[(c / N[(N[Power[b, 3.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{{c}^{3} \cdot \left(a \cdot \left(-2 \cdot a\right)\right)}{{b}^{5}} - \frac{c}{b}\right) - c \cdot \frac{c}{\frac{{b}^{3}}{a}}
\end{array}
Initial program 28.9%
sub-neg 28.9%
flip-+ 28.7%
pow2 28.7%
pow2 28.7%
pow-prod-up 28.9%
metadata-eval 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
metadata-eval 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
*-commutative 28.9%
distribute-rgt-neg-in 28.9%
metadata-eval 28.9%
Applied egg-rr 28.9%
log1p-expm1-u 22.2%
pow2 22.2%
*-commutative 22.2%
Applied egg-rr 22.2%
Taylor expanded in b around inf 95.0%
+-commutative 95.0%
mul-1-neg 95.0%
unsub-neg 95.0%
Simplified 95.0%
Final simplification 95.0%
; Herbie alternative: second-order series approximation -c/b - a*c^2/b^3.
(FPCore (a b c) :precision binary64 (- (/ (- c) b) (* a (/ c (/ (pow b 3.0) c)))))
double code(double a, double b, double c) {
return (-c / b) - (a * (c / (pow(b, 3.0) / c)));
}
! Herbie alternative: second-order series approximation of the quadratic root.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-c / b) - (a * (c / ((b ** 3.0d0) / c)))
end function
/** Herbie alternative: second-order series approximation of the quadratic root. */
public static double code(double a, double b, double c) {
    final double tail = a * (c / (Math.pow(b, 3.0) / c));
    return (-c / b) - tail;
}
def code(a, b, c):
    """Herbie alternative: second-order series approximation of the quadratic root."""
    tail = a * (c / (math.pow(b, 3.0) / c))
    return (-c / b) - tail
# Herbie alternative: second-order series approximation, every intermediate in Float64.
function code(a, b, c)
    tail = Float64(a * Float64(c / Float64((b ^ 3.0) / c)))
    return Float64(Float64(Float64(-c) / b) - tail)
end
% Herbie alternative: second-order series approximation of the quadratic root.
function tmp = code(a, b, c)
    tail = a * (c / ((b ^ 3.0) / c));
    tmp = (-c / b) - tail;
end
(* Herbie alternative: second-order series approximation; each intermediate rounded to $MachinePrecision via N. *)
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(a * N[(c / N[(N[Power[b, 3.0], $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b} - a \cdot \frac{c}{\frac{{b}^{3}}{c}}
\end{array}
Initial program 28.9%
Taylor expanded in b around inf 92.0%
+-commutative 92.0%
mul-1-neg 92.0%
unsub-neg 92.0%
associate-*r/ 92.0%
neg-mul-1 92.0%
associate-/l* 92.0%
associate-/r/ 92.0%
unpow2 92.0%
associate-/l* 92.0%
Simplified 92.0%
Final simplification 92.0%
; Herbie alternative: first-order approximation -c/b (parameter a drops out).
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Herbie alternative: first-order approximation -c/b; parameter a is unused. */
double code(double a, double b, double c) {
    double root = -c / b;
    return root;
}
! Herbie alternative: first-order approximation -c/b; argument a is unused.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
/** Herbie alternative: first-order approximation -c/b; parameter a is unused. */
public static double code(double a, double b, double c) {
    final double root = -c / b;
    return root;
}
def code(a, b, c):
    """Herbie alternative: first-order approximation -c/b; parameter a is unused."""
    root = -c / b
    return root
# Herbie alternative: first-order approximation -c/b in Float64; a is unused.
function code(a, b, c)
    root = Float64(Float64(-c) / b)
    return root
end
% Herbie alternative: first-order approximation -c/b; input a is unused.
function tmp = code(a, b, c)
    tmp = (-c) / b;
end
(* Herbie alternative: first-order approximation -c/b, rounded to $MachinePrecision; argument a is unused. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 28.9%
Taylor expanded in b around inf 83.0%
associate-*r/ 83.0%
neg-mul-1 83.0%
Simplified 83.0%
Final simplification 83.0%
herbie shell --seed 2023187
; Input specification: quadratic-formula root with all three inputs
; restricted to the positive range (2^-53, 2^53) via the :pre condition.
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))