
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated directly in binary64. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated directly in binary64.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a). */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program: quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a).
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(let* ((t_0 (pow (* a c) 4.0)))
(+
(* -2.0 (/ (* (pow a 2.0) (pow c 3.0)) (pow b 5.0)))
(-
(-
(* -0.25 (/ (+ (* 16.0 t_0) (* 4.0 t_0)) (* a (pow b 7.0))))
(/ (* a (pow c 2.0)) (pow b 3.0)))
(/ c b)))))
/* Herbie alternative: series rewrite of the quadratic root (Taylor expansion
 * in b around inf, per the derivation log), with t_0 = (a*c)^4 shared. */
double code(double a, double b, double c) {
double t_0 = pow((a * c), 4.0);
return (-2.0 * ((pow(a, 2.0) * pow(c, 3.0)) / pow(b, 5.0))) + (((-0.25 * (((16.0 * t_0) + (4.0 * t_0)) / (a * pow(b, 7.0)))) - ((a * pow(c, 2.0)) / pow(b, 3.0))) - (c / b));
}
! Herbie alternative: series rewrite of the quadratic root (Taylor expansion
! in b around inf), with the shared subterm t_0 = (a*c)**4.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8) :: t_0
t_0 = (a * c) ** 4.0d0
code = ((-2.0d0) * (((a ** 2.0d0) * (c ** 3.0d0)) / (b ** 5.0d0))) + ((((-0.25d0) * (((16.0d0 * t_0) + (4.0d0 * t_0)) / (a * (b ** 7.0d0)))) - ((a * (c ** 2.0d0)) / (b ** 3.0d0))) - (c / b))
end function
// Herbie alternative: series rewrite of the quadratic root (Taylor expansion
// in b around inf), with t_0 = (a*c)^4 shared.
public static double code(double a, double b, double c) {
double t_0 = Math.pow((a * c), 4.0);
return (-2.0 * ((Math.pow(a, 2.0) * Math.pow(c, 3.0)) / Math.pow(b, 5.0))) + (((-0.25 * (((16.0 * t_0) + (4.0 * t_0)) / (a * Math.pow(b, 7.0)))) - ((a * Math.pow(c, 2.0)) / Math.pow(b, 3.0))) - (c / b));
}
def code(a, b, c): t_0 = math.pow((a * c), 4.0) return (-2.0 * ((math.pow(a, 2.0) * math.pow(c, 3.0)) / math.pow(b, 5.0))) + (((-0.25 * (((16.0 * t_0) + (4.0 * t_0)) / (a * math.pow(b, 7.0)))) - ((a * math.pow(c, 2.0)) / math.pow(b, 3.0))) - (c / b))
function code(a, b, c) t_0 = Float64(a * c) ^ 4.0 return Float64(Float64(-2.0 * Float64(Float64((a ^ 2.0) * (c ^ 3.0)) / (b ^ 5.0))) + Float64(Float64(Float64(-0.25 * Float64(Float64(Float64(16.0 * t_0) + Float64(4.0 * t_0)) / Float64(a * (b ^ 7.0)))) - Float64(Float64(a * (c ^ 2.0)) / (b ^ 3.0))) - Float64(c / b))) end
function tmp = code(a, b, c) t_0 = (a * c) ^ 4.0; tmp = (-2.0 * (((a ^ 2.0) * (c ^ 3.0)) / (b ^ 5.0))) + (((-0.25 * (((16.0 * t_0) + (4.0 * t_0)) / (a * (b ^ 7.0)))) - ((a * (c ^ 2.0)) / (b ^ 3.0))) - (c / b)); end
code[a_, b_, c_] := Block[{t$95$0 = N[Power[N[(a * c), $MachinePrecision], 4.0], $MachinePrecision]}, N[(N[(-2.0 * N[(N[(N[Power[a, 2.0], $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(-0.25 * N[(N[(N[(16.0 * t$95$0), $MachinePrecision] + N[(4.0 * t$95$0), $MachinePrecision]), $MachinePrecision] / N[(a * N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(a \cdot c\right)}^{4}\\
-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(\left(-0.25 \cdot \frac{16 \cdot t\_0 + 4 \cdot t\_0}{a \cdot {b}^{7}} - \frac{a \cdot {c}^{2}}{{b}^{3}}\right) - \frac{c}{b}\right)
\end{array}
\end{array}
Initial program 28.1%
*-commutative 28.1%
Simplified 28.1%
Taylor expanded in b around inf 96.4%
*-commutative 96.4%
unpow-prod-down 96.4%
pow-prod-down 96.4%
pow-pow 96.4%
metadata-eval 96.4%
metadata-eval 96.4%
Applied egg-rr 96.4%
expm1-log1p-u 96.4%
expm1-udef 96.0%
pow-prod-down 96.0%
Applied egg-rr 96.0%
expm1-def 96.4%
expm1-log1p 96.4%
Simplified 96.4%
Final simplification 96.4%
(FPCore (a b c) :precision binary64 (- (- (/ -2.0 (/ (pow b 5.0) (* (pow a 2.0) (pow c 3.0)))) (/ c b)) (* (pow c 2.0) (/ a (pow b 3.0)))))
/* Herbie alternative: -2 / (b^5 / (a^2 c^3)) - c/b - c^2 * (a / b^3),
 * a series rewrite of the quadratic root (Taylor expansion in b around inf). */
double code(double a, double b, double c) {
return ((-2.0 / (pow(b, 5.0) / (pow(a, 2.0) * pow(c, 3.0)))) - (c / b)) - (pow(c, 2.0) * (a / pow(b, 3.0)));
}
! Herbie alternative: -2 / (b**5 / (a**2 * c**3)) - c/b - c**2 * (a / b**3),
! a series rewrite of the quadratic root (Taylor expansion in b around inf).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (((-2.0d0) / ((b ** 5.0d0) / ((a ** 2.0d0) * (c ** 3.0d0)))) - (c / b)) - ((c ** 2.0d0) * (a / (b ** 3.0d0)))
end function
// Herbie alternative: -2 / (b^5 / (a^2 c^3)) - c/b - c^2 * (a / b^3),
// a series rewrite of the quadratic root (Taylor expansion in b around inf).
public static double code(double a, double b, double c) {
return ((-2.0 / (Math.pow(b, 5.0) / (Math.pow(a, 2.0) * Math.pow(c, 3.0)))) - (c / b)) - (Math.pow(c, 2.0) * (a / Math.pow(b, 3.0)));
}
def code(a, b, c): return ((-2.0 / (math.pow(b, 5.0) / (math.pow(a, 2.0) * math.pow(c, 3.0)))) - (c / b)) - (math.pow(c, 2.0) * (a / math.pow(b, 3.0)))
function code(a, b, c) return Float64(Float64(Float64(-2.0 / Float64((b ^ 5.0) / Float64((a ^ 2.0) * (c ^ 3.0)))) - Float64(c / b)) - Float64((c ^ 2.0) * Float64(a / (b ^ 3.0)))) end
function tmp = code(a, b, c) tmp = ((-2.0 / ((b ^ 5.0) / ((a ^ 2.0) * (c ^ 3.0)))) - (c / b)) - ((c ^ 2.0) * (a / (b ^ 3.0))); end
code[a_, b_, c_] := N[(N[(N[(-2.0 / N[(N[Power[b, 5.0], $MachinePrecision] / N[(N[Power[a, 2.0], $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] * N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{-2}{\frac{{b}^{5}}{{a}^{2} \cdot {c}^{3}}} - \frac{c}{b}\right) - {c}^{2} \cdot \frac{a}{{b}^{3}}
\end{array}
Initial program 28.1%
*-commutative 28.1%
Simplified 28.1%
Taylor expanded in b around inf 95.2%
associate-+r+ 95.2%
mul-1-neg 95.2%
unsub-neg 95.2%
mul-1-neg 95.2%
unsub-neg 95.2%
associate-*r/ 95.2%
associate-/l* 95.2%
*-commutative 95.2%
associate-/l* 95.2%
associate-/r/ 95.2%
Simplified 95.2%
Final simplification 95.2%
(FPCore (a b c) :precision binary64 (/ (fma -4.0 (/ (pow (* a c) 3.0) (pow b 5.0)) (* -2.0 (+ (* c (/ a b)) (/ (* (* a c) (* a c)) (pow b 3.0))))) (* a 2.0)))
/* Herbie alternative: fma-based series rewrite of the quadratic root
 * (Taylor expansion in b around inf); fma fuses -4 * (ac)^3/b^5 with the rest. */
double code(double a, double b, double c) {
return fma(-4.0, (pow((a * c), 3.0) / pow(b, 5.0)), (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / pow(b, 3.0))))) / (a * 2.0);
}
function code(a, b, c) return Float64(fma(-4.0, Float64((Float64(a * c) ^ 3.0) / (b ^ 5.0)), Float64(-2.0 * Float64(Float64(c * Float64(a / b)) + Float64(Float64(Float64(a * c) * Float64(a * c)) / (b ^ 3.0))))) / Float64(a * 2.0)) end
code[a_, b_, c_] := N[(N[(-4.0 * N[(N[Power[N[(a * c), $MachinePrecision], 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] + N[(-2.0 * N[(N[(c * N[(a / b), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(a * c), $MachinePrecision] * N[(a * c), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-4, \frac{{\left(a \cdot c\right)}^{3}}{{b}^{5}}, -2 \cdot \left(c \cdot \frac{a}{b} + \frac{\left(a \cdot c\right) \cdot \left(a \cdot c\right)}{{b}^{3}}\right)\right)}{a \cdot 2}
\end{array}
Initial program 28.1%
*-commutative 28.1%
Simplified 28.1%
Applied egg-rr 28.3%
Taylor expanded in b around inf 94.8%
fma-def 94.8%
cube-prod 94.8%
distribute-lft-out 94.8%
associate-/l* 94.8%
associate-/r/ 94.7%
unpow2 94.7%
unpow2 94.7%
swap-sqr 94.7%
unpow2 94.7%
Simplified 94.7%
unpow2 92.0%
Applied egg-rr 94.7%
Final simplification 94.7%
(FPCore (a b c) :precision binary64 (- (/ (- c) b) (* (pow c 2.0) (/ a (pow b 3.0)))))
/* Herbie alternative: -c/b - c^2 * (a / b^3), a two-term series rewrite
 * of the quadratic root (Taylor expansion in b around inf). */
double code(double a, double b, double c) {
return (-c / b) - (pow(c, 2.0) * (a / pow(b, 3.0)));
}
! Herbie alternative: -c/b - c**2 * (a / b**3), a two-term series rewrite
! of the quadratic root (Taylor expansion in b around inf).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-c / b) - ((c ** 2.0d0) * (a / (b ** 3.0d0)))
end function
// Herbie alternative: -c/b - c^2 * (a / b^3), a two-term series rewrite
// of the quadratic root (Taylor expansion in b around inf).
public static double code(double a, double b, double c) {
return (-c / b) - (Math.pow(c, 2.0) * (a / Math.pow(b, 3.0)));
}
def code(a, b, c): return (-c / b) - (math.pow(c, 2.0) * (a / math.pow(b, 3.0)))
function code(a, b, c) return Float64(Float64(Float64(-c) / b) - Float64((c ^ 2.0) * Float64(a / (b ^ 3.0)))) end
function tmp = code(a, b, c) tmp = (-c / b) - ((c ^ 2.0) * (a / (b ^ 3.0))); end
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] * N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b} - {c}^{2} \cdot \frac{a}{{b}^{3}}
\end{array}
Initial program 28.1%
*-commutative 28.1%
Simplified 28.1%
Taylor expanded in b around inf 92.5%
mul-1-neg 92.5%
unsub-neg 92.5%
mul-1-neg 92.5%
distribute-neg-frac 92.5%
associate-/l* 92.5%
associate-/r/ 92.5%
Simplified 92.5%
Final simplification 92.5%
(FPCore (a b c) :precision binary64 (/ (* -2.0 (- (/ (* (* a c) (* a c)) (pow b 3.0)) (/ (* a c) (- b)))) (* a 2.0)))
/* Herbie alternative: -2 * ((ac)^2/b^3 - (ac)/(-b)) / (2a), a series rewrite
 * of the quadratic root (Taylor expansion in b around inf). */
double code(double a, double b, double c) {
return (-2.0 * ((((a * c) * (a * c)) / pow(b, 3.0)) - ((a * c) / -b))) / (a * 2.0);
}
! Herbie alternative: -2 * ((a*c)**2/b**3 - (a*c)/(-b)) / (2*a), a series
! rewrite of the quadratic root (Taylor expansion in b around inf).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((-2.0d0) * ((((a * c) * (a * c)) / (b ** 3.0d0)) - ((a * c) / -b))) / (a * 2.0d0)
end function
// Herbie alternative: -2 * ((ac)^2/b^3 - (ac)/(-b)) / (2a), a series rewrite
// of the quadratic root (Taylor expansion in b around inf).
public static double code(double a, double b, double c) {
return (-2.0 * ((((a * c) * (a * c)) / Math.pow(b, 3.0)) - ((a * c) / -b))) / (a * 2.0);
}
def code(a, b, c): return (-2.0 * ((((a * c) * (a * c)) / math.pow(b, 3.0)) - ((a * c) / -b))) / (a * 2.0)
function code(a, b, c) return Float64(Float64(-2.0 * Float64(Float64(Float64(Float64(a * c) * Float64(a * c)) / (b ^ 3.0)) - Float64(Float64(a * c) / Float64(-b)))) / Float64(a * 2.0)) end
function tmp = code(a, b, c) tmp = (-2.0 * ((((a * c) * (a * c)) / (b ^ 3.0)) - ((a * c) / -b))) / (a * 2.0); end
code[a_, b_, c_] := N[(N[(-2.0 * N[(N[(N[(N[(a * c), $MachinePrecision] * N[(a * c), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] - N[(N[(a * c), $MachinePrecision] / (-b)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-2 \cdot \left(\frac{\left(a \cdot c\right) \cdot \left(a \cdot c\right)}{{b}^{3}} - \frac{a \cdot c}{-b}\right)}{a \cdot 2}
\end{array}
Initial program 28.1%
*-commutative 28.1%
Simplified 28.1%
Applied egg-rr 28.3%
Taylor expanded in b around inf 92.1%
distribute-lft-out 92.1%
associate-/l* 92.1%
associate-/r/ 92.0%
unpow2 92.0%
unpow2 92.0%
swap-sqr 92.0%
unpow2 92.0%
Simplified 92.0%
associate-*l/ 92.1%
frac-2neg 92.1%
Applied egg-rr 92.1%
unpow2 92.0%
Applied egg-rr 92.1%
Final simplification 92.1%
(FPCore (a b c) :precision binary64 (/ (* -2.0 (+ (* c (/ a b)) (/ (* (* a c) (* a c)) (pow b 3.0)))) (* a 2.0)))
/* Herbie alternative: -2 * (c*(a/b) + (ac)^2/b^3) / (2a), a series rewrite
 * of the quadratic root (Taylor expansion in b around inf). */
double code(double a, double b, double c) {
return (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / pow(b, 3.0)))) / (a * 2.0);
}
! Herbie alternative: -2 * (c*(a/b) + (a*c)**2/b**3) / (2*a), a series
! rewrite of the quadratic root (Taylor expansion in b around inf).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((-2.0d0) * ((c * (a / b)) + (((a * c) * (a * c)) / (b ** 3.0d0)))) / (a * 2.0d0)
end function
// Herbie alternative: -2 * (c*(a/b) + (ac)^2/b^3) / (2a), a series rewrite
// of the quadratic root (Taylor expansion in b around inf).
public static double code(double a, double b, double c) {
return (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / Math.pow(b, 3.0)))) / (a * 2.0);
}
def code(a, b, c): return (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / math.pow(b, 3.0)))) / (a * 2.0)
function code(a, b, c) return Float64(Float64(-2.0 * Float64(Float64(c * Float64(a / b)) + Float64(Float64(Float64(a * c) * Float64(a * c)) / (b ^ 3.0)))) / Float64(a * 2.0)) end
function tmp = code(a, b, c) tmp = (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / (b ^ 3.0)))) / (a * 2.0); end
code[a_, b_, c_] := N[(N[(-2.0 * N[(N[(c * N[(a / b), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(a * c), $MachinePrecision] * N[(a * c), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-2 \cdot \left(c \cdot \frac{a}{b} + \frac{\left(a \cdot c\right) \cdot \left(a \cdot c\right)}{{b}^{3}}\right)}{a \cdot 2}
\end{array}
Initial program 28.1%
*-commutative 28.1%
Simplified 28.1%
Applied egg-rr 28.3%
Taylor expanded in b around inf 92.1%
distribute-lft-out 92.1%
associate-/l* 92.1%
associate-/r/ 92.0%
unpow2 92.0%
unpow2 92.0%
swap-sqr 92.0%
unpow2 92.0%
Simplified 92.0%
unpow2 92.0%
Applied egg-rr 92.0%
Final simplification 92.0%
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Herbie alternative: leading series term -c/b of the quadratic root
 * (Taylor expansion in b around inf); a and the discriminant drop out. */
double code(double a, double b, double c) {
return -c / b;
}
! Herbie alternative: leading series term -c/b of the quadratic root
! (Taylor expansion in b around inf); argument a is unused here.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
// Herbie alternative: leading series term -c/b of the quadratic root
// (Taylor expansion in b around inf); parameter a is unused here.
public static double code(double a, double b, double c) {
return -c / b;
}
def code(a, b, c): return -c / b
function code(a, b, c) return Float64(Float64(-c) / b) end
function tmp = code(a, b, c) tmp = -c / b; end
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 28.1%
*-commutative 28.1%
Simplified 28.1%
Taylor expanded in b around inf 83.9%
mul-1-neg 83.9%
distribute-neg-frac 83.9%
Simplified 83.9%
Final simplification 83.9%
herbie shell --seed 2024031
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))