
; Initial program: the "+" root of a*x^2 + b*x + c = 0 via the textbook
; quadratic formula (-b + sqrt(b^2 - 4ac)) / (2a), in binary64.
; NOTE(review): this form cancels catastrophically when b^2 >> 4ac and b > 0.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// C translation of the FPCore expression above (sqrt from <math.h>).
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Fortran translation; d0 suffixes keep the constants in double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Java translation.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Python translation (caller must have `import math` in scope).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Julia translation; the Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% MATLAB translation.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Mathematica translation; N[..., $MachinePrecision] rounds each intermediate. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Listing repeated below: this is the initial (unimproved) program again,
; shown as the baseline entry of the alternatives table.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// C translation (identical to the baseline above).
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Fortran translation; d0 suffixes keep the constants in double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Java translation.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Python translation (caller must have `import math` in scope).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Julia translation; Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% MATLAB translation.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Mathematica translation. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
; Alternative: rewrite derived (per the log below) by Taylor expansion in a
; around -inf, using fused multiply-add and the shared subterm
; t_0 = b^2 - 4ac. Exact operation order is deliberate — do not reassociate.
(FPCore (a b c)
:precision binary64
(let* ((t_0 (- (* b b) (* c (* a 4.0)))))
(/
(/
(/
(fma
64.0
(* (* c a) (* (* c a) (* c a)))
(fma
a
(* (* c (pow b 4.0)) 12.0)
(* (* a a) (* (* (* b b) (* c c)) (- 48.0)))))
(- (pow (- b) 3.0) (pow (- (* b b) (* (* c a) 4.0)) 1.5)))
(+ (pow (- b) 2.0) (+ t_0 (* b (sqrt t_0)))))
(* a 2.0))))
// C translation; fma() requires C99 <math.h>.
double code(double a, double b, double c) {
double t_0 = (b * b) - (c * (a * 4.0));
return ((fma(64.0, ((c * a) * ((c * a) * (c * a))), fma(a, ((c * pow(b, 4.0)) * 12.0), ((a * a) * (((b * b) * (c * c)) * -48.0)))) / (pow(-b, 3.0) - pow(((b * b) - ((c * a) * 4.0)), 1.5))) / (pow(-b, 2.0) + (t_0 + (b * sqrt(t_0))))) / (a * 2.0);
}
# Julia translation; Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) t_0 = Float64(Float64(b * b) - Float64(c * Float64(a * 4.0))) return Float64(Float64(Float64(fma(64.0, Float64(Float64(c * a) * Float64(Float64(c * a) * Float64(c * a))), fma(a, Float64(Float64(c * (b ^ 4.0)) * 12.0), Float64(Float64(a * a) * Float64(Float64(Float64(b * b) * Float64(c * c)) * Float64(-48.0))))) / Float64((Float64(-b) ^ 3.0) - (Float64(Float64(b * b) - Float64(Float64(c * a) * 4.0)) ^ 1.5))) / Float64((Float64(-b) ^ 2.0) + Float64(t_0 + Float64(b * sqrt(t_0))))) / Float64(a * 2.0)) end
(* Mathematica translation; Block-scoped t$95$0 mirrors the FPCore let*. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(64.0 * N[(N[(c * a), $MachinePrecision] * N[(N[(c * a), $MachinePrecision] * N[(c * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(a * N[(N[(c * N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision] * 12.0), $MachinePrecision] + N[(N[(a * a), $MachinePrecision] * N[(N[(N[(b * b), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision] * (-48.0)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Power[(-b), 3.0], $MachinePrecision] - N[Power[N[(N[(b * b), $MachinePrecision] - N[(N[(c * a), $MachinePrecision] * 4.0), $MachinePrecision]), $MachinePrecision], 1.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Power[(-b), 2.0], $MachinePrecision] + N[(t$95$0 + N[(b * N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]]
% LaTeX rendering.
\begin{array}{l}
\\
\begin{array}{l}
t_0 := b \cdot b - c \cdot \left(a \cdot 4\right)\\
\frac{\frac{\frac{\mathsf{fma}\left(64, \left(c \cdot a\right) \cdot \left(\left(c \cdot a\right) \cdot \left(c \cdot a\right)\right), \mathsf{fma}\left(a, \left(c \cdot {b}^{4}\right) \cdot 12, \left(a \cdot a\right) \cdot \left(\left(\left(b \cdot b\right) \cdot \left(c \cdot c\right)\right) \cdot \left(-48\right)\right)\right)\right)}{{\left(-b\right)}^{3} - {\left(b \cdot b - \left(c \cdot a\right) \cdot 4\right)}^{1.5}}}{{\left(-b\right)}^{2} + \left(t_0 + b \cdot \sqrt{t_0}\right)}}{a \cdot 2}
\end{array}
\end{array}
Initial program 31.9%
flip3-+: 31.9%
pow1/2: 31.9%
pow-pow: 32.9%
*-commutative: 32.9%
*-commutative: 32.9%
metadata-eval: 32.9%
pow2: 32.9%
Applied egg-rr: 32.9%
flip-+: 32.9%
Applied egg-rr: 32.9%
neg-mul-1: 32.9%
neg-mul-1: 32.9%
pow-sqr: 33.6%
neg-mul-1: 33.6%
metadata-eval: 33.6%
pow-sqr: 33.7%
associate-*r*: 33.7%
*-commutative: 33.7%
metadata-eval: 33.7%
associate-*r*: 33.7%
*-commutative: 33.7%
Simplified: 33.7%
Taylor expanded in a around -inf 98.7%
fma-def: 98.7%
cube-prod: 98.7%
fma-def: 98.8%
distribute-rgt-out: 98.8%
metadata-eval: 98.8%
mul-1-neg: 98.8%
distribute-rgt-neg-in: 98.8%
unpow2: 98.8%
distribute-rgt-out: 98.8%
Simplified: 98.8%
unpow3: 98.8%
Applied egg-rr: 98.8%
Final simplification: 98.8%
; Alternative: variant of the previous fma rewrite, derived (per the log
; below) by Taylor expansion in b around inf; uses (c*a)^3 directly.
; Exact operation order is deliberate — do not reassociate.
(FPCore (a b c)
:precision binary64
(let* ((t_0 (- (* b b) (* c (* a 4.0)))))
(/
(/
(/
(fma
64.0
(pow (* c a) 3.0)
(+
(* (pow b 4.0) (* (* c a) 12.0))
(* (* b b) (* (* (* c a) (* c a)) -48.0))))
(- (pow (- b) 3.0) (pow (- (* b b) (* (* c a) 4.0)) 1.5)))
(+ (pow (- b) 2.0) (+ t_0 (* b (sqrt t_0)))))
(* a 2.0))))
// C translation; fma() requires C99 <math.h>.
double code(double a, double b, double c) {
double t_0 = (b * b) - (c * (a * 4.0));
return ((fma(64.0, pow((c * a), 3.0), ((pow(b, 4.0) * ((c * a) * 12.0)) + ((b * b) * (((c * a) * (c * a)) * -48.0)))) / (pow(-b, 3.0) - pow(((b * b) - ((c * a) * 4.0)), 1.5))) / (pow(-b, 2.0) + (t_0 + (b * sqrt(t_0))))) / (a * 2.0);
}
# Julia translation; Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) t_0 = Float64(Float64(b * b) - Float64(c * Float64(a * 4.0))) return Float64(Float64(Float64(fma(64.0, (Float64(c * a) ^ 3.0), Float64(Float64((b ^ 4.0) * Float64(Float64(c * a) * 12.0)) + Float64(Float64(b * b) * Float64(Float64(Float64(c * a) * Float64(c * a)) * -48.0)))) / Float64((Float64(-b) ^ 3.0) - (Float64(Float64(b * b) - Float64(Float64(c * a) * 4.0)) ^ 1.5))) / Float64((Float64(-b) ^ 2.0) + Float64(t_0 + Float64(b * sqrt(t_0))))) / Float64(a * 2.0)) end
(* Mathematica translation; Block-scoped t$95$0 mirrors the FPCore let*. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(64.0 * N[Power[N[(c * a), $MachinePrecision], 3.0], $MachinePrecision] + N[(N[(N[Power[b, 4.0], $MachinePrecision] * N[(N[(c * a), $MachinePrecision] * 12.0), $MachinePrecision]), $MachinePrecision] + N[(N[(b * b), $MachinePrecision] * N[(N[(N[(c * a), $MachinePrecision] * N[(c * a), $MachinePrecision]), $MachinePrecision] * -48.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Power[(-b), 3.0], $MachinePrecision] - N[Power[N[(N[(b * b), $MachinePrecision] - N[(N[(c * a), $MachinePrecision] * 4.0), $MachinePrecision]), $MachinePrecision], 1.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Power[(-b), 2.0], $MachinePrecision] + N[(t$95$0 + N[(b * N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]]
% LaTeX rendering.
\begin{array}{l}
\\
\begin{array}{l}
t_0 := b \cdot b - c \cdot \left(a \cdot 4\right)\\
\frac{\frac{\frac{\mathsf{fma}\left(64, {\left(c \cdot a\right)}^{3}, {b}^{4} \cdot \left(\left(c \cdot a\right) \cdot 12\right) + \left(b \cdot b\right) \cdot \left(\left(\left(c \cdot a\right) \cdot \left(c \cdot a\right)\right) \cdot -48\right)\right)}{{\left(-b\right)}^{3} - {\left(b \cdot b - \left(c \cdot a\right) \cdot 4\right)}^{1.5}}}{{\left(-b\right)}^{2} + \left(t_0 + b \cdot \sqrt{t_0}\right)}}{a \cdot 2}
\end{array}
\end{array}
Initial program 31.9%
flip3-+: 31.9%
pow1/2: 31.9%
pow-pow: 32.9%
*-commutative: 32.9%
*-commutative: 32.9%
metadata-eval: 32.9%
pow2: 32.9%
Applied egg-rr: 32.9%
flip-+: 32.9%
Applied egg-rr: 32.9%
neg-mul-1: 32.9%
neg-mul-1: 32.9%
pow-sqr: 33.6%
neg-mul-1: 33.6%
metadata-eval: 33.6%
pow-sqr: 33.7%
associate-*r*: 33.7%
*-commutative: 33.7%
metadata-eval: 33.7%
associate-*r*: 33.7%
*-commutative: 33.7%
Simplified: 33.7%
Taylor expanded in b around inf 98.8%
fma-def: 98.8%
cube-prod: 98.8%
mul-1-neg: 98.8%
*-commutative: 98.8%
distribute-rgt-neg-in: 98.8%
distribute-rgt-out: 98.8%
metadata-eval: 98.8%
distribute-rgt-neg-in: 98.8%
metadata-eval: 98.8%
Simplified: 98.8%
Final simplification: 98.8%
; Alternative: sqrt-free series approximation derived (per the log below)
; by Taylor expansion in a around 0 and in b around 0.
; Exact operation order is deliberate — do not reassociate.
(FPCore (a b c)
:precision binary64
(-
(-
(fma
-0.25
(/ (pow a 3.0) (/ (pow b 7.0) (* (pow c 4.0) 20.0)))
(* -2.0 (/ (* a a) (/ (pow b 5.0) (pow c 3.0)))))
(/ c b))
(/ (* c (* c a)) (pow b 3.0))))
// C translation; fma() requires C99 <math.h>.
double code(double a, double b, double c) {
return (fma(-0.25, (pow(a, 3.0) / (pow(b, 7.0) / (pow(c, 4.0) * 20.0))), (-2.0 * ((a * a) / (pow(b, 5.0) / pow(c, 3.0))))) - (c / b)) - ((c * (c * a)) / pow(b, 3.0));
}
# Julia translation; Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) return Float64(Float64(fma(-0.25, Float64((a ^ 3.0) / Float64((b ^ 7.0) / Float64((c ^ 4.0) * 20.0))), Float64(-2.0 * Float64(Float64(a * a) / Float64((b ^ 5.0) / (c ^ 3.0))))) - Float64(c / b)) - Float64(Float64(c * Float64(c * a)) / (b ^ 3.0))) end
(* Mathematica translation. *)
code[a_, b_, c_] := N[(N[(N[(-0.25 * N[(N[Power[a, 3.0], $MachinePrecision] / N[(N[Power[b, 7.0], $MachinePrecision] / N[(N[Power[c, 4.0], $MachinePrecision] * 20.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-2.0 * N[(N[(a * a), $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(N[(c * N[(c * a), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\left(\mathsf{fma}\left(-0.25, \frac{{a}^{3}}{\frac{{b}^{7}}{{c}^{4} \cdot 20}}, -2 \cdot \frac{a \cdot a}{\frac{{b}^{5}}{{c}^{3}}}\right) - \frac{c}{b}\right) - \frac{c \cdot \left(c \cdot a\right)}{{b}^{3}}
\end{array}
Initial program 31.9%
neg-sub0: 31.9%
associate-+l-: 31.9%
sub0-neg: 31.9%
neg-mul-1: 31.9%
associate-*l/: 31.9%
*-commutative: 31.9%
associate-/r*: 31.9%
/-rgt-identity: 31.9%
metadata-eval: 31.9%
Simplified: 31.8%
Taylor expanded in a around 0 96.1%
Simplified: 96.1%
Taylor expanded in b around 0 96.1%
associate-/l*: 96.1%
distribute-rgt-out: 96.1%
metadata-eval: 96.1%
Simplified: 96.1%
Final simplification: 96.1%
; Alternative: three-term series approximation derived (per the log below)
; by Taylor expansion in b around inf. Exact operation order is deliberate.
(FPCore (a b c) :precision binary64 (- (- (* -2.0 (* (pow c 3.0) (/ a (/ (pow b 5.0) a)))) (/ c b)) (/ (* c c) (/ (pow b 3.0) a))))
// C translation.
double code(double a, double b, double c) {
return ((-2.0 * (pow(c, 3.0) * (a / (pow(b, 5.0) / a)))) - (c / b)) - ((c * c) / (pow(b, 3.0) / a));
}
! Fortran translation; d0 suffixes keep the constants in double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (((-2.0d0) * ((c ** 3.0d0) * (a / ((b ** 5.0d0) / a)))) - (c / b)) - ((c * c) / ((b ** 3.0d0) / a))
end function
// Java translation.
public static double code(double a, double b, double c) {
return ((-2.0 * (Math.pow(c, 3.0) * (a / (Math.pow(b, 5.0) / a)))) - (c / b)) - ((c * c) / (Math.pow(b, 3.0) / a));
}
# Python translation (caller must have `import math` in scope).
def code(a, b, c): return ((-2.0 * (math.pow(c, 3.0) * (a / (math.pow(b, 5.0) / a)))) - (c / b)) - ((c * c) / (math.pow(b, 3.0) / a))
# Julia translation; Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) return Float64(Float64(Float64(-2.0 * Float64((c ^ 3.0) * Float64(a / Float64((b ^ 5.0) / a)))) - Float64(c / b)) - Float64(Float64(c * c) / Float64((b ^ 3.0) / a))) end
% MATLAB translation.
function tmp = code(a, b, c) tmp = ((-2.0 * ((c ^ 3.0) * (a / ((b ^ 5.0) / a)))) - (c / b)) - ((c * c) / ((b ^ 3.0) / a)); end
(* Mathematica translation. *)
code[a_, b_, c_] := N[(N[(N[(-2.0 * N[(N[Power[c, 3.0], $MachinePrecision] * N[(a / N[(N[Power[b, 5.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(N[(c * c), $MachinePrecision] / N[(N[Power[b, 3.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\left(-2 \cdot \left({c}^{3} \cdot \frac{a}{\frac{{b}^{5}}{a}}\right) - \frac{c}{b}\right) - \frac{c \cdot c}{\frac{{b}^{3}}{a}}
\end{array}
Initial program 31.9%
*-commutative: 31.9%
+-commutative: 31.9%
unsub-neg: 31.9%
fma-neg: 31.9%
associate-*l*: 31.9%
*-commutative: 31.9%
distribute-rgt-neg-in: 31.9%
metadata-eval: 31.9%
Simplified: 31.9%
add-cbrt-cube: 31.3%
pow3: 31.3%
pow1/2: 31.3%
pow-pow: 31.5%
*-commutative: 31.5%
metadata-eval: 31.5%
Applied egg-rr: 31.5%
Taylor expanded in b around inf 94.5%
+-commutative: 94.5%
mul-1-neg: 94.5%
unsub-neg: 94.5%
Simplified: 94.5%
Final simplification: 94.5%
; Alternative: two-term series approximation (-c/b - c^2/(b^3/a)) derived
; (per the log below) by Taylor expansion in b around inf and in c around 0.
(FPCore (a b c) :precision binary64 (- (/ (- c) b) (/ (* c c) (/ (pow b 3.0) a))))
// C translation.
double code(double a, double b, double c) {
return (-c / b) - ((c * c) / (pow(b, 3.0) / a));
}
! Fortran translation; d0 suffix keeps the exponent in double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-c / b) - ((c * c) / ((b ** 3.0d0) / a))
end function
// Java translation.
public static double code(double a, double b, double c) {
return (-c / b) - ((c * c) / (Math.pow(b, 3.0) / a));
}
# Python translation (caller must have `import math` in scope).
def code(a, b, c): return (-c / b) - ((c * c) / (math.pow(b, 3.0) / a))
# Julia translation; Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) return Float64(Float64(Float64(-c) / b) - Float64(Float64(c * c) / Float64((b ^ 3.0) / a))) end
% MATLAB translation.
function tmp = code(a, b, c) tmp = (-c / b) - ((c * c) / ((b ^ 3.0) / a)); end
(* Mathematica translation. *)
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(N[(c * c), $MachinePrecision] / N[(N[Power[b, 3.0], $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{-c}{b} - \frac{c \cdot c}{\frac{{b}^{3}}{a}}
\end{array}
Initial program 31.9%
neg-sub0: 31.9%
associate-+l-: 31.9%
sub0-neg: 31.9%
neg-mul-1: 31.9%
associate-*l/: 31.9%
*-commutative: 31.9%
associate-/r*: 31.9%
/-rgt-identity: 31.9%
metadata-eval: 31.9%
Simplified: 31.8%
Taylor expanded in b around inf 91.0%
+-commutative: 91.0%
mul-1-neg: 91.0%
unsub-neg: 91.0%
associate-*r/: 91.0%
neg-mul-1: 91.0%
unpow2: 91.0%
associate-*l*: 91.0%
Simplified: 91.0%
Taylor expanded in c around 0 91.0%
associate-/l*: 91.0%
unpow2: 91.0%
Simplified: 91.0%
Final simplification: 91.0%
; Alternative: cheapest approximation, the leading series term -c/b
; (per the log below, from Taylor expansion in b around inf).
(FPCore (a b c) :precision binary64 (/ (- c) b))
// C translation.
double code(double a, double b, double c) {
return -c / b;
}
! Fortran translation.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
// Java translation.
public static double code(double a, double b, double c) {
return -c / b;
}
# Python translation.
def code(a, b, c): return -c / b
# Julia translation; Float64(...) wrappers force binary64 rounding per step.
function code(a, b, c) return Float64(Float64(-c) / b) end
% MATLAB translation.
function tmp = code(a, b, c) tmp = -c / b; end
(* Mathematica translation. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 31.9%
neg-sub0: 31.9%
associate-+l-: 31.9%
sub0-neg: 31.9%
neg-mul-1: 31.9%
associate-*l/: 31.9%
*-commutative: 31.9%
associate-/r*: 31.9%
/-rgt-identity: 31.9%
metadata-eval: 31.9%
Simplified: 31.8%
Taylor expanded in b around inf 81.0%
associate-*r/: 81.0%
neg-mul-1: 81.0%
Simplified: 81.0%
Final simplification: 81.0%
herbie shell --seed 2023214
; Full input specification: same quadratic-root expression with its name and
; precondition restricting a, b, c to (2^-53, 2^53) — the sampled input range.
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))