
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
// Larger root of a*x^2 + b*x + c = 0: (-b + sqrt(b*b - 4*a*c)) / (2*a).
// NOTE(review): cancels badly when b*b ~ 4*a*c — that is what the report's alternatives address.
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), in double precision.
! NOTE(review): generated listing — real(8) is non-standard; a kind parameter would be portable.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Larger quadratic root: (-b + Math.sqrt(b*b - 4*a*c)) / (2*a).
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    """Larger quadratic root, (-b + sqrt(b*b - 4*a*c)) / (2*a), in float arithmetic."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a); every step rounded to Float64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Larger quadratic root (-b + Sqrt[b*b - 4*a*c]) / (2*a); N[..., $MachinePrecision] mimics binary64 rounding at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
// Baseline listing (repeated from the header): larger root (-b + sqrt(b*b - 4*a*c)) / (2*a).
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Baseline listing (repeated): larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Baseline listing (repeated): larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    # Textbook larger root of a*x^2 + b*x + c = 0.
    disc = (b * b) - ((4.0 * a) * c)
    numer = -b + math.sqrt(disc)
    return numer / (2.0 * a)
# Baseline listing (repeated): larger quadratic root with explicit Float64 rounding per step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Baseline listing (repeated): larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Baseline listing (repeated): larger quadratic root with per-step machine-precision rounding. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(-
(*
a
(-
(*
(pow c 4.0)
(+ (* -5.0 (/ (pow a 2.0) (pow b 7.0))) (* -2.0 (/ a (* c (pow b 5.0))))))
(/ (pow c 2.0) (pow b 3.0))))
(/ c b)))
double code(double a, double b, double c) {
// Alternative 1: series form (Taylor expansion in a about 0, per the derivation log) of the quadratic root.
return (a * ((pow(c, 4.0) * ((-5.0 * (pow(a, 2.0) / pow(b, 7.0))) + (-2.0 * (a / (c * pow(b, 5.0)))))) - (pow(c, 2.0) / pow(b, 3.0)))) - (c / b);
}
! Alternative 1: Taylor-series form (in a about 0) of the quadratic-root expression.
! NOTE(review): b ** 7.0d0 uses a real exponent (NaN for negative b); the report's
! precondition restricts b > 0, so this is safe here — confirm if reused elsewhere.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (a * (((c ** 4.0d0) * (((-5.0d0) * ((a ** 2.0d0) / (b ** 7.0d0))) + ((-2.0d0) * (a / (c * (b ** 5.0d0)))))) - ((c ** 2.0d0) / (b ** 3.0d0)))) - (c / b)
end function
// Alternative 1: series form (Taylor expansion in a about 0) of the quadratic root.
public static double code(double a, double b, double c) {
return (a * ((Math.pow(c, 4.0) * ((-5.0 * (Math.pow(a, 2.0) / Math.pow(b, 7.0))) + (-2.0 * (a / (c * Math.pow(b, 5.0)))))) - (Math.pow(c, 2.0) / Math.pow(b, 3.0)))) - (c / b);
}
def code(a, b, c):
    """Series form (Taylor in a about 0) of the quadratic-root expression."""
    t1 = -5.0 * (math.pow(a, 2.0) / math.pow(b, 7.0))
    t2 = -2.0 * (a / (c * math.pow(b, 5.0)))
    inner = (math.pow(c, 4.0) * (t1 + t2)) - (math.pow(c, 2.0) / math.pow(b, 3.0))
    return (a * inner) - (c / b)
# Alternative 1: series form (Taylor in a about 0) with per-step Float64 rounding.
function code(a, b, c) return Float64(Float64(a * Float64(Float64((c ^ 4.0) * Float64(Float64(-5.0 * Float64((a ^ 2.0) / (b ^ 7.0))) + Float64(-2.0 * Float64(a / Float64(c * (b ^ 5.0)))))) - Float64((c ^ 2.0) / (b ^ 3.0)))) - Float64(c / b)) end
% Alternative 1: series form (Taylor in a about 0) of the quadratic-root expression.
function tmp = code(a, b, c) tmp = (a * (((c ^ 4.0) * ((-5.0 * ((a ^ 2.0) / (b ^ 7.0))) + (-2.0 * (a / (c * (b ^ 5.0)))))) - ((c ^ 2.0) / (b ^ 3.0)))) - (c / b); end
(* Alternative 1: series form (Taylor in a about 0), rounded to machine precision at each step. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(N[Power[c, 4.0], $MachinePrecision] * N[(N[(-5.0 * N[(N[Power[a, 2.0], $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-2.0 * N[(a / N[(c * N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left({c}^{4} \cdot \left(-5 \cdot \frac{{a}^{2}}{{b}^{7}} + -2 \cdot \frac{a}{c \cdot {b}^{5}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}
\end{array}
Initial program 31.6%
*-commutative 31.6%
Simplified 31.6%
Taylor expanded in a around 0 95.0%
+-commutative 95.0%
mul-1-neg 95.0%
unsub-neg 95.0%
Simplified 95.0%
Taylor expanded in c around inf 95.0%
Final simplification 95.0%
(FPCore (a b c)
:precision binary64
(*
c
(+
(*
c
(-
(*
c
(+
(* -5.0 (/ (* c (pow a 3.0)) (pow b 7.0)))
(* -2.0 (/ (pow a 2.0) (pow b 5.0)))))
(/ a (pow b 3.0))))
(/ -1.0 b))))
double code(double a, double b, double c) {
// Alternative 2: Horner-style series in c (Taylor expansion in c about 0, per the derivation log).
return c * ((c * ((c * ((-5.0 * ((c * pow(a, 3.0)) / pow(b, 7.0))) + (-2.0 * (pow(a, 2.0) / pow(b, 5.0))))) - (a / pow(b, 3.0)))) + (-1.0 / b));
}
! Alternative 2: Horner-style series in c (Taylor expansion in c about 0).
! NOTE(review): real exponents in b ** 7.0d0 etc. require b > 0 — guaranteed by the precondition.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c * ((c * ((c * (((-5.0d0) * ((c * (a ** 3.0d0)) / (b ** 7.0d0))) + ((-2.0d0) * ((a ** 2.0d0) / (b ** 5.0d0))))) - (a / (b ** 3.0d0)))) + ((-1.0d0) / b))
end function
// Alternative 2: Horner-style series in c (Taylor expansion in c about 0).
public static double code(double a, double b, double c) {
return c * ((c * ((c * ((-5.0 * ((c * Math.pow(a, 3.0)) / Math.pow(b, 7.0))) + (-2.0 * (Math.pow(a, 2.0) / Math.pow(b, 5.0))))) - (a / Math.pow(b, 3.0)))) + (-1.0 / b));
}
def code(a, b, c):
    """Horner-style series in c (Taylor in c about 0) for the quadratic-root expression."""
    term = (-5.0 * ((c * math.pow(a, 3.0)) / math.pow(b, 7.0))) + (-2.0 * (math.pow(a, 2.0) / math.pow(b, 5.0)))
    acc = (c * term) - (a / math.pow(b, 3.0))
    acc = (c * acc) + (-1.0 / b)
    return c * acc
# Alternative 2: Horner-style series in c with per-step Float64 rounding.
function code(a, b, c) return Float64(c * Float64(Float64(c * Float64(Float64(c * Float64(Float64(-5.0 * Float64(Float64(c * (a ^ 3.0)) / (b ^ 7.0))) + Float64(-2.0 * Float64((a ^ 2.0) / (b ^ 5.0))))) - Float64(a / (b ^ 3.0)))) + Float64(-1.0 / b))) end
% Alternative 2: Horner-style series in c (Taylor expansion in c about 0).
function tmp = code(a, b, c) tmp = c * ((c * ((c * ((-5.0 * ((c * (a ^ 3.0)) / (b ^ 7.0))) + (-2.0 * ((a ^ 2.0) / (b ^ 5.0))))) - (a / (b ^ 3.0)))) + (-1.0 / b)); end
(* Alternative 2: Horner-style series in c, rounded to machine precision at each step. *)
code[a_, b_, c_] := N[(c * N[(N[(c * N[(N[(c * N[(N[(-5.0 * N[(N[(c * N[Power[a, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-2.0 * N[(N[Power[a, 2.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
c \cdot \left(c \cdot \left(c \cdot \left(-5 \cdot \frac{c \cdot {a}^{3}}{{b}^{7}} + -2 \cdot \frac{{a}^{2}}{{b}^{5}}\right) - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right)
\end{array}
Initial program 31.6%
*-commutative31.6%
Simplified31.6%
frac-2neg31.6%
div-inv31.6%
sub-neg31.6%
distribute-neg-in31.6%
pow231.6%
add-sqr-sqrt0.0%
sqrt-unprod1.6%
sqr-neg1.6%
sqrt-prod1.6%
add-sqr-sqrt1.6%
add-sqr-sqrt0.0%
sqrt-unprod31.6%
sqr-neg31.6%
sqrt-prod31.5%
add-sqr-sqrt31.6%
distribute-rgt-neg-in31.6%
metadata-eval31.6%
Applied egg-rr31.6%
flip-+31.4%
pow231.4%
unpow231.4%
Applied egg-rr31.4%
unpow231.4%
sqr-neg31.4%
rem-square-sqrt32.3%
fma-define32.3%
associate-*r*32.3%
fma-define32.3%
fma-define32.3%
associate-*r*32.3%
fma-define32.3%
Simplified32.3%
Taylor expanded in c around 0 94.8%
fmm-def94.8%
Simplified94.8%
Taylor expanded in c around 0 94.8%
Final simplification94.8%
(FPCore (a b c) :precision binary64 (- (* a (- (* -2.0 (* a (/ (pow c 3.0) (pow b 5.0)))) (/ (pow c 2.0) (pow b 3.0)))) (/ c b)))
double code(double a, double b, double c) {
// Alternative 3: short series (Taylor expansion in a about 0, per the derivation log).
return (a * ((-2.0 * (a * (pow(c, 3.0) / pow(b, 5.0)))) - (pow(c, 2.0) / pow(b, 3.0)))) - (c / b);
}
! Alternative 3: short series (Taylor expansion in a about 0) of the quadratic-root expression.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (a * (((-2.0d0) * (a * ((c ** 3.0d0) / (b ** 5.0d0)))) - ((c ** 2.0d0) / (b ** 3.0d0)))) - (c / b)
end function
// Alternative 3: short series (Taylor expansion in a about 0).
public static double code(double a, double b, double c) {
return (a * ((-2.0 * (a * (Math.pow(c, 3.0) / Math.pow(b, 5.0)))) - (Math.pow(c, 2.0) / Math.pow(b, 3.0)))) - (c / b);
}
def code(a, b, c):
    """Short series (Taylor in a about 0) for the quadratic-root expression."""
    cubic = -2.0 * (a * (math.pow(c, 3.0) / math.pow(b, 5.0)))
    quad = math.pow(c, 2.0) / math.pow(b, 3.0)
    return (a * (cubic - quad)) - (c / b)
# Alternative 3: short series (Taylor in a about 0) with per-step Float64 rounding.
function code(a, b, c) return Float64(Float64(a * Float64(Float64(-2.0 * Float64(a * Float64((c ^ 3.0) / (b ^ 5.0)))) - Float64((c ^ 2.0) / (b ^ 3.0)))) - Float64(c / b)) end
% Alternative 3: short series (Taylor expansion in a about 0).
function tmp = code(a, b, c) tmp = (a * ((-2.0 * (a * ((c ^ 3.0) / (b ^ 5.0)))) - ((c ^ 2.0) / (b ^ 3.0)))) - (c / b); end
(* Alternative 3: short series (Taylor in a about 0), rounded to machine precision at each step. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(-2.0 * N[(a * N[(N[Power[c, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(-2 \cdot \left(a \cdot \frac{{c}^{3}}{{b}^{5}}\right) - \frac{{c}^{2}}{{b}^{3}}\right) - \frac{c}{b}
\end{array}
Initial program 31.6%
*-commutative31.6%
Simplified31.6%
Taylor expanded in a around 0 93.5%
+-commutative93.5%
mul-1-neg93.5%
unsub-neg93.5%
mul-1-neg93.5%
unsub-neg93.5%
associate-/l*93.5%
Simplified93.5%
Final simplification93.5%
(FPCore (a b c) :precision binary64 (* c (+ (* c (- (* -2.0 (/ (* c (pow a 2.0)) (pow b 5.0))) (/ a (pow b 3.0)))) (/ -1.0 b))))
double code(double a, double b, double c) {
// Alternative 4: Horner-style series in c (Taylor expansion in c about 0, per the derivation log).
return c * ((c * ((-2.0 * ((c * pow(a, 2.0)) / pow(b, 5.0))) - (a / pow(b, 3.0)))) + (-1.0 / b));
}
! Alternative 4: Horner-style series in c (Taylor expansion in c about 0).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c * ((c * (((-2.0d0) * ((c * (a ** 2.0d0)) / (b ** 5.0d0))) - (a / (b ** 3.0d0)))) + ((-1.0d0) / b))
end function
// Alternative 4: Horner-style series in c (Taylor expansion in c about 0).
public static double code(double a, double b, double c) {
return c * ((c * ((-2.0 * ((c * Math.pow(a, 2.0)) / Math.pow(b, 5.0))) - (a / Math.pow(b, 3.0)))) + (-1.0 / b));
}
def code(a, b, c):
    """Horner-style series in c (Taylor in c about 0) for the quadratic-root expression."""
    lead = -2.0 * ((c * math.pow(a, 2.0)) / math.pow(b, 5.0))
    inner = (c * (lead - (a / math.pow(b, 3.0)))) + (-1.0 / b)
    return c * inner
# Alternative 4: Horner-style series in c with per-step Float64 rounding.
function code(a, b, c) return Float64(c * Float64(Float64(c * Float64(Float64(-2.0 * Float64(Float64(c * (a ^ 2.0)) / (b ^ 5.0))) - Float64(a / (b ^ 3.0)))) + Float64(-1.0 / b))) end
% Alternative 4: Horner-style series in c (Taylor expansion in c about 0).
function tmp = code(a, b, c) tmp = c * ((c * ((-2.0 * ((c * (a ^ 2.0)) / (b ^ 5.0))) - (a / (b ^ 3.0)))) + (-1.0 / b)); end
(* Alternative 4: Horner-style series in c, rounded to machine precision at each step. *)
code[a_, b_, c_] := N[(c * N[(N[(c * N[(N[(-2.0 * N[(N[(c * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
c \cdot \left(c \cdot \left(-2 \cdot \frac{c \cdot {a}^{2}}{{b}^{5}} - \frac{a}{{b}^{3}}\right) + \frac{-1}{b}\right)
\end{array}
Initial program 31.6%
*-commutative31.6%
Simplified31.6%
Taylor expanded in c around 0 93.2%
Final simplification93.2%
(FPCore (a b c) :precision binary64 (/ (fma a (pow (/ (- c) b) 2.0) c) (- b)))
double code(double a, double b, double c) {
// Alternative 5: fma(a, (-c/b)^2, c) / -b.
// NOTE(review): fma() is C99 (<math.h>); software-emulated fma can be slow on some targets.
return fma(a, pow((-c / b), 2.0), c) / -b;
}
# Alternative 5: fma(a, (-c/b)^2, c) / -b with per-step Float64 rounding.
function code(a, b, c) return Float64(fma(a, (Float64(Float64(-c) / b) ^ 2.0), c) / Float64(-b)) end
(* Alternative 5: (a * (-c/b)^2 + c) / -b; the fused multiply-add is spelled out since Mathematica has no fma. *)
code[a_, b_, c_] := N[(N[(a * N[Power[N[((-c) / b), $MachinePrecision], 2.0], $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(a, {\left(\frac{-c}{b}\right)}^{2}, c\right)}{-b}
\end{array}
Initial program 31.6%
*-commutative31.6%
Simplified31.6%
Taylor expanded in a around 0 90.9%
mul-1-neg90.9%
unsub-neg90.9%
mul-1-neg90.9%
distribute-neg-frac290.9%
associate-/l*90.9%
Simplified90.9%
Taylor expanded in b around inf 90.9%
distribute-lft-out90.9%
associate-*r/90.9%
mul-1-neg90.9%
distribute-neg-frac290.9%
+-commutative90.9%
associate-/l*90.9%
fma-define90.9%
unpow290.9%
unpow290.9%
times-frac90.9%
sqr-neg90.9%
distribute-frac-neg290.9%
distribute-frac-neg290.9%
unpow290.9%
distribute-frac-neg290.9%
distribute-frac-neg90.9%
Simplified90.9%
Final simplification90.9%
(FPCore (a b c) :precision binary64 (* c (- (/ -1.0 b) (/ (* a c) (pow b 3.0)))))
double code(double a, double b, double c) {
// Alternative 6: two-term series c * (-1/b - a*c/b^3) (Taylor expansion in c about 0).
return c * ((-1.0 / b) - ((a * c) / pow(b, 3.0)));
}
! Alternative 6: two-term series c * (-1/b - a*c/b**3) (Taylor expansion in c about 0).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c * (((-1.0d0) / b) - ((a * c) / (b ** 3.0d0)))
end function
// Alternative 6: two-term series c * (-1/b - a*c/b^3) (Taylor expansion in c about 0).
public static double code(double a, double b, double c) {
return c * ((-1.0 / b) - ((a * c) / Math.pow(b, 3.0)));
}
def code(a, b, c):
    """Two-term series c * (-1/b - a*c/b**3) for the quadratic-root expression."""
    correction = (a * c) / math.pow(b, 3.0)
    return c * ((-1.0 / b) - correction)
# Alternative 6: two-term series c * (-1/b - a*c/b^3) with per-step Float64 rounding.
function code(a, b, c) return Float64(c * Float64(Float64(-1.0 / b) - Float64(Float64(a * c) / (b ^ 3.0)))) end
% Alternative 6: two-term series c * (-1/b - a*c/b^3).
function tmp = code(a, b, c) tmp = c * ((-1.0 / b) - ((a * c) / (b ^ 3.0))); end
(* Alternative 6: two-term series c * (-1/b - a*c/b^3), rounded to machine precision per step. *)
code[a_, b_, c_] := N[(c * N[(N[(-1.0 / b), $MachinePrecision] - N[(N[(a * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
c \cdot \left(\frac{-1}{b} - \frac{a \cdot c}{{b}^{3}}\right)
\end{array}
Initial program 31.6%
*-commutative31.6%
Simplified31.6%
Taylor expanded in c around 0 90.6%
associate-*r/90.6%
neg-mul-190.6%
distribute-rgt-neg-in90.6%
Simplified90.6%
Final simplification90.6%
(FPCore (a b c) :precision binary64 (/ (- c) b))
double code(double a, double b, double c) {
// Alternative 7: leading-order term -c/b (Taylor expansion in b about inf); a is unused.
return -c / b;
}
! Alternative 7: leading-order term -c/b (Taylor expansion in b about inf); a is unused.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
// Alternative 7: leading-order term -c/b; a is unused.
public static double code(double a, double b, double c) {
return -c / b;
}
def code(a, b, c):
    """Leading-order approximation -c/b of the quadratic root (a is unused)."""
    numerator = -c
    return numerator / b
# Alternative 7: leading-order term -c/b (a is unused).
function code(a, b, c) return Float64(Float64(-c) / b) end
% Alternative 7: leading-order term -c/b (a is unused).
function tmp = code(a, b, c) tmp = -c / b; end
(* Alternative 7: leading-order term -c/b (a is unused). *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 31.6%
*-commutative31.6%
Simplified31.6%
Taylor expanded in b around inf 81.4%
associate-*r/81.4%
mul-1-neg81.4%
Simplified81.4%
Final simplification81.4%
(FPCore (a b c) :precision binary64 (/ 0.0 a))
double code(double a, double b, double c) {
// Alternative 8: degenerate form 0.0/a — 0.0 for nonzero finite a (b and c unused).
// Listed with only 3.2% accuracy in the derivation log; kept for completeness of the report.
return 0.0 / a;
}
! Alternative 8: degenerate form 0/a — 0.0 for nonzero finite a (b and c unused).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 0.0d0 / a
end function
// Alternative 8: degenerate form 0.0/a — 0.0 for nonzero finite a (b and c unused).
public static double code(double a, double b, double c) {
return 0.0 / a;
}
def code(a, b, c):
    """Degenerate alternative 0/a: 0.0 for nonzero finite a (b and c are unused)."""
    zero = 0.0
    return zero / a
# Alternative 8: degenerate form 0.0/a (b and c unused).
function code(a, b, c) return Float64(0.0 / a) end
% Alternative 8: degenerate form 0.0/a (b and c unused).
function tmp = code(a, b, c) tmp = 0.0 / a; end
(* Alternative 8: degenerate form 0.0/a (b and c unused). *)
code[a_, b_, c_] := N[(0.0 / a), $MachinePrecision]
\begin{array}{l}
\\
\frac{0}{a}
\end{array}
Initial program 31.6%
*-commutative31.6%
Simplified31.6%
frac-2neg31.6%
div-inv31.6%
sub-neg31.6%
distribute-neg-in31.6%
pow231.6%
add-sqr-sqrt0.0%
sqrt-unprod1.6%
sqr-neg1.6%
sqrt-prod1.6%
add-sqr-sqrt1.6%
add-sqr-sqrt0.0%
sqrt-unprod31.6%
sqr-neg31.6%
sqrt-prod31.5%
add-sqr-sqrt31.6%
distribute-rgt-neg-in31.6%
metadata-eval31.6%
Applied egg-rr31.6%
add-log-exp23.0%
un-div-inv23.0%
neg-mul-123.0%
fma-define23.0%
Applied egg-rr23.0%
Taylor expanded in a around 0 3.2%
associate-*r/3.2%
distribute-rgt1-in3.2%
metadata-eval3.2%
mul0-lft3.2%
metadata-eval3.2%
Simplified3.2%
Final simplification3.2%
herbie shell --seed 2024076
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))