
;; Quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64 (FPCore input program).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated entirely in binary64. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), double precision.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    """Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64 op order preserved."""
    discriminant = (b * b) - ((4.0 * a) * c)
    numerator = -b + math.sqrt(discriminant)
    return numerator / (2.0 * a)
# Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a); explicit Float64 rounding at each step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), double precision.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Quadratic root: (-b + Sqrt[b*b - 4*a*c]) / (2*a); each op rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1 (same as the input program): (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), binary64 (repeated listing). */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), double precision (repeated listing).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), double precision (repeated listing).
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    """Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a); same op order as the FPCore."""
    radicand = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(radicand)) / (2.0 * a)
# Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a); explicit Float64 rounding (repeated listing).
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), double precision (repeated listing).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Quadratic root at $MachinePrecision (repeated listing). *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
;; Herbie alternative: series form a*c^2*(c*a*(-5*a*c/b^7 - 2/b^5) - 1/b^3) - c/b, binary64.
(FPCore (a b c)
:precision binary64
(-
(*
a
(*
(* c c)
(+
(* c (* a (- (* -5.0 (* a (/ c (pow b 7.0)))) (/ 2.0 (pow b 5.0)))))
(/ -1.0 (pow b 3.0)))))
(/ c b)))
/* Herbie alternative: a*c^2*(c*a*(-5*a*c/b^7 - 2/b^5) - 1/b^3) - c/b, binary64. */
double code(double a, double b, double c) {
return (a * ((c * c) * ((c * (a * ((-5.0 * (a * (c / pow(b, 7.0)))) - (2.0 / pow(b, 5.0))))) + (-1.0 / pow(b, 3.0))))) - (c / b);
}
! Herbie alternative: a*c^2*(c*a*(-5*a*c/b**7 - 2/b**5) - 1/b**3) - c/b, double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (a * ((c * c) * ((c * (a * (((-5.0d0) * (a * (c / (b ** 7.0d0)))) - (2.0d0 / (b ** 5.0d0))))) + ((-1.0d0) / (b ** 3.0d0))))) - (c / b)
end function
// Herbie alternative: a*c^2*(c*a*(-5*a*c/b^7 - 2/b^5) - 1/b^3) - c/b, double precision.
public static double code(double a, double b, double c) {
return (a * ((c * c) * ((c * (a * ((-5.0 * (a * (c / Math.pow(b, 7.0)))) - (2.0 / Math.pow(b, 5.0))))) + (-1.0 / Math.pow(b, 3.0))))) - (c / b);
}
def code(a, b, c):
    """Herbie alternative: a*c^2*(c*a*(-5*a*c/b^7 - 2/b^5) - 1/b^3) - c/b; op order preserved."""
    inner = (-5.0 * (a * (c / math.pow(b, 7.0)))) - (2.0 / math.pow(b, 5.0))
    bracket = (c * (a * inner)) + (-1.0 / math.pow(b, 3.0))
    return (a * ((c * c) * bracket)) - (c / b)
# Herbie alternative: a*c^2*(c*a*(-5*a*c/b^7 - 2/b^5) - 1/b^3) - c/b; explicit Float64 rounding.
function code(a, b, c) return Float64(Float64(a * Float64(Float64(c * c) * Float64(Float64(c * Float64(a * Float64(Float64(-5.0 * Float64(a * Float64(c / (b ^ 7.0)))) - Float64(2.0 / (b ^ 5.0))))) + Float64(-1.0 / (b ^ 3.0))))) - Float64(c / b)) end
% Herbie alternative: a*c^2*(c*a*(-5*a*c/b^7 - 2/b^5) - 1/b^3) - c/b, double precision.
function tmp = code(a, b, c) tmp = (a * ((c * c) * ((c * (a * ((-5.0 * (a * (c / (b ^ 7.0)))) - (2.0 / (b ^ 5.0))))) + (-1.0 / (b ^ 3.0))))) - (c / b); end
(* Herbie alternative: a*c^2*(c*a*(-5*a*c/b^7 - 2/b^5) - 1/b^3) - c/b at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(c * c), $MachinePrecision] * N[(N[(c * N[(a * N[(N[(-5.0 * N[(a * N[(c / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(2.0 / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(\left(c \cdot c\right) \cdot \left(c \cdot \left(a \cdot \left(-5 \cdot \left(a \cdot \frac{c}{{b}^{7}}\right) - \frac{2}{{b}^{5}}\right)\right) + \frac{-1}{{b}^{3}}\right)\right) - \frac{c}{b}
\end{array}
Initial program 30.3%
*-commutative 30.3%
Simplified 30.3%
Taylor expanded in a around 0 96.3%
+-commutative 96.3%
mul-1-neg 96.3%
unsub-neg 96.3%
Simplified 96.3%
Taylor expanded in c around 0 96.3%
Taylor expanded in a around 0 96.3%
associate-/l* 96.3%
associate-*r/ 96.3%
metadata-eval 96.3%
Simplified 96.3%
unpow2 96.3%
Applied egg-rr 96.3%
Final simplification 96.3%
;; Herbie alternative: a*c^2*(-2*a*c/b^5 - 1/b^3) - c/b, binary64.
(FPCore (a b c) :precision binary64 (- (* a (* (pow c 2.0) (+ (* -2.0 (/ (* a c) (pow b 5.0))) (/ -1.0 (pow b 3.0))))) (/ c b)))
/* Herbie alternative: a*c^2*(-2*a*c/b^5 - 1/b^3) - c/b, binary64. */
double code(double a, double b, double c) {
return (a * (pow(c, 2.0) * ((-2.0 * ((a * c) / pow(b, 5.0))) + (-1.0 / pow(b, 3.0))))) - (c / b);
}
! Herbie alternative: a*c**2*(-2*a*c/b**5 - 1/b**3) - c/b, double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (a * ((c ** 2.0d0) * (((-2.0d0) * ((a * c) / (b ** 5.0d0))) + ((-1.0d0) / (b ** 3.0d0))))) - (c / b)
end function
// Herbie alternative: a*c^2*(-2*a*c/b^5 - 1/b^3) - c/b, double precision.
public static double code(double a, double b, double c) {
return (a * (Math.pow(c, 2.0) * ((-2.0 * ((a * c) / Math.pow(b, 5.0))) + (-1.0 / Math.pow(b, 3.0))))) - (c / b);
}
def code(a, b, c):
    """Herbie alternative: a*c^2*(-2*a*c/b^5 - 1/b^3) - c/b; op order preserved."""
    term = (-2.0 * ((a * c) / math.pow(b, 5.0))) + (-1.0 / math.pow(b, 3.0))
    return (a * (math.pow(c, 2.0) * term)) - (c / b)
# Herbie alternative: a*c^2*(-2*a*c/b^5 - 1/b^3) - c/b; explicit Float64 rounding.
function code(a, b, c) return Float64(Float64(a * Float64((c ^ 2.0) * Float64(Float64(-2.0 * Float64(Float64(a * c) / (b ^ 5.0))) + Float64(-1.0 / (b ^ 3.0))))) - Float64(c / b)) end
% Herbie alternative: a*c^2*(-2*a*c/b^5 - 1/b^3) - c/b, double precision.
function tmp = code(a, b, c) tmp = (a * ((c ^ 2.0) * ((-2.0 * ((a * c) / (b ^ 5.0))) + (-1.0 / (b ^ 3.0))))) - (c / b); end
(* Herbie alternative: a*c^2*(-2*a*c/b^5 - 1/b^3) - c/b at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(a * N[(N[Power[c, 2.0], $MachinePrecision] * N[(N[(-2.0 * N[(N[(a * c), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left({c}^{2} \cdot \left(-2 \cdot \frac{a \cdot c}{{b}^{5}} + \frac{-1}{{b}^{3}}\right)\right) - \frac{c}{b}
\end{array}
Initial program 30.3%
*-commutative 30.3%
Simplified 30.3%
Taylor expanded in a around 0 96.3%
+-commutative 96.3%
mul-1-neg 96.3%
unsub-neg 96.3%
Simplified 96.3%
Taylor expanded in c around 0 95.2%
Final simplification 95.2%
;; Herbie alternative: c*(c*a*(-2*a*c/b^5 - 1/b^3) - 1/b), binary64.
(FPCore (a b c) :precision binary64 (* c (+ (* c (* a (+ (* -2.0 (/ (* a c) (pow b 5.0))) (/ -1.0 (pow b 3.0))))) (/ -1.0 b))))
/* Herbie alternative: c*(c*a*(-2*a*c/b^5 - 1/b^3) - 1/b), binary64. */
double code(double a, double b, double c) {
return c * ((c * (a * ((-2.0 * ((a * c) / pow(b, 5.0))) + (-1.0 / pow(b, 3.0))))) + (-1.0 / b));
}
! Herbie alternative: c*(c*a*(-2*a*c/b**5 - 1/b**3) - 1/b), double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c * ((c * (a * (((-2.0d0) * ((a * c) / (b ** 5.0d0))) + ((-1.0d0) / (b ** 3.0d0))))) + ((-1.0d0) / b))
end function
// Herbie alternative: c*(c*a*(-2*a*c/b^5 - 1/b^3) - 1/b), double precision.
public static double code(double a, double b, double c) {
return c * ((c * (a * ((-2.0 * ((a * c) / Math.pow(b, 5.0))) + (-1.0 / Math.pow(b, 3.0))))) + (-1.0 / b));
}
def code(a, b, c):
    """Herbie alternative: c*(c*a*(-2*a*c/b^5 - 1/b^3) - 1/b); op order preserved."""
    core = (-2.0 * ((a * c) / math.pow(b, 5.0))) + (-1.0 / math.pow(b, 3.0))
    return c * ((c * (a * core)) + (-1.0 / b))
# Herbie alternative: c*(c*a*(-2*a*c/b^5 - 1/b^3) - 1/b); explicit Float64 rounding.
function code(a, b, c) return Float64(c * Float64(Float64(c * Float64(a * Float64(Float64(-2.0 * Float64(Float64(a * c) / (b ^ 5.0))) + Float64(-1.0 / (b ^ 3.0))))) + Float64(-1.0 / b))) end
% Herbie alternative: c*(c*a*(-2*a*c/b^5 - 1/b^3) - 1/b), double precision.
function tmp = code(a, b, c) tmp = c * ((c * (a * ((-2.0 * ((a * c) / (b ^ 5.0))) + (-1.0 / (b ^ 3.0))))) + (-1.0 / b)); end
(* Herbie alternative: c*(c*a*(-2*a*c/b^5 - 1/b^3) - 1/b) at $MachinePrecision. *)
code[a_, b_, c_] := N[(c * N[(N[(c * N[(a * N[(N[(-2.0 * N[(N[(a * c), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
c \cdot \left(c \cdot \left(a \cdot \left(-2 \cdot \frac{a \cdot c}{{b}^{5}} + \frac{-1}{{b}^{3}}\right)\right) + \frac{-1}{b}\right)
\end{array}
Initial program 30.3%
*-commutative 30.3%
Simplified 30.3%
Taylor expanded in c around 0 94.9%
Taylor expanded in a around 0 94.9%
Final simplification 94.9%
;; Herbie alternative: c/(-b) - a*c^2/b^3, binary64.
(FPCore (a b c) :precision binary64 (- (/ c (- b)) (* a (/ (pow c 2.0) (pow b 3.0)))))
/* Herbie alternative: c/(-b) - a*c^2/b^3, binary64. */
double code(double a, double b, double c) {
return (c / -b) - (a * (pow(c, 2.0) / pow(b, 3.0)));
}
! Herbie alternative: c/(-b) - a*c**2/b**3, double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (c / -b) - (a * ((c ** 2.0d0) / (b ** 3.0d0)))
end function
// Herbie alternative: c/(-b) - a*c^2/b^3, double precision.
public static double code(double a, double b, double c) {
return (c / -b) - (a * (Math.pow(c, 2.0) / Math.pow(b, 3.0)));
}
def code(a, b, c):
    """Herbie alternative: c/(-b) - a*c^2/b^3; op order preserved."""
    leading = c / -b
    correction = a * (math.pow(c, 2.0) / math.pow(b, 3.0))
    return leading - correction
# Herbie alternative: c/(-b) - a*c^2/b^3; explicit Float64 rounding.
function code(a, b, c) return Float64(Float64(c / Float64(-b)) - Float64(a * Float64((c ^ 2.0) / (b ^ 3.0)))) end
% Herbie alternative: c/(-b) - a*c^2/b^3, double precision.
function tmp = code(a, b, c) tmp = (c / -b) - (a * ((c ^ 2.0) / (b ^ 3.0))); end
(* Herbie alternative: c/(-b) - a*c^2/b^3 at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(c / (-b)), $MachinePrecision] - N[(a * N[(N[Power[c, 2.0], $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{-b} - a \cdot \frac{{c}^{2}}{{b}^{3}}
\end{array}
Initial program 30.3%
*-commutative 30.3%
Simplified 30.3%
Taylor expanded in a around 0 92.4%
mul-1-neg 92.4%
unsub-neg 92.4%
mul-1-neg 92.4%
distribute-neg-frac2 92.4%
associate-/l* 92.4%
Simplified 92.4%
;; Herbie alternative: fma(a, (c/b)^2, c) / (-b), binary64 (fused multiply-add form).
(FPCore (a b c) :precision binary64 (/ (fma a (pow (/ c b) 2.0) c) (- b)))
/* Herbie alternative: fma(a, (c/b)^2, c) / -b, binary64; fma rounds a*(c/b)^2 + c once. */
double code(double a, double b, double c) {
return fma(a, pow((c / b), 2.0), c) / -b;
}
# Herbie alternative: fma(a, (c/b)^2, c) / (-b); explicit Float64 rounding.
function code(a, b, c) return Float64(fma(a, (Float64(c / b) ^ 2.0), c) / Float64(-b)) end
(* Herbie alternative: (a*(c/b)^2 + c) / (-b) at $MachinePrecision (fma emulated). *)
code[a_, b_, c_] := N[(N[(a * N[Power[N[(c / b), $MachinePrecision], 2.0], $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(a, {\left(\frac{c}{b}\right)}^{2}, c\right)}{-b}
\end{array}
Initial program 30.3%
*-commutative 30.3%
Simplified 30.3%
Taylor expanded in a around 0 96.3%
+-commutative 96.3%
mul-1-neg 96.3%
unsub-neg 96.3%
Simplified 96.3%
Taylor expanded in b around inf 92.4%
sub-neg 92.4%
mul-1-neg 92.4%
distribute-neg-out 92.4%
remove-double-neg 92.4%
sub-neg 92.4%
neg-mul-1 92.4%
associate-/l* 92.4%
neg-mul-1 92.4%
fmm-def 92.4%
unpow2 92.4%
unpow2 92.4%
times-frac 92.4%
unpow2 92.4%
remove-double-neg 92.4%
Simplified 92.4%
Final simplification 92.4%
;; Herbie alternative: c * (-1/b - a*c/b^3), binary64.
(FPCore (a b c) :precision binary64 (* c (- (/ -1.0 b) (/ (* a c) (pow b 3.0)))))
/* Herbie alternative: c * (-1/b - a*c/b^3), binary64. */
double code(double a, double b, double c) {
return c * ((-1.0 / b) - ((a * c) / pow(b, 3.0)));
}
! Herbie alternative: c * (-1/b - a*c/b**3), double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c * (((-1.0d0) / b) - ((a * c) / (b ** 3.0d0)))
end function
// Herbie alternative: c * (-1/b - a*c/b^3), double precision.
public static double code(double a, double b, double c) {
return c * ((-1.0 / b) - ((a * c) / Math.pow(b, 3.0)));
}
def code(a, b, c):
    """Herbie alternative: c * (-1/b - a*c/b^3); op order preserved."""
    inner = (-1.0 / b) - ((a * c) / math.pow(b, 3.0))
    return c * inner
# Herbie alternative: c * (-1/b - a*c/b^3); explicit Float64 rounding.
function code(a, b, c) return Float64(c * Float64(Float64(-1.0 / b) - Float64(Float64(a * c) / (b ^ 3.0)))) end
% Herbie alternative: c * (-1/b - a*c/b^3), double precision.
function tmp = code(a, b, c) tmp = c * ((-1.0 / b) - ((a * c) / (b ^ 3.0))); end
(* Herbie alternative: c * (-1/b - a*c/b^3) at $MachinePrecision. *)
code[a_, b_, c_] := N[(c * N[(N[(-1.0 / b), $MachinePrecision] - N[(N[(a * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
c \cdot \left(\frac{-1}{b} - \frac{a \cdot c}{{b}^{3}}\right)
\end{array}
Initial program 30.3%
*-commutative 30.3%
Simplified 30.3%
Taylor expanded in a around 0 96.3%
+-commutative 96.3%
mul-1-neg 96.3%
unsub-neg 96.3%
Simplified 96.3%
Taylor expanded in c around 0 92.1%
sub-neg 92.1%
mul-1-neg 92.1%
distribute-neg-out 92.1%
+-commutative 92.1%
distribute-neg-out 92.1%
unsub-neg 92.1%
distribute-neg-frac 92.1%
metadata-eval 92.1%
*-commutative 92.1%
Simplified 92.1%
Final simplification 92.1%
;; Herbie alternative: leading-order approximation c / (-b), binary64.
(FPCore (a b c) :precision binary64 (/ c (- b)))
/* Herbie alternative: leading-order approximation c / -b (a is unused here). */
double code(double a, double b, double c) {
return c / -b;
}
! Herbie alternative: leading-order approximation c / (-b) (a is unused here).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c / -b
end function
// Herbie alternative: leading-order approximation c / -b (a is unused here).
public static double code(double a, double b, double c) {
return c / -b;
}
def code(a, b, c):
    """Herbie alternative: leading-order approximation c / (-b); a is unused."""
    negated_b = -b
    return c / negated_b
# Herbie alternative: leading-order approximation c / (-b); explicit Float64 rounding.
function code(a, b, c) return Float64(c / Float64(-b)) end
% Herbie alternative: leading-order approximation c / -b (a is unused here).
function tmp = code(a, b, c) tmp = c / -b; end
(* Herbie alternative: leading-order approximation c / (-b) at $MachinePrecision. *)
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{-b}
\end{array}
Initial program 30.3%
*-commutative 30.3%
Simplified 30.3%
Taylor expanded in a around 0 82.4%
associate-*r/ 82.4%
mul-1-neg 82.4%
Simplified 82.4%
Final simplification 82.4%
herbie shell --seed 2024186
;; Original problem statement: "Quadratic roots, medium range"; the precondition
;; restricts each of a, b, c to the open interval (1.11e-16, 9.007e15) in binary64.
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))