
; Herbie input: the "+sqrt" branch of the quadratic formula, (-b + sqrt(b^2 - 4ac)) / (2a), in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic-formula root (+sqrt branch) in binary64.
! NOTE(review): -b + sqrt(...) cancels catastrophically when b > 0 and
! 4*a*c << b*b; the alternatives later in this report address that.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic-formula root (+sqrt branch); suffers cancellation when b > 0
// and 4*a*c is small relative to b*b.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    # One root of a*x^2 + b*x + c = 0 (the "+ sqrt" branch of the quadratic
    # formula); same operation order as the generated C/Fortran variants.
    discriminant = (b * b) - ((4.0 * a) * c)
    numerator = -b + math.sqrt(discriminant)
    return numerator / (2.0 * a)
# Quadratic-formula root (+sqrt branch); Float64() wrappers pin every intermediate to binary64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Quadratic-formula root (+sqrt branch) of a*x^2 + b*x + c = 0.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Quadratic-formula root (+sqrt branch); N[..., $MachinePrecision] forces machine precision at every intermediate step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Initial program restated for comparison: (-b + sqrt(b^2 - 4ac)) / (2a) in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Initial program restated: quadratic-formula root (+sqrt branch). */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program restated: quadratic-formula root (+sqrt branch), binary64.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program restated: quadratic-formula root (+sqrt branch).
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Initial program restated: quadratic-formula root (+sqrt branch).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Initial program restated; Float64() wrappers pin every intermediate to binary64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Initial program restated: quadratic-formula root (+sqrt branch).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Initial program restated; N[..., $MachinePrecision] forces machine precision throughout. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
; Herbie alternative: series form of the root (per the log below, Taylor
; expansions in a and c around 0), with the leading term fused via fma.
(FPCore (a b c)
:precision binary64
(fma
-2.0
(* (/ (* a a) (pow b 5.0)) (pow c 3.0))
(-
(-
(* -5.0 (/ (pow c 4.0) (/ (pow b 7.0) (pow a 3.0))))
(* (/ a (pow b 3.0)) (* c c)))
(/ c b))))
/* Herbie alternative: series form of the quadratic root; the -2*a^2*c^3/b^5
 * term is fused with the remaining sum via fma. */
double code(double a, double b, double c) {
return fma(-2.0, (((a * a) / pow(b, 5.0)) * pow(c, 3.0)), (((-5.0 * (pow(c, 4.0) / (pow(b, 7.0) / pow(a, 3.0)))) - ((a / pow(b, 3.0)) * (c * c))) - (c / b)));
}
# Herbie alternative (fma series form); Float64() wrappers pin intermediates to binary64.
function code(a, b, c) return fma(-2.0, Float64(Float64(Float64(a * a) / (b ^ 5.0)) * (c ^ 3.0)), Float64(Float64(Float64(-5.0 * Float64((c ^ 4.0) / Float64((b ^ 7.0) / (a ^ 3.0)))) - Float64(Float64(a / (b ^ 3.0)) * Float64(c * c))) - Float64(c / b))) end
(* Herbie alternative (fma series form); the fma is spelled as multiply-add under N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(-2.0 * N[(N[(N[(a * a), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] + N[(N[(N[(-5.0 * N[(N[Power[c, 4.0], $MachinePrecision] / N[(N[Power[b, 7.0], $MachinePrecision] / N[Power[a, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-2, \frac{a \cdot a}{{b}^{5}} \cdot {c}^{3}, \left(-5 \cdot \frac{{c}^{4}}{\frac{{b}^{7}}{{a}^{3}}} - \frac{a}{{b}^{3}} \cdot \left(c \cdot c\right)\right) - \frac{c}{b}\right)
\end{array}
Initial program 19.5%
Taylor expanded in a around 0 96.9%
Simplified 96.9%
Taylor expanded in c around 0 96.9%
*-commutative 96.9%
associate-/l* 96.9%
Simplified 96.9%
Final simplification 96.9%
; Herbie alternative: three-term series form (per the log below, Taylor expanded in b around inf).
(FPCore (a b c) :precision binary64 (- (- (/ -2.0 (/ (pow b 5.0) (* (* a a) (pow c 3.0)))) (/ c b)) (* (/ a (pow b 3.0)) (* c c))))
/* Herbie alternative: three-term series form of the quadratic root. */
double code(double a, double b, double c) {
return ((-2.0 / (pow(b, 5.0) / ((a * a) * pow(c, 3.0)))) - (c / b)) - ((a / pow(b, 3.0)) * (c * c));
}
! Herbie alternative: three-term series form of the quadratic root, binary64.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (((-2.0d0) / ((b ** 5.0d0) / ((a * a) * (c ** 3.0d0)))) - (c / b)) - ((a / (b ** 3.0d0)) * (c * c))
end function
// Herbie alternative: three-term series form of the quadratic root.
public static double code(double a, double b, double c) {
return ((-2.0 / (Math.pow(b, 5.0) / ((a * a) * Math.pow(c, 3.0)))) - (c / b)) - ((a / Math.pow(b, 3.0)) * (c * c));
}
def code(a, b, c): return ((-2.0 / (math.pow(b, 5.0) / ((a * a) * math.pow(c, 3.0)))) - (c / b)) - ((a / math.pow(b, 3.0)) * (c * c))
# Herbie alternative (three-term series); Float64() wrappers pin intermediates to binary64.
function code(a, b, c) return Float64(Float64(Float64(-2.0 / Float64((b ^ 5.0) / Float64(Float64(a * a) * (c ^ 3.0)))) - Float64(c / b)) - Float64(Float64(a / (b ^ 3.0)) * Float64(c * c))) end
% Herbie alternative: three-term series form of the quadratic root.
function tmp = code(a, b, c) tmp = ((-2.0 / ((b ^ 5.0) / ((a * a) * (c ^ 3.0)))) - (c / b)) - ((a / (b ^ 3.0)) * (c * c)); end
(* Herbie alternative (three-term series); N[..., $MachinePrecision] forces machine precision throughout. *)
code[a_, b_, c_] := N[(N[(N[(-2.0 / N[(N[Power[b, 5.0], $MachinePrecision] / N[(N[(a * a), $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{-2}{\frac{{b}^{5}}{\left(a \cdot a\right) \cdot {c}^{3}}} - \frac{c}{b}\right) - \frac{a}{{b}^{3}} \cdot \left(c \cdot c\right)
\end{array}
Initial program 19.5%
Taylor expanded in b around inf 96.0%
associate-+r+ 96.0%
mul-1-neg 96.0%
unsub-neg 96.0%
mul-1-neg 96.0%
unsub-neg 96.0%
associate-*r/ 96.0%
associate-/l* 96.0%
*-commutative 96.0%
unpow2 96.0%
associate-/l* 96.0%
associate-/r/ 96.0%
unpow2 96.0%
Simplified 96.0%
Final simplification 96.0%
; Herbie alternative: two-term series form, -c/b - (a/b^3)*c^2.
(FPCore (a b c) :precision binary64 (- (/ (- c) b) (* (/ a (pow b 3.0)) (* c c))))
/* Herbie alternative: two-term series form, -c/b - (a/b^3)*c^2. */
double code(double a, double b, double c) {
return (-c / b) - ((a / pow(b, 3.0)) * (c * c));
}
! Herbie alternative: two-term series form, -c/b - (a/b**3)*c**2, binary64.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-c / b) - ((a / (b ** 3.0d0)) * (c * c))
end function
// Herbie alternative: two-term series form, -c/b - (a/b^3)*c^2.
public static double code(double a, double b, double c) {
return (-c / b) - ((a / Math.pow(b, 3.0)) * (c * c));
}
def code(a, b, c):
    # Herbie alternative: two-term series form, -c/b - (a/b**3)*c**2.
    # Same operations in the same order as the one-line original.
    first_term = -c / b
    second_term = (a / math.pow(b, 3.0)) * (c * c)
    return first_term - second_term
# Herbie alternative (two-term series); Float64() wrappers pin intermediates to binary64.
function code(a, b, c) return Float64(Float64(Float64(-c) / b) - Float64(Float64(a / (b ^ 3.0)) * Float64(c * c))) end
% Herbie alternative: two-term series form, -c/b - (a/b^3)*c^2.
function tmp = code(a, b, c) tmp = (-c / b) - ((a / (b ^ 3.0)) * (c * c)); end
(* Herbie alternative (two-term series); N[..., $MachinePrecision] forces machine precision throughout. *)
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b} - \frac{a}{{b}^{3}} \cdot \left(c \cdot c\right)
\end{array}
Initial program 19.5%
Taylor expanded in b around inf 94.0%
mul-1-neg 94.0%
unsub-neg 94.0%
mul-1-neg 94.0%
distribute-neg-frac 94.0%
associate-/l* 94.0%
associate-/r/ 94.0%
unpow2 94.0%
Simplified 94.0%
Final simplification 94.0%
; Herbie alternative: rewritten quotient form (4ac / (-b - (b - 2ac/b))) / (2a).
(FPCore (a b c) :precision binary64 (/ (/ (* a (* c 4.0)) (- (- b) (+ b (* -2.0 (/ a (/ b c)))))) (* a 2.0)))
/* Herbie alternative: rewritten quotient form of the quadratic root. */
double code(double a, double b, double c) {
return ((a * (c * 4.0)) / (-b - (b + (-2.0 * (a / (b / c)))))) / (a * 2.0);
}
! Herbie alternative: rewritten quotient form of the quadratic root, binary64.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((a * (c * 4.0d0)) / (-b - (b + ((-2.0d0) * (a / (b / c)))))) / (a * 2.0d0)
end function
// Herbie alternative: rewritten quotient form of the quadratic root.
public static double code(double a, double b, double c) {
return ((a * (c * 4.0)) / (-b - (b + (-2.0 * (a / (b / c)))))) / (a * 2.0);
}
# Herbie alternative: rewritten quotient form of the quadratic root.
def code(a, b, c): return ((a * (c * 4.0)) / (-b - (b + (-2.0 * (a / (b / c)))))) / (a * 2.0)
# Herbie alternative (quotient form); Float64() wrappers pin intermediates to binary64.
function code(a, b, c) return Float64(Float64(Float64(a * Float64(c * 4.0)) / Float64(Float64(-b) - Float64(b + Float64(-2.0 * Float64(a / Float64(b / c)))))) / Float64(a * 2.0)) end
% Herbie alternative: rewritten quotient form of the quadratic root.
function tmp = code(a, b, c) tmp = ((a * (c * 4.0)) / (-b - (b + (-2.0 * (a / (b / c)))))) / (a * 2.0); end
(* Herbie alternative (quotient form); N[..., $MachinePrecision] forces machine precision throughout. *)
code[a_, b_, c_] := N[(N[(N[(a * N[(c * 4.0), $MachinePrecision]), $MachinePrecision] / N[((-b) - N[(b + N[(-2.0 * N[(a / N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{a \cdot \left(c \cdot 4\right)}{\left(-b\right) - \left(b + -2 \cdot \frac{a}{\frac{b}{c}}\right)}}{a \cdot 2}
\end{array}
Initial program 19.5%
Taylor expanded in b around inf 13.1%
flip-+ 13.1%
associate-/l* 13.1%
associate-/l* 13.1%
associate-/l* 13.1%
Applied egg-rr 13.1%
sqr-neg 13.1%
Simplified 13.1%
Taylor expanded in b around inf 93.7%
*-commutative 93.7%
associate-*l* 93.7%
Simplified 93.7%
Final simplification 93.7%
; Herbie alternative: one-term series form, -c/b (a is unused in this variant).
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Herbie alternative: one-term series form, -c/b; parameter a is unused here. */
double code(double a, double b, double c) {
return -c / b;
}
! Herbie alternative: one-term series form, -c/b; argument a is unused here.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
// Herbie alternative: one-term series form, -c/b; parameter a is unused here.
public static double code(double a, double b, double c) {
    final double numerator = -c;
    return numerator / b;
}
def code(a, b, c):
    """Herbie alternative: one-term series form, -c/b; ``a`` is unused here."""
    numerator = -c
    return numerator / b
# Herbie alternative: one-term series form, -c/b; argument a is unused here.
function code(a, b, c) return Float64(Float64(-c) / b) end
% Herbie alternative: one-term series form, -c/b; argument a is unused here.
function tmp = code(a, b, c) tmp = -c / b; end
(* Herbie alternative: one-term series form, -c/b; pattern a_ is unused here. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 19.5%
Taylor expanded in b around inf 88.8%
mul-1-neg 88.8%
distribute-neg-frac 88.8%
Simplified 88.8%
Final simplification 88.8%
herbie shell --seed 2023268
; Full benchmark specification: named problem with the sampling precondition
; bounding a, b, c to roughly [2^-104, 2^104] that produced the results above.
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))