
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(-
(-
(fma
-0.25
(* 20.0 (/ (pow c 4.0) (/ (pow b 7.0) (pow a 3.0))))
(* -2.0 (/ (* a a) (/ (pow b 5.0) (pow c 3.0)))))
(/ c b))
(/ (* c (* c a)) (pow b 3.0))))
double code(double a, double b, double c) {
return (fma(-0.25, (20.0 * (pow(c, 4.0) / (pow(b, 7.0) / pow(a, 3.0)))), (-2.0 * ((a * a) / (pow(b, 5.0) / pow(c, 3.0))))) - (c / b)) - ((c * (c * a)) / pow(b, 3.0));
}
function code(a, b, c) return Float64(Float64(fma(-0.25, Float64(20.0 * Float64((c ^ 4.0) / Float64((b ^ 7.0) / (a ^ 3.0)))), Float64(-2.0 * Float64(Float64(a * a) / Float64((b ^ 5.0) / (c ^ 3.0))))) - Float64(c / b)) - Float64(Float64(c * Float64(c * a)) / (b ^ 3.0))) end
code[a_, b_, c_] := N[(N[(N[(-0.25 * N[(20.0 * N[(N[Power[c, 4.0], $MachinePrecision] / N[(N[Power[b, 7.0], $MachinePrecision] / N[Power[a, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-2.0 * N[(N[(a * a), $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(N[(c * N[(c * a), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(-0.25, 20 \cdot \frac{{c}^{4}}{\frac{{b}^{7}}{{a}^{3}}}, -2 \cdot \frac{a \cdot a}{\frac{{b}^{5}}{{c}^{3}}}\right) - \frac{c}{b}\right) - \frac{c \cdot \left(c \cdot a\right)}{{b}^{3}}
\end{array}
Initial program 28.9%
neg-sub028.9%
associate-+l-28.9%
sub0-neg28.9%
neg-mul-128.9%
associate-*l/28.9%
*-commutative28.9%
associate-/r*28.9%
/-rgt-identity28.9%
metadata-eval28.9%
Simplified28.8%
Taylor expanded in a around 0 96.5%
+-commutative96.5%
mul-1-neg96.5%
unsub-neg96.5%
Simplified96.5%
Taylor expanded in c around 0 96.5%
associate-/l*96.5%
Simplified96.5%
Final simplification96.5%
(FPCore (a b c) :precision binary64 (- (fma -2.0 (/ (* a a) (/ (pow b 5.0) (pow c 3.0))) (/ (- c) b)) (/ (* c (* c a)) (pow b 3.0))))
double code(double a, double b, double c) {
return fma(-2.0, ((a * a) / (pow(b, 5.0) / pow(c, 3.0))), (-c / b)) - ((c * (c * a)) / pow(b, 3.0));
}
function code(a, b, c) return Float64(fma(-2.0, Float64(Float64(a * a) / Float64((b ^ 5.0) / (c ^ 3.0))), Float64(Float64(-c) / b)) - Float64(Float64(c * Float64(c * a)) / (b ^ 3.0))) end
code[a_, b_, c_] := N[(N[(-2.0 * N[(N[(a * a), $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[((-c) / b), $MachinePrecision]), $MachinePrecision] - N[(N[(c * N[(c * a), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-2, \frac{a \cdot a}{\frac{{b}^{5}}{{c}^{3}}}, \frac{-c}{b}\right) - \frac{c \cdot \left(c \cdot a\right)}{{b}^{3}}
\end{array}
Initial program 28.9%
neg-sub028.9%
associate-+l-28.9%
sub0-neg28.9%
neg-mul-128.9%
associate-*l/28.9%
*-commutative28.9%
associate-/r*28.9%
/-rgt-identity28.9%
metadata-eval28.9%
Simplified28.8%
Taylor expanded in b around inf 95.0%
+-commutative95.0%
mul-1-neg95.0%
unsub-neg95.0%
+-commutative95.0%
fma-def95.0%
*-commutative95.0%
associate-/l*95.0%
unpow295.0%
associate-*r/95.0%
neg-mul-195.0%
unpow295.0%
associate-*l*95.0%
Simplified95.0%
Final simplification95.0%
(FPCore (a b c) :precision binary64 (* (fma 2.0 (+ (/ a (/ b c)) (/ (* a a) (/ (* b (* b b)) (* c c)))) (* 4.0 (/ (pow (* c a) 3.0) (pow b 5.0)))) (/ -0.5 a)))
double code(double a, double b, double c) {
return fma(2.0, ((a / (b / c)) + ((a * a) / ((b * (b * b)) / (c * c)))), (4.0 * (pow((c * a), 3.0) / pow(b, 5.0)))) * (-0.5 / a);
}
function code(a, b, c) return Float64(fma(2.0, Float64(Float64(a / Float64(b / c)) + Float64(Float64(a * a) / Float64(Float64(b * Float64(b * b)) / Float64(c * c)))), Float64(4.0 * Float64((Float64(c * a) ^ 3.0) / (b ^ 5.0)))) * Float64(-0.5 / a)) end
code[a_, b_, c_] := N[(N[(2.0 * N[(N[(a / N[(b / c), $MachinePrecision]), $MachinePrecision] + N[(N[(a * a), $MachinePrecision] / N[(N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(4.0 * N[(N[Power[N[(c * a), $MachinePrecision], 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(-0.5 / a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2, \frac{a}{\frac{b}{c}} + \frac{a \cdot a}{\frac{b \cdot \left(b \cdot b\right)}{c \cdot c}}, 4 \cdot \frac{{\left(c \cdot a\right)}^{3}}{{b}^{5}}\right) \cdot \frac{-0.5}{a}
\end{array}
Initial program 28.9%
neg-sub028.9%
associate-+l-28.9%
sub0-neg28.9%
neg-mul-128.9%
associate-*l/28.9%
*-commutative28.9%
associate-/r*28.9%
/-rgt-identity28.9%
metadata-eval28.9%
Simplified28.8%
Taylor expanded in b around inf 94.7%
associate-+r+94.7%
distribute-lft-out94.7%
fma-def94.7%
*-commutative94.7%
associate-/l*94.7%
*-commutative94.7%
associate-/l*94.7%
unpow294.7%
unpow294.7%
cube-prod94.7%
Simplified94.7%
unpow392.0%
Applied egg-rr94.7%
Final simplification94.7%
(FPCore (a b c) :precision binary64 (- (/ (- c) b) (/ (* c (* c a)) (* b (* b b)))))
double code(double a, double b, double c) {
return (-c / b) - ((c * (c * a)) / (b * (b * b)));
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-c / b) - ((c * (c * a)) / (b * (b * b)))
end function
public static double code(double a, double b, double c) {
return (-c / b) - ((c * (c * a)) / (b * (b * b)));
}
def code(a, b, c): return (-c / b) - ((c * (c * a)) / (b * (b * b)))
function code(a, b, c) return Float64(Float64(Float64(-c) / b) - Float64(Float64(c * Float64(c * a)) / Float64(b * Float64(b * b)))) end
function tmp = code(a, b, c) tmp = (-c / b) - ((c * (c * a)) / (b * (b * b))); end
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(N[(c * N[(c * a), $MachinePrecision]), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b} - \frac{c \cdot \left(c \cdot a\right)}{b \cdot \left(b \cdot b\right)}
\end{array}
Initial program 28.9%
neg-sub028.9%
associate-+l-28.9%
sub0-neg28.9%
neg-mul-128.9%
associate-*l/28.9%
*-commutative28.9%
associate-/r*28.9%
/-rgt-identity28.9%
metadata-eval28.9%
Simplified28.8%
Taylor expanded in b around inf 92.0%
+-commutative92.0%
mul-1-neg92.0%
unsub-neg92.0%
associate-*r/92.0%
neg-mul-192.0%
unpow292.0%
associate-*l*92.0%
Simplified92.0%
unpow392.0%
Applied egg-rr92.0%
Final simplification92.0%
(FPCore (a b c) :precision binary64 (/ (- c) b))
double code(double a, double b, double c) {
return -c / b;
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
public static double code(double a, double b, double c) {
return -c / b;
}
def code(a, b, c): return -c / b
function code(a, b, c) return Float64(Float64(-c) / b) end
function tmp = code(a, b, c) tmp = -c / b; end
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 28.9%
neg-sub028.9%
associate-+l-28.9%
sub0-neg28.9%
neg-mul-128.9%
associate-*l/28.9%
*-commutative28.9%
associate-/r*28.9%
/-rgt-identity28.9%
metadata-eval28.9%
Simplified28.8%
Taylor expanded in b around inf 83.0%
associate-*r/83.0%
neg-mul-183.0%
Simplified83.0%
Final simplification83.0%
(FPCore (a b c) :precision binary64 (/ 0.0 a))
double code(double a, double b, double c) {
return 0.0 / a;
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 0.0d0 / a
end function
public static double code(double a, double b, double c) {
return 0.0 / a;
}
def code(a, b, c): return 0.0 / a
function code(a, b, c) return Float64(0.0 / a) end
function tmp = code(a, b, c) tmp = 0.0 / a; end
code[a_, b_, c_] := N[(0.0 / a), $MachinePrecision]
\begin{array}{l}
\\
\frac{0}{a}
\end{array}
Initial program 28.9%
+-commutative28.9%
add-log-exp9.8%
add-log-exp9.2%
sum-log9.2%
*-commutative9.2%
*-commutative9.2%
Applied egg-rr9.2%
Taylor expanded in c around 0 1.2%
associate-*r/1.2%
exp-neg0.9%
lft-mult-inverse3.2%
metadata-eval3.2%
metadata-eval3.2%
Simplified3.2%
Final simplification3.2%
herbie shell --seed 2023187
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))