
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! "+" branch of the quadratic formula, evaluated naively.
    ! NOTE(review): -b + sqrt(...) cancels when 4ac << b*b.
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    real(8) :: disc
    disc = (b * b) - ((4.0d0 * a) * c)
    code = (-b + sqrt(disc)) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // "+" branch of the quadratic formula, evaluated naively.
    // NOTE(review): -b + sqrt(...) cancels when 4ac << b*b.
    double disc = (b * b) - ((4.0 * a) * c);
    double root = Math.sqrt(disc);
    return (-b + root) / (2.0 * a);
}
def code(a, b, c):
    # "+" branch of the quadratic formula, evaluated naively.
    # NOTE(review): -b + sqrt(...) cancels when 4ac << b*b.
    disc = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(disc)) / (2.0 * a)
function code(a, b, c)
    # "+" branch of the quadratic formula; every intermediate is forced to Float64.
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    num = Float64(Float64(-b) + sqrt(disc))
    return Float64(num / Float64(2.0 * a))
end
function tmp = code(a, b, c)
    % "+" branch of the quadratic formula, evaluated naively.
    disc = (b * b) - ((4.0 * a) * c);
    tmp = (-b + sqrt(disc)) / (2.0 * a);
end
(* "+" branch of the quadratic formula; each subexpression is explicitly rounded with N[..., $MachinePrecision] to mirror binary64 evaluation order — do not rearrange the nesting. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! "+" branch of the quadratic formula, evaluated naively.
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    real(8) :: disc
    disc = (b * b) - ((4.0d0 * a) * c)
    code = (-b + sqrt(disc)) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // "+" branch of the quadratic formula, evaluated naively.
    double disc = (b * b) - ((4.0 * a) * c);
    double root = Math.sqrt(disc);
    return (-b + root) / (2.0 * a);
}
def code(a, b, c):
    # "+" branch of the quadratic formula, evaluated naively.
    disc = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(disc)) / (2.0 * a)
function code(a, b, c)
    # "+" branch of the quadratic formula; every intermediate is forced to Float64.
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    num = Float64(Float64(-b) + sqrt(disc))
    return Float64(num / Float64(2.0 * a))
end
function tmp = code(a, b, c)
    % "+" branch of the quadratic formula, evaluated naively.
    disc = (b * b) - ((4.0 * a) * c);
    tmp = (-b + sqrt(disc)) / (2.0 * a);
end
(* "+" branch of the quadratic formula; each subexpression is explicitly rounded with N[..., $MachinePrecision] to mirror binary64 evaluation order — do not rearrange the nesting. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (- (fma -0.25 (* (/ (pow (* c a) 4.0) a) (/ 20.0 (pow b 7.0))) (- (/ (* -2.0 (* (pow c 3.0) (* a a))) (pow b 5.0)) (/ c b))) (/ (* c (* c a)) (pow b 3.0))))
double code(double a, double b, double c) {
return fma(-0.25, ((pow((c * a), 4.0) / a) * (20.0 / pow(b, 7.0))), (((-2.0 * (pow(c, 3.0) * (a * a))) / pow(b, 5.0)) - (c / b))) - ((c * (c * a)) / pow(b, 3.0));
}
function code(a, b, c)
    # Series form of the root (expanded in b around inf, per the report log).
    # Float64 wrappers and operation order preserved exactly.
    quartic = Float64(Float64((Float64(c * a) ^ 4.0) / a) * Float64(20.0 / (b ^ 7.0)))
    cubic = Float64(Float64(Float64(-2.0 * Float64((c ^ 3.0) * Float64(a * a))) / (b ^ 5.0)) - Float64(c / b))
    quadratic = Float64(Float64(c * Float64(c * a)) / (b ^ 3.0))
    return Float64(fma(-0.25, quartic, cubic) - quadratic)
end
(* Series form of the root with explicit per-operation machine-precision rounding; the N[..., $MachinePrecision] nesting mirrors binary64 evaluation order — do not rearrange. *)
code[a_, b_, c_] := N[(N[(-0.25 * N[(N[(N[Power[N[(c * a), $MachinePrecision], 4.0], $MachinePrecision] / a), $MachinePrecision] * N[(20.0 / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(-2.0 * N[(N[Power[c, 3.0], $MachinePrecision] * N[(a * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(c * N[(c * a), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.25, \frac{{\left(c \cdot a\right)}^{4}}{a} \cdot \frac{20}{{b}^{7}}, \frac{-2 \cdot \left({c}^{3} \cdot \left(a \cdot a\right)\right)}{{b}^{5}} - \frac{c}{b}\right) - \frac{c \cdot \left(c \cdot a\right)}{{b}^{3}}
\end{array}
Initial program 33.1%
/-rgt-identity  33.1%
metadata-eval  33.1%
associate-/l*  33.1%
associate-*r/  33.1%
+-commutative  33.1%
unsub-neg  33.1%
fma-neg  33.2%
associate-*l*  33.2%
*-commutative  33.2%
distribute-rgt-neg-in  33.2%
metadata-eval  33.2%
associate-/r*  33.2%
metadata-eval  33.2%
metadata-eval  33.2%
Simplified  33.2%
fma-udef  33.1%
*-commutative  33.1%
Applied egg-rr  33.1%
Taylor expanded in b around inf 95.4%
Simplified  95.4%
Final simplification  95.4%
(FPCore (a b c) :precision binary64 (/ 1.0 (+ (/ a b) (fma -2.0 (/ (* (* c (* a a)) -0.5) (pow b 3.0)) (/ (- b) c)))))
double code(double a, double b, double c) {
return 1.0 / ((a / b) + fma(-2.0, (((c * (a * a)) * -0.5) / pow(b, 3.0)), (-b / c)));
}
function code(a, b, c)
    # Reciprocal series form of the root; Float64 wrappers and op order preserved.
    inner = Float64(Float64(Float64(c * Float64(a * a)) * -0.5) / (b ^ 3.0))
    denom = Float64(Float64(a / b) + fma(-2.0, inner, Float64(Float64(-b) / c)))
    return Float64(1.0 / denom)
end
(* Reciprocal series form with explicit per-operation machine-precision rounding; the N[..., $MachinePrecision] nesting mirrors binary64 evaluation order — do not rearrange. *)
code[a_, b_, c_] := N[(1.0 / N[(N[(a / b), $MachinePrecision] + N[(-2.0 * N[(N[(N[(c * N[(a * a), $MachinePrecision]), $MachinePrecision] * -0.5), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] + N[((-b) / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{a}{b} + \mathsf{fma}\left(-2, \frac{\left(c \cdot \left(a \cdot a\right)\right) \cdot -0.5}{{b}^{3}}, \frac{-b}{c}\right)}
\end{array}
Initial program 33.1%
log1p-expm1-u  23.7%
neg-mul-1  23.7%
fma-def  23.7%
*-commutative  23.7%
*-commutative  23.7%
*-commutative  23.7%
Applied egg-rr  23.7%
log1p-expm1-u  33.1%
clear-num  33.1%
Applied egg-rr  33.1%
Taylor expanded in b around inf 93.8%
fma-def  93.8%
distribute-rgt-out  93.8%
unpow2  93.8%
metadata-eval  93.8%
associate-*r/  93.8%
mul-1-neg  93.8%
Simplified  93.8%
Final simplification  93.8%
(FPCore (a b c) :precision binary64 (/ 1.0 (- (/ a b) (/ b c))))
double code(double a, double b, double c) {
    /* Two-term approximation: reciprocal of the difference of ratios. */
    double denom = (a / b) - (b / c);
    return 1.0 / denom;
}
real(8) function code(a, b, c)
    ! Two-term approximation: reciprocal of the difference of ratios.
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    real(8) :: denom
    denom = (a / b) - (b / c)
    code = 1.0d0 / denom
end function
public static double code(double a, double b, double c) {
    // Two-term approximation: reciprocal of the difference of ratios.
    double denom = (a / b) - (b / c);
    return 1.0 / denom;
}
def code(a, b, c):
    # Two-term approximation: reciprocal of the difference of ratios.
    denom = (a / b) - (b / c)
    return 1.0 / denom
function code(a, b, c)
    # Two-term approximation; Float64 wrappers preserved.
    denom = Float64(Float64(a / b) - Float64(b / c))
    return Float64(1.0 / denom)
end
function tmp = code(a, b, c)
    % Two-term approximation: reciprocal of the difference of ratios.
    denom = (a / b) - (b / c);
    tmp = 1.0 / denom;
end
(* Two-term approximation with explicit machine-precision rounding at each operation. *)
code[a_, b_, c_] := N[(1.0 / N[(N[(a / b), $MachinePrecision] - N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{a}{b} - \frac{b}{c}}
\end{array}
Initial program 33.1%
log1p-expm1-u  23.7%
neg-mul-1  23.7%
fma-def  23.7%
*-commutative  23.7%
*-commutative  23.7%
*-commutative  23.7%
Applied egg-rr  23.7%
log1p-expm1-u  33.1%
clear-num  33.1%
Applied egg-rr  33.1%
Taylor expanded in b around inf 89.9%
mul-1-neg  89.9%
unsub-neg  89.9%
Simplified  89.9%
Final simplification  89.9%
(FPCore (a b c) :precision binary64 (/ (- c) b))
double code(double a, double b, double c) {
    /* Leading-order term -c/b (a does not appear in this approximation). */
    double num = -c;
    return num / b;
}
real(8) function code(a, b, c)
    ! Leading-order term -c/b (a does not appear in this approximation).
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    real(8) :: num
    num = -c
    code = num / b
end function
public static double code(double a, double b, double c) {
    // Leading-order term -c/b (a does not appear in this approximation).
    double num = -c;
    return num / b;
}
def code(a, b, c):
    # Leading-order term -c/b (a does not appear in this approximation).
    num = -c
    return num / b
function code(a, b, c)
    # Leading-order term -c/b; Float64 wrappers preserved.
    num = Float64(-c)
    return Float64(num / b)
end
function tmp = code(a, b, c)
    % Leading-order term -c/b (a does not appear in this approximation).
    num = -c;
    tmp = num / b;
end
(* Leading-order term -c/b evaluated at machine precision. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 33.1%
neg-sub0  33.1%
associate-+l-  33.1%
sub0-neg  33.1%
neg-mul-1  33.1%
associate-*l/  33.1%
*-commutative  33.1%
associate-/r*  33.1%
/-rgt-identity  33.1%
metadata-eval  33.1%
Simplified  33.1%
Taylor expanded in b around inf 79.5%
associate-*r/  79.5%
neg-mul-1  79.5%
Simplified  79.5%
Final simplification  79.5%
(FPCore (a b c) :precision binary64 (/ 0.0 a))
double code(double a, double b, double c) {
    /* Degenerate approximation: 0.0 / a (b and c unused). */
    double zero = 0.0;
    return zero / a;
}
real(8) function code(a, b, c)
    ! Degenerate approximation: 0.0 / a (b and c unused).
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    real(8) :: zero
    zero = 0.0d0
    code = zero / a
end function
public static double code(double a, double b, double c) {
    // Degenerate approximation: 0.0 / a (b and c unused).
    double zero = 0.0;
    return zero / a;
}
def code(a, b, c):
    # Degenerate approximation: 0.0 / a (b and c unused).
    zero = 0.0
    return zero / a
function code(a, b, c)
    # Degenerate approximation: 0.0 / a (b and c unused).
    zero = 0.0
    return Float64(zero / a)
end
function tmp = code(a, b, c)
    % Degenerate approximation: 0.0 / a (b and c unused).
    zero = 0.0;
    tmp = zero / a;
end
(* Degenerate approximation: 0.0 / a evaluated at machine precision (b and c unused). *)
code[a_, b_, c_] := N[(0.0 / a), $MachinePrecision]
\begin{array}{l}
\\
\frac{0}{a}
\end{array}
Initial program 33.1%
log1p-expm1-u  23.7%
neg-mul-1  23.7%
fma-def  23.7%
*-commutative  23.7%
*-commutative  23.7%
*-commutative  23.7%
Applied egg-rr  23.7%
Taylor expanded in c around 0 3.2%
associate-*r/  3.2%
distribute-rgt1-in  3.2%
metadata-eval  3.2%
mul0-lft  3.2%
metadata-eval  3.2%
Simplified  3.2%
Final simplification  3.2%
herbie shell --seed 2023258
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))