
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Naive "+" quadratic root: (-b + sqrt(b*b - 4ac)) / (2a).
    implicit none
    real(8), intent(in) :: a, b, c
    code = (-b + sqrt((b * b) - ((4.0d0 * a) * c))) / (2.0d0 * a)
end function
/** Naive "+" quadratic root: (-b + sqrt(b*b - 4ac)) / (2a), in double precision. */
public static double code(double a, double b, double c) {
    final double radicand = (b * b) - ((4.0 * a) * c);
    return (-b + Math.sqrt(radicand)) / (2.0 * a);
}
def code(a, b, c):
    """Naive "+" quadratic root: (-b + sqrt(b*b - 4ac)) / (2a)."""
    radicand = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(radicand)) / (2.0 * a)
function code(a, b, c)
    # Naive "+" quadratic root, each step rounded to Float64.
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    num = Float64(Float64(-b) + sqrt(disc))
    return Float64(num / Float64(2.0 * a))
end
function tmp = code(a, b, c)
    % Naive "+" quadratic root: (-b + sqrt(b*b - 4ac)) / (2a).
    radicand = (b * b) - ((4.0 * a) * c);
    tmp = (-b + sqrt(radicand)) / (2.0 * a);
end
(* Naive "+" quadratic root (-b + Sqrt[b*b - 4 a c]) / (2 a); every intermediate is rounded via N[..., $MachinePrecision] to mimic binary64 evaluation. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Naive "+" quadratic root: (-b + sqrt(b*b - 4ac)) / (2a).
    implicit none
    real(8), intent(in) :: a, b, c
    real(8) :: radicand
    radicand = (b * b) - ((4.0d0 * a) * c)
    code = (-b + sqrt(radicand)) / (2.0d0 * a)
end function
/** Naive "+" quadratic root in double precision. */
public static double code(double a, double b, double c) {
    final double root = Math.sqrt((b * b) - ((4.0 * a) * c));
    final double twoA = 2.0 * a;
    return (-b + root) / twoA;
}
def code(a, b, c):
    """Naive "+" quadratic root: (-b + sqrt(b*b - 4ac)) / (2a)."""
    root = math.sqrt((b * b) - ((4.0 * a) * c))
    two_a = 2.0 * a
    return (-b + root) / two_a
# Float64 evaluation of the naive "+" quadratic root.
function code(a, b, c)
    fourac = Float64(Float64(4.0 * a) * c)
    radicand = Float64(Float64(b * b) - fourac)
    return Float64(Float64(Float64(-b) + sqrt(radicand)) / Float64(2.0 * a))
end
function tmp = code(a, b, c)
    % Naive "+" quadratic root.
    root = sqrt((b * b) - ((4.0 * a) * c));
    tmp = (-b + root) / (2.0 * a);
end
(* Naive "+" quadratic root (-b + Sqrt[b*b - 4 a c]) / (2 a); every intermediate is rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (/ (* (/ 0.5 a) (* a (* c -4.0))) (+ (sqrt (fma (* c -4.0) a (* b b))) b)))
double code(double a, double b, double c) {
return ((0.5 / a) * (a * (c * -4.0))) / (sqrt(fma((c * -4.0), a, (b * b))) + b);
}
function code(a, b, c)
    # Conjugate rearrangement of the quadratic root; fma fuses b*b - 4ac.
    num = Float64(Float64(0.5 / a) * Float64(a * Float64(c * -4.0)))
    den = Float64(sqrt(fma(Float64(c * -4.0), a, Float64(b * b))) + b)
    return Float64(num / den)
end
(* Conjugate rearrangement of the quadratic root: (0.5/a)(a*(-4c)) / (Sqrt[-4 c a + b b] + b), intermediates rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[(N[(0.5 / a), $MachinePrecision] * N[(a * N[(c * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[N[(N[(c * -4.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{0.5}{a} \cdot \left(a \cdot \left(c \cdot -4\right)\right)}{\sqrt{\mathsf{fma}\left(c \cdot -4, a, b \cdot b\right)} + b}
\end{array}
Initial program 31.6%
lift--.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval31.7
Applied rewrites31.7%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites32.4%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
Applied rewrites99.5%
Final simplification99.5%
(FPCore (a b c) :precision binary64 (if (<= (/ (- (sqrt (- (* b b) (* (* 4.0 a) c))) b) (* 2.0 a)) -2.0) (* (- (sqrt (fma b b (* a (* c -4.0)))) b) (/ 0.5 a)) (/ (fma (/ c b) (/ (* a c) b) c) (- b))))
double code(double a, double b, double c) {
double tmp;
if (((sqrt(((b * b) - ((4.0 * a) * c))) - b) / (2.0 * a)) <= -2.0) {
tmp = (sqrt(fma(b, b, (a * (c * -4.0)))) - b) * (0.5 / a);
} else {
tmp = fma((c / b), ((a * c) / b), c) / -b;
}
return tmp;
}
function code(a, b, c)
    # The naive "+" root decides which evaluation scheme is used.
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    naive = Float64(Float64(sqrt(disc) - b) / Float64(2.0 * a))
    if naive <= -2.0
        # Conjugate/fma form for roots well below zero.
        return Float64(Float64(sqrt(fma(b, b, Float64(a * Float64(c * -4.0)))) - b) * Float64(0.5 / a))
    else
        # fma-based corrected -c/b form otherwise.
        return Float64(fma(Float64(c / b), Float64(Float64(a * c) / b), c) / Float64(-b))
    end
end
(* Branch-selected quadratic root: if the naive "+" root <= -2 use the fma-style conjugate form, otherwise the corrected -c/b form; all steps rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := If[LessEqual[N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], -2.0], N[(N[(N[Sqrt[N[(b * b + N[(a * N[(c * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] * N[(0.5 / a), $MachinePrecision]), $MachinePrecision], N[(N[(N[(c / b), $MachinePrecision] * N[(N[(a * c), $MachinePrecision] / b), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c} - b}{2 \cdot a} \leq -2:\\
\;\;\;\;\left(\sqrt{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot -4\right)\right)} - b\right) \cdot \frac{0.5}{a}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\frac{c}{b}, \frac{a \cdot c}{b}, c\right)}{-b}\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) < -2Initial program 81.1%
lift--.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval81.3
Applied rewrites81.3%
lift-/.f64N/A
div-invN/A
metadata-evalN/A
lift-*.f64N/A
associate-/r*N/A
metadata-evalN/A
metadata-evalN/A
lift-/.f64N/A
*-commutativeN/A
lower-*.f6481.3
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
lower--.f6481.3
Applied rewrites81.3%
if -2 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) Initial program 24.0%
Taylor expanded in b around inf
distribute-lft-outN/A
associate-/l*N/A
mul-1-negN/A
lower-neg.f64N/A
lower-/.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
times-fracN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f6495.2
Applied rewrites95.2%
Final simplification93.4%
(FPCore (a b c) :precision binary64 (if (<= (/ (- (sqrt (- (* b b) (* (* 4.0 a) c))) b) (* 2.0 a)) -5e-5) (* (- (sqrt (fma b b (* a (* c -4.0)))) b) (/ 0.5 a)) (/ (- c) b)))
double code(double a, double b, double c) {
double tmp;
if (((sqrt(((b * b) - ((4.0 * a) * c))) - b) / (2.0 * a)) <= -5e-5) {
tmp = (sqrt(fma(b, b, (a * (c * -4.0)))) - b) * (0.5 / a);
} else {
tmp = -c / b;
}
return tmp;
}
function code(a, b, c)
    # Naive "+" root, used only to select the evaluation scheme.
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    naive = Float64(Float64(sqrt(disc) - b) / Float64(2.0 * a))
    if naive <= -5e-5
        # Conjugate/fma form when the root is not tiny.
        return Float64(Float64(sqrt(fma(b, b, Float64(a * Float64(c * -4.0)))) - b) * Float64(0.5 / a))
    else
        # First-order approximation -c/b for near-zero roots.
        return Float64(Float64(-c) / b)
    end
end
(* Branch-selected quadratic root: if the naive "+" root <= -5e-5 use the fma-style conjugate form, otherwise the first-order approximation -c/b; all steps rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := If[LessEqual[N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], -5e-5], N[(N[(N[Sqrt[N[(b * b + N[(a * N[(c * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] * N[(0.5 / a), $MachinePrecision]), $MachinePrecision], N[((-c) / b), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c} - b}{2 \cdot a} \leq -5 \cdot 10^{-5}:\\
\;\;\;\;\left(\sqrt{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot -4\right)\right)} - b\right) \cdot \frac{0.5}{a}\\
\mathbf{else}:\\
\;\;\;\;\frac{-c}{b}\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) < -5.00000000000000024e-5Initial program 72.9%
lift--.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval73.1
Applied rewrites73.1%
lift-/.f64N/A
div-invN/A
metadata-evalN/A
lift-*.f64N/A
associate-/r*N/A
metadata-evalN/A
metadata-evalN/A
lift-/.f64N/A
*-commutativeN/A
lower-*.f6473.1
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
sub-negN/A
lower--.f6473.1
Applied rewrites73.1%
if -5.00000000000000024e-5 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) Initial program 15.4%
Taylor expanded in c around 0
associate-*r/N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6493.0
Applied rewrites93.0%
Final simplification87.4%
(FPCore (a b c) :precision binary64 (/ (* (* 4.0 a) c) (* (- (- b) (sqrt (fma b b (* a (* c -4.0))))) (* 2.0 a))))
double code(double a, double b, double c) {
return ((4.0 * a) * c) / ((-b - sqrt(fma(b, b, (a * (c * -4.0))))) * (2.0 * a));
}
function code(a, b, c)
    # 4ac divided by the conjugate of the "+" root numerator.
    num = Float64(Float64(4.0 * a) * c)
    inner = fma(b, b, Float64(a * Float64(c * -4.0)))
    den = Float64(Float64(Float64(-b) - sqrt(inner)) * Float64(2.0 * a))
    return Float64(num / den)
end
(* Conjugate form of the "+" quadratic root: 4ac / ((-b - Sqrt[b b - 4 a c]) * 2a), intermediates rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision] / N[(N[((-b) - N[Sqrt[N[(b * b + N[(a * N[(c * -4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(4 \cdot a\right) \cdot c}{\left(\left(-b\right) - \sqrt{\mathsf{fma}\left(b, b, a \cdot \left(c \cdot -4\right)\right)}\right) \cdot \left(2 \cdot a\right)}
\end{array}
Initial program 31.6%
lift--.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval31.7
Applied rewrites31.7%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites32.4%
Taylor expanded in c around 0
associate-*r*N/A
lower-*.f64N/A
lower-*.f6499.4
Applied rewrites99.4%
Final simplification99.4%
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* First-order root approximation -c/b; a is accepted for interface
   compatibility but unused in this approximation. */
double code(double a, double b, double c) {
    (void) a;
    return -c / b;
}
real(8) function code(a, b, c)
    ! First-order root approximation -c/b; a is unused here.
    implicit none
    real(8), intent(in) :: a, b, c
    code = -c / b
end function
/** First-order root approximation -c/b; a is unused. */
public static double code(double a, double b, double c) {
    final double negC = -c;
    return negC / b;
}
def code(a, b, c):
    """First-order root approximation -c/b (a is unused)."""
    neg_c = -c
    return neg_c / b
function code(a, b, c)
    # First-order root approximation -c/b; a is unused here.
    return Float64(Float64(-c) / b)
end
function tmp = code(a, b, c)
    % First-order root approximation -c/b; a is unused.
    tmp = -c / b;
end
(* First-order root approximation -c/b (a is unused), rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 31.6%
Taylor expanded in c around 0
associate-*r/N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6480.9
Applied rewrites80.9%
herbie shell --seed 2024273
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))