
; Herbie input: the '+' root of a*x^2 + b*x + c via the textbook quadratic
; formula, evaluated entirely in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! The '+' root of a*x^2 + b*x + c (textbook quadratic formula) in binary64.
! NOTE(review): generated code — real(8) is non-standard and there is no
! implicit none; kept verbatim to match the report's other language variants.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// The '+' root of a*x^2 + b*x + c via the textbook quadratic formula.
// Cancellation-prone when 4*a*c is small relative to b*b.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# The '+' root of a*x^2 + b*x + c via the textbook quadratic formula
# (assumes `math` is imported by the surrounding module).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# The '+' quadratic root; every intermediate is explicitly rounded to Float64
# so the result matches the binary64 FPCore semantics step for step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% The '+' quadratic root via the textbook formula (double precision).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* The '+' quadratic root; each intermediate rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
; Alternative 1: identical to the input program (baseline for comparison).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Baseline alternative: the '+' quadratic root, textbook formula (binary64).
! NOTE(review): generated code — real(8) is non-standard; kept verbatim.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Baseline alternative: the '+' quadratic root via the textbook formula.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Baseline alternative: the '+' quadratic root (requires `math` in scope).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Baseline alternative: '+' quadratic root with explicit Float64 rounding.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Baseline alternative: the '+' quadratic root, textbook formula.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Baseline alternative: '+' quadratic root at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
; Alternative: root rewritten as (4ac / -2a) / (b + sqrt(b^2 - 4ac)), with the
; discriminant computed by fma to avoid rounding in b*b - 4ac.
(FPCore (a b c) :precision binary64 (/ (/ (* (* 4.0 a) c) (* -2.0 a)) (+ b (sqrt (fma (* c a) -4.0 (* b b))))))
double code(double a, double b, double c) {
return (((4.0 * a) * c) / (-2.0 * a)) / (b + sqrt(fma((c * a), -4.0, (b * b))));
}
# Rationalized '+' root with fma discriminant; explicit Float64 rounding.
function code(a, b, c) return Float64(Float64(Float64(Float64(4.0 * a) * c) / Float64(-2.0 * a)) / Float64(b + sqrt(fma(Float64(c * a), -4.0, Float64(b * b))))) end
(* Rationalized '+' root with fused multiply-add discriminant. *)
code[a_, b_, c_] := N[(N[(N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision] / N[(-2.0 * a), $MachinePrecision]), $MachinePrecision] / N[(b + N[Sqrt[N[(N[(c * a), $MachinePrecision] * -4.0 + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\left(4 \cdot a\right) \cdot c}{-2 \cdot a}}{b + \sqrt{\mathsf{fma}\left(c \cdot a, -4, b \cdot b\right)}}
\end{array}
Initial program 34.1%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval34.1
Applied rewrites34.1%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites35.3%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f6499.3
Applied rewrites99.3%
Applied rewrites99.6%
Final simplification99.6%
; Alternative: ((c/a) / (b + sqrt(b^2 - 4ac)) * -a) * 2 — another rationalized
; arrangement of the '+' root, with fma for the discriminant.
(FPCore (a b c) :precision binary64 (* (* (/ (/ c a) (+ b (sqrt (fma (* c a) -4.0 (* b b))))) (- a)) 2.0))
double code(double a, double b, double c) {
return (((c / a) / (b + sqrt(fma((c * a), -4.0, (b * b))))) * -a) * 2.0;
}
# Rationalized '+' root, ((c/a)/(b+sqrt(disc)) * -a) * 2, Float64 at each step.
function code(a, b, c) return Float64(Float64(Float64(Float64(c / a) / Float64(b + sqrt(fma(Float64(c * a), -4.0, Float64(b * b))))) * Float64(-a)) * 2.0) end
(* Rationalized '+' root, ((c/a)/(b+sqrt(disc)) * -a) * 2 form. *)
code[a_, b_, c_] := N[(N[(N[(N[(c / a), $MachinePrecision] / N[(b + N[Sqrt[N[(N[(c * a), $MachinePrecision] * -4.0 + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * (-a)), $MachinePrecision] * 2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{\frac{c}{a}}{b + \sqrt{\mathsf{fma}\left(c \cdot a, -4, b \cdot b\right)}} \cdot \left(-a\right)\right) \cdot 2
\end{array}
Initial program 34.1%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval34.1
Applied rewrites34.1%
lift-/.f64N/A
lift-+.f64N/A
div-addN/A
flip-+N/A
lower-/.f64N/A
Applied rewrites33.5%
Taylor expanded in a around 0
lower-/.f6499.4
Applied rewrites99.4%
lift-/.f64N/A
lift-/.f64N/A
associate-/r/N/A
lift-*.f64N/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
Applied rewrites99.4%
Final simplification99.4%
; Alternative: 4ca / ((-b - sqrt(fma(b,b,-4ca))) * 2a) — rationalized toward
; the '-' denominator to avoid the cancellation in -b + sqrt(...).
(FPCore (a b c) :precision binary64 (/ (* (* c a) 4.0) (* (- (- b) (sqrt (fma b b (* (* c a) -4.0)))) (* 2.0 a))))
double code(double a, double b, double c) {
return ((c * a) * 4.0) / ((-b - sqrt(fma(b, b, ((c * a) * -4.0)))) * (2.0 * a));
}
# Rationalized root 4ca / ((-b - sqrt(fma(b,b,-4ca))) * 2a), Float64 per step.
function code(a, b, c) return Float64(Float64(Float64(c * a) * 4.0) / Float64(Float64(Float64(-b) - sqrt(fma(b, b, Float64(Float64(c * a) * -4.0)))) * Float64(2.0 * a))) end
(* Rationalized root 4ca / ((-b - sqrt(fma(b,b,-4ca))) * 2a). *)
code[a_, b_, c_] := N[(N[(N[(c * a), $MachinePrecision] * 4.0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[N[(b * b + N[(N[(c * a), $MachinePrecision] * -4.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(c \cdot a\right) \cdot 4}{\left(\left(-b\right) - \sqrt{\mathsf{fma}\left(b, b, \left(c \cdot a\right) \cdot -4\right)}\right) \cdot \left(2 \cdot a\right)}
\end{array}
Initial program 34.1%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval34.1
Applied rewrites34.1%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites35.3%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f6499.3
Applied rewrites99.3%
lift-*.f64N/A
lift-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6499.3
lift-*.f64N/A
*-commutativeN/A
lift-*.f6499.3
Applied rewrites99.3%
; Alternative: same rationalized form as above with the fma arguments reordered
; to fma(-4, a*c, b*b).
(FPCore (a b c) :precision binary64 (/ (* (* c a) 4.0) (* (- (- b) (sqrt (fma -4.0 (* a c) (* b b)))) (* 2.0 a))))
double code(double a, double b, double c) {
return ((c * a) * 4.0) / ((-b - sqrt(fma(-4.0, (a * c), (b * b)))) * (2.0 * a));
}
# Rationalized root with fma(-4, a*c, b*b) discriminant, Float64 per step.
function code(a, b, c) return Float64(Float64(Float64(c * a) * 4.0) / Float64(Float64(Float64(-b) - sqrt(fma(-4.0, Float64(a * c), Float64(b * b)))) * Float64(2.0 * a))) end
(* Rationalized root with fma(-4, a*c, b*b) discriminant. *)
code[a_, b_, c_] := N[(N[(N[(c * a), $MachinePrecision] * 4.0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[N[(-4.0 * N[(a * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(c \cdot a\right) \cdot 4}{\left(\left(-b\right) - \sqrt{\mathsf{fma}\left(-4, a \cdot c, b \cdot b\right)}\right) \cdot \left(2 \cdot a\right)}
\end{array}
Initial program 34.1%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval34.1
Applied rewrites34.1%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites35.3%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f6499.3
Applied rewrites99.3%
; Alternative: branch on b. Small b: quadratic formula with fma discriminant.
; Large b: series approximation -(a*c^2/b^3 + c/b) folded into one fma.
(FPCore (a b c) :precision binary64 (if (<= b 0.00078) (/ (+ (- b) (sqrt (fma b b (* (* -4.0 a) c)))) (* 2.0 a)) (- (fma (/ a (* b b)) (/ (* c c) b) (/ c b)))))
double code(double a, double b, double c) {
double tmp;
if (b <= 0.00078) {
tmp = (-b + sqrt(fma(b, b, ((-4.0 * a) * c)))) / (2.0 * a);
} else {
tmp = -fma((a / (b * b)), ((c * c) / b), (c / b));
}
return tmp;
}
# Branching variant: quadratic formula for b <= 0.00078, fma series otherwise.
function code(a, b, c) tmp = 0.0 if (b <= 0.00078) tmp = Float64(Float64(Float64(-b) + sqrt(fma(b, b, Float64(Float64(-4.0 * a) * c)))) / Float64(2.0 * a)); else tmp = Float64(-fma(Float64(a / Float64(b * b)), Float64(Float64(c * c) / b), Float64(c / b))); end return tmp end
(* Branching variant: quadratic formula below b = 0.00078, series above. *)
code[a_, b_, c_] := If[LessEqual[b, 0.00078], N[(N[((-b) + N[Sqrt[N[(b * b + N[(N[(-4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], (-N[(N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] * N[(N[(c * c), $MachinePrecision] / b), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 0.00078:\\
\;\;\;\;\frac{\left(-b\right) + \sqrt{\mathsf{fma}\left(b, b, \left(-4 \cdot a\right) \cdot c\right)}}{2 \cdot a}\\
\mathbf{else}:\\
\;\;\;\;-\mathsf{fma}\left(\frac{a}{b \cdot b}, \frac{c \cdot c}{b}, \frac{c}{b}\right)\\
\end{array}
\end{array}
if b < 7.79999999999999986e-4Initial program 78.0%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lower-*.f64N/A
metadata-eval78.4
Applied rewrites78.4%
if 7.79999999999999986e-4 < b Initial program 30.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-/.f6492.3
Applied rewrites92.3%
Applied rewrites92.3%
; Alternative: branch on b. Small b: quadratic formula with fma discriminant.
; Large b: (c/a) / (c/b - b/a) with the denominator built by fma.
(FPCore (a b c) :precision binary64 (if (<= b 0.00078) (/ (+ (- b) (sqrt (fma b b (* (* -4.0 a) c)))) (* 2.0 a)) (/ (/ c a) (fma (/ b a) -1.0 (/ c b)))))
double code(double a, double b, double c) {
double tmp;
if (b <= 0.00078) {
tmp = (-b + sqrt(fma(b, b, ((-4.0 * a) * c)))) / (2.0 * a);
} else {
tmp = (c / a) / fma((b / a), -1.0, (c / b));
}
return tmp;
}
# Branching variant: quadratic formula for b <= 0.00078, (c/a)/(c/b - b/a) otherwise.
function code(a, b, c) tmp = 0.0 if (b <= 0.00078) tmp = Float64(Float64(Float64(-b) + sqrt(fma(b, b, Float64(Float64(-4.0 * a) * c)))) / Float64(2.0 * a)); else tmp = Float64(Float64(c / a) / fma(Float64(b / a), -1.0, Float64(c / b))); end return tmp end
(* Branching variant: quadratic formula below b = 0.00078, ratio form above. *)
code[a_, b_, c_] := If[LessEqual[b, 0.00078], N[(N[((-b) + N[Sqrt[N[(b * b + N[(N[(-4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], N[(N[(c / a), $MachinePrecision] / N[(N[(b / a), $MachinePrecision] * -1.0 + N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 0.00078:\\
\;\;\;\;\frac{\left(-b\right) + \sqrt{\mathsf{fma}\left(b, b, \left(-4 \cdot a\right) \cdot c\right)}}{2 \cdot a}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{c}{a}}{\mathsf{fma}\left(\frac{b}{a}, -1, \frac{c}{b}\right)}\\
\end{array}
\end{array}
if b < 7.79999999999999986e-4Initial program 78.0%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lower-*.f64N/A
metadata-eval78.4
Applied rewrites78.4%
if 7.79999999999999986e-4 < b Initial program 30.0%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval30.0
Applied rewrites30.0%
lift-/.f64N/A
lift-+.f64N/A
div-addN/A
flip-+N/A
lower-/.f64N/A
Applied rewrites29.4%
Taylor expanded in a around 0
lower-/.f6499.4
Applied rewrites99.4%
Taylor expanded in c around 0
*-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-/.f6492.3
Applied rewrites92.3%
; Alternative: branch on b. Small b: quadratic formula with fma discriminant.
; Large b: (c^2*a / (-b*b) - c) / b, a two-term series in a.
(FPCore (a b c) :precision binary64 (if (<= b 0.00078) (/ (+ (- b) (sqrt (fma b b (* (* -4.0 a) c)))) (* 2.0 a)) (/ (- (/ (* (* c c) a) (* (- b) b)) c) b)))
double code(double a, double b, double c) {
double tmp;
if (b <= 0.00078) {
tmp = (-b + sqrt(fma(b, b, ((-4.0 * a) * c)))) / (2.0 * a);
} else {
tmp = ((((c * c) * a) / (-b * b)) - c) / b;
}
return tmp;
}
# Branching variant: quadratic formula for b <= 0.00078, two-term series otherwise.
function code(a, b, c) tmp = 0.0 if (b <= 0.00078) tmp = Float64(Float64(Float64(-b) + sqrt(fma(b, b, Float64(Float64(-4.0 * a) * c)))) / Float64(2.0 * a)); else tmp = Float64(Float64(Float64(Float64(Float64(c * c) * a) / Float64(Float64(-b) * b)) - c) / b); end return tmp end
(* Branching variant: quadratic formula below b = 0.00078, series above. *)
code[a_, b_, c_] := If[LessEqual[b, 0.00078], N[(N[((-b) + N[Sqrt[N[(b * b + N[(N[(-4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(c * c), $MachinePrecision] * a), $MachinePrecision] / N[((-b) * b), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 0.00078:\\
\;\;\;\;\frac{\left(-b\right) + \sqrt{\mathsf{fma}\left(b, b, \left(-4 \cdot a\right) \cdot c\right)}}{2 \cdot a}\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\left(c \cdot c\right) \cdot a}{\left(-b\right) \cdot b} - c}{b}\\
\end{array}
\end{array}
if b < 7.79999999999999986e-4Initial program 78.0%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lower-*.f64N/A
metadata-eval78.4
Applied rewrites78.4%
if 7.79999999999999986e-4 < b Initial program 30.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-/.f6492.3
Applied rewrites92.3%
Taylor expanded in b around inf
Applied rewrites92.3%
; Alternative: unconditional two-term series (c^2*a / (-b*b) - c) / b,
; an approximation valid when a is small relative to b.
(FPCore (a b c) :precision binary64 (/ (- (/ (* (* c c) a) (* (- b) b)) c) b))
/* Two-term series approximation of the '+' root: (c^2*a / (-b*b) - c) / b.
 * Per the report's derivation, this comes from expanding in a around 0. */
double code(double a, double b, double c) {
    double quad_term = ((c * c) * a) / (-b * b);
    return (quad_term - c) / b;
}
! Two-term series approximation of the '+' root: (c^2*a / (-b*b) - c) / b.
! NOTE(review): generated code — real(8) is non-standard; kept verbatim.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((((c * c) * a) / (-b * b)) - c) / b
end function
// Two-term series approximation of the '+' root: (c^2*a / (-b*b) - c) / b.
public static double code(double a, double b, double c) {
return ((((c * c) * a) / (-b * b)) - c) / b;
}
# Two-term series approximation of the '+' root: (c^2*a / (-b*b) - c) / b.
def code(a, b, c): return ((((c * c) * a) / (-b * b)) - c) / b
# Two-term series approximation, explicit Float64 rounding at each step.
function code(a, b, c) return Float64(Float64(Float64(Float64(Float64(c * c) * a) / Float64(Float64(-b) * b)) - c) / b) end
% Two-term series approximation of the '+' root.
function tmp = code(a, b, c) tmp = ((((c * c) * a) / (-b * b)) - c) / b; end
(* Two-term series approximation of the '+' root at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[(c * c), $MachinePrecision] * a), $MachinePrecision] / N[((-b) * b), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\left(c \cdot c\right) \cdot a}{\left(-b\right) \cdot b} - c}{b}
\end{array}
Initial program 34.1%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.3%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-/.f6489.5
Applied rewrites89.5%
Taylor expanded in b around inf
Applied rewrites89.4%
; Alternative: leading-order approximation -c / b (a drops out entirely).
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Leading-order approximation of the '+' root: -c / b.
 * Parameter a is retained for interface compatibility but drops out. */
double code(double a, double b, double c) {
    (void) a;
    double result = -c / b;
    return result;
}
! Leading-order approximation of the '+' root: -c / b (a is unused but kept
! for interface compatibility with the other variants).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
// Leading-order approximation of the '+' root: -c / b (a is unused).
public static double code(double a, double b, double c) {
return -c / b;
}
# Leading-order approximation of the '+' root: -c / b (a is unused).
def code(a, b, c): return -c / b
# Leading-order approximation: -c / b (a is unused).
function code(a, b, c) return Float64(Float64(-c) / b) end
% Leading-order approximation: -c / b (a is unused).
function tmp = code(a, b, c) tmp = -c / b; end
(* Leading-order approximation: -c / b (a is unused). *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 34.1%
Taylor expanded in a around 0
associate-*r/N/A
mul-1-negN/A
lower-/.f64N/A
lower-neg.f6479.3
Applied rewrites79.3%
herbie shell --seed 2024337
; Full original Herbie benchmark: "Quadratic roots, medium range".
; The precondition confines a, b, c to (2^-53, 2^53), i.e. positive,
; moderately scaled inputs.
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))