
; Input program: one root of a*x^2 + b*x + c via the textbook quadratic formula.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
!> One root of a*x**2 + b*x + c via the textbook quadratic formula.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8) :: disc
! Discriminant; subtracting sqrt(disc) from b below cancels when 4ac << b**2.
disc = (b * b) - ((4.0d0 * a) * c)
code = (sqrt(disc) - b) / (2.0d0 * a)
end function
/** One root of a*x^2 + b*x + c via the textbook quadratic formula. */
public static double code(double a, double b, double c) {
    final double discriminant = (b * b) - ((4.0 * a) * c);
    final double root = Math.sqrt(discriminant);
    return (root - b) / (2.0 * a);
}
def code(a, b, c):
    # One root of a*x^2 + b*x + c via the textbook quadratic formula.
    discriminant = (b * b) - ((4.0 * a) * c)
    return (math.sqrt(discriminant) - b) / (2.0 * a)
# One quadratic root; the Float64(...) wrappers pin every intermediate to binary64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% One quadratic root; relies on MATLAB's default double (binary64) arithmetic.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* One quadratic root; N[..., $MachinePrecision] forces machine precision at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the input program (baseline entry in the table).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Quadratic root — repeat of the input program as alternative 1. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic root — repeat of the input program as alternative 1.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic root — repeat of the input program as alternative 1.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Quadratic root — repeat of the input program as alternative 1.
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Quadratic root — repeat of the input program as alternative 1.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Quadratic root — repeat of the input program as alternative 1.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Quadratic root — repeat of the input program as alternative 1. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
; Alternative 2: conjugate (rationalized) form with fma, avoiding the -b + sqrt cancellation.
(FPCore (a b c) :precision binary64 (/ (fma (* -4.0 a) c 0.0) (* (+ (sqrt (fma (* -4.0 a) c (* b b))) b) (* 2.0 a))))
double code(double a, double b, double c) {
return fma((-4.0 * a), c, 0.0) / ((sqrt(fma((-4.0 * a), c, (b * b))) + b) * (2.0 * a));
}
# Conjugate (rationalized) quadratic root using fma; Float64(...) pins each step to binary64.
function code(a, b, c) return Float64(fma(Float64(-4.0 * a), c, 0.0) / Float64(Float64(sqrt(fma(Float64(-4.0 * a), c, Float64(b * b))) + b) * Float64(2.0 * a))) end
(* Conjugate quadratic root; note fma is rendered here as a plain multiply-add. *)
code[a_, b_, c_] := N[(N[(N[(-4.0 * a), $MachinePrecision] * c + 0.0), $MachinePrecision] / N[(N[(N[Sqrt[N[(N[(-4.0 * a), $MachinePrecision] * c + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + b), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-4 \cdot a, c, 0\right)}{\left(\sqrt{\mathsf{fma}\left(-4 \cdot a, c, b \cdot b\right)} + b\right) \cdot \left(2 \cdot a\right)}
\end{array}
Initial program 19.7%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lower-*.f64N/A
metadata-eval19.8
Applied rewrites19.8%
lift-+.f64N/A
+-commutativeN/A
flip-+N/A
lower-/.f64N/A
Applied rewrites20.0%
lift-*.f64N/A
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
lift--.f64N/A
lift-fma.f64N/A
associate--l+N/A
lower-fma.f64N/A
+-inversesN/A
lower-*.f64N/A
lift-*.f6499.5
Applied rewrites99.5%
Final simplification99.5%
; Alternative 3: series form from a Taylor expansion in a around 0 (per the log below).
(FPCore (a b c) :precision binary64 (/ (- (fma (/ (* c c) b) (/ a b) c)) b))
double code(double a, double b, double c) {
return -fma(((c * c) / b), (a / b), c) / b;
}
# Series form of the root (Taylor expansion in a around 0), fma-based.
function code(a, b, c) return Float64(Float64(-fma(Float64(Float64(c * c) / b), Float64(a / b), c)) / b) end
(* Series form of the root; fma rendered as plain multiply-add. *)
code[a_, b_, c_] := N[((-N[(N[(N[(c * c), $MachinePrecision] / b), $MachinePrecision] * N[(a / b), $MachinePrecision] + c), $MachinePrecision]) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\mathsf{fma}\left(\frac{c \cdot c}{b}, \frac{a}{b}, c\right)}{b}
\end{array}
Initial program 19.7%
Taylor expanded in a around 0
distribute-lft-outN/A
unpow3N/A
unpow2N/A
associate-/r*N/A
div-add-revN/A
associate-/l*N/A
distribute-lft-outN/A
lower-/.f64N/A
Applied rewrites94.7%
; Alternative 4: series form from a Taylor expansion in c around 0 (per the log below).
(FPCore (a b c) :precision binary64 (* (/ (fma (- a) (/ c (* b b)) -1.0) b) c))
double code(double a, double b, double c) {
return (fma(-a, (c / (b * b)), -1.0) / b) * c;
}
# Series form of the root (Taylor expansion in c around 0), fma-based.
function code(a, b, c) return Float64(Float64(fma(Float64(-a), Float64(c / Float64(b * b)), -1.0) / b) * c) end
(* Series form of the root; fma rendered as plain multiply-add. *)
code[a_, b_, c_] := N[(N[(N[((-a) * N[(c / N[(b * b), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / b), $MachinePrecision] * c), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-a, \frac{c}{b \cdot b}, -1\right)}{b} \cdot c
\end{array}
Initial program 19.7%
Taylor expanded in c around 0
*-commutativeN/A
associate-*r/N/A
associate-*r*N/A
mul-1-negN/A
associate-*l/N/A
distribute-neg-fracN/A
mul-1-negN/A
lower-*.f64N/A
Applied rewrites94.3%
Taylor expanded in b around -inf
Applied rewrites94.4%
; Alternative 5: leading-order root for small a: x ~ -c/b (a is unused).
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Leading-order root for small a: x = -c/b. Parameter a is intentionally unused. */
double code(double a, double b, double c) {
    (void)a;
    const double ratio = c / b;
    return -ratio;
}
!> Leading-order root for small a: x = -c/b. Argument a is intentionally unused.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -(c / b)
end function
/** Leading-order root for small a: x = -c/b. Parameter a is intentionally unused. */
public static double code(double a, double b, double c) {
    final double ratio = c / b;
    return -ratio;
}
def code(a, b, c):
    # Leading-order root for small a: x = -c/b (a is intentionally unused).
    ratio = c / b
    return -ratio
# Leading-order root for small a: x = -c/b (a is unused).
function code(a, b, c) return Float64(Float64(-c) / b) end
% Leading-order root for small a: x = -c/b (a is unused).
function tmp = code(a, b, c) tmp = -c / b; end
(* Leading-order root for small a: x = -c/b (a is unused). *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 19.7%
Taylor expanded in a around 0
associate-*r/N/A
mul-1-negN/A
lower-/.f64N/A
lower-neg.f6489.3
Applied rewrites89.3%
; Alternative 6: constant 0 (the degenerate Taylor limit; all arguments unused).
(FPCore (a b c) :precision binary64 0.0)
/* Constant alternative: always returns 0. All parameters are intentionally unused. */
double code(double a, double b, double c) {
    (void)a;
    (void)b;
    (void)c;
    return 0.0;
}
! Constant alternative: always returns 0 (all arguments unused).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 0.0d0
end function
/** Constant alternative: always returns 0. All parameters are intentionally unused. */
public static double code(double a, double b, double c) {
    final double result = 0.0;
    return result;
}
def code(a, b, c):
    # Constant alternative: always returns 0 (all arguments unused).
    result = 0.0
    return result
# Constant alternative: always returns 0 (all arguments unused).
function code(a, b, c) return 0.0 end
% Constant alternative: always returns 0 (all arguments unused).
function tmp = code(a, b, c) tmp = 0.0; end
(* Constant alternative: always returns 0 (all arguments unused). *)
code[a_, b_, c_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 19.7%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lower-*.f64N/A
metadata-eval19.8
Applied rewrites19.8%
lift-/.f64N/A
lift-+.f64N/A
div-addN/A
lift-*.f64N/A
*-commutativeN/A
associate-/r*N/A
frac-addN/A
lower-/.f64N/A
Applied rewrites20.9%
Taylor expanded in a around 0
associate-*r/N/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
lower-/.f64N/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
+-inverses3.3
Applied rewrites3.3%
Taylor expanded in a around 0
Applied rewrites3.3%
herbie shell --seed 2024339
; Full input specification: quadratic root with the sampled precondition
; restricting a, b, c to roughly [4.9e-32, 2.0e31].
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))