
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Quadratic-formula root: (-b + sqrt(b**2 - 4*a*c)) / (2*a).
    ! Direct translation of the binary64 FPCore expression; note this
    ! form cancels catastrophically when b > 0 and 4ac is small.
    implicit none
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // Quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a); operation
    // order matches the FPCore spec exactly.
    final double disc = (b * b) - ((4.0 * a) * c);
    final double root = Math.sqrt(disc);
    return (-b + root) / (2.0 * a);
}
def code(a, b, c):
    """Quadratic-formula root: (-b + sqrt(b*b - 4*a*c)) / (2*a).

    Operation order matches the FPCore spec; cancels badly for b > 0.
    """
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
function code(a, b, c)
    # Quadratic-formula root; every intermediate is rounded to Float64
    # exactly as in the original single-line form.
    bb = Float64(b * b)
    four_ac = Float64(Float64(4.0 * a) * c)
    disc = Float64(bb - four_ac)
    num = Float64(Float64(-b) + sqrt(disc))
    return Float64(num / Float64(2.0 * a))
end
function tmp = code(a, b, c)
    % Quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a); same
    % operation order as the one-line original.
    disc = (b * b) - ((4.0 * a) * c);
    tmp = (-b + sqrt(disc)) / (2.0 * a);
end
(* Quadratic-formula root (-b + Sqrt[b^2 - 4 a c]) / (2 a); each subexpression is rounded to $MachinePrecision to mirror binary64 evaluation. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
    ! Baseline quadratic-formula root: (-b + sqrt(b**2 - 4*a*c)) / (2*a),
    ! translated directly from the binary64 FPCore expression.
    implicit none
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // Baseline quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a).
    final double discriminant = (b * b) - ((4.0 * a) * c);
    return (-b + Math.sqrt(discriminant)) / (2.0 * a);
}
def code(a, b, c):
    """Baseline quadratic-formula root (-b + sqrt(b*b - 4*a*c)) / (2*a)."""
    four_ac = (4.0 * a) * c
    return (-b + math.sqrt((b * b) - four_ac)) / (2.0 * a)
function code(a, b, c)
    # Baseline root; Float64 rounding at every step, as in the original.
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    numerator = Float64(Float64(-b) + sqrt(disc))
    return Float64(numerator / Float64(2.0 * a))
end
function tmp = code(a, b, c)
    % Baseline quadratic-formula root, same operation order as original.
    numerator = -b + sqrt((b * b) - ((4.0 * a) * c));
    tmp = numerator / (2.0 * a);
end
(* Baseline quadratic-formula root; every subexpression rounded to $MachinePrecision to mimic binary64 arithmetic. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (/ (* c -2.0) (+ b (sqrt (fma b b (* -4.0 (* c a)))))))
double code(double a, double b, double c) {
return (c * -2.0) / (b + sqrt(fma(b, b, (-4.0 * (c * a)))));
}
function code(a, b, c)
    # Rationalized alternative: -2c / (b + sqrt(fma(b, b, -4ca))).
    inner = Float64(-4.0 * Float64(c * a))
    disc = fma(b, b, inner)
    denom = Float64(b + sqrt(disc))
    return Float64(Float64(c * -2.0) / denom)
end
(* Rationalized alternative -2c / (b + Sqrt[b^2 - 4 c a]); the fma of the other back-ends appears here as an unfused multiply-add, rounded at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(c * -2.0), $MachinePrecision] / N[(b + N[Sqrt[N[(b * b + N[(-4.0 * N[(c * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c \cdot -2}{b + \sqrt{\mathsf{fma}\left(b, b, -4 \cdot \left(c \cdot a\right)\right)}}
\end{array}
Initial program 15.7%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6415.7
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-*.f64N/A
metadata-eval15.7
Applied rewrites15.7%
lift-/.f64N/A
div-invN/A
lift--.f64N/A
flip--N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites16.2%
Taylor expanded in b around 0
lower-*.f6499.9
Applied rewrites99.9%
Final simplification99.9%
(FPCore (a b c) :precision binary64 (/ (* c -2.0) (fma -2.0 (/ (* c a) b) (* b 2.0))))
double code(double a, double b, double c) {
return (c * -2.0) / fma(-2.0, ((c * a) / b), (b * 2.0));
}
function code(a, b, c)
    # Series-expanded alternative: -2c / fma(-2, ca/b, 2b).
    q = Float64(Float64(c * a) / b)
    denom = fma(-2.0, q, Float64(b * 2.0))
    return Float64(Float64(c * -2.0) / denom)
end
(* Series-expanded alternative -2c / (-2(ca/b) + 2b); the fma of other back-ends is an unfused multiply-add here, rounded at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(c * -2.0), $MachinePrecision] / N[(-2.0 * N[(N[(c * a), $MachinePrecision] / b), $MachinePrecision] + N[(b * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c \cdot -2}{\mathsf{fma}\left(-2, \frac{c \cdot a}{b}, b \cdot 2\right)}
\end{array}
Initial program 15.7%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6415.7
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-*.f64N/A
metadata-eval15.7
Applied rewrites15.7%
lift-/.f64N/A
div-invN/A
lift--.f64N/A
flip--N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites16.2%
Taylor expanded in b around 0
lower-*.f6499.9
Applied rewrites99.9%
Taylor expanded in c around 0
lower-fma.f64N/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f6496.6
Applied rewrites96.6%
Final simplification96.6%
(FPCore (a b c) :precision binary64 (/ (* c -2.0) (+ b (fma -2.0 (/ (* c a) b) b))))
double code(double a, double b, double c) {
return (c * -2.0) / (b + fma(-2.0, ((c * a) / b), b));
}
function code(a, b, c)
    # Series-expanded alternative: -2c / (b + fma(-2, ca/b, b)).
    q = Float64(Float64(c * a) / b)
    denom = Float64(b + fma(-2.0, q, b))
    return Float64(Float64(c * -2.0) / denom)
end
(* Series-expanded alternative -2c / (b + (-2(ca/b) + b)), rounded subexpression-by-subexpression at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(c * -2.0), $MachinePrecision] / N[(b + N[(-2.0 * N[(N[(c * a), $MachinePrecision] / b), $MachinePrecision] + b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c \cdot -2}{b + \mathsf{fma}\left(-2, \frac{c \cdot a}{b}, b\right)}
\end{array}
Initial program 15.7%
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6415.7
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-*.f64N/A
metadata-eval15.7
Applied rewrites15.7%
lift-/.f64N/A
div-invN/A
lift--.f64N/A
flip--N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites16.2%
Taylor expanded in b around 0
lower-*.f6499.9
Applied rewrites99.9%
Taylor expanded in c around 0
+-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f6496.6
Applied rewrites96.6%
Final simplification96.6%
(FPCore (a b c) :precision binary64 (/ (fma (* c c) (/ a (* b b)) c) (- b)))
double code(double a, double b, double c) {
return fma((c * c), (a / (b * b)), c) / -b;
}
function code(a, b, c)
    # Large-b alternative: fma(c*c, a/(b*b), c) / -b.
    scale = Float64(a / Float64(b * b))
    numerator = fma(Float64(c * c), scale, c)
    return Float64(numerator / Float64(-b))
end
(* Large-b alternative (c^2 * a/b^2 + c) / (-b); the fma of other back-ends is an unfused multiply-add here, rounded at $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}
\end{array}
Initial program 15.7%
Taylor expanded in b around inf
distribute-lft-outN/A
associate-/l*N/A
mul-1-negN/A
lower-neg.f64N/A
lower-/.f64N/A
+-commutativeN/A
*-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f6496.5
Applied rewrites96.5%
Final simplification96.5%
(FPCore (a b c) :precision binary64 (/ c (- b)))
double code(double a, double b, double c) {
    /* Leading-order alternative: c / -b. Parameter 'a' is intentionally
     * unused (kept so all alternatives share one signature). */
    double neg_b = -b;
    return c / neg_b;
}
real(8) function code(a, b, c)
    ! Leading-order alternative: c / (-b). Argument 'a' is intentionally
    ! unused (all alternatives share one interface).
    ! Fix: standard Fortran forbids a unary minus directly after a binary
    ! operator, so 'c / -b' (a vendor extension) becomes 'c / (-b)'.
    implicit none
    real(8), intent(in) :: a
    real(8), intent(in) :: b
    real(8), intent(in) :: c
    code = c / (-b)
end function
public static double code(double a, double b, double c) {
    // Leading-order alternative: c / -b; 'a' intentionally unused.
    final double negB = -b;
    return c / negB;
}
def code(a, b, c):
    """Leading-order alternative: c / -b (``a`` intentionally unused)."""
    neg_b = -b
    return c / neg_b
function code(a, b, c)
    # Leading-order alternative: c / -b ('a' intentionally unused).
    neg_b = Float64(-b)
    return Float64(c / neg_b)
end
function tmp = code(a, b, c)
    % Leading-order alternative: c / -b; 'a' intentionally unused.
    negb = -b;
    tmp = c / negb;
end
(* Leading-order alternative c / (-b); 'a' is accepted but unused so all alternatives share one signature. *)
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{-b}
\end{array}
Initial program 15.7%
Taylor expanded in b around inf
mul-1-negN/A
distribute-neg-frac2N/A
lower-/.f64N/A
lower-neg.f6492.0
Applied rewrites92.0%
herbie shell --seed 2024222
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))