
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger quadratic root: (-b + sqrt(b*b - 4ac)) / (2a), in binary64.
! NOTE(review): generated code — no implicit none / kind parameter; the
! naive formula cancels badly when b*b >> 4ac (see alternatives below).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
! b*b - 4ac may suffer catastrophic cancellation
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Larger quadratic root via the textbook formula (-b + sqrt(b*b - 4ac)) / (2a). */
public static double code(double a, double b, double c) {
    final double disc = (b * b) - ((4.0 * a) * c);
    final double num = -b + Math.sqrt(disc);
    return num / (2.0 * a);
}
def code(a, b, c):
    """Larger quadratic root via the textbook formula (-b + sqrt(b*b - 4ac)) / (2a)."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Larger quadratic root, with explicit Float64 rounding at each step (Herbie output).
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Larger quadratic root: (-b + sqrt(b*b - 4ac)) / (2a) (Herbie initial program).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Larger quadratic root; N[..., $MachinePrecision] forces machine-precision rounding per operation. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Initial program (repeated in the alternatives listing): naive quadratic root. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program (repeated in the alternatives listing): naive quadratic root.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Initial program (repeated in the alternatives listing): naive quadratic root. */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Initial program (repeated in the alternatives listing): naive quadratic root.
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Initial program (repeated): naive quadratic root with per-step Float64 rounding.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Initial program (repeated in the alternatives listing): naive quadratic root.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Initial program (repeated): naive quadratic root at machine precision. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (/ (* (* c a) -4.0) (* (* 2.0 a) (+ (sqrt (fma (* -4.0 a) c (* b b))) b))))
double code(double a, double b, double c) {
return ((c * a) * -4.0) / ((2.0 * a) * (sqrt(fma((-4.0 * a), c, (b * b))) + b));
}
# Alternative 1: rationalized root -4ac / ((2a)(sqrt(fma(-4a,c,b*b)) + b)), per-step Float64 rounding.
function code(a, b, c) return Float64(Float64(Float64(c * a) * -4.0) / Float64(Float64(2.0 * a) * Float64(sqrt(fma(Float64(-4.0 * a), c, Float64(b * b))) + b))) end
(* Alternative 1: rationalized root; the fma is expanded to a*c + b*b since Mathematica has no fused multiply-add here. *)
code[a_, b_, c_] := N[(N[(N[(c * a), $MachinePrecision] * -4.0), $MachinePrecision] / N[(N[(2.0 * a), $MachinePrecision] * N[(N[Sqrt[N[(N[(-4.0 * a), $MachinePrecision] * c + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(c \cdot a\right) \cdot -4}{\left(2 \cdot a\right) \cdot \left(\sqrt{\mathsf{fma}\left(-4 \cdot a, c, b \cdot b\right)} + b\right)}
\end{array}
Initial program 17.9%
lift-/.f64 N/A
lift-+.f64 N/A
+-commutative N/A
lift-neg.f64 N/A
unsub-neg N/A
div-sub N/A
lower--.f64 N/A
Applied rewrites 17.6%
Applied rewrites 99.4%
lift-fma.f64 N/A
+-rgt-identity N/A
lift-*.f64 N/A
associate-*l* N/A
lift-*.f64 N/A
*-commutative N/A
lower-*.f64 99.4
lift-*.f64 N/A
*-commutative N/A
lower-*.f64 99.4
Applied rewrites 99.4%
(FPCore (a b c) :precision binary64 (/ (fma c (/ (* a c) (* b b)) c) (- b)))
double code(double a, double b, double c) {
return fma(c, ((a * c) / (b * b)), c) / -b;
}
# Alternative 2: series-derived form fma(c, a*c/(b*b), c) / -b, per-step Float64 rounding.
function code(a, b, c) return Float64(fma(c, Float64(Float64(a * c) / Float64(b * b)), c) / Float64(-b)) end
(* Alternative 2: (c * (ac/b^2) + c) / -b; fma expanded to multiply-add at machine precision. *)
code[a_, b_, c_] := N[(N[(c * N[(N[(a * c), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(c, \frac{a \cdot c}{b \cdot b}, c\right)}{-b}
\end{array}
Initial program 17.9%
Taylor expanded in a around 0
mul-1-neg N/A
unsub-neg N/A
associate-*r/ N/A
unpow3 N/A
unpow2 N/A
associate-/r* N/A
div-sub N/A
unsub-neg N/A
mul-1-neg N/A
distribute-lft-out N/A
associate-/l* N/A
mul-1-neg N/A
lower-neg.f64 N/A
lower-/.f64 N/A
Applied rewrites 95.2%
Applied rewrites 95.2%
Final simplification 95.2%
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Herbie alternative 3: zeroth-order Taylor expansion about a = 0 — the root
 * tends to -c/b.  Parameter a is intentionally unused. */
double code(double a, double b, double c) {
    double limit_root = -c / b;
    return limit_root;
}
! Alternative 3: limit of the root as a -> 0, i.e. -c/b (argument a unused).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
/** Alternative 3: limit of the root as a -> 0, i.e. -c / b (argument a unused). */
public static double code(double a, double b, double c) {
return -c / b;
}
def code(a, b, c):
    """Limit of the quadratic root as a -> 0: -c / b (parameter a is unused)."""
    return -c / b
# Alternative 3: limit a -> 0 of the root, i.e. -c/b (argument a unused).
function code(a, b, c) return Float64(Float64(-c) / b) end
% Alternative 3: limit a -> 0 of the root, i.e. -c/b (argument a unused).
function tmp = code(a, b, c) tmp = -c / b; end
(* Alternative 3: limit a -> 0 of the root, i.e. -c/b (argument a unused). *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 17.9%
Taylor expanded in a around 0
associate-*r/ N/A
lower-/.f64 N/A
mul-1-neg N/A
lower-neg.f64 90.1
Applied rewrites 90.1%
herbie shell --seed 2024309
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))