
;; Textbook quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in binary64 — the report's initial program.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
! NOTE(review): cancellation-prone when b*b dominates 4*a*c (see accuracy figures below).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/**
 * Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
 * Same operation order as the original one-liner, so results are bit-identical.
 * NOTE(review): loses accuracy to cancellation when b*b >> 4*a*c.
 */
public static double code(double a, double b, double c) {
    double radicand = (b * b) - ((4.0 * a) * c);
    double numerator = -b + Math.sqrt(radicand);
    return numerator / (2.0 * a);
}
def code(a, b, c):
    """Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).

    Operation order matches the original one-liner exactly, so the
    floating-point result is bit-identical.
    NOTE(review): cancellation-prone when b*b dominates 4*a*c.
    """
    radicand = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(radicand)) / (2.0 * a)
# Same quadratic-root formula with every intermediate explicitly rounded to Float64.
# Kept byte-identical: the Float64(...) wrapping pins rounding at each step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a); cancellation-prone when b*b >> 4*a*c.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Quadratic root with each intermediate rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: identical to the initial program (baseline listing).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Alternative 1 (baseline): textbook quadratic root (-b + sqrt(b*b - 4ac)) / (2a).
   NOTE(review): cancellation-prone when 4*a*c is tiny relative to b*b. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Alternative 1 (baseline): quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a), double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Alternative 1 (baseline): quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
// NOTE(review): loses accuracy to cancellation when b*b >> 4*a*c.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    """Baseline quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).

    Bit-identical to the original one-liner; only the layout differs.
    """
    root = math.sqrt((b * b) - ((4.0 * a) * c))
    return (-b + root) / (2.0 * a)
# Alternative 1 (baseline) with explicit Float64 rounding of every intermediate.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Alternative 1 (baseline): quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Alternative 1 (baseline): quadratic root with machine-precision rounding at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
;; Alternative: rationalized form using fma; the first addend is identically zero but kept by the rewrite.
(FPCore (a b c) :precision binary64 (+ (/ (- (* b b) (* b b)) (* (* a 2.0) (+ b (sqrt (fma (* a c) -4.0 (* b b)))))) (* (/ a (* a 2.0)) (/ (* c -4.0) (+ b (sqrt (fma c (* a -4.0) (* b b))))))))
double code(double a, double b, double c) {
return (((b * b) - (b * b)) / ((a * 2.0) * (b + sqrt(fma((a * c), -4.0, (b * b)))))) + ((a / (a * 2.0)) * ((c * -4.0) / (b + sqrt(fma(c, (a * -4.0), (b * b))))));
}
# Rationalized quadratic root using fma, with every intermediate rounded to Float64.
function code(a, b, c) return Float64(Float64(Float64(Float64(b * b) - Float64(b * b)) / Float64(Float64(a * 2.0) * Float64(b + sqrt(fma(Float64(a * c), -4.0, Float64(b * b)))))) + Float64(Float64(a / Float64(a * 2.0)) * Float64(Float64(c * -4.0) / Float64(b + sqrt(fma(c, Float64(a * -4.0), Float64(b * b))))))) end
(* Rationalized quadratic root; fma is emulated as x*y + z inside a single N[...] rounding. *)
code[a_, b_, c_] := N[(N[(N[(N[(b * b), $MachinePrecision] - N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(N[(a * 2.0), $MachinePrecision] * N[(b + N[Sqrt[N[(N[(a * c), $MachinePrecision] * -4.0 + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a / N[(a * 2.0), $MachinePrecision]), $MachinePrecision] * N[(N[(c * -4.0), $MachinePrecision] / N[(b + N[Sqrt[N[(c * N[(a * -4.0), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{b \cdot b - b \cdot b}{\left(a \cdot 2\right) \cdot \left(b + \sqrt{\mathsf{fma}\left(a \cdot c, -4, b \cdot b\right)}\right)} + \frac{a}{a \cdot 2} \cdot \frac{c \cdot -4}{b + \sqrt{\mathsf{fma}\left(c, a \cdot -4, b \cdot b\right)}}
\end{array}
Initial program 33.8%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval33.9
Applied egg-rr33.9%
lift-neg.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
flip-+N/A
lift-neg.f64N/A
lift-neg.f64N/A
sqr-negN/A
lift-*.f64N/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
Applied egg-rr34.3%
Applied egg-rr99.3%
associate-*l*N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
lift-neg.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
lift--.f64N/A
*-commutativeN/A
*-commutativeN/A
times-fracN/A
Applied egg-rr99.7%
Final simplification99.7%
;; Alternative: rationalized form with the a/(2a) factor refactored into (a * -4) * (c / ...).
(FPCore (a b c) :precision binary64 (+ (/ (- (* b b) (* b b)) (* (* a 2.0) (+ b (sqrt (fma (* a c) -4.0 (* b b)))))) (* (* a -4.0) (/ c (* (* a 2.0) (+ b (sqrt (fma c (* a -4.0) (* b b)))))))))
double code(double a, double b, double c) {
return (((b * b) - (b * b)) / ((a * 2.0) * (b + sqrt(fma((a * c), -4.0, (b * b)))))) + ((a * -4.0) * (c / ((a * 2.0) * (b + sqrt(fma(c, (a * -4.0), (b * b)))))));
}
# Rationalized quadratic root (scale factor refactored as a * -4.0), all intermediates Float64.
function code(a, b, c) return Float64(Float64(Float64(Float64(b * b) - Float64(b * b)) / Float64(Float64(a * 2.0) * Float64(b + sqrt(fma(Float64(a * c), -4.0, Float64(b * b)))))) + Float64(Float64(a * -4.0) * Float64(c / Float64(Float64(a * 2.0) * Float64(b + sqrt(fma(c, Float64(a * -4.0), Float64(b * b)))))))) end
(* Rationalized quadratic root (a * -4.0 scale-factor variant); fma emulated as x*y + z. *)
code[a_, b_, c_] := N[(N[(N[(N[(b * b), $MachinePrecision] - N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(N[(a * 2.0), $MachinePrecision] * N[(b + N[Sqrt[N[(N[(a * c), $MachinePrecision] * -4.0 + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a * -4.0), $MachinePrecision] * N[(c / N[(N[(a * 2.0), $MachinePrecision] * N[(b + N[Sqrt[N[(c * N[(a * -4.0), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{b \cdot b - b \cdot b}{\left(a \cdot 2\right) \cdot \left(b + \sqrt{\mathsf{fma}\left(a \cdot c, -4, b \cdot b\right)}\right)} + \left(a \cdot -4\right) \cdot \frac{c}{\left(a \cdot 2\right) \cdot \left(b + \sqrt{\mathsf{fma}\left(c, a \cdot -4, b \cdot b\right)}\right)}
\end{array}
Initial program 33.8%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval33.9
Applied egg-rr33.9%
lift-neg.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
flip-+N/A
lift-neg.f64N/A
lift-neg.f64N/A
sqr-negN/A
lift-*.f64N/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
Applied egg-rr34.3%
Applied egg-rr99.3%
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
lift-*.f64N/A
lift-neg.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
lift--.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-*.f64N/A
lower-/.f6499.4
Applied egg-rr99.4%
Final simplification99.4%
;; Alternative: fully rationalized root 4ac / (2a * (-b - sqrt(b*b - 4ac))) — avoids the -b + sqrt cancellation.
(FPCore (a b c) :precision binary64 (/ (* 4.0 (* a c)) (* (* a 2.0) (- (- b) (sqrt (fma (* a c) -4.0 (* b b)))))))
double code(double a, double b, double c) {
return (4.0 * (a * c)) / ((a * 2.0) * (-b - sqrt(fma((a * c), -4.0, (b * b)))));
}
# Fully rationalized quadratic root 4ac / (2a * (-b - sqrt(b*b - 4ac))), intermediates in Float64.
function code(a, b, c) return Float64(Float64(4.0 * Float64(a * c)) / Float64(Float64(a * 2.0) * Float64(Float64(-b) - sqrt(fma(Float64(a * c), -4.0, Float64(b * b)))))) end
(* Fully rationalized quadratic root 4ac / (2a * (-b - sqrt(b*b - 4ac))); fma emulated as x*y + z. *)
code[a_, b_, c_] := N[(N[(4.0 * N[(a * c), $MachinePrecision]), $MachinePrecision] / N[(N[(a * 2.0), $MachinePrecision] * N[((-b) - N[Sqrt[N[(N[(a * c), $MachinePrecision] * -4.0 + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{4 \cdot \left(a \cdot c\right)}{\left(a \cdot 2\right) \cdot \left(\left(-b\right) - \sqrt{\mathsf{fma}\left(a \cdot c, -4, b \cdot b\right)}\right)}
\end{array}
Initial program 33.8%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
associate-*l*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval33.9
Applied egg-rr33.9%
lift-neg.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
lift-sqrt.f64N/A
flip-+N/A
lift-neg.f64N/A
lift-neg.f64N/A
sqr-negN/A
lift-*.f64N/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
Applied egg-rr34.3%
Applied egg-rr34.7%
Taylor expanded in b around 0
lower-*.f64N/A
*-commutativeN/A
lower-*.f6499.3
Simplified99.3%
Final simplification99.3%
;; Alternative: series expansion -(a*c^2/b^3 + c/b), valid when b dominates (Taylor in b/c per the log).
(FPCore (a b c) :precision binary64 (- (fma a (/ (* c c) (* b (* b b))) (/ c b))))
double code(double a, double b, double c) {
return -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
# Series approximation -(a*c^2/b^3 + c/b) via one fma, intermediates rounded to Float64.
function code(a, b, c) return Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b))) end
(* Series approximation -(a*c^2/b^3 + c/b); fma emulated as a single-rounded x*y + z. *)
code[a_, b_, c_] := (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)
\end{array}
Initial program 33.8%
Taylor expanded in c around 0
distribute-lft-outN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
associate-/l*N/A
unpow2N/A
associate-*l*N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-/.f6489.5
Simplified89.5%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
cube-multN/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-/.f6489.8
Simplified89.8%
;; Alternative: series form (a*c^2/b^2 + c) / (-b), from Taylor expansion in b around inf per the log.
(FPCore (a b c) :precision binary64 (/ (fma a (/ (* c c) (* b b)) c) (- b)))
double code(double a, double b, double c) {
return fma(a, ((c * c) / (b * b)), c) / -b;
}
# Series approximation (a*c^2/b^2 + c) / (-b) via fma, intermediates rounded to Float64.
function code(a, b, c) return Float64(fma(a, Float64(Float64(c * c) / Float64(b * b)), c) / Float64(-b)) end
(* Series approximation (a*c^2/b^2 + c) / (-b); fma emulated as single-rounded x*y + z. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot b}, c\right)}{-b}
\end{array}
Initial program 33.8%
Taylor expanded in b around inf
distribute-lft-outN/A
associate-/l*N/A
mul-1-negN/A
lower-neg.f64N/A
lower-/.f64N/A
+-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6489.8
Simplified89.8%
Final simplification89.8%
;; Alternative: leading-order approximation c / (-b); drops the quadratic term entirely (a is unused).
(FPCore (a b c) :precision binary64 (/ c (- b)))
/* Leading-order series approximation of the root: c / (-b).
 * The coefficient 'a' is accepted for interface parity but unused. */
double code(double a, double b, double c) {
    double neg_b = -b;
    return c / neg_b;
}
! Leading-order series approximation of the root: c / (-b); argument a is unused.
! NOTE(review): "c / -b" (two adjacent operators) relies on a common extension;
! strictly standard Fortran would write c / (-b) — confirm the target compiler accepts it.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c / -b
end function
/**
 * Leading-order series approximation of the quadratic root: c / (-b).
 * Parameter {@code a} is kept for interface parity but unused.
 */
public static double code(double a, double b, double c) {
    double negB = -b;
    return c / negB;
}
def code(a, b, c):
    """Leading-order series approximation of the quadratic root: c / (-b).

    ``a`` is accepted for interface parity with the other variants but unused.
    """
    return c / -b
# Leading-order approximation c / (-b); a is unused, result rounded to Float64.
function code(a, b, c) return Float64(c / Float64(-b)) end
% Leading-order approximation of the root: c / (-b); argument a is unused.
function tmp = code(a, b, c) tmp = c / -b; end
(* Leading-order approximation c / (-b) at machine precision; a is unused. *)
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{-b}
\end{array}
Initial program 33.8%
Taylor expanded in b around inf
mul-1-negN/A
lower-neg.f64N/A
lower-/.f6479.9
Simplified79.9%
Final simplification79.9%
herbie shell --seed 2024215
;; Input specification for the Herbie run: the textbook quadratic root in binary64,
;; with all three coefficients preconditioned to (2^-53, 2^53) — strictly positive,
;; so the -b + sqrt(...) numerator is the cancellation-prone branch.
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))