
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), in double precision.
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), in real(8).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), in double precision.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), forced to Float64 at each step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Quadratic-formula root, '+' branch: (-b + Sqrt[b^2 - 4ac]) / (2a), rounded to machine precision at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
// Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), in double precision.
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), in real(8).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), in double precision.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a), forced to Float64 at each step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Quadratic-formula root, '+' branch: (-b + sqrt(b^2 - 4ac)) / (2a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Quadratic-formula root, '+' branch: (-b + Sqrt[b^2 - 4ac]) / (2a), rounded to machine precision at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (* (pow (- (- b) (sqrt (* (fma -4.0 c (* (/ b a) b)) a))) -1.0) (/ (* c (* a 4.0)) (* 2.0 a))))
// Herbie alternative: rationalized '+'-branch root, 2c / (-b - sqrt(b^2 - 4ac)),
// with the discriminant formed as fma(-4, c, b^2/a) * a; algebraically equivalent to the original.
double code(double a, double b, double c) {
return pow((-b - sqrt((fma(-4.0, c, ((b / a) * b)) * a))), -1.0) * ((c * (a * 4.0)) / (2.0 * a));
}
# Herbie alternative: rationalized '+'-branch root, 2c / (-b - sqrt(b^2 - 4ac)), using fma; algebraically equivalent.
function code(a, b, c) return Float64((Float64(Float64(-b) - sqrt(Float64(fma(-4.0, c, Float64(Float64(b / a) * b)) * a))) ^ -1.0) * Float64(Float64(c * Float64(a * 4.0)) / Float64(2.0 * a))) end
(* Herbie alternative: rationalized '+'-branch root, 2c / (-b - Sqrt[b^2 - 4ac]); algebraically equivalent. *)
code[a_, b_, c_] := N[(N[Power[N[((-b) - N[Sqrt[N[(N[(-4.0 * c + N[(N[(b / a), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision] * N[(N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(\left(-b\right) - \sqrt{\mathsf{fma}\left(-4, c, \frac{b}{a} \cdot b\right) \cdot a}\right)}^{-1} \cdot \frac{c \cdot \left(a \cdot 4\right)}{2 \cdot a}
\end{array}
Initial program 32.4%
Taylor expanded in a around inf
*-commutative  N/A
lower-*.f64  N/A
cancel-sign-sub-inv  N/A
metadata-eval  N/A
+-commutative  N/A
lower-fma.f64  N/A
unpow2  N/A
associate-/l*  N/A
lower-*.f64  N/A
lower-/.f64  32.3
Applied rewrites32.3%
lift-+.f64  N/A
flip-+  N/A
clear-num  N/A
lower-/.f64  N/A
Applied rewrites32.9%
Taylor expanded in a around 0
lower-*.f64  N/A
lower-*.f64  99.3
Applied rewrites99.3%
lift-/.f64  N/A
lift-/.f64  N/A
lift-/.f64  N/A
associate-/r/  N/A
Applied rewrites99.4%
(FPCore (a b c) :precision binary64 (/ (pow (/ (+ b (sqrt (fma -4.0 (* c a) (* b b)))) (* 4.0 (* a c))) -1.0) (* 2.0 (- a))))
// Herbie alternative: inverse of (b + sqrt(b^2 - 4ac)) / (4ac), divided by 2*(-a);
// rationalized form of the '+'-branch root, algebraically equivalent to the original.
double code(double a, double b, double c) {
return pow(((b + sqrt(fma(-4.0, (c * a), (b * b)))) / (4.0 * (a * c))), -1.0) / (2.0 * -a);
}
# Herbie alternative: inverse of (b + sqrt(b^2 - 4ac)) / (4ac), divided by 2*(-a); algebraically equivalent.
function code(a, b, c) return Float64((Float64(Float64(b + sqrt(fma(-4.0, Float64(c * a), Float64(b * b)))) / Float64(4.0 * Float64(a * c))) ^ -1.0) / Float64(2.0 * Float64(-a))) end
(* Herbie alternative: inverse of (b + Sqrt[b^2 - 4ac]) / (4ac), divided by 2*(-a); algebraically equivalent. *)
code[a_, b_, c_] := N[(N[Power[N[(N[(b + N[Sqrt[N[(-4.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(4.0 * N[(a * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision] / N[(2.0 * (-a)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\left(\frac{b + \sqrt{\mathsf{fma}\left(-4, c \cdot a, b \cdot b\right)}}{4 \cdot \left(a \cdot c\right)}\right)}^{-1}}{2 \cdot \left(-a\right)}
\end{array}
Initial program 32.4%
Taylor expanded in a around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6432.3
Applied rewrites32.3%
lift-+.f64N/A
flip-+N/A
clear-numN/A
lower-/.f64N/A
Applied rewrites32.9%
Taylor expanded in a around 0
lower-*.f64N/A
lower-*.f6499.3
Applied rewrites99.3%
Taylor expanded in b around 0
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.4
Applied rewrites99.4%
Final simplification99.4%
(FPCore (a b c) :precision binary64 (/ (pow (/ (fma -0.5 (/ b a) (* 0.5 (/ c b))) c) -1.0) (* 2.0 a)))
// Herbie alternative from a Taylor-based rewrite: inverse of fma(-0.5, b/a, 0.5*c/b)/c, over 2a.
// An approximation of the root (reported accuracy 89.7%), not an exact rearrangement.
double code(double a, double b, double c) {
return pow((fma(-0.5, (b / a), (0.5 * (c / b))) / c), -1.0) / (2.0 * a);
}
# Herbie alternative from a Taylor-based rewrite; an approximation of the root (reported accuracy 89.7%).
function code(a, b, c) return Float64((Float64(fma(-0.5, Float64(b / a), Float64(0.5 * Float64(c / b))) / c) ^ -1.0) / Float64(2.0 * a)) end
(* Herbie alternative from a Taylor-based rewrite; an approximation of the root (reported accuracy 89.7%). *)
code[a_, b_, c_] := N[(N[Power[N[(N[(-0.5 * N[(b / a), $MachinePrecision] + N[(0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / c), $MachinePrecision], -1.0], $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\left(\frac{\mathsf{fma}\left(-0.5, \frac{b}{a}, 0.5 \cdot \frac{c}{b}\right)}{c}\right)}^{-1}}{2 \cdot a}
\end{array}
Initial program 32.4%
Taylor expanded in a around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6432.3
Applied rewrites32.3%
lift-+.f64N/A
flip-+N/A
clear-numN/A
lower-/.f64N/A
Applied rewrites32.9%
Taylor expanded in c around 0
lower-/.f64N/A
lower-fma.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f6489.7
Applied rewrites89.7%
Final simplification89.7%
(FPCore (a b c) :precision binary64 (/ (/ (* c (* a 4.0)) (+ b (sqrt (* (fma -4.0 c (* (/ b a) b)) a)))) (* 2.0 (- a))))
// Herbie alternative: 4ac / (b + sqrt(b^2 - 4ac)), divided by 2*(-a);
// rationalized form of the '+'-branch root, algebraically equivalent to the original.
double code(double a, double b, double c) {
return ((c * (a * 4.0)) / (b + sqrt((fma(-4.0, c, ((b / a) * b)) * a)))) / (2.0 * -a);
}
# Herbie alternative: 4ac / (b + sqrt(b^2 - 4ac)), divided by 2*(-a); algebraically equivalent.
function code(a, b, c) return Float64(Float64(Float64(c * Float64(a * 4.0)) / Float64(b + sqrt(Float64(fma(-4.0, c, Float64(Float64(b / a) * b)) * a)))) / Float64(2.0 * Float64(-a))) end
(* Herbie alternative: 4ac / (b + Sqrt[b^2 - 4ac]), divided by 2*(-a); algebraically equivalent. *)
code[a_, b_, c_] := N[(N[(N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision] / N[(b + N[Sqrt[N[(N[(-4.0 * c + N[(N[(b / a), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(2.0 * (-a)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{c \cdot \left(a \cdot 4\right)}{b + \sqrt{\mathsf{fma}\left(-4, c, \frac{b}{a} \cdot b\right) \cdot a}}}{2 \cdot \left(-a\right)}
\end{array}
Initial program 32.4%
Taylor expanded in a around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6432.3
Applied rewrites32.3%
lift-+.f64N/A
flip-+N/A
clear-numN/A
lower-/.f64N/A
Applied rewrites32.9%
Taylor expanded in a around 0
lower-*.f64N/A
lower-*.f6499.3
Applied rewrites99.3%
lift-/.f64N/A
lift-/.f64N/A
clear-numN/A
lower-/.f6499.4
Applied rewrites99.4%
Final simplification99.4%
(FPCore (a b c) :precision binary64 (* (/ (* c (* a 4.0)) (+ b (sqrt (* (fma -4.0 c (* (/ b a) b)) a)))) (/ 0.5 (- a))))
// Herbie alternative: 4ac / (b + sqrt(b^2 - 4ac)), scaled by 0.5 / (-a);
// rationalized form of the '+'-branch root, algebraically equivalent to the original.
double code(double a, double b, double c) {
return ((c * (a * 4.0)) / (b + sqrt((fma(-4.0, c, ((b / a) * b)) * a)))) * (0.5 / -a);
}
# Herbie alternative: 4ac / (b + sqrt(b^2 - 4ac)), scaled by 0.5 / (-a); algebraically equivalent.
function code(a, b, c) return Float64(Float64(Float64(c * Float64(a * 4.0)) / Float64(b + sqrt(Float64(fma(-4.0, c, Float64(Float64(b / a) * b)) * a)))) * Float64(0.5 / Float64(-a))) end
(* Herbie alternative: 4ac / (b + Sqrt[b^2 - 4ac]), scaled by 0.5 / (-a); algebraically equivalent. *)
code[a_, b_, c_] := N[(N[(N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision] / N[(b + N[Sqrt[N[(N[(-4.0 * c + N[(N[(b / a), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(0.5 / (-a)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c \cdot \left(a \cdot 4\right)}{b + \sqrt{\mathsf{fma}\left(-4, c, \frac{b}{a} \cdot b\right) \cdot a}} \cdot \frac{0.5}{-a}
\end{array}
Initial program 32.4%
Taylor expanded in a around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6432.3
Applied rewrites32.3%
lift-+.f64N/A
flip-+N/A
clear-numN/A
lower-/.f64N/A
Applied rewrites32.9%
Taylor expanded in a around 0
lower-*.f64N/A
lower-*.f6499.3
Applied rewrites99.3%
lift-/.f64N/A
div-invN/A
lower-*.f64N/A
Applied rewrites99.3%
Final simplification99.3%
(FPCore (a b c) :precision binary64 (/ (- (- c) (/ (* a (* c c)) (* b b))) b))
// Herbie alternative from Taylor expansion in a around 0: (-c - a*c^2/b^2) / b.
// An approximation of the root, not an exact rearrangement.
double code(double a, double b, double c) {
return (-c - ((a * (c * c)) / (b * b))) / b;
}
! Herbie alternative from Taylor expansion in a around 0: (-c - a*c^2/b^2) / b.
! An approximation of the root, not an exact rearrangement.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-c - ((a * (c * c)) / (b * b))) / b
end function
// Herbie alternative from Taylor expansion in a around 0: (-c - a*c^2/b^2) / b; an approximation of the root.
public static double code(double a, double b, double c) {
return (-c - ((a * (c * c)) / (b * b))) / b;
}
# Herbie alternative from Taylor expansion in a around 0: (-c - a*c^2/b^2) / b; an approximation of the root.
def code(a, b, c): return (-c - ((a * (c * c)) / (b * b))) / b
# Herbie alternative from Taylor expansion in a around 0: (-c - a*c^2/b^2) / b; an approximation of the root.
function code(a, b, c) return Float64(Float64(Float64(-c) - Float64(Float64(a * Float64(c * c)) / Float64(b * b))) / b) end
% Herbie alternative from Taylor expansion in a around 0: (-c - a*c^2/b^2) / b; an approximation of the root.
function tmp = code(a, b, c) tmp = (-c - ((a * (c * c)) / (b * b))) / b; end
(* Herbie alternative from Taylor expansion in a around 0: (-c - a*c^2/b^2) / b; an approximation of the root. *)
code[a_, b_, c_] := N[(N[((-c) - N[(N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-c\right) - \frac{a \cdot \left(c \cdot c\right)}{b \cdot b}}{b}
\end{array}
Initial program 32.4%
Taylor expanded in a around 0
Applied rewrites94.9%
Taylor expanded in b around 0
Applied rewrites94.9%
Taylor expanded in b around -inf
Applied rewrites89.6%
Final simplification89.6%
(FPCore (a b c) :precision binary64 (* (/ (- -1.0 (/ (* c a) (* b b))) b) c))
// Herbie alternative from Taylor expansion in c around 0: ((-1 - c*a/b^2) / b) * c.
// An approximation of the root, not an exact rearrangement.
double code(double a, double b, double c) {
return ((-1.0 - ((c * a) / (b * b))) / b) * c;
}
! Herbie alternative from Taylor expansion in c around 0: ((-1 - c*a/b^2) / b) * c.
! An approximation of the root, not an exact rearrangement.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (((-1.0d0) - ((c * a) / (b * b))) / b) * c
end function
// Herbie alternative from Taylor expansion in c around 0: ((-1 - c*a/b^2) / b) * c; an approximation of the root.
public static double code(double a, double b, double c) {
return ((-1.0 - ((c * a) / (b * b))) / b) * c;
}
# Herbie alternative from Taylor expansion in c around 0: ((-1 - c*a/b^2) / b) * c; an approximation of the root.
def code(a, b, c): return ((-1.0 - ((c * a) / (b * b))) / b) * c
# Herbie alternative from Taylor expansion in c around 0: ((-1 - c*a/b^2) / b) * c; an approximation of the root.
function code(a, b, c) return Float64(Float64(Float64(-1.0 - Float64(Float64(c * a) / Float64(b * b))) / b) * c) end
% Herbie alternative from Taylor expansion in c around 0: ((-1 - c*a/b^2) / b) * c; an approximation of the root.
function tmp = code(a, b, c) tmp = ((-1.0 - ((c * a) / (b * b))) / b) * c; end
(* Herbie alternative from Taylor expansion in c around 0: ((-1 - c*a/b^2) / b) * c; an approximation of the root. *)
code[a_, b_, c_] := N[(N[(N[(-1.0 - N[(N[(c * a), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision] * c), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1 - \frac{c \cdot a}{b \cdot b}}{b} \cdot c
\end{array}
Initial program 32.4%
Taylor expanded in c around 0
*-commutativeN/A
sub-negN/A
distribute-neg-fracN/A
metadata-evalN/A
associate-*r/N/A
associate-*r*N/A
associate-*l/N/A
associate-*r/N/A
lower-*.f64N/A
Applied rewrites89.5%
Applied rewrites89.5%
(FPCore (a b c) :precision binary64 (/ (- c) b))
// Herbie alternative: leading-order Taylor term in a around 0, -c/b; the cheapest approximation of the root.
double code(double a, double b, double c) {
return -c / b;
}
! Herbie alternative: leading-order Taylor term in a around 0, -c/b; the cheapest approximation of the root.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
// Herbie alternative: leading-order Taylor term in a around 0, -c/b; the cheapest approximation of the root.
public static double code(double a, double b, double c) {
return -c / b;
}
# Herbie alternative: leading-order Taylor term in a around 0, -c/b; the cheapest approximation of the root.
def code(a, b, c): return -c / b
# Herbie alternative: leading-order Taylor term in a around 0, -c/b; the cheapest approximation of the root.
function code(a, b, c) return Float64(Float64(-c) / b) end
% Herbie alternative: leading-order Taylor term in a around 0, -c/b; the cheapest approximation of the root.
function tmp = code(a, b, c) tmp = -c / b; end
(* Herbie alternative: leading-order Taylor term in a around 0, -c/b; the cheapest approximation of the root. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 32.4%
Taylor expanded in a around 0
associate-*r/  N/A
lower-/.f64  N/A
mul-1-neg  N/A
lower-neg.f64  80.5
Applied rewrites80.5%
herbie shell --seed 2024308
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))