
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated directly.
! Report accuracy on sampled binary64 inputs: 16.0%.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a).
    // Direct textbook evaluation; report accuracy: 16.0% on sampled inputs.
    double discriminant = (b * b) - ((4.0 * a) * c);
    double root = Math.sqrt(discriminant);
    return (-b + root) / (2.0 * a);
}
def code(a, b, c):
    """Larger quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a).

    Direct textbook evaluation; the report measures 16.0% accuracy
    on its sampled binary64 inputs.
    """
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Initial program: naive quadratic root with explicit Float64 roundings (report accuracy 16.0%).
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Initial program: naive quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a); report accuracy 16.0%.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Initial program: naive quadratic root, each intermediate rounded to $MachinePrecision; report accuracy 16.0%. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Initial program (repeated listing): naive quadratic root; report accuracy 16.0%. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program (repeated listing): naive quadratic root; report accuracy 16.0%.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program (repeated listing): naive quadratic root; report accuracy 16.0%.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Initial program (repeated listing): naive quadratic root; report accuracy 16.0%.
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Initial program (repeated listing): naive quadratic root; report accuracy 16.0%.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Initial program (repeated listing): naive quadratic root; report accuracy 16.0%.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Initial program (repeated listing): naive quadratic root; report accuracy 16.0%. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (fma (fma (/ (- c) (* b b)) (/ c b) (* (* (* (pow c 3.0) (fma (* c a) -5.0 (* -2.0 (* b b)))) (pow b -7.0)) a)) a (/ (- c) b)))
/* Herbie alternative (trace accuracy 97.1%): sqrt-free fma form derived by
 * Taylor expansion in a around 0 (see the rewrite trace below this listing). */
double code(double a, double b, double c) {
return fma(fma((-c / (b * b)), (c / b), (((pow(c, 3.0) * fma((c * a), -5.0, (-2.0 * (b * b)))) * pow(b, -7.0)) * a)), a, (-c / b));
}
# Herbie alternative (trace accuracy 97.1%): Julia rendering of the sqrt-free fma form above.
function code(a, b, c) return fma(fma(Float64(Float64(-c) / Float64(b * b)), Float64(c / b), Float64(Float64(Float64((c ^ 3.0) * fma(Float64(c * a), -5.0, Float64(-2.0 * Float64(b * b)))) * (b ^ -7.0)) * a)), a, Float64(Float64(-c) / b)) end
(* Herbie alternative (trace accuracy 97.1%): Mathematica rendering of the sqrt-free fma form above. *)
code[a_, b_, c_] := N[(N[(N[((-c) / N[(b * b), $MachinePrecision]), $MachinePrecision] * N[(c / b), $MachinePrecision] + N[(N[(N[(N[Power[c, 3.0], $MachinePrecision] * N[(N[(c * a), $MachinePrecision] * -5.0 + N[(-2.0 * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[b, -7.0], $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision]), $MachinePrecision] * a + N[((-c) / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\frac{-c}{b \cdot b}, \frac{c}{b}, \left(\left({c}^{3} \cdot \mathsf{fma}\left(c \cdot a, -5, -2 \cdot \left(b \cdot b\right)\right)\right) \cdot {b}^{-7}\right) \cdot a\right), a, \frac{-c}{b}\right)
\end{array}
Initial program 16.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in b around 0
Applied rewrites97.1%
Taylor expanded in c around 0
Applied rewrites97.1%
Applied rewrites97.1%
Final simplification97.1%
(FPCore (a b c) :precision binary64 (/ (- (/ (* (* (* (pow c 3.0) a) a) -2.0) (pow b 4.0)) (fma (/ c b) (/ (* c a) b) c)) b))
/* Herbie alternative (trace accuracy 96.4%): series form from Taylor
 * expansion in b around inf (see the rewrite trace below this listing). */
double code(double a, double b, double c) {
return (((((pow(c, 3.0) * a) * a) * -2.0) / pow(b, 4.0)) - fma((c / b), ((c * a) / b), c)) / b;
}
# Herbie alternative (trace accuracy 96.4%): Julia rendering of the b-around-inf expansion above.
function code(a, b, c) return Float64(Float64(Float64(Float64(Float64(Float64((c ^ 3.0) * a) * a) * -2.0) / (b ^ 4.0)) - fma(Float64(c / b), Float64(Float64(c * a) / b), c)) / b) end
(* Herbie alternative (trace accuracy 96.4%): Mathematica rendering of the b-around-inf expansion above. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[(N[(N[Power[c, 3.0], $MachinePrecision] * a), $MachinePrecision] * a), $MachinePrecision] * -2.0), $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision] - N[(N[(c / b), $MachinePrecision] * N[(N[(c * a), $MachinePrecision] / b), $MachinePrecision] + c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\left(\left({c}^{3} \cdot a\right) \cdot a\right) \cdot -2}{{b}^{4}} - \mathsf{fma}\left(\frac{c}{b}, \frac{c \cdot a}{b}, c\right)}{b}
\end{array}
Initial program 16.0%
Taylor expanded in b around inf
lower-/.f64N/A
Applied rewrites96.4%
(FPCore (a b c) :precision binary64 (/ (- (- c) (fma 2.0 (/ (* (* a a) (pow c 3.0)) (pow b 4.0)) (/ (* (* c c) a) (* b b)))) b))
/* Herbie alternative (trace accuracy 96.4%): fma-based series form from
 * expansions in a around 0, b around 0 and b around -inf (trace below). */
double code(double a, double b, double c) {
return (-c - fma(2.0, (((a * a) * pow(c, 3.0)) / pow(b, 4.0)), (((c * c) * a) / (b * b)))) / b;
}
# Herbie alternative (trace accuracy 96.4%): Julia rendering of the fma series form above.
function code(a, b, c) return Float64(Float64(Float64(-c) - fma(2.0, Float64(Float64(Float64(a * a) * (c ^ 3.0)) / (b ^ 4.0)), Float64(Float64(Float64(c * c) * a) / Float64(b * b)))) / b) end
(* Herbie alternative (trace accuracy 96.4%): Mathematica rendering of the fma series form above. *)
code[a_, b_, c_] := N[(N[((-c) - N[(2.0 * N[(N[(N[(a * a), $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision] + N[(N[(N[(c * c), $MachinePrecision] * a), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-c\right) - \mathsf{fma}\left(2, \frac{\left(a \cdot a\right) \cdot {c}^{3}}{{b}^{4}}, \frac{\left(c \cdot c\right) \cdot a}{b \cdot b}\right)}{b}
\end{array}
Initial program 16.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in b around 0
Applied rewrites97.1%
Taylor expanded in b around -inf
Applied rewrites96.4%
Final simplification96.4%
(FPCore (a b c) :precision binary64 (fma (/ (fma -2.0 (/ (* (pow c 3.0) a) (* b b)) (* (- c) c)) (pow b 3.0)) a (/ (- c) b)))
/* Herbie alternative (trace accuracy 96.4%): fma form from expansions in
 * a around 0, b around 0 and b around inf (see trace below). */
double code(double a, double b, double c) {
return fma((fma(-2.0, ((pow(c, 3.0) * a) / (b * b)), (-c * c)) / pow(b, 3.0)), a, (-c / b));
}
# Herbie alternative (trace accuracy 96.4%): Julia rendering of the fma form above.
function code(a, b, c) return fma(Float64(fma(-2.0, Float64(Float64((c ^ 3.0) * a) / Float64(b * b)), Float64(Float64(-c) * c)) / (b ^ 3.0)), a, Float64(Float64(-c) / b)) end
(* Herbie alternative (trace accuracy 96.4%): Mathematica rendering of the fma form above. *)
code[a_, b_, c_] := N[(N[(N[(-2.0 * N[(N[(N[Power[c, 3.0], $MachinePrecision] * a), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + N[((-c) * c), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] * a + N[((-c) / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{\mathsf{fma}\left(-2, \frac{{c}^{3} \cdot a}{b \cdot b}, \left(-c\right) \cdot c\right)}{{b}^{3}}, a, \frac{-c}{b}\right)
\end{array}
Initial program 16.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in b around 0
Applied rewrites97.1%
Taylor expanded in b around inf
Applied rewrites96.4%
Final simplification96.4%
(FPCore (a b c) :precision binary64 (* (fma (/ (fma (* (* a a) -2.0) c (* (* (- b) b) a)) (pow b 5.0)) c (/ -1.0 b)) c))
/* Herbie alternative (trace accuracy 96.0%): fma form from expansions in
 * a, c and b around 0 (see trace below this listing). */
double code(double a, double b, double c) {
return fma((fma(((a * a) * -2.0), c, ((-b * b) * a)) / pow(b, 5.0)), c, (-1.0 / b)) * c;
}
# Herbie alternative (trace accuracy 96.0%): Julia rendering of the fma form above.
function code(a, b, c) return Float64(fma(Float64(fma(Float64(Float64(a * a) * -2.0), c, Float64(Float64(Float64(-b) * b) * a)) / (b ^ 5.0)), c, Float64(-1.0 / b)) * c) end
(* Herbie alternative (trace accuracy 96.0%): Mathematica rendering of the fma form above. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[(N[(a * a), $MachinePrecision] * -2.0), $MachinePrecision] * c + N[(N[((-b) * b), $MachinePrecision] * a), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * c + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision] * c), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{\mathsf{fma}\left(\left(a \cdot a\right) \cdot -2, c, \left(\left(-b\right) \cdot b\right) \cdot a\right)}{{b}^{5}}, c, \frac{-1}{b}\right) \cdot c
\end{array}
Initial program 16.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites96.0%
Taylor expanded in b around 0
Applied rewrites96.0%
Final simplification96.0%
(FPCore (a b c) :precision binary64 (* (/ (fma (* (* a a) -2.0) (* c c) (* (* (- b) b) (fma a c (* b b)))) (pow b 5.0)) c))
/* Herbie alternative (trace accuracy 95.6%): nested-fma form from
 * expansions in a, c and b around 0 (see trace below). */
double code(double a, double b, double c) {
return (fma(((a * a) * -2.0), (c * c), ((-b * b) * fma(a, c, (b * b)))) / pow(b, 5.0)) * c;
}
# Herbie alternative (trace accuracy 95.6%): Julia rendering of the nested-fma form above.
function code(a, b, c) return Float64(Float64(fma(Float64(Float64(a * a) * -2.0), Float64(c * c), Float64(Float64(Float64(-b) * b) * fma(a, c, Float64(b * b)))) / (b ^ 5.0)) * c) end
(* Herbie alternative (trace accuracy 95.6%): Mathematica rendering of the nested-fma form above. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[(a * a), $MachinePrecision] * -2.0), $MachinePrecision] * N[(c * c), $MachinePrecision] + N[(N[((-b) * b), $MachinePrecision] * N[(a * c + N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * c), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\left(a \cdot a\right) \cdot -2, c \cdot c, \left(\left(-b\right) \cdot b\right) \cdot \mathsf{fma}\left(a, c, b \cdot b\right)\right)}{{b}^{5}} \cdot c
\end{array}
Initial program 16.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites96.0%
Taylor expanded in b around 0
Applied rewrites95.6%
Final simplification95.6%
(FPCore (a b c) :precision binary64 (- (fma a (/ (* c c) (pow b 3.0)) (/ c b))))
/* Herbie alternative (trace accuracy 95.2%): compact -fma(a, c^2/b^3, c/b)
 * form from repeated expansion in a around 0 (see trace below). */
double code(double a, double b, double c) {
return -fma(a, ((c * c) / pow(b, 3.0)), (c / b));
}
# Herbie alternative (trace accuracy 95.2%): Julia rendering of the compact fma form above.
function code(a, b, c) return Float64(-fma(a, Float64(Float64(c * c) / (b ^ 3.0)), Float64(c / b))) end
(* Herbie alternative (trace accuracy 95.2%): Mathematica rendering of the compact fma form above. *)
code[a_, b_, c_] := (-N[(a * N[(N[(c * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(a, \frac{c \cdot c}{{b}^{3}}, \frac{c}{b}\right)
\end{array}
Initial program 16.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-/.f64 95.2
Applied rewrites95.2%
(FPCore (a b c) :precision binary64 (/ (fma a (/ (* c c) (* b b)) c) (- b)))
/* Herbie alternative (trace accuracy 95.2%): fma(a, c^2/b^2, c) / -b,
 * from Taylor expansion in b around inf (see trace below). */
double code(double a, double b, double c) {
return fma(a, ((c * c) / (b * b)), c) / -b;
}
# Herbie alternative (trace accuracy 95.2%): Julia rendering of the fma(a, c^2/b^2, c) / -b form above.
function code(a, b, c) return Float64(fma(a, Float64(Float64(c * c) / Float64(b * b)), c) / Float64(-b)) end
(* Herbie alternative (trace accuracy 95.2%): Mathematica rendering of the fma-over-negated-b form above. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot b}, c\right)}{-b}
\end{array}
Initial program 16.0%
lift--.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval 16.0
Applied rewrites16.0%
lift-+.f64N/A
+-commutativeN/A
lift-fma.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-fma.f64N/A
lift-neg.f64N/A
sub-negN/A
flip--N/A
Applied rewrites16.2%
Taylor expanded in b around inf
distribute-lft-outN/A
associate-*r/N/A
mul-1-negN/A
lower-neg.f64N/A
lower-/.f64N/A
+-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64 95.2
Applied rewrites95.2%
Final simplification95.2%
(FPCore (a b c) :precision binary64 (* (/ (fma (- a) (/ c (* b b)) -1.0) b) c))
/* Herbie alternative (trace accuracy 94.8%): fma form from expansions in
 * a and c around 0 and b around inf (see trace below). */
double code(double a, double b, double c) {
return (fma(-a, (c / (b * b)), -1.0) / b) * c;
}
# Herbie alternative (trace accuracy 94.8%): Julia rendering of the fma form above.
function code(a, b, c) return Float64(Float64(fma(Float64(-a), Float64(c / Float64(b * b)), -1.0) / b) * c) end
(* Herbie alternative (trace accuracy 94.8%): Mathematica rendering of the fma form above. *)
code[a_, b_, c_] := N[(N[(N[((-a) * N[(c / N[(b * b), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / b), $MachinePrecision] * c), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-a, \frac{c}{b \cdot b}, -1\right)}{b} \cdot c
\end{array}
Initial program 16.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.1%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites96.0%
Taylor expanded in b around inf
Applied rewrites94.8%
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Herbie alternative (trace accuracy 91.4%): leading-order approximation
 * -c/b from Taylor expansion in c around 0; ignores a entirely. */
double code(double a, double b, double c) {
return -c / b;
}
! Herbie alternative (trace accuracy 91.4%): leading-order approximation
! -c/b from Taylor expansion in c around 0; ignores a entirely.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
// Herbie alternative (trace accuracy 91.4%): leading-order approximation -c/b; ignores a.
public static double code(double a, double b, double c) {
return -c / b;
}
def code(a, b, c):
    """Herbie alternative: leading-order approximation -c / b.

    Derived by Taylor expansion in c around 0 (trace accuracy 91.4%);
    the parameter ``a`` is accepted but unused.
    """
    numerator = -c
    return numerator / b
# Herbie alternative (trace accuracy 91.4%): leading-order approximation -c/b; ignores a.
function code(a, b, c) return Float64(Float64(-c) / b) end
% Herbie alternative (trace accuracy 91.4%): leading-order approximation -c/b; ignores a.
function tmp = code(a, b, c) tmp = -c / b; end
(* Herbie alternative (trace accuracy 91.4%): leading-order approximation -c/b; ignores a. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 16.0%
Taylor expanded in c around 0
associate-*r/N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f64 91.4
Applied rewrites91.4%
(FPCore (a b c) :precision binary64 (/ 0.0 a))
/* Herbie alternative (trace accuracy 3.3%): degenerate constant form 0/a
 * from Taylor expansion in c around 0; b and c are ignored. */
double code(double a, double b, double c) {
return 0.0 / a;
}
! Herbie alternative (trace accuracy 3.3%): degenerate constant form 0/a;
! b and c are ignored.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 0.0d0 / a
end function
// Herbie alternative (trace accuracy 3.3%): degenerate constant form 0/a; b and c are ignored.
public static double code(double a, double b, double c) {
return 0.0 / a;
}
def code(a, b, c):
    """Herbie alternative: degenerate constant form 0.0 / a.

    Trace accuracy 3.3%; ``b`` and ``c`` are accepted but unused.
    """
    return 0.0 / a
# Herbie alternative (trace accuracy 3.3%): degenerate constant form 0/a; b and c are ignored.
function code(a, b, c) return Float64(0.0 / a) end
% Herbie alternative (trace accuracy 3.3%): degenerate constant form 0/a; b and c are ignored.
function tmp = code(a, b, c) tmp = 0.0 / a; end
(* Herbie alternative (trace accuracy 3.3%): degenerate constant form 0/a; b and c are ignored. *)
code[a_, b_, c_] := N[(0.0 / a), $MachinePrecision]
\begin{array}{l}
\\
\frac{0}{a}
\end{array}
Initial program 16.0%
lift--.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval 16.0
Applied rewrites16.0%
lift-+.f64N/A
+-commutativeN/A
lift-fma.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-fma.f64N/A
lift-neg.f64N/A
sub-negN/A
flip--N/A
Applied rewrites16.2%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-/.f64N/A
lift-*.f64N/A
associate-/l*N/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
Applied rewrites16.3%
Taylor expanded in c around 0
associate-*r/N/A
distribute-rgt-outN/A
metadata-evalN/A
mul0-rgtN/A
metadata-evalN/A
lower-/.f64 3.3
Applied rewrites3.3%
herbie shell --seed 2024248
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))