
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! One root of a*x**2 + b*x + c = 0: the '+' branch of the quadratic formula.
! Double-precision throughout (4.0d0 / 2.0d0 literals keep constants in real(8)).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** One root of a*x^2 + b*x + c = 0: the '+' branch of the quadratic formula. */
public static double code(double a, double b, double c) {
    double discriminant = (b * b) - ((4.0 * a) * c);
    return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
    """One root of a*x^2 + b*x + c = 0: the '+' branch of the quadratic formula."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (math.sqrt(discriminant) - b) / (2.0 * a)
# '+' branch of the quadratic formula; explicit Float64() wrappers pin binary64 rounding at each step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% '+' branch of the quadratic formula: (-b + sqrt(b^2 - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* '+' branch of the quadratic formula; N[..., $MachinePrecision] forces machine-precision rounding at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! One root of a*x**2 + b*x + c = 0: the '+' branch of the quadratic formula.
! Double-precision throughout (4.0d0 / 2.0d0 literals keep constants in real(8)).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** One root of a*x^2 + b*x + c = 0: the '+' branch of the quadratic formula. */
public static double code(double a, double b, double c) {
    double discriminant = (b * b) - ((4.0 * a) * c);
    return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
    """One root of a*x^2 + b*x + c = 0: the '+' branch of the quadratic formula."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (math.sqrt(discriminant) - b) / (2.0 * a)
# '+' branch of the quadratic formula; explicit Float64() wrappers pin binary64 rounding at each step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% '+' branch of the quadratic formula: (-b + sqrt(b^2 - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* '+' branch of the quadratic formula; N[..., $MachinePrecision] forces machine-precision rounding at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(let* ((t_0 (* b (* b b))))
(fma
(fma
c
(* (* c c) (* -2.0 (pow b -5.0)))
(/ (* -0.25 (* a (* (* c c) (* (* c c) 20.0)))) (* t_0 (* b t_0))))
(* a a)
(/ (fma (* c c) (/ a (* b b)) c) (- b)))))
/* Herbie-generated series approximation (Taylor in a around 0) of
 * (-b + sqrt(b*b - 4*a*c)) / (2*a). The fma nesting and operation order
 * are accuracy-motivated -- do not re-associate. */
double code(double a, double b, double c) {
double t_0 = b * (b * b); /* t_0 = b^3 */
return fma(fma(c, ((c * c) * (-2.0 * pow(b, -5.0))), ((-0.25 * (a * ((c * c) * ((c * c) * 20.0)))) / (t_0 * (b * t_0)))), (a * a), (fma((c * c), (a / (b * b)), c) / -b));
}
# Herbie series approximation; the fma calls and explicit Float64() wrappers fix the evaluation order -- keep as generated.
function code(a, b, c) t_0 = Float64(b * Float64(b * b)) return fma(fma(c, Float64(Float64(c * c) * Float64(-2.0 * (b ^ -5.0))), Float64(Float64(-0.25 * Float64(a * Float64(Float64(c * c) * Float64(Float64(c * c) * 20.0)))) / Float64(t_0 * Float64(b * t_0)))), Float64(a * a), Float64(fma(Float64(c * c), Float64(a / Float64(b * b)), c) / Float64(-b))) end
(* Herbie series approximation; Block introduces t$95$0 = b*(b*b) and N[..., $MachinePrecision] fixes the rounding points -- keep as generated. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, N[(N[(c * N[(N[(c * c), $MachinePrecision] * N[(-2.0 * N[Power[b, -5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-0.25 * N[(a * N[(N[(c * c), $MachinePrecision] * N[(N[(c * c), $MachinePrecision] * 20.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * N[(b * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(a * a), $MachinePrecision] + N[(N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := b \cdot \left(b \cdot b\right)\\
\mathsf{fma}\left(\mathsf{fma}\left(c, \left(c \cdot c\right) \cdot \left(-2 \cdot {b}^{-5}\right), \frac{-0.25 \cdot \left(a \cdot \left(\left(c \cdot c\right) \cdot \left(\left(c \cdot c\right) \cdot 20\right)\right)\right)}{t\_0 \cdot \left(b \cdot t\_0\right)}\right), a \cdot a, \frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}\right)
\end{array}
\end{array}
Initial program 31.7%
Taylor expanded in a around 0
Applied rewrites 96.5%
Applied rewrites 96.5%
Final simplification 96.5%
(FPCore (a b c)
:precision binary64
(/
1.0
(fma
a
(fma
a
(* -2.0 (fma a (- (/ (* c c) (pow b 5.0))) (* (/ c (* b (* b b))) -0.5)))
(/ 1.0 b))
(/ (- b) c))))
/* Herbie-generated reciprocal-series rewrite of (-b + sqrt(b*b - 4*a*c)) / (2*a);
 * the nested fma calls are accuracy-motivated -- keep the exact operation order. */
double code(double a, double b, double c) {
return 1.0 / fma(a, fma(a, (-2.0 * fma(a, -((c * c) / pow(b, 5.0)), ((c / (b * (b * b))) * -0.5))), (1.0 / b)), (-b / c));
}
# Herbie reciprocal-series rewrite; fma calls and Float64() wrappers fix the evaluation order -- keep as generated.
function code(a, b, c) return Float64(1.0 / fma(a, fma(a, Float64(-2.0 * fma(a, Float64(-Float64(Float64(c * c) / (b ^ 5.0))), Float64(Float64(c / Float64(b * Float64(b * b))) * -0.5))), Float64(1.0 / b)), Float64(Float64(-b) / c))) end
(* Herbie reciprocal-series rewrite; N[..., $MachinePrecision] fixes the rounding points -- keep as generated. *)
code[a_, b_, c_] := N[(1.0 / N[(a * N[(a * N[(-2.0 * N[(a * (-N[(N[(c * c), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]) + N[(N[(c / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / b), $MachinePrecision]), $MachinePrecision] + N[((-b) / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, -2 \cdot \mathsf{fma}\left(a, -\frac{c \cdot c}{{b}^{5}}, \frac{c}{b \cdot \left(b \cdot b\right)} \cdot -0.5\right), \frac{1}{b}\right), \frac{-b}{c}\right)}
\end{array}
Initial program 31.7%
Taylor expanded in a around inf
lower-*.f64 N/A
cancel-sign-sub-inv N/A
metadata-eval N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 31.8
Applied rewrites 31.8%
Applied rewrites 31.7%
Taylor expanded in a around 0
Applied rewrites 96.4%
Taylor expanded in b around 0
distribute-lft1-in N/A
metadata-eval N/A
distribute-rgt-out-- N/A
metadata-eval N/A
*-commutative N/A
mul-1-neg N/A
distribute-frac-neg N/A
distribute-neg-frac2 N/A
mul-1-neg N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 N/A
mul-1-neg N/A
lower-neg.f64 N/A
lower-pow.f64 96.4
Applied rewrites 96.4%
Final simplification 96.4%
(FPCore (a b c) :precision binary64 (/ c (fma c (fma (* a (/ a (* b (* b b)))) (* -0.5 (* c -2.0)) (/ a b)) (- b))))
/* Herbie-generated rewrite c / fma(...); the fma nesting is accuracy-motivated,
 * so the operation order must be kept exactly as generated. */
double code(double a, double b, double c) {
return c / fma(c, fma((a * (a / (b * (b * b)))), (-0.5 * (c * -2.0)), (a / b)), -b);
}
# Herbie-generated rewrite; fma calls and Float64() wrappers fix the evaluation order -- keep as generated.
function code(a, b, c) return Float64(c / fma(c, fma(Float64(a * Float64(a / Float64(b * Float64(b * b)))), Float64(-0.5 * Float64(c * -2.0)), Float64(a / b)), Float64(-b))) end
(* Herbie-generated rewrite; N[..., $MachinePrecision] fixes the rounding points -- keep as generated. *)
code[a_, b_, c_] := N[(c / N[(c * N[(N[(a * N[(a / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(-0.5 * N[(c * -2.0), $MachinePrecision]), $MachinePrecision] + N[(a / b), $MachinePrecision]), $MachinePrecision] + (-b)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{\mathsf{fma}\left(c, \mathsf{fma}\left(a \cdot \frac{a}{b \cdot \left(b \cdot b\right)}, -0.5 \cdot \left(c \cdot -2\right), \frac{a}{b}\right), -b\right)}
\end{array}
Initial program 31.7%
Taylor expanded in a around inf
lower-*.f64 N/A
cancel-sign-sub-inv N/A
metadata-eval N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 31.8
Applied rewrites 31.8%
Applied rewrites 31.7%
Taylor expanded in c around 0
lower-/.f64 N/A
Applied rewrites 94.9%
Applied rewrites 95.2%
(FPCore (a b c) :precision binary64 (/ 1.0 (fma a (fma a (/ c (* b (* b b))) (/ 1.0 b)) (/ (- b) c))))
double code(double a, double b, double c) {
return 1.0 / fma(a, fma(a, (c / (b * (b * b))), (1.0 / b)), (-b / c));
}
# Herbie reciprocal rewrite; fma calls and Float64() wrappers fix the evaluation order -- keep as generated.
function code(a, b, c) return Float64(1.0 / fma(a, fma(a, Float64(c / Float64(b * Float64(b * b))), Float64(1.0 / b)), Float64(Float64(-b) / c))) end
(* Herbie reciprocal rewrite; N[..., $MachinePrecision] fixes the rounding points -- keep as generated. *)
code[a_, b_, c_] := N[(1.0 / N[(a * N[(a * N[(c / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / b), $MachinePrecision]), $MachinePrecision] + N[((-b) / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(a, \mathsf{fma}\left(a, \frac{c}{b \cdot \left(b \cdot b\right)}, \frac{1}{b}\right), \frac{-b}{c}\right)}
\end{array}
Initial program 31.7%
Taylor expanded in a around inf
lower-*.f64 N/A
cancel-sign-sub-inv N/A
metadata-eval N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 31.8
Applied rewrites 31.8%
Applied rewrites 31.7%
Taylor expanded in c around 0
lower-/.f64 N/A
Applied rewrites 94.9%
Taylor expanded in a around 0
+-commutative N/A
lower-fma.f64 N/A
+-commutative N/A
associate-/l* N/A
lower-fma.f64 N/A
lower-/.f64 N/A
cube-mult N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
lower-/.f64 N/A
mul-1-neg N/A
distribute-neg-frac2 N/A
mul-1-neg N/A
lower-/.f64 N/A
mul-1-neg N/A
lower-neg.f64 94.9
Applied rewrites 94.9%
Final simplification 94.9%
(FPCore (a b c) :precision binary64 (/ 1.0 (/ (fma c (/ (fma a (* a (/ c (* b b))) a) b) (- b)) c)))
/* Herbie-generated rewrite 1 / (fma(...) / c); the fma nesting is
 * accuracy-motivated -- keep the exact operation order. */
double code(double a, double b, double c) {
return 1.0 / (fma(c, (fma(a, (a * (c / (b * b))), a) / b), -b) / c);
}
# Herbie-generated rewrite; fma calls and Float64() wrappers fix the evaluation order -- keep as generated.
function code(a, b, c) return Float64(1.0 / Float64(fma(c, Float64(fma(a, Float64(a * Float64(c / Float64(b * b))), a) / b), Float64(-b)) / c)) end
(* Herbie-generated rewrite; N[..., $MachinePrecision] fixes the rounding points -- keep as generated. *)
code[a_, b_, c_] := N[(1.0 / N[(N[(c * N[(N[(a * N[(a * N[(c / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + a), $MachinePrecision] / b), $MachinePrecision] + (-b)), $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{\mathsf{fma}\left(c, \frac{\mathsf{fma}\left(a, a \cdot \frac{c}{b \cdot b}, a\right)}{b}, -b\right)}{c}}
\end{array}
Initial program 31.7%
Taylor expanded in a around inf
lower-*.f64 N/A
cancel-sign-sub-inv N/A
metadata-eval N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 31.8
Applied rewrites 31.8%
Applied rewrites 31.7%
Taylor expanded in c around 0
lower-/.f64 N/A
Applied rewrites 94.9%
Taylor expanded in b around inf
lower-/.f64 N/A
+-commutative N/A
associate-/l* N/A
unpow2 N/A
associate-*l* N/A
associate-/l* N/A
lower-fma.f64 N/A
associate-/l* N/A
lower-*.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 94.9
Applied rewrites 94.9%
(FPCore (a b c) :precision binary64 (/ 1.0 (/ (- (/ (* c a) b) b) c)))
/* Herbie-simplified alternative: 1 / (((c*a/b) - b) / c). */
double code(double a, double b, double c) {
    double bracket = ((c * a) / b) - b;
    return 1.0 / (bracket / c);
}
! Herbie-simplified alternative: 1 / (((c*a/b) - b) / c).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 1.0d0 / ((((c * a) / b) - b) / c)
end function
/** Herbie-simplified alternative: 1 / (((c*a/b) - b) / c). */
public static double code(double a, double b, double c) {
    double bracket = ((c * a) / b) - b;
    return 1.0 / (bracket / c);
}
def code(a, b, c):
    """Herbie-simplified alternative: 1 / (((c*a/b) - b) / c)."""
    bracket = ((c * a) / b) - b
    return 1.0 / (bracket / c)
# Herbie-simplified alternative: 1 / (((c*a/b) - b) / c), with explicit binary64 rounding at each step.
function code(a, b, c) return Float64(1.0 / Float64(Float64(Float64(Float64(c * a) / b) - b) / c)) end
% Herbie-simplified alternative: 1 / (((c*a/b) - b) / c).
function tmp = code(a, b, c) tmp = 1.0 / ((((c * a) / b) - b) / c); end
(* Herbie-simplified alternative: 1 / (((c*a/b) - b) / c), with machine-precision rounding at each step. *)
code[a_, b_, c_] := N[(1.0 / N[(N[(N[(N[(c * a), $MachinePrecision] / b), $MachinePrecision] - b), $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{\frac{c \cdot a}{b} - b}{c}}
\end{array}
Initial program 31.7%
Taylor expanded in a around inf
lower-*.f64 N/A
cancel-sign-sub-inv N/A
metadata-eval N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 31.8
Applied rewrites 31.8%
Applied rewrites 31.7%
Taylor expanded in c around 0
+-commutative N/A
*-commutative N/A
associate-*r/ N/A
lower-/.f64 N/A
associate-*r/ N/A
*-commutative N/A
mul-1-neg N/A
unsub-neg N/A
lower--.f64 N/A
lower-/.f64 N/A
*-commutative N/A
lower-*.f64 91.1
Applied rewrites 91.1%
(FPCore (a b c) :precision binary64 (/ 1.0 (- (/ a b) (/ b c))))
/* Herbie-simplified alternative: 1 / (a/b - b/c). */
double code(double a, double b, double c) {
    double left = a / b;
    double right = b / c;
    return 1.0 / (left - right);
}
! Herbie-simplified alternative: 1 / (a/b - b/c).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 1.0d0 / ((a / b) - (b / c))
end function
/** Herbie-simplified alternative: 1 / (a/b - b/c). */
public static double code(double a, double b, double c) {
    double left = a / b;
    double right = b / c;
    return 1.0 / (left - right);
}
def code(a, b, c):
    """Herbie-simplified alternative: 1 / (a/b - b/c)."""
    left = a / b
    right = b / c
    return 1.0 / (left - right)
# Herbie-simplified alternative: 1 / (a/b - b/c), with explicit binary64 rounding at each step.
function code(a, b, c) return Float64(1.0 / Float64(Float64(a / b) - Float64(b / c))) end
% Herbie-simplified alternative: 1 / (a/b - b/c).
function tmp = code(a, b, c) tmp = 1.0 / ((a / b) - (b / c)); end
(* Herbie-simplified alternative: 1 / (a/b - b/c), with machine-precision rounding at each step. *)
code[a_, b_, c_] := N[(1.0 / N[(N[(a / b), $MachinePrecision] - N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{a}{b} - \frac{b}{c}}
\end{array}
Initial program 31.7%
Taylor expanded in a around inf
lower-*.f64 N/A
cancel-sign-sub-inv N/A
metadata-eval N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 31.8
Applied rewrites 31.8%
Applied rewrites 31.7%
Taylor expanded in a around 0
+-commutative N/A
mul-1-neg N/A
unsub-neg N/A
lower--.f64 N/A
lower-/.f64 N/A
lower-/.f64 91.1
Applied rewrites 91.1%
(FPCore (a b c) :precision binary64 (/ c (- b)))
/* Leading-order approximation c / (-b); parameter a is unused by design. */
double code(double a, double b, double c) {
    double negated_b = -b;
    return c / negated_b;
}
! Leading-order approximation c / (-b); argument a is unused by design.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c / -b
end function
/** Leading-order approximation c / (-b); parameter a is unused by design. */
public static double code(double a, double b, double c) {
    double negatedB = -b;
    return c / negatedB;
}
def code(a, b, c):
    """Leading-order approximation c / (-b); parameter a is unused by design."""
    negated_b = -b
    return c / negated_b
# Leading-order approximation c / (-b); argument a is unused by design.
function code(a, b, c) return Float64(c / Float64(-b)) end
% Leading-order approximation c / (-b); argument a is unused by design.
function tmp = code(a, b, c) tmp = c / -b; end
(* Leading-order approximation c / (-b); argument a is unused by design. *)
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{-b}
\end{array}
Initial program 31.7%
Taylor expanded in b around inf
mul-1-neg N/A
distribute-neg-frac2 N/A
lower-/.f64 N/A
lower-neg.f64 81.2
Applied rewrites 81.2%
herbie shell --seed 2024219
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))