
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
! One root of a*x**2 + b*x + c = 0 via the textbook quadratic formula
! (-b + sqrt(b*b - 4*a*c)) / (2*a), evaluated in double precision.
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** One root of a*x^2 + b*x + c = 0: (-b + sqrt(b^2 - 4ac)) / (2a). */
public static double code(double a, double b, double c) {
double disc = (b * b) - ((4.0 * a) * c);
double root = Math.sqrt(disc);
return (-b + root) / (2.0 * a);
}
def code(a, b, c):
    """One root of a*x^2 + b*x + c = 0: (-b + sqrt(b^2 - 4ac)) / (2a)."""
    disc = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(disc)) / (2.0 * a)
# One root of a*x^2 + b*x + c = 0: (-b + sqrt(b^2 - 4ac)) / (2a), every step in Float64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% One root of a*x^2 + b*x + c = 0: (-b + sqrt(b^2 - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* One root of a*x^2 + b*x + c = 0; N[..., $MachinePrecision] rounds each intermediate. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
! Baseline quadratic root (-b + sqrt(b*b - 4*a*c)) / (2*a) in double precision.
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Baseline quadratic root (-b + sqrt(b^2 - 4ac)) / (2a). */
public static double code(double a, double b, double c) {
double d = (b * b) - ((4.0 * a) * c);
return (-b + Math.sqrt(d)) / (2.0 * a);
}
def code(a, b, c):
    """Baseline quadratic root (-b + sqrt(b^2 - 4ac)) / (2a)."""
    d = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(d)) / (2.0 * a)
# Baseline quadratic root (-b + sqrt(b^2 - 4ac)) / (2a), every step rounded to Float64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Baseline quadratic root (-b + sqrt(b^2 - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Baseline quadratic root; N[..., $MachinePrecision] rounds each intermediate. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* a c) (* b b))))
(if (<= b 0.258)
(/ (- (* b b) t_0) (* (- (- b) (sqrt t_0)) (* 2.0 a)))
(fma
(fma
(fma
(* -0.25 a)
(* (/ (pow c 4.0) (pow b 6.0)) (/ 20.0 b))
(/ (* (pow c 3.0) -2.0) (pow b 5.0)))
a
(/ (* (- c) c) (pow b 3.0)))
a
(/ (- c) b)))))
/* Herbie alternative for the quadratic root (binary64).
 * t_0 = fma(-4, a*c, b*b) is the discriminant b^2 - 4ac with one rounding.
 * b <= 0.258: rationalized form (b^2 - t_0) / ((-b - sqrt(t_0)) * 2a),
 *   avoiding the cancellation in -b + sqrt(t_0).
 * b > 0.258: nested-fma polynomial in a with leading term -c/b -- per the
 *   accompanying derivation log, a Taylor expansion in a around 0.
 * Do not reassociate: the exact operation order is the accuracy fix. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (a * c), (b * b));
double tmp;
if (b <= 0.258) {
tmp = ((b * b) - t_0) / ((-b - sqrt(t_0)) * (2.0 * a));
} else {
tmp = fma(fma(fma((-0.25 * a), ((pow(c, 4.0) / pow(b, 6.0)) * (20.0 / b)), ((pow(c, 3.0) * -2.0) / pow(b, 5.0))), a, ((-c * c) / pow(b, 3.0))), a, (-c / b));
}
return tmp;
}
# Herbie alternative: rationalized quadratic root for b <= 0.258, nested-fma series in a otherwise.
function code(a, b, c) t_0 = fma(-4.0, Float64(a * c), Float64(b * b)) tmp = 0.0 if (b <= 0.258) tmp = Float64(Float64(Float64(b * b) - t_0) / Float64(Float64(Float64(-b) - sqrt(t_0)) * Float64(2.0 * a))); else tmp = fma(fma(fma(Float64(-0.25 * a), Float64(Float64((c ^ 4.0) / (b ^ 6.0)) * Float64(20.0 / b)), Float64(Float64((c ^ 3.0) * -2.0) / (b ^ 5.0))), a, Float64(Float64(Float64(-c) * c) / (b ^ 3.0))), a, Float64(Float64(-c) / b)); end return tmp end
(* Herbie alternative: rationalized quadratic root for b <= 0.258, series in a otherwise. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(a * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.258], N[(N[(N[(b * b), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(-0.25 * a), $MachinePrecision] * N[(N[(N[Power[c, 4.0], $MachinePrecision] / N[Power[b, 6.0], $MachinePrecision]), $MachinePrecision] * N[(20.0 / b), $MachinePrecision]), $MachinePrecision] + N[(N[(N[Power[c, 3.0], $MachinePrecision] * -2.0), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * a + N[(N[((-c) * c), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * a + N[((-c) / b), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, a \cdot c, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.258:\\
\;\;\;\;\frac{b \cdot b - t\_0}{\left(\left(-b\right) - \sqrt{t\_0}\right) \cdot \left(2 \cdot a\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25 \cdot a, \frac{{c}^{4}}{{b}^{6}} \cdot \frac{20}{b}, \frac{{c}^{3} \cdot -2}{{b}^{5}}\right), a, \frac{\left(-c\right) \cdot c}{{b}^{3}}\right), a, \frac{-c}{b}\right)\\
\end{array}
\end{array}
if b < 0.25800000000000001Initial program 89.5%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval89.5
Applied rewrites89.5%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites90.3%
if 0.25800000000000001 < b Initial program 51.4%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites91.8%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* a c) (* b b))))
(if (<= b 2.8)
(/ (- (* b b) t_0) (* (- (- b) (sqrt t_0)) (* 2.0 a)))
(fma
(* (- (* (* -2.0 a) (/ c (pow b 5.0))) (pow (pow b 3.0) -1.0)) (* c c))
a
(/ (- c) b)))))
/* Herbie alternative for the quadratic root (binary64).
 * t_0 = fma(-4, a*c, b*b) is the fused discriminant.
 * b <= 2.8: rationalized form (b^2 - t_0) / ((-b - sqrt(t_0)) * 2a).
 * b > 2.8: fma-based truncated series in a (leading term -c/b) -- per the
 *   derivation log, Taylor-expanded in a around 0.
 * Exact operation order is intentional; do not reassociate. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (a * c), (b * b));
double tmp;
if (b <= 2.8) {
tmp = ((b * b) - t_0) / ((-b - sqrt(t_0)) * (2.0 * a));
} else {
tmp = fma(((((-2.0 * a) * (c / pow(b, 5.0))) - pow(pow(b, 3.0), -1.0)) * (c * c)), a, (-c / b));
}
return tmp;
}
# Herbie alternative: rationalized quadratic root for b <= 2.8, fma series in a otherwise.
function code(a, b, c) t_0 = fma(-4.0, Float64(a * c), Float64(b * b)) tmp = 0.0 if (b <= 2.8) tmp = Float64(Float64(Float64(b * b) - t_0) / Float64(Float64(Float64(-b) - sqrt(t_0)) * Float64(2.0 * a))); else tmp = fma(Float64(Float64(Float64(Float64(-2.0 * a) * Float64(c / (b ^ 5.0))) - ((b ^ 3.0) ^ -1.0)) * Float64(c * c)), a, Float64(Float64(-c) / b)); end return tmp end
(* Herbie alternative: rationalized quadratic root for b <= 2.8, series in a otherwise. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(a * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 2.8], N[(N[(N[(b * b), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(-2.0 * a), $MachinePrecision] * N[(c / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Power[N[Power[b, 3.0], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision] * a + N[((-c) / b), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, a \cdot c, b \cdot b\right)\\
\mathbf{if}\;b \leq 2.8:\\
\;\;\;\;\frac{b \cdot b - t\_0}{\left(\left(-b\right) - \sqrt{t\_0}\right) \cdot \left(2 \cdot a\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(\left(-2 \cdot a\right) \cdot \frac{c}{{b}^{5}} - {\left({b}^{3}\right)}^{-1}\right) \cdot \left(c \cdot c\right), a, \frac{-c}{b}\right)\\
\end{array}
\end{array}
if b < 2.7999999999999998Initial program 85.3%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval85.3
Applied rewrites85.3%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites86.1%
if 2.7999999999999998 < b Initial program 49.0%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites92.9%
Taylor expanded in c around 0
Applied rewrites90.3%
Final simplification89.4%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* a c) (* b b))))
(if (<= b 0.258)
(/ (- (* b b) t_0) (* (- (- b) (sqrt t_0)) (* 2.0 a)))
(fma
(/
(fma
(* (* c c) (* (* c c) -5.0))
(* a a)
(* (* (fma (* -2.0 (* (* b b) c)) a (- (pow b 4.0))) c) c))
(pow b 7.0))
a
(/ (- c) b)))))
/* Herbie alternative for the quadratic root (binary64).
 * t_0 = fma(-4, a*c, b*b) is the fused discriminant.
 * b <= 0.258: rationalized form (b^2 - t_0) / ((-b - sqrt(t_0)) * 2a).
 * b > 0.258: fma(P/b^7, a, -c/b) where P is an fma-combined polynomial in
 *   a, b, c -- per the derivation log, repeated Taylor expansion in a and b.
 * Exact operation order is intentional; do not reassociate. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (a * c), (b * b));
double tmp;
if (b <= 0.258) {
tmp = ((b * b) - t_0) / ((-b - sqrt(t_0)) * (2.0 * a));
} else {
tmp = fma((fma(((c * c) * ((c * c) * -5.0)), (a * a), ((fma((-2.0 * ((b * b) * c)), a, -pow(b, 4.0)) * c) * c)) / pow(b, 7.0)), a, (-c / b));
}
return tmp;
}
# Herbie alternative: rationalized quadratic root for b <= 0.258, fma polynomial series otherwise.
function code(a, b, c) t_0 = fma(-4.0, Float64(a * c), Float64(b * b)) tmp = 0.0 if (b <= 0.258) tmp = Float64(Float64(Float64(b * b) - t_0) / Float64(Float64(Float64(-b) - sqrt(t_0)) * Float64(2.0 * a))); else tmp = fma(Float64(fma(Float64(Float64(c * c) * Float64(Float64(c * c) * -5.0)), Float64(a * a), Float64(Float64(fma(Float64(-2.0 * Float64(Float64(b * b) * c)), a, Float64(-(b ^ 4.0))) * c) * c)) / (b ^ 7.0)), a, Float64(Float64(-c) / b)); end return tmp end
(* Herbie alternative: rationalized quadratic root for b <= 0.258, polynomial series otherwise. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(a * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.258], N[(N[(N[(b * b), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(c * c), $MachinePrecision] * N[(N[(c * c), $MachinePrecision] * -5.0), $MachinePrecision]), $MachinePrecision] * N[(a * a), $MachinePrecision] + N[(N[(N[(N[(-2.0 * N[(N[(b * b), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision] * a + (-N[Power[b, 4.0], $MachinePrecision])), $MachinePrecision] * c), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision] * a + N[((-c) / b), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, a \cdot c, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.258:\\
\;\;\;\;\frac{b \cdot b - t\_0}{\left(\left(-b\right) - \sqrt{t\_0}\right) \cdot \left(2 \cdot a\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\mathsf{fma}\left(\left(c \cdot c\right) \cdot \left(\left(c \cdot c\right) \cdot -5\right), a \cdot a, \left(\mathsf{fma}\left(-2 \cdot \left(\left(b \cdot b\right) \cdot c\right), a, -{b}^{4}\right) \cdot c\right) \cdot c\right)}{{b}^{7}}, a, \frac{-c}{b}\right)\\
\end{array}
\end{array}
if b < 0.25800000000000001Initial program 89.5%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval89.5
Applied rewrites89.5%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites90.3%
if 0.25800000000000001 < b Initial program 51.4%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites91.8%
Taylor expanded in b around 0
Applied rewrites91.8%
Taylor expanded in a around 0
Applied rewrites91.8%
Applied rewrites91.8%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* a c) (* b b))))
(if (<= b 2.8)
(/ (- (* b b) t_0) (* (- (- b) (sqrt t_0)) (* 2.0 a)))
(/
(fma
(* (* -2.0 a) a)
(* (* c c) (/ c (pow b 4.0)))
(- (fma (/ (* c c) b) (/ a b) c)))
b))))
/* Herbie alternative for the quadratic root (binary64).
 * t_0 = fma(-4, a*c, b*b) is the fused discriminant.
 * b <= 2.8: rationalized form (b^2 - t_0) / ((-b - sqrt(t_0)) * 2a).
 * b > 2.8: fma-combined series divided by b -- per the derivation log,
 *   a Taylor expansion in b around infinity.
 * Exact operation order is intentional; do not reassociate. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (a * c), (b * b));
double tmp;
if (b <= 2.8) {
tmp = ((b * b) - t_0) / ((-b - sqrt(t_0)) * (2.0 * a));
} else {
tmp = fma(((-2.0 * a) * a), ((c * c) * (c / pow(b, 4.0))), -fma(((c * c) / b), (a / b), c)) / b;
}
return tmp;
}
# Herbie alternative: rationalized quadratic root for b <= 2.8, large-b fma series otherwise.
function code(a, b, c) t_0 = fma(-4.0, Float64(a * c), Float64(b * b)) tmp = 0.0 if (b <= 2.8) tmp = Float64(Float64(Float64(b * b) - t_0) / Float64(Float64(Float64(-b) - sqrt(t_0)) * Float64(2.0 * a))); else tmp = Float64(fma(Float64(Float64(-2.0 * a) * a), Float64(Float64(c * c) * Float64(c / (b ^ 4.0))), Float64(-fma(Float64(Float64(c * c) / b), Float64(a / b), c))) / b); end return tmp end
(* Herbie alternative: rationalized quadratic root for b <= 2.8, large-b series otherwise. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(a * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 2.8], N[(N[(N[(b * b), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(-2.0 * a), $MachinePrecision] * a), $MachinePrecision] * N[(N[(c * c), $MachinePrecision] * N[(c / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + (-N[(N[(N[(c * c), $MachinePrecision] / b), $MachinePrecision] * N[(a / b), $MachinePrecision] + c), $MachinePrecision])), $MachinePrecision] / b), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, a \cdot c, b \cdot b\right)\\
\mathbf{if}\;b \leq 2.8:\\
\;\;\;\;\frac{b \cdot b - t\_0}{\left(\left(-b\right) - \sqrt{t\_0}\right) \cdot \left(2 \cdot a\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\left(-2 \cdot a\right) \cdot a, \left(c \cdot c\right) \cdot \frac{c}{{b}^{4}}, -\mathsf{fma}\left(\frac{c \cdot c}{b}, \frac{a}{b}, c\right)\right)}{b}\\
\end{array}
\end{array}
if b < 2.7999999999999998Initial program 85.3%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval85.3
Applied rewrites85.3%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites86.1%
if 2.7999999999999998 < b Initial program 49.0%
Taylor expanded in b around inf
lower-/.f64N/A
Applied rewrites90.2%
Applied rewrites90.2%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* a c) (* b b))))
(if (<= b 2.8)
(/ (- (* b b) t_0) (* (- (- b) (sqrt t_0)) (* 2.0 a)))
(/
(*
(- (* (- (* (* (* a a) -2.0) (/ c (pow b 4.0))) (/ a (* b b))) c) 1.0)
c)
b))))
/* Herbie alternative for the quadratic root (binary64).
 * t_0 = fma(-4, a*c, b*b) is the fused discriminant.
 * b <= 2.8: rationalized form (b^2 - t_0) / ((-b - sqrt(t_0)) * 2a).
 * b > 2.8: plain-arithmetic nested series in c divided by b -- per the
 *   derivation log, Taylor expansions in b around infinity and c around 0.
 * Exact operation order is intentional; do not reassociate. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (a * c), (b * b));
double tmp;
if (b <= 2.8) {
tmp = ((b * b) - t_0) / ((-b - sqrt(t_0)) * (2.0 * a));
} else {
tmp = (((((((a * a) * -2.0) * (c / pow(b, 4.0))) - (a / (b * b))) * c) - 1.0) * c) / b;
}
return tmp;
}
# Herbie alternative: rationalized quadratic root for b <= 2.8, nested series in c otherwise.
function code(a, b, c) t_0 = fma(-4.0, Float64(a * c), Float64(b * b)) tmp = 0.0 if (b <= 2.8) tmp = Float64(Float64(Float64(b * b) - t_0) / Float64(Float64(Float64(-b) - sqrt(t_0)) * Float64(2.0 * a))); else tmp = Float64(Float64(Float64(Float64(Float64(Float64(Float64(Float64(a * a) * -2.0) * Float64(c / (b ^ 4.0))) - Float64(a / Float64(b * b))) * c) - 1.0) * c) / b); end return tmp end
(* Herbie alternative: rationalized quadratic root for b <= 2.8, nested series in c otherwise. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(a * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 2.8], N[(N[(N[(b * b), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(N[(N[(N[(a * a), $MachinePrecision] * -2.0), $MachinePrecision] * N[(c / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * c), $MachinePrecision] - 1.0), $MachinePrecision] * c), $MachinePrecision] / b), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, a \cdot c, b \cdot b\right)\\
\mathbf{if}\;b \leq 2.8:\\
\;\;\;\;\frac{b \cdot b - t\_0}{\left(\left(-b\right) - \sqrt{t\_0}\right) \cdot \left(2 \cdot a\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(\left(\left(\left(a \cdot a\right) \cdot -2\right) \cdot \frac{c}{{b}^{4}} - \frac{a}{b \cdot b}\right) \cdot c - 1\right) \cdot c}{b}\\
\end{array}
\end{array}
if b < 2.7999999999999998Initial program 85.3%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval85.3
Applied rewrites85.3%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites86.1%
if 2.7999999999999998 < b Initial program 49.0%
Taylor expanded in b around inf
lower-/.f64N/A
Applied rewrites90.2%
Taylor expanded in c around 0
Applied rewrites90.1%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* a c) (* b b))))
(if (<= b 4.8)
(/ (- (* b b) t_0) (* (- (- b) (sqrt t_0)) (* 2.0 a)))
(- (fma (/ a (* b b)) (* (/ c b) c) (/ c b))))))
/* Herbie alternative for the quadratic root (binary64).
 * t_0 = fma(-4, a*c, b*b) is the fused discriminant.
 * b <= 4.8: rationalized form (b^2 - t_0) / ((-b - sqrt(t_0)) * 2a).
 * b > 4.8: -fma(a/b^2, (c/b)*c, c/b), i.e. -(c/b + a*c^2/b^3) -- per the
 *   derivation log, a two-term Taylor expansion in a around 0.
 * Exact operation order is intentional; do not reassociate. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (a * c), (b * b));
double tmp;
if (b <= 4.8) {
tmp = ((b * b) - t_0) / ((-b - sqrt(t_0)) * (2.0 * a));
} else {
tmp = -fma((a / (b * b)), ((c / b) * c), (c / b));
}
return tmp;
}
# Herbie alternative: rationalized quadratic root for b <= 4.8, -(c/b + a*c^2/b^3) otherwise.
function code(a, b, c) t_0 = fma(-4.0, Float64(a * c), Float64(b * b)) tmp = 0.0 if (b <= 4.8) tmp = Float64(Float64(Float64(b * b) - t_0) / Float64(Float64(Float64(-b) - sqrt(t_0)) * Float64(2.0 * a))); else tmp = Float64(-fma(Float64(a / Float64(b * b)), Float64(Float64(c / b) * c), Float64(c / b))); end return tmp end
(* Herbie alternative: rationalized quadratic root for b <= 4.8, -(c/b + a*c^2/b^3) otherwise. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(a * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 4.8], N[(N[(N[(b * b), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision] * N[(2.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-N[(N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] * N[(N[(c / b), $MachinePrecision] * c), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, a \cdot c, b \cdot b\right)\\
\mathbf{if}\;b \leq 4.8:\\
\;\;\;\;\frac{b \cdot b - t\_0}{\left(\left(-b\right) - \sqrt{t\_0}\right) \cdot \left(2 \cdot a\right)}\\
\mathbf{else}:\\
\;\;\;\;-\mathsf{fma}\left(\frac{a}{b \cdot b}, \frac{c}{b} \cdot c, \frac{c}{b}\right)\\
\end{array}
\end{array}
if b < 4.79999999999999982Initial program 85.0%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-eval85.0
Applied rewrites85.0%
lift-/.f64N/A
lift-+.f64N/A
flip-+N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites85.7%
if 4.79999999999999982 < b Initial program 48.3%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites92.8%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-/.f6485.7
Applied rewrites85.7%
Applied rewrites85.7%
(FPCore (a b c) :precision binary64 (if (<= b 4.8) (/ (+ (- b) (sqrt (fma b b (* (* -4.0 a) c)))) (* 2.0 a)) (- (fma (/ a (* b b)) (* (/ c b) c) (/ c b)))))
/* Herbie alternative for the quadratic root (binary64).
 * b <= 4.8: the textbook formula but with the discriminant fused as
 *   fma(b, b, -4ac), removing one rounding step.
 * b > 4.8: -fma(a/b^2, (c/b)*c, c/b), i.e. -(c/b + a*c^2/b^3) -- per the
 *   derivation log, a two-term Taylor expansion in a around 0.
 * Exact operation order is intentional; do not reassociate. */
double code(double a, double b, double c) {
double tmp;
if (b <= 4.8) {
tmp = (-b + sqrt(fma(b, b, ((-4.0 * a) * c)))) / (2.0 * a);
} else {
tmp = -fma((a / (b * b)), ((c / b) * c), (c / b));
}
return tmp;
}
# Herbie alternative: fused-discriminant quadratic formula for b <= 4.8, -(c/b + a*c^2/b^3) otherwise.
function code(a, b, c) tmp = 0.0 if (b <= 4.8) tmp = Float64(Float64(Float64(-b) + sqrt(fma(b, b, Float64(Float64(-4.0 * a) * c)))) / Float64(2.0 * a)); else tmp = Float64(-fma(Float64(a / Float64(b * b)), Float64(Float64(c / b) * c), Float64(c / b))); end return tmp end
(* Herbie alternative: fused-discriminant formula for b <= 4.8, -(c/b + a*c^2/b^3) otherwise. *)
code[a_, b_, c_] := If[LessEqual[b, 4.8], N[(N[((-b) + N[Sqrt[N[(b * b + N[(N[(-4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], (-N[(N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] * N[(N[(c / b), $MachinePrecision] * c), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 4.8:\\
\;\;\;\;\frac{\left(-b\right) + \sqrt{\mathsf{fma}\left(b, b, \left(-4 \cdot a\right) \cdot c\right)}}{2 \cdot a}\\
\mathbf{else}:\\
\;\;\;\;-\mathsf{fma}\left(\frac{a}{b \cdot b}, \frac{c}{b} \cdot c, \frac{c}{b}\right)\\
\end{array}
\end{array}
if b < 4.79999999999999982Initial program 85.0%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lower-*.f64N/A
metadata-eval85.0
Applied rewrites85.0%
if 4.79999999999999982 < b Initial program 48.3%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites92.8%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-/.f6485.7
Applied rewrites85.7%
Applied rewrites85.7%
(FPCore (a b c) :precision binary64 (if (<= b 4.8) (/ (+ (- b) (sqrt (fma b b (* (* -4.0 a) c)))) (* 2.0 a)) (/ (- (fma (/ (* c c) b) (/ a b) c)) b)))
/* Herbie alternative for the quadratic root (binary64).
 * b <= 4.8: the textbook formula with the discriminant fused as
 *   fma(b, b, -4ac).
 * b > 4.8: -fma(c^2/b, a/b, c) / b, i.e. -(c + a*c^2/b^2)/b -- per the
 *   derivation log, a two-term Taylor expansion in a around 0.
 * Exact operation order is intentional; do not reassociate. */
double code(double a, double b, double c) {
double tmp;
if (b <= 4.8) {
tmp = (-b + sqrt(fma(b, b, ((-4.0 * a) * c)))) / (2.0 * a);
} else {
tmp = -fma(((c * c) / b), (a / b), c) / b;
}
return tmp;
}
# Herbie alternative: fused-discriminant quadratic formula for b <= 4.8, -(c + a*c^2/b^2)/b otherwise.
function code(a, b, c) tmp = 0.0 if (b <= 4.8) tmp = Float64(Float64(Float64(-b) + sqrt(fma(b, b, Float64(Float64(-4.0 * a) * c)))) / Float64(2.0 * a)); else tmp = Float64(Float64(-fma(Float64(Float64(c * c) / b), Float64(a / b), c)) / b); end return tmp end
(* Herbie alternative: fused-discriminant formula for b <= 4.8, -(c + a*c^2/b^2)/b otherwise. *)
code[a_, b_, c_] := If[LessEqual[b, 4.8], N[(N[((-b) + N[Sqrt[N[(b * b + N[(N[(-4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], N[((-N[(N[(N[(c * c), $MachinePrecision] / b), $MachinePrecision] * N[(a / b), $MachinePrecision] + c), $MachinePrecision]) / b), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 4.8:\\
\;\;\;\;\frac{\left(-b\right) + \sqrt{\mathsf{fma}\left(b, b, \left(-4 \cdot a\right) \cdot c\right)}}{2 \cdot a}\\
\mathbf{else}:\\
\;\;\;\;\frac{-\mathsf{fma}\left(\frac{c \cdot c}{b}, \frac{a}{b}, c\right)}{b}\\
\end{array}
\end{array}
if b < 4.79999999999999982Initial program 85.0%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
lower-*.f64N/A
metadata-eval85.0
Applied rewrites85.0%
if 4.79999999999999982 < b Initial program 48.3%
Taylor expanded in a around 0
associate-*r/N/A
unpow3N/A
unpow2N/A
associate-/r*N/A
associate-/l*N/A
div-addN/A
lower-/.f64N/A
Applied rewrites85.6%
(FPCore (a b c) :precision binary64 (/ (- (fma (/ (* c c) b) (/ a b) c)) b))
double code(double a, double b, double c) {
return -fma(((c * c) / b), (a / b), c) / b;
}
# Series approximation of the quadratic root: -(c + (c^2/b)*(a/b)) / b via one fma.
function code(a, b, c) return Float64(Float64(-fma(Float64(Float64(c * c) / b), Float64(a / b), c)) / b) end
(* Series approximation of the quadratic root: -(c + (c^2/b)*(a/b)) / b. *)
code[a_, b_, c_] := N[((-N[(N[(N[(c * c), $MachinePrecision] / b), $MachinePrecision] * N[(a / b), $MachinePrecision] + c), $MachinePrecision]) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-\mathsf{fma}\left(\frac{c \cdot c}{b}, \frac{a}{b}, c\right)}{b}
\end{array}
Initial program 56.3%
Taylor expanded in a around 0
associate-*r/N/A
unpow3N/A
unpow2N/A
associate-/r*N/A
associate-/l*N/A
div-addN/A
lower-/.f64N/A
Applied rewrites78.9%
(FPCore (a b c) :precision binary64 (- (/ (/ (* c (fma c a (* b b))) (* b b)) b)))
double code(double a, double b, double c) {
return -(((c * fma(c, a, (b * b))) / (b * b)) / b);
}
# Series approximation of the quadratic root: -(c * fma(c, a, b^2)) / b^2 / b.
function code(a, b, c) return Float64(-Float64(Float64(Float64(c * fma(c, a, Float64(b * b))) / Float64(b * b)) / b)) end
(* Series approximation of the quadratic root: -(c * (c*a + b^2)) / b^2 / b. *)
code[a_, b_, c_] := (-N[(N[(N[(c * N[(c * a + N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision])
\begin{array}{l}
\\
-\frac{\frac{c \cdot \mathsf{fma}\left(c, a, b \cdot b\right)}{b \cdot b}}{b}
\end{array}
Initial program 56.3%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites88.4%
Taylor expanded in a around 0
+-commutativeN/A
mul-1-negN/A
mul-1-negN/A
distribute-neg-outN/A
lower-neg.f64N/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-/.f6478.9
Applied rewrites78.9%
Taylor expanded in b around 0
Applied rewrites78.7%
Applied rewrites78.8%
(FPCore (a b c) :precision binary64 (/ (* (+ (* a (/ c (* b b))) 1.0) (- c)) b))
/* Series approximation of the quadratic root for the large-b regime:
 * -(1 + a*c/b^2) * c / b. Operation order matches the FPCore spec. */
double code(double a, double b, double c) {
double ratio = c / (b * b);
double factor = (a * ratio) + 1.0;
return (factor * -c) / b;
}
real(8) function code(a, b, c)
! Series approximation of the quadratic root for the large-b regime:
! -(1 + a*c/b**2) * c / b (per the accompanying derivation log).
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (((a * (c / (b * b))) + 1.0d0) * -c) / b
end function
/** Series approximation of the quadratic root: -(1 + a*c/b^2) * c / b. */
public static double code(double a, double b, double c) {
double ratio = c / (b * b);
double factor = (a * ratio) + 1.0;
return (factor * -c) / b;
}
def code(a, b, c):
    """Series approximation of the quadratic root: -(1 + a*c/b^2) * c / b."""
    ratio = c / (b * b)
    factor = (a * ratio) + 1.0
    return (factor * -c) / b
# Series approximation of the quadratic root: -(1 + a*c/b^2) * c / b, in Float64.
function code(a, b, c) return Float64(Float64(Float64(Float64(a * Float64(c / Float64(b * b))) + 1.0) * Float64(-c)) / b) end
% Series approximation of the quadratic root: -(1 + a*c/b^2) * c / b.
function tmp = code(a, b, c) tmp = (((a * (c / (b * b))) + 1.0) * -c) / b; end
(* Series approximation of the quadratic root: -(1 + a*c/b^2) * c / b. *)
code[a_, b_, c_] := N[(N[(N[(N[(a * N[(c / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * (-c)), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(a \cdot \frac{c}{b \cdot b} + 1\right) \cdot \left(-c\right)}{b}
\end{array}
Initial program 56.3%
Taylor expanded in b around inf
lower-/.f64N/A
Applied rewrites84.9%
Taylor expanded in c around 0
Applied rewrites78.8%
Final simplification78.8%
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Leading-order approximation of the quadratic root: -c / b. The coefficient
 * a is intentionally unused (series in a truncated at the constant term). */
double code(double a, double b, double c) {
(void)a;
return -(c / b);
}
real(8) function code(a, b, c)
! Leading-order approximation of the quadratic root: -c / b (a is unused).
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
/** Leading-order approximation of the quadratic root: -c / b (a unused). */
public static double code(double a, double b, double c) {
double quotient = c / b;
return -quotient;
}
def code(a, b, c):
    """Leading-order approximation of the quadratic root: -c / b (a unused)."""
    quotient = c / b
    return -quotient
# Leading-order approximation of the quadratic root: -c / b (a unused).
function code(a, b, c) return Float64(Float64(-c) / b) end
% Leading-order approximation of the quadratic root: -c / b (a unused).
function tmp = code(a, b, c) tmp = -c / b; end
(* Leading-order approximation of the quadratic root: -c / b (a unused). *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 56.3%
Taylor expanded in a around 0
associate-*r/N/A
mul-1-negN/A
lower-/.f64N/A
lower-neg.f6463.2
Applied rewrites63.2%
herbie shell --seed 2024332
(FPCore (a b c)
:name "Quadratic roots, narrow range"
:precision binary64
:pre (and (and (and (< 1.0536712127723509e-8 a) (< a 94906265.62425156)) (and (< 1.0536712127723509e-8 b) (< b 94906265.62425156))) (and (< 1.0536712127723509e-8 c) (< c 94906265.62425156)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))