
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger root of a*x**2 + b*x + c = 0 via the textbook quadratic formula.
! NOTE(review): cancellation-prone when 4*a*c is small relative to b*b;
! the rearranged alternatives later in this report address that.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
! 4.0d0 / 2.0d0 literals keep the constants in double precision
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Larger root of a*x^2 + b*x + c = 0 (textbook quadratic formula).
// Same arithmetic as the original one-liner; the discriminant is named
// for readability.
public static double code(double a, double b, double c) {
    final double discriminant = (b * b) - ((4.0 * a) * c);
    return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
    """Larger root of a*x**2 + b*x + c = 0 (textbook quadratic formula)."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (math.sqrt(discriminant) - b) / (2.0 * a)
# Initial program: larger quadratic root. The explicit Float64(...) wrappers
# mirror the FPCore :precision binary64 annotation (Herbie-generated).
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Initial program: larger quadratic root via the textbook formula.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Initial program, repeated for the alternatives listing: larger quadratic
 * root (-b + sqrt(b*b - 4ac)) / (2a); cancellation-prone when the sqrt is
 * close to b. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program, repeated for the alternatives listing: larger quadratic
! root; cancellation-prone when sqrt(b*b - 4ac) is close to b.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Initial program, repeated for the alternatives listing: larger quadratic
// root (-b + sqrt(b*b - 4ac)) / (2a); cancellation-prone near sqrt ~ b.
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Initial program, repeated for the alternatives listing; loses accuracy
# when math.sqrt(b*b - 4ac) is close to b.
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Initial program, repeated for the alternatives listing (Herbie-generated).
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Initial program, repeated for the alternatives listing.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(let* ((t_0 (* b (* b b)))
(t_1 (* c (* a (* c c))))
(t_2 (* c (* t_1 -5.0)))
(t_3 (fma -4.0 (* c a) (* b b)))
(t_4 (* -2.0 (* c (* c c))))
(t_5 (* b (* t_0 t_0)))
(t_6 (* (* b b) t_0)))
(if (<= b 0.013)
(/ (- t_3 (* b b)) (* (* a 2.0) (+ b (sqrt t_3))))
(fma
a
(fma
(/
(-
(/ (* t_4 t_4) (* t_6 t_6))
(/ (* (* (* c t_1) 5.0) t_2) (* t_5 t_5)))
(- (/ t_4 t_6) (/ t_2 t_5)))
a
(* c (/ c (* b (* b (- b))))))
(/ c (- b))))))
/* Herbie alternative 1: branch on b.
 * b <= 0.013: conjugate rewrite — (-b + sqrt(t_3)) expressed as
 *   (t_3 - b*b) / (b + sqrt(t_3)) to avoid cancellation; t_3 is the
 *   discriminant computed with fma.
 * b >  0.013: series expansion of the root in a around 0 (per the
 *   derivation log: "Taylor expanded in a around 0"); leading term
 *   c/(-b), with corrections built from the reused powers t_0..t_6. */
double code(double a, double b, double c) {
double t_0 = b * (b * b);
double t_1 = c * (a * (c * c));
double t_2 = c * (t_1 * -5.0);
double t_3 = fma(-4.0, (c * a), (b * b));
double t_4 = -2.0 * (c * (c * c));
double t_5 = b * (t_0 * t_0);
double t_6 = (b * b) * t_0;
double tmp;
if (b <= 0.013) {
/* conjugate form of the quadratic formula */
tmp = (t_3 - (b * b)) / ((a * 2.0) * (b + sqrt(t_3)));
} else {
/* series in a, accumulated with nested fma */
tmp = fma(a, fma(((((t_4 * t_4) / (t_6 * t_6)) - ((((c * t_1) * 5.0) * t_2) / (t_5 * t_5))) / ((t_4 / t_6) - (t_2 / t_5))), a, (c * (c / (b * (b * -b))))), (c / -b));
}
return tmp;
}
function code(a, b, c) t_0 = Float64(b * Float64(b * b)) t_1 = Float64(c * Float64(a * Float64(c * c))) t_2 = Float64(c * Float64(t_1 * -5.0)) t_3 = fma(-4.0, Float64(c * a), Float64(b * b)) t_4 = Float64(-2.0 * Float64(c * Float64(c * c))) t_5 = Float64(b * Float64(t_0 * t_0)) t_6 = Float64(Float64(b * b) * t_0) tmp = 0.0 if (b <= 0.013) tmp = Float64(Float64(t_3 - Float64(b * b)) / Float64(Float64(a * 2.0) * Float64(b + sqrt(t_3)))); else tmp = fma(a, fma(Float64(Float64(Float64(Float64(t_4 * t_4) / Float64(t_6 * t_6)) - Float64(Float64(Float64(Float64(c * t_1) * 5.0) * t_2) / Float64(t_5 * t_5))) / Float64(Float64(t_4 / t_6) - Float64(t_2 / t_5))), a, Float64(c * Float64(c / Float64(b * Float64(b * Float64(-b)))))), Float64(c / Float64(-b))); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(c * N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(c * N[(t$95$1 * -5.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$3 = N[(-4.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$4 = N[(-2.0 * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$5 = N[(b * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$6 = N[(N[(b * b), $MachinePrecision] * t$95$0), $MachinePrecision]}, If[LessEqual[b, 0.013], N[(N[(t$95$3 - N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(N[(a * 2.0), $MachinePrecision] * N[(b + N[Sqrt[t$95$3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(a * N[(N[(N[(N[(N[(t$95$4 * t$95$4), $MachinePrecision] / N[(t$95$6 * t$95$6), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(N[(c * t$95$1), $MachinePrecision] * 5.0), $MachinePrecision] * t$95$2), $MachinePrecision] / N[(t$95$5 * t$95$5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(t$95$4 / t$95$6), $MachinePrecision] - N[(t$95$2 / t$95$5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * a + N[(c * N[(c / N[(b * N[(b * (-b)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / (-b)), $MachinePrecision]), $MachinePrecision]]]]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := b \cdot \left(b \cdot b\right)\\
t_1 := c \cdot \left(a \cdot \left(c \cdot c\right)\right)\\
t_2 := c \cdot \left(t\_1 \cdot -5\right)\\
t_3 := \mathsf{fma}\left(-4, c \cdot a, b \cdot b\right)\\
t_4 := -2 \cdot \left(c \cdot \left(c \cdot c\right)\right)\\
t_5 := b \cdot \left(t\_0 \cdot t\_0\right)\\
t_6 := \left(b \cdot b\right) \cdot t\_0\\
\mathbf{if}\;b \leq 0.013:\\
\;\;\;\;\frac{t\_3 - b \cdot b}{\left(a \cdot 2\right) \cdot \left(b + \sqrt{t\_3}\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(a, \mathsf{fma}\left(\frac{\frac{t\_4 \cdot t\_4}{t\_6 \cdot t\_6} - \frac{\left(\left(c \cdot t\_1\right) \cdot 5\right) \cdot t\_2}{t\_5 \cdot t\_5}}{\frac{t\_4}{t\_6} - \frac{t\_2}{t\_5}}, a, c \cdot \frac{c}{b \cdot \left(b \cdot \left(-b\right)\right)}\right), \frac{c}{-b}\right)\\
\end{array}
\end{array}
if b < 0.0129999999999999994Initial program 89.5%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6489.6
Applied egg-rr89.6%
flip-+N/A
associate-/l/N/A
/-lowering-/.f64N/A
Applied egg-rr90.6%
if 0.0129999999999999994 < b Initial program 50.9%
Taylor expanded in a around 0
Simplified93.0%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
accelerator-lowering-fma.f64N/A
Simplified93.0%
Applied egg-rr93.0%
Applied egg-rr93.1%
Final simplification92.9%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* c a) (* b b))))
(if (<= b 0.0135)
(/ (- t_0 (* b b)) (* (* a 2.0) (+ b (sqrt t_0))))
(fma
a
(fma
a
(fma
-2.0
(/ (* c (* c c)) (pow b 5.0))
(/ (* -5.0 (* a (pow c 4.0))) (pow b 7.0)))
(/ (* c c) (* b (* b (- b)))))
(/ c (- b))))))
/* Herbie alternative: for b <= 0.0135, conjugate rewrite of the quadratic
 * formula, (t_0 - b*b) / (2a * (b + sqrt(t_0))), avoiding cancellation.
 * Otherwise a Taylor series of the root in a around 0 (see derivation log),
 * algebraically -c/b - a*c^2/b^3 - 2a^2*c^3/b^5 - 5a^3*c^4/b^7, evaluated
 * with nested fma calls and pow. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (c * a), (b * b));
double tmp;
if (b <= 0.0135) {
tmp = (t_0 - (b * b)) / ((a * 2.0) * (b + sqrt(t_0)));
} else {
tmp = fma(a, fma(a, fma(-2.0, ((c * (c * c)) / pow(b, 5.0)), ((-5.0 * (a * pow(c, 4.0))) / pow(b, 7.0))), ((c * c) / (b * (b * -b)))), (c / -b));
}
return tmp;
}
function code(a, b, c) t_0 = fma(-4.0, Float64(c * a), Float64(b * b)) tmp = 0.0 if (b <= 0.0135) tmp = Float64(Float64(t_0 - Float64(b * b)) / Float64(Float64(a * 2.0) * Float64(b + sqrt(t_0)))); else tmp = fma(a, fma(a, fma(-2.0, Float64(Float64(c * Float64(c * c)) / (b ^ 5.0)), Float64(Float64(-5.0 * Float64(a * (c ^ 4.0))) / (b ^ 7.0))), Float64(Float64(c * c) / Float64(b * Float64(b * Float64(-b))))), Float64(c / Float64(-b))); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.0135], N[(N[(t$95$0 - N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(N[(a * 2.0), $MachinePrecision] * N[(b + N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(a * N[(a * N[(-2.0 * N[(N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] + N[(N[(-5.0 * N[(a * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * (-b)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / (-b)), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, c \cdot a, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.0135:\\
\;\;\;\;\frac{t\_0 - b \cdot b}{\left(a \cdot 2\right) \cdot \left(b + \sqrt{t\_0}\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(a, \mathsf{fma}\left(a, \mathsf{fma}\left(-2, \frac{c \cdot \left(c \cdot c\right)}{{b}^{5}}, \frac{-5 \cdot \left(a \cdot {c}^{4}\right)}{{b}^{7}}\right), \frac{c \cdot c}{b \cdot \left(b \cdot \left(-b\right)\right)}\right), \frac{c}{-b}\right)\\
\end{array}
\end{array}
if b < 0.0134999999999999998Initial program 89.5%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6489.6
Applied egg-rr89.6%
flip-+N/A
associate-/l/N/A
/-lowering-/.f64N/A
Applied egg-rr90.6%
if 0.0134999999999999998 < b Initial program 50.9%
Taylor expanded in a around 0
Simplified93.0%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
accelerator-lowering-fma.f64N/A
Simplified93.0%
Final simplification92.8%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* c a) (* b b))) (t_1 (* b (* b b))))
(if (<= b 0.0125)
(/ (- t_0 (* b b)) (* (* a 2.0) (+ b (sqrt t_0))))
(fma
a
(fma
(fma
-2.0
(/ (* c (* c c)) (* (* b b) t_1))
(/ (* -5.0 (* (* c c) (* a (* c c)))) (* t_1 (* b t_1))))
a
(* c (/ c (* b (* b (- b))))))
(/ c (- b))))))
/* Herbie alternative: same structure as the previous one (conjugate rewrite
 * for b <= 0.0125, else a Taylor series in a around 0), but the powers of b
 * are built from the cube t_1 = b^3 by multiplication instead of pow. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (c * a), (b * b));
double t_1 = b * (b * b);
double tmp;
if (b <= 0.0125) {
tmp = (t_0 - (b * b)) / ((a * 2.0) * (b + sqrt(t_0)));
} else {
tmp = fma(a, fma(fma(-2.0, ((c * (c * c)) / ((b * b) * t_1)), ((-5.0 * ((c * c) * (a * (c * c)))) / (t_1 * (b * t_1)))), a, (c * (c / (b * (b * -b))))), (c / -b));
}
return tmp;
}
function code(a, b, c) t_0 = fma(-4.0, Float64(c * a), Float64(b * b)) t_1 = Float64(b * Float64(b * b)) tmp = 0.0 if (b <= 0.0125) tmp = Float64(Float64(t_0 - Float64(b * b)) / Float64(Float64(a * 2.0) * Float64(b + sqrt(t_0)))); else tmp = fma(a, fma(fma(-2.0, Float64(Float64(c * Float64(c * c)) / Float64(Float64(b * b) * t_1)), Float64(Float64(-5.0 * Float64(Float64(c * c) * Float64(a * Float64(c * c)))) / Float64(t_1 * Float64(b * t_1)))), a, Float64(c * Float64(c / Float64(b * Float64(b * Float64(-b)))))), Float64(c / Float64(-b))); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.0125], N[(N[(t$95$0 - N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(N[(a * 2.0), $MachinePrecision] * N[(b + N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(a * N[(N[(-2.0 * N[(N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(N[(b * b), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(-5.0 * N[(N[(c * c), $MachinePrecision] * N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$1 * N[(b * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * a + N[(c * N[(c / N[(b * N[(b * (-b)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / (-b)), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, c \cdot a, b \cdot b\right)\\
t_1 := b \cdot \left(b \cdot b\right)\\
\mathbf{if}\;b \leq 0.0125:\\
\;\;\;\;\frac{t\_0 - b \cdot b}{\left(a \cdot 2\right) \cdot \left(b + \sqrt{t\_0}\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(a, \mathsf{fma}\left(\mathsf{fma}\left(-2, \frac{c \cdot \left(c \cdot c\right)}{\left(b \cdot b\right) \cdot t\_1}, \frac{-5 \cdot \left(\left(c \cdot c\right) \cdot \left(a \cdot \left(c \cdot c\right)\right)\right)}{t\_1 \cdot \left(b \cdot t\_1\right)}\right), a, c \cdot \frac{c}{b \cdot \left(b \cdot \left(-b\right)\right)}\right), \frac{c}{-b}\right)\\
\end{array}
\end{array}
if b < 0.012500000000000001Initial program 89.5%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6489.6
Applied egg-rr89.6%
flip-+N/A
associate-/l/N/A
/-lowering-/.f64N/A
Applied egg-rr90.6%
if 0.012500000000000001 < b Initial program 50.9%
Taylor expanded in a around 0
Simplified93.0%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
accelerator-lowering-fma.f64N/A
Simplified93.0%
Applied egg-rr93.0%
Final simplification92.8%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* c a) (* b b))))
(if (<= (/ (- (sqrt (- (* b b) (* c (* a 4.0)))) b) (* a 2.0)) -0.08)
(/ (* (/ 0.5 a) (- t_0 (* b b))) (+ b (sqrt t_0)))
(fma
a
(/ (- (/ (* -2.0 (* a (* c (* c c)))) (* b b)) (* c c)) (* b (* b b)))
(/ c (- b))))))
/* Herbie alternative: the branch predicate evaluates the original formula
 * itself and tests whether the root is <= -0.08.
 * Accurate branch: conjugate rewrite scaled by 0.5/a.
 * Fallback: second-order series in a (see derivation log), equivalent to
 *   -c/b - a*c^2/b^3 - 2a^2*c^3/b^5. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (c * a), (b * b));
double tmp;
if (((sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0)) <= -0.08) {
tmp = ((0.5 / a) * (t_0 - (b * b))) / (b + sqrt(t_0));
} else {
tmp = fma(a, ((((-2.0 * (a * (c * (c * c)))) / (b * b)) - (c * c)) / (b * (b * b))), (c / -b));
}
return tmp;
}
function code(a, b, c) t_0 = fma(-4.0, Float64(c * a), Float64(b * b)) tmp = 0.0 if (Float64(Float64(sqrt(Float64(Float64(b * b) - Float64(c * Float64(a * 4.0)))) - b) / Float64(a * 2.0)) <= -0.08) tmp = Float64(Float64(Float64(0.5 / a) * Float64(t_0 - Float64(b * b))) / Float64(b + sqrt(t_0))); else tmp = fma(a, Float64(Float64(Float64(Float64(-2.0 * Float64(a * Float64(c * Float64(c * c)))) / Float64(b * b)) - Float64(c * c)) / Float64(b * Float64(b * b))), Float64(c / Float64(-b))); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision], -0.08], N[(N[(N[(0.5 / a), $MachinePrecision] * N[(t$95$0 - N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b + N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(a * N[(N[(N[(N[(-2.0 * N[(a * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] - N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / (-b)), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, c \cdot a, b \cdot b\right)\\
\mathbf{if}\;\frac{\sqrt{b \cdot b - c \cdot \left(a \cdot 4\right)} - b}{a \cdot 2} \leq -0.08:\\
\;\;\;\;\frac{\frac{0.5}{a} \cdot \left(t\_0 - b \cdot b\right)}{b + \sqrt{t\_0}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(a, \frac{\frac{-2 \cdot \left(a \cdot \left(c \cdot \left(c \cdot c\right)\right)\right)}{b \cdot b} - c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{-b}\right)\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) < -0.0800000000000000017Initial program 83.7%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6483.7
Applied egg-rr83.7%
div-invN/A
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr85.2%
if -0.0800000000000000017 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) Initial program 46.4%
Taylor expanded in a around 0
Simplified94.7%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
accelerator-lowering-fma.f64N/A
Simplified94.7%
Taylor expanded in b around inf
/-lowering-/.f64N/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
associate-*r/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
Simplified92.8%
Final simplification91.2%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* c a) (* b b))))
(if (<= (/ (- (sqrt (- (* b b) (* c (* a 4.0)))) b) (* a 2.0)) -0.032)
(/ (* (/ 0.5 a) (- t_0 (* b b))) (+ b (sqrt t_0)))
(- (fma a (/ (* c c) (* b (* b b))) (/ c b))))))
/* Herbie alternative: root-value predicate with threshold -0.032.
 * Accurate branch: conjugate rewrite scaled by 0.5/a.
 * Fallback: first-order series in a, -(c/b + a*c^2/b^3), via fma. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (c * a), (b * b));
double tmp;
if (((sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0)) <= -0.032) {
tmp = ((0.5 / a) * (t_0 - (b * b))) / (b + sqrt(t_0));
} else {
tmp = -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
return tmp;
}
function code(a, b, c) t_0 = fma(-4.0, Float64(c * a), Float64(b * b)) tmp = 0.0 if (Float64(Float64(sqrt(Float64(Float64(b * b) - Float64(c * Float64(a * 4.0)))) - b) / Float64(a * 2.0)) <= -0.032) tmp = Float64(Float64(Float64(0.5 / a) * Float64(t_0 - Float64(b * b))) / Float64(b + sqrt(t_0))); else tmp = Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b))); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision], -0.032], N[(N[(N[(0.5 / a), $MachinePrecision] * N[(t$95$0 - N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b + N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, c \cdot a, b \cdot b\right)\\
\mathbf{if}\;\frac{\sqrt{b \cdot b - c \cdot \left(a \cdot 4\right)} - b}{a \cdot 2} \leq -0.032:\\
\;\;\;\;\frac{\frac{0.5}{a} \cdot \left(t\_0 - b \cdot b\right)}{b + \sqrt{t\_0}}\\
\mathbf{else}:\\
\;\;\;\;-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) < -0.032000000000000001Initial program 83.1%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6483.1
Applied egg-rr83.1%
div-invN/A
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr84.7%
if -0.032000000000000001 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) Initial program 46.0%
Taylor expanded in a around 0
Simplified94.9%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
distribute-neg-outN/A
neg-lowering-neg.f64N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f6489.0
Simplified89.0%
Final simplification88.1%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma -4.0 (* c a) (* b b))))
(if (<= (/ (- (sqrt (- (* b b) (* c (* a 4.0)))) b) (* a 2.0)) -0.032)
(/ (- t_0 (* b b)) (* (* a 2.0) (+ b (sqrt t_0))))
(- (fma a (/ (* c c) (* b (* b b))) (/ c b))))))
/* Herbie alternative: as the previous variant (threshold -0.032,
 * first-order series fallback), but the accurate branch divides by
 * (2a)*(b + sqrt(t_0)) rather than pre-scaling by 0.5/a. */
double code(double a, double b, double c) {
double t_0 = fma(-4.0, (c * a), (b * b));
double tmp;
if (((sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0)) <= -0.032) {
tmp = (t_0 - (b * b)) / ((a * 2.0) * (b + sqrt(t_0)));
} else {
tmp = -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
return tmp;
}
function code(a, b, c) t_0 = fma(-4.0, Float64(c * a), Float64(b * b)) tmp = 0.0 if (Float64(Float64(sqrt(Float64(Float64(b * b) - Float64(c * Float64(a * 4.0)))) - b) / Float64(a * 2.0)) <= -0.032) tmp = Float64(Float64(t_0 - Float64(b * b)) / Float64(Float64(a * 2.0) * Float64(b + sqrt(t_0)))); else tmp = Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b))); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(-4.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision], -0.032], N[(N[(t$95$0 - N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(N[(a * 2.0), $MachinePrecision] * N[(b + N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(-4, c \cdot a, b \cdot b\right)\\
\mathbf{if}\;\frac{\sqrt{b \cdot b - c \cdot \left(a \cdot 4\right)} - b}{a \cdot 2} \leq -0.032:\\
\;\;\;\;\frac{t\_0 - b \cdot b}{\left(a \cdot 2\right) \cdot \left(b + \sqrt{t\_0}\right)}\\
\mathbf{else}:\\
\;\;\;\;-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) < -0.032000000000000001Initial program 83.1%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6483.1
Applied egg-rr83.1%
flip-+N/A
associate-/l/N/A
/-lowering-/.f64N/A
Applied egg-rr84.7%
if -0.032000000000000001 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) Initial program 46.0%
Taylor expanded in a around 0
Simplified94.9%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
distribute-neg-outN/A
neg-lowering-neg.f64N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f6489.0
Simplified89.0%
Final simplification88.1%
(FPCore (a b c) :precision binary64 (if (<= (/ (- (sqrt (- (* b b) (* c (* a 4.0)))) b) (* a 2.0)) -0.032) (/ 1.0 (/ (* a 2.0) (- (sqrt (fma a (* -4.0 c) (* b b))) b))) (- (fma a (/ (* c c) (* b (* b b))) (/ c b)))))
/* Herbie alternative: same -0.032 predicate; accurate branch written as a
 * reciprocal, 1 / ((2a) / (sqrt(disc) - b)), with the discriminant computed
 * by fma(a, -4c, b*b). Fallback: first-order series -(c/b + a*c^2/b^3). */
double code(double a, double b, double c) {
double tmp;
if (((sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0)) <= -0.032) {
tmp = 1.0 / ((a * 2.0) / (sqrt(fma(a, (-4.0 * c), (b * b))) - b));
} else {
tmp = -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
return tmp;
}
function code(a, b, c) tmp = 0.0 if (Float64(Float64(sqrt(Float64(Float64(b * b) - Float64(c * Float64(a * 4.0)))) - b) / Float64(a * 2.0)) <= -0.032) tmp = Float64(1.0 / Float64(Float64(a * 2.0) / Float64(sqrt(fma(a, Float64(-4.0 * c), Float64(b * b))) - b))); else tmp = Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b))); end return tmp end
code[a_, b_, c_] := If[LessEqual[N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision], -0.032], N[(1.0 / N[(N[(a * 2.0), $MachinePrecision] / N[(N[Sqrt[N[(a * N[(-4.0 * c), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\sqrt{b \cdot b - c \cdot \left(a \cdot 4\right)} - b}{a \cdot 2} \leq -0.032:\\
\;\;\;\;\frac{1}{\frac{a \cdot 2}{\sqrt{\mathsf{fma}\left(a, -4 \cdot c, b \cdot b\right)} - b}}\\
\mathbf{else}:\\
\;\;\;\;-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) < -0.032000000000000001Initial program 83.1%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-inN/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f6483.1
Applied egg-rr83.1%
flip-+N/A
sqr-negN/A
rem-square-sqrtN/A
div-subN/A
--lowering--.f64N/A
Applied egg-rr84.1%
clear-numN/A
/-lowering-/.f64N/A
sub-divN/A
sqr-negN/A
rem-square-sqrtN/A
flip-+N/A
Applied egg-rr83.2%
if -0.032000000000000001 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) Initial program 46.0%
Taylor expanded in a around 0
Simplified94.9%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
distribute-neg-outN/A
neg-lowering-neg.f64N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f6489.0
Simplified89.0%
Final simplification87.8%
(FPCore (a b c) :precision binary64 (if (<= (/ (- (sqrt (- (* b b) (* c (* a 4.0)))) b) (* a 2.0)) -0.032) (* (/ -0.5 a) (- b (sqrt (fma b b (* c (* -4.0 a)))))) (- (fma a (/ (* c c) (* b (* b b))) (/ c b)))))
/* Herbie alternative: same -0.032 predicate; accurate branch is
 * (-0.5/a) * (b - sqrt(b*b - 4ac)) with an fma-computed discriminant —
 * algebraically the original formula, no conjugate trick.
 * Fallback: first-order series -(c/b + a*c^2/b^3). */
double code(double a, double b, double c) {
double tmp;
if (((sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0)) <= -0.032) {
tmp = (-0.5 / a) * (b - sqrt(fma(b, b, (c * (-4.0 * a)))));
} else {
tmp = -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
return tmp;
}
function code(a, b, c) tmp = 0.0 if (Float64(Float64(sqrt(Float64(Float64(b * b) - Float64(c * Float64(a * 4.0)))) - b) / Float64(a * 2.0)) <= -0.032) tmp = Float64(Float64(-0.5 / a) * Float64(b - sqrt(fma(b, b, Float64(c * Float64(-4.0 * a)))))); else tmp = Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b))); end return tmp end
code[a_, b_, c_] := If[LessEqual[N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision], -0.032], N[(N[(-0.5 / a), $MachinePrecision] * N[(b - N[Sqrt[N[(b * b + N[(c * N[(-4.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{\sqrt{b \cdot b - c \cdot \left(a \cdot 4\right)} - b}{a \cdot 2} \leq -0.032:\\
\;\;\;\;\frac{-0.5}{a} \cdot \left(b - \sqrt{\mathsf{fma}\left(b, b, c \cdot \left(-4 \cdot a\right)\right)}\right)\\
\mathbf{else}:\\
\;\;\;\;-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) < -0.032000000000000001Initial program 83.1%
Applied egg-rr83.2%
if -0.032000000000000001 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 #s(literal 4 binary64) a) c)))) (*.f64 #s(literal 2 binary64) a)) Initial program 46.0%
Taylor expanded in a around 0
Simplified94.9%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
distribute-neg-outN/A
neg-lowering-neg.f64N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f6489.0
Simplified89.0%
Final simplification87.8%
(FPCore (a b c) :precision binary64 (- (fma a (/ (* c c) (* b (* b b))) (/ c b))))
double code(double a, double b, double c) {
return -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
# First-order series approximation: -(c/b + a*c^2/b^3), via fma.
function code(a, b, c) return Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b))) end
code[a_, b_, c_] := (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)
\end{array}
Initial program 54.3%
Taylor expanded in a around 0
Simplified90.6%
Taylor expanded in a around 0
sub-negN/A
mul-1-negN/A
distribute-neg-outN/A
neg-lowering-neg.f64N/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f6482.0
Simplified82.0%
(FPCore (a b c) :precision binary64 (/ (fma (* c c) (/ a (* b b)) c) (- b)))
double code(double a, double b, double c) {
return fma((c * c), (a / (b * b)), c) / -b;
}
# Series approximation rearranged as fma(c^2, a/b^2, c) / (-b).
function code(a, b, c) return Float64(fma(Float64(c * c), Float64(a / Float64(b * b)), c) / Float64(-b)) end
code[a_, b_, c_] := N[(N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}
\end{array}
Initial program 54.3%
Taylor expanded in b around inf
distribute-lft-outN/A
associate-/l*N/A
mul-1-negN/A
neg-lowering-neg.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
*-commutativeN/A
associate-/l*N/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6482.0
Simplified82.0%
Final simplification82.0%
(FPCore (a b c) :precision binary64 (/ c (- b)))
/* Leading-order approximation of the root for large |b|: x ~ c / (-b).
 * The coefficient a drops out of this truncation. */
double code(double a, double b, double c) {
    (void)a; /* unused in the leading-order term */
    double neg_b = -b;
    return c / neg_b;
}
! Leading-order approximation of the root for large |b|: x ~ c / (-b).
! The coefficient a is unused in this truncation.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
! Fix: the generated "c / -b" places two operators in a row, which the
! Fortran expression grammar forbids (a unary minus after "/" must be
! parenthesized); gfortran rejects it as a syntax error.
code = c / (-b)
end function
// Leading-order approximation of the root for large |b|: c / -b (a unused).
public static double code(double a, double b, double c) {
return c / -b;
}
def code(a, b, c):
    """Leading-order approximation of the root for large |b|: c / (-b)."""
    return c / (-b)
# Leading-order approximation: c / (-b).
function code(a, b, c) return Float64(c / Float64(-b)) end
% Leading-order approximation: c / -b.
function tmp = code(a, b, c) tmp = c / -b; end
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{-b}
\end{array}
Initial program 54.3%
Taylor expanded in b around inf
mul-1-negN/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
neg-lowering-neg.f6466.2
Simplified66.2%
(FPCore (a b c) :precision binary64 (/ c b))
/* Crudest approximation of the root: x ~ c / b (note the sign differs
 * from the c / -b variant; this matches the report's last alternative). */
double code(double a, double b, double c) {
    (void)a; /* unused in this truncation */
    double ratio = c / b;
    return ratio;
}
! Crudest approximation of the root: c / b (a is unused; sign differs from
! the c / (-b) variant above).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c / b
end function
// Crudest approximation of the root: c / b (a unused).
public static double code(double a, double b, double c) {
return c / b;
}
# Crudest approximation of the root: c / b (a unused).
def code(a, b, c): return c / b
# Crudest approximation of the root: c / b.
function code(a, b, c) return Float64(c / b) end
% Crudest approximation of the root: c / b.
function tmp = code(a, b, c) tmp = c / b; end
code[a_, b_, c_] := N[(c / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{b}
\end{array}
Initial program 54.3%
Taylor expanded in b around inf
mul-1-negN/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
neg-lowering-neg.f6466.2
Simplified66.2%
clear-numN/A
associate-/r/N/A
*-lowering-*.f64N/A
metadata-evalN/A
frac-2negN/A
/-lowering-/.f6466.1
Applied egg-rr66.1%
*-commutativeN/A
clear-numN/A
un-div-invN/A
clear-numN/A
frac-2negN/A
metadata-evalN/A
inv-powN/A
pow-flipN/A
metadata-evalN/A
metadata-evalN/A
pow-powN/A
pow2N/A
sqr-negN/A
pow2N/A
pow-powN/A
metadata-evalN/A
unpow1N/A
/-lowering-/.f641.6
Applied egg-rr1.6%
herbie shell --seed 2024204
(FPCore (a b c)
:name "Quadratic roots, narrow range"
:precision binary64
:pre (and (and (and (< 1.0536712127723509e-8 a) (< a 94906265.62425156)) (and (< 1.0536712127723509e-8 b) (< b 94906265.62425156))) (and (< 1.0536712127723509e-8 c) (< c 94906265.62425156)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))