
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8) :: discriminant
! Larger quadratic root (-b + sqrt(b^2 - 4ac)) / (2a); operand grouping is
! kept exactly as generated so the binary64 result is unchanged.
discriminant = (b * b) - ((4.0d0 * a) * c)
code = (-b + sqrt(discriminant)) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
    // Larger quadratic root via the textbook formula. Operand grouping
    // matches the generated original, so the result is bit-identical.
    final double discriminant = (b * b) - ((4.0 * a) * c);
    return (-b + Math.sqrt(discriminant)) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
/* Initial program repeated for the alternatives report (identical to the
   C listing at the top of the file). */
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
! Initial program repeated for the alternatives report: larger quadratic
! root (-b + sqrt(b^2 - 4ac)) / (2a) in double precision.
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
// Initial program repeated for the alternatives report (quadratic formula).
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(let* ((t_0 (/ c (pow b 4.0))))
(/
(pow a -1.0)
(*
(-
(-
(pow (* b b) -1.0)
(/
(fma
(* (* a c) c)
a
(fma
(* (/ (* (pow a 4.0) (pow c 4.0)) (* a a)) (/ 20.0 (* c c)))
-0.25
(* (* 2.0 (* a a)) (* c c))))
(pow b 6.0)))
(fma (* a t_0) -2.0 (fma a t_0 (/ (pow a -1.0) c))))
b))))
double code(double a, double b, double c) {
/* Herbie alternative: series-rewritten form of the quadratic-root program
   (Taylor expanded in b around inf, per the log below). t_0 = c / b^4 is a
   shared subterm. Evaluation order carries the reported accuracy — do not
   re-associate or simplify the fma/pow chain. */
double t_0 = c / pow(b, 4.0);
return pow(a, -1.0) / (((pow((b * b), -1.0) - (fma(((a * c) * c), a, fma((((pow(a, 4.0) * pow(c, 4.0)) / (a * a)) * (20.0 / (c * c))), -0.25, ((2.0 * (a * a)) * (c * c)))) / pow(b, 6.0))) - fma((a * t_0), -2.0, fma(a, t_0, (pow(a, -1.0) / c)))) * b);
}
function code(a, b, c) t_0 = Float64(c / (b ^ 4.0)) return Float64((a ^ -1.0) / Float64(Float64(Float64((Float64(b * b) ^ -1.0) - Float64(fma(Float64(Float64(a * c) * c), a, fma(Float64(Float64(Float64((a ^ 4.0) * (c ^ 4.0)) / Float64(a * a)) * Float64(20.0 / Float64(c * c))), -0.25, Float64(Float64(2.0 * Float64(a * a)) * Float64(c * c)))) / (b ^ 6.0))) - fma(Float64(a * t_0), -2.0, fma(a, t_0, Float64((a ^ -1.0) / c)))) * b)) end
code[a_, b_, c_] := Block[{t$95$0 = N[(c / N[Power[b, 4.0], $MachinePrecision]), $MachinePrecision]}, N[(N[Power[a, -1.0], $MachinePrecision] / N[(N[(N[(N[Power[N[(b * b), $MachinePrecision], -1.0], $MachinePrecision] - N[(N[(N[(N[(a * c), $MachinePrecision] * c), $MachinePrecision] * a + N[(N[(N[(N[(N[Power[a, 4.0], $MachinePrecision] * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision] / N[(a * a), $MachinePrecision]), $MachinePrecision] * N[(20.0 / N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.25 + N[(N[(2.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(a * t$95$0), $MachinePrecision] * -2.0 + N[(a * t$95$0 + N[(N[Power[a, -1.0], $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{c}{{b}^{4}}\\
\frac{{a}^{-1}}{\left(\left({\left(b \cdot b\right)}^{-1} - \frac{\mathsf{fma}\left(\left(a \cdot c\right) \cdot c, a, \mathsf{fma}\left(\frac{{a}^{4} \cdot {c}^{4}}{a \cdot a} \cdot \frac{20}{c \cdot c}, -0.25, \left(2 \cdot \left(a \cdot a\right)\right) \cdot \left(c \cdot c\right)\right)\right)}{{b}^{6}}\right) - \mathsf{fma}\left(a \cdot t\_0, -2, \mathsf{fma}\left(a, t\_0, \frac{{a}^{-1}}{c}\right)\right)\right) \cdot b}
\end{array}
\end{array}
Initial program 55.7%
Applied rewrites 55.7%
Taylor expanded in b around inf
Applied rewrites 92.1%
Final simplification 92.1%
(FPCore (a b c)
:precision binary64
(/
(pow a -1.0)
(/
(fma
(fma
(fma (/ (* (* a a) -2.0) (pow b 5.0)) (- c) (/ a (pow b 3.0)))
c
(pow b -1.0))
c
(/ (- b) a))
c)))
double code(double a, double b, double c) {
/* Herbie alternative: nested-fma series divided by c (Taylor expanded in c
   around 0, then in a around 0, per the log below). Arithmetic order is
   significant; leave as generated. */
return pow(a, -1.0) / (fma(fma(fma((((a * a) * -2.0) / pow(b, 5.0)), -c, (a / pow(b, 3.0))), c, pow(b, -1.0)), c, (-b / a)) / c);
}
function code(a, b, c) return Float64((a ^ -1.0) / Float64(fma(fma(fma(Float64(Float64(Float64(a * a) * -2.0) / (b ^ 5.0)), Float64(-c), Float64(a / (b ^ 3.0))), c, (b ^ -1.0)), c, Float64(Float64(-b) / a)) / c)) end
code[a_, b_, c_] := N[(N[Power[a, -1.0], $MachinePrecision] / N[(N[(N[(N[(N[(N[(N[(a * a), $MachinePrecision] * -2.0), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * (-c) + N[(a / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * c + N[Power[b, -1.0], $MachinePrecision]), $MachinePrecision] * c + N[((-b) / a), $MachinePrecision]), $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{a}^{-1}}{\frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(a \cdot a\right) \cdot -2}{{b}^{5}}, -c, \frac{a}{{b}^{3}}\right), c, {b}^{-1}\right), c, \frac{-b}{a}\right)}{c}}
\end{array}
Initial program 55.7%
Applied rewrites 55.7%
Taylor expanded in c around 0
Applied rewrites 92.0%
Taylor expanded in a around 0
Applied rewrites 92.0%
Final simplification 92.0%
(FPCore (a b c)
:precision binary64
(fma
(/
(fma
(* -5.0 (* a a))
(pow c 4.0)
(* (fma (* -2.0 a) (pow c 3.0) (* (* (- c) c) (* b b))) (* b b)))
(pow b 7.0))
a
(/ (- c) b)))
double code(double a, double b, double c) {
/* Herbie alternative: fused polynomial over b^7 plus a -c/b leading term
   (Taylor expanded in a around 0 and b around 0, per the log below).
   Leave the fma nesting exactly as generated. */
return fma((fma((-5.0 * (a * a)), pow(c, 4.0), (fma((-2.0 * a), pow(c, 3.0), ((-c * c) * (b * b))) * (b * b))) / pow(b, 7.0)), a, (-c / b));
}
function code(a, b, c) return fma(Float64(fma(Float64(-5.0 * Float64(a * a)), (c ^ 4.0), Float64(fma(Float64(-2.0 * a), (c ^ 3.0), Float64(Float64(Float64(-c) * c) * Float64(b * b))) * Float64(b * b))) / (b ^ 7.0)), a, Float64(Float64(-c) / b)) end
code[a_, b_, c_] := N[(N[(N[(N[(-5.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * N[Power[c, 4.0], $MachinePrecision] + N[(N[(N[(-2.0 * a), $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision] + N[(N[((-c) * c), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision] * a + N[((-c) / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{\mathsf{fma}\left(-5 \cdot \left(a \cdot a\right), {c}^{4}, \mathsf{fma}\left(-2 \cdot a, {c}^{3}, \left(\left(-c\right) \cdot c\right) \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot b\right)\right)}{{b}^{7}}, a, \frac{-c}{b}\right)
\end{array}
Initial program 55.7%
Taylor expanded in a around 0
Applied rewrites 91.9%
Taylor expanded in b around 0
Applied rewrites 91.9%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma (* c -4.0) a (* b b))))
(if (<= b 0.0546)
(/ (* (fma b b (- t_0)) (/ 0.5 a)) (- (- b) (sqrt t_0)))
(pow
(fma
a
(fma -2.0 (* a (* (/ c (pow b 3.0)) -0.5)) (pow b -1.0))
(/ (- b) c))
-1.0))))
double code(double a, double b, double c) {
/* Herbie alternative with a regime split on b (threshold 0.0546, per the
   FPCore above). t_0 = fma(c * -4, a, b^2) is the discriminant; both
   branches approximate the original root and their evaluation order
   carries the reported accuracy. */
double t_0 = fma((c * -4.0), a, (b * b));
double tmp;
if (b <= 0.0546) {
// Small-b branch: rationalized sqrt form built on t_0.
tmp = (fma(b, b, -t_0) * (0.5 / a)) / (-b - sqrt(t_0));
} else {
// Large-b branch: series form (Taylor expansions, per the rewrite log below).
tmp = pow(fma(a, fma(-2.0, (a * ((c / pow(b, 3.0)) * -0.5)), pow(b, -1.0)), (-b / c)), -1.0);
}
return tmp;
}
function code(a, b, c) t_0 = fma(Float64(c * -4.0), a, Float64(b * b)) tmp = 0.0 if (b <= 0.0546) tmp = Float64(Float64(fma(b, b, Float64(-t_0)) * Float64(0.5 / a)) / Float64(Float64(-b) - sqrt(t_0))); else tmp = fma(a, fma(-2.0, Float64(a * Float64(Float64(c / (b ^ 3.0)) * -0.5)), (b ^ -1.0)), Float64(Float64(-b) / c)) ^ -1.0; end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(c * -4.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.0546], N[(N[(N[(b * b + (-t$95$0)), $MachinePrecision] * N[(0.5 / a), $MachinePrecision]), $MachinePrecision] / N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Power[N[(a * N[(-2.0 * N[(a * N[(N[(c / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision] + N[Power[b, -1.0], $MachinePrecision]), $MachinePrecision] + N[((-b) / c), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(c \cdot -4, a, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.0546:\\
\;\;\;\;\frac{\mathsf{fma}\left(b, b, -t\_0\right) \cdot \frac{0.5}{a}}{\left(-b\right) - \sqrt{t\_0}}\\
\mathbf{else}:\\
\;\;\;\;{\left(\mathsf{fma}\left(a, \mathsf{fma}\left(-2, a \cdot \left(\frac{c}{{b}^{3}} \cdot -0.5\right), {b}^{-1}\right), \frac{-b}{c}\right)\right)}^{-1}\\
\end{array}
\end{array}
if b < 0.0546000000000000027
Initial program 84.7%
lift-sqrt.f64N/A
lift--.f64N/A
flip--N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-sqrt.f64N/A
clear-numN/A
Applied rewrites84.5%
lift-/.f64N/A
div-invN/A
lift-+.f64N/A
flip-+N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites86.8%
if 0.0546000000000000027 < b Initial program 52.7%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6452.6
Applied rewrites52.6%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6452.5
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6452.5
Applied rewrites52.5%
Taylor expanded in a around 0
+-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
distribute-rgt-outN/A
metadata-evalN/A
lower-*.f64N/A
lower-/.f64N/A
lower-pow.f64N/A
lower-/.f64N/A
associate-*r/N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6491.7
Applied rewrites91.7%
Final simplification91.3%
(FPCore (a b c)
:precision binary64
(/
(pow a -1.0)
(*
(/
(-
(* (* b b) (+ (* (* b b) (fma -1.0 (/ (/ (* b b) a) c) 1.0)) (* a c)))
(fma (* c c) (* (* a a) -3.0) (* (* a a) (* c c))))
(pow b 6.0))
b)))
double code(double a, double b, double c) {
/* Herbie alternative: rational form over b^6 (Taylor expanded in b around
   inf, then around 0, per the log below). Do not re-associate the
   fma/pow chain. */
return pow(a, -1.0) / (((((b * b) * (((b * b) * fma(-1.0, (((b * b) / a) / c), 1.0)) + (a * c))) - fma((c * c), ((a * a) * -3.0), ((a * a) * (c * c)))) / pow(b, 6.0)) * b);
}
function code(a, b, c) return Float64((a ^ -1.0) / Float64(Float64(Float64(Float64(Float64(b * b) * Float64(Float64(Float64(b * b) * fma(-1.0, Float64(Float64(Float64(b * b) / a) / c), 1.0)) + Float64(a * c))) - fma(Float64(c * c), Float64(Float64(a * a) * -3.0), Float64(Float64(a * a) * Float64(c * c)))) / (b ^ 6.0)) * b)) end
code[a_, b_, c_] := N[(N[Power[a, -1.0], $MachinePrecision] / N[(N[(N[(N[(N[(b * b), $MachinePrecision] * N[(N[(N[(b * b), $MachinePrecision] * N[(-1.0 * N[(N[(N[(b * b), $MachinePrecision] / a), $MachinePrecision] / c), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + N[(a * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(c * c), $MachinePrecision] * N[(N[(a * a), $MachinePrecision] * -3.0), $MachinePrecision] + N[(N[(a * a), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 6.0], $MachinePrecision]), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{a}^{-1}}{\frac{\left(b \cdot b\right) \cdot \left(\left(b \cdot b\right) \cdot \mathsf{fma}\left(-1, \frac{\frac{b \cdot b}{a}}{c}, 1\right) + a \cdot c\right) - \mathsf{fma}\left(c \cdot c, \left(a \cdot a\right) \cdot -3, \left(a \cdot a\right) \cdot \left(c \cdot c\right)\right)}{{b}^{6}} \cdot b}
\end{array}
Initial program 55.7%
Applied rewrites 55.7%
Taylor expanded in b around inf
Applied rewrites 92.1%
Taylor expanded in b around 0
Applied rewrites 91.5%
Final simplification 91.5%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma (* c -4.0) a (* b b))))
(if (<= b 0.0546)
(/ (* (fma b b (- t_0)) (/ 0.5 a)) (- (- b) (sqrt t_0)))
(pow
(/
(fma c (fma (* -2.0 c) (* (/ (* a a) (pow b 3.0)) -0.5) (/ a b)) (- b))
c)
-1.0))))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 0.0546 (same guard and small-b
   branch as the variant above; only the large-b series differs). */
double t_0 = fma((c * -4.0), a, (b * b));
double tmp;
if (b <= 0.0546) {
// Rationalized sqrt form over the discriminant t_0.
tmp = (fma(b, b, -t_0) * (0.5 / a)) / (-b - sqrt(t_0));
} else {
// Series form divided by c, inverted via pow(..., -1.0).
tmp = pow((fma(c, fma((-2.0 * c), (((a * a) / pow(b, 3.0)) * -0.5), (a / b)), -b) / c), -1.0);
}
return tmp;
}
function code(a, b, c) t_0 = fma(Float64(c * -4.0), a, Float64(b * b)) tmp = 0.0 if (b <= 0.0546) tmp = Float64(Float64(fma(b, b, Float64(-t_0)) * Float64(0.5 / a)) / Float64(Float64(-b) - sqrt(t_0))); else tmp = Float64(fma(c, fma(Float64(-2.0 * c), Float64(Float64(Float64(a * a) / (b ^ 3.0)) * -0.5), Float64(a / b)), Float64(-b)) / c) ^ -1.0; end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(c * -4.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.0546], N[(N[(N[(b * b + (-t$95$0)), $MachinePrecision] * N[(0.5 / a), $MachinePrecision]), $MachinePrecision] / N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Power[N[(N[(c * N[(N[(-2.0 * c), $MachinePrecision] * N[(N[(N[(a * a), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] * -0.5), $MachinePrecision] + N[(a / b), $MachinePrecision]), $MachinePrecision] + (-b)), $MachinePrecision] / c), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(c \cdot -4, a, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.0546:\\
\;\;\;\;\frac{\mathsf{fma}\left(b, b, -t\_0\right) \cdot \frac{0.5}{a}}{\left(-b\right) - \sqrt{t\_0}}\\
\mathbf{else}:\\
\;\;\;\;{\left(\frac{\mathsf{fma}\left(c, \mathsf{fma}\left(-2 \cdot c, \frac{a \cdot a}{{b}^{3}} \cdot -0.5, \frac{a}{b}\right), -b\right)}{c}\right)}^{-1}\\
\end{array}
\end{array}
if b < 0.0546000000000000027Initial program 84.7%
lift-sqrt.f64N/A
lift--.f64N/A
flip--N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-sqrt.f64N/A
clear-numN/A
Applied rewrites84.5%
lift-/.f64N/A
div-invN/A
lift-+.f64N/A
flip-+N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites86.8%
if 0.0546000000000000027 < b Initial program 52.7%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6452.6
Applied rewrites52.6%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6452.5
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6452.5
Applied rewrites52.5%
Taylor expanded in c around 0
lower-/.f64N/A
Applied rewrites91.8%
Final simplification91.3%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma (* c -4.0) a (* b b))))
(if (<= b 0.0546)
(/ (* (fma b b (- t_0)) (/ 0.5 a)) (- (- b) (sqrt t_0)))
(pow
(* (* (pow b -3.0) (fma (* (- 1.0 (/ (/ (* b b) a) c)) b) b (* c a))) a)
-1.0))))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 0.0546; large-b branch uses a
   b^-3-scaled fma form (Taylor expansions in a and b around 0, per the
   log below). Evaluation order is significant. */
double t_0 = fma((c * -4.0), a, (b * b));
double tmp;
if (b <= 0.0546) {
// Rationalized sqrt form over the discriminant t_0.
tmp = (fma(b, b, -t_0) * (0.5 / a)) / (-b - sqrt(t_0));
} else {
tmp = pow(((pow(b, -3.0) * fma(((1.0 - (((b * b) / a) / c)) * b), b, (c * a))) * a), -1.0);
}
return tmp;
}
function code(a, b, c) t_0 = fma(Float64(c * -4.0), a, Float64(b * b)) tmp = 0.0 if (b <= 0.0546) tmp = Float64(Float64(fma(b, b, Float64(-t_0)) * Float64(0.5 / a)) / Float64(Float64(-b) - sqrt(t_0))); else tmp = Float64(Float64((b ^ -3.0) * fma(Float64(Float64(1.0 - Float64(Float64(Float64(b * b) / a) / c)) * b), b, Float64(c * a))) * a) ^ -1.0; end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(c * -4.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.0546], N[(N[(N[(b * b + (-t$95$0)), $MachinePrecision] * N[(0.5 / a), $MachinePrecision]), $MachinePrecision] / N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Power[N[(N[(N[Power[b, -3.0], $MachinePrecision] * N[(N[(N[(1.0 - N[(N[(N[(b * b), $MachinePrecision] / a), $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision] * b), $MachinePrecision] * b + N[(c * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(c \cdot -4, a, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.0546:\\
\;\;\;\;\frac{\mathsf{fma}\left(b, b, -t\_0\right) \cdot \frac{0.5}{a}}{\left(-b\right) - \sqrt{t\_0}}\\
\mathbf{else}:\\
\;\;\;\;{\left(\left({b}^{-3} \cdot \mathsf{fma}\left(\left(1 - \frac{\frac{b \cdot b}{a}}{c}\right) \cdot b, b, c \cdot a\right)\right) \cdot a\right)}^{-1}\\
\end{array}
\end{array}
if b < 0.0546000000000000027Initial program 84.7%
lift-sqrt.f64N/A
lift--.f64N/A
flip--N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-sqrt.f64N/A
clear-numN/A
Applied rewrites84.5%
lift-/.f64N/A
div-invN/A
lift-+.f64N/A
flip-+N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites86.8%
if 0.0546000000000000027 < b Initial program 52.7%
Applied rewrites52.7%
Taylor expanded in a around 0
lower-/.f64N/A
Applied rewrites91.6%
Taylor expanded in b around 0
Applied rewrites91.4%
lift-/.f64N/A
lift-pow.f64N/A
unpow-1N/A
associate-/l/N/A
lower-/.f64N/A
Applied rewrites91.4%
Final simplification90.9%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma (* c -4.0) a (* b b))))
(if (<= b 0.3)
(/ (* (fma b b (- t_0)) (/ 0.5 a)) (- (- b) (sqrt t_0)))
(*
(fma
(/ (fma (* -2.0 (* a a)) c (* (- a) (* b b))) (pow b 5.0))
c
(/ -1.0 b))
c))))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 0.3 (threshold from the FPCore
   above). Large-b branch: fma series over b^5 times c (Taylor expansions
   in c and b around 0, per the log below). */
double t_0 = fma((c * -4.0), a, (b * b));
double tmp;
if (b <= 0.3) {
// Rationalized sqrt form over the discriminant t_0.
tmp = (fma(b, b, -t_0) * (0.5 / a)) / (-b - sqrt(t_0));
} else {
tmp = fma((fma((-2.0 * (a * a)), c, (-a * (b * b))) / pow(b, 5.0)), c, (-1.0 / b)) * c;
}
return tmp;
}
function code(a, b, c) t_0 = fma(Float64(c * -4.0), a, Float64(b * b)) tmp = 0.0 if (b <= 0.3) tmp = Float64(Float64(fma(b, b, Float64(-t_0)) * Float64(0.5 / a)) / Float64(Float64(-b) - sqrt(t_0))); else tmp = Float64(fma(Float64(fma(Float64(-2.0 * Float64(a * a)), c, Float64(Float64(-a) * Float64(b * b))) / (b ^ 5.0)), c, Float64(-1.0 / b)) * c); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(c * -4.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.3], N[(N[(N[(b * b + (-t$95$0)), $MachinePrecision] * N[(0.5 / a), $MachinePrecision]), $MachinePrecision] / N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(-2.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * c + N[((-a) * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * c + N[(-1.0 / b), $MachinePrecision]), $MachinePrecision] * c), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(c \cdot -4, a, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.3:\\
\;\;\;\;\frac{\mathsf{fma}\left(b, b, -t\_0\right) \cdot \frac{0.5}{a}}{\left(-b\right) - \sqrt{t\_0}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{\mathsf{fma}\left(-2 \cdot \left(a \cdot a\right), c, \left(-a\right) \cdot \left(b \cdot b\right)\right)}{{b}^{5}}, c, \frac{-1}{b}\right) \cdot c\\
\end{array}
\end{array}
if b < 0.299999999999999989Initial program 83.6%
lift-sqrt.f64N/A
lift--.f64N/A
flip--N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-sqrt.f64N/A
clear-numN/A
Applied rewrites83.2%
lift-/.f64N/A
div-invN/A
lift-+.f64N/A
flip-+N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites84.9%
if 0.299999999999999989 < b Initial program 51.6%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites91.8%
Taylor expanded in b around 0
Applied rewrites91.8%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma (* c -4.0) a (* b b))))
(if (<= b 0.3)
(/ (* (fma b b (- t_0)) (/ 0.5 a)) (- (- b) (sqrt t_0)))
(*
(/
(fma (- (fma b b (* a c))) (* b b) (* (* -2.0 (* a a)) (* c c)))
(pow b 5.0))
c))))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 0.3; large-b branch folds the
   series into a single fraction over b^5 (same expansions as the sibling
   variant, per the log below). */
double t_0 = fma((c * -4.0), a, (b * b));
double tmp;
if (b <= 0.3) {
// Rationalized sqrt form over the discriminant t_0.
tmp = (fma(b, b, -t_0) * (0.5 / a)) / (-b - sqrt(t_0));
} else {
tmp = (fma(-fma(b, b, (a * c)), (b * b), ((-2.0 * (a * a)) * (c * c))) / pow(b, 5.0)) * c;
}
return tmp;
}
function code(a, b, c) t_0 = fma(Float64(c * -4.0), a, Float64(b * b)) tmp = 0.0 if (b <= 0.3) tmp = Float64(Float64(fma(b, b, Float64(-t_0)) * Float64(0.5 / a)) / Float64(Float64(-b) - sqrt(t_0))); else tmp = Float64(Float64(fma(Float64(-fma(b, b, Float64(a * c))), Float64(b * b), Float64(Float64(-2.0 * Float64(a * a)) * Float64(c * c))) / (b ^ 5.0)) * c); end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(c * -4.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 0.3], N[(N[(N[(b * b + (-t$95$0)), $MachinePrecision] * N[(0.5 / a), $MachinePrecision]), $MachinePrecision] / N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[((-N[(b * b + N[(a * c), $MachinePrecision]), $MachinePrecision]) * N[(b * b), $MachinePrecision] + N[(N[(-2.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * c), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(c \cdot -4, a, b \cdot b\right)\\
\mathbf{if}\;b \leq 0.3:\\
\;\;\;\;\frac{\mathsf{fma}\left(b, b, -t\_0\right) \cdot \frac{0.5}{a}}{\left(-b\right) - \sqrt{t\_0}}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(-\mathsf{fma}\left(b, b, a \cdot c\right), b \cdot b, \left(-2 \cdot \left(a \cdot a\right)\right) \cdot \left(c \cdot c\right)\right)}{{b}^{5}} \cdot c\\
\end{array}
\end{array}
if b < 0.299999999999999989Initial program 83.6%
lift-sqrt.f64N/A
lift--.f64N/A
flip--N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-sqrt.f64N/A
clear-numN/A
Applied rewrites83.2%
lift-/.f64N/A
div-invN/A
lift-+.f64N/A
flip-+N/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites84.9%
if 0.299999999999999989 < b Initial program 51.6%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites91.8%
Taylor expanded in b around 0
Applied rewrites91.5%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma (* c -4.0) a (* b b))))
(if (<= b 135.0)
(/ (/ (- t_0 (* b b)) (+ (sqrt t_0) b)) (* 2.0 a))
(pow (/ (fma a (/ c b) (- b)) c) -1.0))))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 135. Small-b branch divides
   (t_0 - b^2) by (sqrt(t_0) + b) — a conjugate-style form over the
   discriminant t_0. Large-b branch: inverted fma series (Taylor in c,
   per the log below). */
double t_0 = fma((c * -4.0), a, (b * b));
double tmp;
if (b <= 135.0) {
tmp = ((t_0 - (b * b)) / (sqrt(t_0) + b)) / (2.0 * a);
} else {
tmp = pow((fma(a, (c / b), -b) / c), -1.0);
}
return tmp;
}
function code(a, b, c) t_0 = fma(Float64(c * -4.0), a, Float64(b * b)) tmp = 0.0 if (b <= 135.0) tmp = Float64(Float64(Float64(t_0 - Float64(b * b)) / Float64(sqrt(t_0) + b)) / Float64(2.0 * a)); else tmp = Float64(fma(a, Float64(c / b), Float64(-b)) / c) ^ -1.0; end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(c * -4.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 135.0], N[(N[(N[(t$95$0 - N[(b * b), $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[t$95$0], $MachinePrecision] + b), $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], N[Power[N[(N[(a * N[(c / b), $MachinePrecision] + (-b)), $MachinePrecision] / c), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(c \cdot -4, a, b \cdot b\right)\\
\mathbf{if}\;b \leq 135:\\
\;\;\;\;\frac{\frac{t\_0 - b \cdot b}{\sqrt{t\_0} + b}}{2 \cdot a}\\
\mathbf{else}:\\
\;\;\;\;{\left(\frac{\mathsf{fma}\left(a, \frac{c}{b}, -b\right)}{c}\right)}^{-1}\\
\end{array}
\end{array}
if b < 135Initial program 77.6%
lift-sqrt.f64N/A
lift--.f64N/A
flip--N/A
clear-numN/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-sqrt.f64N/A
clear-numN/A
Applied rewrites77.2%
lift-+.f64N/A
+-commutativeN/A
flip-+N/A
lower-/.f64N/A
Applied rewrites79.2%
if 135 < b Initial program 46.7%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6446.5
Applied rewrites46.5%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6446.4
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6446.4
Applied rewrites46.4%
Taylor expanded in c around 0
lower-/.f64N/A
+-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6489.3
Applied rewrites89.3%
Final simplification86.3%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (fma (* a c) -4.0 (* b b))))
(if (<= b 135.0)
(/ (- (* b b) t_0) (* (* 2.0 a) (- (- b) (sqrt t_0))))
(pow (/ (fma a (/ c b) (- b)) c) -1.0))))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 135; small-b branch negates
   both numerator and denominator relative to the sibling variant
   (b^2 - t_0 over (2a)(-b - sqrt(t_0))). Large-b branch is the same
   inverted fma series. */
double t_0 = fma((a * c), -4.0, (b * b));
double tmp;
if (b <= 135.0) {
tmp = ((b * b) - t_0) / ((2.0 * a) * (-b - sqrt(t_0)));
} else {
tmp = pow((fma(a, (c / b), -b) / c), -1.0);
}
return tmp;
}
function code(a, b, c) t_0 = fma(Float64(a * c), -4.0, Float64(b * b)) tmp = 0.0 if (b <= 135.0) tmp = Float64(Float64(Float64(b * b) - t_0) / Float64(Float64(2.0 * a) * Float64(Float64(-b) - sqrt(t_0)))); else tmp = Float64(fma(a, Float64(c / b), Float64(-b)) / c) ^ -1.0; end return tmp end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(a * c), $MachinePrecision] * -4.0 + N[(b * b), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[b, 135.0], N[(N[(N[(b * b), $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[(2.0 * a), $MachinePrecision] * N[((-b) - N[Sqrt[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[Power[N[(N[(a * N[(c / b), $MachinePrecision] + (-b)), $MachinePrecision] / c), $MachinePrecision], -1.0], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(a \cdot c, -4, b \cdot b\right)\\
\mathbf{if}\;b \leq 135:\\
\;\;\;\;\frac{b \cdot b - t\_0}{\left(2 \cdot a\right) \cdot \left(\left(-b\right) - \sqrt{t\_0}\right)}\\
\mathbf{else}:\\
\;\;\;\;{\left(\frac{\mathsf{fma}\left(a, \frac{c}{b}, -b\right)}{c}\right)}^{-1}\\
\end{array}
\end{array}
if b < 135Initial program 77.6%
Applied rewrites77.7%
Applied rewrites79.2%
if 135 < b Initial program 46.7%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6446.5
Applied rewrites46.5%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6446.4
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6446.4
Applied rewrites46.4%
Taylor expanded in c around 0
lower-/.f64N/A
+-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6489.3
Applied rewrites89.3%
Final simplification86.3%
(FPCore (a b c) :precision binary64 (if (<= b 0.31) (/ (+ (- b) (sqrt (fma b b (* (* -4.0 c) a)))) (* 2.0 a)) (pow (/ (fma a (/ c b) (- b)) c) -1.0)))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 0.31. Small-b branch is the
   original formula with the discriminant fused into a single
   fma(b, b, -4ca); large-b branch is the inverted fma series. */
double tmp;
if (b <= 0.31) {
tmp = (-b + sqrt(fma(b, b, ((-4.0 * c) * a)))) / (2.0 * a);
} else {
tmp = pow((fma(a, (c / b), -b) / c), -1.0);
}
return tmp;
}
function code(a, b, c) tmp = 0.0 if (b <= 0.31) tmp = Float64(Float64(Float64(-b) + sqrt(fma(b, b, Float64(Float64(-4.0 * c) * a)))) / Float64(2.0 * a)); else tmp = Float64(fma(a, Float64(c / b), Float64(-b)) / c) ^ -1.0; end return tmp end
code[a_, b_, c_] := If[LessEqual[b, 0.31], N[(N[((-b) + N[Sqrt[N[(b * b + N[(N[(-4.0 * c), $MachinePrecision] * a), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], N[Power[N[(N[(a * N[(c / b), $MachinePrecision] + (-b)), $MachinePrecision] / c), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 0.31:\\
\;\;\;\;\frac{\left(-b\right) + \sqrt{\mathsf{fma}\left(b, b, \left(-4 \cdot c\right) \cdot a\right)}}{2 \cdot a}\\
\mathbf{else}:\\
\;\;\;\;{\left(\frac{\mathsf{fma}\left(a, \frac{c}{b}, -b\right)}{c}\right)}^{-1}\\
\end{array}
\end{array}
if b < 0.309999999999999998Initial program 83.6%
lift--.f64N/A
sub-negN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
distribute-lft-neg-inN/A
*-commutativeN/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f64N/A
metadata-eval83.8
Applied rewrites83.8%
if 0.309999999999999998 < b Initial program 51.6%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6451.4
Applied rewrites51.4%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6451.4
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6451.4
Applied rewrites51.4%
Taylor expanded in c around 0
lower-/.f64N/A
+-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6486.3
Applied rewrites86.3%
Final simplification85.9%
(FPCore (a b c) :precision binary64 (if (<= b 0.31) (* (/ 0.5 a) (- (sqrt (fma (* -4.0 c) a (* b b))) b)) (pow (/ (fma a (/ c b) (- b)) c) -1.0)))
double code(double a, double b, double c) {
/* Herbie alternative, regime split at b <= 0.31; small-b branch factors
   out (0.5 / a) and fuses the discriminant into fma(-4c, a, b^2).
   Large-b branch is the inverted fma series. */
double tmp;
if (b <= 0.31) {
tmp = (0.5 / a) * (sqrt(fma((-4.0 * c), a, (b * b))) - b);
} else {
tmp = pow((fma(a, (c / b), -b) / c), -1.0);
}
return tmp;
}
function code(a, b, c) tmp = 0.0 if (b <= 0.31) tmp = Float64(Float64(0.5 / a) * Float64(sqrt(fma(Float64(-4.0 * c), a, Float64(b * b))) - b)); else tmp = Float64(fma(a, Float64(c / b), Float64(-b)) / c) ^ -1.0; end return tmp end
code[a_, b_, c_] := If[LessEqual[b, 0.31], N[(N[(0.5 / a), $MachinePrecision] * N[(N[Sqrt[N[(N[(-4.0 * c), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision]), $MachinePrecision], N[Power[N[(N[(a * N[(c / b), $MachinePrecision] + (-b)), $MachinePrecision] / c), $MachinePrecision], -1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 0.31:\\
\;\;\;\;\frac{0.5}{a} \cdot \left(\sqrt{\mathsf{fma}\left(-4 \cdot c, a, b \cdot b\right)} - b\right)\\
\mathbf{else}:\\
\;\;\;\;{\left(\frac{\mathsf{fma}\left(a, \frac{c}{b}, -b\right)}{c}\right)}^{-1}\\
\end{array}
\end{array}
if b < 0.309999999999999998Initial program 83.6%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lower-*.f64N/A
lift-*.f64N/A
associate-/r*N/A
metadata-evalN/A
lower-/.f6483.7
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6483.7
Applied rewrites83.7%
if 0.309999999999999998 < b Initial program 51.6%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6451.4
Applied rewrites51.4%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6451.4
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6451.4
Applied rewrites51.4%
Taylor expanded in c around 0
lower-/.f64N/A
+-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6486.3
Applied rewrites86.3%
Final simplification85.9%
(FPCore (a b c) :precision binary64 (pow (/ (fma a (/ c b) (- b)) c) -1.0))
double code(double a, double b, double c) {
/* Herbie alternative: unconditional inverted series, c / fma(a, c/b, -b)
   written via pow(..., -1.0) (Taylor expansions in c, per the log below). */
return pow((fma(a, (c / b), -b) / c), -1.0);
}
function code(a, b, c) return Float64(fma(a, Float64(c / b), Float64(-b)) / c) ^ -1.0 end
code[a_, b_, c_] := N[Power[N[(N[(a * N[(c / b), $MachinePrecision] + (-b)), $MachinePrecision] / c), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{\mathsf{fma}\left(a, \frac{c}{b}, -b\right)}{c}\right)}^{-1}
\end{array}
Initial program 55.7%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6455.6
Applied rewrites55.6%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6455.5
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6455.5
Applied rewrites55.5%
Taylor expanded in c around 0
lower-/.f64N/A
+-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6482.9
Applied rewrites82.9%
Final simplification82.9%
(FPCore (a b c) :precision binary64 (pow (fma -1.0 (/ b c) (/ a b)) -1.0))
double code(double a, double b, double c) {
/* Herbie alternative: reciprocal of fma(-1, b/c, a/b), i.e. 1/(a/b - b/c)
   (Taylor expanded in a around 0, per the log below). */
return pow(fma(-1.0, (b / c), (a / b)), -1.0);
}
function code(a, b, c) return fma(-1.0, Float64(b / c), Float64(a / b)) ^ -1.0 end
code[a_, b_, c_] := N[Power[N[(-1.0 * N[(b / c), $MachinePrecision] + N[(a / b), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(-1, \frac{b}{c}, \frac{a}{b}\right)\right)}^{-1}
\end{array}
Initial program 55.7%
Taylor expanded in c around inf
*-commutativeN/A
lower-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6455.6
Applied rewrites55.6%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6455.5
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6455.5
Applied rewrites55.5%
Taylor expanded in a around 0
lower-fma.f64N/A
lower-/.f64N/A
lower-/.f6482.9
Applied rewrites82.9%
Final simplification82.9%
(FPCore (a b c) :precision binary64 (/ (- c) b))
double code(double a, double b, double c) {
    /* Herbie's most-simplified alternative: first-order approximation of the
       root, x ~= -c / b. Parameter a is unused by construction; the signature
       is kept for interface compatibility with the other alternatives. */
    const double approx_root = -c / b;
    return approx_root;
}
real(8) function code(a, b, c)
! Herbie's most-simplified alternative: first-order approximation -c/b of
! the quadratic root (argument a is unused by construction).
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
public static double code(double a, double b, double c) {
// Herbie's most-simplified alternative: -c / b (a is unused by construction).
return -c / b;
}
def code(a, b, c): return -c / b
function code(a, b, c) return Float64(Float64(-c) / b) end
function tmp = code(a, b, c) tmp = -c / b; end
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 55.7%
Taylor expanded in a around 0
associate-*r/N/A
lower-/.f64N/A
mul-1-negN/A
lower-neg.f6464.0
Applied rewrites64.0%
herbie shell --seed 2024307
; Original Herbie job specification: "Quadratic roots, narrow range".
; Computes the larger root (-b + sqrt(b^2 - 4ac)) / (2a) in binary64, with
; each of a, b, c preconditioned to the open interval
; (1.0536712127723509e-8, 94906265.62425156).
(FPCore (a b c)
:name "Quadratic roots, narrow range"
:precision binary64
:pre (and (and (and (< 1.0536712127723509e-8 a) (< a 94906265.62425156)) (and (< 1.0536712127723509e-8 b) (< b 94906265.62425156))) (and (< 1.0536712127723509e-8 c) (< c 94906265.62425156)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))