
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 5.0) (pow x 5.0)))
double code(double x, double eps) {
return pow((x + eps), 5.0) - pow(x, 5.0);
}
! Naive double-precision evaluation of (x + eps)**5 - x**5.
! NOTE(review): cancels badly when |eps| << |x| — kept as the reference form.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((x + eps) ** 5.0d0) - (x ** 5.0d0)
end function
/**
 * Naive binary64 evaluation of (x + eps)^5 - x^5.
 * Suffers cancellation when |eps| is tiny relative to |x|.
 */
public static double code(double x, double eps) {
    double shifted = Math.pow(x + eps, 5.0); // (x + eps)^5
    double base = Math.pow(x, 5.0);          // x^5
    return shifted - base;
}
def code(x, eps): return math.pow((x + eps), 5.0) - math.pow(x, 5.0)
function code(x, eps) return Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) end
function tmp = code(x, eps) tmp = ((x + eps) ^ 5.0) - (x ^ 5.0); end
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{5} - {x}^{5}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (pow (+ x eps) 5.0) (pow x 5.0)))
double code(double x, double eps) {
return pow((x + eps), 5.0) - pow(x, 5.0);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((x + eps) ** 5.0d0) - (x ** 5.0d0)
end function
public static double code(double x, double eps) {
return Math.pow((x + eps), 5.0) - Math.pow(x, 5.0);
}
def code(x, eps): return math.pow((x + eps), 5.0) - math.pow(x, 5.0)
function code(x, eps) return Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) end
function tmp = code(x, eps) tmp = ((x + eps) ^ 5.0) - (x ^ 5.0); end
code[x_, eps_] := N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\left(x + \varepsilon\right)}^{5} - {x}^{5}
\end{array}
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (+ x eps) 5.0))
(t_1 (- t_0 (pow x 5.0)))
(t_2 (- t_0 (* (* x x) (* x (* x x))))))
(if (<= t_1 -2e-305)
t_2
(if (<= t_1 0.0)
(* (pow x 4.0) (- (* eps 5.0) (/ (* (* eps eps) -10.0) x)))
t_2))))
/* Branch-selected evaluation of (x + eps)^5 - x^5 (Herbie alternative).
 * On the cancellation-prone middle range (t_1 in (-2e-305, 0]) it switches
 * to a series form in eps; elsewhere it uses the expanded-product form.
 * NOTE(review): thresholds come from Herbie's sampled regimes — do not tune
 * by hand without re-deriving them. */
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0);
double t_1 = t_0 - pow(x, 5.0); /* naive difference, used only to pick a branch */
double t_2 = t_0 - ((x * x) * (x * (x * x))); /* x^5 via explicit products */
double tmp;
if (t_1 <= -2e-305) {
tmp = t_2;
} else if (t_1 <= 0.0) {
/* series form: x^4 * (5*eps + 10*eps^2/x) avoids the cancellation */
tmp = pow(x, 4.0) * ((eps * 5.0) - (((eps * eps) * -10.0) / x));
} else {
tmp = t_2;
}
return tmp;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
real(8) :: tmp
t_0 = (x + eps) ** 5.0d0
t_1 = t_0 - (x ** 5.0d0)
t_2 = t_0 - ((x * x) * (x * (x * x)))
if (t_1 <= (-2d-305)) then
tmp = t_2
else if (t_1 <= 0.0d0) then
tmp = (x ** 4.0d0) * ((eps * 5.0d0) - (((eps * eps) * (-10.0d0)) / x))
else
tmp = t_2
end if
code = tmp
end function
public static double code(double x, double eps) {
double t_0 = Math.pow((x + eps), 5.0);
double t_1 = t_0 - Math.pow(x, 5.0);
double t_2 = t_0 - ((x * x) * (x * (x * x)));
double tmp;
if (t_1 <= -2e-305) {
tmp = t_2;
} else if (t_1 <= 0.0) {
tmp = Math.pow(x, 4.0) * ((eps * 5.0) - (((eps * eps) * -10.0) / x));
} else {
tmp = t_2;
}
return tmp;
}
def code(x, eps): t_0 = math.pow((x + eps), 5.0) t_1 = t_0 - math.pow(x, 5.0) t_2 = t_0 - ((x * x) * (x * (x * x))) tmp = 0 if t_1 <= -2e-305: tmp = t_2 elif t_1 <= 0.0: tmp = math.pow(x, 4.0) * ((eps * 5.0) - (((eps * eps) * -10.0) / x)) else: tmp = t_2 return tmp
function code(x, eps) t_0 = Float64(x + eps) ^ 5.0 t_1 = Float64(t_0 - (x ^ 5.0)) t_2 = Float64(t_0 - Float64(Float64(x * x) * Float64(x * Float64(x * x)))) tmp = 0.0 if (t_1 <= -2e-305) tmp = t_2; elseif (t_1 <= 0.0) tmp = Float64((x ^ 4.0) * Float64(Float64(eps * 5.0) - Float64(Float64(Float64(eps * eps) * -10.0) / x))); else tmp = t_2; end return tmp end
function tmp_2 = code(x, eps) t_0 = (x + eps) ^ 5.0; t_1 = t_0 - (x ^ 5.0); t_2 = t_0 - ((x * x) * (x * (x * x))); tmp = 0.0; if (t_1 <= -2e-305) tmp = t_2; elseif (t_1 <= 0.0) tmp = (x ^ 4.0) * ((eps * 5.0) - (((eps * eps) * -10.0) / x)); else tmp = t_2; end tmp_2 = tmp; end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$0 - N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -2e-305], t$95$2, If[LessEqual[t$95$1, 0.0], N[(N[Power[x, 4.0], $MachinePrecision] * N[(N[(eps * 5.0), $MachinePrecision] - N[(N[(N[(eps * eps), $MachinePrecision] * -10.0), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$2]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5}\\
t_1 := t\_0 - {x}^{5}\\
t_2 := t\_0 - \left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\mathbf{if}\;t\_1 \leq -2 \cdot 10^{-305}:\\
\;\;\;\;t\_2\\
\mathbf{elif}\;t\_1 \leq 0:\\
\;\;\;\;{x}^{4} \cdot \left(\varepsilon \cdot 5 - \frac{\left(\varepsilon \cdot \varepsilon\right) \cdot -10}{x}\right)\\
\mathbf{else}:\\
\;\;\;\;t\_2\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -1.99999999999999999e-305 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 98.8%
--lowering--.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow2N/A
*-lowering-*.f64N/A
cube-multN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6498.9
Applied egg-rr98.9%
if -1.99999999999999999e-305 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in x around -inf
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
+-commutativeN/A
associate-+r+N/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
distribute-rgt1-inN/A
metadata-evalN/A
*-commutativeN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
Simplified99.9%
Final simplification99.7%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (+ x eps) 5.0))
(t_1 (- t_0 (pow x 5.0)))
(t_2 (- t_0 (* (* x x) (* x (* x x))))))
(if (<= t_1 -2e-305)
t_2
(if (<= t_1 0.0) (* eps (* 5.0 (pow x 4.0))) t_2))))
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0);
double t_1 = t_0 - pow(x, 5.0);
double t_2 = t_0 - ((x * x) * (x * (x * x)));
double tmp;
if (t_1 <= -2e-305) {
tmp = t_2;
} else if (t_1 <= 0.0) {
tmp = eps * (5.0 * pow(x, 4.0));
} else {
tmp = t_2;
}
return tmp;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
real(8) :: tmp
t_0 = (x + eps) ** 5.0d0
t_1 = t_0 - (x ** 5.0d0)
t_2 = t_0 - ((x * x) * (x * (x * x)))
if (t_1 <= (-2d-305)) then
tmp = t_2
else if (t_1 <= 0.0d0) then
tmp = eps * (5.0d0 * (x ** 4.0d0))
else
tmp = t_2
end if
code = tmp
end function
public static double code(double x, double eps) {
double t_0 = Math.pow((x + eps), 5.0);
double t_1 = t_0 - Math.pow(x, 5.0);
double t_2 = t_0 - ((x * x) * (x * (x * x)));
double tmp;
if (t_1 <= -2e-305) {
tmp = t_2;
} else if (t_1 <= 0.0) {
tmp = eps * (5.0 * Math.pow(x, 4.0));
} else {
tmp = t_2;
}
return tmp;
}
def code(x, eps): t_0 = math.pow((x + eps), 5.0) t_1 = t_0 - math.pow(x, 5.0) t_2 = t_0 - ((x * x) * (x * (x * x))) tmp = 0 if t_1 <= -2e-305: tmp = t_2 elif t_1 <= 0.0: tmp = eps * (5.0 * math.pow(x, 4.0)) else: tmp = t_2 return tmp
function code(x, eps) t_0 = Float64(x + eps) ^ 5.0 t_1 = Float64(t_0 - (x ^ 5.0)) t_2 = Float64(t_0 - Float64(Float64(x * x) * Float64(x * Float64(x * x)))) tmp = 0.0 if (t_1 <= -2e-305) tmp = t_2; elseif (t_1 <= 0.0) tmp = Float64(eps * Float64(5.0 * (x ^ 4.0))); else tmp = t_2; end return tmp end
function tmp_2 = code(x, eps) t_0 = (x + eps) ^ 5.0; t_1 = t_0 - (x ^ 5.0); t_2 = t_0 - ((x * x) * (x * (x * x))); tmp = 0.0; if (t_1 <= -2e-305) tmp = t_2; elseif (t_1 <= 0.0) tmp = eps * (5.0 * (x ^ 4.0)); else tmp = t_2; end tmp_2 = tmp; end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(t$95$0 - N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -2e-305], t$95$2, If[LessEqual[t$95$1, 0.0], N[(eps * N[(5.0 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$2]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5}\\
t_1 := t\_0 - {x}^{5}\\
t_2 := t\_0 - \left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\mathbf{if}\;t\_1 \leq -2 \cdot 10^{-305}:\\
\;\;\;\;t\_2\\
\mathbf{elif}\;t\_1 \leq 0:\\
\;\;\;\;\varepsilon \cdot \left(5 \cdot {x}^{4}\right)\\
\mathbf{else}:\\
\;\;\;\;t\_2\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -1.99999999999999999e-305 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 98.8%
--lowering--.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow2N/A
*-lowering-*.f64N/A
cube-multN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6498.9
Applied egg-rr98.9%
if -1.99999999999999999e-305 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in x around inf
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
+-commutativeN/A
distribute-lft-inN/A
*-lowering-*.f64N/A
distribute-lft1-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
pow-lowering-pow.f6499.9
Simplified99.9%
Final simplification99.7%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (pow (+ x eps) 5.0) (pow x 5.0)))
(t_1 (* (pow eps 5.0) (/ (fma 5.0 x eps) eps))))
(if (<= t_0 -2e-289)
t_1
(if (<= t_0 0.0)
(/
eps
(/
1.0
(fma eps (* (+ x eps) (* (* x x) 10.0)) (* 5.0 (* x (* x (* x x)))))))
t_1))))
/* Branch-selected evaluation of (x + eps)^5 - x^5 using fma (Herbie alternative).
 * Large-|result| regime uses a Taylor form in eps; the cancellation-prone
 * middle regime (t_0 in (-2e-289, 0]) uses eps times a polynomial in x, eps.
 * NOTE(review): thresholds come from Herbie's sampled regimes. */
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0) - pow(x, 5.0); /* naive value, used only to pick a branch */
double t_1 = pow(eps, 5.0) * (fma(5.0, x, eps) / eps); /* eps^4 * (5x + eps) */
double tmp;
if (t_0 <= -2e-289) {
tmp = t_1;
} else if (t_0 <= 0.0) {
/* eps * (10*eps*(x+eps)*x^2 + 5*x^4), written via fma; the 1/(1/...)
   double reciprocal mirrors Herbie's emitted expression */
tmp = eps / (1.0 / fma(eps, ((x + eps) * ((x * x) * 10.0)), (5.0 * (x * (x * (x * x))))));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, eps) t_0 = Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) t_1 = Float64((eps ^ 5.0) * Float64(fma(5.0, x, eps) / eps)) tmp = 0.0 if (t_0 <= -2e-289) tmp = t_1; elseif (t_0 <= 0.0) tmp = Float64(eps / Float64(1.0 / fma(eps, Float64(Float64(x + eps) * Float64(Float64(x * x) * 10.0)), Float64(5.0 * Float64(x * Float64(x * Float64(x * x))))))); else tmp = t_1; end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Power[eps, 5.0], $MachinePrecision] * N[(N[(5.0 * x + eps), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e-289], t$95$1, If[LessEqual[t$95$0, 0.0], N[(eps / N[(1.0 / N[(eps * N[(N[(x + eps), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]), $MachinePrecision] + N[(5.0 * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\
t_1 := {\varepsilon}^{5} \cdot \frac{\mathsf{fma}\left(5, x, \varepsilon\right)}{\varepsilon}\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{-289}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;\frac{\varepsilon}{\frac{1}{\mathsf{fma}\left(\varepsilon, \left(x + \varepsilon\right) \cdot \left(\left(x \cdot x\right) \cdot 10\right), 5 \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)}}\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -2e-289 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 99.5%
Taylor expanded in eps around inf
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
+-commutativeN/A
distribute-lft1-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f6495.9
Simplified95.9%
Taylor expanded in eps around 0
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f6496.0
Simplified96.0%
if -2e-289 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.5%
flip3-+N/A
clear-numN/A
un-div-invN/A
Applied egg-rr99.5%
Final simplification98.8%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (pow (+ x eps) 5.0) (pow x 5.0)))
(t_1 (* (fma 5.0 x eps) (pow eps 4.0))))
(if (<= t_0 -2e-289)
t_1
(if (<= t_0 0.0)
(/
eps
(/
1.0
(fma eps (* (+ x eps) (* (* x x) 10.0)) (* 5.0 (* x (* x (* x x)))))))
t_1))))
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0) - pow(x, 5.0);
double t_1 = fma(5.0, x, eps) * pow(eps, 4.0);
double tmp;
if (t_0 <= -2e-289) {
tmp = t_1;
} else if (t_0 <= 0.0) {
tmp = eps / (1.0 / fma(eps, ((x + eps) * ((x * x) * 10.0)), (5.0 * (x * (x * (x * x))))));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, eps) t_0 = Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) t_1 = Float64(fma(5.0, x, eps) * (eps ^ 4.0)) tmp = 0.0 if (t_0 <= -2e-289) tmp = t_1; elseif (t_0 <= 0.0) tmp = Float64(eps / Float64(1.0 / fma(eps, Float64(Float64(x + eps) * Float64(Float64(x * x) * 10.0)), Float64(5.0 * Float64(x * Float64(x * Float64(x * x))))))); else tmp = t_1; end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(5.0 * x + eps), $MachinePrecision] * N[Power[eps, 4.0], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e-289], t$95$1, If[LessEqual[t$95$0, 0.0], N[(eps / N[(1.0 / N[(eps * N[(N[(x + eps), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]), $MachinePrecision] + N[(5.0 * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\
t_1 := \mathsf{fma}\left(5, x, \varepsilon\right) \cdot {\varepsilon}^{4}\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{-289}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;\frac{\varepsilon}{\frac{1}{\mathsf{fma}\left(\varepsilon, \left(x + \varepsilon\right) \cdot \left(\left(x \cdot x\right) \cdot 10\right), 5 \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)}}\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -2e-289 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 99.5%
Taylor expanded in x around 0
*-commutativeN/A
distribute-lft1-inN/A
metadata-evalN/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
pow-lowering-pow.f6496.0
Simplified96.0%
Taylor expanded in x around 0
metadata-evalN/A
pow-plusN/A
*-commutativeN/A
*-commutativeN/A
associate-*r*N/A
distribute-rgt-inN/A
+-commutativeN/A
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f6495.6
Simplified95.6%
if -2e-289 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.5%
flip3-+N/A
clear-numN/A
un-div-invN/A
Applied egg-rr99.5%
Final simplification98.7%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (pow (+ x eps) 5.0) (pow x 5.0)))
(t_1 (* (fma 5.0 x eps) (* (* eps eps) (* eps eps)))))
(if (<= t_0 -2e-289)
t_1
(if (<= t_0 0.0)
(/
eps
(/
1.0
(fma eps (* (+ x eps) (* (* x x) 10.0)) (* 5.0 (* x (* x (* x x)))))))
t_1))))
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0) - pow(x, 5.0);
double t_1 = fma(5.0, x, eps) * ((eps * eps) * (eps * eps));
double tmp;
if (t_0 <= -2e-289) {
tmp = t_1;
} else if (t_0 <= 0.0) {
tmp = eps / (1.0 / fma(eps, ((x + eps) * ((x * x) * 10.0)), (5.0 * (x * (x * (x * x))))));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, eps) t_0 = Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) t_1 = Float64(fma(5.0, x, eps) * Float64(Float64(eps * eps) * Float64(eps * eps))) tmp = 0.0 if (t_0 <= -2e-289) tmp = t_1; elseif (t_0 <= 0.0) tmp = Float64(eps / Float64(1.0 / fma(eps, Float64(Float64(x + eps) * Float64(Float64(x * x) * 10.0)), Float64(5.0 * Float64(x * Float64(x * Float64(x * x))))))); else tmp = t_1; end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(5.0 * x + eps), $MachinePrecision] * N[(N[(eps * eps), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e-289], t$95$1, If[LessEqual[t$95$0, 0.0], N[(eps / N[(1.0 / N[(eps * N[(N[(x + eps), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 10.0), $MachinePrecision]), $MachinePrecision] + N[(5.0 * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\
t_1 := \mathsf{fma}\left(5, x, \varepsilon\right) \cdot \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{-289}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;\frac{\varepsilon}{\frac{1}{\mathsf{fma}\left(\varepsilon, \left(x + \varepsilon\right) \cdot \left(\left(x \cdot x\right) \cdot 10\right), 5 \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)}}\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -2e-289 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 99.5%
Taylor expanded in eps around inf
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
+-commutativeN/A
distribute-lft1-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f6495.9
Simplified95.9%
Taylor expanded in eps around 0
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f6496.0
Simplified96.0%
*-commutativeN/A
div-invN/A
associate-*l*N/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
pow2N/A
pow2N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6495.2
Applied egg-rr95.2%
if -2e-289 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.5%
flip3-+N/A
clear-numN/A
un-div-invN/A
Applied egg-rr99.5%
Final simplification98.6%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (pow (+ x eps) 5.0) (pow x 5.0)))
(t_1 (* (fma 5.0 x eps) (* (* eps eps) (* eps eps)))))
(if (<= t_0 -2e-289)
t_1
(if (<= t_0 0.0)
(* x (* x (fma 5.0 (* eps (* x x)) (* 10.0 (* eps (* eps (+ x eps)))))))
t_1))))
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0) - pow(x, 5.0);
double t_1 = fma(5.0, x, eps) * ((eps * eps) * (eps * eps));
double tmp;
if (t_0 <= -2e-289) {
tmp = t_1;
} else if (t_0 <= 0.0) {
tmp = x * (x * fma(5.0, (eps * (x * x)), (10.0 * (eps * (eps * (x + eps))))));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, eps) t_0 = Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) t_1 = Float64(fma(5.0, x, eps) * Float64(Float64(eps * eps) * Float64(eps * eps))) tmp = 0.0 if (t_0 <= -2e-289) tmp = t_1; elseif (t_0 <= 0.0) tmp = Float64(x * Float64(x * fma(5.0, Float64(eps * Float64(x * x)), Float64(10.0 * Float64(eps * Float64(eps * Float64(x + eps))))))); else tmp = t_1; end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(5.0 * x + eps), $MachinePrecision] * N[(N[(eps * eps), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e-289], t$95$1, If[LessEqual[t$95$0, 0.0], N[(x * N[(x * N[(5.0 * N[(eps * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(10.0 * N[(eps * N[(eps * N[(x + eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\
t_1 := \mathsf{fma}\left(5, x, \varepsilon\right) \cdot \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{-289}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;x \cdot \left(x \cdot \mathsf{fma}\left(5, \varepsilon \cdot \left(x \cdot x\right), 10 \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \left(x + \varepsilon\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -2e-289 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 99.5%
Taylor expanded in eps around inf
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
+-commutativeN/A
distribute-lft1-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f6495.9
Simplified95.9%
Taylor expanded in eps around 0
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f6496.0
Simplified96.0%
*-commutativeN/A
div-invN/A
associate-*l*N/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
pow2N/A
pow2N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6495.2
Applied egg-rr95.2%
if -2e-289 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.5%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
metadata-evalN/A
pow-powN/A
pow2N/A
pow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
associate-*l*N/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f6499.4
Applied egg-rr99.4%
Taylor expanded in x around 0
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
associate-*r*N/A
associate-+l+N/A
associate-*l*N/A
associate-*r*N/A
unpow2N/A
accelerator-lowering-fma.f64N/A
Simplified99.5%
Final simplification98.6%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (pow (+ x eps) 5.0) (pow x 5.0)))
(t_1 (* (fma 5.0 x eps) (* (* eps eps) (* eps eps)))))
(if (<= t_0 -2e-289)
t_1
(if (<= t_0 0.0)
(* eps (* (* x (* x x)) (fma eps 10.0 (* x 5.0))))
t_1))))
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0) - pow(x, 5.0);
double t_1 = fma(5.0, x, eps) * ((eps * eps) * (eps * eps));
double tmp;
if (t_0 <= -2e-289) {
tmp = t_1;
} else if (t_0 <= 0.0) {
tmp = eps * ((x * (x * x)) * fma(eps, 10.0, (x * 5.0)));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, eps) t_0 = Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) t_1 = Float64(fma(5.0, x, eps) * Float64(Float64(eps * eps) * Float64(eps * eps))) tmp = 0.0 if (t_0 <= -2e-289) tmp = t_1; elseif (t_0 <= 0.0) tmp = Float64(eps * Float64(Float64(x * Float64(x * x)) * fma(eps, 10.0, Float64(x * 5.0)))); else tmp = t_1; end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(5.0 * x + eps), $MachinePrecision] * N[(N[(eps * eps), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e-289], t$95$1, If[LessEqual[t$95$0, 0.0], N[(eps * N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(eps * 10.0 + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\
t_1 := \mathsf{fma}\left(5, x, \varepsilon\right) \cdot \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{-289}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;\varepsilon \cdot \left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \mathsf{fma}\left(\varepsilon, 10, x \cdot 5\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -2e-289 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 99.5%
Taylor expanded in eps around inf
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
+-commutativeN/A
distribute-lft1-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f6495.9
Simplified95.9%
Taylor expanded in eps around 0
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f6496.0
Simplified96.0%
*-commutativeN/A
div-invN/A
associate-*l*N/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
pow2N/A
pow2N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6495.2
Applied egg-rr95.2%
if -2e-289 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.5%
Taylor expanded in eps around 0
metadata-evalN/A
pow-plusN/A
*-commutativeN/A
associate-*l*N/A
associate-*r*N/A
distribute-rgt-inN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6499.4
Simplified99.4%
Final simplification98.6%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (pow (+ x eps) 5.0) (pow x 5.0)))
(t_1 (* (fma 5.0 x eps) (* (* eps eps) (* eps eps)))))
(if (<= t_0 -2e-305)
t_1
(if (<= t_0 0.0) (* (* x 5.0) (* x (* eps (* x x)))) t_1))))
double code(double x, double eps) {
double t_0 = pow((x + eps), 5.0) - pow(x, 5.0);
double t_1 = fma(5.0, x, eps) * ((eps * eps) * (eps * eps));
double tmp;
if (t_0 <= -2e-305) {
tmp = t_1;
} else if (t_0 <= 0.0) {
tmp = (x * 5.0) * (x * (eps * (x * x)));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, eps) t_0 = Float64((Float64(x + eps) ^ 5.0) - (x ^ 5.0)) t_1 = Float64(fma(5.0, x, eps) * Float64(Float64(eps * eps) * Float64(eps * eps))) tmp = 0.0 if (t_0 <= -2e-305) tmp = t_1; elseif (t_0 <= 0.0) tmp = Float64(Float64(x * 5.0) * Float64(x * Float64(eps * Float64(x * x)))); else tmp = t_1; end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[(x + eps), $MachinePrecision], 5.0], $MachinePrecision] - N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(5.0 * x + eps), $MachinePrecision] * N[(N[(eps * eps), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -2e-305], t$95$1, If[LessEqual[t$95$0, 0.0], N[(N[(x * 5.0), $MachinePrecision] * N[(x * N[(eps * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(x + \varepsilon\right)}^{5} - {x}^{5}\\
t_1 := \mathsf{fma}\left(5, x, \varepsilon\right) \cdot \left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)\\
\mathbf{if}\;t\_0 \leq -2 \cdot 10^{-305}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;t\_0 \leq 0:\\
\;\;\;\;\left(x \cdot 5\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(x \cdot x\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < -1.99999999999999999e-305 or 0.0 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) Initial program 98.8%
Taylor expanded in eps around inf
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
+-commutativeN/A
distribute-lft1-inN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f6494.3
Simplified94.3%
Taylor expanded in eps around 0
/-lowering-/.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f6494.3
Simplified94.3%
*-commutativeN/A
div-invN/A
associate-*l*N/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
pow2N/A
pow2N/A
*-lowering-*.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6493.5
Applied egg-rr93.5%
if -1.99999999999999999e-305 < (-.f64 (pow.f64 (+.f64 x eps) #s(literal 5 binary64)) (pow.f64 x #s(literal 5 binary64))) < 0.0Initial program 83.9%
Taylor expanded in x around inf
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
+-commutativeN/A
distribute-lft-inN/A
*-lowering-*.f64N/A
distribute-lft1-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
pow-lowering-pow.f6499.9
Simplified99.9%
metadata-evalN/A
pow-plusN/A
cube-unmultN/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.8
Applied egg-rr99.8%
*-commutativeN/A
associate-*l*N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.9
Applied egg-rr99.9%
Final simplification98.6%
(FPCore (x eps) :precision binary64 (* (* x 5.0) (* x (* eps (* x x)))))
/* Leading Taylor term of (x + eps)^5 - x^5: 5 * eps * x^4,
 * grouped exactly as (x * 5) * (x * (eps * (x * x))). */
double code(double x, double eps) {
    double five_x = x * 5.0;
    double rest = x * (eps * (x * x));
    return five_x * rest;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (x * 5.0d0) * (x * (eps * (x * x)))
end function
public static double code(double x, double eps) {
return (x * 5.0) * (x * (eps * (x * x)));
}
def code(x, eps): return (x * 5.0) * (x * (eps * (x * x)))
function code(x, eps) return Float64(Float64(x * 5.0) * Float64(x * Float64(eps * Float64(x * x)))) end
% Herbie-generated alternative for (x+eps)^5 - x^5: evaluates 5*eps*x^4
% with the generated multiplication order (reassociation changes rounding).
function tmp = code(x, eps) tmp = (x * 5.0) * (x * (eps * (x * x))); end
(* Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4, with N[] forcing machine-precision rounding at each product in the generated order. *)
code[x_, eps_] := N[(N[(x * 5.0), $MachinePrecision] * N[(x * N[(eps * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot 5\right) \cdot \left(x \cdot \left(\varepsilon \cdot \left(x \cdot x\right)\right)\right)
\end{array}
Initial program 86.9%
Taylor expanded in x around inf
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
+-commutativeN/A
distribute-lft-inN/A
*-lowering-*.f64N/A
distribute-lft1-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
pow-lowering-pow.f6481.6
Simplified81.6%
metadata-evalN/A
pow-plusN/A
cube-unmultN/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6481.5
Applied egg-rr81.5%
*-commutativeN/A
associate-*l*N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6481.5
Applied egg-rr81.5%
Final simplification 81.5%
(FPCore (x eps) :precision binary64 (* (* x eps) (* 5.0 (* x (* x x)))))
/* Herbie-generated alternative for (x+eps)^5 - x^5: evaluates 5*eps*x^4
 * as (x*eps) * (5 * x^3). Keep the product order as generated;
 * reassociation changes the floating-point rounding. */
double code(double x, double eps) {
return (x * eps) * (5.0 * (x * (x * x)));
}
! Herbie-generated alternative for (x+eps)**5 - x**5: 5*eps*x**4 grouped as
! (x*eps) * (5 * x**3). Keep the product order exactly as written.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (x * eps) * (5.0d0 * (x * (x * x)))
end function
// Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
// (x*eps) * (5 * x^3). Product order is rounding-sensitive; do not reassociate.
public static double code(double x, double eps) {
return (x * eps) * (5.0 * (x * (x * x)));
}
def code(x, eps):
    """Herbie alternative for (x+eps)**5 - x**5: computes 5*eps*x**4.

    Reproduces (x * eps) * (5.0 * (x * (x * x))) with identical
    operations and association order, so rounding is unchanged.
    """
    prod = x * eps
    cube = x * (x * x)
    return prod * (5.0 * cube)
# Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
# (x*eps) * (5 * x^3), with explicit Float64 rounding at every product.
function code(x, eps) return Float64(Float64(x * eps) * Float64(5.0 * Float64(x * Float64(x * x)))) end
% Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
% (x*eps) * (5 * x^3); keep the generated product order.
function tmp = code(x, eps) tmp = (x * eps) * (5.0 * (x * (x * x))); end
(* Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as (x*eps) * (5 * x^3), with N[] rounding at each product in the generated order. *)
code[x_, eps_] := N[(N[(x * eps), $MachinePrecision] * N[(5.0 * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \varepsilon\right) \cdot \left(5 \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)
\end{array}
Initial program 86.9%
Taylor expanded in x around inf
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
+-commutativeN/A
distribute-lft-inN/A
*-lowering-*.f64N/A
distribute-lft1-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
pow-lowering-pow.f6481.6
Simplified81.6%
metadata-evalN/A
pow-plusN/A
cube-unmultN/A
*-commutativeN/A
*-commutativeN/A
associate-*l*N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6481.5
Applied egg-rr81.5%
Final simplification 81.5%
(FPCore (x eps) :precision binary64 (* eps (* (* x (* x x)) (* x 5.0))))
/* Herbie-generated alternative for (x+eps)^5 - x^5: evaluates 5*eps*x^4
 * as eps * (x^3 * (5x)). Product order as generated; reassociation
 * changes the floating-point rounding. */
double code(double x, double eps) {
return eps * ((x * (x * x)) * (x * 5.0));
}
! Herbie-generated alternative for (x+eps)**5 - x**5: 5*eps*x**4 grouped as
! eps * (x**3 * (5*x)). Keep the product order exactly as written.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((x * (x * x)) * (x * 5.0d0))
end function
// Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
// eps * (x^3 * (5x)). Product order is rounding-sensitive; do not reassociate.
public static double code(double x, double eps) {
return eps * ((x * (x * x)) * (x * 5.0));
}
def code(x, eps):
    """Herbie alternative for (x+eps)**5 - x**5: computes 5*eps*x**4.

    Reproduces eps * ((x * (x * x)) * (x * 5.0)) with identical
    operations and association order, so rounding is unchanged.
    """
    cube = x * (x * x)
    scaled = x * 5.0
    return eps * (cube * scaled)
# Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
# eps * (x^3 * (5x)), with explicit Float64 rounding at every product.
function code(x, eps) return Float64(eps * Float64(Float64(x * Float64(x * x)) * Float64(x * 5.0))) end
% Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
% eps * (x^3 * (5x)); keep the generated product order.
function tmp = code(x, eps) tmp = eps * ((x * (x * x)) * (x * 5.0)); end
(* Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as eps * (x^3 * (5x)), with N[] rounding at each product in the generated order. *)
code[x_, eps_] := N[(eps * N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\left(x \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot 5\right)\right)
\end{array}
Initial program 86.9%
Taylor expanded in x around inf
distribute-rgt-inN/A
*-commutativeN/A
associate-*r*N/A
+-commutativeN/A
distribute-lft-inN/A
*-lowering-*.f64N/A
distribute-lft1-inN/A
metadata-evalN/A
*-lowering-*.f64N/A
pow-lowering-pow.f6481.6
Simplified81.6%
metadata-evalN/A
pow-plusN/A
cube-unmultN/A
*-commutativeN/A
*-commutativeN/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6481.5
Applied egg-rr81.5%
Final simplification 81.5%
(FPCore (x eps) :precision binary64 (* eps (* (* x x) (* 5.0 (* x x)))))
/* Herbie-generated alternative for (x+eps)^5 - x^5: evaluates 5*eps*x^4
 * as eps * (x^2 * (5 * x^2)). Product order as generated; reassociation
 * changes the floating-point rounding. */
double code(double x, double eps) {
return eps * ((x * x) * (5.0 * (x * x)));
}
! Herbie-generated alternative for (x+eps)**5 - x**5: 5*eps*x**4 grouped as
! eps * (x**2 * (5*x**2)). Keep the product order exactly as written.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((x * x) * (5.0d0 * (x * x)))
end function
// Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
// eps * (x^2 * (5 * x^2)). Product order is rounding-sensitive; do not reassociate.
public static double code(double x, double eps) {
return eps * ((x * x) * (5.0 * (x * x)));
}
def code(x, eps):
    """Herbie alternative for (x+eps)**5 - x**5: computes 5*eps*x**4.

    Reproduces eps * ((x * x) * (5.0 * (x * x))) with identical
    operations and association order; x*x is computed once and reused,
    which yields bit-identical results since the product is deterministic.
    """
    sq = x * x
    return eps * (sq * (5.0 * sq))
# Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
# eps * (x^2 * (5*x^2)), with explicit Float64 rounding at every product.
function code(x, eps) return Float64(eps * Float64(Float64(x * x) * Float64(5.0 * Float64(x * x)))) end
% Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as
% eps * (x^2 * (5*x^2)); keep the generated product order.
function tmp = code(x, eps) tmp = eps * ((x * x) * (5.0 * (x * x))); end
(* Herbie-generated alternative for (x+eps)^5 - x^5: 5*eps*x^4 grouped as eps * (x^2 * (5*x^2)), with N[] rounding at each product in the generated order. *)
code[x_, eps_] := N[(eps * N[(N[(x * x), $MachinePrecision] * N[(5.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\left(x \cdot x\right) \cdot \left(5 \cdot \left(x \cdot x\right)\right)\right)
\end{array}
Initial program 86.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified81.8%
Taylor expanded in eps around 0
metadata-evalN/A
pow-plusN/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
unpow3N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6481.5
Simplified81.5%
(FPCore (x eps) :precision binary64 (* 10.0 (* eps (* x (* eps (* x eps))))))
/* Herbie-generated alternative for (x+eps)^5 - x^5: evaluates the
 * 10*x^2*eps^3 term of the expansion. Product order as generated;
 * reassociation changes the floating-point rounding. */
double code(double x, double eps) {
return 10.0 * (eps * (x * (eps * (x * eps))));
}
! Herbie-generated alternative for (x+eps)**5 - x**5: evaluates the
! 10*x**2*eps**3 term of the expansion. Keep the product order as written.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = 10.0d0 * (eps * (x * (eps * (x * eps))))
end function
// Herbie-generated alternative for (x+eps)^5 - x^5: evaluates the
// 10*x^2*eps^3 term of the expansion. Product order is rounding-sensitive.
public static double code(double x, double eps) {
return 10.0 * (eps * (x * (eps * (x * eps))));
}
def code(x, eps):
    """Herbie alternative for (x+eps)**5 - x**5: the 10*x**2*eps**3 term.

    Reproduces 10.0 * (eps * (x * (eps * (x * eps)))) with identical
    operations and association order, so rounding is unchanged.
    """
    inner = x * eps
    mid = x * (eps * inner)
    return 10.0 * (eps * mid)
# Herbie-generated alternative for (x+eps)^5 - x^5: the 10*x^2*eps^3 term,
# with explicit Float64 rounding at every product in the generated order.
function code(x, eps) return Float64(10.0 * Float64(eps * Float64(x * Float64(eps * Float64(x * eps))))) end
% Herbie-generated alternative for (x+eps)^5 - x^5: the 10*x^2*eps^3 term;
% keep the generated product order.
function tmp = code(x, eps) tmp = 10.0 * (eps * (x * (eps * (x * eps)))); end
(* Herbie-generated alternative for (x+eps)^5 - x^5: the 10*x^2*eps^3 term, with N[] rounding at each product in the generated order. *)
code[x_, eps_] := N[(10.0 * N[(eps * N[(x * N[(eps * N[(x * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
10 \cdot \left(\varepsilon \cdot \left(x \cdot \left(\varepsilon \cdot \left(x \cdot \varepsilon\right)\right)\right)\right)
\end{array}
Initial program 86.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified81.9%
Taylor expanded in eps around inf
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f6468.6
Simplified68.6%
Final simplification 68.6%
herbie shell --seed 2024204
(FPCore (x eps)
:name "ENA, Section 1.4, Exercise 4b, n=5"
:precision binary64
:pre (and (and (<= -1000000000.0 x) (<= x 1000000000.0)) (and (<= -1.0 eps) (<= eps 1.0)))
(- (pow (+ x eps) 5.0) (pow x 5.0)))