
(FPCore (x eps) :precision binary64 (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Evaluates ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
! in real(8); direct transcription of the binary64 FPCore specification.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Computes ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2.
 * Same double operations as the reference one-liner; results match exactly.
 */
public static double code(double x, double eps) {
    final double invEps = 1.0 / eps;
    final double slowDecay = Math.exp(-((1.0 - eps) * x));
    final double fastDecay = Math.exp(-((1.0 + eps) * x));
    return ((1.0 + invEps) * slowDecay - (invEps - 1.0) * fastDecay) / 2.0;
}
def code(x, eps):
    """((1 + 1/eps)*e**(-(1-eps)x) - (1/eps - 1)*e**(-(1+eps)x)) / 2.

    Same float operations as the reference one-liner, so results match exactly.
    """
    inv_eps = 1.0 / eps
    slow = math.exp(-((1.0 - eps) * x))
    fast = math.exp(-((1.0 + eps) * x))
    return ((1.0 + inv_eps) * slow - (inv_eps - 1.0) * fast) / 2.0
# ((1 + 1/eps)*exp(-(1-eps)x) - (1/eps - 1)*exp(-(1+eps)x)) / 2, with the
# original Float64 rounding wrappers preserved at every intermediate step.
function code(x, eps)
    inv_eps = Float64(1.0 / eps)
    slow = exp(Float64(-Float64(Float64(1.0 - eps) * x)))
    fast = exp(Float64(-Float64(Float64(1.0 + eps) * x)))
    return Float64(Float64(Float64(Float64(1.0 + inv_eps) * slow) - Float64(Float64(inv_eps - 1.0) * fast)) / 2.0)
end
% ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
% using the same arithmetic operations as the reference expression.
function tmp = code(x, eps)
  inv_eps = 1.0 / eps;
  slow = exp(-((1.0 - eps) * x));
  fast = exp(-((1.0 + eps) * x));
  tmp = ((1.0 + inv_eps) * slow - (inv_eps - 1.0) * fast) / 2.0;
end
(* ((1 + 1/eps) E^(-(1-eps) x) - (1/eps - 1) E^(-(1+eps) x)) / 2, with every intermediate rounded to $MachinePrecision to mirror binary64 evaluation. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Evaluates ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
! in real(8); direct transcription of the binary64 FPCore specification.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Computes ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2.
 * Same double operations as the reference one-liner; results match exactly.
 */
public static double code(double x, double eps) {
    final double invEps = 1.0 / eps;
    final double slowDecay = Math.exp(-((1.0 - eps) * x));
    final double fastDecay = Math.exp(-((1.0 + eps) * x));
    return ((1.0 + invEps) * slowDecay - (invEps - 1.0) * fastDecay) / 2.0;
}
def code(x, eps):
    """((1 + 1/eps)*e**(-(1-eps)x) - (1/eps - 1)*e**(-(1+eps)x)) / 2.

    Same float operations as the reference one-liner, so results match exactly.
    """
    inv_eps = 1.0 / eps
    slow = math.exp(-((1.0 - eps) * x))
    fast = math.exp(-((1.0 + eps) * x))
    return ((1.0 + inv_eps) * slow - (inv_eps - 1.0) * fast) / 2.0
# ((1 + 1/eps)*exp(-(1-eps)x) - (1/eps - 1)*exp(-(1+eps)x)) / 2, with the
# original Float64 rounding wrappers preserved at every intermediate step.
function code(x, eps)
    inv_eps = Float64(1.0 / eps)
    slow = exp(Float64(-Float64(Float64(1.0 - eps) * x)))
    fast = exp(Float64(-Float64(Float64(1.0 + eps) * x)))
    return Float64(Float64(Float64(Float64(1.0 + inv_eps) * slow) - Float64(Float64(inv_eps - 1.0) * fast)) / 2.0)
end
% ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
% using the same arithmetic operations as the reference expression.
function tmp = code(x, eps)
  inv_eps = 1.0 / eps;
  slow = exp(-((1.0 - eps) * x));
  fast = exp(-((1.0 + eps) * x));
  tmp = ((1.0 + inv_eps) * slow - (inv_eps - 1.0) * fast) / 2.0;
end
(* ((1 + 1/eps) E^(-(1-eps) x) - (1/eps - 1) E^(-(1+eps) x)) / 2, with every intermediate rounded to $MachinePrecision to mirror binary64 evaluation. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<=
(-
(* (exp (* (+ -1.0 eps_m) x)) (+ (/ 1.0 eps_m) 1.0))
(* (exp (* (- -1.0 eps_m) x)) (- (/ 1.0 eps_m) 1.0)))
0.0)
(/ (+ x 1.0) (exp x))
(* 0.5 (- (* 1.0 (exp (* x eps_m))) (/ -1.0 (exp (fma x eps_m x)))))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (((exp(((-1.0 + eps_m) * x)) * ((1.0 / eps_m) + 1.0)) - (exp(((-1.0 - eps_m) * x)) * ((1.0 / eps_m) - 1.0))) <= 0.0) {
tmp = (x + 1.0) / exp(x);
} else {
tmp = 0.5 * ((1.0 * exp((x * eps_m))) - (-1.0 / exp(fma(x, eps_m, x))));
}
return tmp;
}
# Sign test on the rewritten numerator selects (x+1)/e^x or the 0.5*(e^(x*eps_m) + e^-(x*eps_m+x)) form; code kept byte-identical to the generated report.
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (Float64(Float64(exp(Float64(Float64(-1.0 + eps_m) * x)) * Float64(Float64(1.0 / eps_m) + 1.0)) - Float64(exp(Float64(Float64(-1.0 - eps_m) * x)) * Float64(Float64(1.0 / eps_m) - 1.0))) <= 0.0) tmp = Float64(Float64(x + 1.0) / exp(x)); else tmp = Float64(0.5 * Float64(Float64(1.0 * exp(Float64(x * eps_m))) - Float64(-1.0 / exp(fma(x, eps_m, x))))); end return tmp end
(* Sign test selects (x+1)/E^x or the 0.5*(E^(x eps_m) + E^-(x eps_m + x)) form; every step rounded to $MachinePrecision. *)
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[N[(N[(N[Exp[N[(N[(-1.0 + eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[(N[Exp[N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], N[(0.5 * N[(N[(1.0 * N[Exp[N[(x * eps$95$m), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(-1.0 / N[Exp[N[(x * eps$95$m + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;e^{\left(-1 + eps\_m\right) \cdot x} \cdot \left(\frac{1}{eps\_m} + 1\right) - e^{\left(-1 - eps\_m\right) \cdot x} \cdot \left(\frac{1}{eps\_m} - 1\right) \leq 0:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\
\mathbf{else}:\\
\;\;\;\;0.5 \cdot \left(1 \cdot e^{x \cdot eps\_m} - \frac{-1}{e^{\mathsf{fma}\left(x, eps\_m, x\right)}}\right)\\
\end{array}
\end{array}
if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 0.0Initial program 41.9%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Applied rewrites100.0%
if 0.0 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) Initial program 100.0%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f6499.0
Applied rewrites99.0%
Taylor expanded in eps around inf
lower-*.f6499.0
Applied rewrites99.0%
Taylor expanded in eps around inf
Applied rewrites100.0%
lift-/.f64N/A
div-invN/A
metadata-evalN/A
lower-*.f64100.0
Applied rewrites100.0%
Final simplification100.0%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<=
(-
(* (exp (* (+ -1.0 eps_m) x)) (+ (/ 1.0 eps_m) 1.0))
(* (exp (* (- -1.0 eps_m) x)) (- (/ 1.0 eps_m) 1.0)))
0.0)
(/ 1.0 (fma (fma (fma 0.375 x -0.3333333333333333) x 0.5) (* x x) 1.0))
(fma (* 0.3333333333333333 x) (* x x) 1.0)))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (((exp(((-1.0 + eps_m) * x)) * ((1.0 / eps_m) + 1.0)) - (exp(((-1.0 - eps_m) * x)) * ((1.0 / eps_m) - 1.0))) <= 0.0) {
tmp = 1.0 / fma(fma(fma(0.375, x, -0.3333333333333333), x, 0.5), (x * x), 1.0);
} else {
tmp = fma((0.3333333333333333 * x), (x * x), 1.0);
}
return tmp;
}
# Sign test selects a reciprocal-cubic or cubic fma polynomial approximation; code kept byte-identical to the generated report.
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (Float64(Float64(exp(Float64(Float64(-1.0 + eps_m) * x)) * Float64(Float64(1.0 / eps_m) + 1.0)) - Float64(exp(Float64(Float64(-1.0 - eps_m) * x)) * Float64(Float64(1.0 / eps_m) - 1.0))) <= 0.0) tmp = Float64(1.0 / fma(fma(fma(0.375, x, -0.3333333333333333), x, 0.5), Float64(x * x), 1.0)); else tmp = fma(Float64(0.3333333333333333 * x), Float64(x * x), 1.0); end return tmp end
(* Sign test selects a reciprocal-cubic or cubic polynomial approximation; every step rounded to $MachinePrecision. *)
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[N[(N[(N[Exp[N[(N[(-1.0 + eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[(N[Exp[N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0], N[(1.0 / N[(N[(N[(0.375 * x + -0.3333333333333333), $MachinePrecision] * x + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(0.3333333333333333 * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;e^{\left(-1 + eps\_m\right) \cdot x} \cdot \left(\frac{1}{eps\_m} + 1\right) - e^{\left(-1 - eps\_m\right) \cdot x} \cdot \left(\frac{1}{eps\_m} - 1\right) \leq 0:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.375, x, -0.3333333333333333\right), x, 0.5\right), x \cdot x, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.3333333333333333 \cdot x, x \cdot x, 1\right)\\
\end{array}
\end{array}
if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 0.0Initial program 41.9%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites82.3%
Applied rewrites82.3%
Taylor expanded in x around 0
Applied rewrites91.7%
if 0.0 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites20.8%
Taylor expanded in x around 0
Applied rewrites38.8%
Taylor expanded in x around inf
Applied rewrites38.8%
Final simplification58.4%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<=
(-
(* (exp (* (+ -1.0 eps_m) x)) (+ (/ 1.0 eps_m) 1.0))
(* (exp (* (- -1.0 eps_m) x)) (- (/ 1.0 eps_m) 1.0)))
0.0)
(/ 1.0 (fma (fma -0.3333333333333333 x 0.5) (* x x) 1.0))
(fma (* 0.3333333333333333 x) (* x x) 1.0)))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (((exp(((-1.0 + eps_m) * x)) * ((1.0 / eps_m) + 1.0)) - (exp(((-1.0 - eps_m) * x)) * ((1.0 / eps_m) - 1.0))) <= 0.0) {
tmp = 1.0 / fma(fma(-0.3333333333333333, x, 0.5), (x * x), 1.0);
} else {
tmp = fma((0.3333333333333333 * x), (x * x), 1.0);
}
return tmp;
}
# Sign test selects a reciprocal-quadratic or cubic fma polynomial approximation; code kept byte-identical to the generated report.
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (Float64(Float64(exp(Float64(Float64(-1.0 + eps_m) * x)) * Float64(Float64(1.0 / eps_m) + 1.0)) - Float64(exp(Float64(Float64(-1.0 - eps_m) * x)) * Float64(Float64(1.0 / eps_m) - 1.0))) <= 0.0) tmp = Float64(1.0 / fma(fma(-0.3333333333333333, x, 0.5), Float64(x * x), 1.0)); else tmp = fma(Float64(0.3333333333333333 * x), Float64(x * x), 1.0); end return tmp end
(* Sign test selects a reciprocal-quadratic or cubic polynomial approximation; every step rounded to $MachinePrecision. *)
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[N[(N[(N[Exp[N[(N[(-1.0 + eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[(N[Exp[N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0], N[(1.0 / N[(N[(-0.3333333333333333 * x + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(0.3333333333333333 * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;e^{\left(-1 + eps\_m\right) \cdot x} \cdot \left(\frac{1}{eps\_m} + 1\right) - e^{\left(-1 - eps\_m\right) \cdot x} \cdot \left(\frac{1}{eps\_m} - 1\right) \leq 0:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(\mathsf{fma}\left(-0.3333333333333333, x, 0.5\right), x \cdot x, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.3333333333333333 \cdot x, x \cdot x, 1\right)\\
\end{array}
\end{array}
if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 0.0Initial program 41.9%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites82.3%
Applied rewrites82.3%
Taylor expanded in x around 0
Applied rewrites88.7%
if 0.0 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites20.8%
Taylor expanded in x around 0
Applied rewrites38.8%
Taylor expanded in x around inf
Applied rewrites38.8%
Final simplification57.3%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<= eps_m 5.6e-20)
(/ (+ x 1.0) (exp x))
(if (<= eps_m 2e+58)
(/ (- (* 1.0 (exp (* (+ -1.0 eps_m) x))) -1.0) 2.0)
(/
(-
(* 1.0 (+ (/ 1.0 eps_m) 1.0))
(* (exp (* (- -1.0 eps_m) x)) (- (/ 1.0 eps_m) 1.0)))
2.0))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (eps_m <= 5.6e-20) {
tmp = (x + 1.0) / exp(x);
} else if (eps_m <= 2e+58) {
tmp = ((1.0 * exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0;
} else {
tmp = ((1.0 * ((1.0 / eps_m) + 1.0)) - (exp(((-1.0 - eps_m) * x)) * ((1.0 / eps_m) - 1.0))) / 2.0;
}
return tmp;
}
eps_m = abs(eps)
! Piecewise evaluation selected by the magnitude of eps_m:
!   eps_m <= 5.6e-20 : series form (x + 1) / exp(x)
!   eps_m <= 2e+58   : (exp((eps_m - 1)*x) + 1) / 2
!   otherwise        : keep the 1/eps_m terms explicitly.
real(8) function code(x, eps_m)
real(8), intent (in) :: x
real(8), intent (in) :: eps_m
real(8) :: tmp
if (eps_m <= 5.6d-20) then
tmp = (x + 1.0d0) / exp(x)
else if (eps_m <= 2d+58) then
tmp = ((1.0d0 * exp((((-1.0d0) + eps_m) * x))) - (-1.0d0)) / 2.0d0
else
tmp = ((1.0d0 * ((1.0d0 / eps_m) + 1.0d0)) - (exp((((-1.0d0) - eps_m) * x)) * ((1.0d0 / eps_m) - 1.0d0))) / 2.0d0
end if
code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Piecewise evaluation selected by the magnitude of eps_m: tiny eps uses the
 * series form (x + 1) / e^x, moderate eps the (e^{(eps_m-1)x} + 1) / 2 form,
 * and huge eps keeps the 1/eps_m terms explicitly. Same double operations
 * as the reference, restructured with guard clauses.
 */
public static double code(double x, double eps_m) {
    if (eps_m <= 5.6e-20) {
        return (x + 1.0) / Math.exp(x);
    }
    if (eps_m <= 2e+58) {
        return ((1.0 * Math.exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0;
    }
    final double invEps = 1.0 / eps_m;
    return ((1.0 * (invEps + 1.0)) - (Math.exp(((-1.0 - eps_m) * x)) * (invEps - 1.0))) / 2.0;
}
eps_m = math.fabs(eps) def code(x, eps_m): tmp = 0 if eps_m <= 5.6e-20: tmp = (x + 1.0) / math.exp(x) elif eps_m <= 2e+58: tmp = ((1.0 * math.exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0 else: tmp = ((1.0 * ((1.0 / eps_m) + 1.0)) - (math.exp(((-1.0 - eps_m) * x)) * ((1.0 / eps_m) - 1.0))) / 2.0 return tmp
# Piecewise by eps_m magnitude: series form, (e^((eps_m-1)x)+1)/2, or explicit 1/eps_m terms; code kept byte-identical to the generated report.
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (eps_m <= 5.6e-20) tmp = Float64(Float64(x + 1.0) / exp(x)); elseif (eps_m <= 2e+58) tmp = Float64(Float64(Float64(1.0 * exp(Float64(Float64(-1.0 + eps_m) * x))) - -1.0) / 2.0); else tmp = Float64(Float64(Float64(1.0 * Float64(Float64(1.0 / eps_m) + 1.0)) - Float64(exp(Float64(Float64(-1.0 - eps_m) * x)) * Float64(Float64(1.0 / eps_m) - 1.0))) / 2.0); end return tmp end
% Piecewise by eps_m magnitude: series form, (exp((eps_m-1)*x)+1)/2, or explicit 1/eps_m terms; code kept byte-identical to the generated report.
eps_m = abs(eps); function tmp_2 = code(x, eps_m) tmp = 0.0; if (eps_m <= 5.6e-20) tmp = (x + 1.0) / exp(x); elseif (eps_m <= 2e+58) tmp = ((1.0 * exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0; else tmp = ((1.0 * ((1.0 / eps_m) + 1.0)) - (exp(((-1.0 - eps_m) * x)) * ((1.0 / eps_m) - 1.0))) / 2.0; end tmp_2 = tmp; end
(* Piecewise by eps_m magnitude: series form, (E^((eps_m-1)x)+1)/2, or explicit 1/eps_m terms; every step rounded to $MachinePrecision. *)
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 5.6e-20], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], If[LessEqual[eps$95$m, 2e+58], N[(N[(N[(1.0 * N[Exp[N[(N[(-1.0 + eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(1.0 * N[(N[(1.0 / eps$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[(N[Exp[N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 5.6 \cdot 10^{-20}:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\
\mathbf{elif}\;eps\_m \leq 2 \cdot 10^{+58}:\\
\;\;\;\;\frac{1 \cdot e^{\left(-1 + eps\_m\right) \cdot x} - -1}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 \cdot \left(\frac{1}{eps\_m} + 1\right) - e^{\left(-1 - eps\_m\right) \cdot x} \cdot \left(\frac{1}{eps\_m} - 1\right)}{2}\\
\end{array}
\end{array}
if eps < 5.6000000000000005e-20Initial program 70.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites61.3%
Applied rewrites61.3%
if 5.6000000000000005e-20 < eps < 1.99999999999999989e58Initial program 80.6%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f6470.3
Applied rewrites70.3%
Taylor expanded in x around 0
Applied rewrites70.3%
Taylor expanded in eps around inf
Applied rewrites100.0%
if 1.99999999999999989e58 < eps Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites65.0%
Final simplification63.7%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(let* ((t_0 (exp (* (+ -1.0 eps_m) x))))
(if (<= x -215.0)
(* (- (/ (exp (- x)) eps_m) -1.0) 0.5)
(if (<= x 4.8e-20)
(/ (- (* 1.0 t_0) -1.0) 2.0)
(if (<= x 1e+192)
(/ (+ x 1.0) (exp x))
(/ (- (* t_0 (+ (/ 1.0 eps_m) 1.0)) -1.0) 2.0))))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double t_0 = exp(((-1.0 + eps_m) * x));
double tmp;
if (x <= -215.0) {
tmp = ((exp(-x) / eps_m) - -1.0) * 0.5;
} else if (x <= 4.8e-20) {
tmp = ((1.0 * t_0) - -1.0) / 2.0;
} else if (x <= 1e+192) {
tmp = (x + 1.0) / exp(x);
} else {
tmp = ((t_0 * ((1.0 / eps_m) + 1.0)) - -1.0) / 2.0;
}
return tmp;
}
eps_m = abs(eps)
! Piecewise in x; t_0 = exp((eps_m - 1)*x) is shared by two branches.
! NOTE(review): t_0 is computed unconditionally, so it may overflow or
! underflow even on branches that do not use it — matches the reference.
real(8) function code(x, eps_m)
real(8), intent (in) :: x
real(8), intent (in) :: eps_m
real(8) :: t_0
real(8) :: tmp
t_0 = exp((((-1.0d0) + eps_m) * x))
if (x <= (-215.0d0)) then
tmp = ((exp(-x) / eps_m) - (-1.0d0)) * 0.5d0
else if (x <= 4.8d-20) then
tmp = ((1.0d0 * t_0) - (-1.0d0)) / 2.0d0
else if (x <= 1d+192) then
tmp = (x + 1.0d0) / exp(x)
else
tmp = ((t_0 * ((1.0d0 / eps_m) + 1.0d0)) - (-1.0d0)) / 2.0d0
end if
code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Piecewise evaluation selected by the magnitude of x; the shared factor
 * t0 = e^{(eps_m - 1) x} is computed unconditionally, as in the reference.
 * Same double operations, restructured with guard clauses.
 */
public static double code(double x, double eps_m) {
    final double t0 = Math.exp(((-1.0 + eps_m) * x));
    if (x <= -215.0) {
        return ((Math.exp(-x) / eps_m) - -1.0) * 0.5;
    }
    if (x <= 4.8e-20) {
        return ((1.0 * t0) - -1.0) / 2.0;
    }
    if (x <= 1e+192) {
        return (x + 1.0) / Math.exp(x);
    }
    return ((t0 * ((1.0 / eps_m) + 1.0)) - -1.0) / 2.0;
}
eps_m = math.fabs(eps) def code(x, eps_m): t_0 = math.exp(((-1.0 + eps_m) * x)) tmp = 0 if x <= -215.0: tmp = ((math.exp(-x) / eps_m) - -1.0) * 0.5 elif x <= 4.8e-20: tmp = ((1.0 * t_0) - -1.0) / 2.0 elif x <= 1e+192: tmp = (x + 1.0) / math.exp(x) else: tmp = ((t_0 * ((1.0 / eps_m) + 1.0)) - -1.0) / 2.0 return tmp
# Piecewise in x with shared t_0 = exp((eps_m - 1)*x), computed unconditionally; code kept byte-identical to the generated report.
eps_m = abs(eps) function code(x, eps_m) t_0 = exp(Float64(Float64(-1.0 + eps_m) * x)) tmp = 0.0 if (x <= -215.0) tmp = Float64(Float64(Float64(exp(Float64(-x)) / eps_m) - -1.0) * 0.5); elseif (x <= 4.8e-20) tmp = Float64(Float64(Float64(1.0 * t_0) - -1.0) / 2.0); elseif (x <= 1e+192) tmp = Float64(Float64(x + 1.0) / exp(x)); else tmp = Float64(Float64(Float64(t_0 * Float64(Float64(1.0 / eps_m) + 1.0)) - -1.0) / 2.0); end return tmp end
% Piecewise in x with shared t_0 = exp((eps_m - 1)*x), computed unconditionally; code kept byte-identical to the generated report.
eps_m = abs(eps); function tmp_2 = code(x, eps_m) t_0 = exp(((-1.0 + eps_m) * x)); tmp = 0.0; if (x <= -215.0) tmp = ((exp(-x) / eps_m) - -1.0) * 0.5; elseif (x <= 4.8e-20) tmp = ((1.0 * t_0) - -1.0) / 2.0; elseif (x <= 1e+192) tmp = (x + 1.0) / exp(x); else tmp = ((t_0 * ((1.0 / eps_m) + 1.0)) - -1.0) / 2.0; end tmp_2 = tmp; end
eps_m = N[Abs[eps], $MachinePrecision]
(* Piecewise in x with shared t_0 = E^((eps_m - 1) x) bound in a Block; every step rounded to $MachinePrecision. *)
code[x_, eps$95$m_] := Block[{t$95$0 = N[Exp[N[(N[(-1.0 + eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[x, -215.0], N[(N[(N[(N[Exp[(-x)], $MachinePrecision] / eps$95$m), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 4.8e-20], N[(N[(N[(1.0 * t$95$0), $MachinePrecision] - -1.0), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1e+192], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], N[(N[(N[(t$95$0 * N[(N[(1.0 / eps$95$m), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
t_0 := e^{\left(-1 + eps\_m\right) \cdot x}\\
\mathbf{if}\;x \leq -215:\\
\;\;\;\;\left(\frac{e^{-x}}{eps\_m} - -1\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 4.8 \cdot 10^{-20}:\\
\;\;\;\;\frac{1 \cdot t\_0 - -1}{2}\\
\mathbf{elif}\;x \leq 10^{+192}:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\
\mathbf{else}:\\
\;\;\;\;\frac{t\_0 \cdot \left(\frac{1}{eps\_m} + 1\right) - -1}{2}\\
\end{array}
\end{array}
if x < -215Initial program 100.0%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites63.7%
Taylor expanded in eps around 0
lower-/.f64N/A
neg-mul-1N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f6437.5
Applied rewrites37.5%
lift-/.f64N/A
div-invN/A
metadata-evalN/A
Applied rewrites37.5%
if -215 < x < 4.79999999999999986e-20Initial program 59.9%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f6458.5
Applied rewrites58.5%
Taylor expanded in x around 0
Applied rewrites38.9%
Taylor expanded in eps around inf
Applied rewrites80.1%
if 4.79999999999999986e-20 < x < 1.00000000000000004e192Initial program 90.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites66.4%
Applied rewrites66.4%
if 1.00000000000000004e192 < x Initial program 100.0%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites37.9%
Final simplification63.5%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<= eps_m 5.6e-20)
(/ (+ x 1.0) (exp x))
(if (<= eps_m 2e+67)
(/ (- (* 1.0 (exp (* (+ -1.0 eps_m) x))) -1.0) 2.0)
(/ (- (+ (/ 1.0 eps_m) 1.0) (/ -1.0 (exp (fma eps_m x x)))) 2.0))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (eps_m <= 5.6e-20) {
tmp = (x + 1.0) / exp(x);
} else if (eps_m <= 2e+67) {
tmp = ((1.0 * exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0;
} else {
tmp = (((1.0 / eps_m) + 1.0) - (-1.0 / exp(fma(eps_m, x, x)))) / 2.0;
}
return tmp;
}
# Piecewise by eps_m magnitude with a 2e+67 crossover and fma-based exponent in the huge-eps branch; code kept byte-identical to the generated report.
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (eps_m <= 5.6e-20) tmp = Float64(Float64(x + 1.0) / exp(x)); elseif (eps_m <= 2e+67) tmp = Float64(Float64(Float64(1.0 * exp(Float64(Float64(-1.0 + eps_m) * x))) - -1.0) / 2.0); else tmp = Float64(Float64(Float64(Float64(1.0 / eps_m) + 1.0) - Float64(-1.0 / exp(fma(eps_m, x, x)))) / 2.0); end return tmp end
(* Piecewise by eps_m magnitude with a 2e+67 crossover; every step rounded to $MachinePrecision. *)
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 5.6e-20], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], If[LessEqual[eps$95$m, 2e+67], N[(N[(N[(1.0 * N[Exp[N[(N[(-1.0 + eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(1.0 / eps$95$m), $MachinePrecision] + 1.0), $MachinePrecision] - N[(-1.0 / N[Exp[N[(eps$95$m * x + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 5.6 \cdot 10^{-20}:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\
\mathbf{elif}\;eps\_m \leq 2 \cdot 10^{+67}:\\
\;\;\;\;\frac{1 \cdot e^{\left(-1 + eps\_m\right) \cdot x} - -1}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(\frac{1}{eps\_m} + 1\right) - \frac{-1}{e^{\mathsf{fma}\left(eps\_m, x, x\right)}}}{2}\\
\end{array}
\end{array}
if eps < 5.6000000000000005e-20Initial program 70.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites61.3%
Applied rewrites61.3%
if 5.6000000000000005e-20 < eps < 1.99999999999999997e67Initial program 83.9%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f6475.3
Applied rewrites75.3%
Taylor expanded in x around 0
Applied rewrites67.2%
Taylor expanded in eps around inf
Applied rewrites91.9%
if 1.99999999999999997e67 < eps Initial program 100.0%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6465.5
Applied rewrites65.5%
Final simplification63.7%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(let* ((t_0 (/ (- (* 1.0 (exp (* (+ -1.0 eps_m) x))) -1.0) 2.0)))
(if (<= x -215.0)
(* (- (/ (exp (- x)) eps_m) -1.0) 0.5)
(if (<= x 4.8e-20) t_0 (if (<= x 1e+192) (/ (+ x 1.0) (exp x)) t_0)))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double t_0 = ((1.0 * exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0;
double tmp;
if (x <= -215.0) {
tmp = ((exp(-x) / eps_m) - -1.0) * 0.5;
} else if (x <= 4.8e-20) {
tmp = t_0;
} else if (x <= 1e+192) {
tmp = (x + 1.0) / exp(x);
} else {
tmp = t_0;
}
return tmp;
}
eps_m = abs(eps)
! Piecewise in x; the shared fallback t_0 = (exp((eps_m - 1)*x) + 1) / 2
! serves both the tiny-x and huge-x branches.
! NOTE(review): t_0 is computed unconditionally, so it may overflow or
! underflow even on branches that do not use it — matches the reference.
real(8) function code(x, eps_m)
real(8), intent (in) :: x
real(8), intent (in) :: eps_m
real(8) :: t_0
real(8) :: tmp
t_0 = ((1.0d0 * exp((((-1.0d0) + eps_m) * x))) - (-1.0d0)) / 2.0d0
if (x <= (-215.0d0)) then
tmp = ((exp(-x) / eps_m) - (-1.0d0)) * 0.5d0
else if (x <= 4.8d-20) then
tmp = t_0
else if (x <= 1d+192) then
tmp = (x + 1.0d0) / exp(x)
else
tmp = t_0
end if
code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Piecewise in x; the shared fallback t0 = (e^{(eps_m-1)x} + 1) / 2 serves
 * both the tiny-x and huge-x branches and is computed unconditionally,
 * matching the reference. Same double operations, guard-clause structure.
 */
public static double code(double x, double eps_m) {
    final double t0 = ((1.0 * Math.exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0;
    if (x <= -215.0) {
        return ((Math.exp(-x) / eps_m) - -1.0) * 0.5;
    }
    if (x <= 4.8e-20) {
        return t0;
    }
    if (x <= 1e+192) {
        return (x + 1.0) / Math.exp(x);
    }
    return t0;
}
eps_m = math.fabs(eps) def code(x, eps_m): t_0 = ((1.0 * math.exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0 tmp = 0 if x <= -215.0: tmp = ((math.exp(-x) / eps_m) - -1.0) * 0.5 elif x <= 4.8e-20: tmp = t_0 elif x <= 1e+192: tmp = (x + 1.0) / math.exp(x) else: tmp = t_0 return tmp
# Piecewise in x with shared fallback t_0 = (exp((eps_m - 1)*x) + 1)/2, computed unconditionally; code kept byte-identical to the generated report.
eps_m = abs(eps) function code(x, eps_m) t_0 = Float64(Float64(Float64(1.0 * exp(Float64(Float64(-1.0 + eps_m) * x))) - -1.0) / 2.0) tmp = 0.0 if (x <= -215.0) tmp = Float64(Float64(Float64(exp(Float64(-x)) / eps_m) - -1.0) * 0.5); elseif (x <= 4.8e-20) tmp = t_0; elseif (x <= 1e+192) tmp = Float64(Float64(x + 1.0) / exp(x)); else tmp = t_0; end return tmp end
% Piecewise in x with shared fallback t_0 = (exp((eps_m - 1)*x) + 1)/2, computed unconditionally; code kept byte-identical to the generated report.
eps_m = abs(eps); function tmp_2 = code(x, eps_m) t_0 = ((1.0 * exp(((-1.0 + eps_m) * x))) - -1.0) / 2.0; tmp = 0.0; if (x <= -215.0) tmp = ((exp(-x) / eps_m) - -1.0) * 0.5; elseif (x <= 4.8e-20) tmp = t_0; elseif (x <= 1e+192) tmp = (x + 1.0) / exp(x); else tmp = t_0; end tmp_2 = tmp; end
eps_m = N[Abs[eps], $MachinePrecision]
(* Piecewise in x with shared fallback t_0 = (E^((eps_m - 1) x) + 1)/2 bound in a Block; every step rounded to $MachinePrecision. *)
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(N[(1.0 * N[Exp[N[(N[(-1.0 + eps$95$m), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[x, -215.0], N[(N[(N[(N[Exp[(-x)], $MachinePrecision] / eps$95$m), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 4.8e-20], t$95$0, If[LessEqual[x, 1e+192], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], t$95$0]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
t_0 := \frac{1 \cdot e^{\left(-1 + eps\_m\right) \cdot x} - -1}{2}\\
\mathbf{if}\;x \leq -215:\\
\;\;\;\;\left(\frac{e^{-x}}{eps\_m} - -1\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 4.8 \cdot 10^{-20}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 10^{+192}:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -215Initial program 100.0%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites63.7%
Taylor expanded in eps around 0
lower-/.f64N/A
neg-mul-1N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f6437.5
Applied rewrites37.5%
lift-/.f64N/A
div-invN/A
metadata-evalN/A
Applied rewrites37.5%
if -215 < x < 4.79999999999999986e-20 or 1.00000000000000004e192 < x Initial program 69.2%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f6468.2
Applied rewrites68.2%
Taylor expanded in x around 0
Applied rewrites38.7%
Taylor expanded in eps around inf
Applied rewrites70.2%
if 4.79999999999999986e-20 < x < 1.00000000000000004e192Initial program 90.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites66.4%
Applied rewrites66.4%
Final simplification63.5%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<= x -800.0)
(* (- (/ (exp (- x)) eps_m) -1.0) 0.5)
(if (<= x 1e+192)
(/ (+ x 1.0) (exp x))
(fma (fma 0.3333333333333333 x -0.5) (* x x) 1.0))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (x <= -800.0) {
tmp = ((exp(-x) / eps_m) - -1.0) * 0.5;
} else if (x <= 1e+192) {
tmp = (x + 1.0) / exp(x);
} else {
tmp = fma(fma(0.3333333333333333, x, -0.5), (x * x), 1.0);
}
return tmp;
}
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (x <= -800.0) tmp = Float64(Float64(Float64(exp(Float64(-x)) / eps_m) - -1.0) * 0.5); elseif (x <= 1e+192) tmp = Float64(Float64(x + 1.0) / exp(x)); else tmp = fma(fma(0.3333333333333333, x, -0.5), Float64(x * x), 1.0); end return tmp end
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[x, -800.0], N[(N[(N[(N[Exp[(-x)], $MachinePrecision] / eps$95$m), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 1e+192], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], N[(N[(0.3333333333333333 * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;x \leq -800:\\
\;\;\;\;\left(\frac{e^{-x}}{eps\_m} - -1\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 10^{+192}:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -0.5\right), x \cdot x, 1\right)\\
\end{array}
\end{array}
if x < -800Initial program 100.0%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites63.7%
Taylor expanded in eps around 0
lower-/.f64N/A
neg-mul-1N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f6437.5
Applied rewrites37.5%
lift-/.f64N/A
div-invN/A
metadata-evalN/A
Applied rewrites37.5%
if -800 < x < 1.00000000000000004e192Initial program 67.3%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites66.9%
Applied rewrites66.9%
if 1.00000000000000004e192 < x Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites39.4%
Taylor expanded in x around 0
Applied rewrites62.1%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<= eps_m 5.6e-20)
(/ (+ x 1.0) (exp x))
(if (<= eps_m 1.8e+186)
(fma (* 0.3333333333333333 x) (* x x) 1.0)
(/ (- 1.0 (* (fma (- -1.0 eps_m) x 1.0) (- (/ 1.0 eps_m) 1.0))) 2.0))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (eps_m <= 5.6e-20) {
tmp = (x + 1.0) / exp(x);
} else if (eps_m <= 1.8e+186) {
tmp = fma((0.3333333333333333 * x), (x * x), 1.0);
} else {
tmp = (1.0 - (fma((-1.0 - eps_m), x, 1.0) * ((1.0 / eps_m) - 1.0))) / 2.0;
}
return tmp;
}
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (eps_m <= 5.6e-20) tmp = Float64(Float64(x + 1.0) / exp(x)); elseif (eps_m <= 1.8e+186) tmp = fma(Float64(0.3333333333333333 * x), Float64(x * x), 1.0); else tmp = Float64(Float64(1.0 - Float64(fma(Float64(-1.0 - eps_m), x, 1.0) * Float64(Float64(1.0 / eps_m) - 1.0))) / 2.0); end return tmp end
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 5.6e-20], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], If[LessEqual[eps$95$m, 1.8e+186], N[(N[(0.3333333333333333 * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], N[(N[(1.0 - N[(N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x + 1.0), $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 5.6 \cdot 10^{-20}:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\
\mathbf{elif}\;eps\_m \leq 1.8 \cdot 10^{+186}:\\
\;\;\;\;\mathsf{fma}\left(0.3333333333333333 \cdot x, x \cdot x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \mathsf{fma}\left(-1 - eps\_m, x, 1\right) \cdot \left(\frac{1}{eps\_m} - 1\right)}{2}\\
\end{array}
\end{array}
if eps < 5.6000000000000005e-20Initial program 70.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites61.3%
Applied rewrites61.3%
if 5.6000000000000005e-20 < eps < 1.8000000000000001e186Initial program 95.2%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites36.3%
Taylor expanded in x around 0
Applied rewrites65.9%
Taylor expanded in x around inf
Applied rewrites65.9%
if 1.8000000000000001e186 < eps Initial program 100.0%
Taylor expanded in x around 0
neg-mul-1N/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-fma.f64N/A
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f6457.2
Applied rewrites57.2%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6445.2
Applied rewrites45.2%
Taylor expanded in eps around inf
Applied rewrites45.2%
Final simplification59.9%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(if (<= eps_m 5.6e-20)
(* (exp (- x)) (+ x 1.0))
(if (<= eps_m 1.8e+186)
(fma (* 0.3333333333333333 x) (* x x) 1.0)
(/ (- 1.0 (* (fma (- -1.0 eps_m) x 1.0) (- (/ 1.0 eps_m) 1.0))) 2.0))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (eps_m <= 5.6e-20) {
tmp = exp(-x) * (x + 1.0);
} else if (eps_m <= 1.8e+186) {
tmp = fma((0.3333333333333333 * x), (x * x), 1.0);
} else {
tmp = (1.0 - (fma((-1.0 - eps_m), x, 1.0) * ((1.0 / eps_m) - 1.0))) / 2.0;
}
return tmp;
}
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (eps_m <= 5.6e-20) tmp = Float64(exp(Float64(-x)) * Float64(x + 1.0)); elseif (eps_m <= 1.8e+186) tmp = fma(Float64(0.3333333333333333 * x), Float64(x * x), 1.0); else tmp = Float64(Float64(1.0 - Float64(fma(Float64(-1.0 - eps_m), x, 1.0) * Float64(Float64(1.0 / eps_m) - 1.0))) / 2.0); end return tmp end
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 5.6e-20], N[(N[Exp[(-x)], $MachinePrecision] * N[(x + 1.0), $MachinePrecision]), $MachinePrecision], If[LessEqual[eps$95$m, 1.8e+186], N[(N[(0.3333333333333333 * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], N[(N[(1.0 - N[(N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x + 1.0), $MachinePrecision] * N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 5.6 \cdot 10^{-20}:\\
\;\;\;\;e^{-x} \cdot \left(x + 1\right)\\
\mathbf{elif}\;eps\_m \leq 1.8 \cdot 10^{+186}:\\
\;\;\;\;\mathsf{fma}\left(0.3333333333333333 \cdot x, x \cdot x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \mathsf{fma}\left(-1 - eps\_m, x, 1\right) \cdot \left(\frac{1}{eps\_m} - 1\right)}{2}\\
\end{array}
\end{array}
if eps < 5.6000000000000005e-20Initial program 70.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites61.3%
Applied rewrites61.3%
if 5.6000000000000005e-20 < eps < 1.8000000000000001e186Initial program 95.2%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites36.3%
Taylor expanded in x around 0
Applied rewrites65.9%
Taylor expanded in x around inf
Applied rewrites65.9%
if 1.8000000000000001e186 < eps Initial program 100.0%
Taylor expanded in x around 0
neg-mul-1N/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-fma.f64N/A
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f6457.2
Applied rewrites57.2%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6445.2
Applied rewrites45.2%
Taylor expanded in eps around inf
Applied rewrites45.2%
Final simplification59.8%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(let* ((t_0 (- (/ 1.0 eps_m) 1.0)))
(if (<= x -6.2e-5)
(/ (- 1.0 (* (fma (- -1.0 eps_m) x 1.0) t_0)) 2.0)
(if (<= x 1.82)
(fma (fma (fma -0.125 x 0.3333333333333333) x -0.5) (* x x) 1.0)
(if (<= x 1.05e+192)
(/ (- (+ (/ 1.0 eps_m) 1.0) t_0) 2.0)
(fma (fma 0.3333333333333333 x -0.5) (* x x) 1.0))))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double t_0 = (1.0 / eps_m) - 1.0;
double tmp;
if (x <= -6.2e-5) {
tmp = (1.0 - (fma((-1.0 - eps_m), x, 1.0) * t_0)) / 2.0;
} else if (x <= 1.82) {
tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), (x * x), 1.0);
} else if (x <= 1.05e+192) {
tmp = (((1.0 / eps_m) + 1.0) - t_0) / 2.0;
} else {
tmp = fma(fma(0.3333333333333333, x, -0.5), (x * x), 1.0);
}
return tmp;
}
eps_m = abs(eps) function code(x, eps_m) t_0 = Float64(Float64(1.0 / eps_m) - 1.0) tmp = 0.0 if (x <= -6.2e-5) tmp = Float64(Float64(1.0 - Float64(fma(Float64(-1.0 - eps_m), x, 1.0) * t_0)) / 2.0); elseif (x <= 1.82) tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), Float64(x * x), 1.0); elseif (x <= 1.05e+192) tmp = Float64(Float64(Float64(Float64(1.0 / eps_m) + 1.0) - t_0) / 2.0); else tmp = fma(fma(0.3333333333333333, x, -0.5), Float64(x * x), 1.0); end return tmp end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]}, If[LessEqual[x, -6.2e-5], N[(N[(1.0 - N[(N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x + 1.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.82], N[(N[(N[(-0.125 * x + 0.3333333333333333), $MachinePrecision] * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], If[LessEqual[x, 1.05e+192], N[(N[(N[(N[(1.0 / eps$95$m), $MachinePrecision] + 1.0), $MachinePrecision] - t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(0.3333333333333333 * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]]]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\begin{array}{l}
t_0 := \frac{1}{eps\_m} - 1\\
\mathbf{if}\;x \leq -6.2 \cdot 10^{-5}:\\
\;\;\;\;\frac{1 - \mathsf{fma}\left(-1 - eps\_m, x, 1\right) \cdot t\_0}{2}\\
\mathbf{elif}\;x \leq 1.82:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.125, x, 0.3333333333333333\right), x, -0.5\right), x \cdot x, 1\right)\\
\mathbf{elif}\;x \leq 1.05 \cdot 10^{+192}:\\
\;\;\;\;\frac{\left(\frac{1}{eps\_m} + 1\right) - t\_0}{2}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -0.5\right), x \cdot x, 1\right)\\
\end{array}
\end{array}
if x < -6.20000000000000027e-5Initial program 100.0%
Taylor expanded in x around 0
neg-mul-1N/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-fma.f64N/A
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f6463.7
Applied rewrites63.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6429.1
Applied rewrites29.1%
Taylor expanded in eps around inf
Applied rewrites29.1%
if -6.20000000000000027e-5 < x < 1.82000000000000006Initial program 58.2%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites68.3%
Taylor expanded in x around 0
Applied rewrites68.1%
if 1.82000000000000006 < x < 1.04999999999999997e192Initial program 97.5%
Taylor expanded in x around 0
neg-mul-1N/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-fma.f64N/A
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f6410.3
Applied rewrites10.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f645.1
Applied rewrites5.1%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f6461.8
Applied rewrites61.8%
if 1.04999999999999997e192 < x Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites39.4%
Taylor expanded in x around 0
Applied rewrites62.1%
Final simplification58.8%
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
:precision binary64
(let* ((t_0 (- (/ 1.0 eps_m) 1.0)))
(if (<= x -6.2e-5)
(/ (- 1.0 (* (fma (- -1.0 eps_m) x 1.0) t_0)) 2.0)
(if (<= x 1.82)
(fma (fma (fma -0.125 x 0.3333333333333333) x -0.5) (* x x) 1.0)
(if (<= x 1.05e+192)
(/ (- (/ 1.0 eps_m) t_0) 2.0)
(fma (fma 0.3333333333333333 x -0.5) (* x x) 1.0))))))eps_m = fabs(eps);
double code(double x, double eps_m) {
double t_0 = (1.0 / eps_m) - 1.0;
double tmp;
if (x <= -6.2e-5) {
tmp = (1.0 - (fma((-1.0 - eps_m), x, 1.0) * t_0)) / 2.0;
} else if (x <= 1.82) {
tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), (x * x), 1.0);
} else if (x <= 1.05e+192) {
tmp = ((1.0 / eps_m) - t_0) / 2.0;
} else {
tmp = fma(fma(0.3333333333333333, x, -0.5), (x * x), 1.0);
}
return tmp;
}
eps_m = abs(eps) function code(x, eps_m) t_0 = Float64(Float64(1.0 / eps_m) - 1.0) tmp = 0.0 if (x <= -6.2e-5) tmp = Float64(Float64(1.0 - Float64(fma(Float64(-1.0 - eps_m), x, 1.0) * t_0)) / 2.0); elseif (x <= 1.82) tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), Float64(x * x), 1.0); elseif (x <= 1.05e+192) tmp = Float64(Float64(Float64(1.0 / eps_m) - t_0) / 2.0); else tmp = fma(fma(0.3333333333333333, x, -0.5), Float64(x * x), 1.0); end return tmp end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(1.0 / eps$95$m), $MachinePrecision] - 1.0), $MachinePrecision]}, If[LessEqual[x, -6.2e-5], N[(N[(1.0 - N[(N[(N[(-1.0 - eps$95$m), $MachinePrecision] * x + 1.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.82], N[(N[(N[(-0.125 * x + 0.3333333333333333), $MachinePrecision] * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], If[LessEqual[x, 1.05e+192], N[(N[(N[(1.0 / eps$95$m), $MachinePrecision] - t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(0.3333333333333333 * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]]]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\begin{array}{l}
t_0 := \frac{1}{eps\_m} - 1\\
\mathbf{if}\;x \leq -6.2 \cdot 10^{-5}:\\
\;\;\;\;\frac{1 - \mathsf{fma}\left(-1 - eps\_m, x, 1\right) \cdot t\_0}{2}\\
\mathbf{elif}\;x \leq 1.82:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.125, x, 0.3333333333333333\right), x, -0.5\right), x \cdot x, 1\right)\\
\mathbf{elif}\;x \leq 1.05 \cdot 10^{+192}:\\
\;\;\;\;\frac{\frac{1}{eps\_m} - t\_0}{2}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -0.5\right), x \cdot x, 1\right)\\
\end{array}
\end{array}
if x < -6.20000000000000027e-5Initial program 100.0%
Taylor expanded in x around 0
neg-mul-1N/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-fma.f64N/A
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f6463.7
Applied rewrites63.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6429.1
Applied rewrites29.1%
Taylor expanded in eps around inf
Applied rewrites29.1%
if -6.20000000000000027e-5 < x < 1.82000000000000006Initial program 58.2%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites68.3%
Taylor expanded in x around 0
Applied rewrites68.1%
if 1.82000000000000006 < x < 1.04999999999999997e192Initial program 97.5%
Taylor expanded in x around 0
neg-mul-1N/A
+-commutativeN/A
*-commutativeN/A
distribute-lft-neg-inN/A
mul-1-negN/A
lower-fma.f64N/A
distribute-lft-inN/A
metadata-evalN/A
mul-1-negN/A
unsub-negN/A
lower--.f6410.3
Applied rewrites10.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f645.1
Applied rewrites5.1%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f6461.8
Applied rewrites61.8%
Taylor expanded in eps around 0
Applied rewrites61.8%
if 1.04999999999999997e192 < x Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites39.4%
Taylor expanded in x around 0
Applied rewrites62.1%
Final simplification58.8%
eps_m = (fabs.f64 eps) (FPCore (x eps_m) :precision binary64 (if (<= eps_m 2.25e-42) (/ 1.0 (fma (* x x) 0.5 1.0)) (fma (fma 0.3333333333333333 x -0.5) (* x x) 1.0)))
eps_m = fabs(eps);
double code(double x, double eps_m) {
double tmp;
if (eps_m <= 2.25e-42) {
tmp = 1.0 / fma((x * x), 0.5, 1.0);
} else {
tmp = fma(fma(0.3333333333333333, x, -0.5), (x * x), 1.0);
}
return tmp;
}
eps_m = abs(eps) function code(x, eps_m) tmp = 0.0 if (eps_m <= 2.25e-42) tmp = Float64(1.0 / fma(Float64(x * x), 0.5, 1.0)); else tmp = fma(fma(0.3333333333333333, x, -0.5), Float64(x * x), 1.0); end return tmp end
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 2.25e-42], N[(1.0 / N[(N[(x * x), $MachinePrecision] * 0.5 + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(0.3333333333333333 * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 2.25 \cdot 10^{-42}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(x \cdot x, 0.5, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -0.5\right), x \cdot x, 1\right)\\
\end{array}
\end{array}
if eps < 2.25e-42Initial program 71.9%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites59.9%
Taylor expanded in x around 0
Applied rewrites51.7%
Applied rewrites51.7%
Taylor expanded in x around 0
Applied rewrites52.0%
if 2.25e-42 < eps Initial program 92.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites28.7%
Taylor expanded in x around 0
Applied rewrites47.3%
eps_m = (fabs.f64 eps) (FPCore (x eps_m) :precision binary64 (fma (fma 0.3333333333333333 x -0.5) (* x x) 1.0))
eps_m = fabs(eps);
double code(double x, double eps_m) {
return fma(fma(0.3333333333333333, x, -0.5), (x * x), 1.0);
}
eps_m = abs(eps) function code(x, eps_m) return fma(fma(0.3333333333333333, x, -0.5), Float64(x * x), 1.0) end
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := N[(N[(0.3333333333333333 * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -0.5\right), x \cdot x, 1\right)
\end{array}
Initial program 78.4%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites50.2%
Taylor expanded in x around 0
Applied rewrites46.4%
eps_m = (fabs.f64 eps) (FPCore (x eps_m) :precision binary64 (fma (* 0.3333333333333333 x) (* x x) 1.0))
eps_m = fabs(eps);
double code(double x, double eps_m) {
return fma((0.3333333333333333 * x), (x * x), 1.0);
}
eps_m = abs(eps) function code(x, eps_m) return fma(Float64(0.3333333333333333 * x), Float64(x * x), 1.0) end
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := N[(N[(0.3333333333333333 * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
\mathsf{fma}\left(0.3333333333333333 \cdot x, x \cdot x, 1\right)
\end{array}
Initial program 78.4%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites50.2%
Taylor expanded in x around 0
Applied rewrites46.4%
Taylor expanded in x around inf
Applied rewrites46.3%
eps_m = (fabs.f64 eps) (FPCore (x eps_m) :precision binary64 1.0)
eps_m = fabs(eps);
/* Herbie-generated constant alternative: both arguments are ignored. */
double code(double x, double eps_m) {
    (void)x;
    (void)eps_m;
    return 1.0;
}
eps_m = abs(eps)
! Herbie-generated constant alternative: approximates the original
! expression by 1; both x and eps_m are accepted but unused here.
real(8) function code(x, eps_m)
real(8), intent (in) :: x
real(8), intent (in) :: eps_m
code = 1.0d0
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
    // Herbie-generated constant alternative: result is independent of x and eps_m.
    final double constantValue = 1.0;
    return constantValue;
}
eps_m = math.fabs(eps) def code(x, eps_m): return 1.0
eps_m = abs(eps) function code(x, eps_m) return 1.0 end
eps_m = abs(eps); function tmp = code(x, eps_m) tmp = 1.0; end
eps_m = N[Abs[eps], $MachinePrecision] code[x_, eps$95$m_] := 1.0
\begin{array}{l}
eps\_m = \left|\varepsilon\right|
\\
1
\end{array}
Initial program 78.4%
Taylor expanded in x around 0
Applied rewrites36.0%
herbie shell --seed 2024254
(FPCore (x eps)
:name "NMSE Section 6.1 mentioned, A"
:precision binary64
(/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))