
(FPCore (x eps) :precision binary64 (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
/*
 * Double-precision evaluation of
 *   ((1 + 1/eps) * exp(-(1 - eps)*x) - (1/eps - 1) * exp(-(1 + eps)*x)) / 2.
 * Direct translation of the FPCore specification above. The two terms can
 * cancel, so accuracy degrades on part of the input range (the report below
 * gives ~41.7% accuracy for the sampled inputs).
 */
double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Double-precision evaluation of
!   ((1 + 1/eps) * exp(-(1 - eps)*x) - (1/eps - 1) * exp(-(1 + eps)*x)) / 2.
! Direct translation of the FPCore specification above.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Double-precision evaluation of
 * ((1 + 1/eps) * exp(-(1 - eps)*x) - (1/eps - 1) * exp(-(1 + eps)*x)) / 2.
 * Direct translation of the FPCore specification above.
 */
public static double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps): return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
function code(x, eps) return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0) end
function tmp = code(x, eps) tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0; end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
/*
 * Reference implementation repeated for the alternatives table: direct
 * evaluation of ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
 */
double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Reference implementation repeated for the alternatives table: direct
! evaluation of ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Reference implementation repeated for the alternatives table: direct
 * evaluation of ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
 */
public static double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps): return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
function code(x, eps) return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0) end
function tmp = code(x, eps) tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0; end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
(FPCore (x eps)
:precision binary64
(let* ((t_0
(-
(* (+ 1.0 (pow eps -1.0)) (exp (* (+ -1.0 eps) x)))
(* (- (pow eps -1.0) 1.0) (exp (* (- -1.0 eps) x)))))
(t_1 (exp (- x))))
(if (<= t_0 0.0) (+ (* x t_1) t_1) (/ t_0 2.0))))
/*
 * Herbie alternative 1: re-evaluates the original expression as t_0 to pick
 * a regime. When t_0 <= 0 the value is replaced by (1 + x) * e^{-x}
 * (a Taylor-style rewrite in eps around 0 — see the derivation log below);
 * otherwise the direct formula t_0 / 2 is used.
 */
double code(double x, double eps) {
/* t_0 is the original numerator, with 1/eps spelled as pow(eps, -1). */
double t_0 = ((1.0 + pow(eps, -1.0)) * exp(((-1.0 + eps) * x))) - ((pow(eps, -1.0) - 1.0) * exp(((-1.0 - eps) * x)));
double t_1 = exp(-x);
double tmp;
if (t_0 <= 0.0) {
tmp = (x * t_1) + t_1;
} else {
tmp = t_0 / 2.0;
}
return tmp;
}
! Herbie alternative 1: re-evaluates the original expression as t_0 to pick
! a regime. When t_0 <= 0 the value is replaced by (1 + x) * exp(-x)
! (Taylor-style rewrite, see the derivation log below); otherwise t_0 / 2.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
! t_0 is the original numerator, with 1/eps spelled as eps**(-1).
t_0 = ((1.0d0 + (eps ** (-1.0d0))) * exp((((-1.0d0) + eps) * x))) - (((eps ** (-1.0d0)) - 1.0d0) * exp((((-1.0d0) - eps) * x)))
t_1 = exp(-x)
if (t_0 <= 0.0d0) then
tmp = (x * t_1) + t_1
else
tmp = t_0 / 2.0d0
end if
code = tmp
end function
/**
 * Herbie alternative 1: re-evaluates the original expression as t_0 to pick
 * a regime. When t_0 <= 0 the value is replaced by (1 + x) * exp(-x)
 * (Taylor-style rewrite, see the derivation log below); otherwise t_0 / 2.
 */
public static double code(double x, double eps) {
// t_0 is the original numerator, with 1/eps spelled as Math.pow(eps, -1).
double t_0 = ((1.0 + Math.pow(eps, -1.0)) * Math.exp(((-1.0 + eps) * x))) - ((Math.pow(eps, -1.0) - 1.0) * Math.exp(((-1.0 - eps) * x)));
double t_1 = Math.exp(-x);
double tmp;
if (t_0 <= 0.0) {
tmp = (x * t_1) + t_1;
} else {
tmp = t_0 / 2.0;
}
return tmp;
}
def code(x, eps): t_0 = ((1.0 + math.pow(eps, -1.0)) * math.exp(((-1.0 + eps) * x))) - ((math.pow(eps, -1.0) - 1.0) * math.exp(((-1.0 - eps) * x))) t_1 = math.exp(-x) tmp = 0 if t_0 <= 0.0: tmp = (x * t_1) + t_1 else: tmp = t_0 / 2.0 return tmp
function code(x, eps) t_0 = Float64(Float64(Float64(1.0 + (eps ^ -1.0)) * exp(Float64(Float64(-1.0 + eps) * x))) - Float64(Float64((eps ^ -1.0) - 1.0) * exp(Float64(Float64(-1.0 - eps) * x)))) t_1 = exp(Float64(-x)) tmp = 0.0 if (t_0 <= 0.0) tmp = Float64(Float64(x * t_1) + t_1); else tmp = Float64(t_0 / 2.0); end return tmp end
function tmp_2 = code(x, eps) t_0 = ((1.0 + (eps ^ -1.0)) * exp(((-1.0 + eps) * x))) - (((eps ^ -1.0) - 1.0) * exp(((-1.0 - eps) * x))); t_1 = exp(-x); tmp = 0.0; if (t_0 <= 0.0) tmp = (x * t_1) + t_1; else tmp = t_0 / 2.0; end tmp_2 = tmp; end
code[x_, eps_] := Block[{t$95$0 = N[(N[(N[(1.0 + N[Power[eps, -1.0], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[(-1.0 + eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[N[(N[(-1.0 - eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[t$95$0, 0.0], N[(N[(x * t$95$1), $MachinePrecision] + t$95$1), $MachinePrecision], N[(t$95$0 / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(1 + {\varepsilon}^{-1}\right) \cdot e^{\left(-1 + \varepsilon\right) \cdot x} - \left({\varepsilon}^{-1} - 1\right) \cdot e^{\left(-1 - \varepsilon\right) \cdot x}\\
t_1 := e^{-x}\\
\mathbf{if}\;t\_0 \leq 0:\\
\;\;\;\;x \cdot t\_1 + t\_1\\
\mathbf{else}:\\
\;\;\;\;\frac{t\_0}{2}\\
\end{array}
\end{array}
if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 0.0Initial program 41.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Applied rewrites100.0%
Applied rewrites100.0%
if 0.0 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) Initial program 100.0%
Final simplification100.0%
(FPCore (x eps)
:precision binary64
(if (<=
(-
(* (+ 1.0 (pow eps -1.0)) (exp (* (+ -1.0 eps) x)))
(* (- (pow eps -1.0) 1.0) (exp (* (- -1.0 eps) x))))
4.0)
(* (* 2.0 (/ (+ 1.0 x) (exp x))) 0.5)
(/ (- (+ (pow eps -1.0) 1.0) (/ -1.0 (exp (fma eps x x)))) 2.0)))
/*
 * Herbie alternative: branches on the re-evaluated original numerator.
 * When it is <= 4 the value is approximated by (1 + x) * e^{-x}
 * (the generator emits it as 2*(...)*0.5); otherwise an eps->inf rewrite
 * using fma(eps, x, x) = (eps + 1)*x is used (see the derivation log below).
 */
double code(double x, double eps) {
double tmp;
if ((((1.0 + pow(eps, -1.0)) * exp(((-1.0 + eps) * x))) - ((pow(eps, -1.0) - 1.0) * exp(((-1.0 - eps) * x)))) <= 4.0) {
tmp = (2.0 * ((1.0 + x) / exp(x))) * 0.5;
} else {
tmp = ((pow(eps, -1.0) + 1.0) - (-1.0 / exp(fma(eps, x, x)))) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (Float64(Float64(Float64(1.0 + (eps ^ -1.0)) * exp(Float64(Float64(-1.0 + eps) * x))) - Float64(Float64((eps ^ -1.0) - 1.0) * exp(Float64(Float64(-1.0 - eps) * x)))) <= 4.0) tmp = Float64(Float64(2.0 * Float64(Float64(1.0 + x) / exp(x))) * 0.5); else tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(-1.0 / exp(fma(eps, x, x)))) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[N[(N[(N[(1.0 + N[Power[eps, -1.0], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[(-1.0 + eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[N[(N[(-1.0 - eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4.0], N[(N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(-1.0 / N[Exp[N[(eps * x + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 + {\varepsilon}^{-1}\right) \cdot e^{\left(-1 + \varepsilon\right) \cdot x} - \left({\varepsilon}^{-1} - 1\right) \cdot e^{\left(-1 - \varepsilon\right) \cdot x} \leq 4:\\
\;\;\;\;\left(2 \cdot \frac{1 + x}{e^{x}}\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - \frac{-1}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}}{2}\\
\end{array}
\end{array}
if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 4Initial program 57.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
if 4 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6452.9
Applied rewrites52.9%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
lower-fma.f6452.9
Applied rewrites52.9%
Final simplification80.3%
(FPCore (x eps)
:precision binary64
(if (<=
(-
(* (+ 1.0 (pow eps -1.0)) (exp (* (+ -1.0 eps) x)))
(* (- (pow eps -1.0) 1.0) (exp (* (- -1.0 eps) x))))
4.0)
(* (* 2.0 (/ (+ 1.0 x) (exp x))) 0.5)
(/ (- (pow eps -1.0) (/ -1.0 (exp (fma eps x x)))) 2.0)))
/*
 * Herbie alternative: same structure as the previous one, but the else
 * branch drops the "+ 1.0" term (pow(eps,-1) instead of pow(eps,-1) + 1),
 * per an extra Taylor/rewrite step recorded in the derivation log below.
 */
double code(double x, double eps) {
double tmp;
if ((((1.0 + pow(eps, -1.0)) * exp(((-1.0 + eps) * x))) - ((pow(eps, -1.0) - 1.0) * exp(((-1.0 - eps) * x)))) <= 4.0) {
tmp = (2.0 * ((1.0 + x) / exp(x))) * 0.5;
} else {
tmp = (pow(eps, -1.0) - (-1.0 / exp(fma(eps, x, x)))) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (Float64(Float64(Float64(1.0 + (eps ^ -1.0)) * exp(Float64(Float64(-1.0 + eps) * x))) - Float64(Float64((eps ^ -1.0) - 1.0) * exp(Float64(Float64(-1.0 - eps) * x)))) <= 4.0) tmp = Float64(Float64(2.0 * Float64(Float64(1.0 + x) / exp(x))) * 0.5); else tmp = Float64(Float64((eps ^ -1.0) - Float64(-1.0 / exp(fma(eps, x, x)))) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[N[(N[(N[(1.0 + N[Power[eps, -1.0], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[(-1.0 + eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[N[(N[(-1.0 - eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4.0], N[(N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[Power[eps, -1.0], $MachinePrecision] - N[(-1.0 / N[Exp[N[(eps * x + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 + {\varepsilon}^{-1}\right) \cdot e^{\left(-1 + \varepsilon\right) \cdot x} - \left({\varepsilon}^{-1} - 1\right) \cdot e^{\left(-1 - \varepsilon\right) \cdot x} \leq 4:\\
\;\;\;\;\left(2 \cdot \frac{1 + x}{e^{x}}\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{{\varepsilon}^{-1} - \frac{-1}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}}{2}\\
\end{array}
\end{array}
if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 4Initial program 57.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
if 4 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6452.9
Applied rewrites52.9%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f643.1
Applied rewrites3.1%
Taylor expanded in eps around 0
Applied rewrites3.1%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
lower-fma.f6452.2
Applied rewrites52.2%
Final simplification80.0%
(FPCore (x eps)
:precision binary64
(if (<=
(-
(* (+ 1.0 (pow eps -1.0)) (exp (* (+ -1.0 eps) x)))
(* (- (pow eps -1.0) 1.0) (exp (* (- -1.0 eps) x))))
0.0)
(/ (+ x 1.0) (fma (fma 0.5 x 1.0) x 1.0))
(* (+ x 1.0) (fma (fma 0.5 x -1.0) x 1.0))))
/*
 * Herbie alternative: branches on the sign of the re-evaluated original
 * numerator, then uses fma-based polynomial forms in x (Taylor expansions
 * around x = 0 per the derivation log below):
 *   <= 0: (x + 1) / (1 + x + x^2/2)   [denominator via nested fma]
 *   else: (x + 1) * (1 - x + x^2/2)   [nested fma]
 */
double code(double x, double eps) {
double tmp;
if ((((1.0 + pow(eps, -1.0)) * exp(((-1.0 + eps) * x))) - ((pow(eps, -1.0) - 1.0) * exp(((-1.0 - eps) * x)))) <= 0.0) {
tmp = (x + 1.0) / fma(fma(0.5, x, 1.0), x, 1.0);
} else {
tmp = (x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0);
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (Float64(Float64(Float64(1.0 + (eps ^ -1.0)) * exp(Float64(Float64(-1.0 + eps) * x))) - Float64(Float64((eps ^ -1.0) - 1.0) * exp(Float64(Float64(-1.0 - eps) * x)))) <= 0.0) tmp = Float64(Float64(x + 1.0) / fma(fma(0.5, x, 1.0), x, 1.0)); else tmp = Float64(Float64(x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0)); end return tmp end
code[x_, eps_] := If[LessEqual[N[(N[(N[(1.0 + N[Power[eps, -1.0], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[(-1.0 + eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[N[(N[(-1.0 - eps), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0], N[(N[(x + 1.0), $MachinePrecision] / N[(N[(0.5 * x + 1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(x + 1.0), $MachinePrecision] * N[(N[(0.5 * x + -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(1 + {\varepsilon}^{-1}\right) \cdot e^{\left(-1 + \varepsilon\right) \cdot x} - \left({\varepsilon}^{-1} - 1\right) \cdot e^{\left(-1 - \varepsilon\right) \cdot x} \leq 0:\\
\;\;\;\;\frac{x + 1}{\mathsf{fma}\left(\mathsf{fma}\left(0.5, x, 1\right), x, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\left(x + 1\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(0.5, x, -1\right), x, 1\right)\\
\end{array}
\end{array}
if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 0.0Initial program 41.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites84.7%
Applied rewrites84.7%
if 0.0 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites28.8%
Applied rewrites28.8%
Taylor expanded in x around 0
Applied rewrites43.7%
Final simplification61.0%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (exp (- x))))
(if (<= eps 0.04)
(+ (* x t_0) t_0)
(if (<= eps 1.55e+160)
(/
(-
(* (+ 1.0 (pow eps -1.0)) (exp (* (- eps 1.0) x)))
(- (pow eps -1.0) 1.0))
2.0)
(/ (- (+ (pow eps -1.0) 1.0) (/ -1.0 (exp (fma eps x x)))) 2.0)))))
/*
 * Herbie alternative: splits on eps instead of on the recomputed numerator.
 *   eps <= 0.04:     (x + 1) * e^{-x}  (small-eps Taylor rewrite)
 *   eps <= 1.55e160: direct formula with the second exponential replaced
 *                    by its x->0 limit (constant 1)
 *   else:            eps->inf rewrite using fma(eps, x, x)
 * Thresholds are regime boundaries chosen by Herbie's sampling.
 */
double code(double x, double eps) {
double t_0 = exp(-x);
double tmp;
if (eps <= 0.04) {
tmp = (x * t_0) + t_0;
} else if (eps <= 1.55e+160) {
tmp = (((1.0 + pow(eps, -1.0)) * exp(((eps - 1.0) * x))) - (pow(eps, -1.0) - 1.0)) / 2.0;
} else {
tmp = ((pow(eps, -1.0) + 1.0) - (-1.0 / exp(fma(eps, x, x)))) / 2.0;
}
return tmp;
}
function code(x, eps) t_0 = exp(Float64(-x)) tmp = 0.0 if (eps <= 0.04) tmp = Float64(Float64(x * t_0) + t_0); elseif (eps <= 1.55e+160) tmp = Float64(Float64(Float64(Float64(1.0 + (eps ^ -1.0)) * exp(Float64(Float64(eps - 1.0) * x))) - Float64((eps ^ -1.0) - 1.0)) / 2.0); else tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(-1.0 / exp(fma(eps, x, x)))) / 2.0); end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[eps, 0.04], N[(N[(x * t$95$0), $MachinePrecision] + t$95$0), $MachinePrecision], If[LessEqual[eps, 1.55e+160], N[(N[(N[(N[(1.0 + N[Power[eps, -1.0], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(-1.0 / N[Exp[N[(eps * x + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;\varepsilon \leq 0.04:\\
\;\;\;\;x \cdot t\_0 + t\_0\\
\mathbf{elif}\;\varepsilon \leq 1.55 \cdot 10^{+160}:\\
\;\;\;\;\frac{\left(1 + {\varepsilon}^{-1}\right) \cdot e^{\left(\varepsilon - 1\right) \cdot x} - \left({\varepsilon}^{-1} - 1\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - \frac{-1}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}}{2}\\
\end{array}
\end{array}
if eps < 0.0400000000000000008Initial program 65.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites70.9%
Applied rewrites70.9%
Applied rewrites70.9%
if 0.0400000000000000008 < eps < 1.5499999999999999e160Initial program 100.0%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f6472.2
Applied rewrites72.2%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f6472.2
Applied rewrites72.2%
if 1.5499999999999999e160 < eps Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6477.6
Applied rewrites77.6%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
lower-fma.f6477.6
Applied rewrites77.6%
Final simplification71.9%
(FPCore (x eps)
:precision binary64
(if (<= eps 0.04)
(* (* 2.0 (/ (+ 1.0 x) (exp x))) 0.5)
(if (<= eps 1.55e+160)
(/
(-
(* (+ 1.0 (pow eps -1.0)) (exp (* (- eps 1.0) x)))
(- (pow eps -1.0) 1.0))
2.0)
(/ (- (+ (pow eps -1.0) 1.0) (/ -1.0 (exp (fma eps x x)))) 2.0))))
/*
 * Herbie alternative: same eps-based regime split as the previous one,
 * with the small-eps branch written as 2*((1 + x)/e^x)*0.5 instead of
 * x*e^{-x} + e^{-x} (same quantity, different emitted form).
 */
double code(double x, double eps) {
double tmp;
if (eps <= 0.04) {
tmp = (2.0 * ((1.0 + x) / exp(x))) * 0.5;
} else if (eps <= 1.55e+160) {
tmp = (((1.0 + pow(eps, -1.0)) * exp(((eps - 1.0) * x))) - (pow(eps, -1.0) - 1.0)) / 2.0;
} else {
tmp = ((pow(eps, -1.0) + 1.0) - (-1.0 / exp(fma(eps, x, x)))) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (eps <= 0.04) tmp = Float64(Float64(2.0 * Float64(Float64(1.0 + x) / exp(x))) * 0.5); elseif (eps <= 1.55e+160) tmp = Float64(Float64(Float64(Float64(1.0 + (eps ^ -1.0)) * exp(Float64(Float64(eps - 1.0) * x))) - Float64((eps ^ -1.0) - 1.0)) / 2.0); else tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(-1.0 / exp(fma(eps, x, x)))) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[eps, 0.04], N[(N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[eps, 1.55e+160], N[(N[(N[(N[(1.0 + N[Power[eps, -1.0], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(-1.0 / N[Exp[N[(eps * x + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 0.04:\\
\;\;\;\;\left(2 \cdot \frac{1 + x}{e^{x}}\right) \cdot 0.5\\
\mathbf{elif}\;\varepsilon \leq 1.55 \cdot 10^{+160}:\\
\;\;\;\;\frac{\left(1 + {\varepsilon}^{-1}\right) \cdot e^{\left(\varepsilon - 1\right) \cdot x} - \left({\varepsilon}^{-1} - 1\right)}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - \frac{-1}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}}{2}\\
\end{array}
\end{array}
if eps < 0.0400000000000000008Initial program 65.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites70.9%
if 0.0400000000000000008 < eps < 1.5499999999999999e160Initial program 100.0%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f6472.2
Applied rewrites72.2%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f6472.2
Applied rewrites72.2%
if 1.5499999999999999e160 < eps Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6477.6
Applied rewrites77.6%
Taylor expanded in eps around inf
exp-negN/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
lower-exp.f64N/A
*-commutativeN/A
+-commutativeN/A
distribute-lft1-inN/A
lower-fma.f6477.6
Applied rewrites77.6%
Final simplification71.9%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (exp (- x))))
(if (<= x -2.25e+70)
(/ (- (/ t_0 eps) (- (pow eps -1.0) 1.0)) 2.0)
(if (<= x -3.4e-192)
(fma
(* 0.5 x)
(fma (- eps 1.0) (+ (pow eps -1.0) 1.0) (/ (- 1.0 (* eps eps)) eps))
1.0)
(* (+ x 1.0) t_0)))))
/*
 * Herbie alternative: splits on x.
 *   x <= -2.25e70:   e^{-x}/eps dominates; subtract the (1/eps - 1) term
 *   x <= -3.4e-192:  fma-based linearization in x around 0
 *   else:            (x + 1) * e^{-x}
 * Thresholds are regime boundaries chosen by Herbie's sampling.
 */
double code(double x, double eps) {
double t_0 = exp(-x);
double tmp;
if (x <= -2.25e+70) {
tmp = ((t_0 / eps) - (pow(eps, -1.0) - 1.0)) / 2.0;
} else if (x <= -3.4e-192) {
tmp = fma((0.5 * x), fma((eps - 1.0), (pow(eps, -1.0) + 1.0), ((1.0 - (eps * eps)) / eps)), 1.0);
} else {
tmp = (x + 1.0) * t_0;
}
return tmp;
}
function code(x, eps) t_0 = exp(Float64(-x)) tmp = 0.0 if (x <= -2.25e+70) tmp = Float64(Float64(Float64(t_0 / eps) - Float64((eps ^ -1.0) - 1.0)) / 2.0); elseif (x <= -3.4e-192) tmp = fma(Float64(0.5 * x), fma(Float64(eps - 1.0), Float64((eps ^ -1.0) + 1.0), Float64(Float64(1.0 - Float64(eps * eps)) / eps)), 1.0); else tmp = Float64(Float64(x + 1.0) * t_0); end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[x, -2.25e+70], N[(N[(N[(t$95$0 / eps), $MachinePrecision] - N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, -3.4e-192], N[(N[(0.5 * x), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] + N[(N[(1.0 - N[(eps * eps), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision], N[(N[(x + 1.0), $MachinePrecision] * t$95$0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;x \leq -2.25 \cdot 10^{+70}:\\
\;\;\;\;\frac{\frac{t\_0}{\varepsilon} - \left({\varepsilon}^{-1} - 1\right)}{2}\\
\mathbf{elif}\;x \leq -3.4 \cdot 10^{-192}:\\
\;\;\;\;\mathsf{fma}\left(0.5 \cdot x, \mathsf{fma}\left(\varepsilon - 1, {\varepsilon}^{-1} + 1, \frac{1 - \varepsilon \cdot \varepsilon}{\varepsilon}\right), 1\right)\\
\mathbf{else}:\\
\;\;\;\;\left(x + 1\right) \cdot t\_0\\
\end{array}
\end{array}
if x < -2.25e70Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6454.6
Applied rewrites54.6%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f643.1
Applied rewrites3.1%
Taylor expanded in eps around 0
lower-/.f64N/A
neg-mul-1N/A
lower-exp.f64N/A
neg-mul-1N/A
lower-neg.f6453.1
Applied rewrites53.1%
if -2.25e70 < x < -3.40000000000000002e-192Initial program 67.0%
Taylor expanded in x around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites50.9%
Taylor expanded in eps around 0
Applied rewrites70.8%
if -3.40000000000000002e-192 < x Initial program 73.2%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites71.2%
Applied rewrites71.2%
Final simplification68.9%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (+ (pow eps -1.0) 1.0)))
(if (<= x -150.0)
(/ (- t_0 (* x eps)) 2.0)
(if (<= x 1.8)
(fma (fma (fma -0.125 x 0.3333333333333333) x -0.5) (* x x) 1.0)
(/ (- t_0 (- (pow eps -1.0) 1.0)) 2.0)))))
/*
 * Herbie alternative: splits on x.
 *   x <= -150: (1/eps + 1 - x*eps) / 2
 *   x <= 1.8:  eps-free cubic in x via nested fma
 *   else:      (t_0 - (1/eps - 1)) / 2, which is algebraically
 *              ((1/eps + 1) - (1/eps - 1)) / 2 = 1 (the generator keeps
 *              the cancelling form).
 */
double code(double x, double eps) {
double t_0 = pow(eps, -1.0) + 1.0;
double tmp;
if (x <= -150.0) {
tmp = (t_0 - (x * eps)) / 2.0;
} else if (x <= 1.8) {
tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), (x * x), 1.0);
} else {
tmp = (t_0 - (pow(eps, -1.0) - 1.0)) / 2.0;
}
return tmp;
}
function code(x, eps) t_0 = Float64((eps ^ -1.0) + 1.0) tmp = 0.0 if (x <= -150.0) tmp = Float64(Float64(t_0 - Float64(x * eps)) / 2.0); elseif (x <= 1.8) tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), Float64(x * x), 1.0); else tmp = Float64(Float64(t_0 - Float64((eps ^ -1.0) - 1.0)) / 2.0); end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision]}, If[LessEqual[x, -150.0], N[(N[(t$95$0 - N[(x * eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.8], N[(N[(N[(-0.125 * x + 0.3333333333333333), $MachinePrecision] * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], N[(N[(t$95$0 - N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\varepsilon}^{-1} + 1\\
\mathbf{if}\;x \leq -150:\\
\;\;\;\;\frac{t\_0 - x \cdot \varepsilon}{2}\\
\mathbf{elif}\;x \leq 1.8:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.125, x, 0.3333333333333333\right), x, -0.5\right), x \cdot x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{t\_0 - \left({\varepsilon}^{-1} - 1\right)}{2}\\
\end{array}
\end{array}
if x < -150Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6452.8
Applied rewrites52.8%
Taylor expanded in x around 0
associate--l+N/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-lft1-inN/A
lower-*.f64N/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
distribute-neg-inN/A
metadata-evalN/A
unsub-negN/A
lower--.f64N/A
lower--.f64N/A
lower-/.f6422.8
Applied rewrites22.8%
Taylor expanded in eps around inf
Applied rewrites22.8%
if -150 < x < 1.80000000000000004Initial program 54.4%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites77.5%
Taylor expanded in x around 0
Applied rewrites76.9%
if 1.80000000000000004 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6428.4
Applied rewrites28.4%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f6453.4
Applied rewrites53.4%
Final simplification61.4%
(FPCore (x eps)
:precision binary64
(if (<= x -150.0)
(/ (- (+ (pow eps -1.0) 1.0) (* x eps)) 2.0)
(if (<= x 1.8)
(fma (fma (fma -0.125 x 0.3333333333333333) x -0.5) (* x x) 1.0)
(/ (- (pow eps -1.0) (- (pow eps -1.0) 1.0)) 2.0))))
/*
 * Herbie alternative: same x-based split as the previous one, but the
 * large-x branch is (1/eps - (1/eps - 1)) / 2, which is algebraically 0.5
 * (the generator keeps the cancelling form).
 */
double code(double x, double eps) {
double tmp;
if (x <= -150.0) {
tmp = ((pow(eps, -1.0) + 1.0) - (x * eps)) / 2.0;
} else if (x <= 1.8) {
tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), (x * x), 1.0);
} else {
tmp = (pow(eps, -1.0) - (pow(eps, -1.0) - 1.0)) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (x <= -150.0) tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(x * eps)) / 2.0); elseif (x <= 1.8) tmp = fma(fma(fma(-0.125, x, 0.3333333333333333), x, -0.5), Float64(x * x), 1.0); else tmp = Float64(Float64((eps ^ -1.0) - Float64((eps ^ -1.0) - 1.0)) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[x, -150.0], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(x * eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.8], N[(N[(N[(-0.125 * x + 0.3333333333333333), $MachinePrecision] * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision], N[(N[(N[Power[eps, -1.0], $MachinePrecision] - N[(N[Power[eps, -1.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -150:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - x \cdot \varepsilon}{2}\\
\mathbf{elif}\;x \leq 1.8:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.125, x, 0.3333333333333333\right), x, -0.5\right), x \cdot x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{{\varepsilon}^{-1} - \left({\varepsilon}^{-1} - 1\right)}{2}\\
\end{array}
\end{array}
if x < -150Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6452.8
Applied rewrites52.8%
Taylor expanded in x around 0
associate--l+N/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-lft1-inN/A
lower-*.f64N/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
distribute-neg-inN/A
metadata-evalN/A
unsub-negN/A
lower--.f64N/A
lower--.f64N/A
lower-/.f6422.8
Applied rewrites22.8%
Taylor expanded in eps around inf
Applied rewrites22.8%
if -150 < x < 1.80000000000000004Initial program 54.4%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites77.5%
Taylor expanded in x around 0
Applied rewrites76.9%
if 1.80000000000000004 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6428.4
Applied rewrites28.4%
Taylor expanded in x around 0
lower--.f64N/A
lower-/.f6453.4
Applied rewrites53.4%
Taylor expanded in eps around 0
Applied rewrites53.4%
Final simplification61.4%
(FPCore (x eps)
:precision binary64
(if (<= eps 0.04)
(* (* 2.0 (/ (+ 1.0 x) (exp x))) 0.5)
(if (<= eps 8.5e+159)
(* (+ x 1.0) (fma (fma 0.5 x -1.0) x 1.0))
(/
(-
(+ (pow eps -1.0) 1.0)
(/ (fma (fma (- eps 1.0) x (- x 1.0)) eps (- 1.0 x)) eps))
2.0))))
double code(double x, double eps) {
double tmp;
if (eps <= 0.04) {
tmp = (2.0 * ((1.0 + x) / exp(x))) * 0.5;
} else if (eps <= 8.5e+159) {
tmp = (x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0);
} else {
tmp = ((pow(eps, -1.0) + 1.0) - (fma(fma((eps - 1.0), x, (x - 1.0)), eps, (1.0 - x)) / eps)) / 2.0;
}
return tmp;
}
# Herbie alternative: piecewise in eps -- (1+x)/exp(x) (wrapped in an algebraically
# redundant *2.0 ... *0.5) for eps <= 0.04, a cubic fma polynomial for eps <= 8.5e159,
# and a rearranged quotient otherwise.
function code(x, eps) tmp = 0.0 if (eps <= 0.04) tmp = Float64(Float64(2.0 * Float64(Float64(1.0 + x) / exp(x))) * 0.5); elseif (eps <= 8.5e+159) tmp = Float64(Float64(x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0)); else tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(fma(fma(Float64(eps - 1.0), x, Float64(x - 1.0)), eps, Float64(1.0 - x)) / eps)) / 2.0); end return tmp end
(* Herbie alternative: piecewise in eps -- (1+x)/E^x (with redundant 2.0 * ... * 0.5 scaling) for eps <= 0.04, a cubic polynomial for eps <= 8.5e159, a rearranged quotient otherwise. *)
code[x_, eps_] := If[LessEqual[eps, 0.04], N[(N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[eps, 8.5e+159], N[(N[(x + 1.0), $MachinePrecision] * N[(N[(0.5 * x + -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(N[(N[(N[(eps - 1.0), $MachinePrecision] * x + N[(x - 1.0), $MachinePrecision]), $MachinePrecision] * eps + N[(1.0 - x), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 0.04:\\
\;\;\;\;\left(2 \cdot \frac{1 + x}{e^{x}}\right) \cdot 0.5\\
\mathbf{elif}\;\varepsilon \leq 8.5 \cdot 10^{+159}:\\
\;\;\;\;\left(x + 1\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(0.5, x, -1\right), x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - \frac{\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon - 1, x, x - 1\right), \varepsilon, 1 - x\right)}{\varepsilon}}{2}\\
\end{array}
\end{array}
if eps < 0.0400000000000000008Initial program 65.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites70.9%
if 0.0400000000000000008 < eps < 8.50000000000000076e159Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites34.2%
Applied rewrites34.2%
Taylor expanded in x around 0
Applied rewrites48.7%
if 8.50000000000000076e159 < eps Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6477.6
Applied rewrites77.6%
Taylor expanded in x around 0
associate--l+N/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-lft1-inN/A
lower-*.f64N/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
distribute-neg-inN/A
metadata-evalN/A
unsub-negN/A
lower--.f64N/A
lower--.f64N/A
lower-/.f6437.6
Applied rewrites37.6%
Taylor expanded in eps around 0
Applied rewrites67.6%
Final simplification66.9%
; Herbie alternative: piecewise in eps -- (x+1)e^{-x} for small eps, a cubic
; fma polynomial for moderate eps, and a rearranged quotient for huge eps.
(FPCore (x eps)
:precision binary64
(if (<= eps 0.04)
(* (+ x 1.0) (exp (- x)))
(if (<= eps 8.5e+159)
(* (+ x 1.0) (fma (fma 0.5 x -1.0) x 1.0))
(/
(-
(+ (pow eps -1.0) 1.0)
(/ (fma (fma (- eps 1.0) x (- x 1.0)) eps (- 1.0 x)) eps))
2.0))))
double code(double x, double eps) {
double tmp;
if (eps <= 0.04) {
tmp = (x + 1.0) * exp(-x);
} else if (eps <= 8.5e+159) {
tmp = (x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0);
} else {
tmp = ((pow(eps, -1.0) + 1.0) - (fma(fma((eps - 1.0), x, (x - 1.0)), eps, (1.0 - x)) / eps)) / 2.0;
}
return tmp;
}
# Herbie alternative: piecewise in eps -- (x+1)*exp(-x) for eps <= 0.04, a cubic fma
# polynomial for eps <= 8.5e159, and a rearranged quotient otherwise.
function code(x, eps) tmp = 0.0 if (eps <= 0.04) tmp = Float64(Float64(x + 1.0) * exp(Float64(-x))); elseif (eps <= 8.5e+159) tmp = Float64(Float64(x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0)); else tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(fma(fma(Float64(eps - 1.0), x, Float64(x - 1.0)), eps, Float64(1.0 - x)) / eps)) / 2.0); end return tmp end
(* Herbie alternative: piecewise in eps -- (x+1)*E^(-x) for eps <= 0.04, a cubic polynomial for eps <= 8.5e159, a rearranged quotient otherwise. *)
code[x_, eps_] := If[LessEqual[eps, 0.04], N[(N[(x + 1.0), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision], If[LessEqual[eps, 8.5e+159], N[(N[(x + 1.0), $MachinePrecision] * N[(N[(0.5 * x + -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(N[(N[(N[(eps - 1.0), $MachinePrecision] * x + N[(x - 1.0), $MachinePrecision]), $MachinePrecision] * eps + N[(1.0 - x), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 0.04:\\
\;\;\;\;\left(x + 1\right) \cdot e^{-x}\\
\mathbf{elif}\;\varepsilon \leq 8.5 \cdot 10^{+159}:\\
\;\;\;\;\left(x + 1\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(0.5, x, -1\right), x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - \frac{\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon - 1, x, x - 1\right), \varepsilon, 1 - x\right)}{\varepsilon}}{2}\\
\end{array}
\end{array}
if eps < 0.0400000000000000008Initial program 65.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites70.9%
Applied rewrites70.9%
if 0.0400000000000000008 < eps < 8.50000000000000076e159Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites34.2%
Applied rewrites34.2%
Taylor expanded in x around 0
Applied rewrites48.7%
if 8.50000000000000076e159 < eps Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6477.6
Applied rewrites77.6%
Taylor expanded in x around 0
associate--l+N/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-lft1-inN/A
lower-*.f64N/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
distribute-neg-inN/A
metadata-evalN/A
unsub-negN/A
lower--.f64N/A
lower--.f64N/A
lower-/.f6437.6
Applied rewrites37.6%
Taylor expanded in eps around 0
Applied rewrites67.6%
Final simplification66.8%
; Herbie alternative: small-eps branch divides (1+x) by a cubic Taylor
; polynomial of e^x; note the algebraically redundant (* (* 2.0 ...) 0.5)
; wrapper. Moderate eps uses a cubic fma polynomial; huge eps a rearranged
; quotient.
(FPCore (x eps)
:precision binary64
(if (<= eps 0.04)
(*
(*
2.0
(/ (+ 1.0 x) (fma (fma (fma 0.16666666666666666 x 0.5) x 1.0) x 1.0)))
0.5)
(if (<= eps 8.5e+159)
(* (+ x 1.0) (fma (fma 0.5 x -1.0) x 1.0))
(/
(-
(+ (pow eps -1.0) 1.0)
(/ (fma (fma (- eps 1.0) x (- x 1.0)) eps (- 1.0 x)) eps))
2.0))))
double code(double x, double eps) {
double tmp;
if (eps <= 0.04) {
tmp = (2.0 * ((1.0 + x) / fma(fma(fma(0.16666666666666666, x, 0.5), x, 1.0), x, 1.0))) * 0.5;
} else if (eps <= 8.5e+159) {
tmp = (x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0);
} else {
tmp = ((pow(eps, -1.0) + 1.0) - (fma(fma((eps - 1.0), x, (x - 1.0)), eps, (1.0 - x)) / eps)) / 2.0;
}
return tmp;
}
# Herbie alternative: for eps <= 0.04, (1+x) over a cubic Taylor polynomial of exp(x)
# (wrapped in an algebraically redundant *2.0 ... *0.5); cubic fma polynomial for
# eps <= 8.5e159; rearranged quotient otherwise.
function code(x, eps) tmp = 0.0 if (eps <= 0.04) tmp = Float64(Float64(2.0 * Float64(Float64(1.0 + x) / fma(fma(fma(0.16666666666666666, x, 0.5), x, 1.0), x, 1.0))) * 0.5); elseif (eps <= 8.5e+159) tmp = Float64(Float64(x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0)); else tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(fma(fma(Float64(eps - 1.0), x, Float64(x - 1.0)), eps, Float64(1.0 - x)) / eps)) / 2.0); end return tmp end
(* Herbie alternative: for eps <= 0.04, (1+x) over a cubic Taylor polynomial of E^x (with redundant 2.0 * ... * 0.5 scaling); cubic polynomial for eps <= 8.5e159; rearranged quotient otherwise. *)
code[x_, eps_] := If[LessEqual[eps, 0.04], N[(N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] / N[(N[(N[(0.16666666666666666 * x + 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[eps, 8.5e+159], N[(N[(x + 1.0), $MachinePrecision] * N[(N[(0.5 * x + -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(N[(N[(N[(eps - 1.0), $MachinePrecision] * x + N[(x - 1.0), $MachinePrecision]), $MachinePrecision] * eps + N[(1.0 - x), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 0.04:\\
\;\;\;\;\left(2 \cdot \frac{1 + x}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x, 0.5\right), x, 1\right), x, 1\right)}\right) \cdot 0.5\\
\mathbf{elif}\;\varepsilon \leq 8.5 \cdot 10^{+159}:\\
\;\;\;\;\left(x + 1\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(0.5, x, -1\right), x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - \frac{\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon - 1, x, x - 1\right), \varepsilon, 1 - x\right)}{\varepsilon}}{2}\\
\end{array}
\end{array}
if eps < 0.0400000000000000008Initial program 65.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites70.9%
Taylor expanded in x around 0
Applied rewrites66.4%
if 0.0400000000000000008 < eps < 8.50000000000000076e159Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites34.2%
Applied rewrites34.2%
Taylor expanded in x around 0
Applied rewrites48.7%
if 8.50000000000000076e159 < eps Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6477.6
Applied rewrites77.6%
Taylor expanded in x around 0
associate--l+N/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-lft1-inN/A
lower-*.f64N/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
distribute-neg-inN/A
metadata-evalN/A
unsub-negN/A
lower--.f64N/A
lower--.f64N/A
lower-/.f6437.6
Applied rewrites37.6%
Taylor expanded in eps around 0
Applied rewrites67.6%
Final simplification63.6%
; Herbie alternative: small-eps branch divides (1+x) by a cubic Taylor
; polynomial of e^x (with the redundant (* (* 2.0 ...) 0.5) wrapper);
; moderate eps uses a cubic fma polynomial; huge eps an fma-based form.
(FPCore (x eps)
:precision binary64
(if (<= eps 0.04)
(*
(*
2.0
(/ (+ 1.0 x) (fma (fma (fma 0.16666666666666666 x 0.5) x 1.0) x 1.0)))
0.5)
(if (<= eps 2.4e+160)
(* (+ x 1.0) (fma (fma 0.5 x -1.0) x 1.0))
(fma
(* 0.5 x)
(fma (- eps 1.0) (+ (pow eps -1.0) 1.0) (/ (- 1.0 (* eps eps)) eps))
1.0))))
double code(double x, double eps) {
double tmp;
if (eps <= 0.04) {
tmp = (2.0 * ((1.0 + x) / fma(fma(fma(0.16666666666666666, x, 0.5), x, 1.0), x, 1.0))) * 0.5;
} else if (eps <= 2.4e+160) {
tmp = (x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0);
} else {
tmp = fma((0.5 * x), fma((eps - 1.0), (pow(eps, -1.0) + 1.0), ((1.0 - (eps * eps)) / eps)), 1.0);
}
return tmp;
}
# Herbie alternative: for eps <= 0.04, (1+x) over a cubic Taylor polynomial of exp(x)
# (with redundant *2.0 ... *0.5 scaling); cubic fma polynomial for eps <= 2.4e160;
# nested-fma form otherwise.
function code(x, eps) tmp = 0.0 if (eps <= 0.04) tmp = Float64(Float64(2.0 * Float64(Float64(1.0 + x) / fma(fma(fma(0.16666666666666666, x, 0.5), x, 1.0), x, 1.0))) * 0.5); elseif (eps <= 2.4e+160) tmp = Float64(Float64(x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0)); else tmp = fma(Float64(0.5 * x), fma(Float64(eps - 1.0), Float64((eps ^ -1.0) + 1.0), Float64(Float64(1.0 - Float64(eps * eps)) / eps)), 1.0); end return tmp end
(* Herbie alternative: for eps <= 0.04, (1+x) over a cubic Taylor polynomial of E^x (with redundant 2.0 * ... * 0.5 scaling); cubic polynomial for eps <= 2.4e160; nested product form otherwise. *)
code[x_, eps_] := If[LessEqual[eps, 0.04], N[(N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] / N[(N[(N[(0.16666666666666666 * x + 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[eps, 2.4e+160], N[(N[(x + 1.0), $MachinePrecision] * N[(N[(0.5 * x + -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(0.5 * x), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] + N[(N[(1.0 - N[(eps * eps), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 0.04:\\
\;\;\;\;\left(2 \cdot \frac{1 + x}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x, 0.5\right), x, 1\right), x, 1\right)}\right) \cdot 0.5\\
\mathbf{elif}\;\varepsilon \leq 2.4 \cdot 10^{+160}:\\
\;\;\;\;\left(x + 1\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(0.5, x, -1\right), x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.5 \cdot x, \mathsf{fma}\left(\varepsilon - 1, {\varepsilon}^{-1} + 1, \frac{1 - \varepsilon \cdot \varepsilon}{\varepsilon}\right), 1\right)\\
\end{array}
\end{array}
if eps < 0.0400000000000000008Initial program 65.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites70.9%
Taylor expanded in x around 0
Applied rewrites66.4%
if 0.0400000000000000008 < eps < 2.4000000000000001e160Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites34.2%
Applied rewrites34.2%
Taylor expanded in x around 0
Applied rewrites48.7%
if 2.4000000000000001e160 < eps Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites21.9%
Taylor expanded in eps around 0
Applied rewrites58.6%
Final simplification62.6%
; Herbie alternative: piecewise in eps -- (x+1) over a quadratic polynomial for
; small eps, a cubic fma polynomial for eps <= 3.7e269, and a linear-in-eps
; form for huge eps.
(FPCore (x eps)
:precision binary64
(if (<= eps 0.04)
(/ (+ x 1.0) (fma (fma 0.5 x 1.0) x 1.0))
(if (<= eps 3.7e+269)
(* (+ x 1.0) (fma (fma 0.5 x -1.0) x 1.0))
(/ (- (+ (pow eps -1.0) 1.0) (* x eps)) 2.0))))
double code(double x, double eps) {
double tmp;
if (eps <= 0.04) {
tmp = (x + 1.0) / fma(fma(0.5, x, 1.0), x, 1.0);
} else if (eps <= 3.7e+269) {
tmp = (x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0);
} else {
tmp = ((pow(eps, -1.0) + 1.0) - (x * eps)) / 2.0;
}
return tmp;
}
# Herbie alternative: (x+1) over a quadratic fma polynomial for eps <= 0.04, a cubic
# fma polynomial for eps <= 3.7e269, and a linear-in-eps form otherwise.
function code(x, eps) tmp = 0.0 if (eps <= 0.04) tmp = Float64(Float64(x + 1.0) / fma(fma(0.5, x, 1.0), x, 1.0)); elseif (eps <= 3.7e+269) tmp = Float64(Float64(x + 1.0) * fma(fma(0.5, x, -1.0), x, 1.0)); else tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(x * eps)) / 2.0); end return tmp end
(* Herbie alternative: (x+1) over a quadratic polynomial for eps <= 0.04, a cubic polynomial for eps <= 3.7e269, and a linear-in-eps form otherwise. *)
code[x_, eps_] := If[LessEqual[eps, 0.04], N[(N[(x + 1.0), $MachinePrecision] / N[(N[(0.5 * x + 1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision], If[LessEqual[eps, 3.7e+269], N[(N[(x + 1.0), $MachinePrecision] * N[(N[(0.5 * x + -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(x * eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 0.04:\\
\;\;\;\;\frac{x + 1}{\mathsf{fma}\left(\mathsf{fma}\left(0.5, x, 1\right), x, 1\right)}\\
\mathbf{elif}\;\varepsilon \leq 3.7 \cdot 10^{+269}:\\
\;\;\;\;\left(x + 1\right) \cdot \mathsf{fma}\left(\mathsf{fma}\left(0.5, x, -1\right), x, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - x \cdot \varepsilon}{2}\\
\end{array}
\end{array}
if eps < 0.0400000000000000008Initial program 65.6%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites70.9%
Taylor expanded in x around 0
Applied rewrites62.1%
Applied rewrites62.1%
if 0.0400000000000000008 < eps < 3.6999999999999999e269Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites32.7%
Applied rewrites32.7%
Taylor expanded in x around 0
Applied rewrites44.0%
if 3.6999999999999999e269 < eps Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6470.9
Applied rewrites70.9%
Taylor expanded in x around 0
associate--l+N/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-lft1-inN/A
lower-*.f64N/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
distribute-neg-inN/A
metadata-evalN/A
unsub-negN/A
lower--.f64N/A
lower--.f64N/A
lower-/.f6423.7
Applied rewrites23.7%
Taylor expanded in eps around inf
Applied rewrites23.7%
Final simplification56.1%
; Herbie alternative: piecewise in x with a shared quadratic denominator
; t_0 = 1 + x + x^2/2; the middle branch uses (x^2 - 1)/(t_0 (x - 1)),
; algebraically (x + 1)/t_0.
(FPCore (x eps)
:precision binary64
(let* ((t_0 (fma (fma 0.5 x 1.0) x 1.0)))
(if (<= x -80.0)
(/ (- (+ (pow eps -1.0) 1.0) (* x eps)) 2.0)
(if (<= x 1.35e+154)
(/ (fma x x -1.0) (* t_0 (- x 1.0)))
(/ (+ x 1.0) t_0)))))
double code(double x, double eps) {
double t_0 = fma(fma(0.5, x, 1.0), x, 1.0);
double tmp;
if (x <= -80.0) {
tmp = ((pow(eps, -1.0) + 1.0) - (x * eps)) / 2.0;
} else if (x <= 1.35e+154) {
tmp = fma(x, x, -1.0) / (t_0 * (x - 1.0));
} else {
tmp = (x + 1.0) / t_0;
}
return tmp;
}
# Herbie alternative: piecewise in x with shared quadratic denominator
# t_0 = 1 + x + x^2/2; middle branch (x^2 - 1)/(t_0*(x-1)) is algebraically (x+1)/t_0.
function code(x, eps) t_0 = fma(fma(0.5, x, 1.0), x, 1.0) tmp = 0.0 if (x <= -80.0) tmp = Float64(Float64(Float64((eps ^ -1.0) + 1.0) - Float64(x * eps)) / 2.0); elseif (x <= 1.35e+154) tmp = Float64(fma(x, x, -1.0) / Float64(t_0 * Float64(x - 1.0))); else tmp = Float64(Float64(x + 1.0) / t_0); end return tmp end
(* Herbie alternative: piecewise in x with shared quadratic denominator t_0 = 1 + x + x^2/2; middle branch (x^2 - 1)/(t_0 (x - 1)) is algebraically (x + 1)/t_0. *)
code[x_, eps_] := Block[{t$95$0 = N[(N[(0.5 * x + 1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]}, If[LessEqual[x, -80.0], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] + 1.0), $MachinePrecision] - N[(x * eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.35e+154], N[(N[(x * x + -1.0), $MachinePrecision] / N[(t$95$0 * N[(x - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(x + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\mathsf{fma}\left(0.5, x, 1\right), x, 1\right)\\
\mathbf{if}\;x \leq -80:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} + 1\right) - x \cdot \varepsilon}{2}\\
\mathbf{elif}\;x \leq 1.35 \cdot 10^{+154}:\\
\;\;\;\;\frac{\mathsf{fma}\left(x, x, -1\right)}{t\_0 \cdot \left(x - 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{x + 1}{t\_0}\\
\end{array}
\end{array}
if x < -80Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
lower-/.f6452.8
Applied rewrites52.8%
Taylor expanded in x around 0
associate--l+N/A
mul-1-negN/A
associate-*r*N/A
distribute-lft-neg-inN/A
distribute-lft1-inN/A
lower-*.f64N/A
*-commutativeN/A
distribute-lft-neg-inN/A
lower-fma.f64N/A
distribute-neg-inN/A
metadata-evalN/A
unsub-negN/A
lower--.f64N/A
lower--.f64N/A
lower-/.f6422.8
Applied rewrites22.8%
Taylor expanded in eps around inf
Applied rewrites22.8%
if -80 < x < 1.35000000000000003e154Initial program 63.2%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites72.1%
Taylor expanded in x around 0
Applied rewrites62.5%
Applied rewrites67.0%
if 1.35000000000000003e154 < x Initial program 100.0%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites59.3%
Taylor expanded in x around 0
Applied rewrites59.3%
Applied rewrites59.3%
Final simplification58.9%
; Herbie alternative: cubic Taylor polynomial 1 - x^2/2 + x^3/3 in x; eps is unused.
(FPCore (x eps) :precision binary64 (fma (fma 0.3333333333333333 x -0.5) (* x x) 1.0))
double code(double x, double eps) {
return fma(fma(0.3333333333333333, x, -0.5), (x * x), 1.0);
}
# Herbie alternative: cubic Taylor polynomial 1 - x^2/2 + x^3/3 via fma; eps unused.
function code(x, eps) return fma(fma(0.3333333333333333, x, -0.5), Float64(x * x), 1.0) end
(* Herbie alternative: cubic Taylor polynomial 1 - x^2/2 + x^3/3; eps unused. *)
code[x_, eps_] := N[(N[(0.3333333333333333 * x + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, x, -0.5\right), x \cdot x, 1\right)
\end{array}
Initial program 75.4%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites58.8%
Taylor expanded in x around 0
Applied rewrites50.5%
; Herbie alternative: constant 1.0 (zeroth-order Taylor expansion in x); both arguments unused.
(FPCore (x eps) :precision binary64 1.0)
/*
 * Herbie alternative: zeroth-order Taylor expansion in x -- the result is the
 * constant 1.0. Both parameters are intentionally unused.
 */
double code(double x, double eps) {
    (void)x;
    (void)eps;
    return 1.0;
}
! Herbie alternative: the approximation collapses to the constant 1
! (zeroth-order Taylor expansion in x); both arguments are ignored.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = 1.0d0
end function
/** Herbie alternative: the approximation collapses to the constant 1.0; both arguments are ignored. */
public static double code(double x, double eps) {
    final double constantValue = 1.0;
    return constantValue;
}
def code(x, eps):
    """Herbie alternative: the approximation collapses to 1.0; both arguments are ignored."""
    return 1.0
function code(x, eps)
    # Herbie alternative: the approximation collapses to 1.0; arguments ignored.
    return 1.0
end
% Herbie alternative: constant 1.0; both arguments unused.
function tmp = code(x, eps) tmp = 1.0; end
(* Herbie alternative: constant 1.0; both arguments unused. *)
code[x_, eps_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 75.4%
Taylor expanded in x around 0
Applied rewrites42.6%
herbie shell --seed 2024307
; Original input program: ((1 + 1/eps) e^(-(1-eps)x) - (1/eps - 1) e^(-(1+eps)x)) / 2.
(FPCore (x eps)
:name "NMSE Section 6.1 mentioned, A"
:precision binary64
(/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))