
(FPCore (x eps) :precision binary64 (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Generic NaN-aware fmax/fmin helpers emitted alongside the translated code so
! the Fortran version can mirror C's fmax/fmin semantics: when exactly one
! argument is NaN, the other argument is returned (x /= x is the NaN test).
! Mixed real(4)/real(8) argument combinations promote the result to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names resolve on the kind pattern of the two arguments (8/8, 4/4, 8/4, 4/8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8), real(8) -> real(8) maximum with NaN fallback to the other argument.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! If x is NaN take y; else if y is NaN take x; else the ordinary max.
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4), real(4) -> real(4) maximum with the same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(8), real(4) -> real(8): y is widened with dble before comparison.
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! real(4), real(8) -> real(8): x is widened with dble before comparison.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! The fmin variants mirror the fmax ones with min in place of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Binary64 evaluation of the initial program:
! ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: inv_eps, grow, decay
inv_eps = 1.0d0 / eps
grow = (1.0d0 + inv_eps) * exp(-((1.0d0 - eps) * x))
decay = (inv_eps - 1.0d0) * exp(-((1.0d0 + eps) * x))
code = (grow - decay) / 2.0d0
end function
/**
 * Binary64 evaluation of ((1 + 1/eps)*e^{-(1-eps)x} - (1/eps - 1)*e^{-(1+eps)x}) / 2.
 */
public static double code(double x, double eps) {
    double invEps = 1.0 / eps;
    double grow  = (1.0 + invEps) * Math.exp(-((1.0 - eps) * x));
    double decay = (invEps - 1.0) * Math.exp(-((1.0 + eps) * x));
    return (grow - decay) / 2.0;
}
def code(x, eps):
    """Binary64 evaluation of ((1 + 1/eps)*e**(-(1-eps)x) - (1/eps - 1)*e**(-(1+eps)x)) / 2."""
    inv_eps = 1.0 / eps
    grow = (1.0 + inv_eps) * math.exp(-((1.0 - eps) * x))
    decay = (inv_eps - 1.0) * math.exp(-((1.0 + eps) * x))
    return (grow - decay) / 2.0
# Binary64 evaluation of ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2,
# with every intermediate rounded through Float64 exactly as in the FPCore form.
function code(x, eps)
    inv_eps = Float64(1.0 / eps)
    grow = Float64(Float64(1.0 + inv_eps) * exp(Float64(-Float64(Float64(1.0 - eps) * x))))
    decay = Float64(Float64(inv_eps - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))
    return Float64(Float64(grow - decay) / 2.0)
end
% Binary64 evaluation of ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
    inv_eps = 1.0 / eps;
    grow = (1.0 + inv_eps) * exp(-((1.0 - eps) * x));
    decay = (inv_eps - 1.0) * exp(-((1.0 + eps) * x));
    tmp = (grow - decay) / 2.0;
end
(* Initial program at $MachinePrecision; every subexpression is rounded with N[...] to mimic binary64 evaluation. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
/* Duplicate listing of the initial program in C (identical to the version near the top of the report). */
double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Duplicate listing of the NaN-aware fmax/fmin helper module: if one argument
! is NaN (tested via x /= x) the other argument is returned; mixed real(4)/real(8)
! combinations promote to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Duplicate listing of the initial program in Fortran:
! ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
// Duplicate listing of the initial program in Java.
public static double code(double x, double eps) {
return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
# Duplicate listing of the initial program in Python.
def code(x, eps): return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
# Duplicate listing of the initial program in Julia (explicit Float64 rounding at every step).
function code(x, eps) return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0) end
% Duplicate listing of the initial program in MATLAB.
function tmp = code(x, eps) tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0; end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
(FPCore (x eps) :precision binary64 (* (- (exp (* x (+ -1.0 eps))) (/ -1.0 (exp (fma x eps x)))) 0.5))
double code(double x, double eps) {
return (exp((x * (-1.0 + eps))) - (-1.0 / exp(fma(x, eps, x)))) * 0.5;
}
function code(x, eps) return Float64(Float64(exp(Float64(x * Float64(-1.0 + eps))) - Float64(-1.0 / exp(fma(x, eps, x)))) * 0.5) end
code[x_, eps_] := N[(N[(N[Exp[N[(x * N[(-1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[(-1.0 / N[Exp[N[(x * eps + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x \cdot \left(-1 + \varepsilon\right)} - \frac{-1}{e^{\mathsf{fma}\left(x, \varepsilon, x\right)}}\right) \cdot 0.5
\end{array}
Initial program 73.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
lift-exp.f64N/A
lift-neg.f64N/A
lift-fma.f64N/A
rec-expN/A
lift-fma.f64N/A
lift-exp.f64N/A
lift-/.f6499.7
Applied rewrites99.7%
Final simplification99.7%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (exp (- x))))
(if (<= eps 1.0)
(* (+ t_0 t_0) 0.5)
(* (+ (exp (* x eps)) (exp (- (fma x eps x)))) 0.5))))
double code(double x, double eps) {
double t_0 = exp(-x);
double tmp;
if (eps <= 1.0) {
tmp = (t_0 + t_0) * 0.5;
} else {
tmp = (exp((x * eps)) + exp(-fma(x, eps, x))) * 0.5;
}
return tmp;
}
function code(x, eps) t_0 = exp(Float64(-x)) tmp = 0.0 if (eps <= 1.0) tmp = Float64(Float64(t_0 + t_0) * 0.5); else tmp = Float64(Float64(exp(Float64(x * eps)) + exp(Float64(-fma(x, eps, x)))) * 0.5); end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[eps, 1.0], N[(N[(t$95$0 + t$95$0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] + N[Exp[(-N[(x * eps + x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;\varepsilon \leq 1:\\
\;\;\;\;\left(t\_0 + t\_0\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\left(e^{x \cdot \varepsilon} + e^{-\mathsf{fma}\left(x, \varepsilon, x\right)}\right) \cdot 0.5\\
\end{array}
\end{array}
if eps < 1: Initial program 64.6%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.5%
Taylor expanded in eps around 0
mul-1-negN/A
lower-+.f64N/A
lower-exp.f64N/A
lift-neg.f64N/A
lower-exp.f64N/A
lift-neg.f6487.2
Applied rewrites87.2%
if 1 < eps Initial program 100.0%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in eps around inf
*-commutativeN/A
distribute-rgt-neg-inN/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Final simplification90.5%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (exp (- x))))
(if (<= x -2.8e-233)
(* (+ 1.0 (exp (- (fma x eps x)))) 0.5)
(if (<= x 3.1e+26)
(* (- (exp (* x eps)) (/ -1.0 (fma (- eps -1.0) x 1.0))) 0.5)
(* (+ t_0 t_0) 0.5)))))
double code(double x, double eps) {
double t_0 = exp(-x);
double tmp;
if (x <= -2.8e-233) {
tmp = (1.0 + exp(-fma(x, eps, x))) * 0.5;
} else if (x <= 3.1e+26) {
tmp = (exp((x * eps)) - (-1.0 / fma((eps - -1.0), x, 1.0))) * 0.5;
} else {
tmp = (t_0 + t_0) * 0.5;
}
return tmp;
}
function code(x, eps) t_0 = exp(Float64(-x)) tmp = 0.0 if (x <= -2.8e-233) tmp = Float64(Float64(1.0 + exp(Float64(-fma(x, eps, x)))) * 0.5); elseif (x <= 3.1e+26) tmp = Float64(Float64(exp(Float64(x * eps)) - Float64(-1.0 / fma(Float64(eps - -1.0), x, 1.0))) * 0.5); else tmp = Float64(Float64(t_0 + t_0) * 0.5); end return tmp end
code[x_, eps_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[x, -2.8e-233], N[(N[(1.0 + N[Exp[(-N[(x * eps + x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 3.1e+26], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] - N[(-1.0 / N[(N[(eps - -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(t$95$0 + t$95$0), $MachinePrecision] * 0.5), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;x \leq -2.8 \cdot 10^{-233}:\\
\;\;\;\;\left(1 + e^{-\mathsf{fma}\left(x, \varepsilon, x\right)}\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 3.1 \cdot 10^{+26}:\\
\;\;\;\;\left(e^{x \cdot \varepsilon} - \frac{-1}{\mathsf{fma}\left(\varepsilon - -1, x, 1\right)}\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\left(t\_0 + t\_0\right) \cdot 0.5\\
\end{array}
\end{array}
if x < -2.8000000000000001e-233: Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
*-commutative68.7
distribute-rgt-neg-in68.7
Applied rewrites68.7%
if -2.8000000000000001e-233 < x < 3.1e26Initial program 53.7%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
lift-exp.f64N/A
lift-neg.f64N/A
lift-fma.f64N/A
rec-expN/A
lift-fma.f64N/A
lift-exp.f64N/A
lift-/.f6499.7
Applied rewrites99.7%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f6496.5
Applied rewrites96.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f6481.8
Applied rewrites81.8%
if 3.1e26 < x Initial program 100.0%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in eps around 0
mul-1-negN/A
lower-+.f64N/A
lower-exp.f64N/A
lift-neg.f64N/A
lower-exp.f64N/A
lift-neg.f6466.2
Applied rewrites66.2%
Final simplification72.5%
(FPCore (x eps) :precision binary64 (* (+ (exp (* x (+ -1.0 eps))) (exp (- (fma x eps x)))) 0.5))
double code(double x, double eps) {
return (exp((x * (-1.0 + eps))) + exp(-fma(x, eps, x))) * 0.5;
}
function code(x, eps) return Float64(Float64(exp(Float64(x * Float64(-1.0 + eps))) + exp(Float64(-fma(x, eps, x)))) * 0.5) end
code[x_, eps_] := N[(N[(N[Exp[N[(x * N[(-1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[(-N[(x * eps + x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(e^{x \cdot \left(-1 + \varepsilon\right)} + e^{-\mathsf{fma}\left(x, \varepsilon, x\right)}\right) \cdot 0.5
\end{array}
Initial program 73.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Final simplification99.7%
(FPCore (x eps)
:precision binary64
(if (<= x -2.8e-233)
(* (+ 1.0 (exp (- (fma x eps x)))) 0.5)
(if (<= x 3.1e+26)
(* (- (exp (* x eps)) (/ -1.0 (fma (- eps -1.0) x 1.0))) 0.5)
(/ (- (- (pow eps -1.0) -1.0) (* (- (/ 1.0 eps) 1.0) 1.0)) 2.0))))
double code(double x, double eps) {
double tmp;
if (x <= -2.8e-233) {
tmp = (1.0 + exp(-fma(x, eps, x))) * 0.5;
} else if (x <= 3.1e+26) {
tmp = (exp((x * eps)) - (-1.0 / fma((eps - -1.0), x, 1.0))) * 0.5;
} else {
tmp = ((pow(eps, -1.0) - -1.0) - (((1.0 / eps) - 1.0) * 1.0)) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (x <= -2.8e-233) tmp = Float64(Float64(1.0 + exp(Float64(-fma(x, eps, x)))) * 0.5); elseif (x <= 3.1e+26) tmp = Float64(Float64(exp(Float64(x * eps)) - Float64(-1.0 / fma(Float64(eps - -1.0), x, 1.0))) * 0.5); else tmp = Float64(Float64(Float64((eps ^ -1.0) - -1.0) - Float64(Float64(Float64(1.0 / eps) - 1.0) * 1.0)) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[x, -2.8e-233], N[(N[(1.0 + N[Exp[(-N[(x * eps + x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 3.1e+26], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] - N[(-1.0 / N[(N[(eps - -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[Power[eps, -1.0], $MachinePrecision] - -1.0), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * 1.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.8 \cdot 10^{-233}:\\
\;\;\;\;\left(1 + e^{-\mathsf{fma}\left(x, \varepsilon, x\right)}\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 3.1 \cdot 10^{+26}:\\
\;\;\;\;\left(e^{x \cdot \varepsilon} - \frac{-1}{\mathsf{fma}\left(\varepsilon - -1, x, 1\right)}\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left({\varepsilon}^{-1} - -1\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot 1}{2}\\
\end{array}
\end{array}
if x < -2.8000000000000001e-233Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
*-commutative68.7
distribute-rgt-neg-in68.7
Applied rewrites68.7%
if -2.8000000000000001e-233 < x < 3.1e26Initial program 53.7%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
lift-exp.f64N/A
lift-neg.f64N/A
lift-fma.f64N/A
rec-expN/A
lift-fma.f64N/A
lift-exp.f64N/A
lift-/.f6499.7
Applied rewrites99.7%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f6496.5
Applied rewrites96.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f6481.8
Applied rewrites81.8%
if 3.1e26 < x Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites24.2%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f64N/A
inv-powN/A
lower-pow.f6462.4
Applied rewrites62.4%
Final simplification71.6%
(FPCore (x eps)
:precision binary64
(if (<= x -2.8e-233)
(* (+ 1.0 (exp (- (fma x eps x)))) 0.5)
(if (<= x 2.25e+17)
(* (- (exp (* x eps)) (/ -1.0 (fma (- eps -1.0) x 1.0))) 0.5)
(/
(-
(* (+ 1.0 (/ 1.0 eps)) (* (- eps 1.0) x))
(* (- (/ 1.0 eps) 1.0) (fma -1.0 x 1.0)))
2.0))))
double code(double x, double eps) {
double tmp;
if (x <= -2.8e-233) {
tmp = (1.0 + exp(-fma(x, eps, x))) * 0.5;
} else if (x <= 2.25e+17) {
tmp = (exp((x * eps)) - (-1.0 / fma((eps - -1.0), x, 1.0))) * 0.5;
} else {
tmp = (((1.0 + (1.0 / eps)) * ((eps - 1.0) * x)) - (((1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (x <= -2.8e-233) tmp = Float64(Float64(1.0 + exp(Float64(-fma(x, eps, x)))) * 0.5); elseif (x <= 2.25e+17) tmp = Float64(Float64(exp(Float64(x * eps)) - Float64(-1.0 / fma(Float64(eps - -1.0), x, 1.0))) * 0.5); else tmp = Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * Float64(Float64(eps - 1.0) * x)) - Float64(Float64(Float64(1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[x, -2.8e-233], N[(N[(1.0 + N[Exp[(-N[(x * eps + x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 2.25e+17], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] - N[(-1.0 / N[(N[(eps - -1.0), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[(-1.0 * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.8 \cdot 10^{-233}:\\
\;\;\;\;\left(1 + e^{-\mathsf{fma}\left(x, \varepsilon, x\right)}\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 2.25 \cdot 10^{+17}:\\
\;\;\;\;\left(e^{x \cdot \varepsilon} - \frac{-1}{\mathsf{fma}\left(\varepsilon - -1, x, 1\right)}\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\left(\varepsilon - 1\right) \cdot x\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot \mathsf{fma}\left(-1, x, 1\right)}{2}\\
\end{array}
\end{array}
if x < -2.8000000000000001e-233Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
*-commutative68.7
distribute-rgt-neg-in68.7
Applied rewrites68.7%
if -2.8000000000000001e-233 < x < 2.25e17Initial program 51.5%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
lift-exp.f64N/A
lift-neg.f64N/A
lift-fma.f64N/A
rec-expN/A
lift-fma.f64N/A
lift-exp.f64N/A
lift-/.f6499.7
Applied rewrites99.7%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f6498.6
Applied rewrites98.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f6484.4
Applied rewrites84.4%
if 2.25e17 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6418.3
Applied rewrites18.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6429.8
Applied rewrites29.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
lift--.f6429.8
Applied rewrites29.8%
Taylor expanded in eps around 0
Applied rewrites43.0%
Final simplification66.7%
(FPCore (x eps)
:precision binary64
(if (<= x -2.8e-233)
(* (+ 1.0 (exp (- (fma x eps x)))) 0.5)
(if (<= x 1.95e+17)
(* (- (exp (* x eps)) (- (fma x eps x) 1.0)) 0.5)
(/
(-
(* (+ 1.0 (/ 1.0 eps)) (* (- eps 1.0) x))
(* (- (/ 1.0 eps) 1.0) (fma -1.0 x 1.0)))
2.0))))
double code(double x, double eps) {
double tmp;
if (x <= -2.8e-233) {
tmp = (1.0 + exp(-fma(x, eps, x))) * 0.5;
} else if (x <= 1.95e+17) {
tmp = (exp((x * eps)) - (fma(x, eps, x) - 1.0)) * 0.5;
} else {
tmp = (((1.0 + (1.0 / eps)) * ((eps - 1.0) * x)) - (((1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (x <= -2.8e-233) tmp = Float64(Float64(1.0 + exp(Float64(-fma(x, eps, x)))) * 0.5); elseif (x <= 1.95e+17) tmp = Float64(Float64(exp(Float64(x * eps)) - Float64(fma(x, eps, x) - 1.0)) * 0.5); else tmp = Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * Float64(Float64(eps - 1.0) * x)) - Float64(Float64(Float64(1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[x, -2.8e-233], N[(N[(1.0 + N[Exp[(-N[(x * eps + x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 1.95e+17], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] - N[(N[(x * eps + x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[(-1.0 * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.8 \cdot 10^{-233}:\\
\;\;\;\;\left(1 + e^{-\mathsf{fma}\left(x, \varepsilon, x\right)}\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 1.95 \cdot 10^{+17}:\\
\;\;\;\;\left(e^{x \cdot \varepsilon} - \left(\mathsf{fma}\left(x, \varepsilon, x\right) - 1\right)\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\left(\varepsilon - 1\right) \cdot x\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot \mathsf{fma}\left(-1, x, 1\right)}{2}\\
\end{array}
\end{array}
if x < -2.8000000000000001e-233Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
*-commutative68.7
distribute-rgt-neg-in68.7
Applied rewrites68.7%
if -2.8000000000000001e-233 < x < 1.95e17Initial program 51.5%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
lift-exp.f64N/A
lift-neg.f64N/A
lift-fma.f64N/A
rec-expN/A
lift-fma.f64N/A
lift-exp.f64N/A
lift-/.f6499.7
Applied rewrites99.7%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f6498.6
Applied rewrites98.6%
Taylor expanded in x around 0
*-commutativeN/A
+-commutativeN/A
distribute-rgt1-inN/A
*-commutativeN/A
+-commutativeN/A
lower--.f64N/A
lift-fma.f6484.8
Applied rewrites84.8%
if 1.95e17 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6418.3
Applied rewrites18.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6429.8
Applied rewrites29.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
lift--.f6429.8
Applied rewrites29.8%
Taylor expanded in eps around 0
Applied rewrites43.0%
Final simplification66.9%
(FPCore (x eps)
:precision binary64
(if (<= x -0.00036)
(* (- (exp (- x)) -1.0) 0.5)
(if (<= x -3.7e-233)
(* (fma (fma -1.0 (/ (- (* eps eps) 1.0) (- eps 1.0)) -1.0) x 2.0) 0.5)
(if (<= x 2.25e+17)
(* (- (exp (* x eps)) -1.0) 0.5)
(/
(-
(* (+ 1.0 (/ 1.0 eps)) (* (- eps 1.0) x))
(* (- (/ 1.0 eps) 1.0) (fma -1.0 x 1.0)))
2.0)))))
double code(double x, double eps) {
double tmp;
if (x <= -0.00036) {
tmp = (exp(-x) - -1.0) * 0.5;
} else if (x <= -3.7e-233) {
tmp = fma(fma(-1.0, (((eps * eps) - 1.0) / (eps - 1.0)), -1.0), x, 2.0) * 0.5;
} else if (x <= 2.25e+17) {
tmp = (exp((x * eps)) - -1.0) * 0.5;
} else {
tmp = (((1.0 + (1.0 / eps)) * ((eps - 1.0) * x)) - (((1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (x <= -0.00036) tmp = Float64(Float64(exp(Float64(-x)) - -1.0) * 0.5); elseif (x <= -3.7e-233) tmp = Float64(fma(fma(-1.0, Float64(Float64(Float64(eps * eps) - 1.0) / Float64(eps - 1.0)), -1.0), x, 2.0) * 0.5); elseif (x <= 2.25e+17) tmp = Float64(Float64(exp(Float64(x * eps)) - -1.0) * 0.5); else tmp = Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * Float64(Float64(eps - 1.0) * x)) - Float64(Float64(Float64(1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[x, -0.00036], N[(N[(N[Exp[(-x)], $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, -3.7e-233], N[(N[(N[(-1.0 * N[(N[(N[(eps * eps), $MachinePrecision] - 1.0), $MachinePrecision] / N[(eps - 1.0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 2.25e+17], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[(-1.0 * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.00036:\\
\;\;\;\;\left(e^{-x} - -1\right) \cdot 0.5\\
\mathbf{elif}\;x \leq -3.7 \cdot 10^{-233}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{\varepsilon \cdot \varepsilon - 1}{\varepsilon - 1}, -1\right), x, 2\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 2.25 \cdot 10^{+17}:\\
\;\;\;\;\left(e^{x \cdot \varepsilon} - -1\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\left(\varepsilon - 1\right) \cdot x\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot \mathsf{fma}\left(-1, x, 1\right)}{2}\\
\end{array}
\end{array}
if x < -3.60000000000000023e-4Initial program 97.8%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites98.6%
Taylor expanded in x around 0
Applied rewrites63.4%
Taylor expanded in eps around 0
mul-1-negN/A
lift-neg.f6496.4
Applied rewrites96.4%
if -3.60000000000000023e-4 < x < -3.6999999999999998e-233Initial program 55.8%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6477.9
Applied rewrites77.9%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6487.9
Applied rewrites87.9%
Taylor expanded in eps around 0
Applied rewrites87.9%
if -3.6999999999999998e-233 < x < 2.25e17Initial program 51.5%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in x around 0
Applied rewrites83.8%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f6484.0
Applied rewrites84.0%
if 2.25e17 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6418.3
Applied rewrites18.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6429.8
Applied rewrites29.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
lift--.f6429.8
Applied rewrites29.8%
Taylor expanded in eps around 0
Applied rewrites43.0%
(FPCore (x eps)
:precision binary64
(if (<= x -5e-260)
(* (+ 1.0 (exp (- (fma x eps x)))) 0.5)
(if (<= x 2.25e+17)
(* (- (exp (* x eps)) -1.0) 0.5)
(/
(-
(* (+ 1.0 (/ 1.0 eps)) (* (- eps 1.0) x))
(* (- (/ 1.0 eps) 1.0) (fma -1.0 x 1.0)))
2.0))))
double code(double x, double eps) {
double tmp;
if (x <= -5e-260) {
tmp = (1.0 + exp(-fma(x, eps, x))) * 0.5;
} else if (x <= 2.25e+17) {
tmp = (exp((x * eps)) - -1.0) * 0.5;
} else {
tmp = (((1.0 + (1.0 / eps)) * ((eps - 1.0) * x)) - (((1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0;
}
return tmp;
}
function code(x, eps) tmp = 0.0 if (x <= -5e-260) tmp = Float64(Float64(1.0 + exp(Float64(-fma(x, eps, x)))) * 0.5); elseif (x <= 2.25e+17) tmp = Float64(Float64(exp(Float64(x * eps)) - -1.0) * 0.5); else tmp = Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * Float64(Float64(eps - 1.0) * x)) - Float64(Float64(Float64(1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0); end return tmp end
code[x_, eps_] := If[LessEqual[x, -5e-260], N[(N[(1.0 + N[Exp[(-N[(x * eps + x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 2.25e+17], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[(-1.0 * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -5 \cdot 10^{-260}:\\
\;\;\;\;\left(1 + e^{-\mathsf{fma}\left(x, \varepsilon, x\right)}\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 2.25 \cdot 10^{+17}:\\
\;\;\;\;\left(e^{x \cdot \varepsilon} - -1\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\left(\varepsilon - 1\right) \cdot x\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot \mathsf{fma}\left(-1, x, 1\right)}{2}\\
\end{array}
\end{array}
if x < -5.0000000000000003e-260Initial program 71.2%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
*-commutative71.3
distribute-rgt-neg-in71.3
Applied rewrites71.3%
if -5.0000000000000003e-260 < x < 2.25e17Initial program 52.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in x around 0
Applied rewrites81.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f6482.2
Applied rewrites82.2%
if 2.25e17 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6418.3
Applied rewrites18.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6429.8
Applied rewrites29.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
lift--.f6429.8
Applied rewrites29.8%
Taylor expanded in eps around 0
Applied rewrites43.0%
Final simplification66.6%
(FPCore (x eps)
:precision binary64
(if (<= x -0.00036)
(* (- (exp (- x)) -1.0) 0.5)
(if (<= x -3.7e-233)
(* (fma (fma -1.0 (/ (- (* eps eps) 1.0) (- eps 1.0)) -1.0) x 2.0) 0.5)
(if (<= x 5e-222)
1.0
(if (<= x 3.55e+16)
(*
(fma
(fma -1.0 (- eps -1.0) (/ (+ -1.0 (* eps eps)) (- eps -1.0)))
x
2.0)
0.5)
(/
(-
(* (+ 1.0 (/ 1.0 eps)) (* (- eps 1.0) x))
(* (- (/ 1.0 eps) 1.0) (fma -1.0 x 1.0)))
2.0))))))
double code(double x, double eps) {
double tmp;
if (x <= -0.00036) {
tmp = (exp(-x) - -1.0) * 0.5;
} else if (x <= -3.7e-233) {
tmp = fma(fma(-1.0, (((eps * eps) - 1.0) / (eps - 1.0)), -1.0), x, 2.0) * 0.5;
} else if (x <= 5e-222) {
tmp = 1.0;
} else if (x <= 3.55e+16) {
tmp = fma(fma(-1.0, (eps - -1.0), ((-1.0 + (eps * eps)) / (eps - -1.0))), x, 2.0) * 0.5;
} else {
tmp = (((1.0 + (1.0 / eps)) * ((eps - 1.0) * x)) - (((1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0;
}
return tmp;
}
# Julia rendering of the 5-branch piecewise approximation (auto-generated).
function code(x, eps) tmp = 0.0 if (x <= -0.00036) tmp = Float64(Float64(exp(Float64(-x)) - -1.0) * 0.5); elseif (x <= -3.7e-233) tmp = Float64(fma(fma(-1.0, Float64(Float64(Float64(eps * eps) - 1.0) / Float64(eps - 1.0)), -1.0), x, 2.0) * 0.5); elseif (x <= 5e-222) tmp = 1.0; elseif (x <= 3.55e+16) tmp = Float64(fma(fma(-1.0, Float64(eps - -1.0), Float64(Float64(-1.0 + Float64(eps * eps)) / Float64(eps - -1.0))), x, 2.0) * 0.5); else tmp = Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * Float64(Float64(eps - 1.0) * x)) - Float64(Float64(Float64(1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0); end return tmp end
(* Wolfram rendering of the 5-branch piecewise approximation (auto-generated). *)
code[x_, eps_] := If[LessEqual[x, -0.00036], N[(N[(N[Exp[(-x)], $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, -3.7e-233], N[(N[(N[(-1.0 * N[(N[(N[(eps * eps), $MachinePrecision] - 1.0), $MachinePrecision] / N[(eps - 1.0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 5e-222], 1.0, If[LessEqual[x, 3.55e+16], N[(N[(N[(-1.0 * N[(eps - -1.0), $MachinePrecision] + N[(N[(-1.0 + N[(eps * eps), $MachinePrecision]), $MachinePrecision] / N[(eps - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[(-1.0 * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.00036:\\
\;\;\;\;\left(e^{-x} - -1\right) \cdot 0.5\\
\mathbf{elif}\;x \leq -3.7 \cdot 10^{-233}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{\varepsilon \cdot \varepsilon - 1}{\varepsilon - 1}, -1\right), x, 2\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 5 \cdot 10^{-222}:\\
\;\;\;\;1\\
\mathbf{elif}\;x \leq 3.55 \cdot 10^{+16}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \varepsilon - -1, \frac{-1 + \varepsilon \cdot \varepsilon}{\varepsilon - -1}\right), x, 2\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\left(\varepsilon - 1\right) \cdot x\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot \mathsf{fma}\left(-1, x, 1\right)}{2}\\
\end{array}
\end{array}
if x < -3.60000000000000023e-4Initial program 97.8%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites98.6%
Taylor expanded in x around 0
Applied rewrites63.4%
Taylor expanded in eps around 0
mul-1-negN/A
lift-neg.f6496.4
Applied rewrites96.4%
if -3.60000000000000023e-4 < x < -3.6999999999999998e-233Initial program 55.8%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6477.9
Applied rewrites77.9%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6487.9
Applied rewrites87.9%
Taylor expanded in eps around 0
Applied rewrites87.9%
if -3.6999999999999998e-233 < x < 5.00000000000000008e-222Initial program 46.7%
Taylor expanded in x around 0
Applied rewrites95.6%
if 5.00000000000000008e-222 < x < 3.55e16Initial program 55.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6463.5
Applied rewrites63.5%
lift--.f64N/A
flip--N/A
lower-/.f64N/A
metadata-evalN/A
unpow2N/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lift-+.f6473.4
Applied rewrites73.4%
if 3.55e16 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6418.3
Applied rewrites18.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6429.8
Applied rewrites29.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
lift--.f6429.8
Applied rewrites29.8%
Taylor expanded in eps around 0
Applied rewrites43.0%
Final simplification75.6%
;; 4-branch variant: same expression without the strongly-negative-x
;; exp branch (that region handled by the linearized fma form).
(FPCore (x eps)
:precision binary64
(if (<= x -3.7e-233)
(* (fma (fma -1.0 (/ (- (* eps eps) 1.0) (- eps 1.0)) -1.0) x 2.0) 0.5)
(if (<= x 5e-222)
1.0
(if (<= x 3.55e+16)
(*
(fma
(fma -1.0 (- eps -1.0) (/ (+ -1.0 (* eps eps)) (- eps -1.0)))
x
2.0)
0.5)
(/
(-
(* (+ 1.0 (/ 1.0 eps)) (* (- eps 1.0) x))
(* (- (/ 1.0 eps) 1.0) (fma -1.0 x 1.0)))
2.0)))))
double code(double x, double eps) {
double tmp;
if (x <= -3.7e-233) {
tmp = fma(fma(-1.0, (((eps * eps) - 1.0) / (eps - 1.0)), -1.0), x, 2.0) * 0.5;
} else if (x <= 5e-222) {
tmp = 1.0;
} else if (x <= 3.55e+16) {
tmp = fma(fma(-1.0, (eps - -1.0), ((-1.0 + (eps * eps)) / (eps - -1.0))), x, 2.0) * 0.5;
} else {
tmp = (((1.0 + (1.0 / eps)) * ((eps - 1.0) * x)) - (((1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0;
}
return tmp;
}
# Julia rendering of the 4-branch piecewise approximation (auto-generated).
function code(x, eps) tmp = 0.0 if (x <= -3.7e-233) tmp = Float64(fma(fma(-1.0, Float64(Float64(Float64(eps * eps) - 1.0) / Float64(eps - 1.0)), -1.0), x, 2.0) * 0.5); elseif (x <= 5e-222) tmp = 1.0; elseif (x <= 3.55e+16) tmp = Float64(fma(fma(-1.0, Float64(eps - -1.0), Float64(Float64(-1.0 + Float64(eps * eps)) / Float64(eps - -1.0))), x, 2.0) * 0.5); else tmp = Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * Float64(Float64(eps - 1.0) * x)) - Float64(Float64(Float64(1.0 / eps) - 1.0) * fma(-1.0, x, 1.0))) / 2.0); end return tmp end
(* Wolfram rendering of the 4-branch piecewise approximation (auto-generated). *)
code[x_, eps_] := If[LessEqual[x, -3.7e-233], N[(N[(N[(-1.0 * N[(N[(N[(eps * eps), $MachinePrecision] - 1.0), $MachinePrecision] / N[(eps - 1.0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 5e-222], 1.0, If[LessEqual[x, 3.55e+16], N[(N[(N[(-1.0 * N[(eps - -1.0), $MachinePrecision] + N[(N[(-1.0 + N[(eps * eps), $MachinePrecision]), $MachinePrecision] / N[(eps - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[(N[(eps - 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[(-1.0 * x + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.7 \cdot 10^{-233}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{\varepsilon \cdot \varepsilon - 1}{\varepsilon - 1}, -1\right), x, 2\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 5 \cdot 10^{-222}:\\
\;\;\;\;1\\
\mathbf{elif}\;x \leq 3.55 \cdot 10^{+16}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \varepsilon - -1, \frac{-1 + \varepsilon \cdot \varepsilon}{\varepsilon - -1}\right), x, 2\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\left(\varepsilon - 1\right) \cdot x\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot \mathsf{fma}\left(-1, x, 1\right)}{2}\\
\end{array}
\end{array}
if x < -3.6999999999999998e-233Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6445.7
Applied rewrites45.7%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6459.5
Applied rewrites59.5%
Taylor expanded in eps around 0
Applied rewrites61.1%
if -3.6999999999999998e-233 < x < 5.00000000000000008e-222Initial program 46.7%
Taylor expanded in x around 0
Applied rewrites95.6%
if 5.00000000000000008e-222 < x < 3.55e16Initial program 55.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6463.5
Applied rewrites63.5%
lift--.f64N/A
flip--N/A
lower-/.f64N/A
metadata-evalN/A
unpow2N/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lift-+.f6473.4
Applied rewrites73.4%
if 3.55e16 < x Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f6418.3
Applied rewrites18.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6429.8
Applied rewrites29.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
lift--.f6429.8
Applied rewrites29.8%
Taylor expanded in eps around 0
Applied rewrites43.0%
Final simplification63.6%
;; 4-branch variant with shared subterm t_0 = eps^2 - 1 hoisted via let*.
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (* eps eps) 1.0)))
(if (<= x -3.7e-233)
(* (fma (fma -1.0 (/ t_0 (- eps 1.0)) -1.0) x 2.0) 0.5)
(if (<= x 5e-222)
1.0
(if (<= x 4.6e-66)
(*
(fma
(fma -1.0 (- eps -1.0) (/ (+ -1.0 (* eps eps)) (- eps -1.0)))
x
2.0)
0.5)
(* (fma (fma -1.0 (/ t_0 -1.0) (+ -1.0 eps)) x 2.0) 0.5))))))
double code(double x, double eps) {
double t_0 = (eps * eps) - 1.0;
double tmp;
if (x <= -3.7e-233) {
tmp = fma(fma(-1.0, (t_0 / (eps - 1.0)), -1.0), x, 2.0) * 0.5;
} else if (x <= 5e-222) {
tmp = 1.0;
} else if (x <= 4.6e-66) {
tmp = fma(fma(-1.0, (eps - -1.0), ((-1.0 + (eps * eps)) / (eps - -1.0))), x, 2.0) * 0.5;
} else {
tmp = fma(fma(-1.0, (t_0 / -1.0), (-1.0 + eps)), x, 2.0) * 0.5;
}
return tmp;
}
# Julia rendering with hoisted shared subterm t_0 = eps^2 - 1 (auto-generated).
function code(x, eps) t_0 = Float64(Float64(eps * eps) - 1.0) tmp = 0.0 if (x <= -3.7e-233) tmp = Float64(fma(fma(-1.0, Float64(t_0 / Float64(eps - 1.0)), -1.0), x, 2.0) * 0.5); elseif (x <= 5e-222) tmp = 1.0; elseif (x <= 4.6e-66) tmp = Float64(fma(fma(-1.0, Float64(eps - -1.0), Float64(Float64(-1.0 + Float64(eps * eps)) / Float64(eps - -1.0))), x, 2.0) * 0.5); else tmp = Float64(fma(fma(-1.0, Float64(t_0 / -1.0), Float64(-1.0 + eps)), x, 2.0) * 0.5); end return tmp end
(* Wolfram rendering with hoisted shared subterm t_0 = eps^2 - 1 (auto-generated). *)
code[x_, eps_] := Block[{t$95$0 = N[(N[(eps * eps), $MachinePrecision] - 1.0), $MachinePrecision]}, If[LessEqual[x, -3.7e-233], N[(N[(N[(-1.0 * N[(t$95$0 / N[(eps - 1.0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 5e-222], 1.0, If[LessEqual[x, 4.6e-66], N[(N[(N[(-1.0 * N[(eps - -1.0), $MachinePrecision] + N[(N[(-1.0 + N[(eps * eps), $MachinePrecision]), $MachinePrecision] / N[(eps - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[(-1.0 * N[(t$95$0 / -1.0), $MachinePrecision] + N[(-1.0 + eps), $MachinePrecision]), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \varepsilon \cdot \varepsilon - 1\\
\mathbf{if}\;x \leq -3.7 \cdot 10^{-233}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{t\_0}{\varepsilon - 1}, -1\right), x, 2\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 5 \cdot 10^{-222}:\\
\;\;\;\;1\\
\mathbf{elif}\;x \leq 4.6 \cdot 10^{-66}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \varepsilon - -1, \frac{-1 + \varepsilon \cdot \varepsilon}{\varepsilon - -1}\right), x, 2\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{t\_0}{-1}, -1 + \varepsilon\right), x, 2\right) \cdot 0.5\\
\end{array}
\end{array}
if x < -3.6999999999999998e-233Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6445.7
Applied rewrites45.7%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6459.5
Applied rewrites59.5%
Taylor expanded in eps around 0
Applied rewrites61.1%
if -3.6999999999999998e-233 < x < 5.00000000000000008e-222Initial program 46.7%
Taylor expanded in x around 0
Applied rewrites95.6%
if 5.00000000000000008e-222 < x < 4.59999999999999984e-66Initial program 58.4%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.9%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6471.2
Applied rewrites71.2%
lift--.f64N/A
flip--N/A
lower-/.f64N/A
metadata-evalN/A
unpow2N/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lift-+.f6480.1
Applied rewrites80.1%
if 4.59999999999999984e-66 < x Initial program 91.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6411.3
Applied rewrites11.3%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6419.7
Applied rewrites19.7%
Taylor expanded in eps around 0
Applied rewrites40.4%
Final simplification61.5%
;; 3-branch variant: the mid-range fma branch merged into the else case.
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- (* eps eps) 1.0)))
(if (<= x -3.7e-233)
(* (fma (fma -1.0 (/ t_0 (- eps 1.0)) -1.0) x 2.0) 0.5)
(if (<= x 2.4e-183)
1.0
(* (fma (fma -1.0 (/ t_0 -1.0) (+ -1.0 eps)) x 2.0) 0.5)))))
double code(double x, double eps) {
double t_0 = (eps * eps) - 1.0;
double tmp;
if (x <= -3.7e-233) {
tmp = fma(fma(-1.0, (t_0 / (eps - 1.0)), -1.0), x, 2.0) * 0.5;
} else if (x <= 2.4e-183) {
tmp = 1.0;
} else {
tmp = fma(fma(-1.0, (t_0 / -1.0), (-1.0 + eps)), x, 2.0) * 0.5;
}
return tmp;
}
# Julia rendering of the 3-branch piecewise approximation (auto-generated).
function code(x, eps) t_0 = Float64(Float64(eps * eps) - 1.0) tmp = 0.0 if (x <= -3.7e-233) tmp = Float64(fma(fma(-1.0, Float64(t_0 / Float64(eps - 1.0)), -1.0), x, 2.0) * 0.5); elseif (x <= 2.4e-183) tmp = 1.0; else tmp = Float64(fma(fma(-1.0, Float64(t_0 / -1.0), Float64(-1.0 + eps)), x, 2.0) * 0.5); end return tmp end
(* Wolfram rendering of the 3-branch piecewise approximation (auto-generated). *)
code[x_, eps_] := Block[{t$95$0 = N[(N[(eps * eps), $MachinePrecision] - 1.0), $MachinePrecision]}, If[LessEqual[x, -3.7e-233], N[(N[(N[(-1.0 * N[(t$95$0 / N[(eps - 1.0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], If[LessEqual[x, 2.4e-183], 1.0, N[(N[(N[(-1.0 * N[(t$95$0 / -1.0), $MachinePrecision] + N[(-1.0 + eps), $MachinePrecision]), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \varepsilon \cdot \varepsilon - 1\\
\mathbf{if}\;x \leq -3.7 \cdot 10^{-233}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{t\_0}{\varepsilon - 1}, -1\right), x, 2\right) \cdot 0.5\\
\mathbf{elif}\;x \leq 2.4 \cdot 10^{-183}:\\
\;\;\;\;1\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{t\_0}{-1}, -1 + \varepsilon\right), x, 2\right) \cdot 0.5\\
\end{array}
\end{array}
if x < -3.6999999999999998e-233Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6445.7
Applied rewrites45.7%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6459.5
Applied rewrites59.5%
Taylor expanded in eps around 0
Applied rewrites61.1%
if -3.6999999999999998e-233 < x < 2.39999999999999993e-183Initial program 50.5%
Taylor expanded in x around 0
Applied rewrites90.7%
if 2.39999999999999993e-183 < x Initial program 83.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.8%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6423.5
Applied rewrites23.5%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6431.1
Applied rewrites31.1%
Taylor expanded in eps around 0
Applied rewrites47.7%
Final simplification60.9%
;; 2-branch variant: negative tail via nested fma, everything else via fma(-x, 1-eps, 1).
(FPCore (x eps) :precision binary64 (if (<= x -3.7e-233) (* (fma (fma -1.0 (/ (- (* eps eps) 1.0) (- eps 1.0)) -1.0) x 2.0) 0.5) (* (- (fma (- x) (- 1.0 eps) 1.0) -1.0) 0.5)))
double code(double x, double eps) {
double tmp;
if (x <= -3.7e-233) {
tmp = fma(fma(-1.0, (((eps * eps) - 1.0) / (eps - 1.0)), -1.0), x, 2.0) * 0.5;
} else {
tmp = (fma(-x, (1.0 - eps), 1.0) - -1.0) * 0.5;
}
return tmp;
}
# Julia rendering of the 2-branch piecewise approximation (auto-generated).
function code(x, eps) tmp = 0.0 if (x <= -3.7e-233) tmp = Float64(fma(fma(-1.0, Float64(Float64(Float64(eps * eps) - 1.0) / Float64(eps - 1.0)), -1.0), x, 2.0) * 0.5); else tmp = Float64(Float64(fma(Float64(-x), Float64(1.0 - eps), 1.0) - -1.0) * 0.5); end return tmp end
(* Wolfram rendering of the 2-branch piecewise approximation (auto-generated). *)
code[x_, eps_] := If[LessEqual[x, -3.7e-233], N[(N[(N[(-1.0 * N[(N[(N[(eps * eps), $MachinePrecision] - 1.0), $MachinePrecision] / N[(eps - 1.0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[((-x) * N[(1.0 - eps), $MachinePrecision] + 1.0), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.7 \cdot 10^{-233}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \frac{\varepsilon \cdot \varepsilon - 1}{\varepsilon - 1}, -1\right), x, 2\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\left(\mathsf{fma}\left(-x, 1 - \varepsilon, 1\right) - -1\right) \cdot 0.5\\
\end{array}
\end{array}
if x < -3.6999999999999998e-233Initial program 74.1%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6445.7
Applied rewrites45.7%
lift-+.f64N/A
flip-+N/A
lower-/.f64N/A
unpow2N/A
metadata-evalN/A
lower--.f64N/A
unpow2N/A
lower-*.f64N/A
lift--.f6459.5
Applied rewrites59.5%
Taylor expanded in eps around 0
Applied rewrites61.1%
if -3.6999999999999998e-233 < x Initial program 73.7%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.8%
Taylor expanded in x around 0
Applied rewrites56.8%
Taylor expanded in x around 0
mul-1-negN/A
distribute-lft-neg-inN/A
+-commutativeN/A
lower-fma.f64N/A
lift-neg.f64N/A
lift--.f6449.6
Applied rewrites49.6%
Final simplification54.1%
;; 2-branch variant with a single-fma slope on the negative tail.
(FPCore (x eps) :precision binary64 (if (<= x -5e-260) (* (fma (fma -1.0 (- eps -1.0) -1.0) x 2.0) 0.5) (* (- (fma (- x) (- 1.0 eps) 1.0) -1.0) 0.5)))
double code(double x, double eps) {
double tmp;
if (x <= -5e-260) {
tmp = fma(fma(-1.0, (eps - -1.0), -1.0), x, 2.0) * 0.5;
} else {
tmp = (fma(-x, (1.0 - eps), 1.0) - -1.0) * 0.5;
}
return tmp;
}
# Julia rendering of the 2-branch single-fma-slope variant (auto-generated).
function code(x, eps) tmp = 0.0 if (x <= -5e-260) tmp = Float64(fma(fma(-1.0, Float64(eps - -1.0), -1.0), x, 2.0) * 0.5); else tmp = Float64(Float64(fma(Float64(-x), Float64(1.0 - eps), 1.0) - -1.0) * 0.5); end return tmp end
(* Wolfram rendering of the 2-branch single-fma-slope variant (auto-generated). *)
code[x_, eps_] := If[LessEqual[x, -5e-260], N[(N[(N[(-1.0 * N[(eps - -1.0), $MachinePrecision] + -1.0), $MachinePrecision] * x + 2.0), $MachinePrecision] * 0.5), $MachinePrecision], N[(N[(N[((-x) * N[(1.0 - eps), $MachinePrecision] + 1.0), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -5 \cdot 10^{-260}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, \varepsilon - -1, -1\right), x, 2\right) \cdot 0.5\\
\mathbf{else}:\\
\;\;\;\;\left(\mathsf{fma}\left(-x, 1 - \varepsilon, 1\right) - -1\right) \cdot 0.5\\
\end{array}
\end{array}
if x < -5.0000000000000003e-260Initial program 71.2%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lift--.f6450.6
Applied rewrites50.6%
Taylor expanded in eps around 0
Applied rewrites58.4%
if -5.0000000000000003e-260 < x Initial program 75.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.8%
Taylor expanded in x around 0
Applied rewrites54.0%
Taylor expanded in x around 0
mul-1-negN/A
distribute-lft-neg-inN/A
+-commutativeN/A
lower-fma.f64N/A
lift-neg.f64N/A
lift--.f6446.3
Applied rewrites46.3%
Final simplification51.5%
;; Branch-free variant: (fma(-x, 1-eps, 1) + 1) / 2.
(FPCore (x eps) :precision binary64 (* (- (fma (- x) (- 1.0 eps) 1.0) -1.0) 0.5))
double code(double x, double eps) {
return (fma(-x, (1.0 - eps), 1.0) - -1.0) * 0.5;
}
# Julia rendering of the branch-free variant (auto-generated).
function code(x, eps) return Float64(Float64(fma(Float64(-x), Float64(1.0 - eps), 1.0) - -1.0) * 0.5) end
(* Wolfram rendering of the branch-free variant (auto-generated). *)
code[x_, eps_] := N[(N[(N[((-x) * N[(1.0 - eps), $MachinePrecision] + 1.0), $MachinePrecision] - -1.0), $MachinePrecision] * 0.5), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(-x, 1 - \varepsilon, 1\right) - -1\right) \cdot 0.5
\end{array}
Initial program 73.9%
Taylor expanded in eps around inf
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in x around 0
Applied rewrites64.5%
Taylor expanded in x around 0
mul-1-negN/A
distribute-lft-neg-inN/A
+-commutativeN/A
lower-fma.f64N/A
lift-neg.f64N/A
lift--.f6454.4
Applied rewrites54.4%
Final simplification54.4%
;; Most aggressive simplification: the whole expression reduced to the constant 1.
(FPCore (x eps) :precision binary64 1.0)
/* Most aggressive Herbie simplification: the whole expression reduced
 * to the constant 1 over the sampled range; inputs intentionally unused. */
double code(double x, double eps) {
    (void) x;
    (void) eps;
    return 1.0;
}
! C-style fmax/fmin wrappers for every real(4)/real(8) argument
! combination.  Unlike the intrinsic max/min, whose result with a NaN
! operand is processor dependent, these return the non-NaN operand when
! exactly one argument is NaN, matching C99 fmax/fmin semantics.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on the kind pattern of the two arguments:
! 88 = both real(8), 44 = both real(4), 84/48 = mixed kinds
! (mixed variants promote the real(4) operand with dble()).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! (x /= x) is the portable NaN test.  The nested merge picks:
! y when x is NaN, x when y is NaN, max(x, y) otherwise.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin variants mirror fmax with min(); same NaN handling.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-simplified kernel: over the sampled input range the full
! expression reduced to the constant 1 (see the report trace above).
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
! x and eps are intentionally unused: the approximation is constant.
code = 1.0d0
end function
// Herbie-simplified kernel: constant 1.0 (inputs intentionally unused).
public static double code(double x, double eps) {
	return 1.0;
}
def code(x, eps):
    """Herbie-simplified kernel: the expression reduces to the constant 1.0.

    Both arguments are intentionally unused.
    """
    return 1.0
# Herbie-simplified kernel: constant 1.0 (x and eps unused).
function code(x, eps) return 1.0 end
% Herbie-simplified kernel: constant 1.0 (x and eps unused).
function tmp = code(x, eps) tmp = 1.0; end
(* Herbie-simplified kernel: constant 1.0 (x and eps unused). *)
code[x_, eps_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 73.9%
Taylor expanded in x around 0
Applied rewrites44.9%
herbie shell --seed 2025057
;; Original input program (Hamming NMSE benchmark, Section 6.1, A):
;;   ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2
(FPCore (x eps)
:name "NMSE Section 6.1 mentioned, A"
:precision binary64
(/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))