NMSE Section 6.1, A

Percentage Accurate: 74.3% → 99.9%
Time: 17.1s
Alternatives: 10
Speedup: 2.1×

Specification

?
\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; Specification: ((1 + 1/eps) * e^(-(1 - eps) * x)
;;                 - (1/eps - 1) * e^(-(1 + eps) * x)) / 2,
;; evaluated in IEEE binary64.  This is the program Herbie starts from.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Reference implementation of
! ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
! in double precision (real(8)); same arithmetic as the FPCore specification.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Reference implementation of
 * ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2
 * in double precision; same arithmetic as the FPCore specification.
 */
public static double code(double x, double eps) {
	double invEps = 1.0 / eps;                    // 1/eps, shared by both terms
	double slow = Math.exp(-((1.0 - eps) * x));   // e^(-(1-eps)x)
	double fast = Math.exp(-((1.0 + eps) * x));   // e^(-(1+eps)x)
	return ((1.0 + invEps) * slow - (invEps - 1.0) * fast) / 2.0;
}
def code(x, eps):
	"""Reference implementation of
	((1 + 1/eps) * e**(-(1-eps)*x) - (1/eps - 1) * e**(-(1+eps)*x)) / 2
	in double precision; same arithmetic as the FPCore specification.
	"""
	inv_eps = 1.0 / eps                      # 1/eps, shared by both terms
	slow = math.exp(-((1.0 - eps) * x))      # e^(-(1-eps)x)
	fast = math.exp(-((1.0 + eps) * x))      # e^(-(1+eps)x)
	return ((1.0 + inv_eps) * slow - (inv_eps - 1.0) * fast) / 2.0
# Reference implementation of
# ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
# The Float64(...) wrappers force a binary64 rounding after every operation.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	% Reference implementation of
	% ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Reference implementation of ((1 + 1/eps) E^(-(1-eps) x) - (1/eps - 1) E^(-(1+eps) x))/2;
   each N[..., $MachinePrecision] forces machine-precision rounding of that step. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 10 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Initial Program: 74.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; Initial program (74.3% accurate, 1.0x speedup):
;; ((1 + 1/eps) * e^(-(1 - eps) * x) - (1/eps - 1) * e^(-(1 + eps) * x)) / 2,
;; evaluated in IEEE binary64.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Initial program (74.3% accurate):
! ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
! in double precision (real(8)).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/**
 * Initial program (74.3% accurate):
 * ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2
 * in double precision.
 */
public static double code(double x, double eps) {
	double invEps = 1.0 / eps;                    // 1/eps, shared by both terms
	double slow = Math.exp(-((1.0 - eps) * x));   // e^(-(1-eps)x)
	double fast = Math.exp(-((1.0 + eps) * x));   // e^(-(1+eps)x)
	return ((1.0 + invEps) * slow - (invEps - 1.0) * fast) / 2.0;
}
def code(x, eps):
	"""Initial program (74.3% accurate):
	((1 + 1/eps) * e**(-(1-eps)*x) - (1/eps - 1) * e**(-(1+eps)*x)) / 2
	in double precision.
	"""
	inv_eps = 1.0 / eps                      # 1/eps, shared by both terms
	slow = math.exp(-((1.0 - eps) * x))      # e^(-(1-eps)x)
	fast = math.exp(-((1.0 + eps) * x))      # e^(-(1+eps)x)
	return ((1.0 + inv_eps) * slow - (inv_eps - 1.0) * fast) / 2.0
# Initial program (74.3% accurate):
# ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
# The Float64(...) wrappers force a binary64 rounding after every operation.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	% Initial program (74.3% accurate):
	% ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Initial program (74.3% accurate): ((1 + 1/eps) E^(-(1-eps) x) - (1/eps - 1) E^(-(1+eps) x))/2;
   each N[..., $MachinePrecision] forces machine-precision rounding of that step. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 99.9% accurate, 1.0× speedup?

\[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} t_0 := e^{-x}\\ \mathbf{if}\;\varepsilon \leq 10^{-5}:\\ \;\;\;\;\frac{t_0 \cdot \left(\left(x + 1\right) - -1\right) + x \cdot t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \end{array} \end{array} \]
NOTE: eps should be positive before calling this function
;; Herbie alternative 1 (99.9% accurate): for eps <= 1e-5 use the Taylor
;; expansion of the expression around eps = 0 (see derivation below);
;; otherwise evaluate the two exponentials directly.
;; NOTE: assumes eps > 0 (caller must take |eps| first).
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (exp (- x))))
   (if (<= eps 1e-5)
     (/ (+ (* t_0 (- (+ x 1.0) -1.0)) (* x t_0)) 2.0)
     (/ (+ (exp (* x (+ eps -1.0))) (exp (* eps (- x)))) 2.0))))
eps = abs(eps);
double code(double x, double eps) {
	double t_0 = exp(-x);
	double tmp;
	if (eps <= 1e-5) {
		tmp = ((t_0 * ((x + 1.0) - -1.0)) + (x * t_0)) / 2.0;
	} else {
		tmp = (exp((x * (eps + -1.0))) + exp((eps * -x))) / 2.0;
	}
	return tmp;
}
NOTE: eps should be positive before calling this function
! Herbie alternative 1 (99.9% accurate): Taylor expansion around eps = 0 when
! eps <= 1d-5, direct evaluation of the two exponentials otherwise.
! Assumes eps > 0 (take abs(eps) before calling).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp(-x)   ! shared factor e**(-x)
    if (eps <= 1d-5) then
        tmp = ((t_0 * ((x + 1.0d0) - (-1.0d0))) + (x * t_0)) / 2.0d0
    else
        tmp = (exp((x * (eps + (-1.0d0)))) + exp((eps * -x))) / 2.0d0
    end if
    code = tmp
end function
eps = Math.abs(eps);
/**
 * Herbie alternative 1 (99.9% accurate): Taylor expansion around eps = 0 when
 * eps <= 1e-5, direct evaluation of the two exponentials otherwise.
 * NOTE: eps should be positive before calling this function.
 */
public static double code(double x, double eps) {
	double baseDecay = Math.exp(-x);   // shared factor e^{-x}
	if (eps <= 1e-5) {
		return (baseDecay * ((x + 1.0) - -1.0) + x * baseDecay) / 2.0;
	}
	return (Math.exp(x * (eps + -1.0)) + Math.exp(eps * -x)) / 2.0;
}
eps = abs(eps)
def code(x, eps):
	"""Herbie alternative 1 (99.9% accurate).

	For eps <= 1e-5, uses the Taylor expansion of the expression around
	eps = 0; otherwise evaluates the two exponentials directly.
	Assumes eps > 0 (caller should take abs(eps) first).
	"""
	decay = math.exp(-x)  # shared factor e^{-x}
	if eps <= 1e-5:
		return (decay * ((x + 1.0) - -1.0) + x * decay) / 2.0
	return (math.exp(x * (eps + -1.0)) + math.exp(eps * -x)) / 2.0
eps = abs(eps)
# Herbie alternative 1 (99.9% accurate): Taylor expansion around eps = 0 when
# eps <= 1e-5, direct evaluation of the two exponentials otherwise.
# Assumes eps > 0 (take abs(eps) before calling).
function code(x, eps)
	t_0 = exp(Float64(-x))   # shared factor e^{-x}
	tmp = 0.0
	if (eps <= 1e-5)
		tmp = Float64(Float64(Float64(t_0 * Float64(Float64(x + 1.0) - -1.0)) + Float64(x * t_0)) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(x * Float64(eps + -1.0))) + exp(Float64(eps * Float64(-x)))) / 2.0);
	end
	return tmp
end
eps = abs(eps)
function tmp_2 = code(x, eps)
	% Herbie alternative 1 (99.9% accurate): Taylor expansion around eps = 0
	% when eps <= 1e-5, direct evaluation otherwise.  Assumes eps > 0.
	t_0 = exp(-x);
	tmp = 0.0;
	if (eps <= 1e-5)
		tmp = ((t_0 * ((x + 1.0) - -1.0)) + (x * t_0)) / 2.0;
	else
		tmp = (exp((x * (eps + -1.0))) + exp((eps * -x))) / 2.0;
	end
	tmp_2 = tmp;
end
NOTE: eps should be positive before calling this function
(* Herbie alternative 1 (99.9% accurate): Taylor expansion around eps = 0 when
   eps <= 1e-5, direct evaluation otherwise.  Assumes eps > 0. *)
code[x_, eps_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[eps, 1e-5], N[(N[(N[(t$95$0 * N[(N[(x + 1.0), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision] + N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(eps * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps = |eps|\\
\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;\varepsilon \leq 10^{-5}:\\
\;\;\;\;\frac{t_0 \cdot \left(\left(x + 1\right) - -1\right) + x \cdot t_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{\varepsilon \cdot \left(-x\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 1.00000000000000008e-5

    1. Initial program 63.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. sub-neg63.9%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. neg-sub063.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      3. associate-+r-63.9%

        \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
    3. Simplified63.9%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around 0 68.4%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Step-by-step derivation
      1. associate--r+68.4%

        \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
      2. associate-*r*68.4%

        \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
      3. neg-mul-168.4%

        \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
      4. cancel-sign-sub68.4%

        \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
      5. distribute-rgt1-in68.4%

        \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
      6. distribute-rgt-out--68.4%

        \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
      7. neg-mul-168.4%

        \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
      8. neg-mul-168.4%

        \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
    6. Simplified68.4%

      \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]

    if 1.00000000000000008e-5 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. Simplified66.6%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(e^{\varepsilon + -1}\right)}^{x} - \left(\frac{1}{\varepsilon} + -1\right) \cdot {\left(e^{-x}\right)}^{\left(1 + \varepsilon\right)}}{2}} \]
      2. Taylor expanded in eps around inf 100.0%

        \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
      3. Taylor expanded in eps around inf 100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(\varepsilon \cdot x\right)}}}{2} \]
      4. Step-by-step derivation
        1. *-commutative100.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
      5. Simplified100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification77.6%

      \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 10^{-5}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \end{array} \]

    Alternative 2: 90.9% accurate, 2.0× speedup?

    \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq -2.2 \cdot 10^{-273}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \mathbf{elif}\;x \leq 6.8 \cdot 10^{+79}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \end{array} \]
    NOTE: eps should be positive before calling this function
    ;; Herbie alternative 2 (90.9% accurate, 2.0x speedup): picks one of three
    ;; formulas depending on the regime of x (see derivation below).
    ;; NOTE: assumes eps > 0 (caller must take |eps| first).
    (FPCore (x eps)
     :precision binary64
     (if (<= x -2.2e-273)
       (/ (+ 1.0 (exp (* eps (- x)))) 2.0)
       (if (<= x 6.8e+79)
         (/ (+ 1.0 (exp (- (* eps x) x))) 2.0)
         (/
          (*
           x
           (/
            (+ -1.0 (* (- 1.0 eps) (- 1.0 eps)))
            (+ (+ eps -1.0) (/ (+ eps -1.0) eps))))
          2.0))))
    eps = abs(eps);
    double code(double x, double eps) {
    	double tmp;
    	if (x <= -2.2e-273) {
    		tmp = (1.0 + exp((eps * -x))) / 2.0;
    	} else if (x <= 6.8e+79) {
    		tmp = (1.0 + exp(((eps * x) - x))) / 2.0;
    	} else {
    		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
    	}
    	return tmp;
    }
    
    NOTE: eps should be positive before calling this function
    ! Herbie alternative 2 (90.9% accurate, 2.0x speedup): picks one of three
    ! formulas depending on the regime of x.
    ! Assumes eps > 0 (take abs(eps) before calling).
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        real(8) :: tmp
        if (x <= (-2.2d-273)) then
            tmp = (1.0d0 + exp((eps * -x))) / 2.0d0
        else if (x <= 6.8d+79) then
            tmp = (1.0d0 + exp(((eps * x) - x))) / 2.0d0
        else
            tmp = (x * (((-1.0d0) + ((1.0d0 - eps) * (1.0d0 - eps))) / ((eps + (-1.0d0)) + ((eps + (-1.0d0)) / eps)))) / 2.0d0
        end if
        code = tmp
    end function
    
    eps = Math.abs(eps);
    /**
     * Herbie alternative 2 (90.9% accurate, 2.0x speedup): picks one of three
     * formulas depending on the regime of x.
     * NOTE: eps should be positive before calling this function.
     */
    public static double code(double x, double eps) {
    	if (x <= -2.2e-273) {
    		return (1.0 + Math.exp(eps * -x)) / 2.0;
    	}
    	if (x <= 6.8e+79) {
    		return (1.0 + Math.exp(eps * x - x)) / 2.0;
    	}
    	double ratio = (-1.0 + (1.0 - eps) * (1.0 - eps))
    	             / ((eps + -1.0) + (eps + -1.0) / eps);
    	return (x * ratio) / 2.0;
    }
    
    eps = abs(eps)
    def code(x, eps):
    	"""Herbie alternative 2 (90.9% accurate, 2.0x speedup).

    	Picks one of three formulas depending on the regime of x.
    	Assumes eps > 0 (caller should take abs(eps) first).
    	"""
    	if x <= -2.2e-273:
    		return (1.0 + math.exp(eps * -x)) / 2.0
    	if x <= 6.8e+79:
    		return (1.0 + math.exp(eps * x - x)) / 2.0
    	ratio = (-1.0 + (1.0 - eps) * (1.0 - eps)) / ((eps + -1.0) + (eps + -1.0) / eps)
    	return (x * ratio) / 2.0
    
    eps = abs(eps)
    # Herbie alternative 2 (90.9% accurate, 2.0x speedup): picks one of three
    # formulas depending on the regime of x.
    # Assumes eps > 0 (take abs(eps) before calling).
    function code(x, eps)
    	tmp = 0.0
    	if (x <= -2.2e-273)
    		tmp = Float64(Float64(1.0 + exp(Float64(eps * Float64(-x)))) / 2.0);
    	elseif (x <= 6.8e+79)
    		tmp = Float64(Float64(1.0 + exp(Float64(Float64(eps * x) - x))) / 2.0);
    	else
    		tmp = Float64(Float64(x * Float64(Float64(-1.0 + Float64(Float64(1.0 - eps) * Float64(1.0 - eps))) / Float64(Float64(eps + -1.0) + Float64(Float64(eps + -1.0) / eps)))) / 2.0);
    	end
    	return tmp
    end
    
    eps = abs(eps)
    function tmp_2 = code(x, eps)
    	% Herbie alternative 2 (90.9% accurate, 2.0x speedup): picks one of
    	% three formulas depending on the regime of x.  Assumes eps > 0.
    	tmp = 0.0;
    	if (x <= -2.2e-273)
    		tmp = (1.0 + exp((eps * -x))) / 2.0;
    	elseif (x <= 6.8e+79)
    		tmp = (1.0 + exp(((eps * x) - x))) / 2.0;
    	else
    		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
    	end
    	tmp_2 = tmp;
    end
    
    NOTE: eps should be positive before calling this function
    (* Herbie alternative 2 (90.9% accurate, 2.0x speedup): picks one of three
       formulas depending on the regime of x.  Assumes eps > 0. *)
    code[x_, eps_] := If[LessEqual[x, -2.2e-273], N[(N[(1.0 + N[Exp[N[(eps * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 6.8e+79], N[(N[(1.0 + N[Exp[N[(N[(eps * x), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(x * N[(N[(-1.0 + N[(N[(1.0 - eps), $MachinePrecision] * N[(1.0 - eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(eps + -1.0), $MachinePrecision] + N[(N[(eps + -1.0), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
    
    \begin{array}{l}
    eps = |eps|\\
    \\
    \begin{array}{l}
    \mathbf{if}\;x \leq -2.2 \cdot 10^{-273}:\\
    \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\
    
    \mathbf{elif}\;x \leq 6.8 \cdot 10^{+79}:\\
    \;\;\;\;\frac{1 + e^{\varepsilon \cdot x - x}}{2}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 3 regimes
    2. if x < -2.1999999999999998e-273

      1. Initial program 78.9%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. sub-neg78.9%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        2. neg-sub078.9%

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        3. associate-+r-78.9%

          \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      3. Simplified78.9%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in x around 0 43.6%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      5. Step-by-step derivation
        1. metadata-eval43.6%

          \[\leadsto \frac{\left(1 + \frac{\color{blue}{--1}}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        2. distribute-neg-frac43.6%

          \[\leadsto \frac{\left(1 + \color{blue}{\left(-\frac{-1}{\varepsilon}\right)}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        3. sub-neg43.6%

          \[\leadsto \frac{\color{blue}{\left(1 - \frac{-1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      6. Simplified43.6%

        \[\leadsto \frac{\color{blue}{\left(1 - \frac{-1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      7. Taylor expanded in eps around inf 62.2%

        \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
      8. Step-by-step derivation
        1. cancel-sign-sub-inv62.2%

          \[\leadsto \frac{\color{blue}{1 + \left(--1\right) \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
        2. associate-*r*62.2%

          \[\leadsto \frac{1 + \left(--1\right) \cdot e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}}{2} \]
        3. exp-prod50.9%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{{\left(e^{-1 \cdot x}\right)}^{\left(1 + \varepsilon\right)}}}{2} \]
        4. remove-double-neg50.9%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right)}}{2} \]
        5. neg-mul-150.9%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right)}}{2} \]
        6. sub-neg50.9%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\color{blue}{\left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        7. exp-prod62.2%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{e^{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        8. associate-*r*62.2%

          \[\leadsto \frac{1 + \left(--1\right) \cdot e^{\color{blue}{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
        9. remove-double-neg62.2%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{\left(-\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)\right)}}{2} \]
        10. mul-1-neg62.2%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \left(-\color{blue}{-1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
        11. metadata-eval62.2%

          \[\leadsto \frac{1 + \color{blue}{1} \cdot \left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}{2} \]
        12. *-lft-identity62.2%

          \[\leadsto \frac{1 + \color{blue}{\left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}}{2} \]
        13. mul-1-neg62.2%

          \[\leadsto \frac{1 + \left(-\color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}\right)}{2} \]
        14. remove-double-neg62.2%

          \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
        15. associate-*r*62.2%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        16. neg-mul-162.2%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 - -1 \cdot \varepsilon\right)}}{2} \]
      9. Simplified62.2%

        \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(\varepsilon + 1\right)}}}{2} \]
      10. Taylor expanded in eps around inf 62.5%

        \[\leadsto \frac{1 + e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}}{2} \]
      11. Step-by-step derivation
        1. associate-*r*62.5%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}}{2} \]
        2. mul-1-neg62.5%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}}{2} \]
      12. Simplified62.5%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}}{2} \]

      if -2.1999999999999998e-273 < x < 6.80000000000000063e79

      1. Initial program 56.3%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. sub-neg56.3%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        2. neg-sub056.3%

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        3. associate-+r-56.3%

          \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      3. Simplified56.3%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in x around 0 34.9%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      5. Step-by-step derivation
        1. metadata-eval34.9%

          \[\leadsto \frac{\left(1 + \frac{\color{blue}{--1}}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        2. distribute-neg-frac34.9%

          \[\leadsto \frac{\left(1 + \color{blue}{\left(-\frac{-1}{\varepsilon}\right)}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        3. sub-neg34.9%

          \[\leadsto \frac{\color{blue}{\left(1 - \frac{-1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      6. Simplified34.9%

        \[\leadsto \frac{\color{blue}{\left(1 - \frac{-1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      7. Taylor expanded in eps around inf 77.3%

        \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
      8. Step-by-step derivation
        1. cancel-sign-sub-inv77.3%

          \[\leadsto \frac{\color{blue}{1 + \left(--1\right) \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
        2. associate-*r*77.3%

          \[\leadsto \frac{1 + \left(--1\right) \cdot e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}}{2} \]
        3. exp-prod73.7%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{{\left(e^{-1 \cdot x}\right)}^{\left(1 + \varepsilon\right)}}}{2} \]
        4. remove-double-neg73.7%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right)}}{2} \]
        5. neg-mul-173.7%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right)}}{2} \]
        6. sub-neg73.7%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\color{blue}{\left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        7. exp-prod77.3%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{e^{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        8. associate-*r*77.3%

          \[\leadsto \frac{1 + \left(--1\right) \cdot e^{\color{blue}{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
        9. remove-double-neg77.3%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{\left(-\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)\right)}}{2} \]
        10. mul-1-neg77.3%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \left(-\color{blue}{-1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
        11. metadata-eval77.3%

          \[\leadsto \frac{1 + \color{blue}{1} \cdot \left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}{2} \]
        12. *-lft-identity77.3%

          \[\leadsto \frac{1 + \color{blue}{\left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}}{2} \]
        13. mul-1-neg77.3%

          \[\leadsto \frac{1 + \left(-\color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}\right)}{2} \]
        14. remove-double-neg77.3%

          \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
        15. associate-*r*77.3%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        16. neg-mul-177.3%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 - -1 \cdot \varepsilon\right)}}{2} \]
      9. Simplified77.3%

        \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(\varepsilon + 1\right)}}}{2} \]
      10. Step-by-step derivation
        1. distribute-rgt-in77.3%

          \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot \left(-x\right) + 1 \cdot \left(-x\right)}}}{2} \]
        2. *-un-lft-identity77.3%

          \[\leadsto \frac{1 + e^{\varepsilon \cdot \left(-x\right) + \color{blue}{\left(-x\right)}}}{2} \]
        3. unsub-neg77.3%

          \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot \left(-x\right) - x}}}{2} \]
        4. add-sqr-sqrt10.5%

          \[\leadsto \frac{1 + e^{\varepsilon \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)} - x}}{2} \]
        5. sqrt-unprod78.3%

          \[\leadsto \frac{1 + e^{\varepsilon \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}} - x}}{2} \]
        6. sqr-neg78.3%

          \[\leadsto \frac{1 + e^{\varepsilon \cdot \sqrt{\color{blue}{x \cdot x}} - x}}{2} \]
        7. sqrt-unprod68.7%

          \[\leadsto \frac{1 + e^{\varepsilon \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)} - x}}{2} \]
        8. add-sqr-sqrt79.1%

          \[\leadsto \frac{1 + e^{\varepsilon \cdot \color{blue}{x} - x}}{2} \]
      11. Applied egg-rr79.1%

        \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]
      12. Step-by-step derivation
        1. *-commutative79.1%

          \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon} - x}}{2} \]
      13. Simplified79.1%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon - x}}}{2} \]

      if 6.80000000000000063e79 < x

      1. Initial program 100.0%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. sub-neg100.0%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        2. neg-sub0 (100.0%)

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        3. associate-+r- (100.0%)

          \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      3. Simplified100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in x around 0 28.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \left(-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right) + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      5. Taylor expanded in x around inf 18.3%

        \[\leadsto \frac{\color{blue}{-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}}{2} \]
      6. Step-by-step derivation
        1. mul-1-neg18.3%

          \[\leadsto \frac{\color{blue}{-x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
        2. distribute-rgt-neg-in18.3%

          \[\leadsto \frac{\color{blue}{x \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
        3. *-commutative18.3%

          \[\leadsto \frac{x \cdot \left(-\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)}\right)}{2} \]
        4. distribute-rgt-neg-in18.3%

          \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right)\right)\right)}}{2} \]
        5. mul-1-neg18.3%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}\right)}{2} \]
        6. distribute-lft-in18.3%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot 1 + -1 \cdot \frac{1}{\varepsilon}\right)}\right)}{2} \]
        7. metadata-eval18.3%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(\color{blue}{-1} + -1 \cdot \frac{1}{\varepsilon}\right)\right)}{2} \]
        8. associate-*r/18.3%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \color{blue}{\frac{-1 \cdot 1}{\varepsilon}}\right)\right)}{2} \]
        9. metadata-eval18.3%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{\color{blue}{-1}}{\varepsilon}\right)\right)}{2} \]
      7. Simplified18.3%

        \[\leadsto \frac{\color{blue}{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{-1}{\varepsilon}\right)\right)}}{2} \]
      8. Step-by-step derivation
        1. distribute-lft-in18.3%

          \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot -1 + \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}}{2} \]
        2. flip-+18.3%

          \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right) \cdot \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}{\left(1 - \varepsilon\right) \cdot -1 - \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}}}}{2} \]
      9. Applied egg-rr18.3%

        \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
      10. Step-by-step derivation
        1. *-commutative18.3%

          \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        2. neg-mul-1 (18.3%)

          \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        3. *-commutative18.3%

          \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        4. neg-mul-1 (18.3%)

          \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        5. sqr-neg18.3%

          \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        6. *-commutative18.3%

          \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{-1 \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        7. neg-mul-1 (18.3%)

          \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
      11. Simplified18.3%

        \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
      12. Taylor expanded in eps around inf 71.7%

        \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \color{blue}{1}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
    3. Recombined 3 regimes into one program.
    4. Final simplification71.5%

      \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.2 \cdot 10^{-273}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \mathbf{elif}\;x \leq 6.8 \cdot 10^{+79}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \]

    Alternative 3: 85.1% accurate, 2.1× speedup?

    \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 1.25 \cdot 10^{-5}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \end{array} \]
    NOTE: eps should be positive before calling this function
    (FPCore (x eps)
     :precision binary64
     (if (<= x 1.25e-5)
       (/ (+ 1.0 (exp (* eps (- x)))) 2.0)
       (/
        (*
         x
         (/
          (+ -1.0 (* (- 1.0 eps) (- 1.0 eps)))
          (+ (+ eps -1.0) (/ (+ eps -1.0) eps))))
        2.0)))
    eps = abs(eps);
    double code(double x, double eps) {
    	double tmp;
    	if (x <= 1.25e-5) {
    		tmp = (1.0 + exp((eps * -x))) / 2.0;
    	} else {
    		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
    	}
    	return tmp;
    }
    
    NOTE: eps should be positive before calling this function
    ! Herbie-generated rewrite of
    ! ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
    ! NOTE(review): eps is assumed positive (the report applies eps = abs(eps)
    ! before calling this function).
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        real(8) :: tmp
        ! Small/negative x: Taylor-derived exponential form.
        if (x <= 1.25d-5) then
            tmp = (1.0d0 + exp((eps * -x))) / 2.0d0
        else
            ! Large x: rational approximation from the Taylor expansion.
            tmp = (x * (((-1.0d0) + ((1.0d0 - eps) * (1.0d0 - eps))) / ((eps + (-1.0d0)) + ((eps + (-1.0d0)) / eps)))) / 2.0d0
        end if
        code = tmp
    end function
    
    eps = Math.abs(eps);
    /**
     * Herbie-generated piecewise approximation of
     * ((1 + 1/eps)*exp(-(1-eps)x) - (1/eps - 1)*exp(-(1+eps)x)) / 2.
     * NOTE(review): eps is assumed positive (the report applies
     * eps = Math.abs(eps) before calling).
     */
    public static double code(double x, double eps) {
    	// Small/negative x: Taylor-derived exponential form.
    	if (x <= 1.25e-5) {
    		return (1.0 + Math.exp(eps * -x)) / 2.0;
    	}
    	// Large x: rational approximation from the Taylor expansion.
    	double oneMinusEps = 1.0 - eps;
    	double epsMinusOne = eps + -1.0;
    	double ratio = (-1.0 + oneMinusEps * oneMinusEps) / (epsMinusOne + epsMinusOne / eps);
    	return (x * ratio) / 2.0;
    }
    
    eps = abs(eps)
    def code(x, eps):
    	"""Herbie-generated piecewise approximation of
    	((1 + 1/eps)*exp(-(1-eps)x) - (1/eps - 1)*exp(-(1+eps)x)) / 2.

    	NOTE(review): eps is assumed positive (the report applies
    	eps = abs(eps) before calling).
    	"""
    	# Small/negative x: Taylor-derived exponential form.
    	if x <= 1.25e-5:
    		return (1.0 + math.exp(eps * -x)) / 2.0
    	# Large x: rational approximation from the Taylor expansion.
    	one_minus_eps = 1.0 - eps
    	eps_minus_one = eps + -1.0
    	ratio = (-1.0 + one_minus_eps * one_minus_eps) / (eps_minus_one + eps_minus_one / eps)
    	return (x * ratio) / 2.0
    
    eps = abs(eps)
    # Herbie-generated piecewise approximation; the explicit Float64(...)
    # wrappers pin every intermediate to binary64 rounding, so the expression
    # is kept exactly as generated.
    # NOTE(review): eps is assumed positive (the report applies eps = abs(eps)
    # before calling).
    function code(x, eps)
    	tmp = 0.0
    	# Small/negative x: Taylor-derived exponential form.
    	if (x <= 1.25e-5)
    		tmp = Float64(Float64(1.0 + exp(Float64(eps * Float64(-x)))) / 2.0);
    	else
    		# Large x: rational approximation from the Taylor expansion.
    		tmp = Float64(Float64(x * Float64(Float64(-1.0 + Float64(Float64(1.0 - eps) * Float64(1.0 - eps))) / Float64(Float64(eps + -1.0) + Float64(Float64(eps + -1.0) / eps)))) / 2.0);
    	end
    	return tmp
    end
    
    eps = abs(eps)
    % Herbie-generated piecewise approximation of
    % ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
    % NOTE(review): eps is assumed positive (the report applies eps = abs(eps)
    % before calling).
    function tmp_2 = code(x, eps)
    	tmp = 0.0;
    	% Small/negative x: Taylor-derived exponential form.
    	if (x <= 1.25e-5)
    		tmp = (1.0 + exp((eps * -x))) / 2.0;
    	else
    		% Large x: rational approximation from the Taylor expansion.
    		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
    	end
    	tmp_2 = tmp;
    end
    
    NOTE: eps should be positive before calling this function
    (* Herbie-generated piecewise form: exponential branch for x <= 1.25e-5,
       rational approximation otherwise; the N[..., $MachinePrecision] wrappers
       force machine-precision rounding at each step. eps is assumed positive. *)
    code[x_, eps_] := If[LessEqual[x, 1.25e-5], N[(N[(1.0 + N[Exp[N[(eps * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(x * N[(N[(-1.0 + N[(N[(1.0 - eps), $MachinePrecision] * N[(1.0 - eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(eps + -1.0), $MachinePrecision] + N[(N[(eps + -1.0), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
    
    \begin{array}{l}
    eps = |eps|\\
    \\
    \begin{array}{l}
    \mathbf{if}\;x \leq 1.25 \cdot 10^{-5}:\\
    \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if x < 1.25000000000000006e-5

      1. Initial program 63.3%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. sub-neg63.3%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        2. neg-sub0 (63.3%)

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        3. associate-+r- (63.3%)

          \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      3. Simplified63.3%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in x around 0 38.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      5. Step-by-step derivation
        1. metadata-eval38.5%

          \[\leadsto \frac{\left(1 + \frac{\color{blue}{--1}}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        2. distribute-neg-frac38.5%

          \[\leadsto \frac{\left(1 + \color{blue}{\left(-\frac{-1}{\varepsilon}\right)}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        3. sub-neg38.5%

          \[\leadsto \frac{\color{blue}{\left(1 - \frac{-1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      6. Simplified38.5%

        \[\leadsto \frac{\color{blue}{\left(1 - \frac{-1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      7. Taylor expanded in eps around inf 73.0%

        \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
      8. Step-by-step derivation
        1. cancel-sign-sub-inv73.0%

          \[\leadsto \frac{\color{blue}{1 + \left(--1\right) \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
        2. associate-*r*73.0%

          \[\leadsto \frac{1 + \left(--1\right) \cdot e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}}{2} \]
        3. exp-prod65.1%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{{\left(e^{-1 \cdot x}\right)}^{\left(1 + \varepsilon\right)}}}{2} \]
        4. remove-double-neg65.1%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right)}}{2} \]
        5. neg-mul-1 (65.1%)

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right)}}{2} \]
        6. sub-neg65.1%

          \[\leadsto \frac{1 + \left(--1\right) \cdot {\left(e^{-1 \cdot x}\right)}^{\color{blue}{\left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        7. exp-prod73.0%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{e^{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        8. associate-*r*73.0%

          \[\leadsto \frac{1 + \left(--1\right) \cdot e^{\color{blue}{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
        9. remove-double-neg73.0%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \color{blue}{\left(-\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)\right)}}{2} \]
        10. mul-1-neg73.0%

          \[\leadsto \frac{1 + \left(--1\right) \cdot \left(-\color{blue}{-1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
        11. metadata-eval73.0%

          \[\leadsto \frac{1 + \color{blue}{1} \cdot \left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}{2} \]
        12. *-lft-identity73.0%

          \[\leadsto \frac{1 + \color{blue}{\left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}}{2} \]
        13. mul-1-neg73.0%

          \[\leadsto \frac{1 + \left(-\color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}\right)}{2} \]
        14. remove-double-neg73.0%

          \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
        15. associate-*r*73.0%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
        16. neg-mul-1 (73.0%)

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 - -1 \cdot \varepsilon\right)}}{2} \]
      9. Simplified73.0%

        \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(\varepsilon + 1\right)}}}{2} \]
      10. Taylor expanded in eps around inf 74.2%

        \[\leadsto \frac{1 + e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}}{2} \]
      11. Step-by-step derivation
        1. associate-*r*74.2%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}}{2} \]
        2. mul-1-neg74.2%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}}{2} \]
      12. Simplified74.2%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}}{2} \]

      if 1.25000000000000006e-5 < x

      1. Initial program 100.0%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. sub-neg100.0%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        2. neg-sub0 (100.0%)

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
        3. associate-+r- (100.0%)

          \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      3. Simplified100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in x around 0 31.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \left(-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right) + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      5. Taylor expanded in x around inf 14.4%

        \[\leadsto \frac{\color{blue}{-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}}{2} \]
      6. Step-by-step derivation
        1. mul-1-neg14.4%

          \[\leadsto \frac{\color{blue}{-x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
        2. distribute-rgt-neg-in14.4%

          \[\leadsto \frac{\color{blue}{x \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
        3. *-commutative14.4%

          \[\leadsto \frac{x \cdot \left(-\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)}\right)}{2} \]
        4. distribute-rgt-neg-in14.4%

          \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right)\right)\right)}}{2} \]
        5. mul-1-neg14.4%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}\right)}{2} \]
        6. distribute-lft-in14.4%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot 1 + -1 \cdot \frac{1}{\varepsilon}\right)}\right)}{2} \]
        7. metadata-eval14.4%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(\color{blue}{-1} + -1 \cdot \frac{1}{\varepsilon}\right)\right)}{2} \]
        8. associate-*r/14.4%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \color{blue}{\frac{-1 \cdot 1}{\varepsilon}}\right)\right)}{2} \]
        9. metadata-eval14.4%

          \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{\color{blue}{-1}}{\varepsilon}\right)\right)}{2} \]
      7. Simplified14.4%

        \[\leadsto \frac{\color{blue}{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{-1}{\varepsilon}\right)\right)}}{2} \]
      8. Step-by-step derivation
        1. distribute-lft-in14.4%

          \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot -1 + \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}}{2} \]
        2. flip-+16.8%

          \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right) \cdot \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}{\left(1 - \varepsilon\right) \cdot -1 - \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}}}}{2} \]
      9. Applied egg-rr16.8%

        \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
      10. Step-by-step derivation
        1. *-commutative16.8%

          \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        2. neg-mul-1 (16.8%)

          \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        3. *-commutative16.8%

          \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        4. neg-mul-1 (16.8%)

          \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        5. sqr-neg16.8%

          \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        6. *-commutative16.8%

          \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{-1 \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        7. neg-mul-1 (16.8%)

          \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
      11. Simplified16.8%

        \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
      12. Taylor expanded in eps around inf 65.9%

        \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \color{blue}{1}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification71.7%

      \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.25 \cdot 10^{-5}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \]

    Alternative 4: 78.0% accurate, 2.1× speedup?

    \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 2.4 \cdot 10^{-7}:\\ \;\;\;\;\frac{e^{-x} \cdot 2}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \end{array} \]
    NOTE: eps should be positive before calling this function
    (FPCore (x eps)
     :precision binary64
     (if (<= x 2.4e-7)
       (/ (* (exp (- x)) 2.0) 2.0)
       (/
        (*
         x
         (/
          (+ -1.0 (* (- 1.0 eps) (- 1.0 eps)))
          (+ (+ eps -1.0) (/ (+ eps -1.0) eps))))
        2.0)))
    eps = abs(eps);
    double code(double x, double eps) {
    	double tmp;
    	if (x <= 2.4e-7) {
    		tmp = (exp(-x) * 2.0) / 2.0;
    	} else {
    		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
    	}
    	return tmp;
    }
    
    NOTE: eps should be positive before calling this function
    ! Herbie-generated rewrite of
    ! ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
    ! NOTE(review): eps is assumed positive (the report applies eps = abs(eps)).
    ! NOTE(review): in the first branch, (exp(-x) * 2.0d0) / 2.0d0 equals
    ! exp(-x) exactly in binary64, but the intermediate * 2.0d0 can overflow
    ! to +Inf for x < ~-709.09 while exp(-x) is still finite; consider
    ! simplifying to tmp = exp(-x).
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        real(8) :: tmp
        ! Small/negative x: Taylor-derived exponential form.
        if (x <= 2.4d-7) then
            tmp = (exp(-x) * 2.0d0) / 2.0d0
        else
            ! Large x: rational approximation from the Taylor expansion.
            tmp = (x * (((-1.0d0) + ((1.0d0 - eps) * (1.0d0 - eps))) / ((eps + (-1.0d0)) + ((eps + (-1.0d0)) / eps)))) / 2.0d0
        end if
        code = tmp
    end function
    
    eps = Math.abs(eps);
    /**
     * Herbie-generated piecewise approximation of
     * ((1 + 1/eps)*exp(-(1-eps)x) - (1/eps - 1)*exp(-(1+eps)x)) / 2.
     * NOTE(review): eps is assumed positive (the report applies
     * eps = Math.abs(eps) before calling).
     */
    public static double code(double x, double eps) {
    	double tmp;
    	if (x <= 2.4e-7) {
    		// (Math.exp(-x) * 2.0) / 2.0 equals Math.exp(-x) exactly in binary64,
    		// except that the intermediate * 2.0 overflows to +Infinity for
    		// x < ~-709.09 while Math.exp(-x) itself is still finite.  Use the
    		// simplified form to avoid that spurious overflow.
    		tmp = Math.exp(-x);
    	} else {
    		// Large x: rational approximation from the Taylor expansion.
    		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
    	}
    	return tmp;
    }
    
    eps = abs(eps)
    def code(x, eps):
    	"""Herbie-generated piecewise approximation of
    	((1 + 1/eps)*exp(-(1-eps)x) - (1/eps - 1)*exp(-(1+eps)x)) / 2.

    	For x <= 2.4e-7 the expression reduces to exp(-x).  The generated
    	(math.exp(-x) * 2.0) / 2.0 is the same value in binary64, except that
    	the intermediate * 2.0 can overflow to inf for x < ~-709.09 even
    	though math.exp(-x) itself is finite, so the simplified form is used.

    	NOTE(review): eps is assumed positive (the report applies
    	eps = abs(eps) before calling).
    	"""
    	# Small/negative x: Taylor-derived exponential form.
    	if x <= 2.4e-7:
    		return math.exp(-x)
    	# Large x: rational approximation from the Taylor expansion.
    	ratio = (-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps))
    	return (x * ratio) / 2.0
    
    eps = abs(eps)
    # Herbie-generated piecewise approximation; the explicit Float64(...)
    # wrappers pin every intermediate to binary64 rounding, so the expression
    # is kept exactly as generated.
    # NOTE(review): eps is assumed positive (the report applies eps = abs(eps)).
    # NOTE(review): in the first branch, (exp(-x) * 2.0) / 2.0 equals exp(-x)
    # exactly in binary64, but the intermediate * 2.0 can overflow to Inf for
    # x < ~-709.09 while exp(-x) is still finite.
    function code(x, eps)
    	tmp = 0.0
    	# Small/negative x: Taylor-derived exponential form.
    	if (x <= 2.4e-7)
    		tmp = Float64(Float64(exp(Float64(-x)) * 2.0) / 2.0);
    	else
    		# Large x: rational approximation from the Taylor expansion.
    		tmp = Float64(Float64(x * Float64(Float64(-1.0 + Float64(Float64(1.0 - eps) * Float64(1.0 - eps))) / Float64(Float64(eps + -1.0) + Float64(Float64(eps + -1.0) / eps)))) / 2.0);
    	end
    	return tmp
    end
    
    eps = abs(eps)
    % Herbie-generated piecewise approximation of
    % ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
    % NOTE(review): eps is assumed positive (the report applies eps = abs(eps)).
    % NOTE(review): in the first branch, (exp(-x) * 2.0) / 2.0 equals exp(-x)
    % exactly in binary64, but the intermediate * 2.0 can overflow to Inf for
    % x < ~-709.09 while exp(-x) is still finite.
    function tmp_2 = code(x, eps)
    	tmp = 0.0;
    	% Small/negative x: Taylor-derived exponential form.
    	if (x <= 2.4e-7)
    		tmp = (exp(-x) * 2.0) / 2.0;
    	else
    		% Large x: rational approximation from the Taylor expansion.
    		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
    	end
    	tmp_2 = tmp;
    end
    
    NOTE: eps should be positive before calling this function
    (* Herbie-generated piecewise form: exponential branch for x <= 2.4e-7,
       rational approximation otherwise; the N[..., $MachinePrecision] wrappers
       force machine-precision rounding at each step. eps is assumed positive. *)
    code[x_, eps_] := If[LessEqual[x, 2.4e-7], N[(N[(N[Exp[(-x)], $MachinePrecision] * 2.0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(x * N[(N[(-1.0 + N[(N[(1.0 - eps), $MachinePrecision] * N[(1.0 - eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(eps + -1.0), $MachinePrecision] + N[(N[(eps + -1.0), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
    
    \begin{array}{l}
    eps = |eps|\\
    \\
    \begin{array}{l}
    \mathbf{if}\;x \leq 2.4 \cdot 10^{-7}:\\
    \;\;\;\;\frac{e^{-x} \cdot 2}{2}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 2 regimes
    2. if x < 2.39999999999999979e-7

      1. Initial program 63.3%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. Simplified40.1%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(e^{\varepsilon + -1}\right)}^{x} - \left(\frac{1}{\varepsilon} + -1\right) \cdot {\left(e^{-x}\right)}^{\left(1 + \varepsilon\right)}}{2}} \]
        2. Taylor expanded in eps around inf 97.7%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
        3. Taylor expanded in eps around 0 75.4%

          \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} - -1 \cdot e^{-1 \cdot x}}}{2} \]
        4. Step-by-step derivation
          1. cancel-sign-sub-inv75.4%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} + \left(--1\right) \cdot e^{-1 \cdot x}}}{2} \]
          2. metadata-eval75.4%

            \[\leadsto \frac{e^{-1 \cdot x} + \color{blue}{1} \cdot e^{-1 \cdot x}}{2} \]
          3. distribute-rgt1-in75.4%

            \[\leadsto \frac{\color{blue}{\left(1 + 1\right) \cdot e^{-1 \cdot x}}}{2} \]
          4. metadata-eval75.4%

            \[\leadsto \frac{\color{blue}{2} \cdot e^{-1 \cdot x}}{2} \]
          5. neg-mul-1 (75.4%)

            \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
        5. Simplified75.4%

          \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]

        if 2.39999999999999979e-7 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. sub-neg100.0%

            \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
          2. neg-sub0100.0%

            \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
          3. associate-+r-100.0%

            \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
        3. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        4. Taylor expanded in x around 0 31.0%

          \[\leadsto \frac{\color{blue}{\left(1 + \left(-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right) + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        5. Taylor expanded in x around inf 14.4%

          \[\leadsto \frac{\color{blue}{-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}}{2} \]
        6. Step-by-step derivation
          1. mul-1-neg14.4%

            \[\leadsto \frac{\color{blue}{-x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
          2. distribute-rgt-neg-in14.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
          3. *-commutative14.4%

            \[\leadsto \frac{x \cdot \left(-\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)}\right)}{2} \]
          4. distribute-rgt-neg-in14.4%

            \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right)\right)\right)}}{2} \]
          5. mul-1-neg14.4%

            \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}\right)}{2} \]
          6. distribute-lft-in14.4%

            \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot 1 + -1 \cdot \frac{1}{\varepsilon}\right)}\right)}{2} \]
          7. metadata-eval14.4%

            \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(\color{blue}{-1} + -1 \cdot \frac{1}{\varepsilon}\right)\right)}{2} \]
          8. associate-*r/14.4%

            \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \color{blue}{\frac{-1 \cdot 1}{\varepsilon}}\right)\right)}{2} \]
          9. metadata-eval14.4%

            \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{\color{blue}{-1}}{\varepsilon}\right)\right)}{2} \]
        7. Simplified14.4%

          \[\leadsto \frac{\color{blue}{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{-1}{\varepsilon}\right)\right)}}{2} \]
        8. Step-by-step derivation
          1. distribute-lft-in14.4%

            \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot -1 + \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}}{2} \]
          2. flip-+16.8%

            \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right) \cdot \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}{\left(1 - \varepsilon\right) \cdot -1 - \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}}}}{2} \]
        9. Applied egg-rr16.8%

          \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
        10. Step-by-step derivation
          1. *-commutative16.8%

            \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          2. neg-mul-116.8%

            \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          3. *-commutative16.8%

            \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          4. neg-mul-116.8%

            \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          5. sqr-neg16.8%

            \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          6. *-commutative16.8%

            \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{-1 \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          7. neg-mul-116.8%

            \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        11. Simplified16.8%

          \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
        12. Taylor expanded in eps around inf 65.9%

          \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \color{blue}{1}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
      3. Recombined 2 regimes into one program.
      4. Final simplification — 72.5%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 2.4 \cdot 10^{-7}:\\ \;\;\;\;\frac{e^{-x} \cdot 2}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \]

      Alternative 5: 78.1% accurate, 2.1× speedup?

      \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 3.8 \cdot 10^{-7}:\\ \;\;\;\;\frac{e^{-x} + 1}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \end{array} \]
      NOTE: eps should be positive before calling this function
      (FPCore (x eps)
       :precision binary64
       (if (<= x 3.8e-7)
         (/ (+ (exp (- x)) 1.0) 2.0)
         (/
          (*
           x
           (/
            (+ -1.0 (* (- 1.0 eps) (- 1.0 eps)))
            (+ (+ eps -1.0) (/ (+ eps -1.0) eps))))
          2.0)))
      eps = abs(eps);
      double code(double x, double eps) {
      	double tmp;
      	if (x <= 3.8e-7) {
      		tmp = (exp(-x) + 1.0) / 2.0;
      	} else {
      		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
      	}
      	return tmp;
      }
      
      NOTE: eps should be positive before calling this function
      real(8) function code(x, eps)
          ! Piecewise evaluation of the NMSE 6.1 expression.
          ! NOTE: eps is assumed positive (per the report note) — take abs(eps) first.
          real(8), intent (in) :: x
          real(8), intent (in) :: eps
          real(8) :: tmp
          ! Small-x regime: the expression reduces to (exp(-x) + 1) / 2.
          if (x <= 3.8d-7) then
              tmp = (exp(-x) + 1.0d0) / 2.0d0
          else
              ! Larger x: rational form in eps avoids the 1/eps cancellation.
              tmp = (x * (((-1.0d0) + ((1.0d0 - eps) * (1.0d0 - eps))) / ((eps + (-1.0d0)) + ((eps + (-1.0d0)) / eps)))) / 2.0d0
          end if
          code = tmp
      end function
      
      eps = Math.abs(eps);
      public static double code(double x, double eps) {
      	// Piecewise evaluation of the NMSE 6.1 expression.
      	// NOTE: eps must be positive — apply Math.abs(eps) before calling.
      	if (x <= 3.8e-7) {
      		// Small-x regime: the expression reduces to (e^{-x} + 1) / 2.
      		return (Math.exp(-x) + 1.0) / 2.0;
      	}
      	// Larger x: rational form in eps avoids the 1/eps cancellation.
      	final double em1 = eps + -1.0;
      	final double omeps = 1.0 - eps;
      	return (x * ((-1.0 + omeps * omeps) / (em1 + em1 / eps))) / 2.0;
      }
      
      eps = abs(eps)
      def code(x, eps):
      	"""Piecewise evaluation of the NMSE 6.1 expression.

      	NOTE: eps must be positive -- apply abs(eps) before calling.
      	"""
      	if x <= 3.8e-7:
      		# Small-x regime: the expression reduces to (e^-x + 1) / 2.
      		return (math.exp(-x) + 1.0) / 2.0
      	# Larger x: rational form in eps avoids the 1/eps cancellation.
      	em1 = eps + -1.0
      	omeps = 1.0 - eps
      	return (x * ((-1.0 + omeps * omeps) / (em1 + em1 / eps))) / 2.0
      
      eps = abs(eps)
      function code(x, eps)
      	# Piecewise evaluation of the NMSE 6.1 expression.
      	# NOTE: eps is assumed positive (per the report note) — take abs(eps) first.
      	tmp = 0.0
      	# Small-x regime: the expression reduces to (exp(-x) + 1) / 2.
      	if (x <= 3.8e-7)
      		tmp = Float64(Float64(exp(Float64(-x)) + 1.0) / 2.0);
      	else
      		# Larger x: rational form in eps avoids the 1/eps cancellation.
      		tmp = Float64(Float64(x * Float64(Float64(-1.0 + Float64(Float64(1.0 - eps) * Float64(1.0 - eps))) / Float64(Float64(eps + -1.0) + Float64(Float64(eps + -1.0) / eps)))) / 2.0);
      	end
      	return tmp
      end
      
      eps = abs(eps)
      function tmp_2 = code(x, eps)
      	% Piecewise evaluation of the NMSE 6.1 expression.
      	% NOTE: eps is assumed positive (per the report note) -- take abs(eps) first.
      	tmp = 0.0;
      	if (x <= 3.8e-7)
      		% Small-x regime: the expression reduces to (exp(-x) + 1) / 2.
      		tmp = (exp(-x) + 1.0) / 2.0;
      	else
      		% Larger x: rational form in eps avoids the 1/eps cancellation.
      		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
      	end
      	tmp_2 = tmp;
      end
      
      NOTE: eps should be positive before calling this function
      (* Piecewise evaluation of the NMSE 6.1 expression; eps is assumed positive (apply Abs first). *)
      code[x_, eps_] := If[LessEqual[x, 3.8e-7], N[(N[(N[Exp[(-x)], $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(x * N[(N[(-1.0 + N[(N[(1.0 - eps), $MachinePrecision] * N[(1.0 - eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(eps + -1.0), $MachinePrecision] + N[(N[(eps + -1.0), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
      
      \begin{array}{l}
      eps = |eps|\\
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq 3.8 \cdot 10^{-7}:\\
      \;\;\;\;\frac{e^{-x} + 1}{2}\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if x < 3.80000000000000015e-7

        1. Initial program 63.3%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. Simplified — 40.1%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot {\left(e^{\varepsilon + -1}\right)}^{x} - \left(\frac{1}{\varepsilon} + -1\right) \cdot {\left(e^{-x}\right)}^{\left(1 + \varepsilon\right)}}{2}} \]
          2. Taylor expanded in eps around inf 97.7%

            \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
          3. Taylor expanded in eps around inf 97.7%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(\varepsilon \cdot x\right)}}}{2} \]
          4. Step-by-step derivation
            1. *-commutative97.7%

              \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
          5. Simplified97.7%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
          6. Taylor expanded in eps around 0 75.4%

            \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot x}}}{2} \]
          7. Step-by-step derivation
            1. mul-1-neg75.4%

              \[\leadsto \frac{1 + e^{\color{blue}{-x}}}{2} \]
          8. Simplified75.4%

            \[\leadsto \frac{\color{blue}{1 + e^{-x}}}{2} \]

          if 3.80000000000000015e-7 < x

          1. Initial program 100.0%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. sub-neg100.0%

              \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            2. neg-sub0100.0%

              \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            3. associate-+r-100.0%

              \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
          3. Simplified100.0%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
          4. Taylor expanded in x around 0 31.0%

            \[\leadsto \frac{\color{blue}{\left(1 + \left(-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right) + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          5. Taylor expanded in x around inf 14.4%

            \[\leadsto \frac{\color{blue}{-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}}{2} \]
          6. Step-by-step derivation
            1. mul-1-neg14.4%

              \[\leadsto \frac{\color{blue}{-x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
            2. distribute-rgt-neg-in14.4%

              \[\leadsto \frac{\color{blue}{x \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
            3. *-commutative14.4%

              \[\leadsto \frac{x \cdot \left(-\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)}\right)}{2} \]
            4. distribute-rgt-neg-in14.4%

              \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right)\right)\right)}}{2} \]
            5. mul-1-neg14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}\right)}{2} \]
            6. distribute-lft-in14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot 1 + -1 \cdot \frac{1}{\varepsilon}\right)}\right)}{2} \]
            7. metadata-eval14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(\color{blue}{-1} + -1 \cdot \frac{1}{\varepsilon}\right)\right)}{2} \]
            8. associate-*r/14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \color{blue}{\frac{-1 \cdot 1}{\varepsilon}}\right)\right)}{2} \]
            9. metadata-eval14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{\color{blue}{-1}}{\varepsilon}\right)\right)}{2} \]
          7. Simplified14.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{-1}{\varepsilon}\right)\right)}}{2} \]
          8. Step-by-step derivation
            1. distribute-lft-in14.4%

              \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot -1 + \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}}{2} \]
            2. flip-+16.8%

              \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right) \cdot \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}{\left(1 - \varepsilon\right) \cdot -1 - \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}}}}{2} \]
          9. Applied egg-rr16.8%

            \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
          10. Step-by-step derivation
            1. *-commutative16.8%

              \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            2. neg-mul-116.8%

              \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            3. *-commutative16.8%

              \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            4. neg-mul-116.8%

              \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            5. sqr-neg16.8%

              \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            6. *-commutative16.8%

              \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{-1 \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            7. neg-mul-116.8%

              \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          11. Simplified16.8%

            \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
          12. Taylor expanded in eps around inf 65.9%

            \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \color{blue}{1}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification — 72.5%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 3.8 \cdot 10^{-7}:\\ \;\;\;\;\frac{e^{-x} + 1}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \]

        Alternative 6: 71.3% accurate, 9.1× speedup?

        \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 9.2 \cdot 10^{-7}:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \end{array} \]
        NOTE: eps should be positive before calling this function
        (FPCore (x eps)
         :precision binary64
         (if (<= x 9.2e-7)
           (/ (- 2.0 (* eps x)) 2.0)
           (/
            (*
             x
             (/
              (+ -1.0 (* (- 1.0 eps) (- 1.0 eps)))
              (+ (+ eps -1.0) (/ (+ eps -1.0) eps))))
            2.0)))
        eps = abs(eps);
        double code(double x, double eps) {
        	/* Piecewise evaluation of the NMSE 6.1 expression.
        	 * NOTE: eps must be positive — take fabs(eps) before calling.
        	 */
        	if (x <= 9.2e-7) {
        		/* Small-x regime: linear approximation (2 - eps*x) / 2. */
        		return (2.0 - (eps * x)) / 2.0;
        	}
        	/* Larger x: rational form in eps avoids the 1/eps cancellation. */
        	const double em1 = eps + -1.0;
        	const double omeps = 1.0 - eps;
        	const double ratio = (-1.0 + omeps * omeps) / (em1 + em1 / eps);
        	return (x * ratio) / 2.0;
        }
        
        NOTE: eps should be positive before calling this function
        real(8) function code(x, eps)
            ! Piecewise evaluation of the NMSE 6.1 expression.
            ! NOTE: eps is assumed positive (per the report note) — take abs(eps) first.
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            real(8) :: tmp
            ! Small-x regime: linear approximation (2 - eps*x) / 2.
            if (x <= 9.2d-7) then
                tmp = (2.0d0 - (eps * x)) / 2.0d0
            else
                ! Larger x: rational form in eps avoids the 1/eps cancellation.
                tmp = (x * (((-1.0d0) + ((1.0d0 - eps) * (1.0d0 - eps))) / ((eps + (-1.0d0)) + ((eps + (-1.0d0)) / eps)))) / 2.0d0
            end if
            code = tmp
        end function
        
        eps = Math.abs(eps);
        public static double code(double x, double eps) {
        	// Piecewise evaluation of the NMSE 6.1 expression.
        	// NOTE: eps must be positive — apply Math.abs(eps) before calling.
        	if (x <= 9.2e-7) {
        		// Small-x regime: linear approximation (2 - eps*x) / 2.
        		return (2.0 - (eps * x)) / 2.0;
        	}
        	// Larger x: rational form in eps avoids the 1/eps cancellation.
        	final double em1 = eps + -1.0;
        	final double omeps = 1.0 - eps;
        	return (x * ((-1.0 + omeps * omeps) / (em1 + em1 / eps))) / 2.0;
        }
        
        eps = abs(eps)
        def code(x, eps):
        	"""Piecewise evaluation of the NMSE 6.1 expression.

        	NOTE: eps must be positive -- apply abs(eps) before calling.
        	"""
        	if x <= 9.2e-7:
        		# Small-x regime: linear approximation (2 - eps*x) / 2.
        		return (2.0 - (eps * x)) / 2.0
        	# Larger x: rational form in eps avoids the 1/eps cancellation.
        	em1 = eps + -1.0
        	omeps = 1.0 - eps
        	return (x * ((-1.0 + omeps * omeps) / (em1 + em1 / eps))) / 2.0
        
        eps = abs(eps)
        function code(x, eps)
        	# Piecewise evaluation of the NMSE 6.1 expression.
        	# NOTE: eps is assumed positive (per the report note) — take abs(eps) first.
        	tmp = 0.0
        	# Small-x regime: linear approximation (2 - eps*x) / 2.
        	if (x <= 9.2e-7)
        		tmp = Float64(Float64(2.0 - Float64(eps * x)) / 2.0);
        	else
        		# Larger x: rational form in eps avoids the 1/eps cancellation.
        		tmp = Float64(Float64(x * Float64(Float64(-1.0 + Float64(Float64(1.0 - eps) * Float64(1.0 - eps))) / Float64(Float64(eps + -1.0) + Float64(Float64(eps + -1.0) / eps)))) / 2.0);
        	end
        	return tmp
        end
        
        eps = abs(eps)
        function tmp_2 = code(x, eps)
        	% Piecewise evaluation of the NMSE 6.1 expression.
        	% NOTE: eps is assumed positive (per the report note) -- take abs(eps) first.
        	tmp = 0.0;
        	if (x <= 9.2e-7)
        		% Small-x regime: linear approximation (2 - eps*x) / 2.
        		tmp = (2.0 - (eps * x)) / 2.0;
        	else
        		% Larger x: rational form in eps avoids the 1/eps cancellation.
        		tmp = (x * ((-1.0 + ((1.0 - eps) * (1.0 - eps))) / ((eps + -1.0) + ((eps + -1.0) / eps)))) / 2.0;
        	end
        	tmp_2 = tmp;
        end
        
        NOTE: eps should be positive before calling this function
        (* Piecewise evaluation of the NMSE 6.1 expression; eps is assumed positive (apply Abs first). *)
        code[x_, eps_] := If[LessEqual[x, 9.2e-7], N[(N[(2.0 - N[(eps * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(x * N[(N[(-1.0 + N[(N[(1.0 - eps), $MachinePrecision] * N[(1.0 - eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(eps + -1.0), $MachinePrecision] + N[(N[(eps + -1.0), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
        
        \begin{array}{l}
        eps = |eps|\\
        \\
        \begin{array}{l}
        \mathbf{if}\;x \leq 9.2 \cdot 10^{-7}:\\
        \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\
        
        \mathbf{else}:\\
        \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if x < 9.1999999999999998e-7

          1. Initial program 63.3%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Simplified — 63.3%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{\varepsilon \cdot x - x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          3. Taylor expanded in x around 0 57.0%

            \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - \left(1 + \varepsilon\right) \cdot \left(1 - \frac{1}{\varepsilon}\right)\right)}}{2} \]
          4. Taylor expanded in eps around 0 60.1%

            \[\leadsto \frac{2 + x \cdot \left(\color{blue}{\frac{-1}{\varepsilon}} - \left(1 + \varepsilon\right) \cdot \left(1 - \frac{1}{\varepsilon}\right)\right)}{2} \]
          5. Taylor expanded in eps around 0 60.1%

            \[\leadsto \frac{2 + x \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)}}{2} \]
          6. Step-by-step derivation
            1. mul-1-neg60.1%

              \[\leadsto \frac{2 + x \cdot \color{blue}{\left(-\varepsilon\right)}}{2} \]
          7. Simplified60.1%

            \[\leadsto \frac{2 + x \cdot \color{blue}{\left(-\varepsilon\right)}}{2} \]

          if 9.1999999999999998e-7 < x

          1. Initial program 100.0%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. sub-neg100.0%

              \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            2. neg-sub0100.0%

              \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            3. associate-+r-100.0%

              \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
          3. Simplified100.0%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
          4. Taylor expanded in x around 0 31.0%

            \[\leadsto \frac{\color{blue}{\left(1 + \left(-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right) + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          5. Taylor expanded in x around inf 14.4%

            \[\leadsto \frac{\color{blue}{-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}}{2} \]
          6. Step-by-step derivation
            1. mul-1-neg14.4%

              \[\leadsto \frac{\color{blue}{-x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
            2. distribute-rgt-neg-in14.4%

              \[\leadsto \frac{\color{blue}{x \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
            3. *-commutative14.4%

              \[\leadsto \frac{x \cdot \left(-\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)}\right)}{2} \]
            4. distribute-rgt-neg-in14.4%

              \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right)\right)\right)}}{2} \]
            5. mul-1-neg14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}\right)}{2} \]
            6. distribute-lft-in14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(-1 \cdot 1 + -1 \cdot \frac{1}{\varepsilon}\right)}\right)}{2} \]
            7. metadata-eval14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(\color{blue}{-1} + -1 \cdot \frac{1}{\varepsilon}\right)\right)}{2} \]
            8. associate-*r/14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \color{blue}{\frac{-1 \cdot 1}{\varepsilon}}\right)\right)}{2} \]
            9. metadata-eval14.4%

              \[\leadsto \frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{\color{blue}{-1}}{\varepsilon}\right)\right)}{2} \]
          7. Simplified14.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{-1}{\varepsilon}\right)\right)}}{2} \]
          8. Step-by-step derivation
            1. distribute-lft-in14.4%

              \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot -1 + \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}}{2} \]
            2. flip-+16.8%

              \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right) \cdot \left(\left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}\right)}{\left(1 - \varepsilon\right) \cdot -1 - \left(1 - \varepsilon\right) \cdot \frac{-1}{\varepsilon}}}}{2} \]
          9. Applied egg-rr16.8%

            \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(\left(1 - \varepsilon\right) \cdot -1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
          10. Step-by-step derivation
            1. *-commutative16.8%

              \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            2. neg-mul-116.8%

              \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} \cdot \left(\left(1 - \varepsilon\right) \cdot -1\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            3. *-commutative16.8%

              \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            4. neg-mul-116.8%

              \[\leadsto \frac{x \cdot \frac{\left(-\left(1 - \varepsilon\right)\right) \cdot \color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            5. sqr-neg16.8%

              \[\leadsto \frac{x \cdot \frac{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(1 - \varepsilon\right) \cdot -1 - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            6. *-commutative16.8%

              \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{-1 \cdot \left(1 - \varepsilon\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
            7. neg-mul-116.8%

              \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\color{blue}{\left(-\left(1 - \varepsilon\right)\right)} - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
          11. Simplified16.8%

            \[\leadsto \frac{x \cdot \color{blue}{\frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \frac{1 - \varepsilon}{\varepsilon} \cdot \frac{1 - \varepsilon}{\varepsilon}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}}{2} \]
          12. Taylor expanded in eps around inf 65.9%

            \[\leadsto \frac{x \cdot \frac{\left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right) - \color{blue}{1}}{\left(-\left(1 - \varepsilon\right)\right) - \frac{1 - \varepsilon}{\varepsilon}}}{2} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification61.9%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 9.2 \cdot 10^{-7}:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \frac{-1 + \left(1 - \varepsilon\right) \cdot \left(1 - \varepsilon\right)}{\left(\varepsilon + -1\right) + \frac{\varepsilon + -1}{\varepsilon}}}{2}\\ \end{array} \]

        Alternative 7: 63.1% accurate, 25.0× speedup?

        \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 0.0085:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
        NOTE: eps should be positive before calling this function
        ;; Herbie alternative 7: first-order series (2 - eps*x)/2 for x <= 0.0085, else 0.
        (FPCore (x eps)
         :precision binary64
         (if (<= x 0.0085) (/ (- 2.0 (* eps x)) 2.0) 0.0))
        eps = abs(eps);
        /* Herbie alternative 7: (2 - eps*x)/2 for x <= 0.0085, 0 beyond the cutoff.
         * The report applies eps = fabs(eps) before calling (see NOTE). */
        double code(double x, double eps) {
        	if (x <= 0.0085) {
        		return (2.0 - eps * x) / 2.0;
        	}
        	return 0.0;
        }
        
        NOTE: eps should be positive before calling this function
        ! Herbie alternative 7: (2 - eps*x)/2 for x <= 0.0085, 0 beyond the cutoff.
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            ! Assign the result directly; no temporary needed.
            if (x <= 0.0085d0) then
                code = (2.0d0 - (eps * x)) / 2.0d0
            else
                code = 0.0d0
            end if
        end function
        
        eps = Math.abs(eps);
        /** Herbie alternative 7: (2 - eps*x)/2 for x <= 0.0085, 0 beyond the cutoff. */
        public static double code(double x, double eps) {
        	return (x <= 0.0085) ? (2.0 - eps * x) / 2.0 : 0.0;
        }
        
        eps = abs(eps)
        def code(x, eps):
        	"""Herbie alternative 7: (2 - eps*x)/2 for x <= 0.0085, else 0.0."""
        	if x <= 0.0085:
        		return (2.0 - eps * x) / 2.0
        	return 0.0
        
        eps = abs(eps)
        # Herbie alternative 7: (2 - eps*x)/2 for x <= 0.0085, 0.0 beyond the cutoff.
        function code(x, eps)
        	x <= 0.0085 ? Float64(Float64(2.0 - Float64(eps * x)) / 2.0) : 0.0
        end
        
        eps = abs(eps)
        % Herbie alternative 7: (2 - eps*x)/2 for x <= 0.0085, 0 beyond the cutoff.
        function out = code(x, eps)
        	if (x <= 0.0085)
        		out = (2.0 - (eps * x)) / 2.0;
        	else
        		out = 0.0;
        	end
        end
        
        NOTE: eps should be positive before calling this function
        code[x_, eps_] := If[LessEqual[x, 0.0085], N[(N[(2.0 - N[(eps * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
        
        \begin{array}{l}
        eps = |eps|\\
        \\
        \begin{array}{l}
        \mathbf{if}\;x \leq 0.0085:\\
        \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\
        
        \mathbf{else}:\\
        \;\;\;\;0\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if x < 0.0085000000000000006

          1. Initial program 63.3%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Simplified63.3%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{\varepsilon \cdot x - x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          3. Taylor expanded in x around 0 57.0%

            \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) - \left(1 + \varepsilon\right) \cdot \left(1 - \frac{1}{\varepsilon}\right)\right)}}{2} \]
          4. Taylor expanded in eps around 0 60.1%

            \[\leadsto \frac{2 + x \cdot \left(\color{blue}{\frac{-1}{\varepsilon}} - \left(1 + \varepsilon\right) \cdot \left(1 - \frac{1}{\varepsilon}\right)\right)}{2} \]
          5. Taylor expanded in eps around 0 60.1%

            \[\leadsto \frac{2 + x \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)}}{2} \]
          6. Step-by-step derivation
            1. mul-1-neg60.1%

              \[\leadsto \frac{2 + x \cdot \color{blue}{\left(-\varepsilon\right)}}{2} \]
          7. Simplified60.1%

            \[\leadsto \frac{2 + x \cdot \color{blue}{\left(-\varepsilon\right)}}{2} \]

          if 0.0085000000000000006 < x

          1. Initial program 100.0%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Simplified100.0%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{\varepsilon \cdot x - x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          3. Taylor expanded in eps around 0 50.8%

            \[\leadsto \frac{\color{blue}{\frac{e^{-x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          4. Step-by-step derivation
            1. neg-mul-150.8%

              \[\leadsto \frac{\frac{e^{\color{blue}{-1 \cdot x}} - \frac{1}{e^{x}}}{\varepsilon}}{2} \]
            2. rec-exp50.8%

              \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{e^{-x}}}{\varepsilon}}{2} \]
            3. neg-mul-150.8%

              \[\leadsto \frac{\frac{e^{-1 \cdot x} - e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
            4. div-sub50.8%

              \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{-1 \cdot x}}{\varepsilon}}}{2} \]
            5. +-inverses50.8%

              \[\leadsto \frac{\color{blue}{0}}{2} \]
          5. Simplified50.8%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification57.3%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 0.0085:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

        Alternative 8: 63.6% accurate, 28.2× speedup?

        \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq -0.6:\\ \;\;\;\;\frac{\varepsilon \cdot \left(-x\right)}{2}\\ \mathbf{elif}\;x \leq 530:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
        NOTE: eps should be positive before calling this function
        ;; Herbie alternative 8: three regimes split at x = -0.6 and x = 530.
        (FPCore (x eps)
         :precision binary64
         (if (<= x -0.6) (/ (* eps (- x)) 2.0) (if (<= x 530.0) 1.0 0.0)))
        eps = abs(eps);
        /* Herbie alternative 8: eps*(-x)/2 for x <= -0.6, 1 on (-0.6, 530], 0 past 530. */
        double code(double x, double eps) {
        	if (x <= -0.6) {
        		return (eps * -x) / 2.0;
        	}
        	return (x <= 530.0) ? 1.0 : 0.0;
        }
        
        NOTE: eps should be positive before calling this function
        ! Herbie alternative 8: eps*(-x)/2 for x <= -0.6, 1 on (-0.6, 530], 0 past 530.
        ! Fix: the generated "(eps * -x)" puts a unary minus directly after a binary
        ! operator, which the Fortran expression grammar does not allow; strict
        ! compilers reject it.  The negation must be parenthesized: (eps * (-x)).
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            real(8) :: tmp
            if (x <= (-0.6d0)) then
                tmp = (eps * (-x)) / 2.0d0
            else if (x <= 530.0d0) then
                tmp = 1.0d0
            else
                tmp = 0.0d0
            end if
            code = tmp
        end function
        
        eps = Math.abs(eps);
        /** Herbie alternative 8: eps*(-x)/2 for x <= -0.6, 1 on (-0.6, 530], 0 past 530. */
        public static double code(double x, double eps) {
        	if (x <= -0.6) {
        		return (eps * -x) / 2.0;
        	}
        	return (x <= 530.0) ? 1.0 : 0.0;
        }
        
        eps = abs(eps)
        def code(x, eps):
        	"""Herbie alternative 8: eps*(-x)/2 for x <= -0.6, 1.0 up to x = 530, else 0.0."""
        	if x <= -0.6:
        		return (eps * -x) / 2.0
        	return 1.0 if x <= 530.0 else 0.0
        
        eps = abs(eps)
        # Herbie alternative 8: eps*(-x)/2 for x <= -0.6, 1.0 up to x = 530, 0.0 beyond.
        function code(x, eps)
        	if x <= -0.6
        		return Float64(Float64(eps * Float64(-x)) / 2.0)
        	end
        	x <= 530.0 ? 1.0 : 0.0
        end
        
        eps = abs(eps)
        % Herbie alternative 8: eps*(-x)/2 for x <= -0.6, 1 up to x = 530, 0 beyond.
        function out = code(x, eps)
        	if (x <= -0.6)
        		out = (eps * -x) / 2.0;
        	elseif (x <= 530.0)
        		out = 1.0;
        	else
        		out = 0.0;
        	end
        end
        
        NOTE: eps should be positive before calling this function
        code[x_, eps_] := If[LessEqual[x, -0.6], N[(N[(eps * (-x)), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 530.0], 1.0, 0.0]]
        
        \begin{array}{l}
        eps = |eps|\\
        \\
        \begin{array}{l}
        \mathbf{if}\;x \leq -0.6:\\
        \;\;\;\;\frac{\varepsilon \cdot \left(-x\right)}{2}\\
        
        \mathbf{elif}\;x \leq 530:\\
        \;\;\;\;1\\
        
        \mathbf{else}:\\
        \;\;\;\;0\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 3 regimes
        2. if x < -0.599999999999999978

          1. Initial program 97.4%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. sub-neg97.4%

              \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            2. neg-sub097.4%

              \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            3. associate-+r-97.4%

              \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
          3. Simplified97.4%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
          4. Taylor expanded in x around 0 56.7%

            \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
          5. Taylor expanded in eps around inf 19.9%

            \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
          6. Step-by-step derivation
            1. mul-1-neg19.9%

              \[\leadsto \frac{\color{blue}{-\varepsilon \cdot x}}{2} \]
            2. *-commutative19.9%

              \[\leadsto \frac{-\color{blue}{x \cdot \varepsilon}}{2} \]
            3. distribute-rgt-neg-in19.9%

              \[\leadsto \frac{\color{blue}{x \cdot \left(-\varepsilon\right)}}{2} \]
          7. Simplified19.9%

            \[\leadsto \frac{\color{blue}{x \cdot \left(-\varepsilon\right)}}{2} \]

          if -0.599999999999999978 < x < 530

          1. Initial program 54.4%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. sub-neg54.4%

              \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            2. neg-sub054.4%

              \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            3. associate-+r-54.4%

              \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
          3. Simplified54.4%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
          4. Taylor expanded in x around 0 71.1%

            \[\leadsto \frac{\color{blue}{2}}{2} \]

          if 530 < x

          1. Initial program 100.0%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Simplified100.0%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{\varepsilon \cdot x - x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          3. Taylor expanded in eps around 0 51.4%

            \[\leadsto \frac{\color{blue}{\frac{e^{-x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          4. Step-by-step derivation
            1. neg-mul-151.4%

              \[\leadsto \frac{\frac{e^{\color{blue}{-1 \cdot x}} - \frac{1}{e^{x}}}{\varepsilon}}{2} \]
            2. rec-exp51.4%

              \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{e^{-x}}}{\varepsilon}}{2} \]
            3. neg-mul-151.4%

              \[\leadsto \frac{\frac{e^{-1 \cdot x} - e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
            4. div-sub51.4%

              \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{-1 \cdot x}}{\varepsilon}}}{2} \]
            5. +-inverses51.4%

              \[\leadsto \frac{\color{blue}{0}}{2} \]
          5. Simplified51.4%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        3. Recombined 3 regimes into one program.
        4. Final simplification57.6%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.6:\\ \;\;\;\;\frac{\varepsilon \cdot \left(-x\right)}{2}\\ \mathbf{elif}\;x \leq 530:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

        Alternative 9: 56.4% accurate, 74.1× speedup?

        \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 520:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
        NOTE: eps should be positive before calling this function
        (FPCore (x eps) :precision binary64 (if (<= x 520.0) 1.0 0.0))
        eps = abs(eps);
        /* Herbie alternative 9: step function — 1 for x <= 520, 0 otherwise. */
        double code(double x, double eps) {
        	(void) eps; /* the approximation drops the eps dependence */
        	return (x <= 520.0) ? 1.0 : 0.0;
        }
        
        NOTE: eps should be positive before calling this function
        ! Herbie alternative 9: step function -- 1 for x <= 520, 0 otherwise (eps unused).
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            ! merge picks the first value where the mask holds; both arms are constants.
            code = merge(1.0d0, 0.0d0, x <= 520.0d0)
        end function
        
        eps = Math.abs(eps);
        /** Herbie alternative 9: step function — 1 for x <= 520, 0 otherwise (eps unused). */
        public static double code(double x, double eps) {
        	return (x <= 520.0) ? 1.0 : 0.0;
        }
        
        eps = abs(eps)
        def code(x, eps):
        	"""Herbie alternative 9: step function, 1.0 for x <= 520 else 0.0 (eps unused)."""
        	return 1.0 if x <= 520.0 else 0.0
        
        eps = abs(eps)
        # Herbie alternative 9: step function, 1.0 for x <= 520 else 0.0 (eps unused).
        function code(x, eps)
        	x <= 520.0 ? 1.0 : 0.0
        end
        
        eps = abs(eps)
        % Herbie alternative 9: step function, 1 for x <= 520 else 0 (eps unused).
        function out = code(x, eps)
        	if (x <= 520.0)
        		out = 1.0;
        	else
        		out = 0.0;
        	end
        end
        
        NOTE: eps should be positive before calling this function
        code[x_, eps_] := If[LessEqual[x, 520.0], 1.0, 0.0]
        
        \begin{array}{l}
        eps = |eps|\\
        \\
        \begin{array}{l}
        \mathbf{if}\;x \leq 520:\\
        \;\;\;\;1\\
        
        \mathbf{else}:\\
        \;\;\;\;0\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 2 regimes
        2. if x < 520

          1. Initial program 63.5%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. sub-neg63.5%

              \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \left(-\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            2. neg-sub063.5%

              \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + \color{blue}{\left(0 - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
            3. associate-+r-63.5%

              \[\leadsto \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
          3. Simplified63.5%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
          4. Taylor expanded in x around 0 56.7%

            \[\leadsto \frac{\color{blue}{2}}{2} \]

          if 520 < x

          1. Initial program 100.0%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Simplified100.0%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{\varepsilon \cdot x - x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          3. Taylor expanded in eps around 0 51.4%

            \[\leadsto \frac{\color{blue}{\frac{e^{-x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          4. Step-by-step derivation
            1. neg-mul-151.4%

              \[\leadsto \frac{\frac{e^{\color{blue}{-1 \cdot x}} - \frac{1}{e^{x}}}{\varepsilon}}{2} \]
            2. rec-exp51.4%

              \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{e^{-x}}}{\varepsilon}}{2} \]
            3. neg-mul-151.4%

              \[\leadsto \frac{\frac{e^{-1 \cdot x} - e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
            4. div-sub51.4%

              \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{-1 \cdot x}}{\varepsilon}}}{2} \]
            5. +-inverses51.4%

              \[\leadsto \frac{\color{blue}{0}}{2} \]
          5. Simplified51.4%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification55.1%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 520:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

        Alternative 10: 15.8% accurate, 227.0× speedup?

        \[\begin{array}{l} eps = |eps|\\ \\ 0 \end{array} \]
        NOTE: eps should be positive before calling this function
        (FPCore (x eps) :precision binary64 0.0)
        eps = abs(eps);
        /* Herbie alternative 10: constant-0 approximation; both arguments are ignored. */
        double code(double x, double eps) {
        	return 0.0;
        }
        
        NOTE: eps should be positive before calling this function
        ! Herbie alternative 10: constant-0 approximation; both arguments are ignored.
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            code = 0.0d0
        end function
        
        eps = Math.abs(eps);
        /** Herbie alternative 10: constant-0 approximation; both arguments are ignored. */
        public static double code(double x, double eps) {
        	return 0.0;
        }
        
        eps = abs(eps)
        def code(x, eps):
        	"""Herbie alternative 10: constant-0 approximation; both arguments are ignored."""
        	return 0.0
        
        eps = abs(eps)
        # Herbie alternative 10: constant-0 approximation; both arguments are ignored.
        function code(x, eps)
        	return 0.0
        end
        
        eps = abs(eps)
        % Herbie alternative 10: constant-0 approximation; both arguments are ignored.
        function tmp = code(x, eps)
        	tmp = 0.0;
        end
        
        NOTE: eps should be positive before calling this function
        code[x_, eps_] := 0.0
        
        \begin{array}{l}
        eps = |eps|\\
        \\
        0
        \end{array}
        
        Derivation
        1. Initial program 74.5%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified74.5%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{\varepsilon \cdot x - x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Taylor expanded in eps around 0 17.0%

          \[\leadsto \frac{\color{blue}{\frac{e^{-x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
        4. Step-by-step derivation
          1. neg-mul-117.0%

            \[\leadsto \frac{\frac{e^{\color{blue}{-1 \cdot x}} - \frac{1}{e^{x}}}{\varepsilon}}{2} \]
          2. rec-exp17.0%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{e^{-x}}}{\varepsilon}}{2} \]
          3. neg-mul-117.0%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} - e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
          4. div-sub17.0%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{-1 \cdot x}}{\varepsilon}}}{2} \]
          5. +-inverses17.2%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        5. Simplified17.2%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Final simplification17.2%

          \[\leadsto 0 \]

        Reproduce

        ?
        herbie shell --seed 2023271 
        ;; Original input program; feed this to `herbie shell` (seed above) to reproduce the report.
        (FPCore (x eps)
          :name "NMSE Section 6.1 mentioned, A"
          :precision binary64
          (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))