NMSE Section 6.1 mentioned, A

Percentage Accurate: 73.7% → 98.8%
Time: 11.6s
Alternatives: 11
Speedup: 1.1×

Specification

?
\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
; Specification: ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2, binary64.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
// Specification: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2 in binary64.
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Specification: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2 in double precision.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
// Specification: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2 in double precision.
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	"""Specification: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2."""
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
# Specification: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
# with every intermediate explicitly rounded to Float64.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Specification: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 11 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 73.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
; Initial program: ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2, binary64.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
// Initial program: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2 in binary64.
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Initial program: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2 in double precision.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
// Initial program: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2 in double precision.
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	"""Initial program: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2."""
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
# Initial program: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
# with every intermediate explicitly rounded to Float64.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Initial program: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 98.8% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \end{array} \]
; Alternative 1: (e^(x*(eps-1)) + e^(x*(-1-eps))) / 2, binary64.
(FPCore (x eps)
 :precision binary64
 (/ (+ (exp (* x (+ eps -1.0))) (exp (* x (- -1.0 eps)))) 2.0))
// Alternative 1: (exp(x*(eps-1)) + exp(x*(-1-eps))) / 2.
double code(double x, double eps) {
	return (exp((x * (eps + -1.0))) + exp((x * (-1.0 - eps)))) / 2.0;
}
! Alternative 1: (exp(x*(eps-1)) + exp(x*(-1-eps))) / 2.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (exp((x * (eps + (-1.0d0)))) + exp((x * ((-1.0d0) - eps)))) / 2.0d0
end function
// Alternative 1: (exp(x*(eps-1)) + exp(x*(-1-eps))) / 2.
public static double code(double x, double eps) {
	return (Math.exp((x * (eps + -1.0))) + Math.exp((x * (-1.0 - eps)))) / 2.0;
}
def code(x, eps):
	"""Alternative 1: (exp(x*(eps-1)) + exp(x*(-1-eps))) / 2."""
	return (math.exp((x * (eps + -1.0))) + math.exp((x * (-1.0 - eps)))) / 2.0
# Alternative 1: (exp(x*(eps-1)) + exp(x*(-1-eps))) / 2, intermediates rounded to Float64.
function code(x, eps)
	return Float64(Float64(exp(Float64(x * Float64(eps + -1.0))) + exp(Float64(x * Float64(-1.0 - eps)))) / 2.0)
end
% Alternative 1: (exp(x*(eps-1)) + exp(x*(-1-eps))) / 2.
function tmp = code(x, eps)
	tmp = (exp((x * (eps + -1.0))) + exp((x * (-1.0 - eps)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}
\end{array}
Derivation
  1. Initial program 72.0%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Step-by-step derivation
    1. div-sub72.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    2. +-rgt-identity72.0%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    3. div-sub72.0%

      \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
  3. Simplified72.0%

    \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
  4. Taylor expanded in eps around inf 99.7%

    \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
  5. Step-by-step derivation
    1. mul-1-neg99.7%

      \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
    2. *-commutative99.7%

      \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
    3. mul-1-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
    4. exp-prod99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
    5. +-commutative99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
    6. remove-double-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right) \cdot x\right)}\right)}{2} \]
    7. mul-1-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right) \cdot x\right)}\right)}{2} \]
    8. sub-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
    9. exp-prod99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
    10. mul-1-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
    11. *-commutative99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
    12. sub-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
    13. mul-1-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \left(-\color{blue}{\left(-\varepsilon\right)}\right)\right)}\right)}{2} \]
    14. remove-double-neg99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
    15. distribute-rgt-neg-in99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]
    16. +-commutative99.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]
  6. Simplified99.7%

    \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
  7. Final simplification99.7%

    \[\leadsto \frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \]

Alternative 2: 88.6% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{x \cdot \left(-\varepsilon\right)}\\ \mathbf{if}\;x \leq 7.2 \cdot 10^{+18}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon} + t_0}{2}\\ \mathbf{elif}\;x \leq 1.22 \cdot 10^{+84}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon - x} + t_0}{2}\\ \end{array} \end{array} \]
; Alternative 2: three regimes split on x, with t_0 = e^(-x*eps) shared by two branches.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (exp (* x (- eps)))))
   (if (<= x 7.2e+18)
     (/ (+ (exp (* x eps)) t_0) 2.0)
     (if (<= x 1.22e+84) 0.0 (/ (+ (exp (- (* x eps) x)) t_0) 2.0)))))
// Alternative 2: piecewise by regime of x.
//   x <= 7.2e18:  (exp(x*eps) + exp(-x*eps)) / 2
//   x <= 1.22e84: 0
//   otherwise:    (exp(x*eps - x) + exp(-x*eps)) / 2
double code(double x, double eps) {
	double t_0 = exp((x * -eps));
	double tmp;
	if (x <= 7.2e+18) {
		tmp = (exp((x * eps)) + t_0) / 2.0;
	} else if (x <= 1.22e+84) {
		tmp = 0.0;
	} else {
		tmp = (exp(((x * eps) - x)) + t_0) / 2.0;
	}
	return tmp;
}
! Alternative 2: piecewise by regime of x (see the C listing for the three branches).
! NOTE(review): "x * -eps" places two operators adjacent, which is nonstandard Fortran;
! many compilers accept it as an extension, but "x * (-eps)" would be portable.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp((x * -eps))
    if (x <= 7.2d+18) then
        tmp = (exp((x * eps)) + t_0) / 2.0d0
    else if (x <= 1.22d+84) then
        tmp = 0.0d0
    else
        tmp = (exp(((x * eps) - x)) + t_0) / 2.0d0
    end if
    code = tmp
end function
// Alternative 2: piecewise by regime of x.
//   x <= 7.2e18:  (exp(x*eps) + exp(-x*eps)) / 2
//   x <= 1.22e84: 0
//   otherwise:    (exp(x*eps - x) + exp(-x*eps)) / 2
public static double code(double x, double eps) {
	double t_0 = Math.exp((x * -eps));
	double tmp;
	if (x <= 7.2e+18) {
		tmp = (Math.exp((x * eps)) + t_0) / 2.0;
	} else if (x <= 1.22e+84) {
		tmp = 0.0;
	} else {
		tmp = (Math.exp(((x * eps) - x)) + t_0) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 2: piecewise by regime of x.

	x <= 7.2e18:  (exp(x*eps) + exp(-x*eps)) / 2
	x <= 1.22e84: 0
	otherwise:    (exp(x*eps - x) + exp(-x*eps)) / 2
	"""
	t_0 = math.exp((x * -eps))
	tmp = 0  # placeholder; every branch below reassigns it
	if x <= 7.2e+18:
		tmp = (math.exp((x * eps)) + t_0) / 2.0
	elif x <= 1.22e+84:
		tmp = 0.0
	else:
		tmp = (math.exp(((x * eps) - x)) + t_0) / 2.0
	return tmp
# Alternative 2: piecewise by regime of x (see the FPCore listing); intermediates rounded to Float64.
function code(x, eps)
	t_0 = exp(Float64(x * Float64(-eps)))
	tmp = 0.0  # placeholder; every branch below reassigns it
	if (x <= 7.2e+18)
		tmp = Float64(Float64(exp(Float64(x * eps)) + t_0) / 2.0);
	elseif (x <= 1.22e+84)
		tmp = 0.0;
	else
		tmp = Float64(Float64(exp(Float64(Float64(x * eps) - x)) + t_0) / 2.0);
	end
	return tmp
end
% Alternative 2: piecewise by regime of x.
%   x <= 7.2e18:  (exp(x*eps) + exp(-x*eps)) / 2
%   x <= 1.22e84: 0
%   otherwise:    (exp(x*eps - x) + exp(-x*eps)) / 2
function tmp_2 = code(x, eps)
	t_0 = exp((x * -eps));
	tmp = 0.0;
	if (x <= 7.2e+18)
		tmp = (exp((x * eps)) + t_0) / 2.0;
	elseif (x <= 1.22e+84)
		tmp = 0.0;
	else
		tmp = (exp(((x * eps) - x)) + t_0) / 2.0;
	end
	tmp_2 = tmp;
end
code[x_, eps_] := Block[{t$95$0 = N[Exp[N[(x * (-eps)), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[x, 7.2e+18], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.22e+84], 0.0, N[(N[(N[Exp[N[(N[(x * eps), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision] + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := e^{x \cdot \left(-\varepsilon\right)}\\
\mathbf{if}\;x \leq 7.2 \cdot 10^{+18}:\\
\;\;\;\;\frac{e^{x \cdot \varepsilon} + t_0}{2}\\

\mathbf{elif}\;x \leq 1.22 \cdot 10^{+84}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \varepsilon - x} + t_0}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 7.2e18

    1. Initial program 62.1%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub62.1%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity62.1%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub62.1%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified62.1%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 99.6%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg99.6%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative99.6%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. remove-double-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right) \cdot x\right)}\right)}{2} \]
      7. mul-1-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right) \cdot x\right)}\right)}{2} \]
      8. sub-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. sub-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
      13. mul-1-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \left(-\color{blue}{\left(-\varepsilon\right)}\right)\right)}\right)}{2} \]
      14. remove-double-neg99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. distribute-rgt-neg-in99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]
      16. +-commutative99.6%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]
    6. Simplified99.6%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
    7. Taylor expanded in eps around inf 99.2%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
    8. Step-by-step derivation
      1. associate-*r*99.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      2. neg-mul-199.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}\right)}{2} \]
    9. Simplified99.2%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}\right)}{2} \]
    10. Taylor expanded in x around inf 99.2%

      \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
    11. Taylor expanded in eps around inf 99.5%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
    12. Step-by-step derivation
      1. *-commutative99.5%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
    13. Simplified99.5%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]

    if 7.2e18 < x < 1.2200000000000001e84

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. Simplified100.0%

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
      2. Taylor expanded in eps around 0 80.3%

        \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
      3. Step-by-step derivation
        1. div-sub80.3%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
        2. rec-exp80.3%

          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
        3. mul-1-neg80.3%

          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
        4. +-inverses80.3%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      4. Simplified80.3%

        \[\leadsto \frac{\color{blue}{0}}{2} \]

      if 1.2200000000000001e84 < x

      1. Initial program 100.0%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. div-sub100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
        2. +-rgt-identity100.0%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        3. div-sub100.0%

          \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      3. Simplified100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in eps around inf 100.0%

        \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
      5. Step-by-step derivation
        1. mul-1-neg100.0%

          \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
        2. *-commutative100.0%

          \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
        3. mul-1-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
        4. exp-prod100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
        5. +-commutative100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
        6. remove-double-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right) \cdot x\right)}\right)}{2} \]
        7. mul-1-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right) \cdot x\right)}\right)}{2} \]
        8. sub-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
        9. exp-prod100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
        10. mul-1-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
        11. *-commutative100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
        12. sub-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
        13. mul-1-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \left(-\color{blue}{\left(-\varepsilon\right)}\right)\right)}\right)}{2} \]
        14. remove-double-neg100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
        15. distribute-rgt-neg-in100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]
        16. +-commutative100.0%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]
      6. Simplified100.0%

        \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
      7. Taylor expanded in eps around inf 64.7%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
      8. Step-by-step derivation
        1. associate-*r*64.7%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
        2. neg-mul-164.7%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}\right)}{2} \]
      9. Simplified64.7%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}\right)}{2} \]
      10. Taylor expanded in x around inf 64.7%

        \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
    3. Recombined 3 regimes into one program.
    4. Final simplification91.6%

      \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 7.2 \cdot 10^{+18}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 1.22 \cdot 10^{+84}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon - x} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \end{array} \]

    Alternative 3: 82.2% accurate, 1.1× speedup?

    \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 7.2 \cdot 10^{+17}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 3 \cdot 10^{+84}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+213}:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
    ; Alternative 3: four regimes split on x at 7.2e17, 3e84, and 3.3e213.
    (FPCore (x eps)
     :precision binary64
     (if (<= x 7.2e+17)
       (/ (+ (exp (* x eps)) (exp (* x (- eps)))) 2.0)
       (if (<= x 3e+84)
         0.0
         (if (<= x 3.3e+213) (/ (+ 1.0 (exp (- (* x eps) x))) 2.0) 0.0))))
    // Alternative 3: piecewise by regime of x.
    //   x <= 7.2e17:  (exp(x*eps) + exp(-x*eps)) / 2
    //   x <= 3e84:    0
    //   x <= 3.3e213: (1 + exp(x*eps - x)) / 2
    //   otherwise:    0
    double code(double x, double eps) {
    	double tmp;
    	if (x <= 7.2e+17) {
    		tmp = (exp((x * eps)) + exp((x * -eps))) / 2.0;
    	} else if (x <= 3e+84) {
    		tmp = 0.0;
    	} else if (x <= 3.3e+213) {
    		tmp = (1.0 + exp(((x * eps) - x))) / 2.0;
    	} else {
    		tmp = 0.0;
    	}
    	return tmp;
    }
    
    ! Alternative 3: piecewise by regime of x (see the C listing for the four branches).
    ! NOTE(review): "x * -eps" places two operators adjacent, which is nonstandard Fortran;
    ! many compilers accept it as an extension, but "x * (-eps)" would be portable.
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        real(8) :: tmp
        if (x <= 7.2d+17) then
            tmp = (exp((x * eps)) + exp((x * -eps))) / 2.0d0
        else if (x <= 3d+84) then
            tmp = 0.0d0
        else if (x <= 3.3d+213) then
            tmp = (1.0d0 + exp(((x * eps) - x))) / 2.0d0
        else
            tmp = 0.0d0
        end if
        code = tmp
    end function
    
    // Alternative 3: piecewise by regime of x.
    //   x <= 7.2e17:  (exp(x*eps) + exp(-x*eps)) / 2
    //   x <= 3e84:    0
    //   x <= 3.3e213: (1 + exp(x*eps - x)) / 2
    //   otherwise:    0
    public static double code(double x, double eps) {
    	double tmp;
    	if (x <= 7.2e+17) {
    		tmp = (Math.exp((x * eps)) + Math.exp((x * -eps))) / 2.0;
    	} else if (x <= 3e+84) {
    		tmp = 0.0;
    	} else if (x <= 3.3e+213) {
    		tmp = (1.0 + Math.exp(((x * eps) - x))) / 2.0;
    	} else {
    		tmp = 0.0;
    	}
    	return tmp;
    }
    
    def code(x, eps):
    	"""Alternative 3: piecewise by regime of x.

    	x <= 7.2e17:  (exp(x*eps) + exp(-x*eps)) / 2
    	x <= 3e84:    0
    	x <= 3.3e213: (1 + exp(x*eps - x)) / 2
    	otherwise:    0
    	"""
    	tmp = 0  # placeholder; every branch below reassigns it
    	if x <= 7.2e+17:
    		tmp = (math.exp((x * eps)) + math.exp((x * -eps))) / 2.0
    	elif x <= 3e+84:
    		tmp = 0.0
    	elif x <= 3.3e+213:
    		tmp = (1.0 + math.exp(((x * eps) - x))) / 2.0
    	else:
    		tmp = 0.0
    	return tmp
    
    # Alternative 3: piecewise by regime of x (see the FPCore listing); intermediates rounded to Float64.
    function code(x, eps)
    	tmp = 0.0  # placeholder; every branch below reassigns it
    	if (x <= 7.2e+17)
    		tmp = Float64(Float64(exp(Float64(x * eps)) + exp(Float64(x * Float64(-eps)))) / 2.0);
    	elseif (x <= 3e+84)
    		tmp = 0.0;
    	elseif (x <= 3.3e+213)
    		tmp = Float64(Float64(1.0 + exp(Float64(Float64(x * eps) - x))) / 2.0);
    	else
    		tmp = 0.0;
    	end
    	return tmp
    end
    
    % Alternative 3: piecewise by regime of x.
    %   x <= 7.2e17:  (exp(x*eps) + exp(-x*eps)) / 2
    %   x <= 3e84:    0
    %   x <= 3.3e213: (1 + exp(x*eps - x)) / 2
    %   otherwise:    0
    function tmp_2 = code(x, eps)
    	tmp = 0.0;
    	if (x <= 7.2e+17)
    		tmp = (exp((x * eps)) + exp((x * -eps))) / 2.0;
    	elseif (x <= 3e+84)
    		tmp = 0.0;
    	elseif (x <= 3.3e+213)
    		tmp = (1.0 + exp(((x * eps) - x))) / 2.0;
    	else
    		tmp = 0.0;
    	end
    	tmp_2 = tmp;
    end
    
    code[x_, eps_] := If[LessEqual[x, 7.2e+17], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * (-eps)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 3e+84], 0.0, If[LessEqual[x, 3.3e+213], N[(N[(1.0 + N[Exp[N[(N[(x * eps), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    \mathbf{if}\;x \leq 7.2 \cdot 10^{+17}:\\
    \;\;\;\;\frac{e^{x \cdot \varepsilon} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\
    
    \mathbf{elif}\;x \leq 3 \cdot 10^{+84}:\\
    \;\;\;\;0\\
    
    \mathbf{elif}\;x \leq 3.3 \cdot 10^{+213}:\\
    \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\
    
    \mathbf{else}:\\
    \;\;\;\;0\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 3 regimes
    2. if x < 7.2e17

      1. Initial program 62.1%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. div-sub — accuracy 62.1%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
        2. +-rgt-identity — accuracy 62.1%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        3. div-sub — accuracy 62.1%

          \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      3. Simplified62.1%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in eps around inf 99.6%

        \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
      5. Step-by-step derivation
        1. mul-1-neg99.6%

          \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
        2. *-commutative99.6%

          \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
        3. mul-1-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
        4. exp-prod99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
        5. +-commutative99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
        6. remove-double-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right) \cdot x\right)}\right)}{2} \]
        7. mul-1-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right) \cdot x\right)}\right)}{2} \]
        8. sub-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
        9. exp-prod99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
        10. mul-1-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
        11. *-commutative99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
        12. sub-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
        13. mul-1-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \left(-\color{blue}{\left(-\varepsilon\right)}\right)\right)}\right)}{2} \]
        14. remove-double-neg99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
        15. distribute-rgt-neg-in99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]
        16. +-commutative99.6%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]
      6. Simplified99.6%

        \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
      7. Taylor expanded in eps around inf 99.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
      8. Step-by-step derivation
        1. associate-*r*99.2%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
        2. neg-mul-199.2%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}\right)}{2} \]
      9. Simplified99.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}\right)}{2} \]
      10. Taylor expanded in x around inf 99.2%

        \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
      11. Taylor expanded in eps around inf 99.5%

        \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
      12. Step-by-step derivation
        1. *-commutative99.5%

          \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
      13. Simplified99.5%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]

      if 7.2e17 < x < 2.99999999999999996e84 or 3.3000000000000001e213 < x

      1. Initial program 100.0%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        2. Taylor expanded in eps around 0 70.7%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
        3. Step-by-step derivation
          1. div-sub70.7%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          2. rec-exp70.7%

            \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
          3. mul-1-neg70.7%

            \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
          4. +-inverses70.7%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        4. Simplified70.7%

          \[\leadsto \frac{\color{blue}{0}}{2} \]

        if 2.99999999999999996e84 < x < 3.3000000000000001e213

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. div-sub100.0%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
          2. +-rgt-identity100.0%

            \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          3. div-sub100.0%

            \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
        3. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        4. Taylor expanded in x around 0 28.6%

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
        5. Taylor expanded in eps around inf 29.0%

          \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} + 1}}{2} \]
        6. Step-by-step derivation
          1. exp-prod29.0%

            \[\leadsto \frac{\color{blue}{{\left(e^{-1}\right)}^{\left(\left(1 - \varepsilon\right) \cdot x\right)}} + 1}{2} \]
          2. sub-neg29.0%

            \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)} + 1}{2} \]
          3. neg-mul-129.0%

            \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)} + 1}{2} \]
          4. exp-prod29.0%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}} + 1}{2} \]
          5. +-commutative29.0%

            \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}}}{2} \]
          6. neg-mul-129.0%

            \[\leadsto \frac{1 + e^{\color{blue}{-\left(1 + -1 \cdot \varepsilon\right) \cdot x}}}{2} \]
          7. *-commutative29.0%

            \[\leadsto \frac{1 + e^{-\color{blue}{x \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
          8. neg-mul-129.0%

            \[\leadsto \frac{1 + e^{-x \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
          9. sub-neg29.0%

            \[\leadsto \frac{1 + e^{-x \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
          10. distribute-rgt-neg-in29.0%

            \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
        7. Simplified29.0%

          \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
        8. Taylor expanded in x around 0 29.0%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon - 1\right) \cdot x}}}{2} \]
        9. Step-by-step derivation
          1. sub-neg29.0%

            \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon + \left(-1\right)\right)} \cdot x}}{2} \]
          2. metadata-eval29.0%

            \[\leadsto \frac{1 + e^{\left(\varepsilon + \color{blue}{-1}\right) \cdot x}}{2} \]
          3. *-commutative29.0%

            \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(\varepsilon + -1\right)}}}{2} \]
          4. distribute-rgt-in29.0%

            \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x + -1 \cdot x}}}{2} \]
          5. neg-mul-129.0%

            \[\leadsto \frac{1 + e^{\varepsilon \cdot x + \color{blue}{\left(-x\right)}}}{2} \]
          6. sub-neg29.0%

            \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]
        10. Simplified29.0%

          \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]
      3. Recombined 3 regimes into one program.
      4. Final simplification87.1%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 7.2 \cdot 10^{+17}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 3 \cdot 10^{+84}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 3.3 \cdot 10^{+213}:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

      Alternative 4: 66.7% accurate, 1.9× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -400:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 6.8 \cdot 10^{+18} \lor \neg \left(x \leq 3.8 \cdot 10^{+84}\right) \land x \leq 1.4 \cdot 10^{+217}:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
      ;; Alternative 4 (66.7% accurate, 1.9x speedup): three-regime rewrite.
      ;; x <= -400: the value is computed as expm1(-x)/eps, halved.
      ;; Two middle bands keep the surviving (1 + e^(x*eps - x))/2 term;
      ;; everywhere else the alternative returns 0.
      (FPCore (x eps)
       :precision binary64
       (if (<= x -400.0)
         (/ (/ (expm1 (- x)) eps) 2.0)
         (if (or (<= x 6.8e+18) (and (not (<= x 3.8e+84)) (<= x 1.4e+217)))
           (/ (+ 1.0 (exp (- (* x eps) x))) 2.0)
           0.0)))
      double code(double x, double eps) {
      	double tmp;
      	if (x <= -400.0) {
      		tmp = (expm1(-x) / eps) / 2.0;
      	} else if ((x <= 6.8e+18) || (!(x <= 3.8e+84) && (x <= 1.4e+217))) {
      		tmp = (1.0 + exp(((x * eps) - x))) / 2.0;
      	} else {
      		tmp = 0.0;
      	}
      	return tmp;
      }
      
      /**
       * Alternative 4: three-regime approximation of
       * ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2.
       */
      public static double code(double x, double eps) {
      	// Large negative x: the value is expm1(-x)/eps, halved.
      	if (x <= -400.0) {
      		return Math.expm1(-x) / eps / 2.0;
      	}
      	// Two bands share the (1 + exp(x*eps - x))/2 form.
      	boolean lowBand = x <= 6.8e+18;
      	boolean highBand = !(x <= 3.8e+84) && x <= 1.4e+217;
      	if (lowBand || highBand) {
      		return (1.0 + Math.exp(x * eps - x)) / 2.0;
      	}
      	// Outside every band (including NaN inputs): 0.
      	return 0.0;
      }
      
      def code(x, eps):
      	"""Alternative 4: three-regime approximation of
      	((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
      	"""
      	if x <= -400.0:
      		# Large negative x: the value is expm1(-x)/eps, halved.
      		return math.expm1(-x) / eps / 2.0
      	# Two bands share the (1 + exp(x*eps - x))/2 form.
      	low_band = x <= 6.8e+18
      	high_band = not (x <= 3.8e+84) and x <= 1.4e+217
      	if low_band or high_band:
      		return (1.0 + math.exp(x * eps - x)) / 2.0
      	# Outside every band (including NaN inputs): 0.
      	return 0.0
      
      # Alternative 4: three-regime approximation of
      # ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
      function code(x, eps)
      	if x <= -400.0
      		# Large negative x: the value is expm1(-x)/eps, halved.
      		return Float64(Float64(expm1(Float64(-x)) / eps) / 2.0)
      	end
      	# Two bands share the (1 + exp(x*eps - x))/2 form.
      	low_band = x <= 6.8e+18
      	high_band = !(x <= 3.8e+84) && (x <= 1.4e+217)
      	if low_band || high_band
      		return Float64(Float64(1.0 + exp(Float64(Float64(x * eps) - x))) / 2.0)
      	end
      	# Outside every band (including NaN inputs): 0.
      	return 0.0
      end
      
      (* Alternative 4: for x <= -400 the value is (e^(-x) - 1)/eps halved — the
         expm1 of the other language targets spelled as Exp[-x] - 1 here; two bands
         share (1 + e^(x eps - x))/2; else 0.
         NOTE(review): N[Not[...], $MachinePrecision] wraps a Boolean in numeric N —
         harmless in Mathematica but looks like a code-generator artifact; confirm. *)
      code[x_, eps_] := If[LessEqual[x, -400.0], N[(N[(N[(Exp[(-x)] - 1), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 6.8e+18], And[N[Not[LessEqual[x, 3.8e+84]], $MachinePrecision], LessEqual[x, 1.4e+217]]], N[(N[(1.0 + N[Exp[N[(N[(x * eps), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq -400:\\
      \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\
      
      \mathbf{elif}\;x \leq 6.8 \cdot 10^{+18} \lor \neg \left(x \leq 3.8 \cdot 10^{+84}\right) \land x \leq 1.4 \cdot 10^{+217}:\\
      \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\
      
      \mathbf{else}:\\
      \;\;\;\;0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if x < -400

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. div-sub100.0%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
          2. +-rgt-identity100.0%

            \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          3. div-sub100.0%

            \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
        3. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        4. Taylor expanded in x around 0 66.1%

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
        5. Taylor expanded in eps around 0 35.0%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
        6. Step-by-step derivation
          1. expm1-def35.0%

            \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
          2. neg-mul-135.0%

            \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
        7. Simplified35.0%

          \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]

        if -400 < x < 6.8e18 or 3.8000000000000001e84 < x < 1.39999999999999997e217

        1. Initial program 59.9%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. div-sub59.9%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
          2. +-rgt-identity59.9%

            \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          3. div-sub59.9%

            \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
        3. Simplified59.9%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        4. Taylor expanded in x around 0 36.9%

          \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
        5. Taylor expanded in eps around inf 77.1%

          \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} + 1}}{2} \]
        6. Step-by-step derivation
          1. exp-prod77.1%

            \[\leadsto \frac{\color{blue}{{\left(e^{-1}\right)}^{\left(\left(1 - \varepsilon\right) \cdot x\right)}} + 1}{2} \]
          2. sub-neg77.1%

            \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)} + 1}{2} \]
          3. neg-mul-177.1%

            \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)} + 1}{2} \]
          4. exp-prod77.1%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}} + 1}{2} \]
          5. +-commutative77.1%

            \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}}}{2} \]
          6. neg-mul-177.1%

            \[\leadsto \frac{1 + e^{\color{blue}{-\left(1 + -1 \cdot \varepsilon\right) \cdot x}}}{2} \]
          7. *-commutative77.1%

            \[\leadsto \frac{1 + e^{-\color{blue}{x \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
          8. neg-mul-177.1%

            \[\leadsto \frac{1 + e^{-x \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
          9. sub-neg77.1%

            \[\leadsto \frac{1 + e^{-x \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
          10. distribute-rgt-neg-in77.1%

            \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
        7. Simplified77.1%

          \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
        8. Taylor expanded in x around 0 77.1%

          \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon - 1\right) \cdot x}}}{2} \]
        9. Step-by-step derivation
          1. sub-neg77.1%

            \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon + \left(-1\right)\right)} \cdot x}}{2} \]
          2. metadata-eval77.1%

            \[\leadsto \frac{1 + e^{\left(\varepsilon + \color{blue}{-1}\right) \cdot x}}{2} \]
          3. *-commutative77.1%

            \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(\varepsilon + -1\right)}}}{2} \]
          4. distribute-rgt-in77.1%

            \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x + -1 \cdot x}}}{2} \]
          5. neg-mul-177.1%

            \[\leadsto \frac{1 + e^{\varepsilon \cdot x + \color{blue}{\left(-x\right)}}}{2} \]
          6. sub-neg77.1%

            \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]
        10. Simplified77.1%

          \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]

        if 6.8e18 < x < 3.8000000000000001e84 or 1.39999999999999997e217 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. Simplified100.0%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          2. Taylor expanded in eps around 0 70.7%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          3. Step-by-step derivation
            1. div-sub70.7%

              \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
            2. rec-exp70.7%

              \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
            3. mul-1-neg70.7%

              \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
            4. +-inverses70.7%

              \[\leadsto \frac{\color{blue}{0}}{2} \]
          4. Simplified70.7%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        3. Recombined 3 regimes into one program.
        4. Final simplification69.6%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -400:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 6.8 \cdot 10^{+18} \lor \neg \left(x \leq 3.8 \cdot 10^{+84}\right) \land x \leq 1.4 \cdot 10^{+217}:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

        Alternative 5: 67.4% accurate, 1.9× speedup?

        \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -4 \cdot 10^{-307}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 2.5 \cdot 10^{+15} \lor \neg \left(x \leq 3.65 \cdot 10^{+84}\right) \land x \leq 2.8 \cdot 10^{+215}:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
        ;; Alternative 5 (67.4% accurate, 1.9x speedup): three-regime rewrite.
        ;; Negative x keeps (1 + e^(x*(-eps)))/2; two positive bands keep
        ;; (1 + e^(x*eps - x))/2; everywhere else the alternative returns 0.
        (FPCore (x eps)
         :precision binary64
         (if (<= x -4e-307)
           (/ (+ 1.0 (exp (* x (- eps)))) 2.0)
           (if (or (<= x 2.5e+15) (and (not (<= x 3.65e+84)) (<= x 2.8e+215)))
             (/ (+ 1.0 (exp (- (* x eps) x))) 2.0)
             0.0)))
        double code(double x, double eps) {
        	double tmp;
        	if (x <= -4e-307) {
        		tmp = (1.0 + exp((x * -eps))) / 2.0;
        	} else if ((x <= 2.5e+15) || (!(x <= 3.65e+84) && (x <= 2.8e+215))) {
        		tmp = (1.0 + exp(((x * eps) - x))) / 2.0;
        	} else {
        		tmp = 0.0;
        	}
        	return tmp;
        }
        
        ! Alternative 5: three-regime approximation of
        ! ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            logical :: low_band, high_band

            if (x <= (-4d-307)) then
                ! Negative x: keep only the exp(x * (-eps)) term.
                ! (x * (-eps) is the standard-conforming spelling of x * -eps.)
                code = (1.0d0 + exp(x * (-eps))) / 2.0d0
                return
            end if
            ! Two positive bands share the (1 + exp(x*eps - x))/2 form.
            low_band = x <= 2.5d+15
            high_band = (.not. (x <= 3.65d+84)) .and. (x <= 2.8d+215)
            if (low_band .or. high_band) then
                code = (1.0d0 + exp((x * eps) - x)) / 2.0d0
            else
                ! Outside every band (including NaN inputs): 0.
                code = 0.0d0
            end if
        end function
        
        /**
         * Alternative 5: three-regime approximation of
         * ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2.
         */
        public static double code(double x, double eps) {
        	// Negative x: keep only the exp(x * -eps) term.
        	if (x <= -4e-307) {
        		return (1.0 + Math.exp(x * -eps)) / 2.0;
        	}
        	// Two positive bands share the (1 + exp(x*eps - x))/2 form.
        	boolean lowBand = x <= 2.5e+15;
        	boolean highBand = !(x <= 3.65e+84) && x <= 2.8e+215;
        	if (lowBand || highBand) {
        		return (1.0 + Math.exp(x * eps - x)) / 2.0;
        	}
        	// Outside every band (including NaN inputs): 0.
        	return 0.0;
        }
        
        def code(x, eps):
        	"""Alternative 5: three-regime approximation of
        	((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
        	"""
        	if x <= -4e-307:
        		# Negative x: keep only the exp(x * -eps) term.
        		return (1.0 + math.exp(x * -eps)) / 2.0
        	# Two positive bands share the (1 + exp(x*eps - x))/2 form.
        	low_band = x <= 2.5e+15
        	high_band = not (x <= 3.65e+84) and x <= 2.8e+215
        	if low_band or high_band:
        		return (1.0 + math.exp(x * eps - x)) / 2.0
        	# Outside every band (including NaN inputs): 0.
        	return 0.0
        
        # Alternative 5: three-regime approximation of
        # ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
        function code(x, eps)
        	if x <= -4e-307
        		# Negative x: keep only the exp(x * (-eps)) term.
        		return Float64(Float64(1.0 + exp(Float64(x * Float64(-eps)))) / 2.0)
        	end
        	# Two positive bands share the (1 + exp(x*eps - x))/2 form.
        	low_band = x <= 2.5e+15
        	high_band = !(x <= 3.65e+84) && (x <= 2.8e+215)
        	if low_band || high_band
        		return Float64(Float64(1.0 + exp(Float64(Float64(x * eps) - x))) / 2.0)
        	end
        	# Outside every band (including NaN inputs): 0.
        	return 0.0
        end
        
        % Alternative 5: three-regime approximation of
        % ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
        function tmp_2 = code(x, eps)
        	if (x <= -4e-307)
        		% Negative x: keep only the exp(x * -eps) term.
        		tmp_2 = (1.0 + exp(x * -eps)) / 2.0;
        	elseif ((x <= 2.5e+15) || (~(x <= 3.65e+84) && (x <= 2.8e+215)))
        		% Two positive bands share the (1 + exp(x*eps - x))/2 form.
        		tmp_2 = (1.0 + exp(x * eps - x)) / 2.0;
        	else
        		% Outside every band (including NaN inputs): 0.
        		tmp_2 = 0.0;
        	end
        end
        
        (* Alternative 5: for x <= -4*10^-307 evaluate (1 + e^(x (-eps)))/2; two
           positive bands share (1 + e^(x eps - x))/2; else 0.
           NOTE(review): N[Not[...], $MachinePrecision] wraps a Boolean in numeric N —
           harmless but looks like a code-generator artifact; confirm. *)
        code[x_, eps_] := If[LessEqual[x, -4e-307], N[(N[(1.0 + N[Exp[N[(x * (-eps)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 2.5e+15], And[N[Not[LessEqual[x, 3.65e+84]], $MachinePrecision], LessEqual[x, 2.8e+215]]], N[(N[(1.0 + N[Exp[N[(N[(x * eps), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]
        
        \begin{array}{l}
        
        \\
        \begin{array}{l}
        \mathbf{if}\;x \leq -4 \cdot 10^{-307}:\\
        \;\;\;\;\frac{1 + e^{x \cdot \left(-\varepsilon\right)}}{2}\\
        
        \mathbf{elif}\;x \leq 2.5 \cdot 10^{+15} \lor \neg \left(x \leq 3.65 \cdot 10^{+84}\right) \land x \leq 2.8 \cdot 10^{+215}:\\
        \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\
        
        \mathbf{else}:\\
        \;\;\;\;0\\
        
        
        \end{array}
        \end{array}
        
        Derivation
        1. Split input into 3 regimes
        2. if x < -3.99999999999999964e-307

          1. Initial program 72.9%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. div-sub72.9%

              \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
            2. +-rgt-identity72.9%

              \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
            3. div-sub72.9%

              \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
          3. Simplified72.9%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
          4. Taylor expanded in eps around inf 99.8%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
          5. Step-by-step derivation
            1. mul-1-neg99.8%

              \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
            2. *-commutative99.8%

              \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
            3. mul-1-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
            4. exp-prod99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
            5. +-commutative99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
            6. remove-double-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right) \cdot x\right)}\right)}{2} \]
            7. mul-1-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right) \cdot x\right)}\right)}{2} \]
            8. sub-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
            9. exp-prod99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
            10. mul-1-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
            11. *-commutative99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
            12. sub-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
            13. mul-1-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \left(-\color{blue}{\left(-\varepsilon\right)}\right)\right)}\right)}{2} \]
            14. remove-double-neg99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
            15. distribute-rgt-neg-in99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]
            16. +-commutative99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]
          6. Simplified99.8%

            \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
          7. Taylor expanded in eps around inf 99.8%

            \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
          8. Step-by-step derivation
            1. associate-*r*99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
            2. neg-mul-199.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}\right)}{2} \]
          9. Simplified99.8%

            \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}\right)}{2} \]
          10. Taylor expanded in x around inf 99.8%

            \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
          11. Taylor expanded in x around 0 71.3%

            \[\leadsto \frac{\color{blue}{1} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]

          if -3.99999999999999964e-307 < x < 2.5e15 or 3.65e84 < x < 2.8e215

          1. Initial program 62.5%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. div-sub62.5%

              \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
            2. +-rgt-identity62.5%

              \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
            3. div-sub62.5%

              \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
          3. Simplified62.5%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
          4. Taylor expanded in x around 0 34.9%

            \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
          5. Taylor expanded in eps around inf 72.8%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} + 1}}{2} \]
          6. Step-by-step derivation
            1. exp-prod72.8%

              \[\leadsto \frac{\color{blue}{{\left(e^{-1}\right)}^{\left(\left(1 - \varepsilon\right) \cdot x\right)}} + 1}{2} \]
            2. sub-neg72.8%

              \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)} + 1}{2} \]
            3. neg-mul-172.8%

              \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)} + 1}{2} \]
            4. exp-prod72.8%

              \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}} + 1}{2} \]
            5. +-commutative72.8%

              \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}}}{2} \]
            6. neg-mul-172.8%

              \[\leadsto \frac{1 + e^{\color{blue}{-\left(1 + -1 \cdot \varepsilon\right) \cdot x}}}{2} \]
            7. *-commutative72.8%

              \[\leadsto \frac{1 + e^{-\color{blue}{x \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
            8. neg-mul-172.8%

              \[\leadsto \frac{1 + e^{-x \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
            9. sub-neg72.8%

              \[\leadsto \frac{1 + e^{-x \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
            10. distribute-rgt-neg-in72.8%

              \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
          7. Simplified72.8%

            \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
          8. Taylor expanded in x around 0 72.8%

            \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon - 1\right) \cdot x}}}{2} \]
          9. Step-by-step derivation
            1. sub-neg72.8%

              \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon + \left(-1\right)\right)} \cdot x}}{2} \]
            2. metadata-eval72.8%

              \[\leadsto \frac{1 + e^{\left(\varepsilon + \color{blue}{-1}\right) \cdot x}}{2} \]
            3. *-commutative72.8%

              \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(\varepsilon + -1\right)}}}{2} \]
            4. distribute-rgt-in72.8%

              \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x + -1 \cdot x}}}{2} \]
            5. neg-mul-172.8%

              \[\leadsto \frac{1 + e^{\varepsilon \cdot x + \color{blue}{\left(-x\right)}}}{2} \]
            6. sub-neg72.8%

              \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]
          10. Simplified72.8%

            \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]

          if 2.5e15 < x < 3.65e84 or 2.8e215 < x

          1. Initial program 100.0%

            \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          2. Step-by-step derivation
            1. Simplified100.0%

              \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
            2. Taylor expanded in eps around 0 70.7%

              \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
            3. Step-by-step derivation
              1. div-sub70.7%

                \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
              2. rec-exp70.7%

                \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
              3. mul-1-neg70.7%

                \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
              4. +-inverses70.7%

                \[\leadsto \frac{\color{blue}{0}}{2} \]
            4. Simplified70.7%

              \[\leadsto \frac{\color{blue}{0}}{2} \]
          3. Recombined 3 regimes into one program.
          4. Final simplification71.9%

            \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -4 \cdot 10^{-307}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 2.5 \cdot 10^{+15} \lor \neg \left(x \leq 3.65 \cdot 10^{+84}\right) \land x \leq 2.8 \cdot 10^{+215}:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

          Alternative 6: 67.5% accurate, 1.9× speedup?

          \[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{x \cdot \varepsilon - x}\\ \mathbf{if}\;x \leq -1 \cdot 10^{-297}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 7.6 \cdot 10^{+19}:\\ \;\;\;\;\frac{t_0 + \left(1 - x \cdot \varepsilon\right)}{2}\\ \mathbf{elif}\;x \leq 6 \cdot 10^{+84}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 8.2 \cdot 10^{+216}:\\ \;\;\;\;\frac{1 + t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
          ;; Herbie "Alternative 6": piecewise rewrite of
          ;;   ((1 + 1/eps)*e^(-(1-eps)*x) - (1/eps - 1)*e^(-(1+eps)*x)) / 2
          ;; split into regimes over x (thresholds from Herbie's regime
          ;; inference; see the derivation below).
          (FPCore (x eps)
           :precision binary64
           (let* ((t_0 (exp (- (* x eps) x))))
             (if (<= x -1e-297)
               (/ (+ 1.0 (exp (* x (- eps)))) 2.0)
               (if (<= x 7.6e+19)
                 (/ (+ t_0 (- 1.0 (* x eps))) 2.0)
                 (if (<= x 6e+84) 0.0 (if (<= x 8.2e+216) (/ (+ 1.0 t_0) 2.0) 0.0))))))
          double code(double x, double eps) {
          	double t_0 = exp(((x * eps) - x));
          	double tmp;
          	if (x <= -1e-297) {
          		tmp = (1.0 + exp((x * -eps))) / 2.0;
          	} else if (x <= 7.6e+19) {
          		tmp = (t_0 + (1.0 - (x * eps))) / 2.0;
          	} else if (x <= 6e+84) {
          		tmp = 0.0;
          	} else if (x <= 8.2e+216) {
          		tmp = (1.0 + t_0) / 2.0;
          	} else {
          		tmp = 0.0;
          	}
          	return tmp;
          }
          
          ! Herbie-generated piecewise approximation of
          !   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
          ! Regime thresholds on x come from Herbie's regime inference
          ! (see the derivation accompanying this listing).
          real(8) function code(x, eps)
              real(8), intent (in) :: x
              real(8), intent (in) :: eps
              real(8) :: t_0
              real(8) :: tmp
              ! Shared subterm e^(x*eps - x); used in two of the branches.
              t_0 = exp(((x * eps) - x))
              if (x <= (-1d-297)) then
                  tmp = (1.0d0 + exp((x * -eps))) / 2.0d0
              else if (x <= 7.6d+19) then
                  ! Series form 1 - x*eps replaces one exponential here.
                  tmp = (t_0 + (1.0d0 - (x * eps))) / 2.0d0
              else if (x <= 6d+84) then
                  tmp = 0.0d0
              else if (x <= 8.2d+216) then
                  tmp = (1.0d0 + t_0) / 2.0d0
              else
                  tmp = 0.0d0
              end if
              code = tmp
          end function
          
          /**
           * Herbie-generated piecewise approximation of
           * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
           * Each guard selects one regime of x; thresholds come from
           * Herbie's regime inference (see the accompanying derivation).
           */
          public static double code(double x, double eps) {
          	// Shared subterm e^(x*eps - x), needed by two of the regimes.
          	final double t_0 = Math.exp(((x * eps) - x));
          	if (x <= -1e-297) {
          		return (1.0 + Math.exp((x * -eps))) / 2.0;
          	}
          	if (x <= 7.6e+19) {
          		// Series form 1 - x*eps replaces one exponential here.
          		return (t_0 + (1.0 - (x * eps))) / 2.0;
          	}
          	if (x <= 6e+84) {
          		return 0.0;
          	}
          	if (x <= 8.2e+216) {
          		return (1.0 + t_0) / 2.0;
          	}
          	return 0.0;
          }
          
          def code(x, eps):
          	"""Herbie-generated piecewise approximation of
          	((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.

          	Each guard covers one regime of x; the thresholds come from
          	Herbie's regime inference (see the accompanying derivation).
          	"""
          	# Shared subterm e^(x*eps - x), needed by two of the regimes.
          	t_0 = math.exp(((x * eps) - x))
          	if x <= -1e-297:
          		return (1.0 + math.exp((x * -eps))) / 2.0
          	if x <= 7.6e+19:
          		# Series form 1 - x*eps replaces one exponential here.
          		return (t_0 + (1.0 - (x * eps))) / 2.0
          	if x <= 6e+84:
          		return 0.0
          	if x <= 8.2e+216:
          		return (1.0 + t_0) / 2.0
          	return 0.0
          
          # Herbie-generated piecewise approximation of
          #   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
          # The explicit Float64(...) wrappers pin every intermediate to
          # binary64 rounding, matching the FPCore :precision annotation.
          function code(x, eps)
          	# Shared subterm e^(x*eps - x), used by two of the regimes.
          	t_0 = exp(Float64(Float64(x * eps) - x))
          	tmp = 0.0
          	if (x <= -1e-297)
          		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-eps)))) / 2.0);
          	elseif (x <= 7.6e+19)
          		# Series form 1 - x*eps replaces one exponential here.
          		tmp = Float64(Float64(t_0 + Float64(1.0 - Float64(x * eps))) / 2.0);
          	elseif (x <= 6e+84)
          		tmp = 0.0;
          	elseif (x <= 8.2e+216)
          		tmp = Float64(Float64(1.0 + t_0) / 2.0);
          	else
          		tmp = 0.0;
          	end
          	return tmp
          end
          
          % Herbie-generated piecewise approximation of
          %   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
          % Regime thresholds on x come from Herbie's regime inference
          % (see the derivation accompanying this listing).
          function tmp_2 = code(x, eps)
          	% Shared subterm e^(x*eps - x), used by two of the regimes.
          	t_0 = exp(((x * eps) - x));
          	tmp = 0.0;
          	if (x <= -1e-297)
          		tmp = (1.0 + exp((x * -eps))) / 2.0;
          	elseif (x <= 7.6e+19)
          		% Series form 1 - x*eps replaces one exponential here.
          		tmp = (t_0 + (1.0 - (x * eps))) / 2.0;
          	elseif (x <= 6e+84)
          		tmp = 0.0;
          	elseif (x <= 8.2e+216)
          		tmp = (1.0 + t_0) / 2.0;
          	else
          		tmp = 0.0;
          	end
          	tmp_2 = tmp;
          end
          
          (* Herbie-generated piecewise approximation of
             ((1 + 1/eps)*Exp[-(1-eps)*x] - (1/eps - 1)*Exp[-(1+eps)*x]) / 2.
             The N[..., $MachinePrecision] wrappers force machine-precision
             rounding at every intermediate, mirroring binary64 evaluation. *)
          code[x_, eps_] := Block[{t$95$0 = N[Exp[N[(N[(x * eps), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[x, -1e-297], N[(N[(1.0 + N[Exp[N[(x * (-eps)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 7.6e+19], N[(N[(t$95$0 + N[(1.0 - N[(x * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 6e+84], 0.0, If[LessEqual[x, 8.2e+216], N[(N[(1.0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]]]
          
          \begin{array}{l}
          
          \\
          \begin{array}{l}
          t_0 := e^{x \cdot \varepsilon - x}\\
          \mathbf{if}\;x \leq -1 \cdot 10^{-297}:\\
          \;\;\;\;\frac{1 + e^{x \cdot \left(-\varepsilon\right)}}{2}\\
          
          \mathbf{elif}\;x \leq 7.6 \cdot 10^{+19}:\\
          \;\;\;\;\frac{t_0 + \left(1 - x \cdot \varepsilon\right)}{2}\\
          
          \mathbf{elif}\;x \leq 6 \cdot 10^{+84}:\\
          \;\;\;\;0\\
          
          \mathbf{elif}\;x \leq 8.2 \cdot 10^{+216}:\\
          \;\;\;\;\frac{1 + t_0}{2}\\
          
          \mathbf{else}:\\
          \;\;\;\;0\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 4 regimes
          2. if x < -1.00000000000000004e-297

            1. Initial program 72.9%

              \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
            2. Step-by-step derivation
              1. div-sub72.9%

                \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
              2. +-rgt-identity72.9%

                \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
              3. div-sub72.9%

                \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
            3. Simplified72.9%

              \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
            4. Taylor expanded in eps around inf 99.8%

              \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
            5. Step-by-step derivation
              1. mul-1-neg99.8%

                \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
              2. *-commutative99.8%

                \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
              3. mul-1-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
              4. exp-prod99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
              5. +-commutative99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
              6. remove-double-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right) \cdot x\right)}\right)}{2} \]
              7. mul-1-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right) \cdot x\right)}\right)}{2} \]
              8. sub-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
              9. exp-prod99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
              10. mul-1-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
              11. *-commutative99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
              12. sub-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
              13. mul-1-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \left(-\color{blue}{\left(-\varepsilon\right)}\right)\right)}\right)}{2} \]
              14. remove-double-neg99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
              15. distribute-rgt-neg-in99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]
              16. +-commutative99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]
            6. Simplified99.8%

              \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
            7. Taylor expanded in eps around inf 99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
            8. Step-by-step derivation
              1. associate-*r*99.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
              2. neg-mul-199.8%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}\right)}{2} \]
            9. Simplified99.8%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}\right)}{2} \]
            10. Taylor expanded in x around inf 99.8%

              \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
            11. Taylor expanded in x around 0 71.3%

              \[\leadsto \frac{\color{blue}{1} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]

            if -1.00000000000000004e-297 < x < 7.6e19

            1. Initial program 49.9%

              \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
            2. Step-by-step derivation
              1. div-sub49.9%

                \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
              2. +-rgt-identity49.9%

                \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
              3. div-sub49.9%

                \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
            3. Simplified49.9%

              \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
            4. Taylor expanded in eps around inf 99.5%

              \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
            5. Step-by-step derivation
              1. mul-1-neg99.5%

                \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
              2. *-commutative99.5%

                \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
              3. mul-1-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
              4. exp-prod99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
              5. +-commutative99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
              6. remove-double-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right) \cdot x\right)}\right)}{2} \]
              7. mul-1-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right) \cdot x\right)}\right)}{2} \]
              8. sub-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
              9. exp-prod99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
              10. mul-1-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
              11. *-commutative99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
              12. sub-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
              13. mul-1-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \left(-\color{blue}{\left(-\varepsilon\right)}\right)\right)}\right)}{2} \]
              14. remove-double-neg99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
              15. distribute-rgt-neg-in99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]
              16. +-commutative99.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]
            6. Simplified99.5%

              \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
            7. Taylor expanded in eps around inf 98.5%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
            8. Step-by-step derivation
              1. associate-*r*98.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
              2. neg-mul-198.5%

                \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}\right)}{2} \]
            9. Simplified98.5%

              \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}\right)}{2} \]
            10. Taylor expanded in x around inf 98.5%

              \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]
            11. Taylor expanded in eps around 0 88.5%

              \[\leadsto \frac{e^{\varepsilon \cdot x - x} - \left(-\color{blue}{\left(-1 \cdot \left(\varepsilon \cdot x\right) + 1\right)}\right)}{2} \]
            12. Step-by-step derivation
              1. +-commutative88.5%

                \[\leadsto \frac{e^{\varepsilon \cdot x - x} - \left(-\color{blue}{\left(1 + -1 \cdot \left(\varepsilon \cdot x\right)\right)}\right)}{2} \]
              2. mul-1-neg88.5%

                \[\leadsto \frac{e^{\varepsilon \cdot x - x} - \left(-\left(1 + \color{blue}{\left(-\varepsilon \cdot x\right)}\right)\right)}{2} \]
              3. unsub-neg88.5%

                \[\leadsto \frac{e^{\varepsilon \cdot x - x} - \left(-\color{blue}{\left(1 - \varepsilon \cdot x\right)}\right)}{2} \]
            13. Simplified88.5%

              \[\leadsto \frac{e^{\varepsilon \cdot x - x} - \left(-\color{blue}{\left(1 - \varepsilon \cdot x\right)}\right)}{2} \]

            if 7.6e19 < x < 5.99999999999999992e84 or 8.1999999999999995e216 < x

            1. Initial program 100.0%

              \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
            2. Step-by-step derivation
              1. Simplified100.0%

                \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
              2. Taylor expanded in eps around 0 70.7%

                \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
              3. Step-by-step derivation
                1. div-sub70.7%

                  \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                2. rec-exp70.7%

                  \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
                3. mul-1-neg70.7%

                  \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
                4. +-inverses70.7%

                  \[\leadsto \frac{\color{blue}{0}}{2} \]
              4. Simplified70.7%

                \[\leadsto \frac{\color{blue}{0}}{2} \]

              if 5.99999999999999992e84 < x < 8.1999999999999995e216

              1. Initial program 100.0%

                \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
              2. Step-by-step derivation
                1. div-sub100.0%

                  \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                2. +-rgt-identity100.0%

                  \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                3. div-sub100.0%

                  \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
              3. Simplified100.0%

                \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
              4. Taylor expanded in x around 0 28.6%

                \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
              5. Taylor expanded in eps around inf 29.0%

                \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} + 1}}{2} \]
              6. Step-by-step derivation
                1. exp-prod29.0%

                  \[\leadsto \frac{\color{blue}{{\left(e^{-1}\right)}^{\left(\left(1 - \varepsilon\right) \cdot x\right)}} + 1}{2} \]
                2. sub-neg29.0%

                  \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)} + 1}{2} \]
                3. neg-mul-129.0%

                  \[\leadsto \frac{{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)} + 1}{2} \]
                4. exp-prod29.0%

                  \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}} + 1}{2} \]
                5. +-commutative29.0%

                  \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)}}}{2} \]
                6. neg-mul-129.0%

                  \[\leadsto \frac{1 + e^{\color{blue}{-\left(1 + -1 \cdot \varepsilon\right) \cdot x}}}{2} \]
                7. *-commutative29.0%

                  \[\leadsto \frac{1 + e^{-\color{blue}{x \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
                8. neg-mul-129.0%

                  \[\leadsto \frac{1 + e^{-x \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
                9. sub-neg29.0%

                  \[\leadsto \frac{1 + e^{-x \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
                10. distribute-rgt-neg-in29.0%

                  \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
              7. Simplified29.0%

                \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(-\left(1 - \varepsilon\right)\right)}}}{2} \]
              8. Taylor expanded in x around 0 29.0%

                \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon - 1\right) \cdot x}}}{2} \]
              9. Step-by-step derivation
                1. sub-neg29.0%

                  \[\leadsto \frac{1 + e^{\color{blue}{\left(\varepsilon + \left(-1\right)\right)} \cdot x}}{2} \]
                2. metadata-eval29.0%

                  \[\leadsto \frac{1 + e^{\left(\varepsilon + \color{blue}{-1}\right) \cdot x}}{2} \]
                3. *-commutative29.0%

                  \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \left(\varepsilon + -1\right)}}}{2} \]
                4. distribute-rgt-in29.0%

                  \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x + -1 \cdot x}}}{2} \]
                5. neg-mul-129.0%

                  \[\leadsto \frac{1 + e^{\varepsilon \cdot x + \color{blue}{\left(-x\right)}}}{2} \]
                6. sub-neg29.0%

                  \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]
              10. Simplified29.0%

                \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x - x}}}{2} \]
            3. Recombined 4 regimes into one program.
            4. Final simplification72.2%

              \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1 \cdot 10^{-297}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 7.6 \cdot 10^{+19}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon - x} + \left(1 - x \cdot \varepsilon\right)}{2}\\ \mathbf{elif}\;x \leq 6 \cdot 10^{+84}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 8.2 \cdot 10^{+216}:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

            Alternative 7: 63.6% accurate, 2.1× speedup?

            \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -480:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 30000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
            ; Herbie alternative 7: three-regime piecewise rewrite of the original
            ; expression — expm1 form for x <= -480, constant 1 up to 3e7, 0 beyond.
            (FPCore (x eps)
             :precision binary64
             (if (<= x -480.0)
               (/ (/ (expm1 (- x)) eps) 2.0)
               (if (<= x 30000000.0) 1.0 0.0)))
            double code(double x, double eps) {
            	double tmp;
            	if (x <= -480.0) {
            		tmp = (expm1(-x) / eps) / 2.0;
            	} else if (x <= 30000000.0) {
            		tmp = 1.0;
            	} else {
            		tmp = 0.0;
            	}
            	return tmp;
            }
            
            // Herbie alternative 7: piecewise rewrite — expm1-based branch for
            // x <= -480, constant 1 up to 3e7, 0 beyond.
            public static double code(double x, double eps) {
            	double tmp;
            	if (x <= -480.0) {
            		// Dominant exponential term via expm1, scaled by 1/eps and 1/2.
            		tmp = (Math.expm1(-x) / eps) / 2.0;
            	} else if (x <= 30000000.0) {
            		tmp = 1.0;
            	} else {
            		tmp = 0.0;
            	}
            	return tmp;
            }
            
            def code(x, eps):
            	# Herbie alternative 7: three-regime piecewise rewrite of the
            	# original expression.
            	if x <= -480.0:
            		# Dominant exponential term via expm1, scaled by 1/eps and 1/2.
            		return (math.expm1(-x) / eps) / 2.0
            	if x <= 30000000.0:
            		return 1.0
            	return 0.0
            
            # Herbie alternative 7: piecewise rewrite — expm1-based branch for
            # x <= -480, constant 1 up to 3e7, 0 beyond.
            function code(x, eps)
            	tmp = 0.0
            	if (x <= -480.0)
            		tmp = Float64(Float64(expm1(Float64(-x)) / eps) / 2.0);
            	elseif (x <= 30000000.0)
            		tmp = 1.0;
            	else
            		tmp = 0.0;
            	end
            	return tmp
            end
            
            (* Herbie alternative 7; Exp[-x] - 1 stands in for expm1. *)
            code[x_, eps_] := If[LessEqual[x, -480.0], N[(N[(N[(Exp[(-x)] - 1), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 30000000.0], 1.0, 0.0]]
            
            \begin{array}{l}
            
            \\
            \begin{array}{l}
            \mathbf{if}\;x \leq -480:\\
            \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\
            
            \mathbf{elif}\;x \leq 30000000:\\
            \;\;\;\;1\\
            
            \mathbf{else}:\\
            \;\;\;\;0\\
            
            
            \end{array}
            \end{array}
            
            Derivation
            1. Split input into 3 regimes
            2. if x < -480

              1. Initial program 100.0%

                \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
              2. Step-by-step derivation
                1. div-sub100.0%

                  \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                2. +-rgt-identity100.0%

                  \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                3. div-sub100.0%

                  \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
              3. Simplified100.0%

                \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
              4. Taylor expanded in x around 0 66.1%

                \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
              5. Taylor expanded in eps around 0 35.0%

                \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
              6. Step-by-step derivation
                1. expm1-def35.0%

                  \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
                2. neg-mul-135.0%

                  \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
              7. Simplified35.0%

                \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]

              if -480 < x < 3e7

              1. Initial program 51.2%

                \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
              2. Step-by-step derivation
                1. div-sub51.2%

                  \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                2. +-rgt-identity51.2%

                  \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                3. div-sub51.2%

                  \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
              3. Simplified51.2%

                \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
              4. Taylor expanded in x around 0 81.6%

                \[\leadsto \frac{\color{blue}{2}}{2} \]

              if 3e7 < x

              1. Initial program 100.0%

                \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
              2. Step-by-step derivation
                1. Simplified100.0%

                  \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
                2. Taylor expanded in eps around 0 58.6%

                  \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                3. Step-by-step derivation
                  1. div-sub58.6%

                    \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                  2. rec-exp58.6%

                    \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
                  3. mul-1-neg58.6%

                    \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
                  4. +-inverses58.6%

                    \[\leadsto \frac{\color{blue}{0}}{2} \]
                4. Simplified58.6%

                  \[\leadsto \frac{\color{blue}{0}}{2} \]
              3. Recombined 3 regimes into one program.
              4. Final simplification68.1%

                \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -480:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 30000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

              Alternative 8: 60.1% accurate, 25.0× speedup?

              \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 0.00145:\\ \;\;\;\;\frac{2 - x \cdot \varepsilon}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
              ; Herbie alternative 8: linear form (2 - x*eps)/2 below the 0.00145
              ; crossover, 0 above.
              (FPCore (x eps)
               :precision binary64
               (if (<= x 0.00145) (/ (- 2.0 (* x eps)) 2.0) 0.0))
              /* Herbie alternative 8: linear form (2 - x*eps)/2 for x <= 0.00145,
               * 0 above the crossover. Same predicate as the derivation, so NaN
               * inputs fall through to 0. */
              double code(double x, double eps) {
              	return (x <= 0.00145) ? (2.0 - (x * eps)) / 2.0 : 0.0;
              }
              
              ! Herbie alternative 8: linear form (2 - x*eps)/2 for x <= 0.00145, else 0.
              real(8) function code(x, eps)
                  real(8), intent (in) :: x
                  real(8), intent (in) :: eps
                  real(8) :: tmp
                  if (x <= 0.00145d0) then
                      tmp = (2.0d0 - (x * eps)) / 2.0d0
                  else
                      tmp = 0.0d0
                  end if
                  code = tmp
              end function
              
              // Herbie alternative 8: linear form (2 - x*eps)/2 for x <= 0.00145, else 0.
              public static double code(double x, double eps) {
              	double tmp;
              	if (x <= 0.00145) {
              		tmp = (2.0 - (x * eps)) / 2.0;
              	} else {
              		tmp = 0.0;
              	}
              	return tmp;
              }
              
              def code(x, eps):
              	# Herbie alternative 8: linear form (2 - x*eps)/2 on x <= 0.00145,
              	# zero above the crossover (same predicate, so NaN yields 0.0).
              	return (2.0 - (x * eps)) / 2.0 if x <= 0.00145 else 0.0
              
              # Herbie alternative 8: linear form (2 - x*eps)/2 for x <= 0.00145, else 0.
              function code(x, eps)
              	tmp = 0.0
              	if (x <= 0.00145)
              		tmp = Float64(Float64(2.0 - Float64(x * eps)) / 2.0);
              	else
              		tmp = 0.0;
              	end
              	return tmp
              end
              
              % Herbie alternative 8: linear form (2 - x*eps)/2 for x <= 0.00145, else 0.
              function tmp_2 = code(x, eps)
              	tmp = 0.0;
              	if (x <= 0.00145)
              		tmp = (2.0 - (x * eps)) / 2.0;
              	else
              		tmp = 0.0;
              	end
              	tmp_2 = tmp;
              end
              
              (* Herbie alternative 8: linear form (2 - x*eps)/2 for x <= 0.00145, else 0. *)
              code[x_, eps_] := If[LessEqual[x, 0.00145], N[(N[(2.0 - N[(x * eps), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
              
              \begin{array}{l}
              
              \\
              \begin{array}{l}
              \mathbf{if}\;x \leq 0.00145:\\
              \;\;\;\;\frac{2 - x \cdot \varepsilon}{2}\\
              
              \mathbf{else}:\\
              \;\;\;\;0\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 2 regimes
              2. if x < 0.00145

                1. Initial program 60.8%

                  \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                2. Step-by-step derivation
                  1. div-sub60.8%

                    \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                  2. +-rgt-identity60.8%

                    \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                  3. div-sub60.8%

                    \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                3. Simplified60.8%

                  \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
                4. Taylor expanded in x around 0 39.8%

                  \[\leadsto \frac{\color{blue}{\left(\frac{1}{\varepsilon} + 1\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
                5. Taylor expanded in x around 0 47.4%

                  \[\leadsto \frac{\color{blue}{2 + \left(\frac{1}{\varepsilon} - 1\right) \cdot \left(\left(1 + \varepsilon\right) \cdot x\right)}}{2} \]
                6. Step-by-step derivation
                  1. sub-neg47.4%

                    \[\leadsto \frac{2 + \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot \left(\left(1 + \varepsilon\right) \cdot x\right)}{2} \]
                  2. metadata-eval47.4%

                    \[\leadsto \frac{2 + \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot \left(\left(1 + \varepsilon\right) \cdot x\right)}{2} \]
                  3. *-commutative47.4%

                    \[\leadsto \frac{2 + \color{blue}{\left(\left(1 + \varepsilon\right) \cdot x\right) \cdot \left(\frac{1}{\varepsilon} + -1\right)}}{2} \]
                  4. +-commutative47.4%

                    \[\leadsto \frac{2 + \left(\color{blue}{\left(\varepsilon + 1\right)} \cdot x\right) \cdot \left(\frac{1}{\varepsilon} + -1\right)}{2} \]
                  5. distribute-lft1-in47.4%

                    \[\leadsto \frac{2 + \color{blue}{\left(\varepsilon \cdot x + x\right)} \cdot \left(\frac{1}{\varepsilon} + -1\right)}{2} \]
                  6. +-commutative47.4%

                    \[\leadsto \frac{2 + \left(\varepsilon \cdot x + x\right) \cdot \color{blue}{\left(-1 + \frac{1}{\varepsilon}\right)}}{2} \]
                7. Simplified47.4%

                  \[\leadsto \frac{\color{blue}{2 + \left(\varepsilon \cdot x + x\right) \cdot \left(-1 + \frac{1}{\varepsilon}\right)}}{2} \]
                8. Taylor expanded in eps around inf 69.7%

                  \[\leadsto \frac{\color{blue}{2 + \left(-1 \cdot \left(\varepsilon \cdot x\right) + \left(-1 \cdot x + x\right)\right)}}{2} \]
                9. Step-by-step derivation
                  1. +-commutative69.7%

                    \[\leadsto \frac{2 + \color{blue}{\left(\left(-1 \cdot x + x\right) + -1 \cdot \left(\varepsilon \cdot x\right)\right)}}{2} \]
                  2. neg-mul-169.7%

                    \[\leadsto \frac{2 + \left(\left(\color{blue}{\left(-x\right)} + x\right) + -1 \cdot \left(\varepsilon \cdot x\right)\right)}{2} \]
                  3. *-rgt-identity69.7%

                    \[\leadsto \frac{2 + \left(\left(\color{blue}{\left(-x\right) \cdot 1} + x\right) + -1 \cdot \left(\varepsilon \cdot x\right)\right)}{2} \]
                  4. fma-udef69.7%

                    \[\leadsto \frac{2 + \left(\color{blue}{\mathsf{fma}\left(-x, 1, x\right)} + -1 \cdot \left(\varepsilon \cdot x\right)\right)}{2} \]
                  5. mul-1-neg69.7%

                    \[\leadsto \frac{2 + \left(\mathsf{fma}\left(-x, 1, x\right) + \color{blue}{\left(-\varepsilon \cdot x\right)}\right)}{2} \]
                  6. unsub-neg69.7%

                    \[\leadsto \frac{2 + \color{blue}{\left(\mathsf{fma}\left(-x, 1, x\right) - \varepsilon \cdot x\right)}}{2} \]
                  7. fma-udef69.7%

                    \[\leadsto \frac{2 + \left(\color{blue}{\left(\left(-x\right) \cdot 1 + x\right)} - \varepsilon \cdot x\right)}{2} \]
                  8. *-rgt-identity69.7%

                    \[\leadsto \frac{2 + \left(\left(\color{blue}{\left(-x\right)} + x\right) - \varepsilon \cdot x\right)}{2} \]
                  9. neg-mul-169.7%

                    \[\leadsto \frac{2 + \left(\left(\color{blue}{-1 \cdot x} + x\right) - \varepsilon \cdot x\right)}{2} \]
                  10. distribute-lft1-in69.7%

                    \[\leadsto \frac{2 + \left(\color{blue}{\left(-1 + 1\right) \cdot x} - \varepsilon \cdot x\right)}{2} \]
                  11. metadata-eval69.7%

                    \[\leadsto \frac{2 + \left(\color{blue}{0} \cdot x - \varepsilon \cdot x\right)}{2} \]
                  12. mul0-lft69.7%

                    \[\leadsto \frac{2 + \left(\color{blue}{0} - \varepsilon \cdot x\right)}{2} \]
                  13. neg-sub069.7%

                    \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon \cdot x\right)}}{2} \]
                  14. unsub-neg69.7%

                    \[\leadsto \frac{\color{blue}{2 - \varepsilon \cdot x}}{2} \]
                  15. *-commutative69.7%

                    \[\leadsto \frac{2 - \color{blue}{x \cdot \varepsilon}}{2} \]
                10. Simplified69.7%

                  \[\leadsto \frac{\color{blue}{2 - x \cdot \varepsilon}}{2} \]

                if 0.00145 < x

                1. Initial program 100.0%

                  \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                2. Step-by-step derivation
                  1. Simplified100.0%

                    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
                  2. Taylor expanded in eps around 0 55.5%

                    \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                  3. Step-by-step derivation
                    1. div-sub55.5%

                      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                    2. rec-exp55.5%

                      \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
                    3. mul-1-neg55.5%

                      \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
                    4. +-inverses55.5%

                      \[\leadsto \frac{\color{blue}{0}}{2} \]
                  4. Simplified55.5%

                    \[\leadsto \frac{\color{blue}{0}}{2} \]
                3. Recombined 2 regimes into one program.
                4. Final simplification65.6%

                  \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 0.00145:\\ \;\;\;\;\frac{2 - x \cdot \varepsilon}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

                Alternative 9: 60.3% accurate, 32.1× speedup?

                \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;x \cdot \frac{\varepsilon}{-2}\\ \mathbf{elif}\;x \leq 30000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
                ; Herbie alternative 9: x*(eps/-2) for x <= -1, constant 1 up to 3e7, else 0.
                (FPCore (x eps)
                 :precision binary64
                 (if (<= x -1.0) (* x (/ eps -2.0)) (if (<= x 30000000.0) 1.0 0.0)))
                /* Herbie alternative 9: three regimes in x — x*(eps/-2) for
                 * x <= -1, constant 1 up to 3e7, 0 beyond. Same predicates as the
                 * derivation, so NaN falls through to 0. */
                double code(double x, double eps) {
                	if (x <= -1.0) {
                		return x * (eps / -2.0);
                	}
                	return (x <= 30000000.0) ? 1.0 : 0.0;
                }
                
                ! Herbie alternative 9: x*(eps/(-2)) for x <= -1, constant 1 up to 3e7, else 0.
                real(8) function code(x, eps)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: eps
                    real(8) :: tmp
                    if (x <= (-1.0d0)) then
                        tmp = x * (eps / (-2.0d0))
                    else if (x <= 30000000.0d0) then
                        tmp = 1.0d0
                    else
                        tmp = 0.0d0
                    end if
                    code = tmp
                end function
                
                // Herbie alternative 9: x*(eps/-2) for x <= -1, constant 1 up to 3e7, else 0.
                public static double code(double x, double eps) {
                	double tmp;
                	if (x <= -1.0) {
                		tmp = x * (eps / -2.0);
                	} else if (x <= 30000000.0) {
                		tmp = 1.0;
                	} else {
                		tmp = 0.0;
                	}
                	return tmp;
                }
                
                def code(x, eps):
                	# Herbie alternative 9: x*(eps/-2) for x <= -1, constant 1 up
                	# to 3e7, 0 beyond (NaN falls through both tests to 0.0).
                	if x <= -1.0:
                		return x * (eps / -2.0)
                	if x <= 30000000.0:
                		return 1.0
                	return 0.0
                
                # Herbie alternative 9: x*(eps/-2) for x <= -1, constant 1 up to 3e7, else 0.
                function code(x, eps)
                	tmp = 0.0
                	if (x <= -1.0)
                		tmp = Float64(x * Float64(eps / -2.0));
                	elseif (x <= 30000000.0)
                		tmp = 1.0;
                	else
                		tmp = 0.0;
                	end
                	return tmp
                end
                
                % Herbie alternative 9: x*(eps/-2) for x <= -1, constant 1 up to 3e7, else 0.
                function tmp_2 = code(x, eps)
                	tmp = 0.0;
                	if (x <= -1.0)
                		tmp = x * (eps / -2.0);
                	elseif (x <= 30000000.0)
                		tmp = 1.0;
                	else
                		tmp = 0.0;
                	end
                	tmp_2 = tmp;
                end
                
                (* Herbie alternative 9: x*(eps/-2) for x <= -1, constant 1 up to 3e7, else 0. *)
                code[x_, eps_] := If[LessEqual[x, -1.0], N[(x * N[(eps / -2.0), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 30000000.0], 1.0, 0.0]]
                
                \begin{array}{l}
                
                \\
                \begin{array}{l}
                \mathbf{if}\;x \leq -1:\\
                \;\;\;\;x \cdot \frac{\varepsilon}{-2}\\
                
                \mathbf{elif}\;x \leq 30000000:\\
                \;\;\;\;1\\
                
                \mathbf{else}:\\
                \;\;\;\;0\\
                
                
                \end{array}
                \end{array}
                
                Derivation
                1. Split input into 3 regimes
                2. if x < -1

                  1. Initial program 100.0%

                    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                  2. Step-by-step derivation
                    1. div-sub100.0%

                      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                    2. +-rgt-identity100.0%

                      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                    3. div-sub100.0%

                      \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                  3. Simplified100.0%

                    \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
                  4. Taylor expanded in x around 0 64.6%

                    \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
                  5. Taylor expanded in x around 0 40.6%

                    \[\leadsto \frac{\color{blue}{-1 \cdot \left(\left(\frac{1}{\varepsilon} + 1\right) \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)\right) + 2}}{2} \]
                  6. Taylor expanded in eps around inf 40.6%

                    \[\leadsto \frac{\color{blue}{\varepsilon \cdot x}}{2} \]
                  7. Step-by-step derivation
                    1. expm1-log1p-u40.6%

                      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\frac{\varepsilon \cdot x}{2}\right)\right)} \]
                    2. expm1-udef40.6%

                      \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\frac{\varepsilon \cdot x}{2}\right)} - 1} \]
                    3. frac-2neg40.6%

                      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{\frac{-\varepsilon \cdot x}{-2}}\right)} - 1 \]
                    4. distribute-lft-neg-out40.6%

                      \[\leadsto e^{\mathsf{log1p}\left(\frac{\color{blue}{\left(-\varepsilon\right) \cdot x}}{-2}\right)} - 1 \]
                    5. associate-/l*40.6%

                      \[\leadsto e^{\mathsf{log1p}\left(\color{blue}{\frac{-\varepsilon}{\frac{-2}{x}}}\right)} - 1 \]
                    6. add-sqr-sqrt40.6%

                      \[\leadsto e^{\mathsf{log1p}\left(\frac{\color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}{\frac{-2}{x}}\right)} - 1 \]
                    7. sqrt-unprod66.0%

                      \[\leadsto e^{\mathsf{log1p}\left(\frac{\color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}}{\frac{-2}{x}}\right)} - 1 \]
                    8. sqr-neg66.0%

                      \[\leadsto e^{\mathsf{log1p}\left(\frac{\sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}}{\frac{-2}{x}}\right)} - 1 \]
                    9. sqrt-unprod20.8%

                      \[\leadsto e^{\mathsf{log1p}\left(\frac{\color{blue}{\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}}}{\frac{-2}{x}}\right)} - 1 \]
                    10. add-sqr-sqrt20.8%

                      \[\leadsto e^{\mathsf{log1p}\left(\frac{\color{blue}{\varepsilon}}{\frac{-2}{x}}\right)} - 1 \]
                    11. metadata-eval20.8%

                      \[\leadsto e^{\mathsf{log1p}\left(\frac{\varepsilon}{\frac{\color{blue}{-2}}{x}}\right)} - 1 \]
                  8. Applied egg-rr20.8%

                    \[\leadsto \color{blue}{e^{\mathsf{log1p}\left(\frac{\varepsilon}{\frac{-2}{x}}\right)} - 1} \]
                  9. Step-by-step derivation
                    1. expm1-def20.8%

                      \[\leadsto \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(\frac{\varepsilon}{\frac{-2}{x}}\right)\right)} \]
                    2. expm1-log1p20.9%

                      \[\leadsto \color{blue}{\frac{\varepsilon}{\frac{-2}{x}}} \]
                    3. associate-/r/20.9%

                      \[\leadsto \color{blue}{\frac{\varepsilon}{-2} \cdot x} \]
                  10. Simplified20.9%

                    \[\leadsto \color{blue}{\frac{\varepsilon}{-2} \cdot x} \]

                  if -1 < x < 3e7

                  1. Initial program 50.9%

                    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                  2. Step-by-step derivation
                    1. div-sub50.9%

                      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                     2. +-rgt-identity — 50.9%

                      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                     3. div-sub — 50.9%

                      \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                   3. Simplified — 50.9%

                    \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
                  4. Taylor expanded in x around 0 82.1%

                    \[\leadsto \frac{\color{blue}{2}}{2} \]

                  if 3e7 < x

                  1. Initial program 100.0%

                    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                  2. Step-by-step derivation
                    1. Simplified100.0%

                      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
                    2. Taylor expanded in eps around 0 58.6%

                      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                    3. Step-by-step derivation
                      1. div-sub58.6%

                        \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                      2. rec-exp58.6%

                        \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
                      3. mul-1-neg58.6%

                        \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
                      4. +-inverses58.6%

                        \[\leadsto \frac{\color{blue}{0}}{2} \]
                    4. Simplified58.6%

                      \[\leadsto \frac{\color{blue}{0}}{2} \]
                  3. Recombined 3 regimes into one program.
                  4. Final simplification66.0%

                    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;x \cdot \frac{\varepsilon}{-2}\\ \mathbf{elif}\;x \leq 30000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

                  Alternative 10: 57.2% accurate, 74.1× speedup?

                  \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 30000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
                  (FPCore (x eps) :precision binary64 (if (<= x 30000000.0) 1.0 0.0))
                  double code(double x, double eps) {
                  	double tmp;
                  	if (x <= 30000000.0) {
                  		tmp = 1.0;
                  	} else {
                  		tmp = 0.0;
                  	}
                  	return tmp;
                  }
                  
                  real(8) function code(x, eps)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: eps
                      real(8) :: tmp
                      if (x <= 30000000.0d0) then
                          tmp = 1.0d0
                      else
                          tmp = 0.0d0
                      end if
                      code = tmp
                  end function
                  
                  public static double code(double x, double eps) {
                  	double tmp;
                  	if (x <= 30000000.0) {
                  		tmp = 1.0;
                  	} else {
                  		tmp = 0.0;
                  	}
                  	return tmp;
                  }
                  
                  def code(x, eps):
                  	tmp = 0
                  	if x <= 30000000.0:
                  		tmp = 1.0
                  	else:
                  		tmp = 0.0
                  	return tmp
                  
                  function code(x, eps)
                  	tmp = 0.0
                  	if (x <= 30000000.0)
                  		tmp = 1.0;
                  	else
                  		tmp = 0.0;
                  	end
                  	return tmp
                  end
                  
                  function tmp_2 = code(x, eps)
                  	tmp = 0.0;
                  	if (x <= 30000000.0)
                  		tmp = 1.0;
                  	else
                  		tmp = 0.0;
                  	end
                  	tmp_2 = tmp;
                  end
                  
                  code[x_, eps_] := If[LessEqual[x, 30000000.0], 1.0, 0.0]
                  
                  \begin{array}{l}
                  
                  \\
                  \begin{array}{l}
                  \mathbf{if}\;x \leq 30000000:\\
                  \;\;\;\;1\\
                  
                  \mathbf{else}:\\
                  \;\;\;\;0\\
                  
                  
                  \end{array}
                  \end{array}
                  
                  Derivation
                  1. Split input into 2 regimes
                  2. if x < 3e7

                    1. Initial program 61.6%

                      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                    2. Step-by-step derivation
                      1. div-sub61.6%

                        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                      2. +-rgt-identity61.6%

                        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                      3. div-sub61.6%

                        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
                    3. Simplified61.6%

                      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
                    4. Taylor expanded in x around 0 64.8%

                      \[\leadsto \frac{\color{blue}{2}}{2} \]

                    if 3e7 < x

                    1. Initial program 100.0%

                      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                    2. Step-by-step derivation
                      1. Simplified100.0%

                        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
                      2. Taylor expanded in eps around 0 58.6%

                        \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                      3. Step-by-step derivation
                        1. div-sub58.6%

                          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                        2. rec-exp58.6%

                          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
                        3. mul-1-neg58.6%

                          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
                        4. +-inverses58.6%

                          \[\leadsto \frac{\color{blue}{0}}{2} \]
                      4. Simplified58.6%

                        \[\leadsto \frac{\color{blue}{0}}{2} \]
                    3. Recombined 2 regimes into one program.
                    4. Final simplification63.2%

                      \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 30000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

                    Alternative 11: 16.1% accurate, 227.0× speedup?

                    \[\begin{array}{l} \\ 0 \end{array} \]
                    (FPCore (x eps) :precision binary64 0.0)
                    double code(double x, double eps) {
                    	return 0.0;
                    }
                    
                     ! Herbie Alternative 11: the derivation collapses the whole
                     ! expression to the constant 0; x and eps are unused.
                     real(8) function code(x, eps)
                         real(8), intent (in) :: x
                         real(8), intent (in) :: eps
                         code = 0.0d0
                     end function
                    
                    public static double code(double x, double eps) {
                    	return 0.0;
                    }
                    
                    def code(x, eps):
                    	return 0.0
                    
                     # Herbie Alternative 11: the derivation collapses the whole
                     # expression to the constant 0; x and eps are unused.
                     function code(x, eps)
                     	return 0.0
                     end
                    
                     % Herbie Alternative 11: the derivation collapses the whole
                     % expression to the constant 0; x and eps are unused.
                     function tmp = code(x, eps)
                     	tmp = 0.0;
                     end
                    
                     code[x_, eps_] := 0.0 (* Herbie Alternative 11: constant-0 approximation; x and eps are unused *)
                    
                    \begin{array}{l}
                    
                    \\
                    0
                    \end{array}
                    
                    Derivation
                    1. Initial program 72.0%

                      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
                    2. Step-by-step derivation
                      1. Simplified56.8%

                        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
                      2. Taylor expanded in eps around 0 17.4%

                        \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                      3. Step-by-step derivation
                        1. div-sub17.4%

                          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
                        2. rec-exp17.4%

                          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
                        3. mul-1-neg17.4%

                          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
                        4. +-inverses17.7%

                          \[\leadsto \frac{\color{blue}{0}}{2} \]
                      4. Simplified17.7%

                        \[\leadsto \frac{\color{blue}{0}}{2} \]
                      5. Final simplification17.7%

                        \[\leadsto 0 \]

                      Reproduce

                      ?
                      herbie shell --seed 2023229 
                      (FPCore (x eps)
                        :name "NMSE Section 6.1 mentioned, A"
                        :precision binary64
                        (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))