NMSE Section 6.1 mentioned, A

Percentage Accurate: 74.0% → 98.9%
Time: 8.1s
Alternatives: 10
Speedup: 1.8×

Specification

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
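
The specification subtracts two terms that each scale like 1/eps, so for tiny eps the subtraction cancels catastrophically in binary64; that is what limits the original program to 74.0% accuracy on the sampled inputs. The following sketch is not part of the Herbie report: it assumes the mpmath package and one arbitrary sample point, and compares the binary64 evaluation against a high-precision reference. The reference uses the algebraically equivalent form exp(-x)*(cosh(eps*x) + sinh(eps*x)/eps), which has no cancelling subtraction.

import math
from mpmath import mp, mpf, exp as mpexp, cosh, sinh

def spec_f64(x, eps):
    # Specification evaluated directly in binary64, as in the listings above.
    return (((1.0 + 1.0 / eps) * math.exp(-(1.0 - eps) * x))
            - (((1.0 / eps) - 1.0) * math.exp(-(1.0 + eps) * x))) / 2.0

def spec_ref(x, eps, prec=200):
    # High-precision reference via the equivalent non-cancelling form.
    mp.prec = prec
    x, eps = mpf(x), mpf(eps)
    return mpexp(-x) * (cosh(eps * x) + sinh(eps * x) / eps)

# Arbitrary sample point: eps is small enough that 1 + 1/eps and 1/eps - 1
# round to the same double, so the two products cancel to exactly 0.0,
# while the true value approaches exp(-x) * (1 + x) as eps -> 0.
x, eps = 1.0, 1e-300
print(spec_f64(x, eps))         # binary64 result of the specification
print(float(spec_ref(x, eps)))  # high-precision reference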

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (the variable is chosen in the title); the vertical axis shows accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line shows the average, while the dots show individual samples.

Accuracy vs Speed

Herbie found 10 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 74.0% accurate, 1.0× speedup

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 98.9% accurate, 1.1× speedup

\[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 4 \cdot 10^{-23}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon - x} + e^{-x}}{2}\\ \end{array} \end{array} \]
NOTE: eps should be positive before calling this function
(FPCore (x eps)
 :precision binary64
 (if (<= x 4e-23)
   (/ (+ (exp (* x (+ eps -1.0))) (exp (* x (- eps)))) 2.0)
   (/ (+ (exp (- (* x eps) x)) (exp (- x))) 2.0)))
eps = abs(eps);
double code(double x, double eps) {
	double tmp;
	if (x <= 4e-23) {
		tmp = (exp((x * (eps + -1.0))) + exp((x * -eps))) / 2.0;
	} else {
		tmp = (exp(((x * eps) - x)) + exp(-x)) / 2.0;
	}
	return tmp;
}
NOTE: eps should be positive before calling this function
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= 4d-23) then
        tmp = (exp((x * (eps + (-1.0d0)))) + exp((x * -eps))) / 2.0d0
    else
        tmp = (exp(((x * eps) - x)) + exp(-x)) / 2.0d0
    end if
    code = tmp
end function
eps = Math.abs(eps);
public static double code(double x, double eps) {
	double tmp;
	if (x <= 4e-23) {
		tmp = (Math.exp((x * (eps + -1.0))) + Math.exp((x * -eps))) / 2.0;
	} else {
		tmp = (Math.exp(((x * eps) - x)) + Math.exp(-x)) / 2.0;
	}
	return tmp;
}
eps = abs(eps)
def code(x, eps):
	tmp = 0
	if x <= 4e-23:
		tmp = (math.exp((x * (eps + -1.0))) + math.exp((x * -eps))) / 2.0
	else:
		tmp = (math.exp(((x * eps) - x)) + math.exp(-x)) / 2.0
	return tmp
eps = abs(eps)
function code(x, eps)
	tmp = 0.0
	if (x <= 4e-23)
		tmp = Float64(Float64(exp(Float64(x * Float64(eps + -1.0))) + exp(Float64(x * Float64(-eps)))) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(Float64(x * eps) - x)) + exp(Float64(-x))) / 2.0);
	end
	return tmp
end
eps = abs(eps)
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= 4e-23)
		tmp = (exp((x * (eps + -1.0))) + exp((x * -eps))) / 2.0;
	else
		tmp = (exp(((x * eps) - x)) + exp(-x)) / 2.0;
	end
	tmp_2 = tmp;
end
NOTE: eps should be positive before calling this function
code[x_, eps_] := If[LessEqual[x, 4e-23], N[(N[(N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * (-eps)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(N[(x * eps), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
eps = |eps|\\
\\
\begin{array}{l}
\mathbf{if}\;x \leq 4 \cdot 10^{-23}:\\
\;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \varepsilon - x} + e^{-x}}{2}\\


\end{array}
\end{array}
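
As a spot check (not part of the report; it assumes the mpmath package), the sketch below evaluates the original program and Alternative 1 at the same cancellation-prone point used in the sketch after the specification, against the same high-precision reference. Keep in mind that the accuracy percentages above average bits of error over many sampled inputs, so a single point is only illustrative.

import math
from mpmath import mp, mpf, exp as mpexp, cosh, sinh

def original(x, eps):
    # Initial program, evaluated directly in binary64.
    return (((1.0 + 1.0 / eps) * math.exp(-(1.0 - eps) * x))
            - (((1.0 / eps) - 1.0) * math.exp(-(1.0 + eps) * x))) / 2.0

def alternative1(x, eps):
    # Alternative 1 as listed above; the note says eps should be positive.
    eps = abs(eps)
    if x <= 4e-23:
        return (math.exp(x * (eps + -1.0)) + math.exp(x * -eps)) / 2.0
    return (math.exp((x * eps) - x) + math.exp(-x)) / 2.0

def reference(x, eps, prec=200):
    # Equivalent non-cancelling form of the specification, at 200 bits.
    mp.prec = prec
    x, eps = mpf(x), mpf(eps)
    return mpexp(-x) * (cosh(eps * x) + sinh(eps * x) / eps)

x, eps = 1.0, 1e-300
ref = reference(x, eps)
for name, f in [("original", original), ("alternative 1", alternative1)]:
    value = f(x, eps)
    print(name, value, float(abs(value - ref) / ref))  # value and relative error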
Derivation
  1. Split input into 2 regimes
  2. if x < 3.99999999999999984e-23

    1. Initial program 67.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub67.8%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity67.8%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub67.8%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified67.8%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 97.9%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg97.9%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative97.9%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative97.9%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified97.9%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around inf 97.9%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{\varepsilon \cdot x}}\right)}{2} \]

    if 3.99999999999999984e-23 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 73.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in x around inf 73.5%

      \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x} + e^{-x}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification90.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 4 \cdot 10^{-23}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon - x} + e^{-x}}{2}\\ \end{array} \]

Alternative 2: 89.8% accurate, 1.1× speedup

\[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} t_0 := e^{-x}\\ \mathbf{if}\;\varepsilon \leq 3.4 \cdot 10^{-20}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;\varepsilon \leq 7.2 \cdot 10^{+235} \lor \neg \left(\varepsilon \leq 2 \cdot 10^{+270}\right):\\ \;\;\;\;\frac{t_0 + e^{x \cdot \varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t_0 \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\ \end{array} \end{array} \]
NOTE: eps should be positive before calling this function
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (exp (- x))))
   (if (<= eps 3.4e-20)
     (/ (/ 2.0 (exp x)) 2.0)
     (if (or (<= eps 7.2e+235) (not (<= eps 2e+270)))
       (/ (+ t_0 (exp (* x eps))) 2.0)
       (/ (* t_0 (+ 2.0 (* x (+ eps (* x (* eps (* eps 0.5))))))) 2.0)))))
eps = abs(eps);
double code(double x, double eps) {
	double t_0 = exp(-x);
	double tmp;
	if (eps <= 3.4e-20) {
		tmp = (2.0 / exp(x)) / 2.0;
	} else if ((eps <= 7.2e+235) || !(eps <= 2e+270)) {
		tmp = (t_0 + exp((x * eps))) / 2.0;
	} else {
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	}
	return tmp;
}
NOTE: eps should be positive before calling this function
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp(-x)
    if (eps <= 3.4d-20) then
        tmp = (2.0d0 / exp(x)) / 2.0d0
    else if ((eps <= 7.2d+235) .or. (.not. (eps <= 2d+270))) then
        tmp = (t_0 + exp((x * eps))) / 2.0d0
    else
        tmp = (t_0 * (2.0d0 + (x * (eps + (x * (eps * (eps * 0.5d0))))))) / 2.0d0
    end if
    code = tmp
end function
eps = Math.abs(eps);
public static double code(double x, double eps) {
	double t_0 = Math.exp(-x);
	double tmp;
	if (eps <= 3.4e-20) {
		tmp = (2.0 / Math.exp(x)) / 2.0;
	} else if ((eps <= 7.2e+235) || !(eps <= 2e+270)) {
		tmp = (t_0 + Math.exp((x * eps))) / 2.0;
	} else {
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	}
	return tmp;
}
eps = abs(eps)
def code(x, eps):
	t_0 = math.exp(-x)
	tmp = 0
	if eps <= 3.4e-20:
		tmp = (2.0 / math.exp(x)) / 2.0
	elif (eps <= 7.2e+235) or not (eps <= 2e+270):
		tmp = (t_0 + math.exp((x * eps))) / 2.0
	else:
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0
	return tmp
eps = abs(eps)
function code(x, eps)
	t_0 = exp(Float64(-x))
	tmp = 0.0
	if (eps <= 3.4e-20)
		tmp = Float64(Float64(2.0 / exp(x)) / 2.0);
	elseif ((eps <= 7.2e+235) || !(eps <= 2e+270))
		tmp = Float64(Float64(t_0 + exp(Float64(x * eps))) / 2.0);
	else
		tmp = Float64(Float64(t_0 * Float64(2.0 + Float64(x * Float64(eps + Float64(x * Float64(eps * Float64(eps * 0.5))))))) / 2.0);
	end
	return tmp
end
eps = abs(eps)
function tmp_2 = code(x, eps)
	t_0 = exp(-x);
	tmp = 0.0;
	if (eps <= 3.4e-20)
		tmp = (2.0 / exp(x)) / 2.0;
	elseif ((eps <= 7.2e+235) || ~((eps <= 2e+270)))
		tmp = (t_0 + exp((x * eps))) / 2.0;
	else
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	end
	tmp_2 = tmp;
end
NOTE: eps should be positive before calling this function
code[x_, eps_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[eps, 3.4e-20], N[(N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[eps, 7.2e+235], N[Not[LessEqual[eps, 2e+270]], $MachinePrecision]], N[(N[(t$95$0 + N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$0 * N[(2.0 + N[(x * N[(eps + N[(x * N[(eps * N[(eps * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
eps = |eps|\\
\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;\varepsilon \leq 3.4 \cdot 10^{-20}:\\
\;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\

\mathbf{elif}\;\varepsilon \leq 7.2 \cdot 10^{+235} \lor \neg \left(\varepsilon \leq 2 \cdot 10^{+270}\right):\\
\;\;\;\;\frac{t_0 + e^{x \cdot \varepsilon}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{t_0 \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\


\end{array}
\end{array}
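
The middle condition above (eps ≤ 7.2e+235 or not eps ≤ 2e+270) reads a little awkwardly. The short sketch below is not from the report; it only restates which range of eps each branch of Alternative 2 handles, assuming eps has already been replaced by |eps| as the note requires.

def regime(eps):
    # Branch selection of Alternative 2, using the report's thresholds.
    if eps <= 3.4e-20:
        return 1  # 0 <= eps <= 3.4e-20: the (2 / exp(x)) / 2 branch
    elif eps <= 7.2e+235 or not (eps <= 2e+270):
        return 2  # 3.4e-20 < eps <= 7.2e+235, and also eps > 2e+270
    else:
        return 3  # 7.2e+235 < eps <= 2e+270: the series branch

print(regime(1e-30), regime(1.0), regime(1e+250), regime(1e+280))  # 1 2 3 2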
Derivation
  1. Split input into 3 regimes
  2. if eps < 3.3999999999999997e-20

    1. Initial program 69.2%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub69.2%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity69.2%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub69.2%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified69.2%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 98.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg98.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative98.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified98.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 80.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in eps around 0 75.0%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    9. Step-by-step derivation
      1. exp-neg75.0%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      2. associate-*r/75.0%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      3. metadata-eval75.0%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified75.0%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]

    if 3.3999999999999997e-20 < eps < 7.19999999999999971e235 or 2.0000000000000001e270 < eps

    1. Initial program 98.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub98.6%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity98.6%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub98.6%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified98.6%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 86.4%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in x around inf 86.4%

      \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x} + e^{-x}}}{2} \]
    9. Taylor expanded in eps around inf 86.4%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + e^{-x}}{2} \]
    10. Step-by-step derivation
      1. *-commutative86.4%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + e^{-x}}{2} \]
    11. Simplified86.4%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + e^{-x}}{2} \]

    if 7.19999999999999971e235 < eps < 2.0000000000000001e270

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 46.2%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in x around inf 46.2%

      \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x} + e^{-x}}}{2} \]
    9. Taylor expanded in eps around 0 50.0%

      \[\leadsto \frac{\color{blue}{\varepsilon \cdot \left(e^{-x} \cdot x\right) + \left(0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right) + 2 \cdot e^{-x}\right)}}{2} \]
    10. Step-by-step derivation
      1. associate-+r+50.0%

        \[\leadsto \frac{\color{blue}{\left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right) + 2 \cdot e^{-x}}}{2} \]
      2. +-commutative50.0%

        \[\leadsto \frac{\color{blue}{2 \cdot e^{-x} + \left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}}{2} \]
      3. *-commutative50.0%

        \[\leadsto \frac{\color{blue}{e^{-x} \cdot 2} + \left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      4. *-commutative50.0%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\varepsilon \cdot \color{blue}{\left(x \cdot e^{-x}\right)} + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      5. associate-*r*50.0%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\color{blue}{\left(\varepsilon \cdot x\right) \cdot e^{-x}} + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      6. associate-*r*50.0%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \color{blue}{\left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \left(e^{-x} \cdot {x}^{2}\right)}\right)}{2} \]
      7. *-commutative50.0%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left({x}^{2} \cdot e^{-x}\right)}\right)}{2} \]
      8. associate-*r*50.0%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \color{blue}{\left(\left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right) \cdot e^{-x}}\right)}{2} \]
      9. distribute-rgt-out50.0%

        \[\leadsto \frac{e^{-x} \cdot 2 + \color{blue}{e^{-x} \cdot \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right)}}{2} \]
      10. distribute-lft-out50.0%

        \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(2 + \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right)\right)}}{2} \]
      11. unpow250.0%

        \[\leadsto \frac{e^{-x} \cdot \left(2 + \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left(x \cdot x\right)}\right)\right)}{2} \]
    11. Simplified67.7%

      \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right) \cdot x\right)\right)}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification77.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 3.4 \cdot 10^{-20}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;\varepsilon \leq 7.2 \cdot 10^{+235} \lor \neg \left(\varepsilon \leq 2 \cdot 10^{+270}\right):\\ \;\;\;\;\frac{e^{-x} + e^{x \cdot \varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\ \end{array} \]

Alternative 3: 98.9% accurate, 1.1× speedup

\[\begin{array}{l} eps = |eps|\\ \\ \frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \end{array} \]
NOTE: eps should be positive before calling this function
(FPCore (x eps)
 :precision binary64
 (/ (+ (exp (* x (+ eps -1.0))) (exp (* x (- -1.0 eps)))) 2.0))
eps = abs(eps);
double code(double x, double eps) {
	return (exp((x * (eps + -1.0))) + exp((x * (-1.0 - eps)))) / 2.0;
}
NOTE: eps should be positive before calling this function
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (exp((x * (eps + (-1.0d0)))) + exp((x * ((-1.0d0) - eps)))) / 2.0d0
end function
eps = Math.abs(eps);
public static double code(double x, double eps) {
	return (Math.exp((x * (eps + -1.0))) + Math.exp((x * (-1.0 - eps)))) / 2.0;
}
eps = abs(eps)
def code(x, eps):
	return (math.exp((x * (eps + -1.0))) + math.exp((x * (-1.0 - eps)))) / 2.0
eps = abs(eps)
function code(x, eps)
	return Float64(Float64(exp(Float64(x * Float64(eps + -1.0))) + exp(Float64(x * Float64(-1.0 - eps)))) / 2.0)
end
eps = abs(eps)
function tmp = code(x, eps)
	tmp = (exp((x * (eps + -1.0))) + exp((x * (-1.0 - eps)))) / 2.0;
end
NOTE: eps should be positive before calling this function
code[x_, eps_] := N[(N[(N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
eps = |eps|\\
\\
\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}
\end{array}
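
A small usage sketch (not part of the report): the "eps should be positive" note can be honored by taking |eps| once at the call boundary before using the Python translation above. Algebraically, the body of Alternative 3 equals exp(-x)*cosh(eps*x); in effect the Taylor step in the derivation below drops the sinh(eps*x)/eps part of the specification, so the rewrite is an approximation over the sampled inputs rather than an exact identity.

import math

def code(x, eps):
    # Python translation of Alternative 3, as listed above.
    return (math.exp(x * (eps + -1.0)) + math.exp(x * (-1.0 - eps))) / 2.0

def code_any_sign(x, eps):
    # Hypothetical wrapper: take |eps| so the rewrite's assumption holds.
    return code(x, abs(eps))

print(code_any_sign(1.0, -0.25))         # same as code(1.0, 0.25)
print(math.exp(-1.0) * math.cosh(0.25))  # the equivalent exp(-x)*cosh(eps*x) form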
Derivation
  1. Initial program 77.6%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Step-by-step derivation
    1. div-sub77.6%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    2. +-rgt-identity77.6%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    3. div-sub77.6%

      \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
  3. Simplified77.6%

    \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
  4. Taylor expanded in eps around inf 98.5%

    \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
  5. Step-by-step derivation
    1. mul-1-neg98.5%

      \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
    2. *-commutative98.5%

      \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
    3. mul-1-neg98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
    4. exp-prod98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
    5. +-commutative98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
    6. *-lft-identity98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
    7. metadata-eval98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
    8. cancel-sign-sub-inv98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
    9. exp-prod98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
    10. mul-1-neg98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
    11. *-commutative98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
    12. cancel-sign-sub-inv98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
    13. metadata-eval98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
    14. *-lft-identity98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
    15. +-commutative98.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
  6. Simplified98.5%

    \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
  7. Final simplification98.5%

    \[\leadsto \frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \]

Alternative 4: 95.7% accurate, 1.1× speedup

\[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} t_0 := e^{-x}\\ \mathbf{if}\;x \leq -1.6 \cdot 10^{-231}:\\ \;\;\;\;\frac{t_0 \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon - x} + t_0}{2}\\ \end{array} \end{array} \]
NOTE: eps should be positive before calling this function
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (exp (- x))))
   (if (<= x -1.6e-231)
     (/ (* t_0 (+ 2.0 (* x (+ eps (* x (* eps (* eps 0.5))))))) 2.0)
     (/ (+ (exp (- (* x eps) x)) t_0) 2.0))))
eps = abs(eps);
double code(double x, double eps) {
	double t_0 = exp(-x);
	double tmp;
	if (x <= -1.6e-231) {
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	} else {
		tmp = (exp(((x * eps) - x)) + t_0) / 2.0;
	}
	return tmp;
}
NOTE: eps should be positive before calling this function
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp(-x)
    if (x <= (-1.6d-231)) then
        tmp = (t_0 * (2.0d0 + (x * (eps + (x * (eps * (eps * 0.5d0))))))) / 2.0d0
    else
        tmp = (exp(((x * eps) - x)) + t_0) / 2.0d0
    end if
    code = tmp
end function
eps = Math.abs(eps);
public static double code(double x, double eps) {
	double t_0 = Math.exp(-x);
	double tmp;
	if (x <= -1.6e-231) {
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	} else {
		tmp = (Math.exp(((x * eps) - x)) + t_0) / 2.0;
	}
	return tmp;
}
eps = abs(eps)
def code(x, eps):
	t_0 = math.exp(-x)
	tmp = 0
	if x <= -1.6e-231:
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0
	else:
		tmp = (math.exp(((x * eps) - x)) + t_0) / 2.0
	return tmp
eps = abs(eps)
function code(x, eps)
	t_0 = exp(Float64(-x))
	tmp = 0.0
	if (x <= -1.6e-231)
		tmp = Float64(Float64(t_0 * Float64(2.0 + Float64(x * Float64(eps + Float64(x * Float64(eps * Float64(eps * 0.5))))))) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(Float64(x * eps) - x)) + t_0) / 2.0);
	end
	return tmp
end
eps = abs(eps)
function tmp_2 = code(x, eps)
	t_0 = exp(-x);
	tmp = 0.0;
	if (x <= -1.6e-231)
		tmp = (t_0 * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	else
		tmp = (exp(((x * eps) - x)) + t_0) / 2.0;
	end
	tmp_2 = tmp;
end
NOTE: eps should be positive before calling this function
code[x_, eps_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[x, -1.6e-231], N[(N[(t$95$0 * N[(2.0 + N[(x * N[(eps + N[(x * N[(eps * N[(eps * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(N[(x * eps), $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision] + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps = |eps|\\
\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;x \leq -1.6 \cdot 10^{-231}:\\
\;\;\;\;\frac{t_0 \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \varepsilon - x} + t_0}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.60000000000000004e-231

    1. Initial program 73.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub73.5%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity73.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub73.5%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified73.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 96.2%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg96.2%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative96.2%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative96.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified96.2%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 84.5%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in x around inf 84.5%

      \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x} + e^{-x}}}{2} \]
    9. Taylor expanded in eps around 0 66.2%

      \[\leadsto \frac{\color{blue}{\varepsilon \cdot \left(e^{-x} \cdot x\right) + \left(0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right) + 2 \cdot e^{-x}\right)}}{2} \]
    10. Step-by-step derivation
      1. associate-+r+66.2%

        \[\leadsto \frac{\color{blue}{\left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right) + 2 \cdot e^{-x}}}{2} \]
      2. +-commutative66.2%

        \[\leadsto \frac{\color{blue}{2 \cdot e^{-x} + \left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}}{2} \]
      3. *-commutative66.2%

        \[\leadsto \frac{\color{blue}{e^{-x} \cdot 2} + \left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      4. *-commutative66.2%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\varepsilon \cdot \color{blue}{\left(x \cdot e^{-x}\right)} + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      5. associate-*r*66.2%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\color{blue}{\left(\varepsilon \cdot x\right) \cdot e^{-x}} + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      6. associate-*r*66.2%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \color{blue}{\left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \left(e^{-x} \cdot {x}^{2}\right)}\right)}{2} \]
      7. *-commutative66.2%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left({x}^{2} \cdot e^{-x}\right)}\right)}{2} \]
      8. associate-*r*66.2%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \color{blue}{\left(\left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right) \cdot e^{-x}}\right)}{2} \]
      9. distribute-rgt-out74.3%

        \[\leadsto \frac{e^{-x} \cdot 2 + \color{blue}{e^{-x} \cdot \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right)}}{2} \]
      10. distribute-lft-out74.3%

        \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(2 + \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right)\right)}}{2} \]
      11. unpow274.3%

        \[\leadsto \frac{e^{-x} \cdot \left(2 + \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left(x \cdot x\right)}\right)\right)}{2} \]
    11. Simplified89.5%

      \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right) \cdot x\right)\right)}}{2} \]
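    A quick check of steps 8 and 9 above (my own working, not part of Herbie's log): the sum from step 8 can be written as e^(-x) times (e^(eps·x) + 1), and truncating the series for e^(eps·x) after the quadratic term gives

      \[\frac{e^{\varepsilon \cdot x - x} + e^{-x}}{2} = \frac{e^{-x} \cdot \left(e^{\varepsilon \cdot x} + 1\right)}{2} \approx \frac{e^{-x} \cdot \left(2 + \varepsilon \cdot x + 0.5 \cdot {\varepsilon}^{2} \cdot {x}^{2}\right)}{2} \]

    which matches the Horner-form expression kept in step 11.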

    if -1.60000000000000004e-231 < x

    1. Initial program 80.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub80.3%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity80.3%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub80.3%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified80.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 79.4%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in x around inf 79.4%

      \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x} + e^{-x}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification83.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.6 \cdot 10^{-231}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon - x} + e^{-x}}{2}\\ \end{array} \]
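    For experimenting with this result outside the report, here is a minimal Python sketch that transcribes the boxed piecewise program above (the function name alt4 is mine, not Herbie's):

    import math

    def alt4(x, eps):
        # Direct transcription of the final simplification above (illustrative only).
        if x <= -1.6e-231:
            return math.exp(-x) * (2.0 + x * (eps + x * (eps * (eps * 0.5)))) / 2.0
        return (math.exp(x * eps - x) + math.exp(-x)) / 2.0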

Alternative 5: 77.2% accurate, 1.8× speedup?

\[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} t_0 := \frac{\frac{2}{e^{x}}}{2}\\ \mathbf{if}\;\varepsilon \leq 3700000000000:\\ \;\;\;\;t_0\\ \mathbf{elif}\;\varepsilon \leq 1.1 \cdot 10^{+83}:\\ \;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\ \mathbf{elif}\;\varepsilon \leq 9.2 \cdot 10^{+174}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\ \end{array} \end{array} \]
NOTE: eps should be positive before calling this function
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (/ (/ 2.0 (exp x)) 2.0)))
   (if (<= eps 3700000000000.0)
     t_0
     (if (<= eps 1.1e+83)
       (/ (+ 2.0 (* x (+ x -2.0))) 2.0)
       (if (<= eps 9.2e+174)
         t_0
         (/
          (* (exp (- x)) (+ 2.0 (* x (+ eps (* x (* eps (* eps 0.5)))))))
          2.0))))))
eps = abs(eps);
double code(double x, double eps) {
	double t_0 = (2.0 / exp(x)) / 2.0;
	double tmp;
	if (eps <= 3700000000000.0) {
		tmp = t_0;
	} else if (eps <= 1.1e+83) {
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	} else if (eps <= 9.2e+174) {
		tmp = t_0;
	} else {
		tmp = (exp(-x) * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	}
	return tmp;
}
NOTE: eps should be positive before calling this function
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (2.0d0 / exp(x)) / 2.0d0
    if (eps <= 3700000000000.0d0) then
        tmp = t_0
    else if (eps <= 1.1d+83) then
        tmp = (2.0d0 + (x * (x + (-2.0d0)))) / 2.0d0
    else if (eps <= 9.2d+174) then
        tmp = t_0
    else
        tmp = (exp(-x) * (2.0d0 + (x * (eps + (x * (eps * (eps * 0.5d0))))))) / 2.0d0
    end if
    code = tmp
end function
eps = Math.abs(eps);
public static double code(double x, double eps) {
	double t_0 = (2.0 / Math.exp(x)) / 2.0;
	double tmp;
	if (eps <= 3700000000000.0) {
		tmp = t_0;
	} else if (eps <= 1.1e+83) {
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	} else if (eps <= 9.2e+174) {
		tmp = t_0;
	} else {
		tmp = (Math.exp(-x) * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	}
	return tmp;
}
eps = abs(eps)
def code(x, eps):
	t_0 = (2.0 / math.exp(x)) / 2.0
	tmp = 0
	if eps <= 3700000000000.0:
		tmp = t_0
	elif eps <= 1.1e+83:
		tmp = (2.0 + (x * (x + -2.0))) / 2.0
	elif eps <= 9.2e+174:
		tmp = t_0
	else:
		tmp = (math.exp(-x) * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0
	return tmp
eps = abs(eps)
function code(x, eps)
	t_0 = Float64(Float64(2.0 / exp(x)) / 2.0)
	tmp = 0.0
	if (eps <= 3700000000000.0)
		tmp = t_0;
	elseif (eps <= 1.1e+83)
		tmp = Float64(Float64(2.0 + Float64(x * Float64(x + -2.0))) / 2.0);
	elseif (eps <= 9.2e+174)
		tmp = t_0;
	else
		tmp = Float64(Float64(exp(Float64(-x)) * Float64(2.0 + Float64(x * Float64(eps + Float64(x * Float64(eps * Float64(eps * 0.5))))))) / 2.0);
	end
	return tmp
end
eps = abs(eps)
function tmp_2 = code(x, eps)
	t_0 = (2.0 / exp(x)) / 2.0;
	tmp = 0.0;
	if (eps <= 3700000000000.0)
		tmp = t_0;
	elseif (eps <= 1.1e+83)
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	elseif (eps <= 9.2e+174)
		tmp = t_0;
	else
		tmp = (exp(-x) * (2.0 + (x * (eps + (x * (eps * (eps * 0.5))))))) / 2.0;
	end
	tmp_2 = tmp;
end
NOTE: eps should be positive before calling this function
code[x_, eps_] := Block[{t$95$0 = N[(N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[eps, 3700000000000.0], t$95$0, If[LessEqual[eps, 1.1e+83], N[(N[(2.0 + N[(x * N[(x + -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[eps, 9.2e+174], t$95$0, N[(N[(N[Exp[(-x)], $MachinePrecision] * N[(2.0 + N[(x * N[(eps + N[(x * N[(eps * N[(eps * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\begin{array}{l}
eps = |eps|\\
\\
\begin{array}{l}
t_0 := \frac{\frac{2}{e^{x}}}{2}\\
\mathbf{if}\;\varepsilon \leq 3700000000000:\\
\;\;\;\;t_0\\

\mathbf{elif}\;\varepsilon \leq 1.1 \cdot 10^{+83}:\\
\;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\

\mathbf{elif}\;\varepsilon \leq 9.2 \cdot 10^{+174}:\\
\;\;\;\;t_0\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\


\end{array}
\end{array}
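The Python listing above can be exercised directly. A small hypothetical driver follows; it assumes that listing's code function is already defined and that math has been imported, and the eps values are chosen only so that each of the first three branches of the regime split is taken:

import math  # required by the Python listing above

# Each eps value below lands in a different branch of the regime split:
# eps <= 3.7e12, eps <= 1.1e83, and eps <= 9.2e174 respectively.
for eps in (1.0, 1.0e20, 1.0e150):
    print(eps, code(0.5, abs(eps)))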
Derivation
  1. Split input into 3 regimes
  2. if eps < 3.7e12 or 1.09999999999999999e83 < eps < 9.1999999999999991e174

    1. Initial program 72.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub72.5%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity72.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub72.5%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified72.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 98.2%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg98.2%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative98.2%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative98.2%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified98.2%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 81.2%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in eps around 0 72.9%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    9. Step-by-step derivation
      1. exp-neg72.9%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      2. associate-*r/72.9%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      3. metadata-eval72.9%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified72.9%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]
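    Note (my remark, not part of Herbie's log) that the expression kept by step 10 is algebraically just e^(-x); the nested division by two is simply the shape the rewrite sequence left it in:

      \[\frac{\frac{2}{e^{x}}}{2} = e^{-x} \]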

    if 3.7e12 < eps < 1.09999999999999999e83

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in eps around 0 67.3%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    9. Step-by-step derivation
      1. exp-neg67.3%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      2. associate-*r/67.3%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      3. metadata-eval67.3%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified67.3%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]
    11. Taylor expanded in x around 0 82.0%

      \[\leadsto \frac{\color{blue}{2 + \left(-2 \cdot x + {x}^{2}\right)}}{2} \]
    12. Step-by-step derivation
      1. unpow282.0%

        \[\leadsto \frac{2 + \left(-2 \cdot x + \color{blue}{x \cdot x}\right)}{2} \]
      2. distribute-rgt-out82.0%

        \[\leadsto \frac{2 + \color{blue}{x \cdot \left(-2 + x\right)}}{2} \]
      3. +-commutative82.0%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(x + -2\right)}}{2} \]
    13. Simplified82.0%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(x + -2\right)}}{2} \]
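    A quick check of steps 11–13 above (my own working, not part of Herbie's log): truncating the Maclaurin series of e^(-x) after the quadratic term gives

      \[\frac{\frac{2}{e^{x}}}{2} = e^{-x} \approx 1 - x + \frac{{x}^{2}}{2} = \frac{2 + x \cdot \left(x + -2\right)}{2} = \frac{{\left(x - 1\right)}^{2} + 1}{2} \]

    and the last form makes clear that this branch never returns a value below one half.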

    if 9.1999999999999991e174 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 68.6%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in x around inf 68.6%

      \[\leadsto \frac{\color{blue}{e^{\varepsilon \cdot x - x} + e^{-x}}}{2} \]
    9. Taylor expanded in eps around 0 29.6%

      \[\leadsto \frac{\color{blue}{\varepsilon \cdot \left(e^{-x} \cdot x\right) + \left(0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right) + 2 \cdot e^{-x}\right)}}{2} \]
    10. Step-by-step derivation
      1. associate-+r+29.6%

        \[\leadsto \frac{\color{blue}{\left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right) + 2 \cdot e^{-x}}}{2} \]
      2. +-commutative29.6%

        \[\leadsto \frac{\color{blue}{2 \cdot e^{-x} + \left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}}{2} \]
      3. *-commutative29.6%

        \[\leadsto \frac{\color{blue}{e^{-x} \cdot 2} + \left(\varepsilon \cdot \left(e^{-x} \cdot x\right) + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      4. *-commutative29.6%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\varepsilon \cdot \color{blue}{\left(x \cdot e^{-x}\right)} + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      5. associate-*r*29.6%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\color{blue}{\left(\varepsilon \cdot x\right) \cdot e^{-x}} + 0.5 \cdot \left({\varepsilon}^{2} \cdot \left(e^{-x} \cdot {x}^{2}\right)\right)\right)}{2} \]
      6. associate-*r*29.6%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \color{blue}{\left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \left(e^{-x} \cdot {x}^{2}\right)}\right)}{2} \]
      7. *-commutative29.6%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left({x}^{2} \cdot e^{-x}\right)}\right)}{2} \]
      8. associate-*r*29.6%

        \[\leadsto \frac{e^{-x} \cdot 2 + \left(\left(\varepsilon \cdot x\right) \cdot e^{-x} + \color{blue}{\left(\left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right) \cdot e^{-x}}\right)}{2} \]
      9. distribute-rgt-out33.3%

        \[\leadsto \frac{e^{-x} \cdot 2 + \color{blue}{e^{-x} \cdot \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right)}}{2} \]
      10. distribute-lft-out33.3%

        \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(2 + \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot {x}^{2}\right)\right)}}{2} \]
      11. unpow233.3%

        \[\leadsto \frac{e^{-x} \cdot \left(2 + \left(\varepsilon \cdot x + \left(0.5 \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left(x \cdot x\right)}\right)\right)}{2} \]
    11. Simplified74.5%

      \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right) \cdot x\right)\right)}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification73.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 3700000000000:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;\varepsilon \leq 1.1 \cdot 10^{+83}:\\ \;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\ \mathbf{elif}\;\varepsilon \leq 9.2 \cdot 10^{+174}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(2 + x \cdot \left(\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)\right)}{2}\\ \end{array} \]

Alternative 6: 70.7% accurate, 2.1× speedup?

\[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;\varepsilon \leq 8200000000000 \lor \neg \left(\varepsilon \leq 1.85 \cdot 10^{+82}\right):\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\ \end{array} \end{array} \]
NOTE: eps should be positive before calling this function
(FPCore (x eps)
 :precision binary64
 (if (or (<= eps 8200000000000.0) (not (<= eps 1.85e+82)))
   (/ (/ 2.0 (exp x)) 2.0)
   (/ (+ 2.0 (* x (+ x -2.0))) 2.0)))
eps = abs(eps);
double code(double x, double eps) {
	double tmp;
	if ((eps <= 8200000000000.0) || !(eps <= 1.85e+82)) {
		tmp = (2.0 / exp(x)) / 2.0;
	} else {
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	}
	return tmp;
}
NOTE: eps should be positive before calling this function
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if ((eps <= 8200000000000.0d0) .or. (.not. (eps <= 1.85d+82))) then
        tmp = (2.0d0 / exp(x)) / 2.0d0
    else
        tmp = (2.0d0 + (x * (x + (-2.0d0)))) / 2.0d0
    end if
    code = tmp
end function
eps = Math.abs(eps);
public static double code(double x, double eps) {
	double tmp;
	if ((eps <= 8200000000000.0) || !(eps <= 1.85e+82)) {
		tmp = (2.0 / Math.exp(x)) / 2.0;
	} else {
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	}
	return tmp;
}
eps = abs(eps)
def code(x, eps):
	tmp = 0
	if (eps <= 8200000000000.0) or not (eps <= 1.85e+82):
		tmp = (2.0 / math.exp(x)) / 2.0
	else:
		tmp = (2.0 + (x * (x + -2.0))) / 2.0
	return tmp
eps = abs(eps)
function code(x, eps)
	tmp = 0.0
	if ((eps <= 8200000000000.0) || !(eps <= 1.85e+82))
		tmp = Float64(Float64(2.0 / exp(x)) / 2.0);
	else
		tmp = Float64(Float64(2.0 + Float64(x * Float64(x + -2.0))) / 2.0);
	end
	return tmp
end
eps = abs(eps)
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if ((eps <= 8200000000000.0) || ~((eps <= 1.85e+82)))
		tmp = (2.0 / exp(x)) / 2.0;
	else
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	end
	tmp_2 = tmp;
end
NOTE: eps should be positive before calling this function
code[x_, eps_] := If[Or[LessEqual[eps, 8200000000000.0], N[Not[LessEqual[eps, 1.85e+82]], $MachinePrecision]], N[(N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 + N[(x * N[(x + -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
eps = |eps|\\
\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 8200000000000 \lor \neg \left(\varepsilon \leq 1.85 \cdot 10^{+82}\right):\\
\;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 8.2e12 or 1.8500000000000001e82 < eps

    1. Initial program 75.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub75.6%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity75.6%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub75.6%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified75.6%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 98.4%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg98.4%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative98.4%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative98.4%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified98.4%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 79.7%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in eps around 0 69.8%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    9. Step-by-step derivation
      1. exp-neg69.8%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      2. associate-*r/69.8%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      3. metadata-eval69.8%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified69.8%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]

    if 8.2e12 < eps < 1.8500000000000001e82

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub100.0%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative100.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in eps around 0 67.3%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    9. Step-by-step derivation
      1. exp-neg67.3%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      2. associate-*r/67.3%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      3. metadata-eval67.3%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified67.3%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]
    11. Taylor expanded in x around 0 82.0%

      \[\leadsto \frac{\color{blue}{2 + \left(-2 \cdot x + {x}^{2}\right)}}{2} \]
    12. Step-by-step derivation
      1. unpow282.0%

        \[\leadsto \frac{2 + \left(-2 \cdot x + \color{blue}{x \cdot x}\right)}{2} \]
      2. distribute-rgt-out82.0%

        \[\leadsto \frac{2 + \color{blue}{x \cdot \left(-2 + x\right)}}{2} \]
      3. +-commutative82.0%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(x + -2\right)}}{2} \]
    13. Simplified82.0%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(x + -2\right)}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification70.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 8200000000000 \lor \neg \left(\varepsilon \leq 1.85 \cdot 10^{+82}\right):\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\ \end{array} \]

Alternative 7: 63.5% accurate, 14.9× speedup?

\[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 1150 \lor \neg \left(x \leq 1.75 \cdot 10^{+260}\right) \land x \leq 8 \cdot 10^{+301}:\\ \;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
NOTE: eps should be positive before calling this function
(FPCore (x eps)
 :precision binary64
 (if (or (<= x 1150.0) (and (not (<= x 1.75e+260)) (<= x 8e+301)))
   (/ (+ 2.0 (* x (+ x -2.0))) 2.0)
   0.0))
eps = abs(eps);
double code(double x, double eps) {
	double tmp;
	if ((x <= 1150.0) || (!(x <= 1.75e+260) && (x <= 8e+301))) {
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
NOTE: eps should be positive before calling this function
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if ((x <= 1150.0d0) .or. (.not. (x <= 1.75d+260)) .and. (x <= 8d+301)) then
        tmp = (2.0d0 + (x * (x + (-2.0d0)))) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps = Math.abs(eps);
public static double code(double x, double eps) {
	double tmp;
	if ((x <= 1150.0) || (!(x <= 1.75e+260) && (x <= 8e+301))) {
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps = abs(eps)
def code(x, eps):
	tmp = 0
	if (x <= 1150.0) or (not (x <= 1.75e+260) and (x <= 8e+301)):
		tmp = (2.0 + (x * (x + -2.0))) / 2.0
	else:
		tmp = 0.0
	return tmp
eps = abs(eps)
function code(x, eps)
	tmp = 0.0
	if ((x <= 1150.0) || (!(x <= 1.75e+260) && (x <= 8e+301)))
		tmp = Float64(Float64(2.0 + Float64(x * Float64(x + -2.0))) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps = abs(eps)
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if ((x <= 1150.0) || (~((x <= 1.75e+260)) && (x <= 8e+301)))
		tmp = (2.0 + (x * (x + -2.0))) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
NOTE: eps should be positive before calling this function
code[x_, eps_] := If[Or[LessEqual[x, 1150.0], And[N[Not[LessEqual[x, 1.75e+260]], $MachinePrecision], LessEqual[x, 8e+301]]], N[(N[(2.0 + N[(x * N[(x + -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
\begin{array}{l}
eps = |eps|\\
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1150 \lor \neg \left(x \leq 1.75 \cdot 10^{+260}\right) \land x \leq 8 \cdot 10^{+301}:\\
\;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1150 or 1.7499999999999999e260 < x < 8.00000000000000042e301

    1. Initial program 70.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. div-sub70.0%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      2. +-rgt-identity70.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      3. div-sub70.0%

        \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Simplified70.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Taylor expanded in eps around inf 98.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. mul-1-neg98.0%

        \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      2. *-commutative98.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
      3. mul-1-neg98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
      4. exp-prod98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
      5. +-commutative98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      6. *-lft-identity98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
      7. metadata-eval98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
      8. cancel-sign-sub-inv98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
      9. exp-prod98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
      10. mul-1-neg98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
      11. *-commutative98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
      12. cancel-sign-sub-inv98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
      13. metadata-eval98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
      14. *-lft-identity98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
      15. +-commutative98.0%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
    6. Simplified98.0%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
    7. Taylor expanded in eps around 0 83.3%

      \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
    8. Taylor expanded in eps around 0 73.1%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    9. Step-by-step derivation
      1. exp-neg73.1%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      2. associate-*r/73.1%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      3. metadata-eval73.1%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified73.1%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]
    11. Taylor expanded in x around 0 68.8%

      \[\leadsto \frac{\color{blue}{2 + \left(-2 \cdot x + {x}^{2}\right)}}{2} \]
    12. Step-by-step derivation
      1. unpow268.8%

        \[\leadsto \frac{2 + \left(-2 \cdot x + \color{blue}{x \cdot x}\right)}{2} \]
      2. distribute-rgt-out68.8%

        \[\leadsto \frac{2 + \color{blue}{x \cdot \left(-2 + x\right)}}{2} \]
      3. +-commutative68.8%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(x + -2\right)}}{2} \]
    13. Simplified68.8%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(x + -2\right)}}{2} \]

    if 1150 < x < 1.7499999999999999e260 or 8.00000000000000042e301 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. Simplified100.0%

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
      2. Taylor expanded in eps around 0 59.1%

        \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
      3. Step-by-step derivation
        1. div-sub 59.1%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
        2. rec-exp 59.1%

          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
        3. mul-1-neg 59.1%

          \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
        4. +-inverses 59.1%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      4. Simplified 59.1%

        \[\leadsto \frac{\color{blue}{0}}{2} \]
    3. Recombined 2 regimes into one program.
    4. Final simplification 66.3%

      \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1150 \lor \neg \left(x \leq 1.75 \cdot 10^{+260}\right) \land x \leq 8 \cdot 10^{+301}:\\ \;\;\;\;\frac{2 + x \cdot \left(x + -2\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
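
    A minimal Python sketch of the recombined program above (my illustration, not part of the generated report; the function name is hypothetical). Note that eps drops out entirely after the Taylor expansions:

    def recombined_sketch(x, eps):
    	# Low regime: quadratic Taylor approximation of e^{-x}, i.e. (2 + x*(x - 2))/2.
    	# Otherwise return 0. Thresholds are taken verbatim from the regime split above.
    	if x <= 1150.0 or (not (x <= 1.7499999999999999e260) and x <= 8.00000000000000042e301):
    		return (2.0 + x * (x + -2.0)) / 2.0
    	return 0.0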

    Alternative 8: 56.9% accurate, 25.0× speedup?

    \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 1:\\ \;\;\;\;\frac{2 + x \cdot -2}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
    NOTE: eps should be positive before calling this function
    (FPCore (x eps)
     :precision binary64
     (if (<= x 1.0) (/ (+ 2.0 (* x -2.0)) 2.0) 0.0))
    eps = abs(eps);
    double code(double x, double eps) {
    	double tmp;
    	if (x <= 1.0) {
    		tmp = (2.0 + (x * -2.0)) / 2.0;
    	} else {
    		tmp = 0.0;
    	}
    	return tmp;
    }
    
    NOTE: eps should be positive before calling this function
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        real(8) :: tmp
        if (x <= 1.0d0) then
            tmp = (2.0d0 + (x * (-2.0d0))) / 2.0d0
        else
            tmp = 0.0d0
        end if
        code = tmp
    end function
    
    eps = Math.abs(eps);
    public static double code(double x, double eps) {
    	double tmp;
    	if (x <= 1.0) {
    		tmp = (2.0 + (x * -2.0)) / 2.0;
    	} else {
    		tmp = 0.0;
    	}
    	return tmp;
    }
    
    eps = abs(eps)
    def code(x, eps):
    	tmp = 0
    	if x <= 1.0:
    		tmp = (2.0 + (x * -2.0)) / 2.0
    	else:
    		tmp = 0.0
    	return tmp
    
    eps = abs(eps)
    function code(x, eps)
    	tmp = 0.0
    	if (x <= 1.0)
    		tmp = Float64(Float64(2.0 + Float64(x * -2.0)) / 2.0);
    	else
    		tmp = 0.0;
    	end
    	return tmp
    end
    
    eps = abs(eps)
    function tmp_2 = code(x, eps)
    	tmp = 0.0;
    	if (x <= 1.0)
    		tmp = (2.0 + (x * -2.0)) / 2.0;
    	else
    		tmp = 0.0;
    	end
    	tmp_2 = tmp;
    end
    
    NOTE: eps should be positive before calling this function
    code[x_, eps_] := If[LessEqual[x, 1.0], N[(N[(2.0 + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
    
    \begin{array}{l}
    eps = |eps|\\
    \\
    \begin{array}{l}
    \mathbf{if}\;x \leq 1:\\
    \;\;\;\;\frac{2 + x \cdot -2}{2}\\
    
    \mathbf{else}:\\
    \;\;\;\;0\\
    
    
    \end{array}
    \end{array}
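
    The freestanding eps = abs(eps) lines above are the report's notation for a preprocessing step that runs before the generated function, not code inside it; the original expression is unchanged under eps → -eps, so only |eps| matters (and Alternative 8 never reads eps at all). A hypothetical self-contained Python version with the preprocessing folded in, for illustration only:

    def alternative8(x, eps):
    	# Preprocessing from the report: take |eps| first (a no-op here, since
    	# this alternative does not use eps).
    	eps = abs(eps)
    	# Linear approximation (1 - x) for x <= 1, constant 0 otherwise.
    	if x <= 1.0:
    		return (2.0 + (x * -2.0)) / 2.0
    	return 0.0

    # Example: alternative8(0.25, 1e-6) returns 0.75; per the report this trades
    # accuracy (56.9%) for roughly a 25x speedup over the original program.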
    
    Derivation
    1. Split input into 2 regimes
    2. if x < 1

      1. Initial program 68.2%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. div-sub 68.2%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
        2. +-rgt-identity 68.2%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        3. div-sub 68.2%

          \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
      3. Simplified 68.2%

        \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      4. Taylor expanded in eps around inf 97.9%

        \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
      5. Step-by-step derivation
        1. mul-1-neg 97.9%

          \[\leadsto \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
        2. *-commutative 97.9%

          \[\leadsto \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]
        3. mul-1-neg 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]
        4. exp-prod 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]
        5. +-commutative 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]
        6. *-lft-identity 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{1 \cdot \varepsilon}\right) \cdot x\right)}\right)}{2} \]
        7. metadata-eval 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{\left(--1\right)} \cdot \varepsilon\right) \cdot x\right)}\right)}{2} \]
        8. cancel-sign-sub-inv 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 - -1 \cdot \varepsilon\right)} \cdot x\right)}\right)}{2} \]
        9. exp-prod 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-\color{blue}{e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}\right)}{2} \]
        10. mul-1-neg 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(1 - -1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]
        11. *-commutative 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
        12. cancel-sign-sub-inv 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(1 + \left(--1\right) \cdot \varepsilon\right)}}\right)}{2} \]
        13. metadata-eval 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{1} \cdot \varepsilon\right)}\right)}{2} \]
        14. *-lft-identity 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(1 + \color{blue}{\varepsilon}\right)}\right)}{2} \]
        15. +-commutative 97.9%

          \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]
      6. Simplified 97.9%

        \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
      7. Taylor expanded in eps around 0 84.5%

        \[\leadsto \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x}}\right)}{2} \]
      8. Taylor expanded in eps around 0 76.4%

        \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
      9. Step-by-step derivation
        1. exp-neg 76.4%

          \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
        2. associate-*r/ 76.4%

          \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
        3. metadata-eval 76.4%

          \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
      10. Simplified 76.4%

        \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]
      11. Taylor expanded in x around 0 56.0%

        \[\leadsto \frac{\color{blue}{2 + -2 \cdot x}}{2} \]
      12. Step-by-step derivation
        1. *-commutative 56.0%

          \[\leadsto \frac{2 + \color{blue}{x \cdot -2}}{2} \]
      13. Simplified 56.0%

        \[\leadsto \frac{\color{blue}{2 + x \cdot -2}}{2} \]

      if 1 < x

      1. Initial program 100.0%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Step-by-step derivation
        1. Simplified 100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        2. Taylor expanded in eps around 0 53.4%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
        3. Step-by-step derivation
          1. div-sub 53.4%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          2. rec-exp 53.4%

            \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
          3. mul-1-neg 53.4%

            \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
          4. +-inverses 53.4%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        4. Simplified 53.4%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      3. Recombined 2 regimes into one program.
      4. Final simplification 55.2%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1:\\ \;\;\;\;\frac{2 + x \cdot -2}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
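
      A quick numerical check of the final Taylor step in this derivation (my addition, not part of the generated report): the low branch (2 + x·(-2))/2 = 1 - x is just the first-order expansion of 2·e^{-x}/2 = e^{-x}, so the two diverge as x grows toward 1, consistent with the accuracy numbers above:

      import math

      for x in (0.001, 0.1, 0.5, 1.0):
      	exact_step = math.exp(-x)            # value before the Taylor step, 2*e^-x / 2
      	low_branch = (2.0 + x * -2.0) / 2.0  # Alternative 8's low branch, i.e. 1 - x
      	print(x, exact_step, low_branch, abs(exact_step - low_branch))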

      Alternative 9: 56.8% accurate, 74.1× speedup?

      \[\begin{array}{l} eps = |eps|\\ \\ \begin{array}{l} \mathbf{if}\;x \leq 1150:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
      NOTE: eps should be positive before calling this function
      (FPCore (x eps) :precision binary64 (if (<= x 1150.0) 1.0 0.0))
      eps = abs(eps);
      double code(double x, double eps) {
      	double tmp;
      	if (x <= 1150.0) {
      		tmp = 1.0;
      	} else {
      		tmp = 0.0;
      	}
      	return tmp;
      }
      
      NOTE: eps should be positive before calling this function
      real(8) function code(x, eps)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps
          real(8) :: tmp
          if (x <= 1150.0d0) then
              tmp = 1.0d0
          else
              tmp = 0.0d0
          end if
          code = tmp
      end function
      
      eps = Math.abs(eps);
      public static double code(double x, double eps) {
      	double tmp;
      	if (x <= 1150.0) {
      		tmp = 1.0;
      	} else {
      		tmp = 0.0;
      	}
      	return tmp;
      }
      
      eps = abs(eps)
      def code(x, eps):
      	tmp = 0
      	if x <= 1150.0:
      		tmp = 1.0
      	else:
      		tmp = 0.0
      	return tmp
      
      eps = abs(eps)
      function code(x, eps)
      	tmp = 0.0
      	if (x <= 1150.0)
      		tmp = 1.0;
      	else
      		tmp = 0.0;
      	end
      	return tmp
      end
      
      eps = abs(eps)
      function tmp_2 = code(x, eps)
      	tmp = 0.0;
      	if (x <= 1150.0)
      		tmp = 1.0;
      	else
      		tmp = 0.0;
      	end
      	tmp_2 = tmp;
      end
      
      NOTE: eps should be positive before calling this function
      code[x_, eps_] := If[LessEqual[x, 1150.0], 1.0, 0.0]
      
      \begin{array}{l}
      eps = |eps|\\
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq 1150:\\
      \;\;\;\;1\\
      
      \mathbf{else}:\\
      \;\;\;\;0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if x < 1150

        1. Initial program 68.4%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. div-sub 68.4%

            \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
          2. +-rgt-identity 68.4%

            \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
          3. div-sub 68.4%

            \[\leadsto \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
        3. Simplified 68.4%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        4. Taylor expanded in x around 0 55.2%

          \[\leadsto \frac{\color{blue}{2}}{2} \]

        if 1150 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. Simplified 100.0%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          2. Taylor expanded in eps around 0 54.1%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          3. Step-by-step derivation
            1. div-sub 54.1%

              \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
            2. rec-exp 54.1%

              \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
            3. mul-1-neg 54.1%

              \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
            4. +-inverses 54.1%

              \[\leadsto \frac{\color{blue}{0}}{2} \]
          4. Simplified 54.1%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        3. Recombined 2 regimes into one program.
        4. Final simplification 54.8%

          \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1150:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
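
        For context (my annotation, not part of the generated report), the constant-1 branch comes from the "Taylor expanded in x around 0" step in the low regime above: at x = 0 both exponentials equal 1 and the 1/ε terms cancel exactly:

          \[\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{0} = \left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - 1\right) = 2, \qquad \text{so the regime value is } \frac{2}{2} = 1. \]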

        Alternative 10: 16.5% accurate, 227.0× speedup?

        \[\begin{array}{l} eps = |eps|\\ \\ 0 \end{array} \]
        NOTE: eps should be positive before calling this function
        (FPCore (x eps) :precision binary64 0.0)
        eps = abs(eps);
        double code(double x, double eps) {
        	return 0.0;
        }
        
        NOTE: eps should be positive before calling this function
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            code = 0.0d0
        end function
        
        eps = Math.abs(eps);
        public static double code(double x, double eps) {
        	return 0.0;
        }
        
        eps = abs(eps)
        def code(x, eps):
        	return 0.0
        
        eps = abs(eps)
        function code(x, eps)
        	return 0.0
        end
        
        eps = abs(eps)
        function tmp = code(x, eps)
        	tmp = 0.0;
        end
        
        NOTE: eps should be positive before calling this function
        code[x_, eps_] := 0.0
        
        \begin{array}{l}
        eps = |eps|\\
        \\
        0
        \end{array}
        
        Derivation
        1. Initial program 77.6%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Step-by-step derivation
          1. Simplified 64.1%

            \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
          2. Taylor expanded in eps around 0 17.3%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          3. Step-by-step derivation
            1. div-sub 17.3%

              \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
            2. rec-exp 17.3%

              \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]
            3. mul-1-neg 17.3%

              \[\leadsto \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]
            4. +-inverses 17.5%

              \[\leadsto \frac{\color{blue}{0}}{2} \]
          4. Simplified 17.5%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
          5. Final simplification 17.5%

            \[\leadsto 0 \]

          Reproduce

          herbie shell --seed 2023250 
          (FPCore (x eps)
            :name "NMSE Section 6.1 mentioned, A"
            :precision binary64
            (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
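
          To reproduce outside the interactive shell, the FPCore above can also be saved to a file and run in batch mode. The exact subcommand and flags depend on your Herbie version; a typical invocation (file and directory names here are placeholders) would look something like:

          herbie report --seed 2023250 input.fpcore output/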