NMSE Section 6.1 mentioned, A

Percentage Accurate: 72.8% → 99.8%
Time: 19.9s
Alternatives: 17
Speedup: 1.7×

Specification

?
\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; NMSE 6.1 specification: ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2.
;; Direct form; the two 1/eps-scaled terms nearly cancel for small |eps| (72.8% accuracy per report).
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
/* NMSE 6.1, direct binary64 evaluation: ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2.
   The two 1/eps-scaled terms nearly cancel for small |eps| (72.8% accuracy per this report). */
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! NMSE 6.1, direct double-precision evaluation of the specification.
! The two 1/eps-scaled terms nearly cancel for small |eps| (72.8% accuracy per this report).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
// NMSE 6.1, direct double evaluation of the specification.
// The two 1/eps-scaled terms nearly cancel for small |eps| (72.8% accuracy per this report).
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
# NMSE 6.1, direct evaluation; the 1/eps terms nearly cancel for small |eps| (72.8% accuracy).
# NOTE(review): assumes `import math` at module level -- confirm in the enclosing file.
def code(x, eps):
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
# NMSE 6.1, direct evaluation; explicit Float64() wrappers pin every intermediate to binary64.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% NMSE 6.1, direct evaluation of the specification (72.8% accuracy per this report).
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 17 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 72.8% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; Initial program (72.8% accurate, 1.0x speedup) -- identical to the specification above.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
/* Initial program (72.8% accurate): direct binary64 evaluation of the NMSE 6.1 expression. */
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Initial program (72.8% accurate): direct double-precision evaluation of the NMSE 6.1 expression.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
// Initial program (72.8% accurate): direct double evaluation of the NMSE 6.1 expression.
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
# Initial program (72.8% accurate): direct evaluation of the NMSE 6.1 expression.
# NOTE(review): assumes `import math` at module level -- confirm in the enclosing file.
def code(x, eps):
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
# Initial program (72.8% accurate); Float64() wrappers pin every intermediate to binary64.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Initial program (72.8% accurate): direct evaluation of the NMSE 6.1 expression.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := \left(x + 1\right) \cdot e^{-x}\\ \mathbf{if}\;eps\_m \leq 2 \cdot 10^{-40}:\\ \;\;\;\;\frac{t\_0 + t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + \frac{1}{e^{x + eps\_m \cdot x}}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Alternative 1 (99.8% accurate). Expects eps_m = |eps| (report preprocessing step).
;; eps_m <= 2e-40: Taylor form t_0 = (x+1)e^{-x}; else regrouped exponentials avoid the 1/eps cancellation.
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (* (+ x 1.0) (exp (- x)))))
   (if (<= eps_m 2e-40)
     (/ (+ t_0 t_0) 2.0)
     (/ (+ (exp (* x (+ eps_m -1.0))) (/ 1.0 (exp (+ x (* eps_m x))))) 2.0))))
eps_m = fabs(eps);
/* Herbie alternative 1 (99.8% accurate). Caller must pass eps_m = fabs(eps) (report preprocessing).
   eps_m <= 2e-40: Taylor form t_0 = (x+1)*e^-x; else regrouped exponentials avoid the 1/eps cancellation. */
double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * exp(-x);
	double tmp;
	if (eps_m <= 2e-40) {
		tmp = (t_0 + t_0) / 2.0;
	} else {
		tmp = (exp((x * (eps_m + -1.0))) + (1.0 / exp((x + (eps_m * x))))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Herbie alternative 1 (99.8% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
! eps_m <= 2d-40: Taylor form t_0 = (x+1)*exp(-x); else regrouped exponentials.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (x + 1.0d0) * exp(-x)
    if (eps_m <= 2d-40) then
        tmp = (t_0 + t_0) / 2.0d0
    else
        tmp = (exp((x * (eps_m + (-1.0d0)))) + (1.0d0 / exp((x + (eps_m * x))))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
// Herbie alternative 1 (99.8% accurate). Caller must pass eps_m = Math.abs(eps) (report preprocessing).
// eps_m <= 2e-40: Taylor form t_0 = (x+1)*e^-x; else regrouped exponentials.
public static double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * Math.exp(-x);
	double tmp;
	if (eps_m <= 2e-40) {
		tmp = (t_0 + t_0) / 2.0;
	} else {
		tmp = (Math.exp((x * (eps_m + -1.0))) + (1.0 / Math.exp((x + (eps_m * x))))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
# Herbie alternative 1 (99.8% accurate). Caller must pass eps_m = math.fabs(eps) (report preprocessing).
# eps_m <= 2e-40: Taylor form t_0 = (x+1)*e^-x; else regrouped exponentials.
# NOTE(review): assumes `import math` at module level -- confirm in the enclosing file.
def code(x, eps_m):
	t_0 = (x + 1.0) * math.exp(-x)
	tmp = 0
	if eps_m <= 2e-40:
		tmp = (t_0 + t_0) / 2.0
	else:
		tmp = (math.exp((x * (eps_m + -1.0))) + (1.0 / math.exp((x + (eps_m * x))))) / 2.0
	return tmp
eps_m = abs(eps)
# Herbie alternative 1 (99.8% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
# eps_m <= 2e-40: Taylor form t_0 = (x+1)*e^-x; else regrouped exponentials.
function code(x, eps_m)
	t_0 = Float64(Float64(x + 1.0) * exp(Float64(-x)))
	tmp = 0.0
	if (eps_m <= 2e-40)
		tmp = Float64(Float64(t_0 + t_0) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(x * Float64(eps_m + -1.0))) + Float64(1.0 / exp(Float64(x + Float64(eps_m * x))))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Herbie alternative 1 (99.8% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
% eps_m <= 2e-40: Taylor form t_0 = (x+1)*exp(-x); else regrouped exponentials.
function tmp_2 = code(x, eps_m)
	t_0 = (x + 1.0) * exp(-x);
	tmp = 0.0;
	if (eps_m <= 2e-40)
		tmp = (t_0 + t_0) / 2.0;
	else
		tmp = (exp((x * (eps_m + -1.0))) + (1.0 / exp((x + (eps_m * x))))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(x + 1.0), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[eps$95$m, 2e-40], N[(N[(t$95$0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(1.0 / N[Exp[N[(x + N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := \left(x + 1\right) \cdot e^{-x}\\
\mathbf{if}\;eps\_m \leq 2 \cdot 10^{-40}:\\
\;\;\;\;\frac{t\_0 + t\_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + \frac{1}{e^{x + eps\_m \cdot x}}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 1.9999999999999999e-40

    1. Initial program 60.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified60.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 73.3%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Step-by-step derivation
      1. distribute-rgt1-in73.3%

        \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{2} \]
      2. mul-1-neg73.3%

        \[\leadsto \frac{\left(x + 1\right) \cdot e^{\color{blue}{-x}} - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{2} \]
      3. distribute-lft-out73.3%

        \[\leadsto \frac{\left(x + 1\right) \cdot e^{-x} - \color{blue}{-1 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)}}{2} \]
      4. distribute-rgt1-in73.9%

        \[\leadsto \frac{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}}{2} \]
      5. mul-1-neg73.9%

        \[\leadsto \frac{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)}{2} \]
    6. Simplified73.9%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]

    if 1.9999999999999999e-40 < eps

    1. Initial program 98.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified90.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification82.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 2 \cdot 10^{-40}:\\ \;\;\;\;\frac{\left(x + 1\right) \cdot e^{-x} + \left(x + 1\right) \cdot e^{-x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 99.8% accurate, 1.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := e^{-x}\\ \mathbf{if}\;eps\_m \leq 2 \cdot 10^{-38}:\\ \;\;\;\;\frac{t\_0 \cdot \left(x + 2\right) + x \cdot t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + \frac{1}{e^{x + eps\_m \cdot x}}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Alternative 2 (99.8% accurate). Expects eps_m = |eps| (report preprocessing step).
;; eps_m <= 2e-38: series form t_0*(x+2) + x*t_0 with t_0 = e^{-x}; else regrouped exponentials.
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (exp (- x))))
   (if (<= eps_m 2e-38)
     (/ (+ (* t_0 (+ x 2.0)) (* x t_0)) 2.0)
     (/ (+ (exp (* x (+ eps_m -1.0))) (/ 1.0 (exp (+ x (* eps_m x))))) 2.0))))
eps_m = fabs(eps);
/* Herbie alternative 2 (99.8% accurate). Caller must pass eps_m = fabs(eps) (report preprocessing).
   eps_m <= 2e-38: series form t_0*(x+2) + x*t_0 with t_0 = e^-x; else regrouped exponentials. */
double code(double x, double eps_m) {
	double t_0 = exp(-x);
	double tmp;
	if (eps_m <= 2e-38) {
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0;
	} else {
		tmp = (exp((x * (eps_m + -1.0))) + (1.0 / exp((x + (eps_m * x))))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Herbie alternative 2 (99.8% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
! eps_m <= 2d-38: series form t_0*(x+2) + x*t_0 with t_0 = exp(-x); else regrouped exponentials.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp(-x)
    if (eps_m <= 2d-38) then
        tmp = ((t_0 * (x + 2.0d0)) + (x * t_0)) / 2.0d0
    else
        tmp = (exp((x * (eps_m + (-1.0d0)))) + (1.0d0 / exp((x + (eps_m * x))))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
// Herbie alternative 2 (99.8% accurate). Caller must pass eps_m = Math.abs(eps) (report preprocessing).
// eps_m <= 2e-38: series form t_0*(x+2) + x*t_0 with t_0 = e^-x; else regrouped exponentials.
public static double code(double x, double eps_m) {
	double t_0 = Math.exp(-x);
	double tmp;
	if (eps_m <= 2e-38) {
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0;
	} else {
		tmp = (Math.exp((x * (eps_m + -1.0))) + (1.0 / Math.exp((x + (eps_m * x))))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
# Herbie alternative 2 (99.8% accurate). Caller must pass eps_m = math.fabs(eps) (report preprocessing).
# eps_m <= 2e-38: series form t_0*(x+2) + x*t_0 with t_0 = e^-x; else regrouped exponentials.
# NOTE(review): assumes `import math` at module level -- confirm in the enclosing file.
def code(x, eps_m):
	t_0 = math.exp(-x)
	tmp = 0
	if eps_m <= 2e-38:
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0
	else:
		tmp = (math.exp((x * (eps_m + -1.0))) + (1.0 / math.exp((x + (eps_m * x))))) / 2.0
	return tmp
eps_m = abs(eps)
# Herbie alternative 2 (99.8% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
# eps_m <= 2e-38: series form t_0*(x+2) + x*t_0 with t_0 = e^-x; else regrouped exponentials.
function code(x, eps_m)
	t_0 = exp(Float64(-x))
	tmp = 0.0
	if (eps_m <= 2e-38)
		tmp = Float64(Float64(Float64(t_0 * Float64(x + 2.0)) + Float64(x * t_0)) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(x * Float64(eps_m + -1.0))) + Float64(1.0 / exp(Float64(x + Float64(eps_m * x))))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Herbie alternative 2 (99.8% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
% eps_m <= 2e-38: series form t_0*(x+2) + x*t_0 with t_0 = exp(-x); else regrouped exponentials.
function tmp_2 = code(x, eps_m)
	t_0 = exp(-x);
	tmp = 0.0;
	if (eps_m <= 2e-38)
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0;
	else
		tmp = (exp((x * (eps_m + -1.0))) + (1.0 / exp((x + (eps_m * x))))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[eps$95$m, 2e-38], N[(N[(N[(t$95$0 * N[(x + 2.0), $MachinePrecision]), $MachinePrecision] + N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(1.0 / N[Exp[N[(x + N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;eps\_m \leq 2 \cdot 10^{-38}:\\
\;\;\;\;\frac{t\_0 \cdot \left(x + 2\right) + x \cdot t\_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + \frac{1}{e^{x + eps\_m \cdot x}}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 1.9999999999999999e-38

    1. Initial program 60.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified60.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 73.3%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Step-by-step derivation
      1. associate--r+73.3%

        \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
      2. associate-*r*73.3%

        \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
      3. mul-1-neg73.3%

        \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
      4. cancel-sign-sub73.3%

        \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
      5. distribute-rgt1-in73.3%

        \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
      6. distribute-rgt-out--73.9%

        \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
      7. mul-1-neg73.9%

        \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
      8. mul-1-neg73.9%

        \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
    6. Simplified73.9%

      \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
    7. Taylor expanded in x around 0 73.9%

      \[\leadsto \frac{e^{-x} \cdot \color{blue}{\left(2 + x\right)} + x \cdot e^{-x}}{2} \]
    8. Step-by-step derivation
      1. +-commutative73.9%

        \[\leadsto \frac{e^{-x} \cdot \color{blue}{\left(x + 2\right)} + x \cdot e^{-x}}{2} \]
    9. Simplified73.9%

      \[\leadsto \frac{e^{-x} \cdot \color{blue}{\left(x + 2\right)} + x \cdot e^{-x}}{2} \]

    if 1.9999999999999999e-38 < eps

    1. Initial program 98.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified90.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification82.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 2 \cdot 10^{-38}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(x + 2\right) + x \cdot e^{-x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 99.7% accurate, 1.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := e^{-x}\\ \mathbf{if}\;eps\_m \leq 10^{-37}:\\ \;\;\;\;\frac{t\_0 \cdot \left(x + 2\right) + x \cdot t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + e^{eps\_m \cdot \left(-x\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Alternative 3 (99.7% accurate). Expects eps_m = |eps| (report preprocessing step).
;; eps_m <= 1e-37: series form t_0*(x+2) + x*t_0 with t_0 = e^{-x};
;; else branch replaces the reciprocal 1/e^{x+eps_m*x} with e^{eps_m*(-x)}.
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (exp (- x))))
   (if (<= eps_m 1e-37)
     (/ (+ (* t_0 (+ x 2.0)) (* x t_0)) 2.0)
     (/ (+ (exp (* x (+ eps_m -1.0))) (exp (* eps_m (- x)))) 2.0))))
eps_m = fabs(eps);
/* Herbie alternative 3 (99.7% accurate). Caller must pass eps_m = fabs(eps) (report preprocessing).
   eps_m <= 1e-37: series form with t_0 = e^-x; else branch uses exp(eps_m * -x) instead of a reciprocal. */
double code(double x, double eps_m) {
	double t_0 = exp(-x);
	double tmp;
	if (eps_m <= 1e-37) {
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0;
	} else {
		tmp = (exp((x * (eps_m + -1.0))) + exp((eps_m * -x))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Herbie alternative 3 (99.7% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
! eps_m <= 1d-37: series form with t_0 = exp(-x); else branch uses exp(eps_m * -x) instead of a reciprocal.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp(-x)
    if (eps_m <= 1d-37) then
        tmp = ((t_0 * (x + 2.0d0)) + (x * t_0)) / 2.0d0
    else
        tmp = (exp((x * (eps_m + (-1.0d0)))) + exp((eps_m * -x))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
// Herbie alternative 3 (99.7% accurate). Caller must pass eps_m = Math.abs(eps) (report preprocessing).
// eps_m <= 1e-37: series form with t_0 = e^-x; else branch uses exp(eps_m * -x) instead of a reciprocal.
public static double code(double x, double eps_m) {
	double t_0 = Math.exp(-x);
	double tmp;
	if (eps_m <= 1e-37) {
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0;
	} else {
		tmp = (Math.exp((x * (eps_m + -1.0))) + Math.exp((eps_m * -x))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
# Herbie alternative 3 (99.7% accurate). Caller must pass eps_m = math.fabs(eps) (report preprocessing).
# eps_m <= 1e-37: series form with t_0 = e^-x; else branch uses exp(eps_m * -x) instead of a reciprocal.
# NOTE(review): assumes `import math` at module level -- confirm in the enclosing file.
def code(x, eps_m):
	t_0 = math.exp(-x)
	tmp = 0
	if eps_m <= 1e-37:
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0
	else:
		tmp = (math.exp((x * (eps_m + -1.0))) + math.exp((eps_m * -x))) / 2.0
	return tmp
eps_m = abs(eps)
# Herbie alternative 3 (99.7% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
# eps_m <= 1e-37: series form with t_0 = e^-x; else branch uses exp(eps_m * -x) instead of a reciprocal.
function code(x, eps_m)
	t_0 = exp(Float64(-x))
	tmp = 0.0
	if (eps_m <= 1e-37)
		tmp = Float64(Float64(Float64(t_0 * Float64(x + 2.0)) + Float64(x * t_0)) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(x * Float64(eps_m + -1.0))) + exp(Float64(eps_m * Float64(-x)))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Herbie alternative 3 (99.7% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
% eps_m <= 1e-37: series form with t_0 = exp(-x); else branch uses exp(eps_m * -x) instead of a reciprocal.
function tmp_2 = code(x, eps_m)
	t_0 = exp(-x);
	tmp = 0.0;
	if (eps_m <= 1e-37)
		tmp = ((t_0 * (x + 2.0)) + (x * t_0)) / 2.0;
	else
		tmp = (exp((x * (eps_m + -1.0))) + exp((eps_m * -x))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[eps$95$m, 1e-37], N[(N[(N[(t$95$0 * N[(x + 2.0), $MachinePrecision]), $MachinePrecision] + N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(eps$95$m * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;eps\_m \leq 10^{-37}:\\
\;\;\;\;\frac{t\_0 \cdot \left(x + 2\right) + x \cdot t\_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + e^{eps\_m \cdot \left(-x\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 1.00000000000000007e-37

    1. Initial program 60.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified60.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 73.3%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Step-by-step derivation
      1. associate--r+73.3%

        \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
      2. associate-*r*73.3%

        \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
      3. mul-1-neg73.3%

        \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
      4. cancel-sign-sub73.3%

        \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
      5. distribute-rgt1-in73.3%

        \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
      6. distribute-rgt-out--73.9%

        \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
      7. mul-1-neg73.9%

        \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
      8. mul-1-neg73.9%

        \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
    6. Simplified73.9%

      \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
    7. Taylor expanded in x around 0 73.9%

      \[\leadsto \frac{e^{-x} \cdot \color{blue}{\left(2 + x\right)} + x \cdot e^{-x}}{2} \]
    8. Step-by-step derivation
      1. +-commutative73.9%

        \[\leadsto \frac{e^{-x} \cdot \color{blue}{\left(x + 2\right)} + x \cdot e^{-x}}{2} \]
    9. Simplified73.9%

      \[\leadsto \frac{e^{-x} \cdot \color{blue}{\left(x + 2\right)} + x \cdot e^{-x}}{2} \]

    if 1.00000000000000007e-37 < eps

    1. Initial program 98.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified90.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
    8. Taylor expanded in x around inf 100.0%

      \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\frac{1}{e^{\varepsilon \cdot x}}}}{2} \]
    9. Step-by-step derivation
      1. rec-exp100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{e^{-\varepsilon \cdot x}}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + e^{-\color{blue}{x \cdot \varepsilon}}}{2} \]
      3. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + e^{\color{blue}{x \cdot \left(-\varepsilon\right)}}}{2} \]
    10. Simplified100.0%

      \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{e^{x \cdot \left(-\varepsilon\right)}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification82.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 10^{-37}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(x + 2\right) + x \cdot e^{-x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 99.7% accurate, 1.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 10^{-37}:\\ \;\;\;\;\frac{\frac{eps\_m \cdot \left(e^{-x} \cdot \left(2 + x \cdot 2\right)\right)}{eps\_m}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + e^{eps\_m \cdot \left(-x\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Alternative 4 (99.7% accurate). Expects eps_m = |eps| (report preprocessing step).
;; NOTE(review): the tiny-eps branch multiplies and divides by eps_m; for eps_m <= 1e-37 the
;; product may underflow -- this is Herbie's emitted form, confirm before hand-editing.
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 1e-37)
   (/ (/ (* eps_m (* (exp (- x)) (+ 2.0 (* x 2.0)))) eps_m) 2.0)
   (/ (+ (exp (* x (+ eps_m -1.0))) (exp (* eps_m (- x)))) 2.0)))
eps_m = fabs(eps);
/* Herbie alternative 4 (99.7% accurate). Caller must pass eps_m = fabs(eps) (report preprocessing).
   NOTE(review): the tiny-eps branch multiplies and divides by eps_m; for eps_m <= 1e-37 the
   product may underflow -- this mirrors Herbie's emitted expression, confirm before hand-editing. */
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 1e-37) {
		tmp = ((eps_m * (exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0;
	} else {
		tmp = (exp((x * (eps_m + -1.0))) + exp((eps_m * -x))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Herbie alternative 4 (99.7% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
! NOTE(review): the tiny-eps branch multiplies and divides by eps_m; for eps_m <= 1d-37 the
! product may underflow -- this mirrors Herbie's emitted expression, confirm before hand-editing.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (eps_m <= 1d-37) then
        tmp = ((eps_m * (exp(-x) * (2.0d0 + (x * 2.0d0)))) / eps_m) / 2.0d0
    else
        tmp = (exp((x * (eps_m + (-1.0d0)))) + exp((eps_m * -x))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
// Herbie alternative 4 (99.7% accurate). Caller must pass eps_m = Math.abs(eps) (report preprocessing).
// NOTE(review): the tiny-eps branch multiplies and divides by eps_m; for eps_m <= 1e-37 the
// product may underflow -- this mirrors Herbie's emitted expression, confirm before hand-editing.
public static double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 1e-37) {
		tmp = ((eps_m * (Math.exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0;
	} else {
		tmp = (Math.exp((x * (eps_m + -1.0))) + Math.exp((eps_m * -x))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
# Herbie alternative 4 (99.7% accurate). Caller must pass eps_m = math.fabs(eps) (report preprocessing).
# NOTE(review): the tiny-eps branch multiplies and divides by eps_m; for eps_m <= 1e-37 the
# product may underflow -- this mirrors Herbie's emitted expression, confirm before hand-editing.
# NOTE(review): assumes `import math` at module level -- confirm in the enclosing file.
def code(x, eps_m):
	tmp = 0
	if eps_m <= 1e-37:
		tmp = ((eps_m * (math.exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0
	else:
		tmp = (math.exp((x * (eps_m + -1.0))) + math.exp((eps_m * -x))) / 2.0
	return tmp
eps_m = abs(eps)
# Herbie alternative 4 (99.7% accurate). Caller must pass eps_m = abs(eps) (report preprocessing).
# NOTE(review): the tiny-eps branch multiplies and divides by eps_m; for eps_m <= 1e-37 the
# product may underflow -- this mirrors Herbie's emitted expression, confirm before hand-editing.
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 1e-37)
		tmp = Float64(Float64(Float64(eps_m * Float64(exp(Float64(-x)) * Float64(2.0 + Float64(x * 2.0)))) / eps_m) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(x * Float64(eps_m + -1.0))) + exp(Float64(eps_m * Float64(-x)))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Rewritten NMSE 6.1 expression; the caller passes eps_m = abs(eps).
function tmp_2 = code(x, eps_m)
	if (eps_m <= 1e-37)
		% Tiny-eps regime; eps_m * (...) / eps_m kept from the derivation.
		tmp_2 = ((eps_m * (exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0;
	else
		% General regime: (e^{x(eps-1)} + e^{-eps x}) / 2.
		tmp_2 = (exp((x * (eps_m + -1.0))) + exp((eps_m * -x))) / 2.0;
	end
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 1e-37], N[(N[(N[(eps$95$m * N[(N[Exp[(-x)], $MachinePrecision] * N[(2.0 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(eps$95$m * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 10^{-37}:\\
\;\;\;\;\frac{\frac{eps\_m \cdot \left(e^{-x} \cdot \left(2 + x \cdot 2\right)\right)}{eps\_m}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + e^{eps\_m \cdot \left(-x\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 1.00000000000000007e-37

    1. Initial program 60.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 50.4%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 34.0%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. Simplified73.9%

        \[\leadsto \frac{\color{blue}{\frac{\varepsilon \cdot \left(e^{-x} \cdot \left(2 + 2 \cdot x\right)\right) + 0}{\varepsilon}}}{2} \]

      if 1.00000000000000007e-37 < eps

      1. Initial program 98.8%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Simplified90.5%

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
      3. Add Preprocessing
      4. Taylor expanded in eps around inf 100.0%

        \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
      5. Taylor expanded in eps around inf 100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
      6. Step-by-step derivation
        1. *-commutative100.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
      7. Simplified100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
      8. Taylor expanded in x around inf 100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\frac{1}{e^{\varepsilon \cdot x}}}}{2} \]
      9. Step-by-step derivation
        1. rec-exp100.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{e^{-\varepsilon \cdot x}}}{2} \]
        2. *-commutative100.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + e^{-\color{blue}{x \cdot \varepsilon}}}{2} \]
        3. distribute-rgt-neg-in100.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + e^{\color{blue}{x \cdot \left(-\varepsilon\right)}}}{2} \]
      10. Simplified100.0%

        \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{e^{x \cdot \left(-\varepsilon\right)}}}{2} \]
    6. Recombined 2 regimes into one program.
    7. Final simplification81.9%

      \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 10^{-37}:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(e^{-x} \cdot \left(2 + x \cdot 2\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \end{array} \]
    8. Add Preprocessing

    Alternative 5: 78.7% accurate, 1.6× speedup?

    \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 2.35 \cdot 10^{-5}:\\ \;\;\;\;\frac{\frac{eps\_m \cdot \left(e^{-x} \cdot \left(2 + x \cdot 2\right)\right)}{eps\_m}}{2}\\ \mathbf{elif}\;eps\_m \leq 4 \cdot 10^{+145} \lor \neg \left(eps\_m \leq 6.8 \cdot 10^{+247}\right):\\ \;\;\;\;\frac{\left(1 + \left(\frac{1}{eps\_m} + x \cdot \left(\left(-1 + \frac{-1}{eps\_m}\right) \cdot \left(1 - eps\_m\right)\right)\right)\right) + e^{x \cdot \left(-1 - eps\_m\right)} \cdot \left(\frac{-1}{eps\_m} - -1\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + \left(1 - eps\_m \cdot x\right)}{2}\\ \end{array} \end{array} \]
    eps_m = (fabs.f64 eps)
    ;; Alternative 5: three regimes in eps_m = |eps|.
    (FPCore (x eps_m)
     :precision binary64
     ;; Tiny-eps regime; the (* eps_m ...)/eps_m pair is kept from the derivation.
     (if (<= eps_m 2.35e-5)
       (/ (/ (* eps_m (* (exp (- x)) (+ 2.0 (* x 2.0)))) eps_m) 2.0)
       ;; Mid band (<= 4e145) and huge band (> 6.8e247) share the series-in-x form.
       (if (or (<= eps_m 4e+145) (not (<= eps_m 6.8e+247)))
         (/
          (+
           (+
            1.0
            (+ (/ 1.0 eps_m) (* x (* (+ -1.0 (/ -1.0 eps_m)) (- 1.0 eps_m)))))
           (* (exp (* x (- -1.0 eps_m))) (- (/ -1.0 eps_m) -1.0)))
          2.0)
         ;; 4e145 < eps_m <= 6.8e247.
         (/ (+ (exp (* x (+ eps_m -1.0))) (- 1.0 (* eps_m x))) 2.0))))
    eps_m = fabs(eps);
    double code(double x, double eps_m) {
    	double tmp;
    	if (eps_m <= 2.35e-5) {
    		tmp = ((eps_m * (exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0;
    	} else if ((eps_m <= 4e+145) || !(eps_m <= 6.8e+247)) {
    		tmp = ((1.0 + ((1.0 / eps_m) + (x * ((-1.0 + (-1.0 / eps_m)) * (1.0 - eps_m))))) + (exp((x * (-1.0 - eps_m))) * ((-1.0 / eps_m) - -1.0))) / 2.0;
    	} else {
    		tmp = (exp((x * (eps_m + -1.0))) + (1.0 - (eps_m * x))) / 2.0;
    	}
    	return tmp;
    }
    
    eps_m = abs(eps)
    ! Alternative 5 of the NMSE 6.1 rewrite; the caller passes eps_m = abs(eps).
    real(8) function code(x, eps_m)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps_m
        real(8) :: tmp
        ! Tiny-eps regime; eps_m*(...)/eps_m kept from the derivation.
        if (eps_m <= 2.35d-5) then
            tmp = ((eps_m * (exp(-x) * (2.0d0 + (x * 2.0d0)))) / eps_m) / 2.0d0
        ! Mid band (<= 4e145) and huge band (> 6.8e247).
        else if ((eps_m <= 4d+145) .or. (.not. (eps_m <= 6.8d+247))) then
            tmp = ((1.0d0 + ((1.0d0 / eps_m) + (x * (((-1.0d0) + ((-1.0d0) / eps_m)) * (1.0d0 - eps_m))))) + (exp((x * ((-1.0d0) - eps_m))) * (((-1.0d0) / eps_m) - (-1.0d0)))) / 2.0d0
        ! 4e145 < eps_m <= 6.8e247.
        else
            tmp = (exp((x * (eps_m + (-1.0d0)))) + (1.0d0 - (eps_m * x))) / 2.0d0
        end if
        code = tmp
    end function
    
    eps_m = Math.abs(eps);
    /** Alternative 5 of the NMSE 6.1 rewrite; caller passes eps_m = Math.abs(eps). */
    public static double code(double x, double eps_m) {
    	if (eps_m <= 2.35e-5) {
    		// Tiny-eps regime; eps_m * (...) / eps_m kept from the derivation.
    		return ((eps_m * (Math.exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0;
    	}
    	if ((eps_m <= 4e+145) || !(eps_m <= 6.8e+247)) {
    		// Mid band and huge band: series in x plus the surviving exponential.
    		return ((1.0 + ((1.0 / eps_m) + (x * ((-1.0 + (-1.0 / eps_m)) * (1.0 - eps_m))))) + (Math.exp((x * (-1.0 - eps_m))) * ((-1.0 / eps_m) - -1.0))) / 2.0;
    	}
    	// 4e145 < eps_m <= 6.8e247.
    	return (Math.exp((x * (eps_m + -1.0))) + (1.0 - (eps_m * x))) / 2.0;
    }
    
    eps_m = math.fabs(eps)
    def code(x, eps_m):
    	"""Alternative 5 of the NMSE 6.1 rewrite; caller passes eps_m = abs(eps)."""
    	if eps_m <= 2.35e-5:
    		# Tiny-eps regime; eps_m * (...) / eps_m kept from the derivation.
    		return ((eps_m * (math.exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0
    	if (eps_m <= 4e+145) or not (eps_m <= 6.8e+247):
    		# Mid band and huge band: series in x plus the surviving exponential.
    		return ((1.0 + ((1.0 / eps_m) + (x * ((-1.0 + (-1.0 / eps_m)) * (1.0 - eps_m))))) + (math.exp((x * (-1.0 - eps_m))) * ((-1.0 / eps_m) - -1.0))) / 2.0
    	# 4e145 < eps_m <= 6.8e247.
    	return (math.exp((x * (eps_m + -1.0))) + (1.0 - (eps_m * x))) / 2.0
    
    eps_m = abs(eps)
    # Alternative 5 of the NMSE 6.1 rewrite; caller passes eps_m = abs(eps).
    # Float64 wrappers and operand order are kept exactly as generated.
    function code(x, eps_m)
    	if eps_m <= 2.35e-5
    		# Tiny-eps regime; eps_m * (...) / eps_m kept from the derivation.
    		return Float64(Float64(Float64(eps_m * Float64(exp(Float64(-x)) * Float64(2.0 + Float64(x * 2.0)))) / eps_m) / 2.0)
    	elseif (eps_m <= 4e+145) || !(eps_m <= 6.8e+247)
    		# Mid band and huge band: series in x plus the surviving exponential.
    		return Float64(Float64(Float64(1.0 + Float64(Float64(1.0 / eps_m) + Float64(x * Float64(Float64(-1.0 + Float64(-1.0 / eps_m)) * Float64(1.0 - eps_m))))) + Float64(exp(Float64(x * Float64(-1.0 - eps_m))) * Float64(Float64(-1.0 / eps_m) - -1.0))) / 2.0)
    	else
    		# 4e145 < eps_m <= 6.8e247.
    		return Float64(Float64(exp(Float64(x * Float64(eps_m + -1.0))) + Float64(1.0 - Float64(eps_m * x))) / 2.0)
    	end
    end
    
    eps_m = abs(eps);
    % Alternative 5 of the NMSE 6.1 rewrite; caller passes eps_m = abs(eps).
    function tmp_2 = code(x, eps_m)
    	if (eps_m <= 2.35e-5)
    		% Tiny-eps regime; eps_m * (...) / eps_m kept from the derivation.
    		tmp_2 = ((eps_m * (exp(-x) * (2.0 + (x * 2.0)))) / eps_m) / 2.0;
    	elseif ((eps_m <= 4e+145) || ~((eps_m <= 6.8e+247)))
    		% Mid band and huge band: series in x plus the surviving exponential.
    		tmp_2 = ((1.0 + ((1.0 / eps_m) + (x * ((-1.0 + (-1.0 / eps_m)) * (1.0 - eps_m))))) + (exp((x * (-1.0 - eps_m))) * ((-1.0 / eps_m) - -1.0))) / 2.0;
    	else
    		% 4e145 < eps_m <= 6.8e247.
    		tmp_2 = (exp((x * (eps_m + -1.0))) + (1.0 - (eps_m * x))) / 2.0;
    	end
    end
    
    eps_m = N[Abs[eps], $MachinePrecision]
    code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 2.35e-5], N[(N[(N[(eps$95$m * N[(N[Exp[(-x)], $MachinePrecision] * N[(2.0 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[eps$95$m, 4e+145], N[Not[LessEqual[eps$95$m, 6.8e+247]], $MachinePrecision]], N[(N[(N[(1.0 + N[(N[(1.0 / eps$95$m), $MachinePrecision] + N[(x * N[(N[(-1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] * N[(1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(-1.0 / eps$95$m), $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(1.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
    
    \begin{array}{l}
    eps\_m = \left|\varepsilon\right|
    
    \\
    \begin{array}{l}
    \mathbf{if}\;eps\_m \leq 2.35 \cdot 10^{-5}:\\
    \;\;\;\;\frac{\frac{eps\_m \cdot \left(e^{-x} \cdot \left(2 + x \cdot 2\right)\right)}{eps\_m}}{2}\\
    
    \mathbf{elif}\;eps\_m \leq 4 \cdot 10^{+145} \lor \neg \left(eps\_m \leq 6.8 \cdot 10^{+247}\right):\\
    \;\;\;\;\frac{\left(1 + \left(\frac{1}{eps\_m} + x \cdot \left(\left(-1 + \frac{-1}{eps\_m}\right) \cdot \left(1 - eps\_m\right)\right)\right)\right) + e^{x \cdot \left(-1 - eps\_m\right)} \cdot \left(\frac{-1}{eps\_m} - -1\right)}{2}\\
    
    \mathbf{else}:\\
    \;\;\;\;\frac{e^{x \cdot \left(eps\_m + -1\right)} + \left(1 - eps\_m \cdot x\right)}{2}\\
    
    
    \end{array}
    \end{array}
    
    Derivation
    1. Split input into 3 regimes
    2. if eps < 2.34999999999999986e-5

      1. Initial program 60.8%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Simplified51.5%

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
      3. Add Preprocessing
      4. Taylor expanded in eps around 0 34.9%

        \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
      5. Step-by-step derivation
        1. Simplified74.7%

          \[\leadsto \frac{\color{blue}{\frac{\varepsilon \cdot \left(e^{-x} \cdot \left(2 + 2 \cdot x\right)\right) + 0}{\varepsilon}}}{2} \]

        if 2.34999999999999986e-5 < eps < 4e145 or 6.79999999999999961e247 < eps

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 78.9%

          \[\leadsto \frac{\color{blue}{\left(1 + \left(-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right) + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        5. Step-by-step derivation
          1. +-commutative78.9%

            \[\leadsto \frac{\left(1 + \color{blue}{\left(\frac{1}{\varepsilon} + -1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)\right)}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          2. mul-1-neg78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + \color{blue}{\left(-x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          3. distribute-rgt-neg-in78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + \color{blue}{x \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          4. *-commutative78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \left(-\color{blue}{\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)}\right)\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          5. distribute-rgt-neg-in78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(-\left(1 + \frac{1}{\varepsilon}\right)\right)\right)}\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          6. distribute-neg-in78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \left(\left(1 - \varepsilon\right) \cdot \color{blue}{\left(\left(-1\right) + \left(-\frac{1}{\varepsilon}\right)\right)}\right)\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          7. metadata-eval78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(\color{blue}{-1} + \left(-\frac{1}{\varepsilon}\right)\right)\right)\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          8. distribute-neg-frac78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \color{blue}{\frac{-1}{\varepsilon}}\right)\right)\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
          9. metadata-eval78.9%

            \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{\color{blue}{-1}}{\varepsilon}\right)\right)\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        6. Simplified78.9%

          \[\leadsto \frac{\color{blue}{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{-1}{\varepsilon}\right)\right)\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]

        if 4e145 < eps < 6.79999999999999961e247

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified80.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 100.0%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in eps around inf 100.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
        6. Step-by-step derivation
          1. *-commutative100.0%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        7. Simplified100.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        8. Taylor expanded in x around 0 80.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\left(1 + -1 \cdot \left(\varepsilon \cdot x\right)\right)}}{2} \]
        9. Step-by-step derivation
          1. mul-1-neg80.0%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \left(1 + \color{blue}{\left(-\varepsilon \cdot x\right)}\right)}{2} \]
          2. *-commutative80.0%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \left(1 + \left(-\color{blue}{x \cdot \varepsilon}\right)\right)}{2} \]
          3. unsub-neg80.0%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\left(1 - x \cdot \varepsilon\right)}}{2} \]
          4. *-commutative80.0%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \left(1 - \color{blue}{\varepsilon \cdot x}\right)}{2} \]
        10. Simplified80.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\left(1 - \varepsilon \cdot x\right)}}{2} \]
      6. Recombined 3 regimes into one program.
      7. Final simplification76.1%

        \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 2.35 \cdot 10^{-5}:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(e^{-x} \cdot \left(2 + x \cdot 2\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;\varepsilon \leq 4 \cdot 10^{+145} \lor \neg \left(\varepsilon \leq 6.8 \cdot 10^{+247}\right):\\ \;\;\;\;\frac{\left(1 + \left(\frac{1}{\varepsilon} + x \cdot \left(\left(-1 + \frac{-1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)\right) + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(\frac{-1}{\varepsilon} - -1\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + \left(1 - \varepsilon \cdot x\right)}{2}\\ \end{array} \]
      8. Add Preprocessing

      Alternative 6: 77.8% accurate, 1.7× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 3600000000000 \lor \neg \left(x \leq 6.2 \cdot 10^{+37} \lor \neg \left(x \leq 5.2 \cdot 10^{+91}\right) \land x \leq 9 \cdot 10^{+271}\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ;; Alternative 6: regimes split on x; eps_m = |eps| is used only in the
      ;; middle branch, and the remaining band returns the constant 0.
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x -2e-275)
         (/ (+ 1.0 (exp (- x))) 2.0)
         ;; Middle bands: (1 + e^{x(eps-1)}) / 2.
         (if (or (<= x 3600000000000.0)
                 (not (or (<= x 6.2e+37) (and (not (<= x 5.2e+91)) (<= x 9e+271)))))
           (/ (+ 1.0 (exp (* x (+ eps_m -1.0)))) 2.0)
           0.0)))
      eps_m = fabs(eps);
      double code(double x, double eps_m) {
      	double tmp;
      	if (x <= -2e-275) {
      		tmp = (1.0 + exp(-x)) / 2.0;
      	} else if ((x <= 3600000000000.0) || !((x <= 6.2e+37) || (!(x <= 5.2e+91) && (x <= 9e+271)))) {
      		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
      	} else {
      		tmp = 0.0;
      	}
      	return tmp;
      }
      
      eps_m = abs(eps)
      ! Alternative 6 of the NMSE 6.1 rewrite; eps_m = abs(eps) is used only
      ! in the middle regime.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          real(8) :: tmp
          if (x <= (-2d-275)) then
              tmp = (1.0d0 + exp(-x)) / 2.0d0
          else if ((x <= 3600000000000.0d0) .or. &
                   (.not. ((x <= 6.2d+37) .or. ((.not. (x <= 5.2d+91)) .and. (x <= 9d+271))))) then
              ! Fixed: the .not. must negate the whole disjunction, as in the
              ! C/Java/Python translations. The previous ungrouped form negated
              ! only the first comparison (.and. binds tighter than .or.), so
              ! e.g. x = 1e100 wrongly fell into this branch instead of the
              ! zero branch below.
              tmp = (1.0d0 + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
          else
              ! Bands (3.6e12, 6.2e37] and (5.2e91, 9e271].
              tmp = 0.0d0
          end if
          code = tmp
      end function
      
      eps_m = Math.abs(eps);
      /** Alternative 6 of the NMSE 6.1 rewrite; eps_m = Math.abs(eps) is used
       *  only in the middle regime. */
      public static double code(double x, double eps_m) {
      	if (x <= -2e-275) {
      		return (1.0 + Math.exp(-x)) / 2.0;
      	}
      	if ((x <= 3600000000000.0) || !((x <= 6.2e+37) || (!(x <= 5.2e+91) && (x <= 9e+271)))) {
      		return (1.0 + Math.exp((x * (eps_m + -1.0)))) / 2.0;
      	}
      	// Bands (3.6e12, 6.2e37] and (5.2e91, 9e271].
      	return 0.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Alternative 6 of the NMSE 6.1 rewrite; eps_m = abs(eps) is used only in the middle regime."""
      	if x <= -2e-275:
      		return (1.0 + math.exp(-x)) / 2.0
      	if (x <= 3600000000000.0) or not ((x <= 6.2e+37) or (not (x <= 5.2e+91) and (x <= 9e+271))):
      		return (1.0 + math.exp((x * (eps_m + -1.0)))) / 2.0
      	# Bands (3.6e12, 6.2e37] and (5.2e91, 9e271].
      	return 0.0
      
      eps_m = abs(eps)
      # Alternative 6 of the NMSE 6.1 rewrite; eps_m = abs(eps) is used only
      # in the middle regime. Float64 wrappers kept exactly as generated.
      function code(x, eps_m)
      	if x <= -2e-275
      		return Float64(Float64(1.0 + exp(Float64(-x))) / 2.0)
      	elseif (x <= 3600000000000.0) || !((x <= 6.2e+37) || (!(x <= 5.2e+91) && (x <= 9e+271)))
      		return Float64(Float64(1.0 + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0)
      	else
      		# Bands (3.6e12, 6.2e37] and (5.2e91, 9e271].
      		return 0.0
      	end
      end
      
      eps_m = abs(eps);
      % Alternative 6 of the NMSE 6.1 rewrite; eps_m = abs(eps) is used only
      % in the middle regime.
      function tmp_2 = code(x, eps_m)
      	if (x <= -2e-275)
      		tmp_2 = (1.0 + exp(-x)) / 2.0;
      	elseif ((x <= 3600000000000.0) || ~(((x <= 6.2e+37) || (~((x <= 5.2e+91)) && (x <= 9e+271)))))
      		tmp_2 = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
      	else
      		% Bands (3.6e12, 6.2e37] and (5.2e91, 9e271].
      		tmp_2 = 0.0;
      	end
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      code[x_, eps$95$m_] := If[LessEqual[x, -2e-275], N[(N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 3600000000000.0], N[Not[Or[LessEqual[x, 6.2e+37], And[N[Not[LessEqual[x, 5.2e+91]], $MachinePrecision], LessEqual[x, 9e+271]]]], $MachinePrecision]], N[(N[(1.0 + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]
      
      \begin{array}{l}
      eps\_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\
      \;\;\;\;\frac{1 + e^{-x}}{2}\\
      
      \mathbf{elif}\;x \leq 3600000000000 \lor \neg \left(x \leq 6.2 \cdot 10^{+37} \lor \neg \left(x \leq 5.2 \cdot 10^{+91}\right) \land x \leq 9 \cdot 10^{+271}\right):\\
      \;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\
      
      \mathbf{else}:\\
      \;\;\;\;0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if x < -1.99999999999999987e-275

        1. Initial program 64.8%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified55.4%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 97.0%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in eps around inf 97.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
        6. Step-by-step derivation
          1. *-commutative97.0%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        7. Simplified97.0%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        8. Taylor expanded in eps around 0 80.8%

          \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot x}}}{2} \]
        9. Step-by-step derivation
          1. neg-mul-180.8%

            \[\leadsto \frac{1 + e^{\color{blue}{-x}}}{2} \]
        10. Simplified80.8%

          \[\leadsto \frac{\color{blue}{1 + e^{-x}}}{2} \]

        if -1.99999999999999987e-275 < x < 3.6e12 or 6.2000000000000004e37 < x < 5.2000000000000001e91 or 8.9999999999999994e271 < x

        1. Initial program 66.8%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified60.7%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 97.7%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in x around 0 73.7%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{1}}{2} \]

        if 3.6e12 < x < 6.2000000000000004e37 or 5.2000000000000001e91 < x < 8.9999999999999994e271

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 72.2%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg72.2%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub72.2%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp72.2%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses72.2%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified72.2%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      3. Recombined 3 regimes into one program.
      4. Final simplification76.3%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 3600000000000 \lor \neg \left(x \leq 6.2 \cdot 10^{+37} \lor \neg \left(x \leq 5.2 \cdot 10^{+91}\right) \land x \leq 9 \cdot 10^{+271}\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
      5. Add Preprocessing

      Alternative 7: 84.6% accurate, 1.7× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\ \;\;\;\;\frac{1 + e^{eps\_m \cdot \left(-x\right)}}{2}\\ \mathbf{elif}\;x \leq 2800000000000 \lor \neg \left(x \leq 6 \cdot 10^{+37}\right) \land \left(x \leq 1.9 \cdot 10^{+91} \lor \neg \left(x \leq 1.6 \cdot 10^{+271}\right)\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ;; Alternative 7: regimes split on x; eps_m = |eps| feeds the two
      ;; exponential branches, and the remaining band returns the constant 0.
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x -2e-275)
         (/ (+ 1.0 (exp (* eps_m (- x)))) 2.0)
         ;; Middle bands: (1 + e^{x(eps-1)}) / 2.
         (if (or (<= x 2800000000000.0)
                 (and (not (<= x 6e+37)) (or (<= x 1.9e+91) (not (<= x 1.6e+271)))))
           (/ (+ 1.0 (exp (* x (+ eps_m -1.0)))) 2.0)
           0.0)))
      eps_m = fabs(eps);
      double code(double x, double eps_m) {
      	double tmp;
      	if (x <= -2e-275) {
      		tmp = (1.0 + exp((eps_m * -x))) / 2.0;
      	} else if ((x <= 2800000000000.0) || (!(x <= 6e+37) && ((x <= 1.9e+91) || !(x <= 1.6e+271)))) {
      		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
      	} else {
      		tmp = 0.0;
      	}
      	return tmp;
      }
      
      eps_m = abs(eps)
      ! Alternative 7 of the NMSE 6.1 rewrite; the caller passes eps_m = abs(eps).
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          real(8) :: tmp
          if (x <= (-2d-275)) then
              ! Fixed: "(-x)" is parenthesized — "eps_m * -x" puts two
              ! operators side by side, which is not standard Fortran.
              tmp = (1.0d0 + exp((eps_m * (-x)))) / 2.0d0
          else if ((x <= 2800000000000.0d0) .or. &
                   ((.not. (x <= 6d+37)) .and. ((x <= 1.9d+91) .or. (.not. (x <= 1.6d+271))))) then
              ! Grouping made explicit to match the other translations
              ! (.and. binds tighter than .or., so the ungrouped form relied
              ! on implicit precedence).
              tmp = (1.0d0 + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
          else
              ! Bands (2.8e12, 6e37] and (1.9e91, 1.6e271].
              tmp = 0.0d0
          end if
          code = tmp
      end function
      
      eps_m = Math.abs(eps);
      /** Alternative 7 of the NMSE 6.1 rewrite; caller passes eps_m = Math.abs(eps). */
      public static double code(double x, double eps_m) {
      	if (x <= -2e-275) {
      		return (1.0 + Math.exp((eps_m * -x))) / 2.0;
      	}
      	if ((x <= 2800000000000.0) || (!(x <= 6e+37) && ((x <= 1.9e+91) || !(x <= 1.6e+271)))) {
      		return (1.0 + Math.exp((x * (eps_m + -1.0)))) / 2.0;
      	}
      	// Bands (2.8e12, 6e37] and (1.9e91, 1.6e271].
      	return 0.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Alternative 7 of the NMSE 6.1 rewrite; caller passes eps_m = abs(eps)."""
      	if x <= -2e-275:
      		return (1.0 + math.exp((eps_m * -x))) / 2.0
      	if (x <= 2800000000000.0) or (not (x <= 6e+37) and ((x <= 1.9e+91) or not (x <= 1.6e+271))):
      		return (1.0 + math.exp((x * (eps_m + -1.0)))) / 2.0
      	# Bands (2.8e12, 6e37] and (1.9e91, 1.6e271].
      	return 0.0
      
      eps_m = abs(eps)
      # Herbie-generated 3-regime approximation of the NMSE 6.1 expression.
      # eps_m is expected to be abs(eps) (see the preprocessing step above).
      function code(x, eps_m)
      	tmp = 0.0
      	if (x <= -2e-275)
      		# Regime 1: positive exponent eps_m * (-x).
      		tmp = Float64(Float64(1.0 + exp(Float64(eps_m * Float64(-x)))) / 2.0);
      	elseif ((x <= 2800000000000.0) || (!(x <= 6e+37) && ((x <= 1.9e+91) || !(x <= 1.6e+271))))
      		# Regime 2: exp(x * (eps_m - 1)) form.
      		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0);
      	else
      		# Regime 3: the derivation simplified this region to 0.
      		tmp = 0.0;
      	end
      	return tmp
      end
      
      eps_m = abs(eps);
      % Herbie-generated 3-regime approximation of the NMSE 6.1 expression.
      % eps_m is expected to be abs(eps) (see the preprocessing step above).
      function tmp_2 = code(x, eps_m)
      	tmp = 0.0;
      	if (x <= -2e-275)
      		% Regime 1: positive exponent eps_m * (-x).
      		tmp = (1.0 + exp((eps_m * -x))) / 2.0;
      	elseif ((x <= 2800000000000.0) || (~((x <= 6e+37)) && ((x <= 1.9e+91) || ~((x <= 1.6e+271)))))
      		% Regime 2: exp(x * (eps_m - 1)) form.
      		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
      	else
      		% Regime 3: the derivation simplified this region to 0.
      		tmp = 0.0;
      	end
      	tmp_2 = tmp;
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Herbie-generated 3-regime approximation of the NMSE 6.1 expression; eps$95$m is expected to be Abs[eps] (see the preprocessing step above). *)
      code[x_, eps$95$m_] := If[LessEqual[x, -2e-275], N[(N[(1.0 + N[Exp[N[(eps$95$m * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 2800000000000.0], And[N[Not[LessEqual[x, 6e+37]], $MachinePrecision], Or[LessEqual[x, 1.9e+91], N[Not[LessEqual[x, 1.6e+271]], $MachinePrecision]]]], N[(N[(1.0 + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\
      \;\;\;\;\frac{1 + e^{eps\_m \cdot \left(-x\right)}}{2}\\
      
      \mathbf{elif}\;x \leq 2800000000000 \lor \neg \left(x \leq 6 \cdot 10^{+37}\right) \land \left(x \leq 1.9 \cdot 10^{+91} \lor \neg \left(x \leq 1.6 \cdot 10^{+271}\right)\right):\\
      \;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\
      
      \mathbf{else}:\\
      \;\;\;\;0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if x < -1.99999999999999987e-275

        1. Initial program 64.8%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified 64.8%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 43.0%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        5. Taylor expanded in eps around inf 75.1%

          \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
        6. Step-by-step derivation
          1. sub-neg75.1%

            \[\leadsto \frac{\color{blue}{1 + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}}{2} \]
          2. mul-1-neg75.1%

            \[\leadsto \frac{1 + \left(-\color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}\right)}{2} \]
          3. remove-double-neg75.1%

            \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
          4. associate-*r*75.1%

            \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}}{2} \]
          5. exp-prod69.4%

            \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1 \cdot x}\right)}^{\left(1 + \varepsilon\right)}}}{2} \]
          6. remove-double-neg69.4%

            \[\leadsto \frac{1 + {\left(e^{-1 \cdot x}\right)}^{\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right)}}{2} \]
          7. mul-1-neg69.4%

            \[\leadsto \frac{1 + {\left(e^{-1 \cdot x}\right)}^{\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right)}}{2} \]
          8. sub-neg69.4%

            \[\leadsto \frac{1 + {\left(e^{-1 \cdot x}\right)}^{\color{blue}{\left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
          9. exp-prod75.1%

            \[\leadsto \frac{1 + \color{blue}{e^{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
          10. associate-*r*75.1%

            \[\leadsto \frac{1 + e^{\color{blue}{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
          11. remove-double-neg75.1%

            \[\leadsto \frac{1 + \color{blue}{\left(-\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)\right)}}{2} \]
          12. mul-1-neg75.1%

            \[\leadsto \frac{1 + \left(-\color{blue}{-1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
          13. sub-neg75.1%

            \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
          14. mul-1-neg75.1%

            \[\leadsto \frac{1 - \color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}}{2} \]
          15. mul-1-neg75.1%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{-x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
          16. distribute-rgt-neg-in75.1%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{x \cdot \left(-\left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
        7. Simplified75.1%

          \[\leadsto \frac{\color{blue}{1 - \left(-e^{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}\right)}}{2} \]
        8. Taylor expanded in eps around inf 75.6%

          \[\leadsto \frac{1 - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
        9. Step-by-step derivation
          1. mul-1-neg75.6%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{-\varepsilon \cdot x}}\right)}{2} \]
          2. *-commutative75.6%

            \[\leadsto \frac{1 - \left(-e^{-\color{blue}{x \cdot \varepsilon}}\right)}{2} \]
          3. distribute-rgt-neg-in75.6%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{x \cdot \left(-\varepsilon\right)}}\right)}{2} \]
        10. Simplified75.6%

          \[\leadsto \frac{1 - \left(-e^{\color{blue}{x \cdot \left(-\varepsilon\right)}}\right)}{2} \]

        if -1.99999999999999987e-275 < x < 2.8e12 or 6.00000000000000043e37 < x < 1.8999999999999999e91 or 1.6000000000000001e271 < x

        1. Initial program 66.8%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified60.7%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 97.7%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in x around 0 73.7%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{1}}{2} \]

        if 2.8e12 < x < 6.00000000000000043e37 or 1.8999999999999999e91 < x < 1.6000000000000001e271

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 72.2%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg72.2%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub72.2%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp72.2%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses72.2%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified72.2%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      3. Recombined 3 regimes into one program.
      4. Final simplification 74.2%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \mathbf{elif}\;x \leq 2800000000000 \lor \neg \left(x \leq 6 \cdot 10^{+37}\right) \land \left(x \leq 1.9 \cdot 10^{+91} \lor \neg \left(x \leq 1.6 \cdot 10^{+271}\right)\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
      5. Add Preprocessing

      Alternative 8: 84.7% accurate, 1.7× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := e^{x \cdot \left(eps\_m + -1\right)}\\ \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\ \;\;\;\;\frac{1 + e^{eps\_m \cdot \left(-x\right)}}{2}\\ \mathbf{elif}\;x \leq 4.3 \cdot 10^{+15}:\\ \;\;\;\;\frac{t\_0 + \left(1 - eps\_m \cdot x\right)}{2}\\ \mathbf{elif}\;x \leq 6.5 \cdot 10^{+37}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 3.5 \cdot 10^{+91} \lor \neg \left(x \leq 7.6 \cdot 10^{+270}\right):\\ \;\;\;\;\frac{1 + t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ;; Herbie-generated 5-branch rewrite (Alternative 8).
      ;; eps_m is expected to be |eps| (see the preprocessing step above).
      (FPCore (x eps_m)
       :precision binary64
       ;; t_0 is shared by the x <= 4.3e15 and large-x branches.
       (let* ((t_0 (exp (* x (+ eps_m -1.0)))))
         (if (<= x -2e-275)
           (/ (+ 1.0 (exp (* eps_m (- x)))) 2.0)
           (if (<= x 4.3e+15)
             (/ (+ t_0 (- 1.0 (* eps_m x))) 2.0)
             (if (<= x 6.5e+37)
               0.0
               (if (or (<= x 3.5e+91) (not (<= x 7.6e+270)))
                 (/ (+ 1.0 t_0) 2.0)
                 0.0))))))
      eps_m = fabs(eps);
      double code(double x, double eps_m) {
      	double t_0 = exp((x * (eps_m + -1.0)));
      	double tmp;
      	if (x <= -2e-275) {
      		tmp = (1.0 + exp((eps_m * -x))) / 2.0;
      	} else if (x <= 4.3e+15) {
      		tmp = (t_0 + (1.0 - (eps_m * x))) / 2.0;
      	} else if (x <= 6.5e+37) {
      		tmp = 0.0;
      	} else if ((x <= 3.5e+91) || !(x <= 7.6e+270)) {
      		tmp = (1.0 + t_0) / 2.0;
      	} else {
      		tmp = 0.0;
      	}
      	return tmp;
      }
      
      eps_m = abs(eps)
      ! Herbie-generated 5-branch approximation (Alternative 8).
      ! eps_m is expected to be abs(eps) (see the preprocessing step above).
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          real(8) :: t_0
          real(8) :: tmp
          ! t_0 is shared by the x <= 4.3e15 and large-x branches.
          t_0 = exp((x * (eps_m + (-1.0d0))))
          if (x <= (-2d-275)) then
              tmp = (1.0d0 + exp((eps_m * -x))) / 2.0d0
          else if (x <= 4.3d+15) then
              tmp = (t_0 + (1.0d0 - (eps_m * x))) / 2.0d0
          else if (x <= 6.5d+37) then
              tmp = 0.0d0
          else if ((x <= 3.5d+91) .or. (.not. (x <= 7.6d+270))) then
              tmp = (1.0d0 + t_0) / 2.0d0
          else
              ! Remaining regime: the derivation simplified it to 0.
              tmp = 0.0d0
          end if
          code = tmp
      end function
      
      eps_m = Math.abs(eps);
      // Herbie-generated 5-branch approximation (Alternative 8).
      // eps_m is expected to be Math.abs(eps) (see the preprocessing step above).
      public static double code(double x, double eps_m) {
      	// Shared by the x <= 4.3e15 and large-x branches.
      	double t_0 = Math.exp((x * (eps_m + -1.0)));
      	double tmp;
      	if (x <= -2e-275) {
      		tmp = (1.0 + Math.exp((eps_m * -x))) / 2.0;
      	} else if (x <= 4.3e+15) {
      		tmp = (t_0 + (1.0 - (eps_m * x))) / 2.0;
      	} else if (x <= 6.5e+37) {
      		tmp = 0.0;
      	} else if ((x <= 3.5e+91) || !(x <= 7.6e+270)) {
      		tmp = (1.0 + t_0) / 2.0;
      	} else {
      		// Remaining regime: the derivation simplified it to 0.
      		tmp = 0.0;
      	}
      	return tmp;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Herbie-generated 5-branch approximation (Alternative 8).

      	eps_m is expected to be abs(eps) (see the preprocessing step above).
      	"""
      	# Shared by the x <= 4.3e15 and large-x branches.
      	decay = math.exp(x * (eps_m + -1.0))
      	if x <= -2e-275:
      		return (1.0 + math.exp(eps_m * -x)) / 2.0
      	if x <= 4.3e+15:
      		return (decay + (1.0 - eps_m * x)) / 2.0
      	if x <= 6.5e+37:
      		return 0.0
      	if x <= 3.5e+91 or not x <= 7.6e+270:
      		return (1.0 + decay) / 2.0
      	# Remaining regime: the derivation simplified it to 0.
      	return 0.0
      
      eps_m = abs(eps)
      # Herbie-generated 5-branch approximation (Alternative 8).
      # eps_m is expected to be abs(eps) (see the preprocessing step above).
      function code(x, eps_m)
      	# t_0 is shared by the x <= 4.3e15 and large-x branches.
      	t_0 = exp(Float64(x * Float64(eps_m + -1.0)))
      	tmp = 0.0
      	if (x <= -2e-275)
      		tmp = Float64(Float64(1.0 + exp(Float64(eps_m * Float64(-x)))) / 2.0);
      	elseif (x <= 4.3e+15)
      		tmp = Float64(Float64(t_0 + Float64(1.0 - Float64(eps_m * x))) / 2.0);
      	elseif (x <= 6.5e+37)
      		tmp = 0.0;
      	elseif ((x <= 3.5e+91) || !(x <= 7.6e+270))
      		tmp = Float64(Float64(1.0 + t_0) / 2.0);
      	else
      		# Remaining regime: the derivation simplified it to 0.
      		tmp = 0.0;
      	end
      	return tmp
      end
      
      eps_m = abs(eps);
      % Herbie-generated 5-branch approximation (Alternative 8).
      % eps_m is expected to be abs(eps) (see the preprocessing step above).
      function tmp_2 = code(x, eps_m)
      	% t_0 is shared by the x <= 4.3e15 and large-x branches.
      	t_0 = exp((x * (eps_m + -1.0)));
      	tmp = 0.0;
      	if (x <= -2e-275)
      		tmp = (1.0 + exp((eps_m * -x))) / 2.0;
      	elseif (x <= 4.3e+15)
      		tmp = (t_0 + (1.0 - (eps_m * x))) / 2.0;
      	elseif (x <= 6.5e+37)
      		tmp = 0.0;
      	elseif ((x <= 3.5e+91) || ~((x <= 7.6e+270)))
      		tmp = (1.0 + t_0) / 2.0;
      	else
      		% Remaining regime: the derivation simplified it to 0.
      		tmp = 0.0;
      	end
      	tmp_2 = tmp;
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Herbie-generated 5-branch approximation (Alternative 8); eps$95$m is expected to be Abs[eps]. t$95$0 is shared by two branches. *)
      code[x_, eps$95$m_] := Block[{t$95$0 = N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[x, -2e-275], N[(N[(1.0 + N[Exp[N[(eps$95$m * (-x)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 4.3e+15], N[(N[(t$95$0 + N[(1.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 6.5e+37], 0.0, If[Or[LessEqual[x, 3.5e+91], N[Not[LessEqual[x, 7.6e+270]], $MachinePrecision]], N[(N[(1.0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      t_0 := e^{x \cdot \left(eps\_m + -1\right)}\\
      \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\
      \;\;\;\;\frac{1 + e^{eps\_m \cdot \left(-x\right)}}{2}\\
      
      \mathbf{elif}\;x \leq 4.3 \cdot 10^{+15}:\\
      \;\;\;\;\frac{t\_0 + \left(1 - eps\_m \cdot x\right)}{2}\\
      
      \mathbf{elif}\;x \leq 6.5 \cdot 10^{+37}:\\
      \;\;\;\;0\\
      
      \mathbf{elif}\;x \leq 3.5 \cdot 10^{+91} \lor \neg \left(x \leq 7.6 \cdot 10^{+270}\right):\\
      \;\;\;\;\frac{1 + t\_0}{2}\\
      
      \mathbf{else}:\\
      \;\;\;\;0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 4 regimes
      2. if x < -1.99999999999999987e-275

        1. Initial program 64.8%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified64.8%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 43.0%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        5. Taylor expanded in eps around inf 75.1%

          \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
        6. Step-by-step derivation
          1. sub-neg75.1%

            \[\leadsto \frac{\color{blue}{1 + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}}{2} \]
          2. mul-1-neg75.1%

            \[\leadsto \frac{1 + \left(-\color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}\right)}{2} \]
          3. remove-double-neg75.1%

            \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
          4. associate-*r*75.1%

            \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}}{2} \]
          5. exp-prod69.4%

            \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1 \cdot x}\right)}^{\left(1 + \varepsilon\right)}}}{2} \]
          6. remove-double-neg69.4%

            \[\leadsto \frac{1 + {\left(e^{-1 \cdot x}\right)}^{\left(1 + \color{blue}{\left(-\left(-\varepsilon\right)\right)}\right)}}{2} \]
          7. mul-1-neg69.4%

            \[\leadsto \frac{1 + {\left(e^{-1 \cdot x}\right)}^{\left(1 + \left(-\color{blue}{-1 \cdot \varepsilon}\right)\right)}}{2} \]
          8. sub-neg69.4%

            \[\leadsto \frac{1 + {\left(e^{-1 \cdot x}\right)}^{\color{blue}{\left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
          9. exp-prod75.1%

            \[\leadsto \frac{1 + \color{blue}{e^{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}}{2} \]
          10. associate-*r*75.1%

            \[\leadsto \frac{1 + e^{\color{blue}{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
          11. remove-double-neg75.1%

            \[\leadsto \frac{1 + \color{blue}{\left(-\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)\right)}}{2} \]
          12. mul-1-neg75.1%

            \[\leadsto \frac{1 + \left(-\color{blue}{-1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
          13. sub-neg75.1%

            \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}}{2} \]
          14. mul-1-neg75.1%

            \[\leadsto \frac{1 - \color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}\right)}}{2} \]
          15. mul-1-neg75.1%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{-x \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]
          16. distribute-rgt-neg-in75.1%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{x \cdot \left(-\left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]
        7. Simplified75.1%

          \[\leadsto \frac{\color{blue}{1 - \left(-e^{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}\right)}}{2} \]
        8. Taylor expanded in eps around inf 75.6%

          \[\leadsto \frac{1 - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
        9. Step-by-step derivation
          1. mul-1-neg75.6%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{-\varepsilon \cdot x}}\right)}{2} \]
          2. *-commutative75.6%

            \[\leadsto \frac{1 - \left(-e^{-\color{blue}{x \cdot \varepsilon}}\right)}{2} \]
          3. distribute-rgt-neg-in75.6%

            \[\leadsto \frac{1 - \left(-e^{\color{blue}{x \cdot \left(-\varepsilon\right)}}\right)}{2} \]
        10. Simplified75.6%

          \[\leadsto \frac{1 - \left(-e^{\color{blue}{x \cdot \left(-\varepsilon\right)}}\right)}{2} \]

        if -1.99999999999999987e-275 < x < 4.3e15

        1. Initial program 56.3%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified48.2%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 96.9%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in eps around inf 95.8%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
        6. Step-by-step derivation
          1. *-commutative95.8%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        7. Simplified95.8%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        8. Taylor expanded in x around 0 85.8%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\left(1 + -1 \cdot \left(\varepsilon \cdot x\right)\right)}}{2} \]
        9. Step-by-step derivation
          1. mul-1-neg85.8%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \left(1 + \color{blue}{\left(-\varepsilon \cdot x\right)}\right)}{2} \]
          2. *-commutative85.8%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \left(1 + \left(-\color{blue}{x \cdot \varepsilon}\right)\right)}{2} \]
          3. unsub-neg85.8%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\left(1 - x \cdot \varepsilon\right)}}{2} \]
          4. *-commutative85.8%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \left(1 - \color{blue}{\varepsilon \cdot x}\right)}{2} \]
        10. Simplified85.8%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{\left(1 - \varepsilon \cdot x\right)}}{2} \]

        if 4.3e15 < x < 6.4999999999999998e37 or 3.50000000000000001e91 < x < 7.60000000000000036e270

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 72.2%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp72.2%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg72.2%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub72.2%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg72.2%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp72.2%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses72.2%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified72.2%

          \[\leadsto \frac{\color{blue}{0}}{2} \]

        if 6.4999999999999998e37 < x < 3.50000000000000001e91 or 7.60000000000000036e270 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 100.0%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in x around 0 36.7%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{1}}{2} \]
      3. Recombined 4 regimes into one program.
      4. Final simplification 74.3%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-275}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot \left(-x\right)}}{2}\\ \mathbf{elif}\;x \leq 4.3 \cdot 10^{+15}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + \left(1 - \varepsilon \cdot x\right)}{2}\\ \mathbf{elif}\;x \leq 6.5 \cdot 10^{+37}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 3.5 \cdot 10^{+91} \lor \neg \left(x \leq 7.6 \cdot 10^{+270}\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
      5. Add Preprocessing

      Alternative 9: 71.3% accurate, 1.9× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 2.05:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 6 \cdot 10^{+37}:\\ \;\;\;\;\frac{x \cdot \frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;x \leq 1.6 \cdot 10^{+91}:\\ \;\;\;\;\frac{\left(x \cdot 2\right) \cdot e^{x}}{2}\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ;; Herbie-generated 5-branch rewrite (Alternative 9).
      ;; Note: the eps_m parameter is not referenced in this variant.
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x 2.05)
         (/ (+ 1.0 (exp (- x))) 2.0)
         (if (<= x 6e+37)
           (/ (* x (/ 2.0 (exp x))) 2.0)
           (if (<= x 1.6e+91)
             (/ (* (* x 2.0) (exp x)) 2.0)
             (if (<= x 1e+271) 0.0 (/ (* x (+ 2.0 (* x (+ x 2.0)))) 2.0))))))
      eps_m = fabs(eps);
      double code(double x, double eps_m) {
      	double tmp;
      	if (x <= 2.05) {
      		tmp = (1.0 + exp(-x)) / 2.0;
      	} else if (x <= 6e+37) {
      		tmp = (x * (2.0 / exp(x))) / 2.0;
      	} else if (x <= 1.6e+91) {
      		tmp = ((x * 2.0) * exp(x)) / 2.0;
      	} else if (x <= 1e+271) {
      		tmp = 0.0;
      	} else {
      		tmp = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	}
      	return tmp;
      }
      
      eps_m = abs(eps)
      ! Herbie-generated 5-branch approximation (Alternative 9).
      ! Note: the eps_m argument is not referenced in this variant.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          real(8) :: tmp
          if (x <= 2.05d0) then
              tmp = (1.0d0 + exp(-x)) / 2.0d0
          else if (x <= 6d+37) then
              tmp = (x * (2.0d0 / exp(x))) / 2.0d0
          else if (x <= 1.6d+91) then
              tmp = ((x * 2.0d0) * exp(x)) / 2.0d0
          else if (x <= 1d+271) then
              ! The derivation simplified this region to 0.
              tmp = 0.0d0
          else
              tmp = (x * (2.0d0 + (x * (x + 2.0d0)))) / 2.0d0
          end if
          code = tmp
      end function
      
      eps_m = Math.abs(eps);
      // Herbie-generated 5-branch approximation (Alternative 9).
      // Note: the eps_m parameter is not referenced in this variant.
      public static double code(double x, double eps_m) {
      	double tmp;
      	if (x <= 2.05) {
      		tmp = (1.0 + Math.exp(-x)) / 2.0;
      	} else if (x <= 6e+37) {
      		tmp = (x * (2.0 / Math.exp(x))) / 2.0;
      	} else if (x <= 1.6e+91) {
      		tmp = ((x * 2.0) * Math.exp(x)) / 2.0;
      	} else if (x <= 1e+271) {
      		// The derivation simplified this region to 0.
      		tmp = 0.0;
      	} else {
      		tmp = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	}
      	return tmp;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Herbie-generated 5-branch approximation (Alternative 9).

      	Note: the eps_m parameter is not referenced in this variant.
      	"""
      	if x <= 2.05:
      		return (1.0 + math.exp(-x)) / 2.0
      	if x <= 6e+37:
      		return (x * (2.0 / math.exp(x))) / 2.0
      	if x <= 1.6e+91:
      		return ((x * 2.0) * math.exp(x)) / 2.0
      	if x <= 1e+271:
      		# The derivation simplified this region to 0.
      		return 0.0
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0
      
      eps_m = abs(eps)
      # Herbie-generated 5-branch approximation (Alternative 9).
      # Note: the eps_m parameter is not referenced in this variant.
      function code(x, eps_m)
      	tmp = 0.0
      	if (x <= 2.05)
      		tmp = Float64(Float64(1.0 + exp(Float64(-x))) / 2.0);
      	elseif (x <= 6e+37)
      		tmp = Float64(Float64(x * Float64(2.0 / exp(x))) / 2.0);
      	elseif (x <= 1.6e+91)
      		tmp = Float64(Float64(Float64(x * 2.0) * exp(x)) / 2.0);
      	elseif (x <= 1e+271)
      		# The derivation simplified this region to 0.
      		tmp = 0.0;
      	else
      		tmp = Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x + 2.0)))) / 2.0);
      	end
      	return tmp
      end
      
      eps_m = abs(eps);
      % Herbie-generated 5-branch approximation (Alternative 9).
      % Note: the eps_m parameter is not referenced in this variant.
      function tmp_2 = code(x, eps_m)
      	tmp = 0.0;
      	if (x <= 2.05)
      		tmp = (1.0 + exp(-x)) / 2.0;
      	elseif (x <= 6e+37)
      		tmp = (x * (2.0 / exp(x))) / 2.0;
      	elseif (x <= 1.6e+91)
      		tmp = ((x * 2.0) * exp(x)) / 2.0;
      	elseif (x <= 1e+271)
      		% The derivation simplified this region to 0.
      		tmp = 0.0;
      	else
      		tmp = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	end
      	tmp_2 = tmp;
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Herbie-generated 5-branch approximation (Alternative 9); the eps$95$m parameter is not referenced in this variant. *)
      code[x_, eps$95$m_] := If[LessEqual[x, 2.05], N[(N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 6e+37], N[(N[(x * N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.6e+91], N[(N[(N[(x * 2.0), $MachinePrecision] * N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1e+271], 0.0, N[(N[(x * N[(2.0 + N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq 2.05:\\
      \;\;\;\;\frac{1 + e^{-x}}{2}\\
      
      \mathbf{elif}\;x \leq 6 \cdot 10^{+37}:\\
      \;\;\;\;\frac{x \cdot \frac{2}{e^{x}}}{2}\\
      
      \mathbf{elif}\;x \leq 1.6 \cdot 10^{+91}:\\
      \;\;\;\;\frac{\left(x \cdot 2\right) \cdot e^{x}}{2}\\
      
      \mathbf{elif}\;x \leq 10^{+271}:\\
      \;\;\;\;0\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 5 regimes
      2. if x < 2.0499999999999998

        1. Initial program 60.5%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified 51.4%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 97.3%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in eps around inf 97.4%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
        6. Step-by-step derivation
          1. *-commutative97.4%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        7. Simplified97.4%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        8. Taylor expanded in eps around 0 80.3%

          \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot x}}}{2} \]
        9. Step-by-step derivation
          1. neg-mul-180.3%

            \[\leadsto \frac{1 + e^{\color{blue}{-x}}}{2} \]
        10. Simplified80.3%

          \[\leadsto \frac{\color{blue}{1 + e^{-x}}}{2} \]

        if 2.0499999999999998 < x < 6.00000000000000043e37

        1. Initial program 93.1%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified93.1%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 72.0%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+72.0%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*72.0%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg72.0%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub72.0%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in72.0%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--72.0%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg72.0%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg72.0%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified72.0%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 66.7%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp66.7%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*66.7%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative66.7%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp66.7%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-166.7%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*66.7%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-166.7%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified66.7%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Taylor expanded in x around inf 66.7%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        11. Step-by-step derivation
          1. associate-*r*66.7%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot e^{-x}}}{2} \]
          2. *-commutative66.7%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot e^{-x}}{2} \]
          3. exp-neg66.7%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
          4. associate-/l*66.7%

            \[\leadsto \frac{\color{blue}{\frac{\left(x \cdot 2\right) \cdot 1}{e^{x}}}}{2} \]
          5. *-rgt-identity66.7%

            \[\leadsto \frac{\frac{\color{blue}{x \cdot 2}}{e^{x}}}{2} \]
          6. associate-/l*66.7%

            \[\leadsto \frac{\color{blue}{x \cdot \frac{2}{e^{x}}}}{2} \]
        12. Simplified66.7%

          \[\leadsto \frac{\color{blue}{x \cdot \frac{2}{e^{x}}}}{2} \]

        if 6.00000000000000043e37 < x < 1.59999999999999995e91

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 27.8%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+27.8%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*27.8%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg27.8%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub27.8%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in27.8%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--27.8%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg27.8%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg27.8%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified27.8%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 27.8%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp27.8%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*27.8%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative27.8%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp27.8%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-127.8%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*27.8%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-127.8%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified27.8%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow127.8%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*27.8%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative27.8%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod73.8%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg73.8%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod73.8%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt73.8%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr73.8%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow173.8%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified73.8%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]

        if 1.59999999999999995e91 < x < 9.99999999999999953e270

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 68.1%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg68.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg68.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp68.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg68.1%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub68.1%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg68.1%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp68.1%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses68.1%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified68.1%

          \[\leadsto \frac{\color{blue}{0}}{2} \]

        if 9.99999999999999953e270 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 28.4%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in28.4%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--28.4%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg28.4%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg28.4%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified28.4%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 28.4%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp28.4%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp28.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-128.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-128.4%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified28.4%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow128.4%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg73.2%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr73.2%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow173.2%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified73.2%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 73.2%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(2 + x\right)\right)}}{2} \]
      3. Recombined 5 regimes into one program.
      4. Final simplification 77.1%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 2.05:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 6 \cdot 10^{+37}:\\ \;\;\;\;\frac{x \cdot \frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;x \leq 1.6 \cdot 10^{+91}:\\ \;\;\;\;\frac{\left(x \cdot 2\right) \cdot e^{x}}{2}\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \]
      5. Add Preprocessing

      Alternative 10: 71.8% accurate, 1.9× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 2.05:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 6.5 \cdot 10^{+270}:\\ \;\;\;\;\frac{x \cdot \frac{2}{e^{x}}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ;; Herbie "Alternative 10" (binary64): three regimes split on x only;
      ;; the eps_m argument appears in the signature but no branch reads it.
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x 2.05)
         (/ (+ 1.0 (exp (- x))) 2.0)
         (if (<= x 6.5e+270)
           (/ (* x (/ 2.0 (exp x))) 2.0)
           (/ (* x (+ 2.0 (* x (+ x 2.0)))) 2.0))))
      eps_m = fabs(eps);
      double code(double x, double eps_m) {
      	double tmp;
      	if (x <= 2.05) {
      		tmp = (1.0 + exp(-x)) / 2.0;
      	} else if (x <= 6.5e+270) {
      		tmp = (x * (2.0 / exp(x))) / 2.0;
      	} else {
      		tmp = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	}
      	return tmp;
      }
      
      eps_m = abs(eps)
      ! Herbie "Alternative 10": three-regime piecewise approximation in x.
      ! eps_m (|eps| from the preprocessing line above) is received but unused.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          if (x <= 2.05d0) then
              ! Small x: limit form (1 + exp(-x)) / 2.
              code = (1.0d0 + exp(-x)) / 2.0d0
          else if (x <= 6.5d+270) then
              ! Mid range: x * (2 / exp(x)) / 2, i.e. x * exp(-x).
              code = (x * (2.0d0 / exp(x))) / 2.0d0
          else
              ! Huge x: polynomial form from the derivation's final Taylor step.
              code = (x * (2.0d0 + (x * (x + 2.0d0)))) / 2.0d0
          end if
      end function
      
      eps_m = Math.abs(eps);
      /**
       * Herbie "Alternative 10" rewrite of the Section 6.1 expression
       * ((1 + 1/eps) e^(-(1-eps)x) - (1/eps - 1) e^(-(1+eps)x)) / 2.
       * Regime selection depends on x only; eps_m (|eps| computed by the
       * preprocessing line above) is accepted but never read.
       */
      public static double code(double x, double eps_m) {
      	// Small x: limit form (1 + e^-x) / 2.
      	if (x <= 2.05) {
      		return (1.0 + Math.exp(-x)) / 2.0;
      	}
      	// Mid range: x * (2 / e^x) / 2, i.e. x * e^-x.
      	if (x <= 6.5e+270) {
      		return (x * (2.0 / Math.exp(x))) / 2.0;
      	}
      	// Huge x: polynomial form from the derivation's final Taylor step.
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Herbie "Alternative 10" piecewise rewrite of the Section 6.1 expression.
      	Branches depend only on x; eps_m (|eps| from the preprocessing line
      	above) is accepted for interface compatibility but never read.
      	"""
      	# Small x: limit form (1 + e^-x) / 2.
      	if x <= 2.05:
      		return (1.0 + math.exp(-x)) / 2.0
      	# Mid range: x * (2 / e^x) / 2, i.e. x * e^-x.
      	if x <= 6.5e+270:
      		return (x * (2.0 / math.exp(x))) / 2.0
      	# Huge x: polynomial form from the derivation's final Taylor step.
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0
      
      eps_m = abs(eps)
      # Herbie "Alternative 10": three-regime piecewise form in x.
      # eps_m (|eps| from the preprocessing line above) is never read.
      function code(x, eps_m)
      	# Small x: limit form (1 + e^-x) / 2.
      	if x <= 2.05
      		return Float64(Float64(1.0 + exp(Float64(-x))) / 2.0)
      	end
      	# Mid range: x * (2 / e^x) / 2, i.e. x * e^-x.
      	if x <= 6.5e+270
      		return Float64(Float64(x * Float64(2.0 / exp(x))) / 2.0)
      	end
      	# Huge x: polynomial form from the derivation's final Taylor step.
      	return Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x + 2.0)))) / 2.0)
      end
      
      eps_m = abs(eps);
      % Herbie "Alternative 10": three-regime piecewise form in x.
      % eps_m (|eps| from the preprocessing line above) is never read.
      function tmp_2 = code(x, eps_m)
      	if (x <= 2.05)
      		% Small x: limit form (1 + e^-x) / 2.
      		tmp_2 = (1.0 + exp(-x)) / 2.0;
      	elseif (x <= 6.5e+270)
      		% Mid range: x * (2 / e^x) / 2, i.e. x * e^-x.
      		tmp_2 = (x * (2.0 / exp(x))) / 2.0;
      	else
      		% Huge x: polynomial form from the derivation's final Taylor step.
      		tmp_2 = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	end
      end
      
      (* Preprocessing emitted by Herbie: eps is replaced by its absolute value
         before the rewritten program runs. *)
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Alternative 10: three regimes in x (<= 2.05, <= 6.5e+270, else);
         NOTE(review): the eps$95$m argument is never used inside the body. *)
      code[x_, eps$95$m_] := If[LessEqual[x, 2.05], N[(N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 6.5e+270], N[(N[(x * N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(x * N[(2.0 + N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq 2.05:\\
      \;\;\;\;\frac{1 + e^{-x}}{2}\\
      
      \mathbf{elif}\;x \leq 6.5 \cdot 10^{+270}:\\
      \;\;\;\;\frac{x \cdot \frac{2}{e^{x}}}{2}\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if x < 2.0499999999999998

        1. Initial program 60.5%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified 51.4%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 97.3%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in eps around inf 97.4%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
        6. Step-by-step derivation
          1. *-commutative97.4%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        7. Simplified97.4%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        8. Taylor expanded in eps around 0 80.3%

          \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot x}}}{2} \]
        9. Step-by-step derivation
          1. neg-mul-180.3%

            \[\leadsto \frac{1 + e^{\color{blue}{-x}}}{2} \]
        10. Simplified80.3%

          \[\leadsto \frac{\color{blue}{1 + e^{-x}}}{2} \]

        if 2.0499999999999998 < x < 6.4999999999999996e270

        1. Initial program 98.5%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified98.5%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 59.8%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+59.8%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*59.8%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg59.8%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub59.8%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in59.8%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--59.8%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg59.8%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg59.8%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified59.8%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 58.6%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp58.6%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*58.6%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative58.6%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp58.6%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-158.6%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*58.6%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-158.6%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified58.6%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Taylor expanded in x around inf 58.6%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        11. Step-by-step derivation
          1. associate-*r*58.6%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot e^{-x}}}{2} \]
          2. *-commutative58.6%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot e^{-x}}{2} \]
          3. exp-neg58.6%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
          4. associate-/l*58.6%

            \[\leadsto \frac{\color{blue}{\frac{\left(x \cdot 2\right) \cdot 1}{e^{x}}}}{2} \]
          5. *-rgt-identity58.6%

            \[\leadsto \frac{\frac{\color{blue}{x \cdot 2}}{e^{x}}}{2} \]
          6. associate-/l*58.6%

            \[\leadsto \frac{\color{blue}{x \cdot \frac{2}{e^{x}}}}{2} \]
        12. Simplified58.6%

          \[\leadsto \frac{\color{blue}{x \cdot \frac{2}{e^{x}}}}{2} \]

        if 6.4999999999999996e270 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 28.4%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in28.4%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--28.4%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg28.4%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg28.4%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified28.4%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 28.4%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp28.4%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp28.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-128.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-128.4%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified28.4%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow128.4%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg73.2%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr73.2%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow173.2%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified73.2%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 73.2%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(2 + x\right)\right)}}{2} \]
      3. Recombined 3 regimes into one program.
      4. Final simplification74.4%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 2.05:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 6.5 \cdot 10^{+270}:\\ \;\;\;\;\frac{x \cdot \frac{2}{e^{x}}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \]
      5. Add Preprocessing

      Alternative 11: 71.6% accurate, 2.0× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 800000:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ; Herbie-generated piecewise rewrite of the original expression
      ;   ((1 + 1/eps) e^(-(1-eps)x) - (1/eps - 1) e^(-(1+eps)x)) / 2
      ; split into three regimes on x; eps enters only through the
      ; preprocessing step eps_m = |eps| and is unused inside the branches.
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x 800000.0)
         (/ (+ 1.0 (exp (- x))) 2.0)
         (if (<= x 1e+271) 0.0 (/ (* x (+ 2.0 (* x (+ x 2.0)))) 2.0))))
      eps_m = fabs(eps);
      double code(double x, double eps_m) {
      	double tmp;
      	if (x <= 800000.0) {
      		tmp = (1.0 + exp(-x)) / 2.0;
      	} else if (x <= 1e+271) {
      		tmp = 0.0;
      	} else {
      		tmp = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	}
      	return tmp;
      }
      
      eps_m = abs(eps)
      real(8) function code(x, eps_m)
          ! Three-regime piecewise approximation split on x; eps_m (= |eps|)
          ! is accepted for interface compatibility but unused in every branch.
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          if (x <= 800000.0d0) then
              code = (1.0d0 + exp(-x)) / 2.0d0
          else if (x <= 1d+271) then
              code = 0.0d0
          else
              code = (x * (2.0d0 + (x * (x + 2.0d0)))) / 2.0d0
          end if
      end function
      
      eps_m = Math.abs(eps);
      /**
       * Herbie alternative: three-regime approximation of the Section 6.1
       * expression, split on x. eps_m (= |eps|) is unused in every branch.
       */
      public static double code(double x, double eps_m) {
      	// Small or negative x: (1 + e^{-x}) / 2.
      	if (x <= 800000.0) {
      		return (1.0 + Math.exp(-x)) / 2.0;
      	}
      	// Mid regime: approximation is the constant 0.
      	if (x <= 1e+271) {
      		return 0.0;
      	}
      	// Extreme x: polynomial series form.
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Three-regime piecewise approximation of the Section 6.1 expression.
      
      	Regimes are split on x; eps_m (= abs(eps), computed by the caller) is
      	accepted for interface compatibility but unused by every branch.
      	"""
      	# Small or negative x: (1 + e^{-x}) / 2.
      	if x <= 800000.0:
      		return (1.0 + math.exp(-x)) / 2.0
      	# Mid regime: approximation is the constant 0.
      	if x <= 1e+271:
      		return 0.0
      	# Extreme x: polynomial series form.
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0
      
      eps_m = abs(eps)
      function code(x, eps_m)
      	# Three regimes split on x; eps_m (= |eps|) is unused in every branch.
      	if x <= 800000.0
      		return Float64(Float64(1.0 + exp(Float64(-x))) / 2.0)
      	elseif x <= 1e+271
      		return 0.0
      	else
      		return Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x + 2.0)))) / 2.0)
      	end
      end
      
      eps_m = abs(eps);
      function tmp_2 = code(x, eps_m)
      	% Three-regime piecewise approximation split on x; eps_m (= abs(eps))
      	% is accepted for interface compatibility but unused in every branch.
      	if (x <= 800000.0)
      		tmp_2 = (1.0 + exp(-x)) / 2.0;
      	elseif (x <= 1e+271)
      		tmp_2 = 0.0;
      	else
      		tmp_2 = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	end
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Three-regime piecewise approximation selected on x; eps$95$m (= |eps|) is not referenced by any branch. *)
      code[x_, eps$95$m_] := If[LessEqual[x, 800000.0], N[(N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1e+271], 0.0, N[(N[(x * N[(2.0 + N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq 800000:\\
      \;\;\;\;\frac{1 + e^{-x}}{2}\\
      
      \mathbf{elif}\;x \leq 10^{+271}:\\
      \;\;\;\;0\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if x < 8e5

        1. Initial program 60.6%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified51.7%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around inf 96.9%

          \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
        5. Taylor expanded in eps around inf 96.9%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x}}}}{2} \]
        6. Step-by-step derivation
          1. *-commutative96.9%

            \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        7. Simplified96.9%

          \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon}}}}{2} \]
        8. Taylor expanded in eps around 0 79.0%

          \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot x}}}{2} \]
        9. Step-by-step derivation
          1. neg-mul-179.0%

            \[\leadsto \frac{1 + e^{\color{blue}{-x}}}{2} \]
        10. Simplified79.0%

          \[\leadsto \frac{\color{blue}{1 + e^{-x}}}{2} \]

        if 8e5 < x < 9.99999999999999953e270

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 60.9%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg60.9%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg60.9%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp60.9%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg60.9%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub60.9%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg60.9%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp60.9%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses60.9%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified60.9%

          \[\leadsto \frac{\color{blue}{0}}{2} \]

        if 9.99999999999999953e270 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 28.4%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in28.4%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--28.4%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg28.4%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg28.4%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified28.4%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 28.4%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp28.4%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp28.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-128.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-128.4%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified28.4%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow128.4%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg73.2%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr73.2%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow173.2%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified73.2%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 73.2%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(2 + x\right)\right)}}{2} \]
      3. Recombined 3 regimes into one program.
      4. Final simplification74.3%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 800000:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \]
      5. Add Preprocessing

      Alternative 12: 66.1% accurate, 8.7× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -2.6 \cdot 10^{+143}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot 2\right)}{2}\\ \mathbf{elif}\;x \leq 85:\\ \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\ \mathbf{elif}\;x \leq 4 \cdot 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ; Herbie-generated piecewise rewrite of the original expression
      ;   ((1 + 1/eps) e^(-(1-eps)x) - (1/eps - 1) e^(-(1+eps)x)) / 2
      ; split into four regimes on x; eps enters through the preprocessing
      ; step eps_m = |eps|, used only by the x <= 85 branch.
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x -2.6e+143)
         (/ (* x (+ 2.0 (* x 2.0))) 2.0)
         (if (<= x 85.0)
           (/ (- 2.0 (* eps_m x)) 2.0)
           (if (<= x 4e+271) 0.0 (/ (* x (+ 2.0 (* x (+ x 2.0)))) 2.0)))))
      eps_m = fabs(eps);
      /*
       * Herbie alternative: four-regime approximation of
       * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
       * eps_m (= |eps|, computed by the caller) feeds only the x <= 85 branch.
       */
      double code(double x, double eps_m) {
      	/* Very negative x: quadratic series form. */
      	if (x <= -2.6e+143) {
      		return (x * (2.0 + (x * 2.0))) / 2.0;
      	}
      	/* Moderate x: linear correction in eps_m. */
      	if (x <= 85.0) {
      		return (2.0 - (eps_m * x)) / 2.0;
      	}
      	/* Large x: approximation is the constant 0. */
      	if (x <= 4e+271) {
      		return 0.0;
      	}
      	/* Extreme x: cubic series form. */
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      }
      
      eps_m = abs(eps)
      real(8) function code(x, eps_m)
          ! Four-regime piecewise approximation split on x; eps_m (= |eps|)
          ! feeds only the linear branch used when x <= 85.
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          if (x <= (-2.6d+143)) then
              code = (x * (2.0d0 + (x * 2.0d0))) / 2.0d0
          else if (x <= 85.0d0) then
              code = (2.0d0 - (eps_m * x)) / 2.0d0
          else if (x <= 4d+271) then
              code = 0.0d0
          else
              code = (x * (2.0d0 + (x * (x + 2.0d0)))) / 2.0d0
          end if
      end function
      
      eps_m = Math.abs(eps);
      /**
       * Herbie alternative: four-regime approximation of the Section 6.1
       * expression, split on x. eps_m (= |eps|) feeds only the x <= 85 branch.
       */
      public static double code(double x, double eps_m) {
      	// Very negative x: quadratic series form.
      	if (x <= -2.6e+143) {
      		return (x * (2.0 + (x * 2.0))) / 2.0;
      	}
      	// Moderate x: linear correction in eps_m.
      	if (x <= 85.0) {
      		return (2.0 - (eps_m * x)) / 2.0;
      	}
      	// Large x: approximation is the constant 0.
      	if (x <= 4e+271) {
      		return 0.0;
      	}
      	// Extreme x: cubic series form.
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Four-regime piecewise approximation of the Section 6.1 expression.
      
      	Regimes are split on x; eps_m (= abs(eps), computed by the caller)
      	feeds only the linear branch used when x <= 85.
      	"""
      	# Very negative x: quadratic series form.
      	if x <= -2.6e+143:
      		return (x * (2.0 + (x * 2.0))) / 2.0
      	# Moderate x: linear correction in eps_m.
      	if x <= 85.0:
      		return (2.0 - (eps_m * x)) / 2.0
      	# Large x: approximation is the constant 0.
      	if x <= 4e+271:
      		return 0.0
      	# Extreme x: cubic series form.
      	return (x * (2.0 + (x * (x + 2.0)))) / 2.0
      
      eps_m = abs(eps)
      function code(x, eps_m)
      	# Four regimes split on x; eps_m (= |eps|) feeds only the x <= 85 branch.
      	if x <= -2.6e+143
      		return Float64(Float64(x * Float64(2.0 + Float64(x * 2.0))) / 2.0)
      	elseif x <= 85.0
      		return Float64(Float64(2.0 - Float64(eps_m * x)) / 2.0)
      	elseif x <= 4e+271
      		return 0.0
      	else
      		return Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x + 2.0)))) / 2.0)
      	end
      end
      
      eps_m = abs(eps);
      function tmp_2 = code(x, eps_m)
      	% Four-regime piecewise approximation split on x; eps_m (= abs(eps))
      	% feeds only the linear branch used when x <= 85.
      	if (x <= -2.6e+143)
      		tmp_2 = (x * (2.0 + (x * 2.0))) / 2.0;
      	elseif (x <= 85.0)
      		tmp_2 = (2.0 - (eps_m * x)) / 2.0;
      	elseif (x <= 4e+271)
      		tmp_2 = 0.0;
      	else
      		tmp_2 = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	end
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Four-regime piecewise approximation selected on x; eps$95$m (= |eps|) feeds only the x <= 85 branch. *)
      code[x_, eps$95$m_] := If[LessEqual[x, -2.6e+143], N[(N[(x * N[(2.0 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 85.0], N[(N[(2.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 4e+271], 0.0, N[(N[(x * N[(2.0 + N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq -2.6 \cdot 10^{+143}:\\
      \;\;\;\;\frac{x \cdot \left(2 + x \cdot 2\right)}{2}\\
      
      \mathbf{elif}\;x \leq 85:\\
      \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\
      
      \mathbf{elif}\;x \leq 4 \cdot 10^{+271}:\\
      \;\;\;\;0\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 4 regimes
      2. if x < -2.5999999999999999e143

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 0.0%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+0.0%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*0.0%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg0.0%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub0.0%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in0.0%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--0.0%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg0.0%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg0.0%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified0.0%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 0.0%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp0.0%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*0.0%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative0.0%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp0.0%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-10.0%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*0.0%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-10.0%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified0.0%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow10.0%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*0.0%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative0.0%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg0.0%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt1.6%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr1.6%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow11.6%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified1.6%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 95.7%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + 2 \cdot x\right)}}{2} \]

        if -2.5999999999999999e143 < x < 85

        1. Initial program 54.9%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified54.9%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 39.8%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        5. Taylor expanded in x around 0 45.6%

          \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)}}{2} \]
        6. Taylor expanded in eps around inf 70.1%

          \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
        7. Step-by-step derivation
          1. associate-*r*70.1%

            \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
          2. mul-1-neg70.1%

            \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
        8. Simplified70.1%

          \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]

        if 85 < x < 3.99999999999999981e271

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 59.1%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg59.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg59.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp59.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg59.1%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub59.1%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg59.1%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp59.1%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses59.1%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified59.1%

          \[\leadsto \frac{\color{blue}{0}}{2} \]

        if 3.99999999999999981e271 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 28.4%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in28.4%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--28.4%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg28.4%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg28.4%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified28.4%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 28.4%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp28.4%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp28.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-128.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-128.4%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified28.4%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow128.4%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg73.2%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr73.2%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow173.2%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified73.2%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 73.2%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(2 + x\right)\right)}}{2} \]
      3. Recombined 4 regimes into one program.
      4. Final simplification69.5%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.6 \cdot 10^{+143}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot 2\right)}{2}\\ \mathbf{elif}\;x \leq 85:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 4 \cdot 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \]
      5. Add Preprocessing

      Alternative 13: 68.6% accurate, 8.7× speedup?

      \[\begin{array}{l} \mathit{eps}_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -820:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(2 + x \cdot \left(1 + x \cdot 0.3333333333333333\right)\right)\right)}{2}\\ \mathbf{elif}\;x \leq 800000:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ;; Herbie alternative 13: piecewise approximation of the original
      ;; expression ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2,
      ;; split into four regimes of x; eps_m (= |eps|) is not referenced here.
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x -820.0)
         (/ (* x (+ 2.0 (* x (+ 2.0 (* x (+ 1.0 (* x 0.3333333333333333))))))) 2.0)
         (if (<= x 800000.0)
           1.0
           (if (<= x 1e+271) 0.0 (/ (* x (+ 2.0 (* x (+ x 2.0)))) 2.0)))))
      eps_m = fabs(eps);
      /* Piecewise Herbie approximation (alternative 13) of the original
         expression.  eps_m (= fabs(eps), from the preprocessing step) is not
         referenced by this variant; the parameter is kept so the signature
         matches the other generated alternatives. */
      double code(double x, double eps_m) {
      	if (x <= -820.0) {
      		/* Degree-4 polynomial regime for large negative x. */
      		return x * (2.0 + x * (2.0 + x * (1.0 + x * 0.3333333333333333))) / 2.0;
      	}
      	if (x <= 800000.0) {
      		return 1.0;
      	}
      	if (x <= 1e+271) {
      		return 0.0;
      	}
      	/* Degree-3 polynomial regime for very large x. */
      	return x * (2.0 + x * (x + 2.0)) / 2.0;
      }
      
      eps_m = abs(eps)
      ! Piecewise Herbie approximation (alternative 13) of the original
      ! expression; eps_m (= |eps|, from the preprocessing step) is not
      ! referenced by this variant.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          real(8) :: tmp
          ! Degree-4 polynomial regime for large negative x.
          if (x <= (-820.0d0)) then
              tmp = (x * (2.0d0 + (x * (2.0d0 + (x * (1.0d0 + (x * 0.3333333333333333d0))))))) / 2.0d0
          else if (x <= 800000.0d0) then
              tmp = 1.0d0
          else if (x <= 1d+271) then
              tmp = 0.0d0
          else
              ! Degree-3 polynomial regime for very large x.
              tmp = (x * (2.0d0 + (x * (x + 2.0d0)))) / 2.0d0
          end if
          code = tmp
      end function
      
      eps_m = Math.abs(eps);
      /**
       * Piecewise Herbie approximation (alternative 13) of the original
       * expression.  eps_m (= Math.abs(eps), from the preprocessing step) is
       * not referenced by this variant; the parameter is kept so the
       * signature matches the other generated alternatives.
       */
      public static double code(double x, double eps_m) {
      	if (x <= -820.0) {
      		// Degree-4 polynomial regime for large negative x.
      		return x * (2.0 + x * (2.0 + x * (1.0 + x * 0.3333333333333333))) / 2.0;
      	}
      	if (x <= 800000.0) {
      		return 1.0;
      	}
      	if (x <= 1e+271) {
      		return 0.0;
      	}
      	// Degree-3 polynomial regime for very large x.
      	return x * (2.0 + x * (x + 2.0)) / 2.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Piecewise Herbie approximation (alternative 13).

      	``eps_m`` (= ``abs(eps)``, from the preprocessing step) is not
      	referenced by this variant; it is kept so the signature matches the
      	other generated alternatives.
      	"""
      	if x <= -820.0:
      		# Degree-4 polynomial regime for large negative x.
      		return x * (2.0 + x * (2.0 + x * (1.0 + x * 0.3333333333333333))) / 2.0
      	if x <= 800000.0:
      		return 1.0
      	if x <= 1e+271:
      		return 0.0
      	# Degree-3 polynomial regime for very large x.
      	return x * (2.0 + x * (x + 2.0)) / 2.0
      
      eps_m = abs(eps)
      # Piecewise Herbie approximation (alternative 13).  `eps_m` (= abs(eps),
      # from the preprocessing step) is not referenced by this variant; the
      # argument is kept for signature compatibility with the other alternatives.
      function code(x, eps_m)
      	if x <= -820.0
      		# Degree-4 polynomial regime for large negative x.
      		return Float64(Float64(x * Float64(2.0 + Float64(x * Float64(2.0 + Float64(x * Float64(1.0 + Float64(x * 0.3333333333333333))))))) / 2.0)
      	elseif x <= 800000.0
      		return 1.0
      	elseif x <= 1e+271
      		return 0.0
      	else
      		# Degree-3 polynomial regime for very large x.
      		return Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x + 2.0)))) / 2.0)
      	end
      end
      
      eps_m = abs(eps);
      function tmp_2 = code(x, eps_m)
      	% Piecewise Herbie approximation (alternative 13) of the original
      	% expression; eps_m (= abs(eps)) is not referenced by this variant.
      	tmp = 0.0;
      	% Degree-4 polynomial regime for large negative x.
      	if (x <= -820.0)
      		tmp = (x * (2.0 + (x * (2.0 + (x * (1.0 + (x * 0.3333333333333333))))))) / 2.0;
      	elseif (x <= 800000.0)
      		tmp = 1.0;
      	elseif (x <= 1e+271)
      		tmp = 0.0;
      	else
      		% Degree-3 polynomial regime for very large x.
      		tmp = (x * (2.0 + (x * (x + 2.0)))) / 2.0;
      	end
      	tmp_2 = tmp;
      end
      
      (* Preprocessing: eps enters only through its absolute value. *)
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Piecewise Herbie approximation (alternative 13): four regimes of x;
         the eps_m argument is not referenced in the body below. *)
      code[x_, eps$95$m_] := If[LessEqual[x, -820.0], N[(N[(x * N[(2.0 + N[(x * N[(2.0 + N[(x * N[(1.0 + N[(x * 0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 800000.0], 1.0, If[LessEqual[x, 1e+271], 0.0, N[(N[(x * N[(2.0 + N[(x * N[(x + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq -820:\\
      \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(2 + x \cdot \left(1 + x \cdot 0.3333333333333333\right)\right)\right)}{2}\\
      
      \mathbf{elif}\;x \leq 800000:\\
      \;\;\;\;1\\
      
      \mathbf{elif}\;x \leq 10^{+271}:\\
      \;\;\;\;0\\
      
      \mathbf{else}:\\
      \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 4 regimes
      2. if x < -820

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 0.0%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+0.0%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*0.0%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg0.0%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub0.0%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in0.0%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--0.0%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg0.0%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg0.0%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified0.0%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 0.0%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp0.0%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*0.0%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative0.0%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp0.0%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-10.0%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*0.0%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-10.0%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified0.0%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow10.0%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*0.0%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative0.0%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg0.0%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt1.6%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr1.6%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow11.6%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified1.6%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 76.7%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(2 + x \cdot \left(1 + 0.3333333333333333 \cdot x\right)\right)\right)}}{2} \]
        15. Step-by-step derivation
          1. *-commutative76.7%

            \[\leadsto \frac{x \cdot \left(2 + x \cdot \left(2 + x \cdot \left(1 + \color{blue}{x \cdot 0.3333333333333333}\right)\right)\right)}{2} \]
        16. Simplified76.7%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(2 + x \cdot \left(1 + x \cdot 0.3333333333333333\right)\right)\right)}}{2} \]

        if -820 < x < 8e5

        1. Initial program 50.5%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified50.5%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 74.7%

          \[\leadsto \frac{\color{blue}{2}}{2} \]

        if 8e5 < x < 9.99999999999999953e270

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 60.9%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg60.9%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg60.9%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp60.9%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg60.9%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub60.9%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg60.9%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp60.9%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses60.9%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified60.9%

          \[\leadsto \frac{\color{blue}{0}}{2} \]

        if 9.99999999999999953e270 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 28.4%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg28.4%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub28.4%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in28.4%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--28.4%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg28.4%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg28.4%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified28.4%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 28.4%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp28.4%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp28.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-128.4%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*28.4%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-128.4%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified28.4%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow128.4%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*28.4%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative28.4%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg73.2%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt73.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr73.2%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow173.2%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified73.2%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 73.2%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(2 + x\right)\right)}}{2} \]
      3. Recombined 4 regimes into one program.
      4. Final simplification71.5%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -820:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(2 + x \cdot \left(1 + x \cdot 0.3333333333333333\right)\right)\right)}{2}\\ \mathbf{elif}\;x \leq 800000:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x + 2\right)\right)}{2}\\ \end{array} \]
      5. Add Preprocessing

      Alternative 14: 66.2% accurate, 9.4× speedup?

      \[\begin{array}{l} \mathit{eps}_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := \frac{x \cdot \left(2 + x \cdot 2\right)}{2}\\ \mathbf{if}\;x \leq -2.6 \cdot 10^{+143}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq 85:\\ \;\;\;\;\frac{2 - \mathit{eps}_m \cdot x}{2}\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;t_0\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      ;; Herbie alternative 14: three regimes; t_0 = x*(2 + 2x)/2 is shared by
      ;; the x <= -2.6e143 and x > 1e271 branches, and the middle regime is
      ;; linear in eps_m (= |eps|).
      (FPCore (x eps_m)
       :precision binary64
       (let* ((t_0 (/ (* x (+ 2.0 (* x 2.0))) 2.0)))
         (if (<= x -2.6e+143)
           t_0
           (if (<= x 85.0) (/ (- 2.0 (* eps_m x)) 2.0) (if (<= x 1e+271) 0.0 t_0)))))
      eps_m = fabs(eps);
      /* Piecewise Herbie approximation (alternative 14).  The shared
         polynomial x*(2 + 2x)/2 covers both the very-negative and the
         very-large regimes of x; the middle regime is linear in eps_m. */
      double code(double x, double eps_m) {
      	double poly = (x * (2.0 + (x * 2.0))) / 2.0;
      	if (x <= -2.6e+143) {
      		return poly;
      	}
      	if (x <= 85.0) {
      		/* Linear-in-eps_m regime: (2 - eps_m*x)/2. */
      		return (2.0 - (eps_m * x)) / 2.0;
      	}
      	return (x <= 1e+271) ? 0.0 : poly;
      }
      
      eps_m = abs(eps)
      ! Piecewise Herbie approximation (alternative 14).  The shared
      ! polynomial t_0 = x*(2 + 2x)/2 covers both extreme regimes of x.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          real(8) :: t_0
          real(8) :: tmp
          t_0 = (x * (2.0d0 + (x * 2.0d0))) / 2.0d0
          if (x <= (-2.6d+143)) then
              tmp = t_0
          else if (x <= 85.0d0) then
              ! Linear-in-eps_m regime: (2 - eps_m*x)/2.
              tmp = (2.0d0 - (eps_m * x)) / 2.0d0
          else if (x <= 1d+271) then
              tmp = 0.0d0
          else
              tmp = t_0
          end if
          code = tmp
      end function
      
      eps_m = Math.abs(eps);
      /**
       * Piecewise Herbie approximation (alternative 14).  The shared
       * polynomial x*(2 + 2x)/2 serves both extreme regimes of x; the
       * middle regime is linear in eps_m (= Math.abs(eps)).
       */
      public static double code(double x, double eps_m) {
      	double poly = (x * (2.0 + (x * 2.0))) / 2.0;
      	if (x <= -2.6e+143) {
      		return poly;
      	}
      	if (x <= 85.0) {
      		// Linear-in-eps_m regime: (2 - eps_m*x)/2.
      		return (2.0 - (eps_m * x)) / 2.0;
      	}
      	return (x <= 1e+271) ? 0.0 : poly;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Piecewise Herbie approximation (alternative 14).

      	The shared polynomial ``x*(2 + 2*x)/2`` serves both the very-negative
      	and the very-large regimes of x; the middle regime is linear in
      	``eps_m`` (= ``abs(eps)``).
      	"""
      	poly = (x * (2.0 + (x * 2.0))) / 2.0
      	if x <= -2.6e+143:
      		return poly
      	if x <= 85.0:
      		# Linear-in-eps_m regime: (2 - eps_m*x)/2.
      		return (2.0 - (eps_m * x)) / 2.0
      	if x <= 1e+271:
      		return 0.0
      	return poly
      
      eps_m = abs(eps)
      # Piecewise Herbie approximation (alternative 14).  The shared polynomial
      # x*(2 + 2x)/2 covers both extreme regimes of x; the middle regime is
      # linear in eps_m (= abs(eps)).
      function code(x, eps_m)
      	poly = Float64(Float64(x * Float64(2.0 + Float64(x * 2.0))) / 2.0)
      	if x <= -2.6e+143
      		return poly
      	elseif x <= 85.0
      		# Linear-in-eps_m regime: (2 - eps_m*x)/2.
      		return Float64(Float64(2.0 - Float64(eps_m * x)) / 2.0)
      	elseif x <= 1e+271
      		return 0.0
      	else
      		return poly
      	end
      end
      
      eps_m = abs(eps);
      function tmp_2 = code(x, eps_m)
      	% Piecewise Herbie approximation (alternative 14).  The shared
      	% polynomial t_0 = x*(2 + 2x)/2 covers both extreme regimes of x.
      	t_0 = (x * (2.0 + (x * 2.0))) / 2.0;
      	tmp = 0.0;
      	if (x <= -2.6e+143)
      		tmp = t_0;
      	elseif (x <= 85.0)
      		% Linear-in-eps_m regime: (2 - eps_m*x)/2.
      		tmp = (2.0 - (eps_m * x)) / 2.0;
      	elseif (x <= 1e+271)
      		tmp = 0.0;
      	else
      		tmp = t_0;
      	end
      	tmp_2 = tmp;
      end
      
      (* Preprocessing: eps enters only through its absolute value. *)
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Piecewise Herbie approximation (alternative 14): t$95$0 = x*(2 + 2x)/2
         is shared by the two extreme regimes of x. *)
      code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(x * N[(2.0 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[x, -2.6e+143], t$95$0, If[LessEqual[x, 85.0], N[(N[(2.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1e+271], 0.0, t$95$0]]]]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      t_0 := \frac{x \cdot \left(2 + x \cdot 2\right)}{2}\\
      \mathbf{if}\;x \leq -2.6 \cdot 10^{+143}:\\
      \;\;\;\;t\_0\\
      
      \mathbf{elif}\;x \leq 85:\\
      \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\
      
      \mathbf{elif}\;x \leq 10^{+271}:\\
      \;\;\;\;0\\
      
      \mathbf{else}:\\
      \;\;\;\;t\_0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if x < -2.5999999999999999e143 or 9.99999999999999953e270 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 9.8%

          \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
        5. Step-by-step derivation
          1. associate--r+9.8%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)}}{2} \]
          2. associate-*r*9.8%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-1 \cdot x\right) \cdot e^{-1 \cdot x}}}{2} \]
          3. mul-1-neg9.8%

            \[\leadsto \frac{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) - \color{blue}{\left(-x\right)} \cdot e^{-1 \cdot x}}{2} \]
          4. cancel-sign-sub9.8%

            \[\leadsto \frac{\color{blue}{\left(\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}}{2} \]
          5. distribute-rgt1-in9.8%

            \[\leadsto \frac{\left(\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - -1 \cdot e^{-1 \cdot x}\right) + x \cdot e^{-1 \cdot x}}{2} \]
          6. distribute-rgt-out--9.8%

            \[\leadsto \frac{\color{blue}{e^{-1 \cdot x} \cdot \left(\left(x + 1\right) - -1\right)} + x \cdot e^{-1 \cdot x}}{2} \]
          7. mul-1-neg9.8%

            \[\leadsto \frac{e^{\color{blue}{-x}} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-1 \cdot x}}{2} \]
          8. mul-1-neg9.8%

            \[\leadsto \frac{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{\color{blue}{-x}}}{2} \]
        6. Simplified9.8%

          \[\leadsto \frac{\color{blue}{e^{-x} \cdot \left(\left(x + 1\right) - -1\right) + x \cdot e^{-x}}}{2} \]
        7. Taylor expanded in x around inf 9.8%

          \[\leadsto \frac{\color{blue}{2 \cdot \left(x \cdot e^{-x}\right)}}{2} \]
        8. Step-by-step derivation
          1. rec-exp9.8%

            \[\leadsto \frac{2 \cdot \left(x \cdot \color{blue}{\frac{1}{e^{x}}}\right)}{2} \]
          2. associate-*r*9.8%

            \[\leadsto \frac{\color{blue}{\left(2 \cdot x\right) \cdot \frac{1}{e^{x}}}}{2} \]
          3. *-commutative9.8%

            \[\leadsto \frac{\color{blue}{\left(x \cdot 2\right)} \cdot \frac{1}{e^{x}}}{2} \]
          4. rec-exp9.8%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot \color{blue}{e^{-x}}}{2} \]
          5. neg-mul-19.8%

            \[\leadsto \frac{\left(x \cdot 2\right) \cdot e^{\color{blue}{-1 \cdot x}}}{2} \]
          6. associate-*r*9.8%

            \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-1 \cdot x}\right)}}{2} \]
          7. neg-mul-19.8%

            \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
        9. Simplified9.8%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
        10. Step-by-step derivation
          1. pow19.8%

            \[\leadsto \frac{\color{blue}{{\left(x \cdot \left(2 \cdot e^{-x}\right)\right)}^{1}}}{2} \]
          2. associate-*r*9.8%

            \[\leadsto \frac{{\color{blue}{\left(\left(x \cdot 2\right) \cdot e^{-x}\right)}}^{1}}{2} \]
          3. *-commutative9.8%

            \[\leadsto \frac{{\color{blue}{\left(e^{-x} \cdot \left(x \cdot 2\right)\right)}}^{1}}{2} \]
          4. add-sqr-sqrt0.0%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          5. sqrt-unprod25.1%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          6. sqr-neg25.1%

            \[\leadsto \frac{{\left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          7. sqrt-unprod25.1%

            \[\leadsto \frac{{\left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
          8. add-sqr-sqrt26.2%

            \[\leadsto \frac{{\left(e^{\color{blue}{x}} \cdot \left(x \cdot 2\right)\right)}^{1}}{2} \]
        11. Applied egg-rr26.2%

          \[\leadsto \frac{\color{blue}{{\left(e^{x} \cdot \left(x \cdot 2\right)\right)}^{1}}}{2} \]
        12. Step-by-step derivation
          1. unpow126.2%

            \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        13. Simplified26.2%

          \[\leadsto \frac{\color{blue}{e^{x} \cdot \left(x \cdot 2\right)}}{2} \]
        14. Taylor expanded in x around 0 87.9%

          \[\leadsto \frac{\color{blue}{x \cdot \left(2 + 2 \cdot x\right)}}{2} \]

        if -2.5999999999999999e143 < x < 85

        1. Initial program 54.9%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified54.9%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 39.8%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        5. Taylor expanded in x around 0 45.6%

          \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)}}{2} \]
        6. Taylor expanded in eps around inf 70.1%

          \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
        7. Step-by-step derivation
          1. associate-*r*70.1%

            \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
          2. mul-1-neg70.1%

            \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
        8. Simplified70.1%

          \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]

        if 85 < x < 9.99999999999999953e270

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 59.1%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg59.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg59.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp59.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg59.1%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub59.1%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg59.1%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp59.1%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses59.1%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified59.1%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      3. Recombined 3 regimes into one program.
      4. Final simplification — 69.5%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.6 \cdot 10^{+143}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot 2\right)}{2}\\ \mathbf{elif}\;x \leq 85:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 10^{+271}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot 2\right)}{2}\\ \end{array} \]
      5. Add Preprocessing

      Alternative 15: 64.6% accurate, 18.9× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 85:\\ \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      (FPCore (x eps_m)
       :precision binary64
       (if (<= x 85.0) (/ (- 2.0 (* eps_m x)) 2.0) 0.0))
      eps_m = fabs(eps);
      /* Piecewise approximation: (2 - eps_m*x)/2 for x <= 85, zero above. */
      double code(double x, double eps_m) {
      	if (x <= 85.0) {
      		return (2.0 - (eps_m * x)) / 2.0;
      	}
      	return 0.0;
      }
      
      eps_m = abs(eps)
      ! Piecewise approximation: (2 - eps_m*x)/2 for x <= 85, zero above.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          if (x <= 85.0d0) then
              code = (2.0d0 - (eps_m * x)) / 2.0d0
          else
              code = 0.0d0
          end if
      end function
      
      eps_m = Math.abs(eps);
      // Piecewise approximation: (2 - eps_m*x)/2 for x <= 85, zero above.
      public static double code(double x, double eps_m) {
      	return (x <= 85.0) ? (2.0 - (eps_m * x)) / 2.0 : 0.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Piecewise approximation: (2 - eps_m*x)/2 for x <= 85, zero above."""
      	if x > 85.0:
      		return 0.0
      	return (2.0 - (eps_m * x)) / 2.0
      
      eps_m = abs(eps)
      # Piecewise approximation: (2 - eps_m*x)/2 for x <= 85, zero above.
      function code(x, eps_m)
      	if x > 85.0
      		return 0.0
      	end
      	return Float64(Float64(2.0 - Float64(eps_m * x)) / 2.0)
      end
      
      eps_m = abs(eps);
      % Piecewise approximation: (2 - eps_m*x)/2 for x <= 85, zero above.
      function y = code(x, eps_m)
      	if (x <= 85.0)
      		y = (2.0 - (eps_m * x)) / 2.0;
      	else
      		y = 0.0;
      	end
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Piecewise approximation: (2 - eps_m*x)/2 for x <= 85, zero above; N[...] keeps machine precision. *)
      code[x_, eps$95$m_] := If[LessEqual[x, 85.0], N[(N[(2.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq 85:\\
      \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\
      
      \mathbf{else}:\\
      \;\;\;\;0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if x < 85

        1. Initial program 60.2%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified60.2%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 43.6%

          \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
        5. Taylor expanded in x around 0 45.6%

          \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)}}{2} \]
        6. Taylor expanded in eps around inf 67.2%

          \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
        7. Step-by-step derivation
          1. associate-*r*67.2%

            \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
          2. mul-1-neg67.2%

            \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
        8. Simplified67.2%

          \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]

        if 85 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 54.7%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg54.7%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg54.7%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp54.7%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg54.7%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub54.7%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg54.7%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp54.7%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses54.7%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified54.7%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      3. Recombined 2 regimes into one program.
      4. Final simplification — 63.4%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 85:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
      5. Add Preprocessing

      Alternative 16: 57.8% accurate, 37.7× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 800000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
      eps_m = (fabs.f64 eps)
      (FPCore (x eps_m) :precision binary64 (if (<= x 800000.0) 1.0 0.0))
      eps_m = fabs(eps);
      /* Step approximation: 1 for x <= 8e5, 0 above; eps_m is unused. */
      double code(double x, double eps_m) {
      	return (x <= 800000.0) ? 1.0 : 0.0;
      }
      
      eps_m = abs(eps)
      ! Step approximation: 1 for x <= 8e5, 0 above; eps_m is unused.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          code = merge(1.0d0, 0.0d0, x <= 800000.0d0)
      end function
      
      eps_m = Math.abs(eps);
      // Step approximation: 1 for x <= 8e5, 0 above; eps_m is unused.
      public static double code(double x, double eps_m) {
      	return (x <= 800000.0) ? 1.0 : 0.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Step approximation: 1.0 for x <= 8e5, 0.0 above; eps_m is unused."""
      	return 1.0 if x <= 800000.0 else 0.0
      
      eps_m = abs(eps)
      # Step approximation: 1 for x <= 8e5, 0 above; eps_m is unused.
      function code(x, eps_m)
      	return x <= 800000.0 ? 1.0 : 0.0
      end
      
      eps_m = abs(eps);
      % Step approximation: 1 for x <= 8e5, 0 above; eps_m is unused.
      function y = code(x, eps_m)
      	if (x <= 800000.0)
      		y = 1.0;
      	else
      		y = 0.0;
      	end
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Step approximation: 1 for x <= 8e5, 0 above; eps$95$m is unused. *)
      code[x_, eps$95$m_] := If[LessEqual[x, 800000.0], 1.0, 0.0]
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      \begin{array}{l}
      \mathbf{if}\;x \leq 800000:\\
      \;\;\;\;1\\
      
      \mathbf{else}:\\
      \;\;\;\;0\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 2 regimes
      2. if x < 8e5

        1. Initial program 60.6%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified60.6%

          \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in x around 0 60.2%

          \[\leadsto \frac{\color{blue}{2}}{2} \]

        if 8e5 < x

        1. Initial program 100.0%

          \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
        2. Simplified100.0%

          \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
        3. Add Preprocessing
        4. Taylor expanded in eps around 0 56.1%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
        5. Step-by-step derivation
          1. mul-1-neg56.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
          2. mul-1-neg56.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
          3. rec-exp56.1%

            \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
          4. sub-neg56.1%

            \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
          5. div-sub56.1%

            \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
          6. mul-1-neg56.1%

            \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          7. rec-exp56.1%

            \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
          8. +-inverses56.1%

            \[\leadsto \frac{\color{blue}{0}}{2} \]
        6. Simplified56.1%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      3. Recombined 2 regimes into one program.
      4. Final simplification — 59.0%

        \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 800000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
      5. Add Preprocessing

      Alternative 17: 16.0% accurate, 227.0× speedup?

      \[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ 0 \end{array} \]
      eps_m = (fabs.f64 eps)
      (FPCore (x eps_m) :precision binary64 0.0)
      eps_m = fabs(eps);
      /* Degenerate approximation: constant 0 regardless of x and eps_m. */
      double code(double x, double eps_m) {
      	return 0.0;
      }
      
      eps_m = abs(eps)
      ! Degenerate approximation: constant 0 regardless of x and eps_m.
      real(8) function code(x, eps_m)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps_m
          code = 0.0d0
      end function
      
      eps_m = Math.abs(eps);
      // Degenerate approximation: constant 0 regardless of x and eps_m.
      public static double code(double x, double eps_m) {
      	return 0.0;
      }
      
      eps_m = math.fabs(eps)
      def code(x, eps_m):
      	"""Degenerate approximation: constant 0.0 regardless of x and eps_m."""
      	return 0.0
      
      eps_m = abs(eps)
      # Degenerate approximation: constant 0.0 regardless of x and eps_m.
      function code(x, eps_m)
      	return 0.0
      end
      
      eps_m = abs(eps);
      % Degenerate approximation: constant 0 regardless of x and eps_m.
      function tmp = code(x, eps_m)
      	tmp = 0.0;
      end
      
      eps_m = N[Abs[eps], $MachinePrecision]
      (* Degenerate approximation: constant 0 regardless of inputs. *)
      code[x_, eps$95$m_] := 0.0
      
      \begin{array}{l}
      eps_m = \left|\varepsilon\right|
      
      \\
      0
      \end{array}
      
      Derivation
      1. Initial program 72.0%

        \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      2. Simplified — 60.5%

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
      3. Add Preprocessing
      4. Taylor expanded in eps around 0 17.8%

        \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}}{\varepsilon}}}{2} \]
      5. Step-by-step derivation
        1. mul-1-neg17.8%

          \[\leadsto \frac{\frac{e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}}{\varepsilon}}{2} \]
        2. mul-1-neg17.8%

          \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-e^{\color{blue}{-x}}\right)}{\varepsilon}}{2} \]
        3. rec-exp17.8%

          \[\leadsto \frac{\frac{e^{-1 \cdot x} + \left(-\color{blue}{\frac{1}{e^{x}}}\right)}{\varepsilon}}{2} \]
        4. sub-neg17.8%

          \[\leadsto \frac{\frac{\color{blue}{e^{-1 \cdot x} - \frac{1}{e^{x}}}}{\varepsilon}}{2} \]
        5. div-sub17.8%

          \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]
        6. mul-1-neg17.8%

          \[\leadsto \frac{\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
        7. rec-exp17.8%

          \[\leadsto \frac{\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}{2} \]
        8. +-inverses18.0%

          \[\leadsto \frac{\color{blue}{0}}{2} \]
      6. Simplified18.0%

        \[\leadsto \frac{\color{blue}{0}}{2} \]
      7. Final simplification18.0%

        \[\leadsto 0 \]
      8. Add Preprocessing

      Reproduce

      ?
      herbie shell --seed 2024081 
      (FPCore (x eps)
        :name "NMSE Section 6.1 mentioned, A"
        :precision binary64
        (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))