NMSE Section 6.1, A

Percentage Accurate: 73.3% → 100.0%
Time: 18.7s
Alternatives: 12
Speedup: 1.7×

Specification

?
\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
; Specification: ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2,
; evaluated in IEEE-754 binary64. Per the report, 73.3% average accuracy.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
// Direct double-precision transcription of the specification:
// ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
// 73.3% average accuracy per the report.
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Direct transcription of the specification in double precision (real(8)).
! 73.3% average accuracy per the report.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
// Direct double-precision transcription of the specification.
// 73.3% average accuracy per the report.
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	# Direct binary64 transcription of the specification:
	# ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.
	inv_eps = 1.0 / eps
	left = (1.0 + inv_eps) * math.exp(-((1.0 - eps) * x))
	right = (inv_eps - 1.0) * math.exp(-((1.0 + eps) * x))
	return (left - right) / 2.0
# Direct transcription of the specification; the explicit Float64() wrappers pin
# every intermediate to binary64 rounding. 73.3% average accuracy per the report.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Direct transcription of the specification in double precision.
% 73.3% average accuracy per the report.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Direct transcription of the specification; each N[..., $MachinePrecision] rounds the intermediate to machine precision. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 12 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 73.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
; Initial program as benchmarked: identical to the specification above,
; 73.3% accurate per the report.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
// Initial program (73.3% accurate, 1.0x speedup per the report);
// byte-identical to the specification transcription.
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Initial program (73.3% accurate per the report); identical to the
! specification transcription in double precision.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
// Initial program (73.3% accurate per the report); identical to the
// specification transcription.
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	# Initial program: average of two exponential terms weighted by 1/eps.
	grow_term = (1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))
	shrink_term = ((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x))
	return (grow_term - shrink_term) / 2.0
# Initial program (73.3% accurate per the report); Float64() wrappers pin every
# intermediate to binary64 rounding.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Initial program (73.3% accurate per the report); identical to the
% specification transcription in double precision.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Initial program (73.3% accurate per the report); each N[..., $MachinePrecision] rounds the intermediate to machine precision. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 100.0% accurate, 1.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := \left(x + 1\right) \cdot e^{-x}\\ \mathbf{if}\;eps\_m \leq 0.001:\\ \;\;\;\;\frac{t\_0 + t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) \cdot e^{x \cdot \left(eps\_m + -1\right)} - \left(-1 + \frac{1}{eps\_m}\right) \cdot e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Alternative 1 (100.0% accurate per the report). Input eps_m = |eps| is
; produced by the report's preprocessing step.
; For eps_m <= 0.001 the Taylor limit (x + 1) e^-x replaces the cancelling
; difference of exponentials; otherwise the rearranged direct form is used.
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (* (+ x 1.0) (exp (- x)))))
   (if (<= eps_m 0.001)
     (/ (+ t_0 t_0) 2.0)
     (/
      (-
       (* (+ 1.0 (/ 1.0 eps_m)) (exp (* x (+ eps_m -1.0))))
       (* (+ -1.0 (/ 1.0 eps_m)) (exp (* x (- -1.0 eps_m)))))
      2.0))))
eps_m = fabs(eps);
// Alternative 1 (100.0% accurate per the report); caller passes eps_m = fabs(eps).
// For eps_m <= 0.001, the Taylor limit (x + 1) * exp(-x) (expanded in eps around 0)
// replaces the nearly-cancelling difference of exponentials.
double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * exp(-x);
	double tmp;
	if (eps_m <= 0.001) {
		// Small-eps regime: series limit.
		tmp = (t_0 + t_0) / 2.0;
	} else {
		// General regime: rearranged direct evaluation.
		tmp = (((1.0 + (1.0 / eps_m)) * exp((x * (eps_m + -1.0)))) - ((-1.0 + (1.0 / eps_m)) * exp((x * (-1.0 - eps_m))))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Alternative 1 (100.0% accurate per the report); caller passes eps_m = abs(eps).
! For eps_m <= 0.001 the Taylor limit (x + 1) * exp(-x) replaces the
! nearly-cancelling difference of exponentials.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (x + 1.0d0) * exp(-x)
    if (eps_m <= 0.001d0) then
        ! Small-eps regime: series limit.
        tmp = (t_0 + t_0) / 2.0d0
    else
        ! General regime: rearranged direct evaluation.
        tmp = (((1.0d0 + (1.0d0 / eps_m)) * exp((x * (eps_m + (-1.0d0))))) - (((-1.0d0) + (1.0d0 / eps_m)) * exp((x * ((-1.0d0) - eps_m))))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
// Alternative 1 (100.0% accurate per the report); caller passes eps_m = Math.abs(eps).
// For eps_m <= 0.001, the Taylor limit (x + 1) * exp(-x) replaces the
// nearly-cancelling difference of exponentials.
public static double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * Math.exp(-x);
	double tmp;
	if (eps_m <= 0.001) {
		// Small-eps regime: series limit.
		tmp = (t_0 + t_0) / 2.0;
	} else {
		// General regime: rearranged direct evaluation.
		tmp = (((1.0 + (1.0 / eps_m)) * Math.exp((x * (eps_m + -1.0)))) - ((-1.0 + (1.0 / eps_m)) * Math.exp((x * (-1.0 - eps_m))))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	# Alternative 1: caller passes eps_m = |eps|.
	# Small-eps regime uses the series limit (x + 1) * e^-x to avoid
	# cancellation between the two exponential terms.
	series = (x + 1.0) * math.exp(-x)
	if eps_m <= 0.001:
		return (series + series) / 2.0
	# General regime: rearranged direct evaluation.
	inv = 1.0 / eps_m
	pos = (1.0 + inv) * math.exp(x * (eps_m + -1.0))
	neg = (-1.0 + inv) * math.exp(x * (-1.0 - eps_m))
	return (pos - neg) / 2.0
eps_m = abs(eps)
# Alternative 1 (100.0% accurate per the report); caller passes eps_m = abs(eps).
# For eps_m <= 0.001 the Taylor limit (x + 1) * exp(-x) replaces the
# nearly-cancelling difference of exponentials.
function code(x, eps_m)
	t_0 = Float64(Float64(x + 1.0) * exp(Float64(-x)))
	tmp = 0.0
	if (eps_m <= 0.001)
		# Small-eps regime: series limit.
		tmp = Float64(Float64(t_0 + t_0) / 2.0);
	else
		# General regime: rearranged direct evaluation.
		tmp = Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) * exp(Float64(x * Float64(eps_m + -1.0)))) - Float64(Float64(-1.0 + Float64(1.0 / eps_m)) * exp(Float64(x * Float64(-1.0 - eps_m))))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Alternative 1 (100.0% accurate per the report); caller passes eps_m = abs(eps).
% For eps_m <= 0.001 the Taylor limit (x + 1) * exp(-x) replaces the
% nearly-cancelling difference of exponentials.
function tmp_2 = code(x, eps_m)
	t_0 = (x + 1.0) * exp(-x);
	tmp = 0.0;
	if (eps_m <= 0.001)
		% Small-eps regime: series limit.
		tmp = (t_0 + t_0) / 2.0;
	else
		% General regime: rearranged direct evaluation.
		tmp = (((1.0 + (1.0 / eps_m)) * exp((x * (eps_m + -1.0)))) - ((-1.0 + (1.0 / eps_m)) * exp((x * (-1.0 - eps_m))))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Alternative 1 (100.0% accurate per the report); caller passes eps_m = Abs[eps]. Branches on eps_m <= 0.001 between the Taylor-limit form and the rearranged direct evaluation. *)
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(x + 1.0), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[eps$95$m, 0.001], N[(N[(t$95$0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] - N[(N[(-1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := \left(x + 1\right) \cdot e^{-x}\\
\mathbf{if}\;eps\_m \leq 0.001:\\
\;\;\;\;\frac{t\_0 + t\_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) \cdot e^{x \cdot \left(eps\_m + -1\right)} - \left(-1 + \frac{1}{eps\_m}\right) \cdot e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 1e-3

    1. Initial program 63.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg63.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity63.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg63.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity63.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in63.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg63.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval63.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in63.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified63.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in eps around 0 69.5%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    6. Simplified70.1%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]

    if 1e-3 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Add Preprocessing
  3. Recombined 2 regimes into one program.
  4. Final simplification78.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 0.001:\\ \;\;\;\;\frac{\left(x + 1\right) \cdot e^{-x} + \left(x + 1\right) \cdot e^{-x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} - \left(-1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 85.3% accurate, 1.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := \left(x + 1\right) \cdot e^{-x}\\ \mathbf{if}\;x \leq -2 \cdot 10^{-262}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 1.4 \cdot 10^{+42}:\\ \;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\ \mathbf{elif}\;x \leq 4.5 \cdot 10^{+74}:\\ \;\;\;\;\frac{t\_0 + t\_0}{2}\\ \mathbf{elif}\;x \leq 3.5 \cdot 10^{+249} \lor \neg \left(x \leq 1.66 \cdot 10^{+289}\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Alternative 2 (85.3% accurate per the report). Input eps_m = |eps| is
; produced by the report's preprocessing step. Five regimes are split over x,
; each using a branch-specific simplification of the original expression.
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (* (+ x 1.0) (exp (- x)))))
   (if (<= x -2e-262)
     (/ (+ 1.0 (exp (* x (- 1.0 eps_m)))) 2.0)
     (if (<= x 1.4e+42)
       (/ (+ 1.0 (exp (* eps_m x))) 2.0)
       (if (<= x 4.5e+74)
         (/ (+ t_0 t_0) 2.0)
         (if (or (<= x 3.5e+249) (not (<= x 1.66e+289)))
           (/ (+ 1.0 (exp (* x (+ eps_m -1.0)))) 2.0)
           (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)))))))
eps_m = fabs(eps);
// Alternative 2 (85.3% accurate per the report); caller passes eps_m = fabs(eps).
// Five regimes split over x, each a branch-specific simplification derived
// by the report (Taylor expansions in x and eps).
double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * exp(-x);
	double tmp;
	if (x <= -2e-262) {
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	} else if (x <= 1.4e+42) {
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	} else if (x <= 4.5e+74) {
		// Series-limit regime: (x + 1) * exp(-x).
		tmp = (t_0 + t_0) / 2.0;
	} else if ((x <= 3.5e+249) || !(x <= 1.66e+289)) {
		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Alternative 2 (85.3% accurate per the report); caller passes eps_m = abs(eps).
! Five regimes split over x, each a branch-specific simplification.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (x + 1.0d0) * exp(-x)
    if (x <= (-2d-262)) then
        tmp = (1.0d0 + exp((x * (1.0d0 - eps_m)))) / 2.0d0
    else if (x <= 1.4d+42) then
        tmp = (1.0d0 + exp((eps_m * x))) / 2.0d0
    else if (x <= 4.5d+74) then
        ! Series-limit regime: (x + 1) * exp(-x).
        tmp = (t_0 + t_0) / 2.0d0
    else if ((x <= 3.5d+249) .or. (.not. (x <= 1.66d+289))) then
        tmp = (1.0d0 + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
    else
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
// Alternative 2 (85.3% accurate per the report); caller passes eps_m = Math.abs(eps).
// Five regimes split over x, each a branch-specific simplification.
public static double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * Math.exp(-x);
	double tmp;
	if (x <= -2e-262) {
		tmp = (1.0 + Math.exp((x * (1.0 - eps_m)))) / 2.0;
	} else if (x <= 1.4e+42) {
		tmp = (1.0 + Math.exp((eps_m * x))) / 2.0;
	} else if (x <= 4.5e+74) {
		// Series-limit regime: (x + 1) * exp(-x).
		tmp = (t_0 + t_0) / 2.0;
	} else if ((x <= 3.5e+249) || !(x <= 1.66e+289)) {
		tmp = (1.0 + Math.exp((x * (eps_m + -1.0)))) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	# Alternative 2: caller passes eps_m = |eps|. Five regimes split over x,
	# each a branch-specific simplification of the original expression.
	series = (x + 1.0) * math.exp(-x)
	if x <= -2e-262:
		return (1.0 + math.exp(x * (1.0 - eps_m))) / 2.0
	if x <= 1.4e+42:
		return (1.0 + math.exp(eps_m * x)) / 2.0
	if x <= 4.5e+74:
		# Series-limit regime: (x + 1) * e^-x.
		return (series + series) / 2.0
	if x <= 3.5e+249 or not (x <= 1.66e+289):
		return (1.0 + math.exp(x * (eps_m + -1.0))) / 2.0
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
eps_m = abs(eps)
# Alternative 2 (85.3% accurate per the report); caller passes eps_m = abs(eps).
# Five regimes split over x, each a branch-specific simplification.
function code(x, eps_m)
	t_0 = Float64(Float64(x + 1.0) * exp(Float64(-x)))
	tmp = 0.0
	if (x <= -2e-262)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(1.0 - eps_m)))) / 2.0);
	elseif (x <= 1.4e+42)
		tmp = Float64(Float64(1.0 + exp(Float64(eps_m * x))) / 2.0);
	elseif (x <= 4.5e+74)
		# Series-limit regime: (x + 1) * exp(-x).
		tmp = Float64(Float64(t_0 + t_0) / 2.0);
	elseif ((x <= 3.5e+249) || !(x <= 1.66e+289))
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0);
	else
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Alternative 2 (85.3% accurate per the report); caller passes eps_m = abs(eps).
% Five regimes split over x, each a branch-specific simplification.
function tmp_2 = code(x, eps_m)
	t_0 = (x + 1.0) * exp(-x);
	tmp = 0.0;
	if (x <= -2e-262)
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	elseif (x <= 1.4e+42)
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	elseif (x <= 4.5e+74)
		% Series-limit regime: (x + 1) * exp(-x).
		tmp = (t_0 + t_0) / 2.0;
	elseif ((x <= 3.5e+249) || ~((x <= 1.66e+289)))
		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	else
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Alternative 2 (85.3% accurate per the report); caller passes eps_m = Abs[eps]. Five regimes split over x, each a branch-specific simplification. *)
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(x + 1.0), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -2e-262], N[(N[(1.0 + N[Exp[N[(x * N[(1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.4e+42], N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 4.5e+74], N[(N[(t$95$0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 3.5e+249], N[Not[LessEqual[x, 1.66e+289]], $MachinePrecision]], N[(N[(1.0 + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := \left(x + 1\right) \cdot e^{-x}\\
\mathbf{if}\;x \leq -2 \cdot 10^{-262}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\

\mathbf{elif}\;x \leq 1.4 \cdot 10^{+42}:\\
\;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\

\mathbf{elif}\;x \leq 4.5 \cdot 10^{+74}:\\
\;\;\;\;\frac{t\_0 + t\_0}{2}\\

\mathbf{elif}\;x \leq 3.5 \cdot 10^{+249} \lor \neg \left(x \leq 1.66 \cdot 10^{+289}\right):\\
\;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 5 regimes
  2. if x < -2.00000000000000002e-262

    1. Initial program 71.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg71.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity71.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg71.5%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity71.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified71.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 46.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod0.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in eps around inf 68.0%

      \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(1 - \varepsilon\right)}}}{2} \]

    if -2.00000000000000002e-262 < x < 1.4e42

    1. Initial program 56.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg56.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity56.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg56.3%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity56.3%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified56.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 83.8%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod83.8%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod83.8%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*83.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg83.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg83.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg83.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified83.8%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around inf 84.6%

      \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. *-commutative84.6%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    11. Simplified84.6%

      \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]

    if 1.4e42 < x < 4.5e74

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in eps around 0 89.1%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    6. Simplified89.1%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]

    if 4.5e74 < x < 3.50000000000000012e249 or 1.6600000000000001e289 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.5%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 40.7%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod40.7%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod40.7%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*40.7%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg40.7%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg40.7%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg40.7%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified40.7%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]

    if 3.50000000000000012e249 < x < 1.6600000000000001e289

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 15.5%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 74.2%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  3. Recombined 5 regimes into one program.
  4. Final simplification70.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-262}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 1.4 \cdot 10^{+42}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\ \mathbf{elif}\;x \leq 4.5 \cdot 10^{+74}:\\ \;\;\;\;\frac{\left(x + 1\right) \cdot e^{-x} + \left(x + 1\right) \cdot e^{-x}}{2}\\ \mathbf{elif}\;x \leq 3.5 \cdot 10^{+249} \lor \neg \left(x \leq 1.66 \cdot 10^{+289}\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 85.3% accurate, 1.0× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := e^{-x}\\ \mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 3.5 \cdot 10^{+41}:\\ \;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\ \mathbf{elif}\;x \leq 3.8 \cdot 10^{+74}:\\ \;\;\;\;\frac{\frac{t\_0 - t\_0}{eps\_m}}{2}\\ \mathbf{elif}\;x \leq 3.6 \cdot 10^{+249} \lor \neg \left(x \leq 1.15 \cdot 10^{+286}\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Herbie "Alternative 3": regime-split rewrite of the NMSE 6.1 expression
;;   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
;; over 5 ranges of x; eps_m = |eps| is applied as preprocessing by the caller.
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (exp (- x))))
   (if (<= x -2.7e-263)
     (/ (+ 1.0 (exp (* x (- 1.0 eps_m)))) 2.0)
     (if (<= x 3.5e+41)
       (/ (+ 1.0 (exp (* eps_m x))) 2.0)
       (if (<= x 3.8e+74)
         ; exp(-x) underflows to 0 in this regime, so (- t_0 t_0) is exactly 0
         (/ (/ (- t_0 t_0) eps_m) 2.0)
         (if (or (<= x 3.6e+249) (not (<= x 1.15e+286)))
           (/ (+ 1.0 (exp (* x (+ eps_m -1.0)))) 2.0)
           (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)))))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double t_0 = exp(-x);
	double tmp;
	if (x <= -2.7e-263) {
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	} else if (x <= 3.5e+41) {
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	} else if (x <= 3.8e+74) {
		tmp = ((t_0 - t_0) / eps_m) / 2.0;
	} else if ((x <= 3.6e+249) || !(x <= 1.15e+286)) {
		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Herbie alternative 3: regime-split approximation of
!   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
! where eps_m = |eps| is applied by the caller as preprocessing.
! exp(-x) is computed only in the branch that uses it, so it no longer
! overflows needlessly when x is very negative.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    if (x <= (-2.7d-263)) then
        tmp = (1.0d0 + exp((x * (1.0d0 - eps_m)))) / 2.0d0
    else if (x <= 3.5d+41) then
        tmp = (1.0d0 + exp((eps_m * x))) / 2.0d0
    else if (x <= 3.8d+74) then
        ! exp(-x) underflows to zero in this regime, so the branch yields 0
        t_0 = exp(-x)
        tmp = ((t_0 - t_0) / eps_m) / 2.0d0
    else if ((x <= 3.6d+249) .or. (.not. (x <= 1.15d+286))) then
        tmp = (1.0d0 + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
    else
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Herbie alternative 3: regime-split approximation of
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
 * where eps_m = |eps| is applied by the caller as preprocessing.
 * Math.exp(-x) is now evaluated only in the one branch that uses it, so it
 * no longer overflows needlessly to +Infinity for very negative x.
 */
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= -2.7e-263) {
		tmp = (1.0 + Math.exp((x * (1.0 - eps_m)))) / 2.0;
	} else if (x <= 3.5e+41) {
		tmp = (1.0 + Math.exp((eps_m * x))) / 2.0;
	} else if (x <= 3.8e+74) {
		// Math.exp(-x) underflows to 0.0 in this regime, so the branch yields 0.
		double t_0 = Math.exp(-x);
		tmp = ((t_0 - t_0) / eps_m) / 2.0;
	} else if ((x <= 3.6e+249) || !(x <= 1.15e+286)) {
		tmp = (1.0 + Math.exp((x * (eps_m + -1.0)))) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Herbie alternative 3: regime-split approximation of
	((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
	where eps_m = |eps| is applied by the caller as preprocessing.

	Bug fix: the generated original computed t_0 = math.exp(-x)
	unconditionally, which raises OverflowError for large-magnitude
	negative x (e.g. x = -1000) even though t_0 is only used in the
	3.5e+41 < x <= 3.8e+74 branch.  It is now computed lazily there.
	"""
	tmp = 0
	if x <= -2.7e-263:
		tmp = (1.0 + math.exp((x * (1.0 - eps_m)))) / 2.0
	elif x <= 3.5e+41:
		tmp = (1.0 + math.exp((eps_m * x))) / 2.0
	elif x <= 3.8e+74:
		# math.exp(-x) underflows to 0.0 in this regime, so the branch yields 0.
		t_0 = math.exp(-x)
		tmp = ((t_0 - t_0) / eps_m) / 2.0
	elif (x <= 3.6e+249) or not (x <= 1.15e+286):
		tmp = (1.0 + math.exp((x * (eps_m + -1.0)))) / 2.0
	else:
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	return tmp
eps_m = abs(eps)
# Herbie alternative 3: regime-split approximation of
#   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
# where eps_m = abs(eps) is applied by the caller as preprocessing.
# exp(-x) is evaluated only in the branch that uses it, avoiding a needless
# overflow to Inf for very negative x.
function code(x, eps_m)
	tmp = 0.0
	if (x <= -2.7e-263)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(1.0 - eps_m)))) / 2.0);
	elseif (x <= 3.5e+41)
		tmp = Float64(Float64(1.0 + exp(Float64(eps_m * x))) / 2.0);
	elseif (x <= 3.8e+74)
		# exp(-x) underflows to 0.0 in this regime, so the branch yields 0
		t_0 = exp(Float64(-x))
		tmp = Float64(Float64(Float64(t_0 - t_0) / eps_m) / 2.0);
	elseif ((x <= 3.6e+249) || !(x <= 1.15e+286))
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0);
	else
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	% Herbie alternative 3: regime-split approximation of
	%   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
	% where eps_m = abs(eps) is applied by the caller as preprocessing.
	% exp(-x) is evaluated only in the branch that uses it, avoiding a
	% needless overflow to Inf for very negative x.
	tmp = 0.0;
	if (x <= -2.7e-263)
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	elseif (x <= 3.5e+41)
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	elseif (x <= 3.8e+74)
		% exp(-x) underflows to 0 in this regime, so the branch yields 0
		t_0 = exp(-x);
		tmp = ((t_0 - t_0) / eps_m) / 2.0;
	elseif ((x <= 3.6e+249) || ~((x <= 1.15e+286)))
		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	else
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Herbie alternative 3: regime-split approximation; the caller supplies eps$95$m = Abs[eps] as preprocessing. In the 3.5e+41 < x <= 3.8e+74 branch, t$95$0 - t$95$0 is identically 0. *)
code[x_, eps$95$m_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, If[LessEqual[x, -2.7e-263], N[(N[(1.0 + N[Exp[N[(x * N[(1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 3.5e+41], N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 3.8e+74], N[(N[(N[(t$95$0 - t$95$0), $MachinePrecision] / eps$95$m), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 3.6e+249], N[Not[LessEqual[x, 1.15e+286]], $MachinePrecision]], N[(N[(1.0 + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := e^{-x}\\
\mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\

\mathbf{elif}\;x \leq 3.5 \cdot 10^{+41}:\\
\;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\

\mathbf{elif}\;x \leq 3.8 \cdot 10^{+74}:\\
\;\;\;\;\frac{\frac{t\_0 - t\_0}{eps\_m}}{2}\\

\mathbf{elif}\;x \leq 3.6 \cdot 10^{+249} \lor \neg \left(x \leq 1.15 \cdot 10^{+286}\right):\\
\;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 5 regimes
  2. if x < -2.70000000000000003e-263

    1. Initial program 71.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg71.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity71.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg71.5%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity71.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified71.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 46.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod0.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in eps around inf 68.0%

      \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(1 - \varepsilon\right)}}}{2} \]

    if -2.70000000000000003e-263 < x < 3.4999999999999999e41

    1. Initial program 56.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg56.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity56.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg56.3%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity56.3%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified56.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 83.8%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod83.8%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod83.8%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*83.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg83.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg83.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg83.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified83.8%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around inf 84.6%

      \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. *-commutative84.6%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    11. Simplified84.6%

      \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]

    if 3.4999999999999999e41 < x < 3.7999999999999998e74

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 - \frac{-1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 89.1%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. inv-pow89.1%

        \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{{\left(e^{x}\right)}^{-1}}}{\varepsilon}}{2} \]
    6. Applied egg-rr89.1%

      \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{{\left(e^{x}\right)}^{-1}}}{\varepsilon}}{2} \]
    7. Step-by-step derivation
      1. unpow-189.1%

        \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{\frac{1}{e^{x}}}}{\varepsilon}}{2} \]
      2. rec-exp89.1%

        \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{e^{-x}}}{\varepsilon}}{2} \]
    8. Simplified89.1%

      \[\leadsto \frac{\frac{e^{-1 \cdot x} - \color{blue}{e^{-x}}}{\varepsilon}}{2} \]

    if 3.7999999999999998e74 < x < 3.5999999999999997e249 or 1.1500000000000001e286 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.5%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 40.7%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod40.7%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod40.7%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*40.7%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg40.7%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg40.7%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg40.7%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified40.7%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]

    if 3.5999999999999997e249 < x < 1.1500000000000001e286

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 15.5%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 74.2%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  3. Recombined 5 regimes into one program.
  4. Final simplification70.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 3.5 \cdot 10^{+41}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\ \mathbf{elif}\;x \leq 3.8 \cdot 10^{+74}:\\ \;\;\;\;\frac{\frac{e^{-x} - e^{-x}}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 3.6 \cdot 10^{+249} \lor \neg \left(x \leq 1.15 \cdot 10^{+286}\right):\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 85.3% accurate, 1.7× speedup?

\[\begin{array}{l} eps\_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 1.2 \cdot 10^{+42}:\\ \;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\ \mathbf{elif}\;x \leq 3.9 \cdot 10^{+74} \lor \neg \left(x \leq 3.1 \cdot 10^{+249}\right) \land x \leq 7.5 \cdot 10^{+287}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Regime-split approximation of
;;   ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2
;; eps_m is assumed to be |eps|; the absolute value is taken by the caller.
;; Branch thresholds are regime boundaries chosen by the derivation above.
(FPCore (x eps_m)
 :precision binary64
 ;; Negative x: exponent x*(1 - eps_m) dominates.
 (if (<= x -2.7e-263)
   (/ (+ 1.0 (exp (* x (- 1.0 eps_m)))) 2.0)
   ;; Moderate x: reduces to exp(eps_m * x) per the Taylor expansion above.
   (if (<= x 1.2e+42)
     (/ (+ 1.0 (exp (* eps_m x))) 2.0)
     ;; Middle bands: exponentials drop out, only the 1/eps_m terms survive.
     (if (or (<= x 3.9e+74) (and (not (<= x 3.1e+249)) (<= x 7.5e+287)))
       (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)
       (/ (+ 1.0 (exp (* x (+ eps_m -1.0)))) 2.0)))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= -2.7e-263) {
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	} else if (x <= 1.2e+42) {
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	} else if ((x <= 3.9e+74) || (!(x <= 3.1e+249) && (x <= 7.5e+287))) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else {
		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Regime-split approximation of
!   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
! eps_m is assumed to be abs(eps); the caller takes the absolute value.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-2.7d-263)) then
        ! Negative x: exponent x*(1 - eps_m) dominates.
        tmp = (1.0d0 + exp((x * (1.0d0 - eps_m)))) / 2.0d0
    else if (x <= 1.2d+42) then
        ! Moderate x: collapses to exp(eps_m*x) per the derivation above.
        tmp = (1.0d0 + exp((eps_m * x))) / 2.0d0
    else if ((x <= 3.9d+74) .or. (.not. (x <= 3.1d+249)) .and. (x <= 7.5d+287)) then
        ! NOTE: .and. binds tighter than .or., so this parses as
        ! A .or. (B .and. C), which matches the intended grouping.
        ! Only the 1/eps_m terms survive in this band.
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    else
        tmp = (1.0d0 + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Regime-split approximation of
 * ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
 * eps_m is assumed to be Math.abs(eps); the caller takes the absolute value.
 *
 * @param x     evaluation point; its range selects the asymptotic form
 * @param eps_m absolute value of the original eps parameter
 * @return the approximated value for the selected regime
 */
public static double code(double x, double eps_m) {
	if (x <= -2.7e-263) {
		// Negative x: exponent x*(1 - eps_m) dominates.
		return (1.0 + Math.exp(x * (1.0 - eps_m))) / 2.0;
	}
	if (x <= 1.2e+42) {
		// Moderate x: collapses to exp(eps_m * x).
		return (1.0 + Math.exp(eps_m * x)) / 2.0;
	}
	boolean rationalBand = (x <= 3.9e+74) || (!(x <= 3.1e+249) && (x <= 7.5e+287));
	if (rationalBand) {
		// Exponentials drop out; only the 1/eps_m terms survive.
		return ((1.0 + 1.0 / eps_m) + (1.0 + -1.0 / eps_m)) / 2.0;
	}
	return (1.0 + Math.exp(x * (eps_m + -1.0))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Regime-split approximation of
	((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.

	eps_m is assumed to be abs(eps); the caller takes the absolute value.
	Each branch is the asymptotic form selected for that range of x.
	"""
	if x <= -2.7e-263:
		# Negative x: exponent x*(1 - eps_m) dominates.
		return (1.0 + math.exp(x * (1.0 - eps_m))) / 2.0
	if x <= 1.2e+42:
		# Moderate x: collapses to exp(eps_m * x).
		return (1.0 + math.exp(eps_m * x)) / 2.0
	if x <= 3.9e+74 or (not (x <= 3.1e+249) and x <= 7.5e+287):
		# Exponentials drop out; only the 1/eps_m terms survive.
		return ((1.0 + 1.0 / eps_m) + (1.0 + -1.0 / eps_m)) / 2.0
	return (1.0 + math.exp(x * (eps_m + -1.0))) / 2.0
eps_m = abs(eps)
# Regime-split approximation of
#   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
# eps_m is assumed to be abs(eps); the caller takes the absolute value.
function code(x, eps_m)
	tmp = 0.0
	if (x <= -2.7e-263)
		# Negative x: exponent x*(1 - eps_m) dominates.
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(1.0 - eps_m)))) / 2.0);
	elseif (x <= 1.2e+42)
		# Moderate x: collapses to exp(eps_m*x) per the derivation above.
		tmp = Float64(Float64(1.0 + exp(Float64(eps_m * x))) / 2.0);
	elseif ((x <= 3.9e+74) || (!(x <= 3.1e+249) && (x <= 7.5e+287)))
		# Exponentials drop out; only the 1/eps_m terms survive.
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	else
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Regime-split approximation of
%   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
% eps_m is assumed to be abs(eps); the caller takes the absolute value.
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -2.7e-263)
		% Negative x: exponent x*(1 - eps_m) dominates.
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	elseif (x <= 1.2e+42)
		% Moderate x: collapses to exp(eps_m*x) per the derivation above.
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	elseif ((x <= 3.9e+74) || (~((x <= 3.1e+249)) && (x <= 7.5e+287)))
		% Exponentials drop out; only the 1/eps_m terms survive.
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	else
		tmp = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Regime-split approximation of ((1 + 1/eps)*Exp[-(1-eps) x] - (1/eps - 1)*Exp[-(1+eps) x])/2; eps$95$m is assumed to be Abs[eps], taken by the caller. *)
code[x_, eps$95$m_] := If[LessEqual[x, -2.7e-263], N[(N[(1.0 + N[Exp[N[(x * N[(1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.2e+42], N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 3.9e+74], And[N[Not[LessEqual[x, 3.1e+249]], $MachinePrecision], LessEqual[x, 7.5e+287]]], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\

\mathbf{elif}\;x \leq 1.2 \cdot 10^{+42}:\\
\;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\

\mathbf{elif}\;x \leq 3.9 \cdot 10^{+74} \lor \neg \left(x \leq 3.1 \cdot 10^{+249}\right) \land x \leq 7.5 \cdot 10^{+287}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if x < -2.70000000000000003e-263

    1. Initial program 71.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg71.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity71.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg71.5%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity71.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified71.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 46.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod0.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in eps around inf 68.0%

      \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(1 - \varepsilon\right)}}}{2} \]

    if -2.70000000000000003e-263 < x < 1.1999999999999999e42

    1. Initial program 56.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg56.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity56.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg56.3%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity56.3%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in56.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified56.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 83.8%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod83.8%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative83.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod83.8%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*83.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg83.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg83.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg83.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified83.8%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around inf 84.6%

      \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. *-commutative84.6%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    11. Simplified84.6%

      \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]

    if 1.1999999999999999e42 < x < 3.90000000000000008e74 or 3.10000000000000015e249 < x < 7.4999999999999994e287

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 14.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 75.8%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]

    if 3.90000000000000008e74 < x < 3.10000000000000015e249 or 7.4999999999999994e287 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.5%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 40.7%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod40.7%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative40.7%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod40.7%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*40.7%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg40.7%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg40.7%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg40.7%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified40.7%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification69.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 1.2 \cdot 10^{+42}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\ \mathbf{elif}\;x \leq 3.9 \cdot 10^{+74} \lor \neg \left(x \leq 3.1 \cdot 10^{+249}\right) \land x \leq 7.5 \cdot 10^{+287}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 78.8% accurate, 1.7× speedup?

\[\begin{array}{l} eps\_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 8 \cdot 10^{+41} \lor \neg \left(x \leq 4.2 \cdot 10^{+74}\right) \land \left(x \leq 1.3 \cdot 10^{+249} \lor \neg \left(x \leq 6.5 \cdot 10^{+286}\right)\right):\\ \;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Regime-split approximation of
;;   ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2
;; eps_m is assumed to be |eps|; the absolute value is taken by the caller.
(FPCore (x eps_m)
 :precision binary64
 ;; Large negative x: both exponents reduce to -x per the derivation above.
 (if (<= x -2.7e-263)
   (/ (+ 1.0 (exp (- x))) 2.0)
   ;; Bands where the value collapses to (1 + exp(eps_m*x)) / 2.
   (if (or (<= x 8e+41)
           (and (not (<= x 4.2e+74))
                (or (<= x 1.3e+249) (not (<= x 6.5e+286)))))
     (/ (+ 1.0 (exp (* eps_m x))) 2.0)
     ;; Otherwise exponentials drop out; only the 1/eps_m terms survive.
     (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= -2.7e-263) {
		tmp = (1.0 + exp(-x)) / 2.0;
	} else if ((x <= 8e+41) || (!(x <= 4.2e+74) && ((x <= 1.3e+249) || !(x <= 6.5e+286)))) {
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Piecewise approximation (Alternative 5) of
!   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
! with eps_m = |eps| precomputed by the caller.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-2.7d-263)) then
        ! Large negative x: the exp(-x) term dominates.
        tmp = (1.0d0 + exp(-x)) / 2.0d0
    ! Parenthesized to match the FPCore spec  A .or. (B .and. (C .or. D)).
    ! The unparenthesized original relied on .and. binding tighter than .or.
    ! and grouped as  A .or. (B .and. C) .or. D, diverging from the grouping
    ! used by every other language version of this alternative.
    else if ((x <= 8d+41) .or. ((.not. (x <= 4.2d+74)) .and. &
             ((x <= 1.3d+249) .or. (.not. (x <= 6.5d+286))))) then
        tmp = (1.0d0 + exp((eps_m * x))) / 2.0d0
    else
        ! Remaining bands: the exponentials reduce to a rational form in eps_m.
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Piecewise approximation (Alternative 5) of
 * ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2,
 * where eps_m = |eps| is precomputed by the caller.
 *
 * @param x     evaluation point
 * @param eps_m absolute value of the eps parameter
 * @return the regime-selected approximation
 */
public static double code(double x, double eps_m) {
	if (x <= -2.7e-263) {
		// Large negative x: the exp(-x) term dominates.
		return (1.0 + Math.exp(-x)) / 2.0;
	}
	boolean inMidBand = (x <= 8e+41)
			|| (!(x <= 4.2e+74) && ((x <= 1.3e+249) || !(x <= 6.5e+286)));
	if (inMidBand) {
		return (1.0 + Math.exp(eps_m * x)) / 2.0;
	}
	// Remaining bands: the exponentials reduce to a rational form in eps_m.
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Piecewise approximation (Alternative 5) of
	((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2,
	where eps_m = |eps| is precomputed by the caller.
	"""
	if x <= -2.7e-263:
		# Large negative x: the exp(-x) term dominates.
		return (1.0 + math.exp(-x)) / 2.0
	in_mid_band = (x <= 8e+41) or (not (x <= 4.2e+74) and ((x <= 1.3e+249) or not (x <= 6.5e+286)))
	if in_mid_band:
		return (1.0 + math.exp(eps_m * x)) / 2.0
	# Remaining bands: the exponentials reduce to a rational form in eps_m.
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
eps_m = abs(eps)
# Piecewise approximation (Alternative 5) of
#   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
# with eps_m = |eps| precomputed by the caller.  The explicit Float64(...)
# wrappers pin every intermediate to binary64, mirroring the FPCore
# :precision binary64 annotation.
function code(x, eps_m)
	tmp = 0.0
	if (x <= -2.7e-263)
		# Large negative x: the exp(-x) term dominates.
		tmp = Float64(Float64(1.0 + exp(Float64(-x))) / 2.0);
	elseif ((x <= 8e+41) || (!(x <= 4.2e+74) && ((x <= 1.3e+249) || !(x <= 6.5e+286))))
		tmp = Float64(Float64(1.0 + exp(Float64(eps_m * x))) / 2.0);
	else
		# Remaining bands: the exponentials reduce to a rational form in eps_m.
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Piecewise approximation (Alternative 5) of
%   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
% with eps_m = abs(eps) precomputed by the caller.
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -2.7e-263)
		% Large negative x: the exp(-x) term dominates.
		tmp = (1.0 + exp(-x)) / 2.0;
	elseif ((x <= 8e+41) || (~((x <= 4.2e+74)) && ((x <= 1.3e+249) || ~((x <= 6.5e+286)))))
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	else
		% Remaining bands: the exponentials reduce to a rational form in eps_m.
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -2.7e-263], N[(N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 8e+41], And[N[Not[LessEqual[x, 4.2e+74]], $MachinePrecision], Or[LessEqual[x, 1.3e+249], N[Not[LessEqual[x, 6.5e+286]], $MachinePrecision]]]], N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\
\;\;\;\;\frac{1 + e^{-x}}{2}\\

\mathbf{elif}\;x \leq 8 \cdot 10^{+41} \lor \neg \left(x \leq 4.2 \cdot 10^{+74}\right) \land \left(x \leq 1.3 \cdot 10^{+249} \lor \neg \left(x \leq 6.5 \cdot 10^{+286}\right)\right):\\
\;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.70000000000000003e-263

    1. Initial program 71.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg71.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity71.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg71.5%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity71.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified71.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 46.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 71.4%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod71.4%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative71.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg71.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg71.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative71.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod71.4%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*71.4%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg71.4%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg71.4%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg71.4%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified71.4%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around 0 75.2%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. neg-mul-175.2%

        \[\leadsto \frac{1 + e^{\color{blue}{-x}}}{2} \]
    11. Simplified75.2%

      \[\leadsto \frac{\color{blue}{1 + e^{-x}}}{2} \]

    if -2.70000000000000003e-263 < x < 8.00000000000000005e41 or 4.1999999999999998e74 < x < 1.3000000000000001e249 or 6.5000000000000003e286 < x

    1. Initial program 70.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg70.9%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity70.9%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg70.9%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity70.9%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified70.9%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 69.4%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod69.4%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod69.4%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*69.4%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg69.4%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg69.4%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg69.4%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified69.4%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around inf 69.9%

      \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. *-commutative69.9%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    11. Simplified69.9%

      \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]

    if 8.00000000000000005e41 < x < 4.1999999999999998e74 or 1.3000000000000001e249 < x < 6.5000000000000003e286

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 14.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 75.8%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification72.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.7 \cdot 10^{-263}:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 8 \cdot 10^{+41} \lor \neg \left(x \leq 4.2 \cdot 10^{+74}\right) \land \left(x \leq 1.3 \cdot 10^{+249} \lor \neg \left(x \leq 6.5 \cdot 10^{+286}\right)\right):\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 85.1% accurate, 1.7× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-256}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 7.2 \cdot 10^{+41} \lor \neg \left(x \leq 3.9 \cdot 10^{+74}\right) \land \left(x \leq 1.9 \cdot 10^{+249} \lor \neg \left(x \leq 1.15 \cdot 10^{+286}\right)\right):\\ \;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Piecewise approximation (Alternative 6) of
;;   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
;; with eps_m = |eps| applied as preprocessing by the caller.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -2e-256)
   ;; large negative x: keep the (1 - eps_m) factor in the exponent
   (/ (+ 1.0 (exp (* x (- 1.0 eps_m)))) 2.0)
   (if (or (<= x 7.2e+41)
           (and (not (<= x 3.9e+74))
                (or (<= x 1.9e+249) (not (<= x 1.15e+286)))))
     (/ (+ 1.0 (exp (* eps_m x))) 2.0)
     ;; remaining bands: the exponentials reduce to a rational form in eps_m
     (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= -2e-256) {
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	} else if ((x <= 7.2e+41) || (!(x <= 3.9e+74) && ((x <= 1.9e+249) || !(x <= 1.15e+286)))) {
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Piecewise approximation (Alternative 6) of
!   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
! with eps_m = |eps| precomputed by the caller.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-2d-256)) then
        ! Large negative x: keep the (1 - eps_m) factor in the exponent.
        tmp = (1.0d0 + exp((x * (1.0d0 - eps_m)))) / 2.0d0
    ! Parenthesized to match the FPCore spec  A .or. (B .and. (C .or. D)).
    ! The unparenthesized original relied on .and. binding tighter than .or.
    ! and grouped as  A .or. (B .and. C) .or. D, diverging from the grouping
    ! used by every other language version of this alternative.
    else if ((x <= 7.2d+41) .or. ((.not. (x <= 3.9d+74)) .and. &
             ((x <= 1.9d+249) .or. (.not. (x <= 1.15d+286))))) then
        tmp = (1.0d0 + exp((eps_m * x))) / 2.0d0
    else
        ! Remaining bands: the exponentials reduce to a rational form in eps_m.
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Piecewise approximation (Alternative 6) of
 * ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2,
 * where eps_m = |eps| is precomputed by the caller.
 *
 * @param x     evaluation point
 * @param eps_m absolute value of the eps parameter
 * @return the regime-selected approximation
 */
public static double code(double x, double eps_m) {
	if (x <= -2e-256) {
		// Large negative x: keep the (1 - eps_m) factor in the exponent.
		return (1.0 + Math.exp(x * (1.0 - eps_m))) / 2.0;
	}
	boolean inMidBand = (x <= 7.2e+41)
			|| (!(x <= 3.9e+74) && ((x <= 1.9e+249) || !(x <= 1.15e+286)));
	if (inMidBand) {
		return (1.0 + Math.exp(eps_m * x)) / 2.0;
	}
	// Remaining bands: the exponentials reduce to a rational form in eps_m.
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Piecewise approximation (Alternative 6) of
	((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2,
	where eps_m = |eps| is precomputed by the caller.
	"""
	if x <= -2e-256:
		# Large negative x: keep the (1 - eps_m) factor in the exponent.
		return (1.0 + math.exp(x * (1.0 - eps_m))) / 2.0
	in_mid_band = (x <= 7.2e+41) or (not (x <= 3.9e+74) and ((x <= 1.9e+249) or not (x <= 1.15e+286)))
	if in_mid_band:
		return (1.0 + math.exp(eps_m * x)) / 2.0
	# Remaining bands: the exponentials reduce to a rational form in eps_m.
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
eps_m = abs(eps)
# Piecewise approximation (Alternative 6) of
#   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
# with eps_m = |eps| precomputed by the caller.  The explicit Float64(...)
# wrappers pin every intermediate to binary64, mirroring the FPCore
# :precision binary64 annotation.
function code(x, eps_m)
	tmp = 0.0
	if (x <= -2e-256)
		# Large negative x: keep the (1 - eps_m) factor in the exponent.
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(1.0 - eps_m)))) / 2.0);
	elseif ((x <= 7.2e+41) || (!(x <= 3.9e+74) && ((x <= 1.9e+249) || !(x <= 1.15e+286))))
		tmp = Float64(Float64(1.0 + exp(Float64(eps_m * x))) / 2.0);
	else
		# Remaining bands: the exponentials reduce to a rational form in eps_m.
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Piecewise approximation (Alternative 6) of
%   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
% with eps_m = abs(eps) precomputed by the caller.
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -2e-256)
		% Large negative x: keep the (1 - eps_m) factor in the exponent.
		tmp = (1.0 + exp((x * (1.0 - eps_m)))) / 2.0;
	elseif ((x <= 7.2e+41) || (~((x <= 3.9e+74)) && ((x <= 1.9e+249) || ~((x <= 1.15e+286)))))
		tmp = (1.0 + exp((eps_m * x))) / 2.0;
	else
		% Remaining bands: the exponentials reduce to a rational form in eps_m.
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -2e-256], N[(N[(1.0 + N[Exp[N[(x * N[(1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 7.2e+41], And[N[Not[LessEqual[x, 3.9e+74]], $MachinePrecision], Or[LessEqual[x, 1.9e+249], N[Not[LessEqual[x, 1.15e+286]], $MachinePrecision]]]], N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2 \cdot 10^{-256}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(1 - eps\_m\right)}}{2}\\

\mathbf{elif}\;x \leq 7.2 \cdot 10^{+41} \lor \neg \left(x \leq 3.9 \cdot 10^{+74}\right) \land \left(x \leq 1.9 \cdot 10^{+249} \lor \neg \left(x \leq 1.15 \cdot 10^{+286}\right)\right):\\
\;\;\;\;\frac{1 + e^{eps\_m \cdot x}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1.99999999999999995e-256

    1. Initial program 71.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg71.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity71.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg71.5%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity71.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified71.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 46.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod0.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in eps around inf 68.0%

      \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(1 - \varepsilon\right)}}}{2} \]

    if -1.99999999999999995e-256 < x < 7.20000000000000051e41 or 3.90000000000000008e74 < x < 1.8999999999999999e249 or 1.1500000000000001e286 < x

    1. Initial program 70.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg70.9%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity70.9%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg70.9%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity70.9%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in70.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified70.9%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 40.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 69.4%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod69.4%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative69.4%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod69.4%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*69.4%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg69.4%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg69.4%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg69.4%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified69.4%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around inf 69.9%

      \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. *-commutative69.9%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    11. Simplified69.9%

      \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]

    if 7.20000000000000051e41 < x < 3.90000000000000008e74 or 1.8999999999999999e249 < x < 1.1500000000000001e286

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 14.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 75.8%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification69.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-256}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 7.2 \cdot 10^{+41} \lor \neg \left(x \leq 3.9 \cdot 10^{+74}\right) \land \left(x \leq 1.9 \cdot 10^{+249} \lor \neg \left(x \leq 1.15 \cdot 10^{+286}\right)\right):\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 64.2% accurate, 1.7× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 8.5 \cdot 10^{-6}:\\ \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\ \mathbf{elif}\;x \leq 1.65 \cdot 10^{+42} \lor \neg \left(x \leq 10^{+90} \lor \neg \left(x \leq 1.65 \cdot 10^{+249}\right) \land x \leq 6.7 \cdot 10^{+286}\right):\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{eps\_m}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Herbie Alternative 7: regime-split approximation of
;   ((1 + 1/eps)*e^(-(1-eps)*x) - (1/eps - 1)*e^(-(1+eps)*x)) / 2,
; where eps_m = |eps| is computed in preprocessing.
; Branch 1: first-order Taylor expansion in x around 0.
; Branch 2: expm1 form (accurate where the exp terms cancel).
; Branch 3: constant term of the Taylor expansion in x.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 8.5e-6)
   (/ (- 2.0 (* eps_m x)) 2.0)
   (if (or (<= x 1.65e+42)
           (not
            (or (<= x 1e+90) (and (not (<= x 1.65e+249)) (<= x 6.7e+286)))))
     (/ (/ (expm1 x) eps_m) 2.0)
     (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 8.5e-6) {
		tmp = (2.0 - (eps_m * x)) / 2.0;
	} else if ((x <= 1.65e+42) || !((x <= 1e+90) || (!(x <= 1.65e+249) && (x <= 6.7e+286)))) {
		tmp = (expm1(x) / eps_m) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = Math.abs(eps);
/**
 * Herbie Alternative 7: regime-split approximation of
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
 * eps_m is expected to be Math.abs(eps), computed by the caller.
 */
public static double code(double x, double eps_m) {
	if (x <= 8.5e-6) {
		// Small x: first-order Taylor expansion around x = 0.
		return (2.0 - (eps_m * x)) / 2.0;
	}
	// Regime where the expm1 form is the accurate rewrite.
	final boolean inExpm1Regime = (x <= 1.65e+42)
			|| !((x <= 1e+90) || (!(x <= 1.65e+249) && (x <= 6.7e+286)));
	if (inExpm1Regime) {
		return (Math.expm1(x) / eps_m) / 2.0;
	}
	// Remaining band: constant term of the Taylor expansion in x.
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Herbie Alternative 7: regime-split approximation of
	((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.

	eps_m is expected to be abs(eps), computed by the caller.
	"""
	if x <= 8.5e-6:
		# Small x: first-order Taylor expansion around x = 0.
		return (2.0 - (eps_m * x)) / 2.0
	# Regime where the expm1 form is the accurate rewrite.
	in_expm1_regime = (x <= 1.65e+42) or not (
		(x <= 1e+90) or (not (x <= 1.65e+249) and (x <= 6.7e+286))
	)
	if in_expm1_regime:
		return (math.expm1(x) / eps_m) / 2.0
	# Remaining band: constant term of the Taylor expansion in x.
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
eps_m = abs(eps)
# Herbie Alternative 7: regime-split approximation of
#   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
# eps_m is expected to be abs(eps), computed by the caller.
function code(x, eps_m)
	tmp = 0.0
	if (x <= 8.5e-6)
		# Small x: first-order Taylor expansion around x = 0.
		tmp = Float64(Float64(2.0 - Float64(eps_m * x)) / 2.0);
	elseif ((x <= 1.65e+42) || !((x <= 1e+90) || (!(x <= 1.65e+249) && (x <= 6.7e+286))))
		# Regime where the expm1 form is the accurate rewrite.
		tmp = Float64(Float64(expm1(x) / eps_m) / 2.0);
	else
		# Remaining band: constant term of the Taylor expansion in x.
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	end
	return tmp
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 8.5e-6], N[(N[(2.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 1.65e+42], N[Not[Or[LessEqual[x, 1e+90], And[N[Not[LessEqual[x, 1.65e+249]], $MachinePrecision], LessEqual[x, 6.7e+286]]]], $MachinePrecision]], N[(N[(N[(Exp[x] - 1), $MachinePrecision] / eps$95$m), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 8.5 \cdot 10^{-6}:\\
\;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\

\mathbf{elif}\;x \leq 1.65 \cdot 10^{+42} \lor \neg \left(x \leq 10^{+90} \lor \neg \left(x \leq 1.65 \cdot 10^{+249}\right) \land x \leq 6.7 \cdot 10^{+286}\right):\\
\;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{eps\_m}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 8.4999999999999999e-6

    1. Initial program 60.1%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg60.1%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity60.1%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg60.1%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity60.1%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified60.1%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 42.3%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt26.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod37.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg37.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod14.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt38.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr38.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity38.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative38.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified38.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in x around 0 47.8%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
    11. Step-by-step derivation
      1. *-commutative47.8%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
    12. Simplified47.8%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
    13. Taylor expanded in eps around inf 64.7%

      \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    14. Step-by-step derivation
      1. associate-*r*64.7%

        \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-164.7%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    15. Simplified64.7%

      \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]

    if 8.4999999999999999e-6 < x < 1.6499999999999999e42 or 9.99999999999999966e89 < x < 1.65000000000000007e249 or 6.70000000000000037e286 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 46.5%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp46.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity46.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod46.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval46.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp46.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt0.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod30.2%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg30.2%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod30.2%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt30.2%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr30.2%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity30.2%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative30.2%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified30.2%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in eps around 0 40.5%

      \[\leadsto \frac{\color{blue}{\frac{e^{x} - 1}{\varepsilon}}}{2} \]
    11. Step-by-step derivation
      1. expm1-def40.5%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(x\right)}}{\varepsilon}}{2} \]
    12. Simplified40.5%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}}{2} \]

    if 1.6499999999999999e42 < x < 9.99999999999999966e89 or 1.65000000000000007e249 < x < 6.70000000000000037e286

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 12.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 72.3%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification59.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 8.5 \cdot 10^{-6}:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 1.65 \cdot 10^{+42} \lor \neg \left(x \leq 10^{+90} \lor \neg \left(x \leq 1.65 \cdot 10^{+249}\right) \land x \leq 6.7 \cdot 10^{+286}\right):\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 71.2% accurate, 1.7× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 7100:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 1.02 \cdot 10^{+42} \lor \neg \left(x \leq 5.2 \cdot 10^{+89}\right) \land \left(x \leq 4.2 \cdot 10^{+249} \lor \neg \left(x \leq 2.8 \cdot 10^{+285}\right)\right):\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{eps\_m}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Herbie Alternative 8: regime-split approximation of
;   ((1 + 1/eps)*e^(-(1-eps)*x) - (1/eps - 1)*e^(-(1+eps)*x)) / 2,
; where eps_m = |eps| is computed in preprocessing.
; Branch 1: (1 + e^-x)/2 (eps terms dropped for moderate x).
; Branch 2: expm1 form. Branch 3: constant Taylor term in x.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 7100.0)
   (/ (+ 1.0 (exp (- x))) 2.0)
   (if (or (<= x 1.02e+42)
           (and (not (<= x 5.2e+89))
                (or (<= x 4.2e+249) (not (<= x 2.8e+285)))))
     (/ (/ (expm1 x) eps_m) 2.0)
     (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 7100.0) {
		tmp = (1.0 + exp(-x)) / 2.0;
	} else if ((x <= 1.02e+42) || (!(x <= 5.2e+89) && ((x <= 4.2e+249) || !(x <= 2.8e+285)))) {
		tmp = (expm1(x) / eps_m) / 2.0;
	} else {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = Math.abs(eps);
/**
 * Herbie Alternative 8: regime-split approximation of
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
 * eps_m is expected to be Math.abs(eps), computed by the caller.
 */
public static double code(double x, double eps_m) {
	if (x <= 7100.0) {
		// Moderate x: eps terms dropped, leaving (1 + e^-x)/2.
		return (1.0 + Math.exp(-x)) / 2.0;
	}
	// Regime where the expm1 form is the accurate rewrite.
	final boolean inExpm1Regime = (x <= 1.02e+42)
			|| (!(x <= 5.2e+89) && ((x <= 4.2e+249) || !(x <= 2.8e+285)));
	if (inExpm1Regime) {
		return (Math.expm1(x) / eps_m) / 2.0;
	}
	// Remaining band: constant term of the Taylor expansion in x.
	return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= 7100.0:
		tmp = (1.0 + math.exp(-x)) / 2.0
	elif (x <= 1.02e+42) or (not (x <= 5.2e+89) and ((x <= 4.2e+249) or not (x <= 2.8e+285))):
		tmp = (math.expm1(x) / eps_m) / 2.0
	else:
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	return tmp
eps_m = abs(eps)
# Regime-split approximation (Herbie) of
# ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.
# Expects eps_m = |eps| (preprocessing line above); the Float64() wrappers are
# kept so every intermediate is forced to binary64, as in Herbie's output.
function code(x, eps_m)
	if x <= 7100.0
		# Regime 1: eps drops out of the approximation entirely.
		return Float64(Float64(1.0 + exp(Float64(-x))) / 2.0)
	elseif (x <= 1.02e+42) || (!(x <= 5.2e+89) && ((x <= 4.2e+249) || !(x <= 2.8e+285)))
		# Regime 2 (mid-range x bands): series form expm1(x)/eps_m.
		return Float64(Float64(expm1(x) / eps_m) / 2.0)
	else
		# Remaining bands: constant-in-x limit; the 1/eps_m terms cancel algebraically.
		return Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0)
	end
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Regime-split approximation of ((1+1/eps)e^{-(1-eps)x} - (1/eps-1)e^{-(1+eps)x})/2; eps$95$m is |eps| (preprocessing line above). *)
(* Fixes over the generated form: Wolfram Language scientific notation is `*^`, not `e+`/`e-` (which parses as multiplication by the symbol e plus an integer); the no-op N[..., $MachinePrecision] wrappers around Boolean subexpressions are dropped. *)
code[x_, eps$95$m_] := If[LessEqual[x, 7100.0], N[(N[(1.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 1.02*^42], And[Not[LessEqual[x, 5.2*^89]], Or[LessEqual[x, 4.2*^249], Not[LessEqual[x, 2.8*^285]]]]], N[(N[(N[(Exp[x] - 1), $MachinePrecision] / eps$95$m), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 7100:\\
\;\;\;\;\frac{1 + e^{-x}}{2}\\

\mathbf{elif}\;x \leq 1.02 \cdot 10^{+42} \lor \neg \left(x \leq 5.2 \cdot 10^{+89}\right) \land \left(x \leq 4.2 \cdot 10^{+249} \lor \neg \left(x \leq 2.8 \cdot 10^{+285}\right)\right):\\
\;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{eps\_m}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 7100

    1. Initial program 61.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg61.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity61.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg61.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity61.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in61.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg61.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval61.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in61.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified61.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 43.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 80.0%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod80.0%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative80.0%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg80.0%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg80.0%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative80.0%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod80.0%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*80.0%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg80.0%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg80.0%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg80.0%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified80.0%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around 0 74.6%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. neg-mul-174.6%

        \[\leadsto \frac{1 + e^{\color{blue}{-x}}}{2} \]
    11. Simplified74.6%

      \[\leadsto \frac{\color{blue}{1 + e^{-x}}}{2} \]

    if 7100 < x < 1.01999999999999996e42 or 5.2000000000000001e89 < x < 4.1999999999999997e249 or 2.80000000000000016e285 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 44.5%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp44.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity44.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod44.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval44.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp44.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt0.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod32.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg32.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod32.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt32.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr32.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity32.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative32.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified32.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in eps around 0 43.3%

      \[\leadsto \frac{\color{blue}{\frac{e^{x} - 1}{\varepsilon}}}{2} \]
    11. Step-by-step derivation
      1. expm1-def43.3%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(x\right)}}{\varepsilon}}{2} \]
    12. Simplified43.3%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}}{2} \]

    if 1.01999999999999996e42 < x < 5.2000000000000001e89 or 4.1999999999999997e249 < x < 2.80000000000000016e285

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 12.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 72.3%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification67.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 7100:\\ \;\;\;\;\frac{1 + e^{-x}}{2}\\ \mathbf{elif}\;x \leq 1.02 \cdot 10^{+42} \lor \neg \left(x \leq 5.2 \cdot 10^{+89}\right) \land \left(x \leq 4.2 \cdot 10^{+249} \lor \neg \left(x \leq 2.8 \cdot 10^{+285}\right)\right):\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 9: 60.5% accurate, 8.1× speedup?

\[\begin{array}{l} eps\_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 8.5 \cdot 10^{-6}:\\ \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\ \mathbf{elif}\;x \leq 8.8 \cdot 10^{+74} \lor \neg \left(x \leq 2.6 \cdot 10^{+249}\right):\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + \left(1 + eps\_m \cdot x\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Herbie regime-split rewrite of the original expression
;; ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2,
;; after the preprocessing step eps_m = |eps| (see the fabs line above).
(FPCore (x eps_m)
 :precision binary64
 ;; Regime 1 (tiny x): linear Taylor form (2 - eps_m*x)/2.
 (if (<= x 8.5e-6)
   (/ (- 2.0 (* eps_m x)) 2.0)
   ;; Regime 2: constant-in-x limit; the 1/eps_m terms cancel algebraically.
   (if (or (<= x 8.8e+74) (not (<= x 2.6e+249)))
     (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)
     ;; Remaining band: linear-in-x form (2 + eps_m*x)/2.
     (/ (+ 1.0 (+ 1.0 (* eps_m x))) 2.0))))
eps_m = fabs(eps);
/* Regime-split approximation (Herbie) of
 * ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.
 * Expects eps_m = |eps| to have been taken beforehand (preprocessing line above). */
double code(double x, double eps_m) {
	/* Regime 1 (tiny x): linear Taylor form (2 - eps_m*x)/2. */
	if (x <= 8.5e-6) {
		return (2.0 - (eps_m * x)) / 2.0;
	}
	/* Regime 2: constant-in-x limit; the 1/eps_m terms cancel algebraically. */
	if ((x <= 8.8e+74) || !(x <= 2.6e+249)) {
		return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	/* Remaining band: linear-in-x form (2 + eps_m*x)/2. */
	return (1.0 + (1.0 + (eps_m * x))) / 2.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Regime-split approximation (Herbie) of
    ! ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.
    ! Expects eps_m = |eps| to have been taken beforehand (preprocessing line above).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    if (x <= 8.5d-6) then
        ! Regime 1 (tiny x): linear Taylor form (2 - eps_m*x)/2.
        code = (2.0d0 - (eps_m * x)) / 2.0d0
    else if ((x <= 8.8d+74) .or. (.not. (x <= 2.6d+249))) then
        ! Regime 2: constant-in-x limit; the 1/eps_m terms cancel algebraically.
        code = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    else
        ! Remaining band: linear-in-x form (2 + eps_m*x)/2.
        code = (1.0d0 + (1.0d0 + (eps_m * x))) / 2.0d0
    end if
end function
eps_m = Math.abs(eps);
/**
 * Regime-split approximation (Herbie) of
 * ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.
 * Expects eps_m = |eps| to have been taken beforehand (preprocessing line above).
 */
public static double code(double x, double eps_m) {
	// Regime 1 (tiny x): linear Taylor form (2 - eps_m*x)/2.
	if (x <= 8.5e-6) {
		return (2.0 - (eps_m * x)) / 2.0;
	}
	// Regime 2: constant-in-x limit; the 1/eps_m terms cancel algebraically.
	if ((x <= 8.8e+74) || !(x <= 2.6e+249)) {
		return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	}
	// Remaining band: linear-in-x form (2 + eps_m*x)/2.
	return (1.0 + (1.0 + (eps_m * x))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Regime-split approximation (Herbie) of
	((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.

	Expects eps_m = |eps| to have been taken beforehand (preprocessing line above).
	"""
	# Regime 1 (tiny x): linear Taylor form (2 - eps_m*x)/2.
	if x <= 8.5e-6:
		return (2.0 - (eps_m * x)) / 2.0
	# Regime 2: constant-in-x limit; the 1/eps_m terms cancel algebraically.
	# The `not (x <= ...)` form is kept verbatim so NaN routing matches Herbie's output.
	if (x <= 8.8e+74) or not (x <= 2.6e+249):
		return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	# Remaining band: linear-in-x form (2 + eps_m*x)/2.
	return (1.0 + (1.0 + (eps_m * x))) / 2.0
eps_m = abs(eps)
# Regime-split approximation (Herbie) of
# ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.
# Expects eps_m = |eps| (preprocessing line above); the Float64() wrappers are
# kept so every intermediate is forced to binary64, as in Herbie's output.
function code(x, eps_m)
	if x <= 8.5e-6
		# Regime 1 (tiny x): linear Taylor form (2 - eps_m*x)/2.
		return Float64(Float64(2.0 - Float64(eps_m * x)) / 2.0)
	elseif (x <= 8.8e+74) || !(x <= 2.6e+249)
		# Regime 2: constant-in-x limit; the 1/eps_m terms cancel algebraically.
		return Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0)
	else
		# Remaining band: linear-in-x form (2 + eps_m*x)/2.
		return Float64(Float64(1.0 + Float64(1.0 + Float64(eps_m * x))) / 2.0)
	end
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	% Regime-split approximation (Herbie) of
	% ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2.
	% Expects eps_m = |eps| to have been taken beforehand (preprocessing line above).
	if (x <= 8.5e-6)
		% Regime 1 (tiny x): linear Taylor form (2 - eps_m*x)/2.
		tmp_2 = (2.0 - (eps_m * x)) / 2.0;
	elseif ((x <= 8.8e+74) || ~((x <= 2.6e+249)))
		% Regime 2: constant-in-x limit; the 1/eps_m terms cancel algebraically.
		tmp_2 = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	else
		% Remaining band: linear-in-x form (2 + eps_m*x)/2.
		tmp_2 = (1.0 + (1.0 + (eps_m * x))) / 2.0;
	end
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Regime-split approximation of ((1+1/eps)e^{-(1-eps)x} - (1/eps-1)e^{-(1+eps)x})/2; eps$95$m is |eps| (preprocessing line above). *)
(* Fixes over the generated form: Wolfram Language scientific notation is `*^`, not `e+`/`e-` (which parses as multiplication by the symbol e); the no-op N[..., $MachinePrecision] wrapper around the Boolean subexpression is dropped. *)
code[x_, eps$95$m_] := If[LessEqual[x, 8.5*^-6], N[(N[(2.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[x, 8.8*^74], Not[LessEqual[x, 2.6*^249]]], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[(1.0 + N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 8.5 \cdot 10^{-6}:\\
\;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\

\mathbf{elif}\;x \leq 8.8 \cdot 10^{+74} \lor \neg \left(x \leq 2.6 \cdot 10^{+249}\right):\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{1 + \left(1 + eps\_m \cdot x\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 8.4999999999999999e-6

    1. Initial program 60.1%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg60.1%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity60.1%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg60.1%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity60.1%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in60.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified60.1%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 42.3%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp42.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt26.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod37.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg37.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod14.3%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt38.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr38.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity38.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative38.9%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified38.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in x around 0 47.8%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
    11. Step-by-step derivation
      1. *-commutative47.8%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
    12. Simplified47.8%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
    13. Taylor expanded in eps around inf 64.7%

      \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    14. Step-by-step derivation
      1. associate-*r*64.7%

        \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-164.7%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    15. Simplified64.7%

      \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]

    if 8.4999999999999999e-6 < x < 8.8000000000000005e74 or 2.60000000000000019e249 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 33.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in x around 0 50.5%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]

    if 8.8000000000000005e74 < x < 2.60000000000000019e249

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity100.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg100.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity100.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 38.3%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 38.6%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod38.6%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative38.6%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg38.6%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg38.6%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative38.6%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod38.6%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*38.6%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg38.6%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg38.6%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg38.6%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified38.6%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around inf 38.5%

      \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. *-commutative38.5%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    11. Simplified38.5%

      \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    12. Taylor expanded in x around 0 21.7%

      \[\leadsto \frac{1 + \color{blue}{\left(1 + \varepsilon \cdot x\right)}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 55.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 8.5 \cdot 10^{-6}:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 8.8 \cdot 10^{+74} \lor \neg \left(x \leq 2.6 \cdot 10^{+249}\right):\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + \left(1 + \varepsilon \cdot x\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 10: 58.3% accurate, 16.2× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -4 \cdot 10^{-263}:\\ \;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + \left(1 + eps\_m \cdot x\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Piecewise approximation of the NMSE 6.1 expression.
; eps_m = |eps| is precomputed by the preprocessing step shown above.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -4e-263)
   (/ (- 2.0 (* eps_m x)) 2.0)
   (/ (+ 1.0 (+ 1.0 (* eps_m x))) 2.0)))
eps_m = fabs(eps);
/* Piecewise approximation of the NMSE 6.1 expression; eps_m is |eps|,
 * computed by the caller.  The regime boundary x = -4e-263 comes from
 * Herbie's regime inference. */
double code(double x, double eps_m) {
	const double p = eps_m * x;
	/* Same arithmetic as the original two branches, folded into a ternary. */
	return (x <= -4e-263) ? (2.0 - p) / 2.0 : (1.0 + (1.0 + p)) / 2.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Piecewise approximation of the NMSE 6.1 expression.
    ! eps_m is |eps|, computed by the caller before this call.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    ! Regime boundary at x = -4d-263 chosen by Herbie's regime inference.
    if (x <= (-4d-263)) then
        tmp = (2.0d0 - (eps_m * x)) / 2.0d0
    else
        tmp = (1.0d0 + (1.0d0 + (eps_m * x))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/** Piecewise approximation of the NMSE 6.1 expression; eps_m is |eps|. */
public static double code(double x, double eps_m) {
	final double p = eps_m * x;
	if (x <= -4e-263) {
		// Regime x <= -4e-263: (2 - eps_m*x) / 2
		return (2.0 - p) / 2.0;
	}
	// Default regime: (1 + (1 + eps_m*x)) / 2
	return (1.0 + (1.0 + p)) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Piecewise approximation of the NMSE 6.1 expression.

	eps_m is |eps|, computed by the caller; the regime boundary at
	x = -4e-263 comes from Herbie's regime inference.
	"""
	if x <= -4e-263:
		# Regime x <= -4e-263: (2 - eps_m*x) / 2
		return (2.0 - eps_m * x) / 2.0
	# Default regime: (1 + (1 + eps_m*x)) / 2
	return (1.0 + (1.0 + eps_m * x)) / 2.0
eps_m = abs(eps)
function code(x, eps_m)
	# Piecewise approximation of the NMSE 6.1 expression.
	# eps_m is |eps|, computed by the caller before this call.
	tmp = 0.0
	# Regime boundary at x = -4e-263 chosen by Herbie's regime inference.
	if (x <= -4e-263)
		tmp = Float64(Float64(2.0 - Float64(eps_m * x)) / 2.0);
	else
		tmp = Float64(Float64(1.0 + Float64(1.0 + Float64(eps_m * x))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	% Piecewise approximation of the NMSE 6.1 expression.
	% eps_m is |eps|, computed by the caller before this call.
	tmp = 0.0;
	% Regime boundary at x = -4e-263 chosen by Herbie's regime inference.
	if (x <= -4e-263)
		tmp = (2.0 - (eps_m * x)) / 2.0;
	else
		tmp = (1.0 + (1.0 + (eps_m * x))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -4e-263], N[(N[(2.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[(1.0 + N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -4 \cdot 10^{-263}:\\
\;\;\;\;\frac{2 - eps\_m \cdot x}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{1 + \left(1 + eps\_m \cdot x\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -4e-263

    1. Initial program 71.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg71.4%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity71.4%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg71.5%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity71.5%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in71.5%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified71.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 46.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-un-lft-identity46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      3. log-prod46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      4. metadata-eval46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      5. add-log-exp46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      6. add-sqr-sqrt46.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      7. sqrt-unprod44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      8. sqr-neg44.1%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      9. sqrt-prod0.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      10. add-sqr-sqrt41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. Applied egg-rr41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. Step-by-step derivation
      1. +-lft-identity41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
      2. *-commutative41.4%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. Simplified41.4%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. Taylor expanded in x around 0 39.6%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
    11. Step-by-step derivation
      1. *-commutative39.6%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
    12. Simplified39.6%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
    13. Taylor expanded in eps around inf 51.6%

      \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    14. Step-by-step derivation
      1. associate-*r*51.6%

        \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-151.6%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    15. Simplified51.6%

      \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]

    if -4e-263 < x

    1. Initial program 75.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Step-by-step derivation
      1. fma-neg75.0%

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
      2. /-rgt-identity75.0%

        \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
      3. fma-neg75.0%

        \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
      4. /-rgt-identity75.0%

        \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      5. distribute-rgt-neg-in75.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      6. sub-neg75.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      7. metadata-eval75.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
      8. distribute-rgt-neg-in75.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Simplified75.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    4. Add Preprocessing
    5. Taylor expanded in x around 0 37.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    6. Taylor expanded in eps around inf 61.8%

      \[\leadsto \frac{\color{blue}{1 + e^{-1 \cdot \left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
    7. Step-by-step derivation
      1. exp-prod61.8%

        \[\leadsto \frac{1 + \color{blue}{{\left(e^{-1}\right)}^{\left(x \cdot \left(1 - \varepsilon\right)\right)}}}{2} \]
      2. *-commutative61.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right)}}}{2} \]
      3. sub-neg61.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \left(-\varepsilon\right)\right)} \cdot x\right)}}{2} \]
      4. mul-1-neg61.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\left(\left(1 + \color{blue}{-1 \cdot \varepsilon}\right) \cdot x\right)}}{2} \]
      5. *-commutative61.8%

        \[\leadsto \frac{1 + {\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      6. exp-prod61.8%

        \[\leadsto \frac{1 + \color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}}{2} \]
      7. associate-*r*61.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + -1 \cdot \varepsilon\right)}}}{2} \]
      8. mul-1-neg61.8%

        \[\leadsto \frac{1 + e^{\color{blue}{\left(-x\right)} \cdot \left(1 + -1 \cdot \varepsilon\right)}}{2} \]
      9. mul-1-neg61.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)}}{2} \]
      10. sub-neg61.8%

        \[\leadsto \frac{1 + e^{\left(-x\right) \cdot \color{blue}{\left(1 - \varepsilon\right)}}}{2} \]
    8. Simplified61.8%

      \[\leadsto \frac{\color{blue}{1 + e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)}}}{2} \]
    9. Taylor expanded in eps around inf 62.2%

      \[\leadsto \frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{2} \]
    10. Step-by-step derivation
      1. *-commutative62.2%

        \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    11. Simplified62.2%

      \[\leadsto \frac{1 + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]
    12. Taylor expanded in x around 0 46.8%

      \[\leadsto \frac{1 + \color{blue}{\left(1 + \varepsilon \cdot x\right)}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 48.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -4 \cdot 10^{-263}:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + \left(1 + \varepsilon \cdot x\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 11: 50.2% accurate, 32.4× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \frac{2 - eps\_m \cdot x}{2} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 (/ (- 2.0 (* eps_m x)) 2.0))
eps_m = fabs(eps);
/* Linear approximation (2 - eps_m*x)/2 of the NMSE 6.1 expression;
 * eps_m is |eps|, computed by the caller. */
double code(double x, double eps_m) {
	const double numerator = 2.0 - eps_m * x;
	return numerator / 2.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Linear approximation (2 - eps_m*x)/2 of the NMSE 6.1 expression.
    ! eps_m is |eps|, computed by the caller before this call.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = (2.0d0 - (eps_m * x)) / 2.0d0
end function
eps_m = Math.abs(eps);
/** Linear approximation (2 - eps_m*x)/2 of the NMSE 6.1 expression; eps_m is |eps|. */
public static double code(double x, double eps_m) {
	final double numerator = 2.0 - eps_m * x;
	return numerator / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Linear approximation (2 - eps_m*x)/2 of the NMSE 6.1 expression.

	eps_m is |eps|, computed by the caller before this call.
	"""
	numerator = 2.0 - eps_m * x
	return numerator / 2.0
eps_m = abs(eps)
function code(x, eps_m)
	# Linear approximation (2 - eps_m*x)/2 of the NMSE 6.1 expression.
	# eps_m is |eps|, computed by the caller before this call.
	return Float64(Float64(2.0 - Float64(eps_m * x)) / 2.0)
end
eps_m = abs(eps);
function tmp = code(x, eps_m)
	% Linear approximation (2 - eps_m*x)/2 of the NMSE 6.1 expression.
	% eps_m is |eps|, computed by the caller before this call.
	tmp = (2.0 - (eps_m * x)) / 2.0;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := N[(N[(2.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\frac{2 - eps\_m \cdot x}{2}
\end{array}
Derivation
  1. Initial program 73.8%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Step-by-step derivation
    1. fma-neg73.8%

      \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
    2. /-rgt-identity73.8%

      \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
    3. fma-neg73.8%

      \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
    4. /-rgt-identity73.8%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    5. distribute-rgt-neg-in73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    6. sub-neg73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    7. metadata-eval73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    8. distribute-rgt-neg-in73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
  3. Simplified73.8%

    \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
  4. Add Preprocessing
  5. Taylor expanded in x around 0 40.1%

    \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
  6. Step-by-step derivation
    1. add-log-exp40.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    2. *-un-lft-identity40.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\log \color{blue}{\left(1 \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    3. log-prod40.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\log 1 + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    4. metadata-eval40.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0} + \log \left(e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    5. add-log-exp40.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    6. add-sqr-sqrt17.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    7. sqrt-unprod33.7%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    8. sqr-neg33.7%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \sqrt{\color{blue}{x \cdot x}}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    9. sqrt-prod18.2%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    10. add-sqr-sqrt34.3%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{0 + \left(1 - \varepsilon\right) \cdot \color{blue}{x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  7. Applied egg-rr34.3%

    \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{0 + \left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  8. Step-by-step derivation
    1. +-lft-identity34.3%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
    2. *-commutative34.3%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  9. Simplified34.3%

    \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]
  10. Taylor expanded in x around 0 36.9%

    \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)}}{2} \]
  11. Step-by-step derivation
    1. *-commutative36.9%

      \[\leadsto \frac{2 + x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
  12. Simplified36.9%

    \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}}{2} \]
  13. Taylor expanded in eps around inf 48.1%

    \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
  14. Step-by-step derivation
    1. associate-*r*48.1%

      \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
    2. neg-mul-148.1%

      \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
  15. Simplified48.1%

    \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]
  16. Final simplification 48.1%

    \[\leadsto \frac{2 - \varepsilon \cdot x}{2} \]
  17. Add Preprocessing

Alternative 12: 43.8% accurate, 227.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ 1 \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 1.0)
eps_m = fabs(eps);
/* Constant approximation of the NMSE 6.1 expression: always 1.0. */
double code(double x, double eps_m) {
	(void) x;      /* both inputs are intentionally unused */
	(void) eps_m;
	return 1.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Constant approximation of the NMSE 6.1 expression: always 1.0.
    ! Both inputs are intentionally unused.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = 1.0d0
end function
eps_m = Math.abs(eps);
/** Constant approximation of the NMSE 6.1 expression: always 1.0 (inputs unused). */
public static double code(double x, double eps_m) {
	final double CONSTANT_RESULT = 1.0;
	return CONSTANT_RESULT;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Constant approximation of the NMSE 6.1 expression.

	Always returns 1.0; both inputs are intentionally unused.
	"""
	return 1.0
eps_m = abs(eps)
function code(x, eps_m)
	# Constant approximation of the NMSE 6.1 expression: always 1.0.
	# Both inputs are intentionally unused.
	return 1.0
end
eps_m = abs(eps);
function tmp = code(x, eps_m)
	% Constant approximation of the NMSE 6.1 expression: always 1.0.
	% Both inputs are intentionally unused.
	tmp = 1.0;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := 1.0
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
1
\end{array}
Derivation
  1. Initial program 73.8%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Step-by-step derivation
    1. fma-neg73.8%

      \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}}{2} \]
    2. /-rgt-identity73.8%

      \[\leadsto \frac{\mathsf{fma}\left(\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1}}, e^{-\left(1 - \varepsilon\right) \cdot x}, -\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}\right)}{2} \]
    3. fma-neg73.8%

      \[\leadsto \frac{\color{blue}{\frac{1 + \frac{1}{\varepsilon}}{1} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}}{2} \]
    4. /-rgt-identity73.8%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    5. distribute-rgt-neg-in73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    6. sub-neg73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    7. metadata-eval73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    8. distribute-rgt-neg-in73.8%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
  3. Simplified73.8%

    \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
  4. Add Preprocessing
  5. Taylor expanded in x around 0 40.5%

    \[\leadsto \frac{\color{blue}{2}}{2} \]
  6. Final simplification 40.5%

    \[\leadsto 1 \]
  7. Add Preprocessing

Reproduce

?
herbie shell --seed 2024040 
; Original input program, for reproducing this report with `herbie shell`.
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))