NMSE Section 6.1 mentioned, A

Percentage Accurate: 72.4% → 99.7%
Time: 17.5s
Alternatives: 21
Speedup: 1.8×

Specification

\[\begin{array}{l} \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}
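
A note of ours, not part of Herbie's report: much of the poor accuracy of the specification appears to come from cancellation at small ε. Factoring out e^{-x} shows that the two subtracted terms each grow like 1/ε while their difference stays bounded (it tends to 2(1 + x)), so the subtraction wipes out most of the significant bits in binary64:

\[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} = \frac{e^{-x}}{2} \cdot \left[\left(1 + \frac{1}{\varepsilon}\right) e^{\varepsilon x} - \left(\frac{1}{\varepsilon} - 1\right) e^{-\varepsilon x}\right] \longrightarrow \left(1 + x\right) \cdot e^{-x} \quad \left(\varepsilon \to 0\right) \]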

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Variable

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (the variable is chosen in the title); the vertical axis is accuracy, and higher is better. Red represents the original program, while blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 21 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 72.4% accurate, 1.0× speedup

\[\begin{array}{l} \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 99.7% accurate, 1.0× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := \left(x + 1\right) \cdot e^{-x}\\ \mathbf{if}\;eps\_m \leq 10^{-49}:\\ \;\;\;\;\frac{t\_0 + t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{eps\_m \cdot \left(-x\right)} + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (* (+ x 1.0) (exp (- x)))))
   (if (<= eps_m 1e-49)
     (/ (+ t_0 t_0) 2.0)
     (/ (+ (exp (* eps_m (- x))) (exp (* x (+ eps_m -1.0)))) 2.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * exp(-x);
	double tmp;
	if (eps_m <= 1e-49) {
		tmp = (t_0 + t_0) / 2.0;
	} else {
		tmp = (exp((eps_m * -x)) + exp((x * (eps_m + -1.0)))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (x + 1.0d0) * exp(-x)
    if (eps_m <= 1d-49) then
        tmp = (t_0 + t_0) / 2.0d0
    else
        tmp = (exp((eps_m * -x)) + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * Math.exp(-x);
	double tmp;
	if (eps_m <= 1e-49) {
		tmp = (t_0 + t_0) / 2.0;
	} else {
		tmp = (Math.exp((eps_m * -x)) + Math.exp((x * (eps_m + -1.0)))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	t_0 = (x + 1.0) * math.exp(-x)
	tmp = 0
	if eps_m <= 1e-49:
		tmp = (t_0 + t_0) / 2.0
	else:
		tmp = (math.exp((eps_m * -x)) + math.exp((x * (eps_m + -1.0)))) / 2.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	t_0 = Float64(Float64(x + 1.0) * exp(Float64(-x)))
	tmp = 0.0
	if (eps_m <= 1e-49)
		tmp = Float64(Float64(t_0 + t_0) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(eps_m * Float64(-x))) + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	t_0 = (x + 1.0) * exp(-x);
	tmp = 0.0;
	if (eps_m <= 1e-49)
		tmp = (t_0 + t_0) / 2.0;
	else
		tmp = (exp((eps_m * -x)) + exp((x * (eps_m + -1.0)))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(x + 1.0), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[eps$95$m, 1e-49], N[(N[(t$95$0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(eps$95$m * (-x)), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
t_0 := \left(x + 1\right) \cdot e^{-x}\\
\mathbf{if}\;eps\_m \leq 10^{-49}:\\
\;\;\;\;\frac{t\_0 + t\_0}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{e^{eps\_m \cdot \left(-x\right)} + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\
\end{array}
\end{array}
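
Note (ours, not part of Herbie's output): the preprocessing line eps_m = fabs(eps) is listed separately from the translated bodies above, so a caller is expected to compute it before invoking code. A minimal C sketch of that glue; the wrapper name eval_alt1 is our own, not something Herbie emits:

#include <math.h>

/* Herbie's Alternative 1, adapted from the C listing above. */
static double code(double x, double eps_m) {
	double t_0 = (x + 1.0) * exp(-x);
	if (eps_m <= 1e-49) {
		return (t_0 + t_0) / 2.0;
	}
	return (exp(eps_m * -x) + exp(x * (eps_m + -1.0))) / 2.0;
}

/* Hypothetical wrapper: applies the |eps| preprocessing, then evaluates the body. */
double eval_alt1(double x, double eps) {
	double eps_m = fabs(eps);
	return code(x, eps_m);
}
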
Derivation
  1. Split input into 2 regimes
  2. if eps < 9.99999999999999936e-50

    1. Initial program 65.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 65.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 66.5%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Simplified 67.6%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]

    if 9.99999999999999936e-50 < eps

    1. Initial program 97.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 75.4%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    9. Step-by-step derivation
      1. mul-1-neg 100.0%

        \[\leadsto \frac{e^{\color{blue}{-\varepsilon \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. *-commutative 100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \varepsilon}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-neg-in 100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-\varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    10. Simplified 100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-\varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 77.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 10^{-49}:\\ \;\;\;\;\frac{\left(x + 1\right) \cdot e^{-x} + \left(x + 1\right) \cdot e^{-x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot \left(-x\right)} + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 99.3% accurate, 1.0× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 2.15 \cdot 10^{-51}:\\ \;\;\;\;\frac{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{eps\_m \cdot \left(-x\right)} + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 2.15e-51)
   (/ (* 2.0 (exp (- (log1p x) x))) 2.0)
   (/ (+ (exp (* eps_m (- x))) (exp (* x (+ eps_m -1.0)))) 2.0)))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 2.15e-51) {
		tmp = (2.0 * exp((log1p(x) - x))) / 2.0;
	} else {
		tmp = (exp((eps_m * -x)) + exp((x * (eps_m + -1.0)))) / 2.0;
	}
	return tmp;
}
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 2.15e-51) {
		tmp = (2.0 * Math.exp((Math.log1p(x) - x))) / 2.0;
	} else {
		tmp = (Math.exp((eps_m * -x)) + Math.exp((x * (eps_m + -1.0)))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if eps_m <= 2.15e-51:
		tmp = (2.0 * math.exp((math.log1p(x) - x))) / 2.0
	else:
		tmp = (math.exp((eps_m * -x)) + math.exp((x * (eps_m + -1.0)))) / 2.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 2.15e-51)
		tmp = Float64(Float64(2.0 * exp(Float64(log1p(x) - x))) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(eps_m * Float64(-x))) + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0);
	end
	return tmp
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 2.15e-51], N[(N[(2.0 * N[Exp[N[(N[Log[1 + x], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(eps$95$m * (-x)), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 2.15 \cdot 10^{-51}:\\
\;\;\;\;\frac{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{e^{eps\_m \cdot \left(-x\right)} + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\
\end{array}
\end{array}
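
A remark of ours, not part of Herbie's report: in exact arithmetic the first branch is the same (x + 1)·e^{-x} expression that Alternative 1 uses, since

\[\frac{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}{2} = e^{\log\left(1 + x\right)} \cdot e^{-x} = \left(1 + x\right) \cdot e^{-x} \]

The difference is purely in evaluation order: log1p(x) computes log(1 + x) without first forming 1 + x, which avoids a rounding step when x is close to zero.
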
Derivation
  1. Split input into 2 regimes
  2. if eps < 2.1499999999999999e-51

    1. Initial program 65.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified65.7%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 66.3%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Simplified67.5%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
    6. Step-by-step derivation
      1. cancel-sign-sub-inv67.5%

        \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
      2. add-exp-log65.8%

        \[\leadsto \frac{\color{blue}{e^{\log \left(\left(x + 1\right) \cdot e^{-x}\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      3. *-commutative65.8%

        \[\leadsto \frac{e^{\log \color{blue}{\left(e^{-x} \cdot \left(x + 1\right)\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      4. log-prod65.8%

        \[\leadsto \frac{e^{\color{blue}{\log \left(e^{-x}\right) + \log \left(x + 1\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      5. add-log-exp65.8%

        \[\leadsto \frac{e^{\color{blue}{\left(-x\right)} + \log \left(x + 1\right)} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      6. +-commutative65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \log \color{blue}{\left(1 + x\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      7. log1p-define65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \color{blue}{\mathsf{log1p}\left(x\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      8. metadata-eval65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + \color{blue}{1} \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      9. add-exp-log65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + \color{blue}{e^{\log \left(1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}}}{2} \]
      10. *-un-lft-identity65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\log \color{blue}{\left(\left(x + 1\right) \cdot e^{-x}\right)}}}{2} \]
      11. *-commutative65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\log \color{blue}{\left(e^{-x} \cdot \left(x + 1\right)\right)}}}{2} \]
      12. log-prod65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\color{blue}{\log \left(e^{-x}\right) + \log \left(x + 1\right)}}}{2} \]
      13. add-log-exp65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\color{blue}{\left(-x\right)} + \log \left(x + 1\right)}}{2} \]
      14. +-commutative65.8%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\left(-x\right) + \log \color{blue}{\left(1 + x\right)}}}{2} \]
    7. Applied egg-rr65.8%

      \[\leadsto \frac{\color{blue}{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\left(-x\right) + \mathsf{log1p}\left(x\right)}}}{2} \]
    8. Step-by-step derivation
      1. count-265.8%

        \[\leadsto \frac{\color{blue}{2 \cdot e^{\left(-x\right) + \mathsf{log1p}\left(x\right)}}}{2} \]
      2. +-commutative65.8%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{\mathsf{log1p}\left(x\right) + \left(-x\right)}}}{2} \]
      3. unsub-neg65.8%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{\mathsf{log1p}\left(x\right) - x}}}{2} \]
    9. Simplified65.8%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}}{2} \]

    if 2.1499999999999999e-51 < eps

    1. Initial program 96.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified74.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    9. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\varepsilon \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \varepsilon}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-\varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    10. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-\varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification76.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 2.15 \cdot 10^{-51}:\\ \;\;\;\;\frac{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot \left(-x\right)} + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 98.8% accurate, 1.1× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \frac{e^{x \cdot \left(eps\_m + -1\right)} + e^{x \cdot \left(-1 - eps\_m\right)}}{2} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (/ (+ (exp (* x (+ eps_m -1.0))) (exp (* x (- -1.0 eps_m)))) 2.0))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	return (exp((x * (eps_m + -1.0))) + exp((x * (-1.0 - eps_m)))) / 2.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = (exp((x * (eps_m + (-1.0d0)))) + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	return (Math.exp((x * (eps_m + -1.0))) + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	return (math.exp((x * (eps_m + -1.0))) + math.exp((x * (-1.0 - eps_m)))) / 2.0
eps_m = abs(eps)
function code(x, eps_m)
	return Float64(Float64(exp(Float64(x * Float64(eps_m + -1.0))) + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0)
end
eps_m = abs(eps);
function tmp = code(x, eps_m)
	tmp = (exp((x * (eps_m + -1.0))) + exp((x * (-1.0 - eps_m)))) / 2.0;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := N[(N[(N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\frac{e^{x \cdot \left(eps\_m + -1\right)} + e^{x \cdot \left(-1 - eps\_m\right)}}{2}
\end{array}
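
A remark of ours, not part of Herbie's report: Alternative 3 folds into a single hyperbolic cosine,

\[\frac{e^{x \cdot \left(eps\_m + -1\right)} + e^{x \cdot \left(-1 - eps\_m\right)}}{2} = e^{-x} \cdot \frac{e^{eps\_m \cdot x} + e^{-eps\_m \cdot x}}{2} = e^{-x} \cdot \cosh\left(eps\_m \cdot x\right) \]

Both exponentials enter with the same sign, so there is no cancellation; note that the derivation below obtains this form by Taylor expansion, so it approximates the specification rather than rewriting it exactly.
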
Derivation
  1. Initial program 75.0%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Simplified 59.6%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
  3. Add Preprocessing
  4. Taylor expanded in eps around inf 98.3%

    \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
  5. Taylor expanded in eps around 0 98.3%

    \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
  6. Step-by-step derivation
    1. associate-*r* 98.3%

      \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    2. neg-mul-1 98.3%

      \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    3. distribute-rgt-in 98.3%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    4. unsub-neg 98.3%

      \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
  7. Simplified 98.3%

    \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
  8. Final simplification 98.3%

    \[\leadsto \frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \]
  9. Add Preprocessing

Alternative 4: 84.5% accurate, 1.1× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 2.7 \cdot 10^{-7}:\\ \;\;\;\;\frac{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}{2}\\ \mathbf{elif}\;eps\_m \leq 3.3 \cdot 10^{+124}:\\ \;\;\;\;\frac{\frac{1}{\frac{-1}{eps\_m}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(eps\_m + -1\right)}}{eps\_m}\right)}{2}\\ \mathbf{elif}\;eps\_m \leq 1.7 \cdot 10^{+154}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{eps\_m \cdot \left(\frac{1 + e^{eps\_m \cdot x}}{eps\_m} - x\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 2.7e-7)
   (/ (* 2.0 (exp (- (log1p x) x))) 2.0)
   (if (<= eps_m 3.3e+124)
     (/
      (*
       (/ 1.0 (/ -1.0 eps_m))
       (+ x (/ (- -1.0 (exp (* x (+ eps_m -1.0)))) eps_m)))
      2.0)
     (if (<= eps_m 1.7e+154)
       (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0)
       (/ (* eps_m (- (/ (+ 1.0 (exp (* eps_m x))) eps_m) x)) 2.0)))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 2.7e-7) {
		tmp = (2.0 * exp((log1p(x) - x))) / 2.0;
	} else if (eps_m <= 3.3e+124) {
		tmp = ((1.0 / (-1.0 / eps_m)) * (x + ((-1.0 - exp((x * (eps_m + -1.0)))) / eps_m))) / 2.0;
	} else if (eps_m <= 1.7e+154) {
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	} else {
		tmp = (eps_m * (((1.0 + exp((eps_m * x))) / eps_m) - x)) / 2.0;
	}
	return tmp;
}
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 2.7e-7) {
		tmp = (2.0 * Math.exp((Math.log1p(x) - x))) / 2.0;
	} else if (eps_m <= 3.3e+124) {
		tmp = ((1.0 / (-1.0 / eps_m)) * (x + ((-1.0 - Math.exp((x * (eps_m + -1.0)))) / eps_m))) / 2.0;
	} else if (eps_m <= 1.7e+154) {
		tmp = (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
	} else {
		tmp = (eps_m * (((1.0 + Math.exp((eps_m * x))) / eps_m) - x)) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if eps_m <= 2.7e-7:
		tmp = (2.0 * math.exp((math.log1p(x) - x))) / 2.0
	elif eps_m <= 3.3e+124:
		tmp = ((1.0 / (-1.0 / eps_m)) * (x + ((-1.0 - math.exp((x * (eps_m + -1.0)))) / eps_m))) / 2.0
	elif eps_m <= 1.7e+154:
		tmp = (1.0 + math.exp((x * (-1.0 - eps_m)))) / 2.0
	else:
		tmp = (eps_m * (((1.0 + math.exp((eps_m * x))) / eps_m) - x)) / 2.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 2.7e-7)
		tmp = Float64(Float64(2.0 * exp(Float64(log1p(x) - x))) / 2.0);
	elseif (eps_m <= 3.3e+124)
		tmp = Float64(Float64(Float64(1.0 / Float64(-1.0 / eps_m)) * Float64(x + Float64(Float64(-1.0 - exp(Float64(x * Float64(eps_m + -1.0)))) / eps_m))) / 2.0);
	elseif (eps_m <= 1.7e+154)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	else
		tmp = Float64(Float64(eps_m * Float64(Float64(Float64(1.0 + exp(Float64(eps_m * x))) / eps_m) - x)) / 2.0);
	end
	return tmp
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 2.7e-7], N[(N[(2.0 * N[Exp[N[(N[Log[1 + x], $MachinePrecision] - x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[eps$95$m, 3.3e+124], N[(N[(N[(1.0 / N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] * N[(x + N[(N[(-1.0 - N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[eps$95$m, 1.7e+154], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(eps$95$m * N[(N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 2.7 \cdot 10^{-7}:\\
\;\;\;\;\frac{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}{2}\\
\mathbf{elif}\;eps\_m \leq 3.3 \cdot 10^{+124}:\\
\;\;\;\;\frac{\frac{1}{\frac{-1}{eps\_m}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(eps\_m + -1\right)}}{eps\_m}\right)}{2}\\
\mathbf{elif}\;eps\_m \leq 1.7 \cdot 10^{+154}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\
\mathbf{else}:\\
\;\;\;\;\frac{eps\_m \cdot \left(\frac{1 + e^{eps\_m \cdot x}}{eps\_m} - x\right)}{2}\\
\end{array}
\end{array}
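
A remark of ours, not part of Herbie's report: in exact arithmetic the second branch collapses to a much simpler expression, the same form that appears as the second regime of Alternative 5 below:

\[\frac{\frac{1}{\frac{-1}{eps\_m}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(eps\_m + -1\right)}}{eps\_m}\right)}{2} = \frac{e^{x \cdot \left(eps\_m + -1\right)} + \left(1 - eps\_m \cdot x\right)}{2} \]
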
Derivation
  1. Split input into 4 regimes
  2. if eps < 2.70000000000000009e-7

    1. Initial program 65.2%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified65.2%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 67.4%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Simplified68.5%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
    6. Step-by-step derivation
      1. cancel-sign-sub-inv68.5%

        \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
      2. add-exp-log66.9%

        \[\leadsto \frac{\color{blue}{e^{\log \left(\left(x + 1\right) \cdot e^{-x}\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      3. *-commutative66.9%

        \[\leadsto \frac{e^{\log \color{blue}{\left(e^{-x} \cdot \left(x + 1\right)\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      4. log-prod66.9%

        \[\leadsto \frac{e^{\color{blue}{\log \left(e^{-x}\right) + \log \left(x + 1\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      5. add-log-exp66.9%

        \[\leadsto \frac{e^{\color{blue}{\left(-x\right)} + \log \left(x + 1\right)} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      6. +-commutative66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \log \color{blue}{\left(1 + x\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      7. log1p-define66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \color{blue}{\mathsf{log1p}\left(x\right)}} + \left(--1\right) \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      8. metadata-eval66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + \color{blue}{1} \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}{2} \]
      9. add-exp-log66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + \color{blue}{e^{\log \left(1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}}}{2} \]
      10. *-un-lft-identity66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\log \color{blue}{\left(\left(x + 1\right) \cdot e^{-x}\right)}}}{2} \]
      11. *-commutative66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\log \color{blue}{\left(e^{-x} \cdot \left(x + 1\right)\right)}}}{2} \]
      12. log-prod66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\color{blue}{\log \left(e^{-x}\right) + \log \left(x + 1\right)}}}{2} \]
      13. add-log-exp66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\color{blue}{\left(-x\right)} + \log \left(x + 1\right)}}{2} \]
      14. +-commutative66.9%

        \[\leadsto \frac{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\left(-x\right) + \log \color{blue}{\left(1 + x\right)}}}{2} \]
    7. Applied egg-rr66.9%

      \[\leadsto \frac{\color{blue}{e^{\left(-x\right) + \mathsf{log1p}\left(x\right)} + e^{\left(-x\right) + \mathsf{log1p}\left(x\right)}}}{2} \]
    8. Step-by-step derivation
      1. count-266.9%

        \[\leadsto \frac{\color{blue}{2 \cdot e^{\left(-x\right) + \mathsf{log1p}\left(x\right)}}}{2} \]
      2. +-commutative66.9%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{\mathsf{log1p}\left(x\right) + \left(-x\right)}}}{2} \]
      3. unsub-neg66.9%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{\mathsf{log1p}\left(x\right) - x}}}{2} \]
    9. Simplified66.9%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}}{2} \]

    if 2.70000000000000009e-7 < eps < 3.30000000000000015e124

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 76.2%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around -inf 79.7%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*79.7%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}}{2} \]
      2. neg-mul-179.7%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}{2} \]
      3. sub-neg79.7%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} + \left(--1 \cdot x\right)\right)}}{2} \]
    7. Simplified79.7%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}}{2} \]
    8. Step-by-step derivation
      1. add-sqr-sqrt0.0%

        \[\leadsto \frac{\color{blue}{\left(\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}\right)} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      2. sqrt-unprod0.7%

        \[\leadsto \frac{\color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      3. sqr-neg0.7%

        \[\leadsto \frac{\sqrt{\color{blue}{\varepsilon \cdot \varepsilon}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      4. sqrt-unprod0.7%

        \[\leadsto \frac{\color{blue}{\left(\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}\right)} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      5. add-sqr-sqrt0.7%

        \[\leadsto \frac{\color{blue}{\varepsilon} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      6. /-rgt-identity0.7%

        \[\leadsto \frac{\color{blue}{\frac{\varepsilon}{1}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      7. clear-num0.7%

        \[\leadsto \frac{\color{blue}{\frac{1}{\frac{1}{\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      8. frac-2neg0.7%

        \[\leadsto \frac{\frac{1}{\color{blue}{\frac{-1}{-\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      9. metadata-eval0.7%

        \[\leadsto \frac{\frac{1}{\frac{\color{blue}{-1}}{-\varepsilon}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      10. add-sqr-sqrt0.0%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      11. sqrt-unprod79.7%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      12. sqr-neg79.7%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      13. sqrt-unprod79.2%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      14. add-sqr-sqrt79.7%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
    9. Applied egg-rr79.7%

      \[\leadsto \frac{\color{blue}{\frac{1}{\frac{-1}{\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]

    if 3.30000000000000015e124 < eps < 1.69999999999999987e154

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 80.6%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    5. Taylor expanded in eps around inf 80.6%

      \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
    6. Step-by-step derivation
      1. mul-1-neg80.6%

        \[\leadsto \frac{1 - \color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}}{2} \]
      2. associate-*r*80.6%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}\right)}{2} \]
      3. neg-mul-180.6%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-x\right)} \cdot \left(1 + \varepsilon\right)}\right)}{2} \]
    7. Simplified80.6%

      \[\leadsto \frac{\color{blue}{1 - \left(-e^{\left(-x\right) \cdot \left(1 + \varepsilon\right)}\right)}}{2} \]

    if 1.69999999999999987e154 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 77.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around -inf 90.8%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*90.8%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}}{2} \]
      2. neg-mul-190.8%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}{2} \]
      3. sub-neg90.8%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} + \left(--1 \cdot x\right)\right)}}{2} \]
    7. Simplified90.8%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}}{2} \]
    8. Taylor expanded in eps around inf 90.8%

      \[\leadsto \frac{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{\varepsilon}\right) + x\right)}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification72.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 2.7 \cdot 10^{-7}:\\ \;\;\;\;\frac{2 \cdot e^{\mathsf{log1p}\left(x\right) - x}}{2}\\ \mathbf{elif}\;\varepsilon \leq 3.3 \cdot 10^{+124}:\\ \;\;\;\;\frac{\frac{1}{\frac{-1}{\varepsilon}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(\varepsilon + -1\right)}}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;\varepsilon \leq 1.7 \cdot 10^{+154}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon \cdot \left(\frac{1 + e^{\varepsilon \cdot x}}{\varepsilon} - x\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 84.3% accurate, 1.8× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := e^{x \cdot \left(eps\_m + -1\right)}\\ \mathbf{if}\;x \leq -2 \cdot 10^{-268}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 80000000000000:\\ \;\;\;\;\frac{t\_0 + \left(1 - eps\_m \cdot x\right)}{2}\\ \mathbf{elif}\;x \leq 4.3 \cdot 10^{+58}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \mathbf{elif}\;x \leq 1.08 \cdot 10^{+282}:\\ \;\;\;\;\frac{1 + t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (exp (* x (+ eps_m -1.0)))))
   (if (<= x -2e-268)
     (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0)
     (if (<= x 80000000000000.0)
       (/ (+ t_0 (- 1.0 (* eps_m x))) 2.0)
       (if (<= x 4.3e+58)
         (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)
         (if (<= x 1.08e+282) (/ (+ 1.0 t_0) 2.0) 0.0))))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double t_0 = exp((x * (eps_m + -1.0)));
	double tmp;
	if (x <= -2e-268) {
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 80000000000000.0) {
		tmp = (t_0 + (1.0 - (eps_m * x))) / 2.0;
	} else if (x <= 4.3e+58) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 1.08e+282) {
		tmp = (1.0 + t_0) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = exp((x * (eps_m + (-1.0d0))))
    if (x <= (-2d-268)) then
        tmp = (1.0d0 + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
    else if (x <= 80000000000000.0d0) then
        tmp = (t_0 + (1.0d0 - (eps_m * x))) / 2.0d0
    else if (x <= 4.3d+58) then
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    else if (x <= 1.08d+282) then
        tmp = (1.0d0 + t_0) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double t_0 = Math.exp((x * (eps_m + -1.0)));
	double tmp;
	if (x <= -2e-268) {
		tmp = (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 80000000000000.0) {
		tmp = (t_0 + (1.0 - (eps_m * x))) / 2.0;
	} else if (x <= 4.3e+58) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 1.08e+282) {
		tmp = (1.0 + t_0) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	t_0 = math.exp((x * (eps_m + -1.0)))
	tmp = 0
	if x <= -2e-268:
		tmp = (1.0 + math.exp((x * (-1.0 - eps_m)))) / 2.0
	elif x <= 80000000000000.0:
		tmp = (t_0 + (1.0 - (eps_m * x))) / 2.0
	elif x <= 4.3e+58:
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	elif x <= 1.08e+282:
		tmp = (1.0 + t_0) / 2.0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	t_0 = exp(Float64(x * Float64(eps_m + -1.0)))
	tmp = 0.0
	if (x <= -2e-268)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	elseif (x <= 80000000000000.0)
		tmp = Float64(Float64(t_0 + Float64(1.0 - Float64(eps_m * x))) / 2.0);
	elseif (x <= 4.3e+58)
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	elseif (x <= 1.08e+282)
		tmp = Float64(Float64(1.0 + t_0) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	t_0 = exp((x * (eps_m + -1.0)));
	tmp = 0.0;
	if (x <= -2e-268)
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	elseif (x <= 80000000000000.0)
		tmp = (t_0 + (1.0 - (eps_m * x))) / 2.0;
	elseif (x <= 4.3e+58)
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	elseif (x <= 1.08e+282)
		tmp = (1.0 + t_0) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[x, -2e-268], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 80000000000000.0], N[(N[(t$95$0 + N[(1.0 - N[(eps$95$m * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 4.3e+58], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.08e+282], N[(N[(1.0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|
\\
\begin{array}{l}
t_0 := e^{x \cdot \left(eps\_m + -1\right)}\\
\mathbf{if}\;x \leq -2 \cdot 10^{-268}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\
\mathbf{elif}\;x \leq 80000000000000:\\
\;\;\;\;\frac{t\_0 + \left(1 - eps\_m \cdot x\right)}{2}\\
\mathbf{elif}\;x \leq 4.3 \cdot 10^{+58}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\
\mathbf{elif}\;x \leq 1.08 \cdot 10^{+282}:\\
\;\;\;\;\frac{1 + t\_0}{2}\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
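
A remark of ours, not part of Herbie's report: in exact arithmetic the third regime is simply the constant 1, because the 1/eps_m terms cancel:

\[\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2} = \frac{2}{2} = 1 \]

As generated, the code still performs both divisions and leaves that cancellation to floating point rather than folding the branch to a literal 1.0.
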
Derivation
  1. Split input into 5 regimes
  2. if x < -1.99999999999999992e-268

    1. Initial program 63.4%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified63.4%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 35.4%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    5. Taylor expanded in eps around inf 67.5%

      \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
    6. Step-by-step derivation
      1. mul-1-neg67.5%

        \[\leadsto \frac{1 - \color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}}{2} \]
      2. associate-*r*67.5%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}\right)}{2} \]
      3. neg-mul-167.5%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-x\right)} \cdot \left(1 + \varepsilon\right)}\right)}{2} \]
    7. Simplified67.5%

      \[\leadsto \frac{\color{blue}{1 - \left(-e^{\left(-x\right) \cdot \left(1 + \varepsilon\right)}\right)}}{2} \]

    if -1.99999999999999992e-268 < x < 8e13

    1. Initial program 67.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified34.2%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    9. Step-by-step derivation
      1. mul-1-neg100.0%

        \[\leadsto \frac{e^{\color{blue}{-\varepsilon \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. *-commutative100.0%

        \[\leadsto \frac{e^{-\color{blue}{x \cdot \varepsilon}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-neg-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-\varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    10. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-\varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    11. Taylor expanded in x around 0 85.7%

      \[\leadsto \frac{\color{blue}{\left(1 + -1 \cdot \left(\varepsilon \cdot x\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    12. Step-by-step derivation
      1. mul-1-neg85.7%

        \[\leadsto \frac{\left(1 + \color{blue}{\left(-\varepsilon \cdot x\right)}\right) + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. *-commutative85.7%

        \[\leadsto \frac{\left(1 + \left(-\color{blue}{x \cdot \varepsilon}\right)\right) + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. unsub-neg85.7%

        \[\leadsto \frac{\color{blue}{\left(1 - x \cdot \varepsilon\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. *-commutative85.7%

        \[\leadsto \frac{\left(1 - \color{blue}{\varepsilon \cdot x}\right) + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    13. Simplified85.7%

      \[\leadsto \frac{\color{blue}{\left(1 - \varepsilon \cdot x\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]

    if 8e13 < x < 4.29999999999999991e58

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 21.7%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in x around 0 74.2%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]

    if 4.29999999999999991e58 < x < 1.08000000000000004e282

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in x around 0 30.8%

      \[\leadsto \frac{\color{blue}{1} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]

    if 1.08000000000000004e282 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+ 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr 0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified 87.7%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 5 regimes into one program.
  4. Final simplification 67.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-268}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 80000000000000:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + \left(1 - \varepsilon \cdot x\right)}{2}\\ \mathbf{elif}\;x \leq 4.3 \cdot 10^{+58}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;x \leq 1.08 \cdot 10^{+282}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
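
A note on the final regime (mine, not part of Herbie's derivation): returning 0 there is accurate whenever both exponents x \cdot (\varepsilon - 1) and x \cdot (-1 - \varepsilon) fall below roughly -745, the point at which binary64 exp underflows to zero; for x above 1.08 \cdot 10^{282} this holds for essentially all 0 < \varepsilon < 1, so both exponential terms, and with them the whole quotient, evaluate to 0:

\[ e^{x \cdot \left(-1 - \varepsilon\right)} + e^{x \cdot \left(\varepsilon - 1\right)} \;\text{evaluates to}\; 0 + 0 \;\text{in binary64, and}\; \frac{0 + 0}{2} = 0. \]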

Alternative 6: 84.1% accurate, 1.8× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := \frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \mathbf{if}\;x \leq -1.42 \cdot 10^{-269}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 85000000000000:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;x \leq 9.5 \cdot 10^{+57}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \mathbf{elif}\;x \leq 9 \cdot 10^{+281}:\\ \;\;\;\;t\_0\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (/ (+ 1.0 (exp (* x (+ eps_m -1.0)))) 2.0)))
   (if (<= x -1.42e-269)
     (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0)
     (if (<= x 85000000000000.0)
       t_0
       (if (<= x 9.5e+57)
         (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)
         (if (<= x 9e+281) t_0 0.0))))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double t_0 = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	double tmp;
	if (x <= -1.42e-269) {
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 85000000000000.0) {
		tmp = t_0;
	} else if (x <= 9.5e+57) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 9e+281) {
		tmp = t_0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (1.0d0 + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
    if (x <= (-1.42d-269)) then
        tmp = (1.0d0 + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
    else if (x <= 85000000000000.0d0) then
        tmp = t_0
    else if (x <= 9.5d+57) then
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    else if (x <= 9d+281) then
        tmp = t_0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double t_0 = (1.0 + Math.exp((x * (eps_m + -1.0)))) / 2.0;
	double tmp;
	if (x <= -1.42e-269) {
		tmp = (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 85000000000000.0) {
		tmp = t_0;
	} else if (x <= 9.5e+57) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 9e+281) {
		tmp = t_0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	t_0 = (1.0 + math.exp((x * (eps_m + -1.0)))) / 2.0
	tmp = 0
	if x <= -1.42e-269:
		tmp = (1.0 + math.exp((x * (-1.0 - eps_m)))) / 2.0
	elif x <= 85000000000000.0:
		tmp = t_0
	elif x <= 9.5e+57:
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	elif x <= 9e+281:
		tmp = t_0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	t_0 = Float64(Float64(1.0 + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0)
	tmp = 0.0
	if (x <= -1.42e-269)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	elseif (x <= 85000000000000.0)
		tmp = t_0;
	elseif (x <= 9.5e+57)
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	elseif (x <= 9e+281)
		tmp = t_0;
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	t_0 = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	tmp = 0.0;
	if (x <= -1.42e-269)
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	elseif (x <= 85000000000000.0)
		tmp = t_0;
	elseif (x <= 9.5e+57)
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	elseif (x <= 9e+281)
		tmp = t_0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(1.0 + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[x, -1.42e-269], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 85000000000000.0], t$95$0, If[LessEqual[x, 9.5e+57], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 9e+281], t$95$0, 0.0]]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := \frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\
\mathbf{if}\;x \leq -1.42 \cdot 10^{-269}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\

\mathbf{elif}\;x \leq 85000000000000:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;x \leq 9.5 \cdot 10^{+57}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\

\mathbf{elif}\;x \leq 9 \cdot 10^{+281}:\\
\;\;\;\;t\_0\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
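
The eps_m = |ε| preprocessing is safe because the specification is an even function of ε. A quick check (mine, not part of the report): substituting -ε for ε swaps the two terms and recovers the original expression,

\[ \frac{\left(1 - \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x} - \left(-\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} = \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}. \]
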
Derivation
  1. Split input into 4 regimes
  2. if x < -1.42e-269

    1. Initial program 63.4%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 63.4%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 35.4%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    5. Taylor expanded in eps around inf 67.5%

      \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
    6. Step-by-step derivation
      1. mul-1-neg 67.5%

        \[\leadsto \frac{1 - \color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}}{2} \]
      2. associate-*r* 67.5%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}\right)}{2} \]
      3. neg-mul-1 67.5%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-x\right)} \cdot \left(1 + \varepsilon\right)}\right)}{2} \]
    7. Simplified 67.5%

      \[\leadsto \frac{\color{blue}{1 - \left(-e^{\left(-x\right) \cdot \left(1 + \varepsilon\right)}\right)}}{2} \]

    if -1.42e-269 < x < 8.5e13 or 9.4999999999999997e57 < x < 8.99999999999999965e281

    1. Initial program 79.2%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 58.1%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in x around 0 65.4%

      \[\leadsto \frac{\color{blue}{1} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]

    if 8.5e13 < x < 9.4999999999999997e57

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 21.7%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in x around 0 74.2%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]

    if 8.99999999999999965e281 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+ 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr 0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified 87.7%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification 67.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.42 \cdot 10^{-269}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 85000000000000:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{elif}\;x \leq 9.5 \cdot 10^{+57}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;x \leq 9 \cdot 10^{+281}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
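
One practical note, not part of the report: all of the listings for this alternative take the preprocessed eps_m as their argument, with the eps_m = fabs(eps) line shown separately above them. A minimal sketch of how a caller holding the original eps might compose the two (the helper name is mine; code stands for any of the implementations above, e.g. the Python one):

import math

def call_with_preprocessing(x, eps, code):
	# Apply the |eps| preprocessing that the report lists separately,
	# then evaluate the generated regime-based body code(x, eps_m).
	eps_m = math.fabs(eps)
	return code(x, eps_m)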

Alternative 7: 76.9% accurate, 1.8× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := \frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\ \mathbf{if}\;x \leq -2 \cdot 10^{-269}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;x \leq 62000000000000:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;x \leq 9 \cdot 10^{+57}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+281}:\\ \;\;\;\;t\_0\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (/ (+ 1.0 (exp (* x (+ eps_m -1.0)))) 2.0)))
   (if (<= x -2e-269)
     (/ (/ 2.0 (exp x)) 2.0)
     (if (<= x 62000000000000.0)
       t_0
       (if (<= x 9e+57)
         (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)
         (if (<= x 2e+281) t_0 0.0))))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double t_0 = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	double tmp;
	if (x <= -2e-269) {
		tmp = (2.0 / exp(x)) / 2.0;
	} else if (x <= 62000000000000.0) {
		tmp = t_0;
	} else if (x <= 9e+57) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 2e+281) {
		tmp = t_0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (1.0d0 + exp((x * (eps_m + (-1.0d0))))) / 2.0d0
    if (x <= (-2d-269)) then
        tmp = (2.0d0 / exp(x)) / 2.0d0
    else if (x <= 62000000000000.0d0) then
        tmp = t_0
    else if (x <= 9d+57) then
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    else if (x <= 2d+281) then
        tmp = t_0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double t_0 = (1.0 + Math.exp((x * (eps_m + -1.0)))) / 2.0;
	double tmp;
	if (x <= -2e-269) {
		tmp = (2.0 / Math.exp(x)) / 2.0;
	} else if (x <= 62000000000000.0) {
		tmp = t_0;
	} else if (x <= 9e+57) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 2e+281) {
		tmp = t_0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	t_0 = (1.0 + math.exp((x * (eps_m + -1.0)))) / 2.0
	tmp = 0
	if x <= -2e-269:
		tmp = (2.0 / math.exp(x)) / 2.0
	elif x <= 62000000000000.0:
		tmp = t_0
	elif x <= 9e+57:
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	elif x <= 2e+281:
		tmp = t_0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	t_0 = Float64(Float64(1.0 + exp(Float64(x * Float64(eps_m + -1.0)))) / 2.0)
	tmp = 0.0
	if (x <= -2e-269)
		tmp = Float64(Float64(2.0 / exp(x)) / 2.0);
	elseif (x <= 62000000000000.0)
		tmp = t_0;
	elseif (x <= 9e+57)
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	elseif (x <= 2e+281)
		tmp = t_0;
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	t_0 = (1.0 + exp((x * (eps_m + -1.0)))) / 2.0;
	tmp = 0.0;
	if (x <= -2e-269)
		tmp = (2.0 / exp(x)) / 2.0;
	elseif (x <= 62000000000000.0)
		tmp = t_0;
	elseif (x <= 9e+57)
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	elseif (x <= 2e+281)
		tmp = t_0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[(N[(1.0 + N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[x, -2e-269], N[(N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 62000000000000.0], t$95$0, If[LessEqual[x, 9e+57], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 2e+281], t$95$0, 0.0]]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := \frac{1 + e^{x \cdot \left(eps\_m + -1\right)}}{2}\\
\mathbf{if}\;x \leq -2 \cdot 10^{-269}:\\
\;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\

\mathbf{elif}\;x \leq 62000000000000:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;x \leq 9 \cdot 10^{+57}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\

\mathbf{elif}\;x \leq 2 \cdot 10^{+281}:\\
\;\;\;\;t\_0\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
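
A small observation about the first branch (mine, not the report's): mathematically (2 / e^x) / 2 is just e^{-x}, and in binary64 the nested form produces the same value as 1.0 / exp(x), since the trailing division by 2 is exact (away from the subnormal range) and only undoes the factor of 2 in the numerator:

\[ \frac{\frac{2}{e^{x}}}{2} = \frac{1}{e^{x}} = e^{-x}. \]
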
Derivation
  1. Split input into 4 regimes
  2. if x < -1.9999999999999999e-269

    1. Initial program 63.4%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 52.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 95.5%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 95.5%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 95.5%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 95.5%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 95.5%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 95.5%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 95.5%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Taylor expanded in eps around 0 82.8%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    9. Step-by-step derivation
      1. neg-mul-1 82.8%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
      2. exp-neg 82.8%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      3. associate-*r/ 82.8%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      4. metadata-eval 82.8%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified 82.8%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]

    if -1.9999999999999999e-269 < x < 6.2e13 or 8.99999999999999991e57 < x < 2.0000000000000001e281

    1. Initial program 79.2%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 58.1%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in x around 0 65.4%

      \[\leadsto \frac{\color{blue}{1} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]

    if 6.2e13 < x < 8.99999999999999991e57

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 21.7%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in x around 0 74.2%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]

    if 2.0000000000000001e281 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+ 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr 0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified 87.7%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification 73.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-269}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;x \leq 62000000000000:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{elif}\;x \leq 9 \cdot 10^{+57}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+281}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
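
Where the 2 \cdot e^{-x} in step 8 of the first regime above comes from (my restatement of the Taylor expansion in eps around 0, not text from the report): factoring e^{-x} out of the two-exponential form turns the sum into a cosh, which is approximately 1 when |ε·x| is small,

\[ e^{x \cdot \left(-1 - \varepsilon\right)} + e^{x \cdot \left(\varepsilon - 1\right)} = e^{-x} \cdot \left(e^{-\varepsilon \cdot x} + e^{\varepsilon \cdot x}\right) = 2 \cdot e^{-x} \cdot \cosh\left(\varepsilon \cdot x\right) \approx 2 \cdot e^{-x} \quad \text{for } \left|\varepsilon \cdot x\right| \ll 1. \]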

Alternative 8: 84.3% accurate, 1.8× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 3.3 \cdot 10^{+22}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;eps\_m \leq 3.6 \cdot 10^{+124}:\\ \;\;\;\;\frac{\frac{1}{\frac{-1}{eps\_m}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(eps\_m + -1\right)}}{eps\_m}\right)}{2}\\ \mathbf{elif}\;eps\_m \leq 1.7 \cdot 10^{+154}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{eps\_m \cdot \left(\frac{1 + e^{eps\_m \cdot x}}{eps\_m} - x\right)}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 3.3e+22)
   (/ (/ 2.0 (exp x)) 2.0)
   (if (<= eps_m 3.6e+124)
     (/
      (*
       (/ 1.0 (/ -1.0 eps_m))
       (+ x (/ (- -1.0 (exp (* x (+ eps_m -1.0)))) eps_m)))
      2.0)
     (if (<= eps_m 1.7e+154)
       (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0)
       (/ (* eps_m (- (/ (+ 1.0 (exp (* eps_m x))) eps_m) x)) 2.0)))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 3.3e+22) {
		tmp = (2.0 / exp(x)) / 2.0;
	} else if (eps_m <= 3.6e+124) {
		tmp = ((1.0 / (-1.0 / eps_m)) * (x + ((-1.0 - exp((x * (eps_m + -1.0)))) / eps_m))) / 2.0;
	} else if (eps_m <= 1.7e+154) {
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	} else {
		tmp = (eps_m * (((1.0 + exp((eps_m * x))) / eps_m) - x)) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (eps_m <= 3.3d+22) then
        tmp = (2.0d0 / exp(x)) / 2.0d0
    else if (eps_m <= 3.6d+124) then
        tmp = ((1.0d0 / ((-1.0d0) / eps_m)) * (x + (((-1.0d0) - exp((x * (eps_m + (-1.0d0))))) / eps_m))) / 2.0d0
    else if (eps_m <= 1.7d+154) then
        tmp = (1.0d0 + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
    else
        tmp = (eps_m * (((1.0d0 + exp((eps_m * x))) / eps_m) - x)) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 3.3e+22) {
		tmp = (2.0 / Math.exp(x)) / 2.0;
	} else if (eps_m <= 3.6e+124) {
		tmp = ((1.0 / (-1.0 / eps_m)) * (x + ((-1.0 - Math.exp((x * (eps_m + -1.0)))) / eps_m))) / 2.0;
	} else if (eps_m <= 1.7e+154) {
		tmp = (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
	} else {
		tmp = (eps_m * (((1.0 + Math.exp((eps_m * x))) / eps_m) - x)) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if eps_m <= 3.3e+22:
		tmp = (2.0 / math.exp(x)) / 2.0
	elif eps_m <= 3.6e+124:
		tmp = ((1.0 / (-1.0 / eps_m)) * (x + ((-1.0 - math.exp((x * (eps_m + -1.0)))) / eps_m))) / 2.0
	elif eps_m <= 1.7e+154:
		tmp = (1.0 + math.exp((x * (-1.0 - eps_m)))) / 2.0
	else:
		tmp = (eps_m * (((1.0 + math.exp((eps_m * x))) / eps_m) - x)) / 2.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 3.3e+22)
		tmp = Float64(Float64(2.0 / exp(x)) / 2.0);
	elseif (eps_m <= 3.6e+124)
		tmp = Float64(Float64(Float64(1.0 / Float64(-1.0 / eps_m)) * Float64(x + Float64(Float64(-1.0 - exp(Float64(x * Float64(eps_m + -1.0)))) / eps_m))) / 2.0);
	elseif (eps_m <= 1.7e+154)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	else
		tmp = Float64(Float64(eps_m * Float64(Float64(Float64(1.0 + exp(Float64(eps_m * x))) / eps_m) - x)) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (eps_m <= 3.3e+22)
		tmp = (2.0 / exp(x)) / 2.0;
	elseif (eps_m <= 3.6e+124)
		tmp = ((1.0 / (-1.0 / eps_m)) * (x + ((-1.0 - exp((x * (eps_m + -1.0)))) / eps_m))) / 2.0;
	elseif (eps_m <= 1.7e+154)
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	else
		tmp = (eps_m * (((1.0 + exp((eps_m * x))) / eps_m) - x)) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 3.3e+22], N[(N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[eps$95$m, 3.6e+124], N[(N[(N[(1.0 / N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] * N[(x + N[(N[(-1.0 - N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[eps$95$m, 1.7e+154], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(eps$95$m * N[(N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 3.3 \cdot 10^{+22}:\\
\;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\

\mathbf{elif}\;eps\_m \leq 3.6 \cdot 10^{+124}:\\
\;\;\;\;\frac{\frac{1}{\frac{-1}{eps\_m}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(eps\_m + -1\right)}}{eps\_m}\right)}{2}\\

\mathbf{elif}\;eps\_m \leq 1.7 \cdot 10^{+154}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{eps\_m \cdot \left(\frac{1 + e^{eps\_m \cdot x}}{eps\_m} - x\right)}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if eps < 3.2999999999999998e22

    1. Initial program 66.1%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 53.8%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.6%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 97.6%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 97.6%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 97.6%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 97.6%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 97.6%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 97.6%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Taylor expanded in eps around 0 76.5%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    9. Step-by-step derivation
      1. neg-mul-1 76.5%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
      2. exp-neg 76.5%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      3. associate-*r/ 76.5%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      4. metadata-eval 76.5%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified 76.5%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]

    if 3.2999999999999998e22 < eps < 3.59999999999999986e124

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 83.3%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around -inf 87.6%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 87.6%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}}{2} \]
      2. neg-mul-1 87.6%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}{2} \]
      3. sub-neg 87.6%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} + \left(--1 \cdot x\right)\right)}}{2} \]
    7. Simplified 87.6%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}}{2} \]
    8. Step-by-step derivation
      1. add-sqr-sqrt 0.0%

        \[\leadsto \frac{\color{blue}{\left(\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}\right)} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      2. sqrt-unprod 0.8%

        \[\leadsto \frac{\color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      3. sqr-neg 0.8%

        \[\leadsto \frac{\sqrt{\color{blue}{\varepsilon \cdot \varepsilon}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      4. sqrt-unprod 0.8%

        \[\leadsto \frac{\color{blue}{\left(\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}\right)} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      5. add-sqr-sqrt 0.8%

        \[\leadsto \frac{\color{blue}{\varepsilon} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      6. /-rgt-identity 0.8%

        \[\leadsto \frac{\color{blue}{\frac{\varepsilon}{1}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      7. clear-num 0.8%

        \[\leadsto \frac{\color{blue}{\frac{1}{\frac{1}{\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      8. frac-2neg 0.8%

        \[\leadsto \frac{\frac{1}{\color{blue}{\frac{-1}{-\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      9. metadata-eval 0.8%

        \[\leadsto \frac{\frac{1}{\frac{\color{blue}{-1}}{-\varepsilon}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      10. add-sqr-sqrt 0.0%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      11. sqrt-unprod 87.6%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      12. sqr-neg 87.6%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      13. sqrt-unprod 87.0%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
      14. add-sqr-sqrt 87.6%

        \[\leadsto \frac{\frac{1}{\frac{-1}{\color{blue}{\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]
    9. Applied egg-rr 87.6%

      \[\leadsto \frac{\color{blue}{\frac{1}{\frac{-1}{\varepsilon}}} \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}{2} \]

    if 3.59999999999999986e124 < eps < 1.69999999999999987e154

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 80.6%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    5. Taylor expanded in eps around inf 80.6%

      \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
    6. Step-by-step derivation
      1. mul-1-neg 80.6%

        \[\leadsto \frac{1 - \color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}}{2} \]
      2. associate-*r* 80.6%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}\right)}{2} \]
      3. neg-mul-1 80.6%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-x\right)} \cdot \left(1 + \varepsilon\right)}\right)}{2} \]
    7. Simplified 80.6%

      \[\leadsto \frac{\color{blue}{1 - \left(-e^{\left(-x\right) \cdot \left(1 + \varepsilon\right)}\right)}}{2} \]

    if 1.69999999999999987e154 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 77.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around -inf 90.8%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 90.8%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}}{2} \]
      2. neg-mul-1 90.8%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}{2} \]
      3. sub-neg 90.8%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} + \left(--1 \cdot x\right)\right)}}{2} \]
    7. Simplified 90.8%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}}{2} \]
    8. Taylor expanded in eps around inf 90.8%

      \[\leadsto \frac{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{\varepsilon}\right) + x\right)}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification 79.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 3.3 \cdot 10^{+22}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;\varepsilon \leq 3.6 \cdot 10^{+124}:\\ \;\;\;\;\frac{\frac{1}{\frac{-1}{\varepsilon}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(\varepsilon + -1\right)}}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;\varepsilon \leq 1.7 \cdot 10^{+154}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\varepsilon \cdot \left(\frac{1 + e^{\varepsilon \cdot x}}{\varepsilon} - x\right)}{2}\\ \end{array} \]
  5. Add Preprocessing
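
The middle branch of this alternative multiplies by 1/(-1/eps_m), which looks odd; in exact arithmetic that factor is just -eps_m, so (my simplification, not the report's) the branch is algebraically the same as (1 + e^{x·(ε-1)} - ε·x)/2, although the two forms take different rounding paths in binary64:

\[ \frac{1}{\frac{-1}{\varepsilon}} \cdot \left(x + \frac{-1 - e^{x \cdot \left(\varepsilon + -1\right)}}{\varepsilon}\right) = 1 + e^{x \cdot \left(\varepsilon + -1\right)} - \varepsilon \cdot x. \]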

Alternative 9: 84.2% accurate, 1.8× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 4.5 \cdot 10^{+22}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;eps\_m \leq 3.3 \cdot 10^{+124} \lor \neg \left(eps\_m \leq 1.7 \cdot 10^{+154}\right):\\ \;\;\;\;\frac{eps\_m \cdot \left(\frac{1 + e^{eps\_m \cdot x}}{eps\_m} - x\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 4.5e+22)
   (/ (/ 2.0 (exp x)) 2.0)
   (if (or (<= eps_m 3.3e+124) (not (<= eps_m 1.7e+154)))
     (/ (* eps_m (- (/ (+ 1.0 (exp (* eps_m x))) eps_m) x)) 2.0)
     (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 4.5e+22) {
		tmp = (2.0 / exp(x)) / 2.0;
	} else if ((eps_m <= 3.3e+124) || !(eps_m <= 1.7e+154)) {
		tmp = (eps_m * (((1.0 + exp((eps_m * x))) / eps_m) - x)) / 2.0;
	} else {
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (eps_m <= 4.5d+22) then
        tmp = (2.0d0 / exp(x)) / 2.0d0
    else if ((eps_m <= 3.3d+124) .or. (.not. (eps_m <= 1.7d+154))) then
        tmp = (eps_m * (((1.0d0 + exp((eps_m * x))) / eps_m) - x)) / 2.0d0
    else
        tmp = (1.0d0 + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 4.5e+22) {
		tmp = (2.0 / Math.exp(x)) / 2.0;
	} else if ((eps_m <= 3.3e+124) || !(eps_m <= 1.7e+154)) {
		tmp = (eps_m * (((1.0 + Math.exp((eps_m * x))) / eps_m) - x)) / 2.0;
	} else {
		tmp = (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if eps_m <= 4.5e+22:
		tmp = (2.0 / math.exp(x)) / 2.0
	elif (eps_m <= 3.3e+124) or not (eps_m <= 1.7e+154):
		tmp = (eps_m * (((1.0 + math.exp((eps_m * x))) / eps_m) - x)) / 2.0
	else:
		tmp = (1.0 + math.exp((x * (-1.0 - eps_m)))) / 2.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 4.5e+22)
		tmp = Float64(Float64(2.0 / exp(x)) / 2.0);
	elseif ((eps_m <= 3.3e+124) || !(eps_m <= 1.7e+154))
		tmp = Float64(Float64(eps_m * Float64(Float64(Float64(1.0 + exp(Float64(eps_m * x))) / eps_m) - x)) / 2.0);
	else
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (eps_m <= 4.5e+22)
		tmp = (2.0 / exp(x)) / 2.0;
	elseif ((eps_m <= 3.3e+124) || ~((eps_m <= 1.7e+154)))
		tmp = (eps_m * (((1.0 + exp((eps_m * x))) / eps_m) - x)) / 2.0;
	else
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 4.5e+22], N[(N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[Or[LessEqual[eps$95$m, 3.3e+124], N[Not[LessEqual[eps$95$m, 1.7e+154]], $MachinePrecision]], N[(N[(eps$95$m * N[(N[(N[(1.0 + N[Exp[N[(eps$95$m * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 4.5 \cdot 10^{+22}:\\
\;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\

\mathbf{elif}\;eps\_m \leq 3.3 \cdot 10^{+124} \lor \neg \left(eps\_m \leq 1.7 \cdot 10^{+154}\right):\\
\;\;\;\;\frac{eps\_m \cdot \left(\frac{1 + e^{eps\_m \cdot x}}{eps\_m} - x\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\


\end{array}
\end{array}
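
A note on the compound test in the second branch (my unpacking, not text from the report): it merges the two non-adjacent eps regimes that receive the same expression in the derivation below. Given that the first branch has already handled eps_m ≤ 4.5·10^{22}, the condition is equivalent to

\[ eps\_m \leq 3.3 \cdot 10^{+124} \lor \neg\left(eps\_m \leq 1.7 \cdot 10^{+154}\right) \iff eps\_m \in \left(4.5 \cdot 10^{+22},\, 3.3 \cdot 10^{+124}\right] \cup \left(1.7 \cdot 10^{+154},\, \infty\right). \]
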
Derivation
  1. Split input into 3 regimes
  2. if eps < 4.4999999999999998e22

    1. Initial program 66.1%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 53.8%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.6%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 97.6%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 97.6%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 97.6%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 97.6%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 97.6%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 97.6%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Taylor expanded in eps around 0 76.5%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    9. Step-by-step derivation
      1. neg-mul-1 76.5%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
      2. exp-neg 76.5%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      3. associate-*r/ 76.5%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      4. metadata-eval 76.5%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified 76.5%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]

    if 4.4999999999999998e22 < eps < 3.30000000000000015e124 or 1.69999999999999987e154 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 79.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around -inf 89.6%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*89.6%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}}{2} \]
      2. neg-mul-189.6%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot \left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} - -1 \cdot x\right)}{2} \]
      3. sub-neg89.6%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(-1 \cdot \frac{1 + e^{-1 \cdot \left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}}{\varepsilon} + \left(--1 \cdot x\right)\right)}}{2} \]
    7. Simplified89.6%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)}}{\varepsilon}\right) + x\right)}}{2} \]
    8. Taylor expanded in eps around inf 89.6%

      \[\leadsto \frac{\left(-\varepsilon\right) \cdot \left(\left(-\frac{1 + e^{\color{blue}{\varepsilon \cdot x}}}{\varepsilon}\right) + x\right)}{2} \]

    if 3.30000000000000015e124 < eps < 1.69999999999999987e154

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 80.6%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    5. Taylor expanded in eps around inf 80.6%

      \[\leadsto \frac{\color{blue}{1 - -1 \cdot e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}}{2} \]
    6. Step-by-step derivation
      1. mul-1-neg80.6%

        \[\leadsto \frac{1 - \color{blue}{\left(-e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}\right)}}{2} \]
      2. associate-*r*80.6%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 + \varepsilon\right)}}\right)}{2} \]
      3. neg-mul-180.6%

        \[\leadsto \frac{1 - \left(-e^{\color{blue}{\left(-x\right)} \cdot \left(1 + \varepsilon\right)}\right)}{2} \]
    7. Simplified80.6%

      \[\leadsto \frac{\color{blue}{1 - \left(-e^{\left(-x\right) \cdot \left(1 + \varepsilon\right)}\right)}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification79.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 4.5 \cdot 10^{+22}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;\varepsilon \leq 3.3 \cdot 10^{+124} \lor \neg \left(\varepsilon \leq 1.7 \cdot 10^{+154}\right):\\ \;\;\;\;\frac{\varepsilon \cdot \left(\frac{1 + e^{\varepsilon \cdot x}}{\varepsilon} - x\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing
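As a reading aid (our note, not part of Herbie's output), the middle (elif) branch of this alternative can be rearranged by distributing eps_m; the rewritten program keeps the multiply/divide pair, but its mathematical value is simply

\[ \frac{eps\_m \cdot \left(\frac{1 + e^{eps\_m \cdot x}}{eps\_m} - x\right)}{2} = \frac{1 + e^{eps\_m \cdot x} - eps\_m \cdot x}{2}. \]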

Alternative 10: 65.7% accurate, 2.0× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 1.45 \cdot 10^{-16}:\\ \;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\ \mathbf{elif}\;x \leq 7.5 \cdot 10^{+62}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+270}:\\ \;\;\;\;e^{x}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 1.45e-16)
   (+ 1.0 (* x (+ -1.0 (* x (+ 0.5 (* x -0.16666666666666666))))))
   (if (<= x 7.5e+62)
     (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)
     (if (<= x 2e+270) (exp x) 0.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.45e-16) {
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	} else if (x <= 7.5e+62) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 2e+270) {
		tmp = exp(x);
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 1.45d-16) then
        tmp = 1.0d0 + (x * ((-1.0d0) + (x * (0.5d0 + (x * (-0.16666666666666666d0))))))
    else if (x <= 7.5d+62) then
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    else if (x <= 2d+270) then
        tmp = exp(x)
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.45e-16) {
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	} else if (x <= 7.5e+62) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 2e+270) {
		tmp = Math.exp(x);
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= 1.45e-16:
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))))
	elif x <= 7.5e+62:
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	elif x <= 2e+270:
		tmp = math.exp(x)
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= 1.45e-16)
		tmp = Float64(1.0 + Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))))));
	elseif (x <= 7.5e+62)
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	elseif (x <= 2e+270)
		tmp = exp(x);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= 1.45e-16)
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	elseif (x <= 7.5e+62)
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	elseif (x <= 2e+270)
		tmp = exp(x);
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 1.45e-16], N[(1.0 + N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 7.5e+62], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 2e+270], N[Exp[x], $MachinePrecision], 0.0]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.45 \cdot 10^{-16}:\\
\;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\

\mathbf{elif}\;x \leq 7.5 \cdot 10^{+62}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\

\mathbf{elif}\;x \leq 2 \cdot 10^{+270}:\\
\;\;\;\;e^{x}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
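The listings above give the preprocessing step eps_m = fabs(eps) on a separate line from the two-argument code(x, eps_m). As a minimal sketch of how the two compose into a single (x, eps) function (the wrapper name alt10 is ours, not part of Herbie's output):

import math

def alt10(x, eps):
	# Preprocessing from the report: the rewrite depends only on |eps|.
	eps_m = math.fabs(eps)
	# Piecewise body of Alternative 10, exactly as listed above.
	if x <= 1.45e-16:
		return 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))))
	elif x <= 7.5e+62:
		return ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	elif x <= 2e+270:
		return math.exp(x)
	else:
		return 0.0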
Derivation
  1. Split input into 4 regimes
  2. if x < 1.4499999999999999e-16

    1. Initial program 64.2%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified42.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.5%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 78.7%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-178.7%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified78.7%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Taylor expanded in x around 0 74.5%

      \[\leadsto \color{blue}{1 + x \cdot \left(x \cdot \left(0.5 + -0.16666666666666666 \cdot x\right) - 1\right)} \]

    if 1.4499999999999999e-16 < x < 7.49999999999999998e62

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 42.2%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in x around 0 56.0%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]

    if 7.49999999999999998e62 < x < 2.0000000000000001e270

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 37.2%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-137.2%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified37.2%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Step-by-step derivation
      1. frac-2neg37.2%

        \[\leadsto \color{blue}{\frac{-2 \cdot e^{-x}}{-2}} \]
      2. div-inv37.2%

        \[\leadsto \color{blue}{\left(-2 \cdot e^{-x}\right) \cdot \frac{1}{-2}} \]
      3. *-commutative37.2%

        \[\leadsto \left(-\color{blue}{e^{-x} \cdot 2}\right) \cdot \frac{1}{-2} \]
      4. distribute-rgt-neg-in37.2%

        \[\leadsto \color{blue}{\left(e^{-x} \cdot \left(-2\right)\right)} \cdot \frac{1}{-2} \]
      5. add-sqr-sqrt0.0%

        \[\leadsto \left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      6. sqrt-unprod64.4%

        \[\leadsto \left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      7. sqr-neg64.4%

        \[\leadsto \left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      8. sqrt-unprod64.4%

        \[\leadsto \left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      9. add-sqr-sqrt64.4%

        \[\leadsto \left(e^{\color{blue}{x}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      10. metadata-eval64.4%

        \[\leadsto \left(e^{x} \cdot \color{blue}{-2}\right) \cdot \frac{1}{-2} \]
      11. metadata-eval64.4%

        \[\leadsto \left(e^{x} \cdot -2\right) \cdot \frac{1}{\color{blue}{-2}} \]
      12. metadata-eval64.4%

        \[\leadsto \left(e^{x} \cdot -2\right) \cdot \color{blue}{-0.5} \]
    9. Applied egg-rr64.4%

      \[\leadsto \color{blue}{\left(e^{x} \cdot -2\right) \cdot -0.5} \]
    10. Step-by-step derivation
      1. associate-*l*64.4%

        \[\leadsto \color{blue}{e^{x} \cdot \left(-2 \cdot -0.5\right)} \]
      2. metadata-eval64.4%

        \[\leadsto e^{x} \cdot \color{blue}{1} \]
      3. *-rgt-identity64.4%

        \[\leadsto \color{blue}{e^{x}} \]
    11. Simplified64.4%

      \[\leadsto \color{blue}{e^{x}} \]

    if 2.0000000000000001e270 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified87.7%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification71.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.45 \cdot 10^{-16}:\\ \;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\ \mathbf{elif}\;x \leq 7.5 \cdot 10^{+62}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+270}:\\ \;\;\;\;e^{x}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
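For orientation (our note, not part of the report), the cubic in the first branch of Alternative 10 is the degree-3 Maclaurin polynomial of e^{-x} written in Horner form, which is where the constant -0.16666666666666666 ≈ −1/6 comes from:

\[ e^{-x} \approx 1 - x + \frac{x^{2}}{2} - \frac{x^{3}}{6} = 1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(-\tfrac{1}{6}\right)\right)\right). \]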

Alternative 11: 70.3% accurate, 2.0× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 4.5 \cdot 10^{+58}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+276}:\\ \;\;\;\;e^{x}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 4.5e+58) (/ (/ 2.0 (exp x)) 2.0) (if (<= x 2e+276) (exp x) 0.0)))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 4.5e+58) {
		tmp = (2.0 / exp(x)) / 2.0;
	} else if (x <= 2e+276) {
		tmp = exp(x);
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 4.5d+58) then
        tmp = (2.0d0 / exp(x)) / 2.0d0
    else if (x <= 2d+276) then
        tmp = exp(x)
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= 4.5e+58) {
		tmp = (2.0 / Math.exp(x)) / 2.0;
	} else if (x <= 2e+276) {
		tmp = Math.exp(x);
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= 4.5e+58:
		tmp = (2.0 / math.exp(x)) / 2.0
	elif x <= 2e+276:
		tmp = math.exp(x)
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= 4.5e+58)
		tmp = Float64(Float64(2.0 / exp(x)) / 2.0);
	elseif (x <= 2e+276)
		tmp = exp(x);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= 4.5e+58)
		tmp = (2.0 / exp(x)) / 2.0;
	elseif (x <= 2e+276)
		tmp = exp(x);
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 4.5e+58], N[(N[(2.0 / N[Exp[x], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 2e+276], N[Exp[x], $MachinePrecision], 0.0]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 4.5 \cdot 10^{+58}:\\
\;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\

\mathbf{elif}\;x \leq 2 \cdot 10^{+276}:\\
\;\;\;\;e^{x}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
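Algebraically (our note, not part of the report), the first branch of Alternative 11 is just e^{-x}, kept in the divided form that the derivation below produces:

\[ \frac{\;\frac{2}{e^{x}}\;}{2} = e^{-x}. \]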
Derivation
  1. Split input into 3 regimes
  2. if x < 4.4999999999999998e58

    1. Initial program 67.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified48.1%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.8%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 97.8%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*97.8%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-197.8%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in97.8%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg97.8%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified97.8%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Taylor expanded in eps around 0 76.5%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    9. Step-by-step derivation
      1. neg-mul-176.5%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
      2. exp-neg76.5%

        \[\leadsto \frac{2 \cdot \color{blue}{\frac{1}{e^{x}}}}{2} \]
      3. associate-*r/76.5%

        \[\leadsto \frac{\color{blue}{\frac{2 \cdot 1}{e^{x}}}}{2} \]
      4. metadata-eval76.5%

        \[\leadsto \frac{\frac{\color{blue}{2}}{e^{x}}}{2} \]
    10. Simplified76.5%

      \[\leadsto \frac{\color{blue}{\frac{2}{e^{x}}}}{2} \]

    if 4.4999999999999998e58 < x < 2.0000000000000001e276

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 37.7%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-137.7%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified37.7%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Step-by-step derivation
      1. frac-2neg37.7%

        \[\leadsto \color{blue}{\frac{-2 \cdot e^{-x}}{-2}} \]
      2. div-inv37.7%

        \[\leadsto \color{blue}{\left(-2 \cdot e^{-x}\right) \cdot \frac{1}{-2}} \]
      3. *-commutative37.7%

        \[\leadsto \left(-\color{blue}{e^{-x} \cdot 2}\right) \cdot \frac{1}{-2} \]
      4. distribute-rgt-neg-in37.7%

        \[\leadsto \color{blue}{\left(e^{-x} \cdot \left(-2\right)\right)} \cdot \frac{1}{-2} \]
      5. add-sqr-sqrt0.0%

        \[\leadsto \left(e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      6. sqrt-unprod63.8%

        \[\leadsto \left(e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      7. sqr-neg63.8%

        \[\leadsto \left(e^{\sqrt{\color{blue}{x \cdot x}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      8. sqrt-unprod63.8%

        \[\leadsto \left(e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      9. add-sqr-sqrt63.8%

        \[\leadsto \left(e^{\color{blue}{x}} \cdot \left(-2\right)\right) \cdot \frac{1}{-2} \]
      10. metadata-eval63.8%

        \[\leadsto \left(e^{x} \cdot \color{blue}{-2}\right) \cdot \frac{1}{-2} \]
      11. metadata-eval63.8%

        \[\leadsto \left(e^{x} \cdot -2\right) \cdot \frac{1}{\color{blue}{-2}} \]
      12. metadata-eval63.8%

        \[\leadsto \left(e^{x} \cdot -2\right) \cdot \color{blue}{-0.5} \]
    9. Applied egg-rr63.8%

      \[\leadsto \color{blue}{\left(e^{x} \cdot -2\right) \cdot -0.5} \]
    10. Step-by-step derivation
      1. associate-*l*63.8%

        \[\leadsto \color{blue}{e^{x} \cdot \left(-2 \cdot -0.5\right)} \]
      2. metadata-eval63.8%

        \[\leadsto e^{x} \cdot \color{blue}{1} \]
      3. *-rgt-identity63.8%

        \[\leadsto \color{blue}{e^{x}} \]
    11. Simplified63.8%

      \[\leadsto \color{blue}{e^{x}} \]

    if 2.0000000000000001e276 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified87.7%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification74.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 4.5 \cdot 10^{+58}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+276}:\\ \;\;\;\;e^{x}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
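As a minimal sanity check (our own sketch, not part of the report), Alternative 11 can be compared against the original binary64 program at a few sample points; the helper names original and alt11 are ours:

import math

def original(x, eps):
	# Original binary64 program from the specification.
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x)))
	        - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0

def alt11(x, eps):
	# Preprocessing from the report (unused here: this alternative's
	# branches do not depend on eps), then the piecewise body above.
	eps_m = math.fabs(eps)
	if x <= 4.5e+58:
		return (2.0 / math.exp(x)) / 2.0
	elif x <= 2e+276:
		return math.exp(x)
	return 0.0

# Print both values side by side at a few illustrative points; the rewrite
# trades accuracy for speed, so some disagreement is expected.
for x, eps in [(0.5, 1e-8), (2.0, 1e-3), (10.0, 0.5)]:
	print(x, eps, original(x, eps), alt11(x, eps))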

Alternative 12: 65.6% accurate, 6.3× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} t_0 := 2 + x \cdot \left(x - 2\right)\\ \mathbf{if}\;x \leq -3.1 \cdot 10^{+153}:\\ \;\;\;\;\frac{t\_0}{2}\\ \mathbf{elif}\;x \leq -0.037:\\ \;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 4.4 \cdot 10^{+102}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+270}:\\ \;\;\;\;\frac{x \cdot t\_0}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (let* ((t_0 (+ 2.0 (* x (- x 2.0)))))
   (if (<= x -3.1e+153)
     (/ t_0 2.0)
     (if (<= x -0.037)
       (* (* eps_m x) -0.5)
       (if (<= x 4500000000.0)
         1.0
         (if (<= x 4.4e+102) 0.0 (if (<= x 2e+270) (/ (* x t_0) 2.0) 0.0)))))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double t_0 = 2.0 + (x * (x - 2.0));
	double tmp;
	if (x <= -3.1e+153) {
		tmp = t_0 / 2.0;
	} else if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else if (x <= 4.4e+102) {
		tmp = 0.0;
	} else if (x <= 2e+270) {
		tmp = (x * t_0) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: t_0
    real(8) :: tmp
    t_0 = 2.0d0 + (x * (x - 2.0d0))
    if (x <= (-3.1d+153)) then
        tmp = t_0 / 2.0d0
    else if (x <= (-0.037d0)) then
        tmp = (eps_m * x) * (-0.5d0)
    else if (x <= 4500000000.0d0) then
        tmp = 1.0d0
    else if (x <= 4.4d+102) then
        tmp = 0.0d0
    else if (x <= 2d+270) then
        tmp = (x * t_0) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double t_0 = 2.0 + (x * (x - 2.0));
	double tmp;
	if (x <= -3.1e+153) {
		tmp = t_0 / 2.0;
	} else if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else if (x <= 4.4e+102) {
		tmp = 0.0;
	} else if (x <= 2e+270) {
		tmp = (x * t_0) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	t_0 = 2.0 + (x * (x - 2.0))
	tmp = 0
	if x <= -3.1e+153:
		tmp = t_0 / 2.0
	elif x <= -0.037:
		tmp = (eps_m * x) * -0.5
	elif x <= 4500000000.0:
		tmp = 1.0
	elif x <= 4.4e+102:
		tmp = 0.0
	elif x <= 2e+270:
		tmp = (x * t_0) / 2.0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	t_0 = Float64(2.0 + Float64(x * Float64(x - 2.0)))
	tmp = 0.0
	if (x <= -3.1e+153)
		tmp = Float64(t_0 / 2.0);
	elseif (x <= -0.037)
		tmp = Float64(Float64(eps_m * x) * -0.5);
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	elseif (x <= 4.4e+102)
		tmp = 0.0;
	elseif (x <= 2e+270)
		tmp = Float64(Float64(x * t_0) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	t_0 = 2.0 + (x * (x - 2.0));
	tmp = 0.0;
	if (x <= -3.1e+153)
		tmp = t_0 / 2.0;
	elseif (x <= -0.037)
		tmp = (eps_m * x) * -0.5;
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	elseif (x <= 4.4e+102)
		tmp = 0.0;
	elseif (x <= 2e+270)
		tmp = (x * t_0) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := Block[{t$95$0 = N[(2.0 + N[(x * N[(x - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -3.1e+153], N[(t$95$0 / 2.0), $MachinePrecision], If[LessEqual[x, -0.037], N[(N[(eps$95$m * x), $MachinePrecision] * -0.5), $MachinePrecision], If[LessEqual[x, 4500000000.0], 1.0, If[LessEqual[x, 4.4e+102], 0.0, If[LessEqual[x, 2e+270], N[(N[(x * t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
t_0 := 2 + x \cdot \left(x - 2\right)\\
\mathbf{if}\;x \leq -3.1 \cdot 10^{+153}:\\
\;\;\;\;\frac{t\_0}{2}\\

\mathbf{elif}\;x \leq -0.037:\\
\;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\

\mathbf{elif}\;x \leq 4500000000:\\
\;\;\;\;1\\

\mathbf{elif}\;x \leq 4.4 \cdot 10^{+102}:\\
\;\;\;\;0\\

\mathbf{elif}\;x \leq 2 \cdot 10^{+270}:\\
\;\;\;\;\frac{x \cdot t\_0}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 5 regimes
  2. if x < -3.1e153

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-1100.0%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Taylor expanded in x around 0 93.3%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(x - 2\right)}}{2} \]

    if -3.1e153 < x < -0.0369999999999999982

    1. Initial program 81.4%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified81.4%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 58.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around inf 20.7%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*20.7%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-120.7%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    7. Simplified20.7%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]
    8. Step-by-step derivation
      1. frac-2neg20.7%

        \[\leadsto \color{blue}{\frac{-\left(-\varepsilon\right) \cdot x}{-2}} \]
      2. div-inv20.7%

        \[\leadsto \color{blue}{\left(-\left(-\varepsilon\right) \cdot x\right) \cdot \frac{1}{-2}} \]
      3. *-commutative20.7%

        \[\leadsto \left(-\color{blue}{x \cdot \left(-\varepsilon\right)}\right) \cdot \frac{1}{-2} \]
      4. distribute-lft-neg-in20.7%

        \[\leadsto \color{blue}{\left(\left(-x\right) \cdot \left(-\varepsilon\right)\right)} \cdot \frac{1}{-2} \]
      5. add-sqr-sqrt0.7%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      6. sqrt-unprod0.7%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}\right) \cdot \frac{1}{-2} \]
      7. sqr-neg0.7%

        \[\leadsto \left(\left(-x\right) \cdot \sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}\right) \cdot \frac{1}{-2} \]
      8. sqrt-unprod0.1%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      9. add-sqr-sqrt20.4%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\varepsilon}\right) \cdot \frac{1}{-2} \]
      10. add-sqr-sqrt20.4%

        \[\leadsto \left(\color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      11. sqrt-unprod20.4%

        \[\leadsto \left(\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      12. sqr-neg20.4%

        \[\leadsto \left(\sqrt{\color{blue}{x \cdot x}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      13. sqrt-unprod0.0%

        \[\leadsto \left(\color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      14. add-sqr-sqrt20.7%

        \[\leadsto \left(\color{blue}{x} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      15. metadata-eval20.7%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \frac{1}{\color{blue}{-2}} \]
      16. metadata-eval20.7%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \color{blue}{-0.5} \]
    9. Applied egg-rr20.7%

      \[\leadsto \color{blue}{\left(x \cdot \varepsilon\right) \cdot -0.5} \]

    if -0.0369999999999999982 < x < 4.5e9

    1. Initial program 57.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified57.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 72.9%

      \[\leadsto \frac{\color{blue}{2}}{2} \]

    if 4.5e9 < x < 4.40000000000000015e102 or 2.0000000000000001e270 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified65.3%

      \[\leadsto \frac{\color{blue}{0}}{2} \]

    if 4.40000000000000015e102 < x < 2.0000000000000001e270

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 37.8%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Simplified37.8%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
    6. Taylor expanded in x around inf 37.8%

      \[\leadsto \frac{\color{blue}{x \cdot \left(e^{-x} - -1 \cdot e^{-x}\right)}}{2} \]
    7. Step-by-step derivation
      1. cancel-sign-sub-inv37.8%

        \[\leadsto \frac{x \cdot \color{blue}{\left(e^{-x} + \left(--1\right) \cdot e^{-x}\right)}}{2} \]
      2. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(e^{\color{blue}{-1 \cdot x}} + \left(--1\right) \cdot e^{-x}\right)}{2} \]
      3. metadata-eval37.8%

        \[\leadsto \frac{x \cdot \left(e^{-1 \cdot x} + \color{blue}{1} \cdot e^{-x}\right)}{2} \]
      4. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(e^{-1 \cdot x} + 1 \cdot e^{\color{blue}{-1 \cdot x}}\right)}{2} \]
      5. distribute-rgt1-in37.8%

        \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 + 1\right) \cdot e^{-1 \cdot x}\right)}}{2} \]
      6. metadata-eval37.8%

        \[\leadsto \frac{x \cdot \left(\color{blue}{2} \cdot e^{-1 \cdot x}\right)}{2} \]
      7. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
    8. Simplified37.8%

      \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
    9. Taylor expanded in x around 0 63.7%

      \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}}{2} \]
  3. Recombined 5 regimes into one program.
  4. Final simplification69.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -3.1 \cdot 10^{+153}:\\ \;\;\;\;\frac{2 + x \cdot \left(x - 2\right)}{2}\\ \mathbf{elif}\;x \leq -0.037:\\ \;\;\;\;\left(\varepsilon \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 4.4 \cdot 10^{+102}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+270}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
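For orientation (our note, not Herbie's), the shared subterm t_0 = 2 + x·(x − 2) is the degree-2 Maclaurin truncation of 2·e^{-x}, which is why it appears both on its own in the x ≤ −3.1·10^{153} branch and multiplied by x in the 4.4·10^{102} < x ≤ 2·10^{270} branch:

\[ 2\,e^{-x} = 2 - 2x + x^{2} - \frac{x^{3}}{3} + \cdots \approx 2 + x \cdot \left(x - 2\right). \]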

Alternative 13: 65.6% accurate, 8.7× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 1.45 \cdot 10^{-16}:\\ \;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\ \mathbf{elif}\;x \leq 5.8 \cdot 10^{+102}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+280}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 1.45e-16)
   (+ 1.0 (* x (+ -1.0 (* x (+ 0.5 (* x -0.16666666666666666))))))
   (if (<= x 5.8e+102)
     (/ (+ (+ 1.0 (/ 1.0 eps_m)) (+ 1.0 (/ -1.0 eps_m))) 2.0)
     (if (<= x 5e+280) (/ (* x (+ 2.0 (* x (- x 2.0)))) 2.0) 0.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.45e-16) {
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	} else if (x <= 5.8e+102) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 5e+280) {
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 1.45d-16) then
        tmp = 1.0d0 + (x * ((-1.0d0) + (x * (0.5d0 + (x * (-0.16666666666666666d0))))))
    else if (x <= 5.8d+102) then
        tmp = ((1.0d0 + (1.0d0 / eps_m)) + (1.0d0 + ((-1.0d0) / eps_m))) / 2.0d0
    else if (x <= 5d+280) then
        tmp = (x * (2.0d0 + (x * (x - 2.0d0)))) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.45e-16) {
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	} else if (x <= 5.8e+102) {
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	} else if (x <= 5e+280) {
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= 1.45e-16:
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))))
	elif x <= 5.8e+102:
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0
	elif x <= 5e+280:
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= 1.45e-16)
		tmp = Float64(1.0 + Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))))));
	elseif (x <= 5.8e+102)
		tmp = Float64(Float64(Float64(1.0 + Float64(1.0 / eps_m)) + Float64(1.0 + Float64(-1.0 / eps_m))) / 2.0);
	elseif (x <= 5e+280)
		tmp = Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x - 2.0)))) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= 1.45e-16)
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	elseif (x <= 5.8e+102)
		tmp = ((1.0 + (1.0 / eps_m)) + (1.0 + (-1.0 / eps_m))) / 2.0;
	elseif (x <= 5e+280)
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 1.45e-16], N[(1.0 + N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 5.8e+102], N[(N[(N[(1.0 + N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(-1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 5e+280], N[(N[(x * N[(2.0 + N[(x * N[(x - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.45 \cdot 10^{-16}:\\
\;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\

\mathbf{elif}\;x \leq 5.8 \cdot 10^{+102}:\\
\;\;\;\;\frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2}\\

\mathbf{elif}\;x \leq 5 \cdot 10^{+280}:\\
\;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
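One note on the 1.45·10^{-16} < x ≤ 5.8·10^{102} branch (our observation, not part of the report): in exact arithmetic the two 1/eps_m terms cancel and the branch is the constant 1,

\[ \frac{\left(1 + \frac{1}{eps\_m}\right) + \left(1 + \frac{-1}{eps\_m}\right)}{2} = 1, \]

whereas the binary64 evaluation can differ; for sufficiently small eps_m the leading 1s are absorbed into ±1/eps_m and the branch evaluates to 0.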
Derivation
  1. Split input into 4 regimes
  2. if x < 1.4499999999999999e-16

    1. Initial program 64.2%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified42.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.5%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 78.7%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-178.7%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified78.7%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Taylor expanded in x around 0 74.5%

      \[\leadsto \color{blue}{1 + x \cdot \left(x \cdot \left(0.5 + -0.16666666666666666 \cdot x\right) - 1\right)} \]

    if 1.4499999999999999e-16 < x < 5.8000000000000005e102

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 37.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in x around 0 50.0%

      \[\leadsto \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - 1\right)}{2} \]

    if 5.8000000000000005e102 < x < 5.0000000000000002e280

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 37.8%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Simplified37.8%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
    6. Taylor expanded in x around inf 37.8%

      \[\leadsto \frac{\color{blue}{x \cdot \left(e^{-x} - -1 \cdot e^{-x}\right)}}{2} \]
    7. Step-by-step derivation
      1. cancel-sign-sub-inv37.8%

        \[\leadsto \frac{x \cdot \color{blue}{\left(e^{-x} + \left(--1\right) \cdot e^{-x}\right)}}{2} \]
      2. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(e^{\color{blue}{-1 \cdot x}} + \left(--1\right) \cdot e^{-x}\right)}{2} \]
      3. metadata-eval37.8%

        \[\leadsto \frac{x \cdot \left(e^{-1 \cdot x} + \color{blue}{1} \cdot e^{-x}\right)}{2} \]
      4. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(e^{-1 \cdot x} + 1 \cdot e^{\color{blue}{-1 \cdot x}}\right)}{2} \]
      5. distribute-rgt1-in37.8%

        \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 + 1\right) \cdot e^{-1 \cdot x}\right)}}{2} \]
      6. metadata-eval37.8%

        \[\leadsto \frac{x \cdot \left(\color{blue}{2} \cdot e^{-1 \cdot x}\right)}{2} \]
      7. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
    8. Simplified37.8%

      \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
    9. Taylor expanded in x around 0 63.7%

      \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}}{2} \]

    if 5.0000000000000002e280 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified87.7%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification70.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.45 \cdot 10^{-16}:\\ \;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\ \mathbf{elif}\;x \leq 5.8 \cdot 10^{+102}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+280}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
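
For intuition, the small-x branch of the recombined program above, 1 + x*(-1 + x*(0.5 - x/6)), is the degree-3 Taylor polynomial of exp(-x) around 0 (the derivation reaches it by expanding 2*exp(-x)/2 in x). A minimal, purely illustrative Python check, not part of the Herbie output:

import math

# Hypothetical check: the small-x branch should track exp(-x) closely for |x| well below 1.
def small_x_branch(x):
    return 1.0 + x * (-1.0 + x * (0.5 + x * -0.16666666666666666))

for x in [1e-20, 1e-10, 1e-3, 0.1]:
    print(x, small_x_branch(x), math.exp(-x))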

Alternative 14: 66.2% accurate, 8.7× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 1.6:\\ \;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+102}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 4 \cdot 10^{+281}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 1.6)
   (+ 1.0 (* x (+ -1.0 (* x (+ 0.5 (* x -0.16666666666666666))))))
   (if (<= x 2e+102)
     0.0
     (if (<= x 4e+281) (/ (* x (+ 2.0 (* x (- x 2.0)))) 2.0) 0.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.6) {
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	} else if (x <= 2e+102) {
		tmp = 0.0;
	} else if (x <= 4e+281) {
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 1.6d0) then
        tmp = 1.0d0 + (x * ((-1.0d0) + (x * (0.5d0 + (x * (-0.16666666666666666d0))))))
    else if (x <= 2d+102) then
        tmp = 0.0d0
    else if (x <= 4d+281) then
        tmp = (x * (2.0d0 + (x * (x - 2.0d0)))) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.6) {
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	} else if (x <= 2e+102) {
		tmp = 0.0;
	} else if (x <= 4e+281) {
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= 1.6:
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))))
	elif x <= 2e+102:
		tmp = 0.0
	elif x <= 4e+281:
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= 1.6)
		tmp = Float64(1.0 + Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))))));
	elseif (x <= 2e+102)
		tmp = 0.0;
	elseif (x <= 4e+281)
		tmp = Float64(Float64(x * Float64(2.0 + Float64(x * Float64(x - 2.0)))) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= 1.6)
		tmp = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
	elseif (x <= 2e+102)
		tmp = 0.0;
	elseif (x <= 4e+281)
		tmp = (x * (2.0 + (x * (x - 2.0)))) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 1.6], N[(1.0 + N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 2e+102], 0.0, If[LessEqual[x, 4e+281], N[(N[(x * N[(2.0 + N[(x * N[(x - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.6:\\
\;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\

\mathbf{elif}\;x \leq 2 \cdot 10^{+102}:\\
\;\;\;\;0\\

\mathbf{elif}\;x \leq 4 \cdot 10^{+281}:\\
\;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 1.6000000000000001

    1. Initial program 64.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified43.2%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.5%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 77.5%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-177.5%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified77.5%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Taylor expanded in x around 0 73.3%

      \[\leadsto \color{blue}{1 + x \cdot \left(x \cdot \left(0.5 + -0.16666666666666666 \cdot x\right) - 1\right)} \]

    if 1.6000000000000001 < x < 1.99999999999999995e102 or 4.0000000000000001e281 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified61.7%

      \[\leadsto \frac{\color{blue}{0}}{2} \]

    if 1.99999999999999995e102 < x < 4.0000000000000001e281

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 37.8%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]
    5. Simplified37.8%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - -1 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
    6. Taylor expanded in x around inf 37.8%

      \[\leadsto \frac{\color{blue}{x \cdot \left(e^{-x} - -1 \cdot e^{-x}\right)}}{2} \]
    7. Step-by-step derivation
      1. cancel-sign-sub-inv37.8%

        \[\leadsto \frac{x \cdot \color{blue}{\left(e^{-x} + \left(--1\right) \cdot e^{-x}\right)}}{2} \]
      2. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(e^{\color{blue}{-1 \cdot x}} + \left(--1\right) \cdot e^{-x}\right)}{2} \]
      3. metadata-eval37.8%

        \[\leadsto \frac{x \cdot \left(e^{-1 \cdot x} + \color{blue}{1} \cdot e^{-x}\right)}{2} \]
      4. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(e^{-1 \cdot x} + 1 \cdot e^{\color{blue}{-1 \cdot x}}\right)}{2} \]
      5. distribute-rgt1-in37.8%

        \[\leadsto \frac{x \cdot \color{blue}{\left(\left(1 + 1\right) \cdot e^{-1 \cdot x}\right)}}{2} \]
      6. metadata-eval37.8%

        \[\leadsto \frac{x \cdot \left(\color{blue}{2} \cdot e^{-1 \cdot x}\right)}{2} \]
      7. neg-mul-137.8%

        \[\leadsto \frac{x \cdot \left(2 \cdot e^{\color{blue}{-x}}\right)}{2} \]
    8. Simplified37.8%

      \[\leadsto \frac{\color{blue}{x \cdot \left(2 \cdot e^{-x}\right)}}{2} \]
    9. Taylor expanded in x around 0 63.7%

      \[\leadsto \frac{\color{blue}{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification70.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1.6:\\ \;\;\;\;1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+102}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 4 \cdot 10^{+281}:\\ \;\;\;\;\frac{x \cdot \left(2 + x \cdot \left(x - 2\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
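
As a usage note, the eps_m = fabs(eps) line above is a preprocessing step performed by the caller before entering code; the regime bodies of Alternative 14 happen not to use eps_m at all. A small, self-contained Python sketch (the wrapper name is illustrative, not from the report):

import math

def alternative14(x, eps):
    eps_m = math.fabs(eps)  # preprocessing step listed above the FPCore; unused by the regimes below
    if x <= 1.6:
        return 1.0 + x * (-1.0 + x * (0.5 + x * -0.16666666666666666))
    elif x <= 2e+102:
        return 0.0
    elif x <= 4e+281:
        return (x * (2.0 + x * (x - 2.0))) / 2.0
    else:
        return 0.0

print(alternative14(0.25, 1e-9))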

Alternative 15: 63.4% accurate, 9.1× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -0.037:\\ \;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 4.6 \cdot 10^{+130}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 1.45 \cdot 10^{+157}:\\ \;\;\;\;\frac{eps\_m \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -0.037)
   (* (* eps_m x) -0.5)
   (if (<= x 4500000000.0)
     1.0
     (if (<= x 4.6e+130) 0.0 (if (<= x 1.45e+157) (/ (* eps_m x) 2.0) 0.0)))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else if (x <= 4.6e+130) {
		tmp = 0.0;
	} else if (x <= 1.45e+157) {
		tmp = (eps_m * x) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-0.037d0)) then
        tmp = (eps_m * x) * (-0.5d0)
    else if (x <= 4500000000.0d0) then
        tmp = 1.0d0
    else if (x <= 4.6d+130) then
        tmp = 0.0d0
    else if (x <= 1.45d+157) then
        tmp = (eps_m * x) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else if (x <= 4.6e+130) {
		tmp = 0.0;
	} else if (x <= 1.45e+157) {
		tmp = (eps_m * x) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= -0.037:
		tmp = (eps_m * x) * -0.5
	elif x <= 4500000000.0:
		tmp = 1.0
	elif x <= 4.6e+130:
		tmp = 0.0
	elif x <= 1.45e+157:
		tmp = (eps_m * x) / 2.0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= -0.037)
		tmp = Float64(Float64(eps_m * x) * -0.5);
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	elseif (x <= 4.6e+130)
		tmp = 0.0;
	elseif (x <= 1.45e+157)
		tmp = Float64(Float64(eps_m * x) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -0.037)
		tmp = (eps_m * x) * -0.5;
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	elseif (x <= 4.6e+130)
		tmp = 0.0;
	elseif (x <= 1.45e+157)
		tmp = (eps_m * x) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -0.037], N[(N[(eps$95$m * x), $MachinePrecision] * -0.5), $MachinePrecision], If[LessEqual[x, 4500000000.0], 1.0, If[LessEqual[x, 4.6e+130], 0.0, If[LessEqual[x, 1.45e+157], N[(N[(eps$95$m * x), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.037:\\
\;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\

\mathbf{elif}\;x \leq 4500000000:\\
\;\;\;\;1\\

\mathbf{elif}\;x \leq 4.6 \cdot 10^{+130}:\\
\;\;\;\;0\\

\mathbf{elif}\;x \leq 1.45 \cdot 10^{+157}:\\
\;\;\;\;\frac{eps\_m \cdot x}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if x < -0.0369999999999999982

    1. Initial program 92.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified92.7%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 48.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around inf 26.3%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*26.3%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-126.3%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    7. Simplified26.3%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]
    8. Step-by-step derivation
      1. frac-2neg26.3%

        \[\leadsto \color{blue}{\frac{-\left(-\varepsilon\right) \cdot x}{-2}} \]
      2. div-inv26.3%

        \[\leadsto \color{blue}{\left(-\left(-\varepsilon\right) \cdot x\right) \cdot \frac{1}{-2}} \]
      3. *-commutative26.3%

        \[\leadsto \left(-\color{blue}{x \cdot \left(-\varepsilon\right)}\right) \cdot \frac{1}{-2} \]
      4. distribute-lft-neg-in26.3%

        \[\leadsto \color{blue}{\left(\left(-x\right) \cdot \left(-\varepsilon\right)\right)} \cdot \frac{1}{-2} \]
      5. add-sqr-sqrt0.3%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      6. sqrt-unprod0.3%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}\right) \cdot \frac{1}{-2} \]
      7. sqr-neg0.3%

        \[\leadsto \left(\left(-x\right) \cdot \sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}\right) \cdot \frac{1}{-2} \]
      8. sqrt-unprod0.0%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      9. add-sqr-sqrt30.4%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\varepsilon}\right) \cdot \frac{1}{-2} \]
      10. add-sqr-sqrt30.4%

        \[\leadsto \left(\color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      11. sqrt-unprod37.2%

        \[\leadsto \left(\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      12. sqr-neg37.2%

        \[\leadsto \left(\sqrt{\color{blue}{x \cdot x}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      13. sqrt-unprod0.0%

        \[\leadsto \left(\color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      14. add-sqr-sqrt26.3%

        \[\leadsto \left(\color{blue}{x} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      15. metadata-eval26.3%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \frac{1}{\color{blue}{-2}} \]
      16. metadata-eval26.3%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \color{blue}{-0.5} \]
    9. Applied egg-rr26.3%

      \[\leadsto \color{blue}{\left(x \cdot \varepsilon\right) \cdot -0.5} \]

    if -0.0369999999999999982 < x < 4.5e9

    1. Initial program 57.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified57.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 72.9%

      \[\leadsto \frac{\color{blue}{2}}{2} \]

    if 4.5e9 < x < 4.60000000000000042e130 or 1.44999999999999994e157 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified55.3%

      \[\leadsto \frac{\color{blue}{0}}{2} \]

    if 4.60000000000000042e130 < x < 1.44999999999999994e157

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 67.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around inf 51.0%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*51.0%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-151.0%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    7. Simplified51.0%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]
    8. Step-by-step derivation
      1. add-sqr-sqrt51.0%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)}}{2} \]
      2. sqrt-unprod51.0%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\sqrt{x \cdot x}}}{2} \]
      3. sqr-neg51.0%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \sqrt{\color{blue}{\left(-x\right) \cdot \left(-x\right)}}}{2} \]
      4. sqrt-unprod0.0%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)}}{2} \]
      5. add-sqr-sqrt17.4%

        \[\leadsto \frac{\left(-\varepsilon\right) \cdot \color{blue}{\left(-x\right)}}{2} \]
      6. distribute-rgt-neg-in17.4%

        \[\leadsto \frac{\color{blue}{-\left(-\varepsilon\right) \cdot x}}{2} \]
      7. pow117.4%

        \[\leadsto \frac{\color{blue}{{\left(-\left(-\varepsilon\right) \cdot x\right)}^{1}}}{2} \]
      8. *-commutative17.4%

        \[\leadsto \frac{{\left(-\color{blue}{x \cdot \left(-\varepsilon\right)}\right)}^{1}}{2} \]
      9. distribute-lft-neg-in17.4%

        \[\leadsto \frac{{\color{blue}{\left(\left(-x\right) \cdot \left(-\varepsilon\right)\right)}}^{1}}{2} \]
      10. add-sqr-sqrt0.0%

        \[\leadsto \frac{{\left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}\right)}\right)}^{1}}{2} \]
      11. sqrt-unprod0.1%

        \[\leadsto \frac{{\left(\left(-x\right) \cdot \color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}\right)}^{1}}{2} \]
      12. sqr-neg0.1%

        \[\leadsto \frac{{\left(\left(-x\right) \cdot \sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}\right)}^{1}}{2} \]
      13. sqrt-unprod0.1%

        \[\leadsto \frac{{\left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}\right)}\right)}^{1}}{2} \]
      14. add-sqr-sqrt51.0%

        \[\leadsto \frac{{\left(\left(-x\right) \cdot \color{blue}{\varepsilon}\right)}^{1}}{2} \]
      15. add-sqr-sqrt0.0%

        \[\leadsto \frac{{\left(\color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)} \cdot \varepsilon\right)}^{1}}{2} \]
      16. sqrt-unprod17.4%

        \[\leadsto \frac{{\left(\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}} \cdot \varepsilon\right)}^{1}}{2} \]
      17. sqr-neg17.4%

        \[\leadsto \frac{{\left(\sqrt{\color{blue}{x \cdot x}} \cdot \varepsilon\right)}^{1}}{2} \]
      18. sqrt-unprod17.4%

        \[\leadsto \frac{{\left(\color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)} \cdot \varepsilon\right)}^{1}}{2} \]
      19. add-sqr-sqrt17.4%

        \[\leadsto \frac{{\left(\color{blue}{x} \cdot \varepsilon\right)}^{1}}{2} \]
    9. Applied egg-rr17.4%

      \[\leadsto \frac{\color{blue}{{\left(x \cdot \varepsilon\right)}^{1}}}{2} \]
    10. Step-by-step derivation
      1. unpow117.4%

        \[\leadsto \frac{\color{blue}{x \cdot \varepsilon}}{2} \]
    11. Simplified17.4%

      \[\leadsto \frac{\color{blue}{x \cdot \varepsilon}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification59.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.037:\\ \;\;\;\;\left(\varepsilon \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 4.6 \cdot 10^{+130}:\\ \;\;\;\;0\\ \mathbf{elif}\;x \leq 1.45 \cdot 10^{+157}:\\ \;\;\;\;\frac{\varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
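
One way to read the constant-0 regimes that nevertheless score highly at large x: once the exponent magnitudes exceed roughly 745, exp underflows to zero in binary64, so for typical eps the original double-precision program already evaluates to 0 there, and returning 0 directly loses little. An illustrative check of that threshold (not part of the report):

import math

# exp underflows to zero in binary64 once its argument drops below about -745.13
print(math.exp(-745.0), math.exp(-800.0))  # prints ~5e-324, then 0.0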

Alternative 16: 65.6% accurate, 14.1× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -3.1 \cdot 10^{+153}:\\ \;\;\;\;\frac{2 + x \cdot \left(x - 2\right)}{2}\\ \mathbf{elif}\;x \leq -0.037:\\ \;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -3.1e+153)
   (/ (+ 2.0 (* x (- x 2.0))) 2.0)
   (if (<= x -0.037) (* (* eps_m x) -0.5) (if (<= x 4500000000.0) 1.0 0.0))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= -3.1e+153) {
		tmp = (2.0 + (x * (x - 2.0))) / 2.0;
	} else if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-3.1d+153)) then
        tmp = (2.0d0 + (x * (x - 2.0d0))) / 2.0d0
    else if (x <= (-0.037d0)) then
        tmp = (eps_m * x) * (-0.5d0)
    else if (x <= 4500000000.0d0) then
        tmp = 1.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= -3.1e+153) {
		tmp = (2.0 + (x * (x - 2.0))) / 2.0;
	} else if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= -3.1e+153:
		tmp = (2.0 + (x * (x - 2.0))) / 2.0
	elif x <= -0.037:
		tmp = (eps_m * x) * -0.5
	elif x <= 4500000000.0:
		tmp = 1.0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= -3.1e+153)
		tmp = Float64(Float64(2.0 + Float64(x * Float64(x - 2.0))) / 2.0);
	elseif (x <= -0.037)
		tmp = Float64(Float64(eps_m * x) * -0.5);
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -3.1e+153)
		tmp = (2.0 + (x * (x - 2.0))) / 2.0;
	elseif (x <= -0.037)
		tmp = (eps_m * x) * -0.5;
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -3.1e+153], N[(N[(2.0 + N[(x * N[(x - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, -0.037], N[(N[(eps$95$m * x), $MachinePrecision] * -0.5), $MachinePrecision], If[LessEqual[x, 4500000000.0], 1.0, 0.0]]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.1 \cdot 10^{+153}:\\
\;\;\;\;\frac{2 + x \cdot \left(x - 2\right)}{2}\\

\mathbf{elif}\;x \leq -0.037:\\
\;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\

\mathbf{elif}\;x \leq 4500000000:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if x < -3.1e153

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-1100.0%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Taylor expanded in x around 0 93.3%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(x - 2\right)}}{2} \]

    if -3.1e153 < x < -0.0369999999999999982

    1. Initial program 81.4%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified81.4%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 58.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around inf 20.7%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*20.7%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-120.7%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    7. Simplified20.7%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]
    8. Step-by-step derivation
      1. frac-2neg20.7%

        \[\leadsto \color{blue}{\frac{-\left(-\varepsilon\right) \cdot x}{-2}} \]
      2. div-inv20.7%

        \[\leadsto \color{blue}{\left(-\left(-\varepsilon\right) \cdot x\right) \cdot \frac{1}{-2}} \]
      3. *-commutative20.7%

        \[\leadsto \left(-\color{blue}{x \cdot \left(-\varepsilon\right)}\right) \cdot \frac{1}{-2} \]
      4. distribute-lft-neg-in20.7%

        \[\leadsto \color{blue}{\left(\left(-x\right) \cdot \left(-\varepsilon\right)\right)} \cdot \frac{1}{-2} \]
      5. add-sqr-sqrt0.7%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      6. sqrt-unprod0.7%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}\right) \cdot \frac{1}{-2} \]
      7. sqr-neg0.7%

        \[\leadsto \left(\left(-x\right) \cdot \sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}\right) \cdot \frac{1}{-2} \]
      8. sqrt-unprod0.1%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      9. add-sqr-sqrt20.4%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\varepsilon}\right) \cdot \frac{1}{-2} \]
      10. add-sqr-sqrt20.4%

        \[\leadsto \left(\color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      11. sqrt-unprod20.4%

        \[\leadsto \left(\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      12. sqr-neg20.4%

        \[\leadsto \left(\sqrt{\color{blue}{x \cdot x}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      13. sqrt-unprod0.0%

        \[\leadsto \left(\color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      14. add-sqr-sqrt20.7%

        \[\leadsto \left(\color{blue}{x} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      15. metadata-eval20.7%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \frac{1}{\color{blue}{-2}} \]
      16. metadata-eval20.7%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \color{blue}{-0.5} \]
    9. Applied egg-rr20.7%

      \[\leadsto \color{blue}{\left(x \cdot \varepsilon\right) \cdot -0.5} \]

    if -0.0369999999999999982 < x < 4.5e9

    1. Initial program 57.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified57.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 72.9%

      \[\leadsto \frac{\color{blue}{2}}{2} \]

    if 4.5e9 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified50.8%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification65.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -3.1 \cdot 10^{+153}:\\ \;\;\;\;\frac{2 + x \cdot \left(x - 2\right)}{2}\\ \mathbf{elif}\;x \leq -0.037:\\ \;\;\;\;\left(\varepsilon \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
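
For reference, the quadratic branch used for very negative x above comes from the degree-2 Taylor expansion of 2*exp(-x) seen in step 8 of the first regime's derivation:

\[\frac{2 \cdot e^{-x}}{2} \approx \frac{2 - 2x + x^2}{2} = \frac{2 + x \cdot \left(x - 2\right)}{2} \]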

Alternative 17: 64.1% accurate, 20.6× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -0.037:\\ \;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -0.037) (* (* eps_m x) -0.5) (if (<= x 4500000000.0) 1.0 0.0)))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-0.037d0)) then
        tmp = (eps_m * x) * (-0.5d0)
    else if (x <= 4500000000.0d0) then
        tmp = 1.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= -0.037) {
		tmp = (eps_m * x) * -0.5;
	} else if (x <= 4500000000.0) {
		tmp = 1.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= -0.037:
		tmp = (eps_m * x) * -0.5
	elif x <= 4500000000.0:
		tmp = 1.0
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= -0.037)
		tmp = Float64(Float64(eps_m * x) * -0.5);
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -0.037)
		tmp = (eps_m * x) * -0.5;
	elseif (x <= 4500000000.0)
		tmp = 1.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -0.037], N[(N[(eps$95$m * x), $MachinePrecision] * -0.5), $MachinePrecision], If[LessEqual[x, 4500000000.0], 1.0, 0.0]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.037:\\
\;\;\;\;\left(eps\_m \cdot x\right) \cdot -0.5\\

\mathbf{elif}\;x \leq 4500000000:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.0369999999999999982

    1. Initial program 92.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified92.7%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 48.1%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\left(-1 \cdot \left(x \cdot \left(\left(1 + \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - 1\right)\right)\right) + \frac{1}{\varepsilon}\right) - 1\right)}}{2} \]
    5. Taylor expanded in eps around inf 26.3%

      \[\leadsto \frac{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*26.3%

        \[\leadsto \frac{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-126.3%

        \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    7. Simplified26.3%

      \[\leadsto \frac{\color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]
    8. Step-by-step derivation
      1. frac-2neg26.3%

        \[\leadsto \color{blue}{\frac{-\left(-\varepsilon\right) \cdot x}{-2}} \]
      2. div-inv26.3%

        \[\leadsto \color{blue}{\left(-\left(-\varepsilon\right) \cdot x\right) \cdot \frac{1}{-2}} \]
      3. *-commutative26.3%

        \[\leadsto \left(-\color{blue}{x \cdot \left(-\varepsilon\right)}\right) \cdot \frac{1}{-2} \]
      4. distribute-lft-neg-in26.3%

        \[\leadsto \color{blue}{\left(\left(-x\right) \cdot \left(-\varepsilon\right)\right)} \cdot \frac{1}{-2} \]
      5. add-sqr-sqrt0.3%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{-\varepsilon} \cdot \sqrt{-\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      6. sqrt-unprod0.3%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\sqrt{\left(-\varepsilon\right) \cdot \left(-\varepsilon\right)}}\right) \cdot \frac{1}{-2} \]
      7. sqr-neg0.3%

        \[\leadsto \left(\left(-x\right) \cdot \sqrt{\color{blue}{\varepsilon \cdot \varepsilon}}\right) \cdot \frac{1}{-2} \]
      8. sqrt-unprod0.0%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\left(\sqrt{\varepsilon} \cdot \sqrt{\varepsilon}\right)}\right) \cdot \frac{1}{-2} \]
      9. add-sqr-sqrt30.4%

        \[\leadsto \left(\left(-x\right) \cdot \color{blue}{\varepsilon}\right) \cdot \frac{1}{-2} \]
      10. add-sqr-sqrt30.4%

        \[\leadsto \left(\color{blue}{\left(\sqrt{-x} \cdot \sqrt{-x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      11. sqrt-unprod37.2%

        \[\leadsto \left(\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      12. sqr-neg37.2%

        \[\leadsto \left(\sqrt{\color{blue}{x \cdot x}} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      13. sqrt-unprod0.0%

        \[\leadsto \left(\color{blue}{\left(\sqrt{x} \cdot \sqrt{x}\right)} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      14. add-sqr-sqrt26.3%

        \[\leadsto \left(\color{blue}{x} \cdot \varepsilon\right) \cdot \frac{1}{-2} \]
      15. metadata-eval26.3%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \frac{1}{\color{blue}{-2}} \]
      16. metadata-eval26.3%

        \[\leadsto \left(x \cdot \varepsilon\right) \cdot \color{blue}{-0.5} \]
    9. Applied egg-rr26.3%

      \[\leadsto \color{blue}{\left(x \cdot \varepsilon\right) \cdot -0.5} \]

    if -0.0369999999999999982 < x < 4.5e9

    1. Initial program 57.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified57.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 72.9%

      \[\leadsto \frac{\color{blue}{2}}{2} \]

    if 4.5e9 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified50.8%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification59.2%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.037:\\ \;\;\;\;\left(\varepsilon \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 4500000000:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
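
The constant-1 branch for the middle regime is simply the value of the original expression at x = 0, where both exponentials equal 1, matching the "Taylor expanded in x around 0" step above:

\[\frac{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - 1\right)}{2} = \frac{2}{2} = 1 \]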

Alternative 18: 57.9% accurate, 28.3× speedup

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 1:\\ \;\;\;\;1 - x\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 (if (<= x 1.0) (- 1.0 x) 0.0))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.0) {
		tmp = 1.0 - x;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 1.0d0) then
        tmp = 1.0d0 - x
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= 1.0) {
		tmp = 1.0 - x;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	tmp = 0
	if x <= 1.0:
		tmp = 1.0 - x
	else:
		tmp = 0.0
	return tmp
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= 1.0)
		tmp = Float64(1.0 - x);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= 1.0)
		tmp = 1.0 - x;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 1.0], N[(1.0 - x), $MachinePrecision], 0.0]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 1:\\
\;\;\;\;1 - x\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 1

    1. Initial program 64.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 43.2%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.5%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 77.5%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. neg-mul-1 77.5%

        \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
    7. Simplified 77.5%

      \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
    8. Taylor expanded in x around 0 58.4%

      \[\leadsto \color{blue}{1 + -1 \cdot x} \]
    9. Step-by-step derivation
      1. neg-mul-1 58.4%

        \[\leadsto 1 + \color{blue}{\left(-x\right)} \]
      2. unsub-neg 58.4%

        \[\leadsto \color{blue}{1 - x} \]
    10. Simplified 58.4%

      \[\leadsto \color{blue}{1 - x} \]

    if 1 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
    5. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    6. Step-by-step derivation
      1. associate-*r* 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      2. neg-mul-1 100.0%

        \[\leadsto \frac{e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      3. distribute-rgt-in 100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
      4. unsub-neg 100.0%

        \[\leadsto \frac{e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    7. Simplified 100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \]
    8. Step-by-step derivation
      1. flip-+ 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
      2. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{e^{x \cdot \left(-1 - \varepsilon\right)} \cdot e^{x \cdot \left(-1 - \varepsilon\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}} - \frac{e^{x \cdot \left(\varepsilon - 1\right)} \cdot e^{x \cdot \left(\varepsilon - 1\right)}}{e^{x \cdot \left(-1 - \varepsilon\right)} - e^{x \cdot \left(\varepsilon - 1\right)}}}}{2} \]
    9. Applied egg-rr 0.0%

      \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}} - \frac{{\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
    10. Step-by-step derivation
      1. div-sub 0.0%

        \[\leadsto \frac{\color{blue}{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}}{2} \]
      2. +-commutative 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{{\left(e^{x}\right)}^{\color{blue}{\left(\varepsilon + -1\right)}} - {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}}}{2} \]
      3. +-inverses 0.0%

        \[\leadsto \frac{\frac{{\left({\left(e^{x}\right)}^{\left(-1 + \varepsilon\right)}\right)}^{2} - {\left(e^{x}\right)}^{\left(2 \cdot \left(\varepsilon + -1\right)\right)}}{\color{blue}{0}}}{2} \]
    11. Simplified 49.5%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 55.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 1:\\ \;\;\;\;1 - x\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing
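
Taken together, the x ≤ 1 branch is just the composition of the Taylor truncations recorded in steps 4, 5 and 8 of the regime derivation above. As a condensed restatement (not part of the generated derivation):

\[\frac{e^{-\left(1 + \varepsilon\right) \cdot x} + e^{x \cdot \left(\varepsilon - 1\right)}}{2} \;\leadsto\; \frac{2 \cdot e^{-x}}{2} = e^{-x} \;\leadsto\; 1 - x \]

In the 1 < x regime the later rewrites collapse the expression to 0/2, which the final simplification keeps as the constant branch 0.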

Alternative 19: 44.2% accurate, 75.7× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ 1 - x \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 (- 1.0 x))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	return 1.0 - x;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = 1.0d0 - x
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	return 1.0 - x;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	return 1.0 - x
eps_m = abs(eps)
function code(x, eps_m)
	return Float64(1.0 - x)
end
eps_m = abs(eps);
function tmp = code(x, eps_m)
	tmp = 1.0 - x;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := N[(1.0 - x), $MachinePrecision]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
1 - x
\end{array}
Derivation
  1. Initial program 75.0%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Simplified 59.6%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
  3. Add Preprocessing
  4. Taylor expanded in eps around inf 98.3%

    \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
  5. Taylor expanded in eps around 0 69.4%

    \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
  6. Step-by-step derivation
    1. neg-mul-1 69.4%

      \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
  7. Simplified 69.4%

    \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
  8. Taylor expanded in x around 0 41.9%

    \[\leadsto \color{blue}{1 + -1 \cdot x} \]
  9. Step-by-step derivation
    1. neg-mul-1 41.9%

      \[\leadsto 1 + \color{blue}{\left(-x\right)} \]
    2. unsub-neg 41.9%

      \[\leadsto \color{blue}{1 - x} \]
  10. Simplified 41.9%

    \[\leadsto \color{blue}{1 - x} \]
  11. Add Preprocessing

Alternative 20: 44.2% accurate, 75.7× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ x + 1 \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 (+ x 1.0))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	return x + 1.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = x + 1.0d0
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	return x + 1.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	return x + 1.0
eps_m = abs(eps)
function code(x, eps_m)
	return Float64(x + 1.0)
end
eps_m = abs(eps);
function tmp = code(x, eps_m)
	tmp = x + 1.0;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := N[(x + 1.0), $MachinePrecision]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
x + 1
\end{array}
Derivation
  1. Initial program 75.0%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Simplified 59.6%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
  3. Add Preprocessing
  4. Taylor expanded in eps around inf 98.3%

    \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
  5. Taylor expanded in eps around 0 69.4%

    \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
  6. Step-by-step derivation
    1. neg-mul-1 69.4%

      \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
  7. Simplified 69.4%

    \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
  8. Taylor expanded in x around 0 41.9%

    \[\leadsto \color{blue}{1 + -1 \cdot x} \]
  9. Step-by-step derivation
    1. neg-mul-1 41.9%

      \[\leadsto 1 + \color{blue}{\left(-x\right)} \]
    2. unsub-neg 41.9%

      \[\leadsto \color{blue}{1 - x} \]
  10. Simplified 41.9%

    \[\leadsto \color{blue}{1 - x} \]
  11. Step-by-step derivation
    1. sub-neg 41.9%

      \[\leadsto \color{blue}{1 + \left(-x\right)} \]
    2. add-sqr-sqrt 20.5%

      \[\leadsto 1 + \color{blue}{\sqrt{-x} \cdot \sqrt{-x}} \]
    3. sqrt-unprod 56.4%

      \[\leadsto 1 + \color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}} \]
    4. sqr-neg 56.4%

      \[\leadsto 1 + \sqrt{\color{blue}{x \cdot x}} \]
    5. sqrt-unprod 22.1%

      \[\leadsto 1 + \color{blue}{\sqrt{x} \cdot \sqrt{x}} \]
    6. add-sqr-sqrt 41.8%

      \[\leadsto 1 + \color{blue}{x} \]
  12. Applied egg-rr 41.8%

    \[\leadsto \color{blue}{1 + x} \]
  13. Final simplification 41.8%

    \[\leadsto x + 1 \]
  14. Add Preprocessing
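
One way to read the sub-steps of step 11 (a condensed restatement, not part of the generated derivation) is as a square-root detour that trades 1 - x for 1 + x:

\[1 - x = 1 + \left(-x\right) \;\leadsto\; 1 + \sqrt{-x} \cdot \sqrt{-x} \;\leadsto\; 1 + \sqrt{\left(-x\right) \cdot \left(-x\right)} = 1 + \sqrt{x \cdot x} \;\leadsto\; 1 + \sqrt{x} \cdot \sqrt{x} \;\leadsto\; 1 + x \]

Each square-root form is only an identity where its argument is non-negative, so the two endpoints agree only at x = 0; the accuracy dips at the intermediate steps (20.5% and 22.1%) correspond to the forms that are undefined on part of the sampled range.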

Alternative 21: 2.9% accurate, 113.5× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ -x \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 (- x))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	return -x;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = -x
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	return -x;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	return -x
eps_m = abs(eps)
function code(x, eps_m)
	return Float64(-x)
end
eps_m = abs(eps);
function tmp = code(x, eps_m)
	tmp = -x;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := (-x)
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
-x
\end{array}
Derivation
  1. Initial program 75.0%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Simplified 59.6%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
  3. Add Preprocessing
  4. Taylor expanded in eps around inf 98.3%

    \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)} + e^{x \cdot \left(\varepsilon - 1\right)}}}{2} \]
  5. Taylor expanded in eps around 0 69.4%

    \[\leadsto \frac{\color{blue}{2 \cdot e^{-1 \cdot x}}}{2} \]
  6. Step-by-step derivation
    1. neg-mul-1 69.4%

      \[\leadsto \frac{2 \cdot e^{\color{blue}{-x}}}{2} \]
  7. Simplified 69.4%

    \[\leadsto \frac{\color{blue}{2 \cdot e^{-x}}}{2} \]
  8. Taylor expanded in x around 0 41.9%

    \[\leadsto \color{blue}{1 + -1 \cdot x} \]
  9. Step-by-step derivation
    1. neg-mul-1 41.9%

      \[\leadsto 1 + \color{blue}{\left(-x\right)} \]
    2. unsub-neg 41.9%

      \[\leadsto \color{blue}{1 - x} \]
  10. Simplified 41.9%

    \[\leadsto \color{blue}{1 - x} \]
  11. Taylor expanded in x around inf 3.1%

    \[\leadsto \color{blue}{-1 \cdot x} \]
  12. Step-by-step derivation
    1. neg-mul-1 3.1%

      \[\leadsto \color{blue}{-x} \]
  13. Simplified 3.1%

    \[\leadsto \color{blue}{-x} \]
  14. Add Preprocessing
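
This last alternative keeps only the leading behaviour of 1 - x for large |x|: step 11 truncates the series around infinity and the remaining rewrites only tidy the sign, i.e. (a condensed restatement) \[1 - x \;\leadsto\; -1 \cdot x \;\leadsto\; -x \] Since -x and 1 - x agree only when |x| is large, the overall accuracy drops to 2.9% despite the 113.5× speedup.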

Reproduce

herbie shell --seed 2024110 
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))
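
Assuming a working Herbie installation, the first line above starts an interactive shell pinned to the seed used for this report; pasting the FPCore expression that follows into that shell should regenerate the alternatives shown above.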