NMSE Section 6.1, A

Percentage Accurate: 73.1% → 100.0%
Time: 12.6s
Alternatives: 14
Speedup: 2.0×

Specification

?
\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
/* Evaluates ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2
 * exactly as written in the specification. The two 1/eps terms nearly cancel
 * for small eps, which is why the report measures only ~73% accuracy here. */
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	# ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2,
	# evaluated with the same operation order as the specification.
	inv_eps = 1.0 / eps
	grow_term = (1.0 + inv_eps) * math.exp(-((1.0 - eps) * x))
	decay_term = (inv_eps - 1.0) * math.exp(-((1.0 + eps) * x))
	return (grow_term - decay_term) / 2.0
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 14 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 73.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
/* Initial program, repeated from the specification:
 * ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2.
 * Direct evaluation; the 1/eps terms can cancel (measured 73.1% accurate). */
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	# Direct evaluation of the specification, same operation order:
	# ((1 + 1/eps) * e^{-(1-eps)x} - (1/eps - 1) * e^{-(1+eps)x}) / 2.
	inv_eps = 1.0 / eps
	first = (1.0 + inv_eps) * math.exp(-((1.0 - eps) * x))
	second = (inv_eps - 1.0) * math.exp(-((1.0 + eps) * x))
	return (first - second) / 2.0
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 100.0% accurate, 1.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 1:\\ \;\;\;\;\frac{x + 1}{e^{x}}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1}{e^{x + x \cdot eps\_m}} + e^{x \cdot eps\_m}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 1.0)
   (/ (+ x 1.0) (exp x))
   (/ (+ (/ 1.0 (exp (+ x (* x eps_m)))) (exp (* x eps_m))) 2.0)))
eps_m = fabs(eps);
/* Herbie alternative 1: branches on the magnitude of eps.
 * eps_m is |eps| — the report's preprocessing step applies fabs(eps)
 * before calling this function. */
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 1.0) {
		/* Small |eps| regime: series-simplified form (x + 1) * e^{-x}. */
		tmp = (x + 1.0) / exp(x);
	} else {
		/* Large |eps| regime: mean of e^{-(1+eps_m)x} and e^{eps_m x}. */
		tmp = ((1.0 / exp((x + (x * eps_m)))) + exp((x * eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (eps_m <= 1.0d0) then
        tmp = (x + 1.0d0) / exp(x)
    else
        tmp = ((1.0d0 / exp((x + (x * eps_m)))) + exp((x * eps_m))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 1.0) {
		tmp = (x + 1.0) / Math.exp(x);
	} else {
		tmp = ((1.0 / Math.exp((x + (x * eps_m)))) + Math.exp((x * eps_m))) / 2.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	# Alternative 1: eps_m is |eps| (preprocessing applies fabs first).
	if eps_m <= 1.0:
		# Small-|eps| regime: (x + 1) * e^{-x}.
		return (x + 1.0) / math.exp(x)
	# Large-|eps| regime: mean of e^{-(1+eps_m)x} and e^{eps_m x}.
	scaled = x * eps_m
	return (1.0 / math.exp(x + scaled) + math.exp(scaled)) / 2.0
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 1.0)
		tmp = Float64(Float64(x + 1.0) / exp(x));
	else
		tmp = Float64(Float64(Float64(1.0 / exp(Float64(x + Float64(x * eps_m)))) + exp(Float64(x * eps_m))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (eps_m <= 1.0)
		tmp = (x + 1.0) / exp(x);
	else
		tmp = ((1.0 / exp((x + (x * eps_m)))) + exp((x * eps_m))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 1.0], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 / N[Exp[N[(x + N[(x * eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[Exp[N[(x * eps$95$m), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 1:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{1}{e^{x + x \cdot eps\_m}} + e^{x \cdot eps\_m}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 1

    1. Initial program 59.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified51.7%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 30.1%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+71.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg71.6%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg71.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses71.6%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified71.6%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in eps around 0 71.6%

      \[\leadsto \color{blue}{e^{-x} \cdot \left(1 + x\right)} \]
    8. Step-by-step derivation
      1. rec-exp71.6%

        \[\leadsto \color{blue}{\frac{1}{e^{x}}} \cdot \left(1 + x\right) \]
      2. +-commutative71.6%

        \[\leadsto \frac{1}{e^{x}} \cdot \color{blue}{\left(x + 1\right)} \]
      3. associate-*l/71.6%

        \[\leadsto \color{blue}{\frac{1 \cdot \left(x + 1\right)}{e^{x}}} \]
      4. *-lft-identity71.6%

        \[\leadsto \frac{\color{blue}{x + 1}}{e^{x}} \]
    9. Simplified71.6%

      \[\leadsto \color{blue}{\frac{x + 1}{e^{x}}} \]

    if 1 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified85.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification79.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 1:\\ \;\;\;\;\frac{x + 1}{e^{x}}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{1}{e^{x + x \cdot \varepsilon}} + e^{x \cdot \varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 98.9% accurate, 1.1× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \frac{e^{x \cdot \left(eps\_m + -1\right)} + \frac{1}{e^{x + x \cdot eps\_m}}}{2} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (/ (+ (exp (* x (+ eps_m -1.0))) (/ 1.0 (exp (+ x (* x eps_m))))) 2.0))
eps_m = fabs(eps);
/* Herbie alternative 2: mean of e^{x(eps_m - 1)} and e^{-(1+eps_m)x}.
 * eps_m is |eps| — preprocessing applies fabs(eps) before the call. */
double code(double x, double eps_m) {
	return (exp((x * (eps_m + -1.0))) + (1.0 / exp((x + (x * eps_m))))) / 2.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = (exp((x * (eps_m + (-1.0d0)))) + (1.0d0 / exp((x + (x * eps_m))))) / 2.0d0
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	return (Math.exp((x * (eps_m + -1.0))) + (1.0 / Math.exp((x + (x * eps_m))))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	# Alternative 2: mean of e^{x(eps_m - 1)} and e^{-(1+eps_m)x};
	# eps_m is |eps| (preprocessing applies fabs first).
	growing = math.exp(x * (eps_m + -1.0))
	decaying = 1.0 / math.exp(x + x * eps_m)
	return (growing + decaying) / 2.0
eps_m = abs(eps)
function code(x, eps_m)
	return Float64(Float64(exp(Float64(x * Float64(eps_m + -1.0))) + Float64(1.0 / exp(Float64(x + Float64(x * eps_m))))) / 2.0)
end
eps_m = abs(eps);
function tmp = code(x, eps_m)
	tmp = (exp((x * (eps_m + -1.0))) + (1.0 / exp((x + (x * eps_m))))) / 2.0;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := N[(N[(N[Exp[N[(x * N[(eps$95$m + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(1.0 / N[Exp[N[(x + N[(x * eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\frac{e^{x \cdot \left(eps\_m + -1\right)} + \frac{1}{e^{x + x \cdot eps\_m}}}{2}
\end{array}
Derivation
  1. Initial program 71.2%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Simplified62.0%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
  3. Add Preprocessing
  4. Taylor expanded in eps around inf 99.4%

    \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
  5. Final simplification99.4%

    \[\leadsto \frac{e^{x \cdot \left(\varepsilon + -1\right)} + \frac{1}{e^{x + x \cdot \varepsilon}}}{2} \]
  6. Add Preprocessing

Alternative 3: 85.0% accurate, 1.8× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -1 \cdot 10^{-277}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 27000:\\ \;\;\;\;\frac{e^{x \cdot eps\_m} + \frac{1}{1 + x \cdot \left(eps\_m + 1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -1e-277)
   (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0)
   (if (<= x 27000.0)
     (/ (+ (exp (* x eps_m)) (/ 1.0 (+ 1.0 (* x (+ eps_m 1.0))))) 2.0)
     0.0)))
eps_m = fabs(eps);
/* Herbie alternative 3: three regimes split on x.
 * eps_m is |eps| — preprocessing applies fabs(eps) before the call. */
double code(double x, double eps_m) {
	double tmp;
	if (x <= -1e-277) {
		/* Negative x: 1 dominates, plus the growing exponential. */
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 27000.0) {
		/* Moderate x: exponential plus a rational first-order term. */
		tmp = (exp((x * eps_m)) + (1.0 / (1.0 + (x * (eps_m + 1.0))))) / 2.0;
	} else {
		/* Very large x: expression underflows to zero. */
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-1d-277)) then
        tmp = (1.0d0 + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
    else if (x <= 27000.0d0) then
        tmp = (exp((x * eps_m)) + (1.0d0 / (1.0d0 + (x * (eps_m + 1.0d0))))) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= -1e-277) {
		tmp = (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 27000.0) {
		tmp = (Math.exp((x * eps_m)) + (1.0 / (1.0 + (x * (eps_m + 1.0))))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	# Alternative 3: three regimes split on x; eps_m is |eps|.
	if x <= -1e-277:
		return (1.0 + math.exp(x * (-1.0 - eps_m))) / 2.0
	if x <= 27000.0:
		return (math.exp(x * eps_m) + 1.0 / (1.0 + x * (eps_m + 1.0))) / 2.0
	# Very large x: the expression underflows to zero.
	return 0.0
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= -1e-277)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	elseif (x <= 27000.0)
		tmp = Float64(Float64(exp(Float64(x * eps_m)) + Float64(1.0 / Float64(1.0 + Float64(x * Float64(eps_m + 1.0))))) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -1e-277)
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	elseif (x <= 27000.0)
		tmp = (exp((x * eps_m)) + (1.0 / (1.0 + (x * (eps_m + 1.0))))) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -1e-277], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 27000.0], N[(N[(N[Exp[N[(x * eps$95$m), $MachinePrecision]], $MachinePrecision] + N[(1.0 / N[(1.0 + N[(x * N[(eps$95$m + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1 \cdot 10^{-277}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\

\mathbf{elif}\;x \leq 27000:\\
\;\;\;\;\frac{e^{x \cdot eps\_m} + \frac{1}{1 + x \cdot \left(eps\_m + 1\right)}}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -9.99999999999999969e-278

    1. Initial program 72.1%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified60.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 98.8%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in x around 0 69.0%

      \[\leadsto \frac{\color{blue}{1} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Taylor expanded in x around -inf 69.0%

      \[\leadsto \frac{\color{blue}{1 + \frac{1}{e^{\varepsilon \cdot x - -1 \cdot x}}}}{2} \]
    7. Simplified69.0%

      \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]

    if -9.99999999999999969e-278 < x < 27000

    1. Initial program 52.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified39.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.7%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 99.8%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative99.8%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified99.8%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 87.9%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{1 + x \cdot \left(1 + \varepsilon\right)}}}{2} \]

    if 27000 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 59.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub59.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg59.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp59.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses59.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval59.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified59.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification73.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1 \cdot 10^{-277}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 27000:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon} + \frac{1}{1 + x \cdot \left(\varepsilon + 1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 85.1% accurate, 1.9× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-276}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \mathbf{elif}\;x \leq 40000000:\\ \;\;\;\;\frac{1 + e^{x \cdot eps\_m}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -2e-276)
   (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0)
   (if (<= x 40000000.0) (/ (+ 1.0 (exp (* x eps_m))) 2.0) 0.0)))
eps_m = fabs(eps);
/* Herbie alternative 4: three regimes split on x.
 * eps_m is |eps| — preprocessing applies fabs(eps) before the call. */
double code(double x, double eps_m) {
	double tmp;
	if (x <= -2e-276) {
		/* Negative x: 1 plus the growing exponential. */
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 40000000.0) {
		/* Moderate x: coarser approximation dropping the decaying term. */
		tmp = (1.0 + exp((x * eps_m))) / 2.0;
	} else {
		/* Very large x: expression underflows to zero. */
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-2d-276)) then
        tmp = (1.0d0 + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
    else if (x <= 40000000.0d0) then
        tmp = (1.0d0 + exp((x * eps_m))) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
public static double code(double x, double eps_m) {
	double tmp;
	if (x <= -2e-276) {
		tmp = (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
	} else if (x <= 40000000.0) {
		tmp = (1.0 + Math.exp((x * eps_m))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	# Alternative 4: three regimes split on x; eps_m is |eps|.
	if x <= -2e-276:
		return (1.0 + math.exp(x * (-1.0 - eps_m))) / 2.0
	if x <= 40000000.0:
		return (1.0 + math.exp(x * eps_m)) / 2.0
	# Very large x: the expression underflows to zero.
	return 0.0
eps_m = abs(eps)
function code(x, eps_m)
	tmp = 0.0
	if (x <= -2e-276)
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	elseif (x <= 40000000.0)
		tmp = Float64(Float64(1.0 + exp(Float64(x * eps_m))) / 2.0);
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (x <= -2e-276)
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	elseif (x <= 40000000.0)
		tmp = (1.0 + exp((x * eps_m))) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -2e-276], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 40000000.0], N[(N[(1.0 + N[Exp[N[(x * eps$95$m), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2 \cdot 10^{-276}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\

\mathbf{elif}\;x \leq 40000000:\\
\;\;\;\;\frac{1 + e^{x \cdot eps\_m}}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2e-276

    1. Initial program 72.1%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified60.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 98.8%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in x around 0 69.0%

      \[\leadsto \frac{\color{blue}{1} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Taylor expanded in x around -inf 69.0%

      \[\leadsto \frac{\color{blue}{1 + \frac{1}{e^{\varepsilon \cdot x - -1 \cdot x}}}}{2} \]
    7. Simplified69.0%

      \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]

    if -2e-276 < x < 4e7

    1. Initial program 52.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified39.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.7%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 99.8%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative99.8%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified99.8%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 87.3%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{1}}}{2} \]

    if 4e7 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 59.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub59.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg59.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp59.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses59.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval59.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified59.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification73.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2 \cdot 10^{-276}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{elif}\;x \leq 40000000:\\ \;\;\;\;\frac{1 + e^{x \cdot \varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 74.1% accurate, 2.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 1:\\ \;\;\;\;\frac{x + 1}{e^{x}}\\ \mathbf{elif}\;eps\_m \leq 2.35 \cdot 10^{+204}:\\ \;\;\;\;0.5 + \frac{0.5}{e^{x}}\\ \mathbf{elif}\;eps\_m \leq 2.2 \cdot 10^{+263}:\\ \;\;\;\;\frac{2 + x \cdot \left(\frac{1 + eps\_m \cdot \left(eps\_m + 1\right)}{eps\_m} - eps\_m\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 + x \cdot \frac{\left(eps\_m + 1\right) \cdot \left(1 + \left(\frac{1}{eps\_m} + \left(\frac{1}{eps\_m} - eps\_m\right)\right)\right)}{eps\_m + 1}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 1.0)
   (/ (+ x 1.0) (exp x))
   (if (<= eps_m 2.35e+204)
     (+ 0.5 (/ 0.5 (exp x)))
     (if (<= eps_m 2.2e+263)
       (/
        (+ 2.0 (* x (- (/ (+ 1.0 (* eps_m (+ eps_m 1.0))) eps_m) eps_m)))
        2.0)
       (/
        (+
         2.0
         (*
          x
          (/
           (* (+ eps_m 1.0) (+ 1.0 (+ (/ 1.0 eps_m) (- (/ 1.0 eps_m) eps_m))))
           (+ eps_m 1.0))))
        2.0)))))
eps_m = fabs(eps);
/* Herbie alternative 5: four regimes split on the magnitude of eps.
 * eps_m is |eps| — preprocessing applies fabs(eps) before the call. */
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 1.0) {
		/* Small |eps|: series-simplified form (x + 1) * e^{-x}. */
		tmp = (x + 1.0) / exp(x);
	} else if (eps_m <= 2.35e+204) {
		/* Moderate |eps|: constant plus decaying exponential. */
		tmp = 0.5 + (0.5 / exp(x));
	} else if (eps_m <= 2.2e+263) {
		/* Huge |eps|: linear-in-x form; eps_m*eps_m can overflow to inf. */
		tmp = (2.0 + (x * (((1.0 + (eps_m * (eps_m + 1.0))) / eps_m) - eps_m))) / 2.0;
	} else {
		/* Extreme |eps|: algebraically equivalent linear form avoiding eps_m^2. */
		tmp = (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (eps_m <= 1.0d0) then
        tmp = (x + 1.0d0) / exp(x)
    else if (eps_m <= 2.35d+204) then
        tmp = 0.5d0 + (0.5d0 / exp(x))
    else if (eps_m <= 2.2d+263) then
        tmp = (2.0d0 + (x * (((1.0d0 + (eps_m * (eps_m + 1.0d0))) / eps_m) - eps_m))) / 2.0d0
    else
        tmp = (2.0d0 + (x * (((eps_m + 1.0d0) * (1.0d0 + ((1.0d0 / eps_m) + ((1.0d0 / eps_m) - eps_m)))) / (eps_m + 1.0d0)))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Herbie 4-regime rewrite of
 * ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2,
 * where eps_m = |eps| is taken by the caller (report preprocessing step).
 * Every branch keeps the exact generated expression so the double result
 * is unchanged.
 */
public static double code(double x, double eps_m) {
	if (eps_m <= 1.0) {
		// small |eps|: limit form (x + 1) * e^(-x)
		return (x + 1.0) / Math.exp(x);
	}
	if (eps_m <= 2.35e+204) {
		return 0.5 + (0.5 / Math.exp(x));
	}
	if (eps_m <= 2.2e+263) {
		// NOTE(review): eps_m * (eps_m + 1.0) overflows throughout this regime — confirm intended.
		return (2.0 + (x * (((1.0 + (eps_m * (eps_m + 1.0))) / eps_m) - eps_m))) / 2.0;
	}
	return (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Herbie 4-regime rewrite of
	((1 + 1/eps)*e**(-(1-eps)x) - (1/eps - 1)*e**(-(1+eps)x)) / 2.

	eps_m is abs(eps), taken by the caller (report preprocessing step).
	Each branch keeps the exact generated expression so the float
	result is unchanged.
	"""
	if eps_m <= 1.0:
		# small |eps|: limit form (x + 1) * e**-x
		return (x + 1.0) / math.exp(x)
	if eps_m <= 2.35e+204:
		return 0.5 + (0.5 / math.exp(x))
	if eps_m <= 2.2e+263:
		# NOTE(review): eps_m * (eps_m + 1.0) overflows (inf) throughout
		# this regime — confirm the report intends this.
		return (2.0 + (x * (((1.0 + (eps_m * (eps_m + 1.0))) / eps_m) - eps_m))) / 2.0
	return (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0
eps_m = abs(eps)
# Herbie 4-regime rewrite; eps_m = abs(eps) is taken by the caller.
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 1.0)
		# small |eps|: limit form (x + 1) * exp(-x)
		tmp = Float64(Float64(x + 1.0) / exp(x));
	elseif (eps_m <= 2.35e+204)
		tmp = Float64(0.5 + Float64(0.5 / exp(x)));
	elseif (eps_m <= 2.2e+263)
		# NOTE(review): eps_m * (eps_m + 1.0) overflows throughout this regime — confirm intended.
		tmp = Float64(Float64(2.0 + Float64(x * Float64(Float64(Float64(1.0 + Float64(eps_m * Float64(eps_m + 1.0))) / eps_m) - eps_m))) / 2.0);
	else
		tmp = Float64(Float64(2.0 + Float64(x * Float64(Float64(Float64(eps_m + 1.0) * Float64(1.0 + Float64(Float64(1.0 / eps_m) + Float64(Float64(1.0 / eps_m) - eps_m)))) / Float64(eps_m + 1.0)))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Herbie 4-regime rewrite; eps_m = abs(eps) is taken by the caller.
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (eps_m <= 1.0)
		% small |eps|: limit form (x + 1) * exp(-x)
		tmp = (x + 1.0) / exp(x);
	elseif (eps_m <= 2.35e+204)
		tmp = 0.5 + (0.5 / exp(x));
	elseif (eps_m <= 2.2e+263)
		% NOTE(review): eps_m * (eps_m + 1.0) overflows throughout this regime - confirm intended.
		tmp = (2.0 + (x * (((1.0 + (eps_m * (eps_m + 1.0))) / eps_m) - eps_m))) / 2.0;
	else
		tmp = (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Herbie 4-regime rewrite; eps$95$m = |eps| (preprocessing applied separately). *)
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 1.0], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], If[LessEqual[eps$95$m, 2.35e+204], N[(0.5 + N[(0.5 / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[eps$95$m, 2.2e+263], N[(N[(2.0 + N[(x * N[(N[(N[(1.0 + N[(eps$95$m * N[(eps$95$m + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] - eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(2.0 + N[(x * N[(N[(N[(eps$95$m + 1.0), $MachinePrecision] * N[(1.0 + N[(N[(1.0 / eps$95$m), $MachinePrecision] + N[(N[(1.0 / eps$95$m), $MachinePrecision] - eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(eps$95$m + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 1:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\

\mathbf{elif}\;eps\_m \leq 2.35 \cdot 10^{+204}:\\
\;\;\;\;0.5 + \frac{0.5}{e^{x}}\\

\mathbf{elif}\;eps\_m \leq 2.2 \cdot 10^{+263}:\\
\;\;\;\;\frac{2 + x \cdot \left(\frac{1 + eps\_m \cdot \left(eps\_m + 1\right)}{eps\_m} - eps\_m\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{2 + x \cdot \frac{\left(eps\_m + 1\right) \cdot \left(1 + \left(\frac{1}{eps\_m} + \left(\frac{1}{eps\_m} - eps\_m\right)\right)\right)}{eps\_m + 1}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if eps < 1

    1. Initial program 59.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified51.7%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 30.1%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+71.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg71.6%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg71.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses71.6%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified71.6%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in eps around 0 71.6%

      \[\leadsto \color{blue}{e^{-x} \cdot \left(1 + x\right)} \]
    8. Step-by-step derivation
      1. rec-exp71.6%

        \[\leadsto \color{blue}{\frac{1}{e^{x}}} \cdot \left(1 + x\right) \]
      2. +-commutative71.6%

        \[\leadsto \frac{1}{e^{x}} \cdot \color{blue}{\left(x + 1\right)} \]
      3. associate-*l/71.6%

        \[\leadsto \color{blue}{\frac{1 \cdot \left(x + 1\right)}{e^{x}}} \]
      4. *-lft-identity71.6%

        \[\leadsto \frac{\color{blue}{x + 1}}{e^{x}} \]
    9. Simplified71.6%

      \[\leadsto \color{blue}{\frac{x + 1}{e^{x}}} \]

    if 1 < eps < 2.3500000000000001e204

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified89.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in x around 0 77.4%

      \[\leadsto \frac{\color{blue}{1} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Taylor expanded in eps around 0 68.6%

      \[\leadsto \color{blue}{0.5 \cdot \left(1 + \frac{1}{e^{x}}\right)} \]
    7. Step-by-step derivation
      1. distribute-lft-in68.6%

        \[\leadsto \color{blue}{0.5 \cdot 1 + 0.5 \cdot \frac{1}{e^{x}}} \]
      2. metadata-eval68.6%

        \[\leadsto \color{blue}{0.5} + 0.5 \cdot \frac{1}{e^{x}} \]
      3. associate-*r/68.6%

        \[\leadsto 0.5 + \color{blue}{\frac{0.5 \cdot 1}{e^{x}}} \]
      4. metadata-eval68.6%

        \[\leadsto 0.5 + \frac{\color{blue}{0.5}}{e^{x}} \]
    8. Simplified68.6%

      \[\leadsto \color{blue}{0.5 + \frac{0.5}{e^{x}}} \]

    if 2.3500000000000001e204 < eps < 2.2e263

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified70.2%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 10.6%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around inf 10.6%

      \[\leadsto \frac{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \color{blue}{\varepsilon} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Taylor expanded in eps around 0 69.5%

      \[\leadsto \frac{2 + x \cdot \left(\color{blue}{\frac{1 + \varepsilon \cdot \left(1 + \varepsilon\right)}{\varepsilon}} - \varepsilon\right)}{2} \]

    if 2.2e263 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified78.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 7.6%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 42.0%

      \[\leadsto \frac{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \color{blue}{-1} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Step-by-step derivation
      1. associate--l+42.0%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}{2} \]
      2. flip-+62.6%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}}{2} \]
      3. *-commutative62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      4. *-commutative62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      5. swap-sqr62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(-1 \cdot -1\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      6. metadata-eval62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{1} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      7. *-un-lft-identity62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      8. pow262.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{{\left(1 + \frac{1}{\varepsilon}\right)}^{2}} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
    7. Applied egg-rr62.6%

      \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{{\left(1 + \frac{1}{\varepsilon}\right)}^{2} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}}{2} \]
    8. Step-by-step derivation
      1. unpow262.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      2. difference-of-squares62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      3. associate-+l+62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      4. associate--l+62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \color{blue}{\left(1 + \left(\frac{1}{\varepsilon} - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      5. associate--r-62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \color{blue}{\left(\left(\frac{1}{\varepsilon} - \frac{1}{\varepsilon}\right) + \varepsilon\right)}\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      6. +-inverses62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \left(\color{blue}{0} + \varepsilon\right)\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      7. associate-+l+62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \color{blue}{\left(\left(1 + 0\right) + \varepsilon\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      8. metadata-eval62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(\color{blue}{1} + \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      9. associate--l+62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{1 + \left(\frac{1}{\varepsilon} - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}}{2} \]
      10. associate--r-62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \color{blue}{\left(\left(\frac{1}{\varepsilon} - \frac{1}{\varepsilon}\right) + \varepsilon\right)}}}{2} \]
      11. +-inverses62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \left(\color{blue}{0} + \varepsilon\right)}}{2} \]
      12. associate-+l+62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{\left(1 + 0\right) + \varepsilon}}}{2} \]
      13. metadata-eval62.6%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{1} + \varepsilon}}{2} \]
    9. Simplified62.6%

      \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \varepsilon}}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification70.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 1:\\ \;\;\;\;\frac{x + 1}{e^{x}}\\ \mathbf{elif}\;\varepsilon \leq 2.35 \cdot 10^{+204}:\\ \;\;\;\;0.5 + \frac{0.5}{e^{x}}\\ \mathbf{elif}\;\varepsilon \leq 2.2 \cdot 10^{+263}:\\ \;\;\;\;\frac{2 + x \cdot \left(\frac{1 + \varepsilon \cdot \left(\varepsilon + 1\right)}{\varepsilon} - \varepsilon\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 + x \cdot \frac{\left(\varepsilon + 1\right) \cdot \left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)}{\varepsilon + 1}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 79.3% accurate, 2.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;eps\_m \leq 2.15:\\ \;\;\;\;\frac{x + 1}{e^{x}}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Alternative 6: 2-regime rewrite of the NMSE 6.1 expression,
;; evaluated on the preprocessed input eps_m = (fabs eps).
(FPCore (x eps_m)
 :precision binary64
 (if (<= eps_m 2.15)
   (/ (+ x 1.0) (exp x))
   (/ (+ 1.0 (exp (* x (- -1.0 eps_m)))) 2.0)))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (eps_m <= 2.15) {
		tmp = (x + 1.0) / exp(x);
	} else {
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Alternative 6 (2 regimes); eps_m = |eps| is taken by the caller.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (eps_m <= 2.15d0) then
        ! small |eps|: limit form (x + 1) * exp(-x)
        tmp = (x + 1.0d0) / exp(x)
    else
        tmp = (1.0d0 + exp((x * ((-1.0d0) - eps_m)))) / 2.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Alternative 6 (2 regimes); eps_m = |eps| is taken by the caller.
 * Small |eps| uses the limit form (x + 1) * e^(-x); otherwise the
 * surviving term (1 + e^(x * (-1 - eps_m))) / 2.  Expressions are
 * kept exactly as generated so the double result is unchanged.
 */
public static double code(double x, double eps_m) {
	if (eps_m <= 2.15) {
		return (x + 1.0) / Math.exp(x);
	}
	return (1.0 + Math.exp((x * (-1.0 - eps_m)))) / 2.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Alternative 6 (2 regimes); eps_m = abs(eps) is taken by the caller.

	Small |eps| uses the limit form (x + 1) * e**-x; otherwise the
	surviving term (1 + e**(x * (-1 - eps_m))) / 2.  Expressions are
	kept exactly as generated so the float result is unchanged.
	"""
	if eps_m <= 2.15:
		return (x + 1.0) / math.exp(x)
	return (1.0 + math.exp((x * (-1.0 - eps_m)))) / 2.0
eps_m = abs(eps)
# Alternative 6 (2 regimes); eps_m = abs(eps) is taken by the caller.
function code(x, eps_m)
	tmp = 0.0
	if (eps_m <= 2.15)
		# small |eps|: limit form (x + 1) * exp(-x)
		tmp = Float64(Float64(x + 1.0) / exp(x));
	else
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps_m)))) / 2.0);
	end
	return tmp
end
eps_m = abs(eps);
% Alternative 6 (2 regimes); eps_m = abs(eps) is taken by the caller.
function tmp_2 = code(x, eps_m)
	tmp = 0.0;
	if (eps_m <= 2.15)
		% small |eps|: limit form (x + 1) * exp(-x)
		tmp = (x + 1.0) / exp(x);
	else
		tmp = (1.0 + exp((x * (-1.0 - eps_m)))) / 2.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Alternative 6 (2 regimes); eps$95$m = |eps| (preprocessing applied separately). *)
code[x_, eps$95$m_] := If[LessEqual[eps$95$m, 2.15], N[(N[(x + 1.0), $MachinePrecision] / N[Exp[x], $MachinePrecision]), $MachinePrecision], N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps$95$m), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;eps\_m \leq 2.15:\\
\;\;\;\;\frac{x + 1}{e^{x}}\\

\mathbf{else}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(-1 - eps\_m\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 2.14999999999999991

    1. Initial program 59.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified51.7%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 30.1%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+71.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg71.6%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg71.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses71.6%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg71.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified71.6%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in eps around 0 71.6%

      \[\leadsto \color{blue}{e^{-x} \cdot \left(1 + x\right)} \]
    8. Step-by-step derivation
      1. rec-exp71.6%

        \[\leadsto \color{blue}{\frac{1}{e^{x}}} \cdot \left(1 + x\right) \]
      2. +-commutative71.6%

        \[\leadsto \frac{1}{e^{x}} \cdot \color{blue}{\left(x + 1\right)} \]
      3. associate-*l/71.6%

        \[\leadsto \color{blue}{\frac{1 \cdot \left(x + 1\right)}{e^{x}}} \]
      4. *-lft-identity71.6%

        \[\leadsto \frac{\color{blue}{x + 1}}{e^{x}} \]
    9. Simplified71.6%

      \[\leadsto \color{blue}{\frac{x + 1}{e^{x}}} \]

    if 2.14999999999999991 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified85.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in x around 0 68.5%

      \[\leadsto \frac{\color{blue}{1} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Taylor expanded in x around -inf 68.5%

      \[\leadsto \frac{\color{blue}{1 + \frac{1}{e^{\varepsilon \cdot x - -1 \cdot x}}}}{2} \]
    7. Simplified68.5%

      \[\leadsto \frac{\color{blue}{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 7: 66.8% accurate, 2.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -6:\\ \;\;\;\;\frac{2 + x \cdot \frac{\left(eps\_m + 1\right) \cdot \left(1 + \left(\frac{1}{eps\_m} + \left(\frac{1}{eps\_m} - eps\_m\right)\right)\right)}{eps\_m + 1}}{2}\\ \mathbf{elif}\;x \leq 1.35:\\ \;\;\;\;\frac{\frac{eps\_m \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\right)\right)\right)}{eps\_m}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{e^{x}}\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
;; Alternative 7: 3-regime rewrite split on x (not on eps_m);
;; eps_m = (fabs eps) is applied as preprocessing.  The middle branch is
;; (x + 1) times a cubic polynomial in x (coefficients 1, -1, 0.5, -1/6),
;; wrapped in (* eps_m ...) / eps_m exactly as generated.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -6.0)
   (/
    (+
     2.0
     (*
      x
      (/
       (* (+ eps_m 1.0) (+ 1.0 (+ (/ 1.0 eps_m) (- (/ 1.0 eps_m) eps_m))))
       (+ eps_m 1.0))))
    2.0)
   (if (<= x 1.35)
     (/
      (/
       (*
        eps_m
        (*
         2.0
         (*
          (+ x 1.0)
          (+ 1.0 (* x (+ -1.0 (* x (+ 0.5 (* x -0.16666666666666666)))))))))
       eps_m)
      2.0)
     (/ x (exp x)))))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= -6.0) {
		tmp = (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0;
	} else if (x <= 1.35) {
		tmp = ((eps_m * (2.0 * ((x + 1.0) * (1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666))))))))) / eps_m) / 2.0;
	} else {
		tmp = x / exp(x);
	}
	return tmp;
}
eps_m = abs(eps)
! Alternative 7 (3 regimes, split on x); eps_m = |eps| is taken by the caller.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-6.0d0)) then
        tmp = (2.0d0 + (x * (((eps_m + 1.0d0) * (1.0d0 + ((1.0d0 / eps_m) + ((1.0d0 / eps_m) - eps_m)))) / (eps_m + 1.0d0)))) / 2.0d0
    else if (x <= 1.35d0) then
        ! (x + 1) times a cubic polynomial in x (coefficients 1, -1, 0.5, -1/6)
        tmp = ((eps_m * (2.0d0 * ((x + 1.0d0) * (1.0d0 + (x * ((-1.0d0) + (x * (0.5d0 + (x * (-0.16666666666666666d0)))))))))) / eps_m) / 2.0d0
    else
        tmp = x / exp(x)
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Alternative 7 (3 regimes, split on x); eps_m = |eps| is taken by the
 * caller.  The middle branch is (x + 1) times a cubic polynomial in x
 * (coefficients 1, -1, 0.5, -1/6); the eps_m * (...) / eps_m wrapper is
 * kept verbatim so the double result matches the generated code.
 */
public static double code(double x, double eps_m) {
	if (x <= -6.0) {
		return (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0;
	}
	if (x <= 1.35) {
		return ((eps_m * (2.0 * ((x + 1.0) * (1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666))))))))) / eps_m) / 2.0;
	}
	return x / Math.exp(x);
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Alternative 7 (3 regimes, split on x); eps_m = abs(eps) is taken
	by the caller.

	The middle branch is (x + 1) times a cubic polynomial in x
	(coefficients 1, -1, 0.5, -1/6); the eps_m * (...) / eps_m wrapper
	is kept verbatim so the float result matches the generated code.
	"""
	if x <= -6.0:
		return (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0
	if x <= 1.35:
		return ((eps_m * (2.0 * ((x + 1.0) * (1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666))))))))) / eps_m) / 2.0
	return x / math.exp(x)
eps_m = abs(eps)
# Alternative 7 (3 regimes, split on x); eps_m = abs(eps) is taken by the caller.
function code(x, eps_m)
	tmp = 0.0
	if (x <= -6.0)
		tmp = Float64(Float64(2.0 + Float64(x * Float64(Float64(Float64(eps_m + 1.0) * Float64(1.0 + Float64(Float64(1.0 / eps_m) + Float64(Float64(1.0 / eps_m) - eps_m)))) / Float64(eps_m + 1.0)))) / 2.0);
	elseif (x <= 1.35)
		# (x + 1) times a cubic polynomial in x (coefficients 1, -1, 0.5, -1/6)
		tmp = Float64(Float64(Float64(eps_m * Float64(2.0 * Float64(Float64(x + 1.0) * Float64(1.0 + Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))))))))) / eps_m) / 2.0);
	else
		tmp = Float64(x / exp(x));
	end
	return tmp
end
eps_m = abs(eps);
% Herbie-generated three-regime approximation of
%   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
% with eps_m = abs(eps) applied by the caller as preprocessing.
function tmp_2 = code(x, eps_m)
	if (x <= -6.0)
		% Linearized regime; the (eps_m + 1.0) factor cancels algebraically
		% but is kept verbatim from the derivation.
		slope = ((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0);
		tmp_2 = (2.0 + (x * slope)) / 2.0;
	elseif (x <= 1.35)
		% Cubic Taylor polynomial of exp(-x) about 0; the multiply/divide
		% by eps_m cancels but is kept verbatim from the derivation.
		poly = 1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666)))));
		tmp_2 = ((eps_m * (2.0 * ((x + 1.0) * poly))) / eps_m) / 2.0;
	else
		% Asymptotic regime: x * exp(-x).
		tmp_2 = x / exp(x);
	end
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -6.0], N[(N[(2.0 + N[(x * N[(N[(N[(eps$95$m + 1.0), $MachinePrecision] * N[(1.0 + N[(N[(1.0 / eps$95$m), $MachinePrecision] + N[(N[(1.0 / eps$95$m), $MachinePrecision] - eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(eps$95$m + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1.35], N[(N[(N[(eps$95$m * N[(2.0 * N[(N[(x + 1.0), $MachinePrecision] * N[(1.0 + N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] / 2.0), $MachinePrecision], N[(x / N[Exp[x], $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -6:\\
\;\;\;\;\frac{2 + x \cdot \frac{\left(eps\_m + 1\right) \cdot \left(1 + \left(\frac{1}{eps\_m} + \left(\frac{1}{eps\_m} - eps\_m\right)\right)\right)}{eps\_m + 1}}{2}\\

\mathbf{elif}\;x \leq 1.35:\\
\;\;\;\;\frac{\frac{eps\_m \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\right)\right)\right)}{eps\_m}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{x}{e^{x}}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -6

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 3.2%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 30.1%

      \[\leadsto \frac{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \color{blue}{-1} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Step-by-step derivation
      1. associate--l+30.1%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}{2} \]
      2. flip-+35.3%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}}{2} \]
      3. *-commutative35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      4. *-commutative35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      5. swap-sqr35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(-1 \cdot -1\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      6. metadata-eval35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{1} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      7. *-un-lft-identity35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      8. pow235.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{{\left(1 + \frac{1}{\varepsilon}\right)}^{2}} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
    7. Applied egg-rr35.3%

      \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{{\left(1 + \frac{1}{\varepsilon}\right)}^{2} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}}{2} \]
    8. Step-by-step derivation
      1. unpow235.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      2. difference-of-squares35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      3. associate-+l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      4. associate--l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \color{blue}{\left(1 + \left(\frac{1}{\varepsilon} - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      5. associate--r-35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \color{blue}{\left(\left(\frac{1}{\varepsilon} - \frac{1}{\varepsilon}\right) + \varepsilon\right)}\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      6. +-inverses35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \left(\color{blue}{0} + \varepsilon\right)\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      7. associate-+l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \color{blue}{\left(\left(1 + 0\right) + \varepsilon\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      8. metadata-eval35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(\color{blue}{1} + \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      9. associate--l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{1 + \left(\frac{1}{\varepsilon} - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}}{2} \]
      10. associate--r-35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \color{blue}{\left(\left(\frac{1}{\varepsilon} - \frac{1}{\varepsilon}\right) + \varepsilon\right)}}}{2} \]
      11. +-inverses35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \left(\color{blue}{0} + \varepsilon\right)}}{2} \]
      12. associate-+l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{\left(1 + 0\right) + \varepsilon}}}{2} \]
      13. metadata-eval35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{1} + \varepsilon}}{2} \]
    9. Simplified35.3%

      \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \varepsilon}}}{2} \]

    if -6 < x < 1.3500000000000001

    1. Initial program 54.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified33.2%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 27.6%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+74.0%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg74.0%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg74.0%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses74.0%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out74.0%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in74.0%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg74.0%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified74.0%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in x around 0 73.5%

      \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \color{blue}{\left(1 + x \cdot \left(x \cdot \left(0.5 + -0.16666666666666666 \cdot x\right) - 1\right)\right)}\right)\right)}{\varepsilon}}{2} \]

    if 1.3500000000000001 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 58.3%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+58.3%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg58.3%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg58.3%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses58.3%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out58.3%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in58.3%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg58.3%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified58.3%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in x around inf 58.3%

      \[\leadsto \color{blue}{x \cdot e^{-x}} \]
    8. Step-by-step derivation
      1. rec-exp58.3%

        \[\leadsto x \cdot \color{blue}{\frac{1}{e^{x}}} \]
      2. associate-*r/58.3%

        \[\leadsto \color{blue}{\frac{x \cdot 1}{e^{x}}} \]
      3. *-rgt-identity58.3%

        \[\leadsto \frac{\color{blue}{x}}{e^{x}} \]
    9. Simplified58.3%

      \[\leadsto \color{blue}{\frac{x}{e^{x}}} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification64.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -6:\\ \;\;\;\;\frac{2 + x \cdot \frac{\left(\varepsilon + 1\right) \cdot \left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)}{\varepsilon + 1}}{2}\\ \mathbf{elif}\;x \leq 1.35:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{x}{e^{x}}\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 71.2% accurate, 2.1× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 520:\\ \;\;\;\;0.5 + \frac{0.5}{e^{x}}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Alternative 8: two regimes on x; eps_m is accepted for interface
; compatibility but unused in the body.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 520.0) (+ 0.5 (/ 0.5 (exp x))) 0.0))
eps_m = fabs(eps);
double code(double x, double eps_m) {
	double tmp;
	if (x <= 520.0) {
		tmp = 0.5 + (0.5 / exp(x));
	} else {
		tmp = 0.0;
	}
	return tmp;
}
eps_m = abs(eps)
! Alternative 8: two-regime approximation 0.5d0 * (1 + exp(-x)) for x <= 520,
! and 0 beyond (the derivation collapses that regime to zero).
! eps_m is unused; it is kept for interface compatibility.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 520.0d0) then
        tmp = 0.5d0 + (0.5d0 / exp(x))
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Alternative 8: two-regime approximation 0.5 * (1 + exp(-x)) for x <= 520,
 * and 0 beyond (the derivation collapses that regime to zero).
 * eps_m is unused; it is kept for interface compatibility.
 */
public static double code(double x, double eps_m) {
	return (x <= 520.0) ? 0.5 + (0.5 / Math.exp(x)) : 0.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Alternative 8: two-regime approximation 0.5 * (1 + exp(-x)) for
	x <= 520, and 0.0 beyond (the derivation collapses that regime to zero).

	eps_m is unused; it is kept for interface compatibility.
	"""
	return 0.5 + (0.5 / math.exp(x)) if x <= 520.0 else 0.0
eps_m = abs(eps)
# Alternative 8: two-regime approximation 0.5 * (1 + exp(-x)) for x <= 520,
# and 0.0 beyond (the derivation collapses that regime to zero).
# eps_m is unused; it is kept for interface compatibility.
function code(x, eps_m)
	tmp = 0.0
	if (x <= 520.0)
		tmp = Float64(0.5 + Float64(0.5 / exp(x)));
	else
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
% Alternative 8: two-regime approximation 0.5 * (1 + exp(-x)) for x <= 520,
% and 0 beyond (the derivation collapses that regime to zero).
% eps_m is unused; it is kept for interface compatibility.
function tmp_2 = code(x, eps_m)
	if (x <= 520.0)
		tmp_2 = 0.5 + (0.5 / exp(x));
	else
		tmp_2 = 0.0;
	end
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 520.0], N[(0.5 + N[(0.5 / N[Exp[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0]
\begin{array}{l}
eps\_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 520:\\
\;\;\;\;0.5 + \frac{0.5}{e^{x}}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 520

    1. Initial program 62.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified50.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.3%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in x around 0 77.6%

      \[\leadsto \frac{\color{blue}{1} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Taylor expanded in eps around 0 77.5%

      \[\leadsto \color{blue}{0.5 \cdot \left(1 + \frac{1}{e^{x}}\right)} \]
    7. Step-by-step derivation
      1. distribute-lft-in77.5%

        \[\leadsto \color{blue}{0.5 \cdot 1 + 0.5 \cdot \frac{1}{e^{x}}} \]
      2. metadata-eval77.5%

        \[\leadsto \color{blue}{0.5} + 0.5 \cdot \frac{1}{e^{x}} \]
      3. associate-*r/77.5%

        \[\leadsto 0.5 + \color{blue}{\frac{0.5 \cdot 1}{e^{x}}} \]
      4. metadata-eval77.5%

        \[\leadsto 0.5 + \frac{\color{blue}{0.5}}{e^{x}} \]
    8. Simplified77.5%

      \[\leadsto \color{blue}{0.5 + \frac{0.5}{e^{x}}} \]

    if 520 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 59.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub59.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg59.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp59.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses59.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval59.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified59.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 9: 66.7% accurate, 7.3× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq -6:\\ \;\;\;\;\frac{2 + x \cdot \frac{\left(eps\_m + 1\right) \cdot \left(1 + \left(\frac{1}{eps\_m} + \left(\frac{1}{eps\_m} - eps\_m\right)\right)\right)}{eps\_m + 1}}{2}\\ \mathbf{elif}\;x \leq 520:\\ \;\;\;\;\frac{\frac{eps\_m \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot 0.5\right)\right)\right)\right)}{eps\_m}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Alternative 9: three regimes on x (linearized form below -6, quadratic
; polynomial up to 520, zero beyond). The (+ eps_m 1.0) factor and the
; multiply/divide by eps_m cancel algebraically but are kept verbatim
; from the derivation.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x -6.0)
   (/
    (+
     2.0
     (*
      x
      (/
       (* (+ eps_m 1.0) (+ 1.0 (+ (/ 1.0 eps_m) (- (/ 1.0 eps_m) eps_m))))
       (+ eps_m 1.0))))
    2.0)
   (if (<= x 520.0)
     (/
      (/
       (* eps_m (* 2.0 (* (+ x 1.0) (+ 1.0 (* x (+ -1.0 (* x 0.5)))))))
       eps_m)
      2.0)
     0.0)))
eps_m = fabs(eps);
/* Alternative 9: three-regime approximation of
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
 * with eps_m = fabs(eps) applied by the caller as preprocessing. */
double code(double x, double eps_m) {
	if (x <= -6.0) {
		/* Linearized regime; the (eps_m + 1.0) factor cancels algebraically
		 * but is kept verbatim from the derivation. */
		double slope = ((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0);
		return (2.0 + (x * slope)) / 2.0;
	}
	if (x <= 520.0) {
		/* Quadratic Taylor polynomial of exp(-x) about 0; the multiply/divide
		 * by eps_m cancels but is kept verbatim from the derivation. */
		double poly = 1.0 + (x * (-1.0 + (x * 0.5)));
		return ((eps_m * (2.0 * ((x + 1.0) * poly))) / eps_m) / 2.0;
	}
	/* The derivation collapses this regime to zero. */
	return 0.0;
}
eps_m = abs(eps)
! Alternative 9: three-regime approximation of
!   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
! with eps_m = |eps| applied by the caller as preprocessing.
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= (-6.0d0)) then
        ! Linearized regime; the (eps_m + 1.0d0) factor cancels algebraically
        ! but is kept verbatim from the derivation.
        tmp = (2.0d0 + (x * (((eps_m + 1.0d0) * (1.0d0 + ((1.0d0 / eps_m) + ((1.0d0 / eps_m) - eps_m)))) / (eps_m + 1.0d0)))) / 2.0d0
    else if (x <= 520.0d0) then
        ! Quadratic Taylor polynomial of exp(-x) about 0; the multiply/divide
        ! by eps_m cancels but is kept verbatim from the derivation.
        tmp = ((eps_m * (2.0d0 * ((x + 1.0d0) * (1.0d0 + (x * ((-1.0d0) + (x * 0.5d0))))))) / eps_m) / 2.0d0
    else
        ! The derivation collapses this regime to zero.
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Alternative 9: three-regime approximation of
 * ((1 + 1/eps) * exp(-(1 - eps) * x) - (1/eps - 1) * exp(-(1 + eps) * x)) / 2,
 * where eps_m = |eps| is applied by the caller as preprocessing.
 */
public static double code(double x, double eps_m) {
	if (x <= -6.0) {
		// Linearized regime; the (eps_m + 1.0) factor cancels algebraically
		// but is kept verbatim from the derivation.
		double slope = ((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0);
		return (2.0 + (x * slope)) / 2.0;
	}
	if (x <= 520.0) {
		// Quadratic Taylor polynomial of exp(-x) about 0; the multiply/divide
		// by eps_m cancels but is kept verbatim from the derivation.
		double poly = 1.0 + (x * (-1.0 + (x * 0.5)));
		return ((eps_m * (2.0 * ((x + 1.0) * poly))) / eps_m) / 2.0;
	}
	// The derivation collapses this regime to zero.
	return 0.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Alternative 9: three-regime approximation of
	((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2,
	where eps_m = abs(eps) is applied by the caller as preprocessing.
	"""
	if x <= -6.0:
		# Linearized regime; the (eps_m + 1.0) factor cancels algebraically
		# but is kept verbatim from the derivation.
		slope = ((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)
		return (2.0 + (x * slope)) / 2.0
	if x <= 520.0:
		# Quadratic Taylor polynomial of exp(-x) about 0; the multiply/divide
		# by eps_m cancels but is kept verbatim from the derivation.
		poly = 1.0 + (x * (-1.0 + (x * 0.5)))
		return ((eps_m * (2.0 * ((x + 1.0) * poly))) / eps_m) / 2.0
	# The derivation collapses this regime to zero.
	return 0.0
eps_m = abs(eps)
# Alternative 9: three-regime approximation of
#   ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
# with eps_m = abs(eps) applied by the caller. Explicit Float64(...) wrappers
# pin every intermediate to binary64, matching the report's rounding model.
function code(x, eps_m)
	tmp = 0.0
	if (x <= -6.0)
		# Linearized regime; the (eps_m + 1.0) factor cancels algebraically
		# but is kept verbatim from the derivation.
		tmp = Float64(Float64(2.0 + Float64(x * Float64(Float64(Float64(eps_m + 1.0) * Float64(1.0 + Float64(Float64(1.0 / eps_m) + Float64(Float64(1.0 / eps_m) - eps_m)))) / Float64(eps_m + 1.0)))) / 2.0);
	elseif (x <= 520.0)
		# Quadratic Taylor polynomial of exp(-x) about 0; the multiply/divide
		# by eps_m cancels but is kept verbatim from the derivation.
		tmp = Float64(Float64(Float64(eps_m * Float64(2.0 * Float64(Float64(x + 1.0) * Float64(1.0 + Float64(x * Float64(-1.0 + Float64(x * 0.5))))))) / eps_m) / 2.0);
	else
		# The derivation collapses this regime to zero.
		tmp = 0.0;
	end
	return tmp
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	% Piecewise Herbie-generated approximation; caller supplies eps_m = abs(eps).
	% NOTE(review): operand order is accuracy-sensitive; do not simplify
	% (the eps_m and (eps_m + 1) factors cancel algebraically but are kept).
	tmp = 0.0;
	if (x <= -6.0)
		% Regime x <= -6 (Taylor form in x around 0).
		tmp = (2.0 + (x * (((eps_m + 1.0) * (1.0 + ((1.0 / eps_m) + ((1.0 / eps_m) - eps_m)))) / (eps_m + 1.0)))) / 2.0;
	elseif (x <= 520.0)
		% Regime -6 < x <= 520 (cubic Taylor form).
		tmp = ((eps_m * (2.0 * ((x + 1.0) * (1.0 + (x * (-1.0 + (x * 0.5))))))) / eps_m) / 2.0;
	else
		% Regime x > 520: generated approximation is 0.
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, -6.0], N[(N[(2.0 + N[(x * N[(N[(N[(eps$95$m + 1.0), $MachinePrecision] * N[(1.0 + N[(N[(1.0 / eps$95$m), $MachinePrecision] + N[(N[(1.0 / eps$95$m), $MachinePrecision] - eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(eps$95$m + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 520.0], N[(N[(N[(eps$95$m * N[(2.0 * N[(N[(x + 1.0), $MachinePrecision] * N[(1.0 + N[(x * N[(-1.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps$95$m), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq -6:\\
\;\;\;\;\frac{2 + x \cdot \frac{\left(eps\_m + 1\right) \cdot \left(1 + \left(\frac{1}{eps\_m} + \left(\frac{1}{eps\_m} - eps\_m\right)\right)\right)}{eps\_m + 1}}{2}\\

\mathbf{elif}\;x \leq 520:\\
\;\;\;\;\frac{\frac{eps\_m \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot 0.5\right)\right)\right)\right)}{eps\_m}}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -6

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 3.2%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 30.1%

      \[\leadsto \frac{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \color{blue}{-1} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Step-by-step derivation
      1. associate--l+30.1%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}{2} \]
      2. flip-+35.3%

        \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}}{2} \]
      3. *-commutative35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot -1\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      4. *-commutative35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right) \cdot \color{blue}{\left(-1 \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      5. swap-sqr35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(-1 \cdot -1\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      6. metadata-eval35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{1} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      7. *-un-lft-identity35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      8. pow235.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{{\left(1 + \frac{1}{\varepsilon}\right)}^{2}} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) \cdot -1 - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
    7. Applied egg-rr35.3%

      \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{{\left(1 + \frac{1}{\varepsilon}\right)}^{2} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}}{2} \]
    8. Step-by-step derivation
      1. unpow235.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)} - \left(\frac{1}{\varepsilon} - \varepsilon\right) \cdot \left(\frac{1}{\varepsilon} - \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      2. difference-of-squares35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(\left(1 + \frac{1}{\varepsilon}\right) + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right) \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      3. associate-+l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\color{blue}{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)} \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      4. associate--l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \color{blue}{\left(1 + \left(\frac{1}{\varepsilon} - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      5. associate--r-35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \color{blue}{\left(\left(\frac{1}{\varepsilon} - \frac{1}{\varepsilon}\right) + \varepsilon\right)}\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      6. +-inverses35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \left(\color{blue}{0} + \varepsilon\right)\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      7. associate-+l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \color{blue}{\left(\left(1 + 0\right) + \varepsilon\right)}}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      8. metadata-eval35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(\color{blue}{1} + \varepsilon\right)}{\left(1 + \frac{1}{\varepsilon}\right) - \left(\frac{1}{\varepsilon} - \varepsilon\right)}}{2} \]
      9. associate--l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{1 + \left(\frac{1}{\varepsilon} - \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)}}}{2} \]
      10. associate--r-35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \color{blue}{\left(\left(\frac{1}{\varepsilon} - \frac{1}{\varepsilon}\right) + \varepsilon\right)}}}{2} \]
      11. +-inverses35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \left(\color{blue}{0} + \varepsilon\right)}}{2} \]
      12. associate-+l+35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{\left(1 + 0\right) + \varepsilon}}}{2} \]
      13. metadata-eval35.3%

        \[\leadsto \frac{2 + x \cdot \frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{\color{blue}{1} + \varepsilon}}{2} \]
    9. Simplified35.3%

      \[\leadsto \frac{2 + x \cdot \color{blue}{\frac{\left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right) \cdot \left(1 + \varepsilon\right)}{1 + \varepsilon}}}{2} \]

    if -6 < x < 520

    1. Initial program 54.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified33.6%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 27.5%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+73.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg73.6%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg73.6%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses73.6%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out73.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in73.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg73.6%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified73.6%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in x around 0 73.1%

      \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \color{blue}{\left(1 + x \cdot \left(0.5 \cdot x - 1\right)\right)}\right)\right)}{\varepsilon}}{2} \]

    if 520 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 59.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub59.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg59.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp59.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses59.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval59.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified59.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 64.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -6:\\ \;\;\;\;\frac{2 + x \cdot \frac{\left(\varepsilon + 1\right) \cdot \left(1 + \left(\frac{1}{\varepsilon} + \left(\frac{1}{\varepsilon} - \varepsilon\right)\right)\right)}{\varepsilon + 1}}{2}\\ \mathbf{elif}\;x \leq 520:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot 0.5\right)\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing

Alternative 10: 64.5% accurate, 10.3× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 13.5:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(\frac{1}{eps\_m} + \left(-1 - \frac{1}{eps\_m}\right)\right) - eps\_m\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Herbie alternative 10: linearized form for x <= 13.5, else 0.
; eps_m = |eps| is applied as preprocessing before this core runs.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 13.5)
   (/ (+ 2.0 (* x (- (+ (/ 1.0 eps_m) (- -1.0 (/ 1.0 eps_m))) eps_m))) 2.0)
   0.0))
eps_m = fabs(eps);
/* Herbie alternative 10: linearized form for x <= 13.5, zero above.
 * Expects eps_m == |eps| (computed by the caller as preprocessing).
 * Expression order kept exactly as generated (accuracy-sensitive). */
double code(double x, double eps_m) {
	if (x <= 13.5) {
		return (2.0 + (x * (((1.0 / eps_m) + (-1.0 - (1.0 / eps_m))) - eps_m))) / 2.0;
	}
	/* x > 13.5 (and NaN, which fails the guard, as before). */
	return 0.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Herbie alternative 10: linearized form for x <= 13.5, else 0.
    ! Caller supplies eps_m = abs(eps). Operand order is accuracy-sensitive.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 13.5d0) then
        tmp = (2.0d0 + (x * (((1.0d0 / eps_m) + ((-1.0d0) - (1.0d0 / eps_m))) - eps_m))) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Herbie alternative 10: linearized form for {@code x <= 13.5}, zero above.
 * Expects {@code eps_m == |eps|}. Expression order kept as generated.
 */
public static double code(double x, double eps_m) {
	if (x <= 13.5) {
		return (2.0 + (x * (((1.0 / eps_m) + (-1.0 - (1.0 / eps_m))) - eps_m))) / 2.0;
	}
	// x > 13.5 (NaN also fails the guard and lands here, as before).
	return 0.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Herbie alternative 10: linearized form for x <= 13.5, else 0.

	Expects eps_m = |eps|; expression order kept exactly as generated.
	"""
	if x <= 13.5:
		return (2.0 + (x * (((1.0 / eps_m) + (-1.0 - (1.0 / eps_m))) - eps_m))) / 2.0
	# x > 13.5 (NaN also fails the guard and lands here, as before).
	return 0.0
eps_m = abs(eps)
function code(x, eps_m)
	# Herbie alternative 10: linearized form for x <= 13.5, zero above.
	# Expects eps_m = abs(eps); Float64 wrapping kept as generated.
	if x <= 13.5
		return Float64(Float64(2.0 + Float64(x * Float64(Float64(Float64(1.0 / eps_m) + Float64(-1.0 - Float64(1.0 / eps_m))) - eps_m))) / 2.0)
	end
	return 0.0
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	% Herbie alternative 10: linearized form for x <= 13.5, else 0.
	% Caller supplies eps_m = abs(eps); operand order is accuracy-sensitive.
	tmp = 0.0;
	if (x <= 13.5)
		tmp = (2.0 + (x * (((1.0 / eps_m) + (-1.0 - (1.0 / eps_m))) - eps_m))) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 13.5], N[(N[(2.0 + N[(x * N[(N[(N[(1.0 / eps$95$m), $MachinePrecision] + N[(-1.0 - N[(1.0 / eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - eps$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 13.5:\\
\;\;\;\;\frac{2 + x \cdot \left(\left(\frac{1}{eps\_m} + \left(-1 - \frac{1}{eps\_m}\right)\right) - eps\_m\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 13.5

    1. Initial program 62.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified50.6%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 60.9%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 65.0%

      \[\leadsto \frac{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \color{blue}{-1} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]

    if 13.5 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 58.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub58.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg58.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp58.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses58.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval58.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified58.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 63.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 13.5:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(\frac{1}{\varepsilon} + \left(-1 - \frac{1}{\varepsilon}\right)\right) - \varepsilon\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing

Alternative 11: 64.5% accurate, 18.9× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 13.5:\\ \;\;\;\;\frac{2 - x \cdot eps\_m}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
; Herbie alternative 11: (2 - x*eps_m)/2 for x <= 13.5, else 0.
; eps_m = |eps| is applied as preprocessing before this core runs.
(FPCore (x eps_m)
 :precision binary64
 (if (<= x 13.5) (/ (- 2.0 (* x eps_m)) 2.0) 0.0))
eps_m = fabs(eps);
/* Herbie alternative 11: first-order form (2 - x*eps_m)/2 for x <= 13.5,
 * zero above. Expects eps_m == |eps| (caller preprocessing). */
double code(double x, double eps_m) {
	if (x <= 13.5) {
		return (2.0 - (x * eps_m)) / 2.0;
	}
	/* x > 13.5 (NaN also fails the guard, as before). */
	return 0.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Herbie alternative 11: (2 - x*eps_m)/2 for x <= 13.5, else 0.
    ! Caller supplies eps_m = abs(eps).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 13.5d0) then
        tmp = (2.0d0 - (x * eps_m)) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Herbie alternative 11: first-order form {@code (2 - x*eps_m)/2} for
 * {@code x <= 13.5}, zero above. Expects {@code eps_m == |eps|}.
 */
public static double code(double x, double eps_m) {
	if (x <= 13.5) {
		return (2.0 - (x * eps_m)) / 2.0;
	}
	// x > 13.5 (NaN also fails the guard and lands here, as before).
	return 0.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Herbie alternative 11: (2 - x*eps_m)/2 for x <= 13.5, else 0.

	Expects eps_m = |eps| (computed by the caller as preprocessing).
	"""
	if x <= 13.5:
		return (2.0 - (x * eps_m)) / 2.0
	# x > 13.5 (NaN also fails the guard and lands here, as before).
	return 0.0
eps_m = abs(eps)
function code(x, eps_m)
	# Herbie alternative 11: (2 - x*eps_m)/2 for x <= 13.5, zero above.
	# Expects eps_m = abs(eps); Float64 wrapping kept as generated.
	if x <= 13.5
		return Float64(Float64(2.0 - Float64(x * eps_m)) / 2.0)
	end
	return 0.0
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	% Herbie alternative 11: (2 - x*eps_m)/2 for x <= 13.5, else 0.
	% Caller supplies eps_m = abs(eps).
	tmp = 0.0;
	if (x <= 13.5)
		tmp = (2.0 - (x * eps_m)) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 13.5], N[(N[(2.0 - N[(x * eps$95$m), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 13.5:\\
\;\;\;\;\frac{2 - x \cdot eps\_m}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 13.5

    1. Initial program 62.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified50.6%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 60.9%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 65.0%

      \[\leadsto \frac{2 + x \cdot \left(\left(\color{blue}{\frac{-1}{\varepsilon}} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Taylor expanded in x around 0 65.0%

      \[\leadsto \frac{2 + \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    7. Step-by-step derivation
      1. associate-*r*65.0%

        \[\leadsto \frac{2 + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}{2} \]
      2. neg-mul-165.0%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right)} \cdot x}{2} \]
    8. Simplified65.0%

      \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon\right) \cdot x}}{2} \]

    if 13.5 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 58.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub58.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg58.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp58.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses58.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval58.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified58.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 63.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 13.5:\\ \;\;\;\;\frac{2 - x \cdot \varepsilon}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing

Alternative 12: 57.5% accurate, 22.7× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 2:\\ \;\;\;\;\frac{2 - x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 (if (<= x 2.0) (/ (- 2.0 x) 2.0) 0.0))
eps_m = fabs(eps);
/* Herbie alternative 12: (2 - x)/2 for x <= 2, zero above.
 * eps_m (== |eps|) is unused in this alternative but kept for the
 * common signature. */
double code(double x, double eps_m) {
	if (x <= 2.0) {
		return (2.0 - x) / 2.0;
	}
	/* x > 2 (NaN also fails the guard, as before). */
	return 0.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Herbie alternative 12: (2 - x)/2 for x <= 2, else 0.
    ! eps_m (= abs(eps)) is unused here but kept for the common signature.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 2.0d0) then
        tmp = (2.0d0 - x) / 2.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Herbie alternative 12: {@code (2 - x)/2} for {@code x <= 2}, zero above.
 * {@code eps_m} is unused here but kept for the common signature.
 */
public static double code(double x, double eps_m) {
	if (x <= 2.0) {
		return (2.0 - x) / 2.0;
	}
	// x > 2 (NaN also fails the guard and lands here, as before).
	return 0.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Herbie alternative 12: (2 - x)/2 for x <= 2, else 0.

	eps_m (= |eps|) is unused here but kept for the common signature.
	"""
	if x <= 2.0:
		return (2.0 - x) / 2.0
	# x > 2 (NaN also fails the guard and lands here, as before).
	return 0.0
eps_m = abs(eps)
function code(x, eps_m)
	# Herbie alternative 12: (2 - x)/2 for x <= 2, zero above.
	# eps_m (= abs(eps)) is unused here but kept for the common signature.
	if x <= 2.0
		return Float64(Float64(2.0 - x) / 2.0)
	end
	return 0.0
end
eps_m = abs(eps);
function tmp_2 = code(x, eps_m)
	% Herbie alternative 12: (2 - x)/2 for x <= 2, else 0.
	% eps_m (= abs(eps)) is unused here but kept for the common signature.
	tmp = 0.0;
	if (x <= 2.0)
		tmp = (2.0 - x) / 2.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[LessEqual[x, 2.0], N[(N[(2.0 - x), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 2:\\
\;\;\;\;\frac{2 - x}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 2

    1. Initial program 62.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified50.6%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.3%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 99.3%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative99.3%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified99.3%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 61.1%

      \[\leadsto \frac{\color{blue}{2 + -1 \cdot x}}{2} \]
    9. Step-by-step derivation
      1. neg-mul-161.1%

        \[\leadsto \frac{2 + \color{blue}{\left(-x\right)}}{2} \]
      2. unsub-neg61.1%

        \[\leadsto \frac{\color{blue}{2 - x}}{2} \]
    10. Simplified61.1%

      \[\leadsto \frac{\color{blue}{2 - x}}{2} \]

    if 2 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 58.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub58.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg58.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp58.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses58.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval58.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified58.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 13: 57.5% accurate, 37.7× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ \begin{array}{l} \mathbf{if}\;x \leq 490:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 (if (<= x 490.0) 1.0 0.0))
eps_m = fabs(eps);
/* Herbie alternative 13: constant 1 for x <= 490, 0 above.
 * eps_m is unused here but kept for the common signature.
 * NaN fails the ordered comparison and yields 0.0, as in the original. */
double code(double x, double eps_m) {
	return (x <= 490.0) ? 1.0 : 0.0;
}
eps_m = abs(eps)
real(8) function code(x, eps_m)
    ! Herbie alternative 13: constant 1 for x <= 490, else 0.
    ! eps_m (= abs(eps)) is unused here but kept for the common signature.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    real(8) :: tmp
    if (x <= 490.0d0) then
        tmp = 1.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
eps_m = Math.abs(eps);
/**
 * Herbie alternative 13: constant 1 for {@code x <= 490}, 0 above.
 * {@code eps_m} is unused here but kept for the common signature.
 * NaN fails the ordered comparison and yields 0.0, as in the original.
 */
public static double code(double x, double eps_m) {
	return (x <= 490.0) ? 1.0 : 0.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	"""Herbie alternative 13: constant 1 for x <= 490, else 0.

	eps_m (= |eps|) is unused here but kept for the common signature.
	NaN fails the comparison and yields 0.0, as in the original.
	"""
	return 1.0 if x <= 490.0 else 0.0
eps_m = abs(eps)
function code(x, eps_m)
	# Herbie alternative 13: constant 1 for x <= 490, else 0.
	# eps_m (= abs(eps)) is unused here but kept for the common signature.
	# NaN fails the comparison and yields 0.0, as in the original.
	return x <= 490.0 ? 1.0 : 0.0
end
eps_m = abs(eps);
% Herbie alternative 13: step approximation — 1 when x <= 490, 0 beyond.
function tmp_2 = code(x, eps_m)
	tmp_2 = double(x <= 490.0);
end
eps_m = N[Abs[eps], $MachinePrecision]
code[x_, eps$95$m_] := If[x <= 490.0, 1.0, 0.0] (* Herbie alternative 13: step at x = 490 *)
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
\begin{array}{l}
\mathbf{if}\;x \leq 490:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 490

    1. Initial program 62.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 50.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.3%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in x around 0 77.6%

      \[\leadsto \frac{\color{blue}{1} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Taylor expanded in x around 0 60.6%

      \[\leadsto \color{blue}{1} \]

    if 490 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 59.3%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub 59.3%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg 59.3%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp 59.3%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses 59.3%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval 59.3%

        \[\leadsto \color{blue}{0} \]
    6. Simplified 59.3%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 14: 16.6% accurate, 227.0× speedup?

\[\begin{array}{l} eps_m = \left|\varepsilon\right| \\ 0 \end{array} \]
eps_m = (fabs.f64 eps)
(FPCore (x eps_m) :precision binary64 0.0)
eps_m = fabs(eps);
/* Herbie alternative 14: constant-0 approximation (16.6% accurate, 227x speedup per report). */
double code(double x, double eps_m) {
	return 0.0;
}
eps_m = abs(eps)
! Herbie alternative 14: constant-0 approximation (16.6% accurate, 227x speedup per report).
real(8) function code(x, eps_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps_m
    code = 0.0d0
end function
eps_m = Math.abs(eps);
// Herbie alternative 14: constant-0 approximation (16.6% accurate, 227x speedup per report).
public static double code(double x, double eps_m) {
	return 0.0;
}
eps_m = math.fabs(eps)
def code(x, eps_m):
	# Herbie alternative 14: constant-0 approximation (16.6% accurate, 227x speedup per report).
	return 0.0
eps_m = abs(eps)
# Herbie alternative 14: constant-0 approximation (16.6% accurate, 227x speedup per report).
function code(x, eps_m)
	return 0.0
end
eps_m = abs(eps);
% Herbie alternative 14: constant-0 approximation (16.6% accurate, 227x speedup per report).
function tmp = code(x, eps_m)
	tmp = 0.0;
end
eps_m = N[Abs[eps], $MachinePrecision]
(* Herbie alternative 14: constant-0 approximation (16.6% accurate, 227x speedup per report). *)
code[x_, eps$95$m_] := 0.0
\begin{array}{l}
eps_m = \left|\varepsilon\right|

\\
0
\end{array}
Derivation
  1. Initial program 71.2%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Simplified 62.0%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
  3. Add Preprocessing
  4. Taylor expanded in eps around 0 15.2%

    \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
  5. Step-by-step derivation
    1. div-sub 15.2%

      \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
    2. mul-1-neg 15.2%

      \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
    3. rec-exp 15.2%

      \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
    4. +-inverses 15.4%

      \[\leadsto 0.5 \cdot \color{blue}{0} \]
    5. metadata-eval 15.4%

      \[\leadsto \color{blue}{0} \]
  6. Simplified 15.4%

    \[\leadsto \color{blue}{0} \]
  7. Add Preprocessing

Reproduce

?
herbie shell --seed 2024180 
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))