NMSE Section 6.1, A

Percentage Accurate: 73.2% → 99.7%
Time: 14.7s
Alternatives: 20
Speedup: 1.1×

Specification

?
\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; Initial program: ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2,
;; evaluated directly in binary64.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
/* Direct binary64 evaluation of
 * ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2. */
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Direct double-precision evaluation of
! ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/** Direct binary64 evaluation of
 *  ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2. */
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	"""Direct binary64 evaluation of
	((1 + 1/eps)*e^{-(1-eps)x} - (1/eps - 1)*e^{-(1+eps)x}) / 2.

	Floating-point operation order matches the generated expression exactly.
	"""
	inv_eps = 1.0 / eps
	grow = (1.0 + inv_eps) * math.exp(-((1.0 - eps) * x))
	decay = (inv_eps - 1.0) * math.exp(-((1.0 + eps) * x))
	return (grow - decay) / 2.0
# Direct Float64 evaluation of ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2;
# explicit Float64() wrappers pin every intermediate to binary64.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Direct double-precision evaluation of
% ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Direct evaluation of ((1 + 1/eps)*E^(-(1-eps)x) - (1/eps - 1)*E^(-(1+eps)x)) / 2;
   each intermediate is rounded with N[..., $MachinePrecision] to model binary64. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 20 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 73.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \end{array} \]
;; Initial program (repeated listing): ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
/* Initial program (repeated listing): direct binary64 evaluation of
 * ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2. */
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
! Initial program (repeated listing): direct double-precision evaluation of
! ((1 + 1/eps) * exp(-(1-eps)*x) - (1/eps - 1) * exp(-(1+eps)*x)) / 2.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (((1.0d0 + (1.0d0 / eps)) * exp(-((1.0d0 - eps) * x))) - (((1.0d0 / eps) - 1.0d0) * exp(-((1.0d0 + eps) * x)))) / 2.0d0
end function
/** Initial program (repeated listing): direct binary64 evaluation of
 *  ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2. */
public static double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * Math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * Math.exp(-((1.0 + eps) * x)))) / 2.0;
}
def code(x, eps):
	"""Initial program: ((1 + 1/eps)*e^{-(1-eps)x} - (1/eps - 1)*e^{-(1+eps)x}) / 2,
	with the exact floating-point operation order of the generated expression."""
	recip = 1.0 / eps
	first_term = (1.0 + recip) * math.exp(-((1.0 - eps) * x))
	second_term = (recip - 1.0) * math.exp(-((1.0 + eps) * x))
	difference = first_term - second_term
	return difference / 2.0
# Initial program (repeated listing); Float64() wrappers pin every intermediate to binary64.
function code(x, eps)
	return Float64(Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(-Float64(Float64(1.0 - eps) * x)))) - Float64(Float64(Float64(1.0 / eps) - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))) / 2.0)
end
% Initial program (repeated listing): direct double-precision evaluation of
% ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
	tmp = (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
end
(* Initial program (repeated listing); N[..., $MachinePrecision] models binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\end{array}

Alternative 1: 99.7% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\\ t_1 := e^{-x}\\ t_2 := t\_1 + x \cdot t\_1\\ \mathbf{if}\;t\_0 \leq 2:\\ \;\;\;\;\frac{t\_2 + t\_2}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t\_0}{2}\\ \end{array} \end{array} \]
;; Alternative 1: branch on t_0, the directly-evaluated expression.  When t_0 <= 2,
;; use the eps->0 Taylor form e^-x * (1 + x) (computed as t_1 + x*t_1) instead.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0
         (+
          (* (+ 1.0 (/ 1.0 eps)) (exp (* x (+ eps -1.0))))
          (* (exp (* x (- -1.0 eps))) (+ 1.0 (/ -1.0 eps)))))
        (t_1 (exp (- x)))
        (t_2 (+ t_1 (* x t_1))))
   (if (<= t_0 2.0) (/ (+ t_2 t_2) 2.0) (/ t_0 2.0))))
/* Alternative 1: branch on t_0, the directly-evaluated expression.  When
 * t_0 <= 2, the report's derivation substitutes the eps->0 Taylor form
 * e^-x * (1 + x); otherwise return the direct value halved. */
double code(double x, double eps) {
	double t_0 = ((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)));
	double t_1 = exp(-x);
	double t_2 = t_1 + (x * t_1);  /* e^-x * (1 + x), eps-free Taylor form */
	double tmp;
	if (t_0 <= 2.0) {
		tmp = (t_2 + t_2) / 2.0;
	} else {
		tmp = t_0 / 2.0;
	}
	return tmp;
}
! Alternative 1: branch on t_0, the directly-evaluated expression.  When
! t_0 <= 2, use the eps->0 Taylor form exp(-x) * (1 + x) instead.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: t_2
    real(8) :: tmp
    t_0 = ((1.0d0 + (1.0d0 / eps)) * exp((x * (eps + (-1.0d0))))) + (exp((x * ((-1.0d0) - eps))) * (1.0d0 + ((-1.0d0) / eps)))
    t_1 = exp(-x)
    ! t_2 = exp(-x) * (1 + x), the eps-free Taylor form
    t_2 = t_1 + (x * t_1)
    if (t_0 <= 2.0d0) then
        tmp = (t_2 + t_2) / 2.0d0
    else
        tmp = t_0 / 2.0d0
    end if
    code = tmp
end function
/** Alternative 1: branch on t_0, the directly-evaluated expression.  When
 *  t_0 <= 2, use the eps->0 Taylor form e^-x * (1 + x) instead. */
public static double code(double x, double eps) {
	double t_0 = ((1.0 + (1.0 / eps)) * Math.exp((x * (eps + -1.0)))) + (Math.exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)));
	double t_1 = Math.exp(-x);
	double t_2 = t_1 + (x * t_1);  // e^-x * (1 + x), eps-free Taylor form
	double tmp;
	if (t_0 <= 2.0) {
		tmp = (t_2 + t_2) / 2.0;
	} else {
		tmp = t_0 / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 1: branch on the directly-evaluated expression.

	When it is <= 2 the report's derivation substitutes the eps->0 Taylor
	form e^-x * (1 + x); otherwise the direct value is halved.
	"""
	direct = ((1.0 + (1.0 / eps)) * math.exp((x * (eps + -1.0)))) + (math.exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)))
	if direct <= 2.0:
		base = math.exp(-x)
		taylor = base + (x * base)  # e^-x * (1 + x)
		return (taylor + taylor) / 2.0
	return direct / 2.0
# Alternative 1: branch on t_0, the directly-evaluated expression.  When
# t_0 <= 2, use the eps->0 Taylor form exp(-x) * (1 + x) instead.
function code(x, eps)
	t_0 = Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(x * Float64(eps + -1.0)))) + Float64(exp(Float64(x * Float64(-1.0 - eps))) * Float64(1.0 + Float64(-1.0 / eps))))
	t_1 = exp(Float64(-x))
	t_2 = Float64(t_1 + Float64(x * t_1))
	tmp = 0.0
	if (t_0 <= 2.0)
		tmp = Float64(Float64(t_2 + t_2) / 2.0);
	else
		tmp = Float64(t_0 / 2.0);
	end
	return tmp
end
% Alternative 1: branch on t_0, the directly-evaluated expression.  When
% t_0 <= 2, use the eps->0 Taylor form exp(-x) * (1 + x) instead.
function tmp_2 = code(x, eps)
	t_0 = ((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)));
	t_1 = exp(-x);
	t_2 = t_1 + (x * t_1);
	tmp = 0.0;
	if (t_0 <= 2.0)
		tmp = (t_2 + t_2) / 2.0;
	else
		tmp = t_0 / 2.0;
	end
	tmp_2 = tmp;
end
(* Alternative 1: branch on t$95$0 (the direct evaluation); when <= 2, use the
   eps->0 Taylor form e^-x * (1 + x).  N[..., $MachinePrecision] models binary64. *)
code[x_, eps_] := Block[{t$95$0 = N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[Exp[(-x)], $MachinePrecision]}, Block[{t$95$2 = N[(t$95$1 + N[(x * t$95$1), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, 2.0], N[(N[(t$95$2 + t$95$2), $MachinePrecision] / 2.0), $MachinePrecision], N[(t$95$0 / 2.0), $MachinePrecision]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\\
t_1 := e^{-x}\\
t_2 := t\_1 + x \cdot t\_1\\
\mathbf{if}\;t\_0 \leq 2:\\
\;\;\;\;\frac{t\_2 + t\_2}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{t\_0}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 2

    1. Initial program 49.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified49.3%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right) - \left(-1 \cdot e^{-1 \cdot x} + -1 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{2} \]

    if 2 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x)))))

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}}}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\ \;\;\;\;\frac{\left(e^{-x} + x \cdot e^{-x}\right) + \left(e^{-x} + x \cdot e^{-x}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 99.6% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\\ \mathbf{if}\;t\_0 \leq 2:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t\_0}{2}\\ \end{array} \end{array} \]
;; Alternative 2: same regime split as alternative 1, but the small-t_0 branch
;; keeps the generated (eps * ...) / eps form around 2 * e^-x * (1 + x).
(FPCore (x eps)
 :precision binary64
 (let* ((t_0
         (+
          (* (+ 1.0 (/ 1.0 eps)) (exp (* x (+ eps -1.0))))
          (* (exp (* x (- -1.0 eps))) (+ 1.0 (/ -1.0 eps))))))
   (if (<= t_0 2.0)
     (/ (/ (* eps (* 2.0 (* (exp (- x)) (+ 1.0 x)))) eps) 2.0)
     (/ t_0 2.0))))
/* Alternative 2: same regime split as alternative 1; the small-t_0 branch
 * keeps the generated (eps * ...) / eps form around 2 * e^-x * (1 + x). */
double code(double x, double eps) {
	double t_0 = ((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)));
	double tmp;
	if (t_0 <= 2.0) {
		tmp = ((eps * (2.0 * (exp(-x) * (1.0 + x)))) / eps) / 2.0;  /* eps multiplied in and divided back out */
	} else {
		tmp = t_0 / 2.0;
	}
	return tmp;
}
! Alternative 2: same regime split as alternative 1; the small-t_0 branch
! keeps the generated (eps * ...) / eps form around 2 * exp(-x) * (1 + x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = ((1.0d0 + (1.0d0 / eps)) * exp((x * (eps + (-1.0d0))))) + (exp((x * ((-1.0d0) - eps))) * (1.0d0 + ((-1.0d0) / eps)))
    if (t_0 <= 2.0d0) then
        tmp = ((eps * (2.0d0 * (exp(-x) * (1.0d0 + x)))) / eps) / 2.0d0
    else
        tmp = t_0 / 2.0d0
    end if
    code = tmp
end function
/** Alternative 2: same regime split as alternative 1; the small-t_0 branch
 *  keeps the generated (eps * ...) / eps form around 2 * e^-x * (1 + x). */
public static double code(double x, double eps) {
	double t_0 = ((1.0 + (1.0 / eps)) * Math.exp((x * (eps + -1.0)))) + (Math.exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)));
	double tmp;
	if (t_0 <= 2.0) {
		tmp = ((eps * (2.0 * (Math.exp(-x) * (1.0 + x)))) / eps) / 2.0;  // eps multiplied in and divided back out
	} else {
		tmp = t_0 / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 2: same regime split as alternative 1, but the small
	branch keeps the generated (eps * ...) / eps form around
	2 * e^-x * (1 + x)."""
	direct = ((1.0 + (1.0 / eps)) * math.exp((x * (eps + -1.0)))) + (math.exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)))
	if direct <= 2.0:
		return ((eps * (2.0 * (math.exp(-x) * (1.0 + x)))) / eps) / 2.0
	return direct / 2.0
# Alternative 2: same regime split as alternative 1; the small-t_0 branch
# keeps the generated (eps * ...) / eps form around 2 * exp(-x) * (1 + x).
function code(x, eps)
	t_0 = Float64(Float64(Float64(1.0 + Float64(1.0 / eps)) * exp(Float64(x * Float64(eps + -1.0)))) + Float64(exp(Float64(x * Float64(-1.0 - eps))) * Float64(1.0 + Float64(-1.0 / eps))))
	tmp = 0.0
	if (t_0 <= 2.0)
		tmp = Float64(Float64(Float64(eps * Float64(2.0 * Float64(exp(Float64(-x)) * Float64(1.0 + x)))) / eps) / 2.0);
	else
		tmp = Float64(t_0 / 2.0);
	end
	return tmp
end
% Alternative 2: same regime split as alternative 1; the small-t_0 branch
% keeps the generated (eps * ...) / eps form around 2 * exp(-x) * (1 + x).
function tmp_2 = code(x, eps)
	t_0 = ((1.0 + (1.0 / eps)) * exp((x * (eps + -1.0)))) + (exp((x * (-1.0 - eps))) * (1.0 + (-1.0 / eps)));
	tmp = 0.0;
	if (t_0 <= 2.0)
		tmp = ((eps * (2.0 * (exp(-x) * (1.0 + x)))) / eps) / 2.0;
	else
		tmp = t_0 / 2.0;
	end
	tmp_2 = tmp;
end
(* Alternative 2: branch on t$95$0; the small branch keeps the generated
   (eps * ...) / eps form.  N[..., $MachinePrecision] models binary64. *)
code[x_, eps_] := Block[{t$95$0 = N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, 2.0], N[(N[(N[(eps * N[(2.0 * N[(N[Exp[(-x)], $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], N[(t$95$0 / 2.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\\
\mathbf{if}\;t\_0 \leq 2:\\
\;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{t\_0}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x))))) < 2

    1. Initial program 49.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified29.4%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 48.4%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+100.0%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg100.0%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg100.0%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses100.0%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out100.0%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in100.0%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg100.0%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]

    if 2 < (-.f64 (*.f64 (+.f64 #s(literal 1 binary64) (/.f64 #s(literal 1 binary64) eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 #s(literal 1 binary64) eps) x)))) (*.f64 (-.f64 (/.f64 #s(literal 1 binary64) eps) #s(literal 1 binary64)) (exp.f64 (neg.f64 (*.f64 (+.f64 #s(literal 1 binary64) eps) x)))))

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}}}{2} \]
    5. Step-by-step derivation
      1. associate-*r*100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}}{2} \]
      2. neg-mul-1100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x}}{2} \]
      3. distribute-rgt-in100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}}}{2} \]
      4. unsub-neg100.0%

        \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}}}{2} \]
    6. Simplified100.0%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq 2:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 79.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\varepsilon \leq 15:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2}\\ \end{array} \end{array} \]
;; Alternative 3: regime split on eps itself (threshold 15).  Small eps uses the
;; Taylor form; large eps uses e^(eps*x) + 1/e^(x + eps*x), all halved.
(FPCore (x eps)
 :precision binary64
 (if (<= eps 15.0)
   (/ (/ (* eps (* 2.0 (* (exp (- x)) (+ 1.0 x)))) eps) 2.0)
   (/ (+ (exp (* eps x)) (/ 1.0 (exp (+ x (* eps x))))) 2.0)))
/* Alternative 3: regime split on eps (threshold 15).  Small eps uses the
 * Taylor form; large eps uses e^(eps*x) + 1/e^(x + eps*x), halved. */
double code(double x, double eps) {
	double tmp;
	if (eps <= 15.0) {
		tmp = ((eps * (2.0 * (exp(-x) * (1.0 + x)))) / eps) / 2.0;
	} else {
		tmp = (exp((eps * x)) + (1.0 / exp((x + (eps * x))))) / 2.0;
	}
	return tmp;
}
! Alternative 3: regime split on eps (threshold 15).  Small eps uses the
! Taylor form; large eps uses exp(eps*x) + 1/exp(x + eps*x), halved.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (eps <= 15.0d0) then
        tmp = ((eps * (2.0d0 * (exp(-x) * (1.0d0 + x)))) / eps) / 2.0d0
    else
        tmp = (exp((eps * x)) + (1.0d0 / exp((x + (eps * x))))) / 2.0d0
    end if
    code = tmp
end function
/** Alternative 3: regime split on eps (threshold 15).  Small eps uses the
 *  Taylor form; large eps uses e^(eps*x) + 1/e^(x + eps*x), halved. */
public static double code(double x, double eps) {
	double tmp;
	if (eps <= 15.0) {
		tmp = ((eps * (2.0 * (Math.exp(-x) * (1.0 + x)))) / eps) / 2.0;
	} else {
		tmp = (Math.exp((eps * x)) + (1.0 / Math.exp((x + (eps * x))))) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 3: regime split on eps itself (threshold 15).

	Small eps uses the Taylor form; large eps uses
	e^(eps*x) + 1/e^(x + eps*x), halved.
	"""
	if eps <= 15.0:
		return ((eps * (2.0 * (math.exp(-x) * (1.0 + x)))) / eps) / 2.0
	return (math.exp((eps * x)) + (1.0 / math.exp((x + (eps * x))))) / 2.0
# Alternative 3: regime split on eps (threshold 15).  Small eps uses the
# Taylor form; large eps uses exp(eps*x) + 1/exp(x + eps*x), halved.
function code(x, eps)
	tmp = 0.0
	if (eps <= 15.0)
		tmp = Float64(Float64(Float64(eps * Float64(2.0 * Float64(exp(Float64(-x)) * Float64(1.0 + x)))) / eps) / 2.0);
	else
		tmp = Float64(Float64(exp(Float64(eps * x)) + Float64(1.0 / exp(Float64(x + Float64(eps * x))))) / 2.0);
	end
	return tmp
end
% Alternative 3: regime split on eps (threshold 15).  Small eps uses the
% Taylor form; large eps uses exp(eps*x) + 1/exp(x + eps*x), halved.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (eps <= 15.0)
		tmp = ((eps * (2.0 * (exp(-x) * (1.0 + x)))) / eps) / 2.0;
	else
		tmp = (exp((eps * x)) + (1.0 / exp((x + (eps * x))))) / 2.0;
	end
	tmp_2 = tmp;
end
(* Alternative 3: regime split on eps (threshold 15); N[..., $MachinePrecision] models binary64. *)
code[x_, eps_] := If[LessEqual[eps, 15.0], N[(N[(N[(eps * N[(2.0 * N[(N[Exp[(-x)], $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(eps * x), $MachinePrecision]], $MachinePrecision] + N[(1.0 / N[Exp[N[(x + N[(eps * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 15:\\
\;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 15

    1. Initial program 61.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified51.1%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 31.1%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+70.8%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg70.8%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg70.8%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses70.8%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out70.8%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in70.8%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg70.8%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified70.8%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]

    if 15 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified83.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification78.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 15:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 79.1% accurate, 1.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\varepsilon \leq 15:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + e^{\varepsilon \cdot x}}{2}\\ \end{array} \end{array} \]
;; Alternative 4: regime split on eps (threshold 15).  Small eps uses the Taylor
;; form; large eps uses e^(x*(-1-eps)) + e^(eps*x), all halved.
(FPCore (x eps)
 :precision binary64
 (if (<= eps 15.0)
   (/ (/ (* eps (* 2.0 (* (exp (- x)) (+ 1.0 x)))) eps) 2.0)
   (/ (+ (exp (* x (- -1.0 eps))) (exp (* eps x))) 2.0)))
/* Alternative 4: regime split on eps (threshold 15).  Small eps uses the
 * Taylor form; large eps uses e^(x*(-1-eps)) + e^(eps*x), halved. */
double code(double x, double eps) {
	double tmp;
	if (eps <= 15.0) {
		tmp = ((eps * (2.0 * (exp(-x) * (1.0 + x)))) / eps) / 2.0;
	} else {
		tmp = (exp((x * (-1.0 - eps))) + exp((eps * x))) / 2.0;
	}
	return tmp;
}
! Alternative 4: regime split on eps (threshold 15).  Small eps uses the
! Taylor form; large eps uses exp(x*(-1-eps)) + exp(eps*x), halved.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (eps <= 15.0d0) then
        tmp = ((eps * (2.0d0 * (exp(-x) * (1.0d0 + x)))) / eps) / 2.0d0
    else
        tmp = (exp((x * ((-1.0d0) - eps))) + exp((eps * x))) / 2.0d0
    end if
    code = tmp
end function
/** Alternative 4: regime split on eps (threshold 15).  Small eps uses the
 *  Taylor form; large eps uses e^(x*(-1-eps)) + e^(eps*x), halved. */
public static double code(double x, double eps) {
	double tmp;
	if (eps <= 15.0) {
		tmp = ((eps * (2.0 * (Math.exp(-x) * (1.0 + x)))) / eps) / 2.0;
	} else {
		tmp = (Math.exp((x * (-1.0 - eps))) + Math.exp((eps * x))) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 4: regime split on eps itself (threshold 15).

	Small eps uses the Taylor form; large eps uses
	e^(x*(-1-eps)) + e^(eps*x), halved.
	"""
	if eps <= 15.0:
		return ((eps * (2.0 * (math.exp(-x) * (1.0 + x)))) / eps) / 2.0
	return (math.exp((x * (-1.0 - eps))) + math.exp((eps * x))) / 2.0
# Herbie alternative 4: piecewise rewrite of the NMSE 6.1 expression.
function code(x, eps)
	tmp = 0.0
	if (eps <= 15.0)
		# eps <= 15: eps factors cancel algebraically; form mirrors the FPCore.
		tmp = Float64(Float64(Float64(eps * Float64(2.0 * Float64(exp(Float64(-x)) * Float64(1.0 + x)))) / eps) / 2.0);
	else
		# eps > 15: mean of the two exponential terms.
		tmp = Float64(Float64(exp(Float64(x * Float64(-1.0 - eps))) + exp(Float64(eps * x))) / 2.0);
	end
	return tmp
end
% Herbie alternative 4: piecewise rewrite of the NMSE 6.1 expression.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (eps <= 15.0)
		% eps <= 15: eps factors cancel algebraically; form mirrors the FPCore.
		tmp = ((eps * (2.0 * (exp(-x) * (1.0 + x)))) / eps) / 2.0;
	else
		% eps > 15: mean of the two exponential terms.
		tmp = (exp((x * (-1.0 - eps))) + exp((eps * x))) / 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 4: piecewise rewrite of the NMSE 6.1 expression, branching on eps <= 15. *)
code[x_, eps_] := If[LessEqual[eps, 15.0], N[(N[(N[(eps * N[(2.0 * N[(N[Exp[(-x)], $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(eps * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\varepsilon \leq 15:\\
\;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + e^{\varepsilon \cdot x}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if eps < 15

    1. Initial program 61.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified: 51.1%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 31.1%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+70.8%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg70.8%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg70.8%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses70.8%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out70.8%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in70.8%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg70.8%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified70.8%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]

    if 15 < eps

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified83.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative100.0%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified100.0%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around -inf 100.0%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{\frac{1}{e^{\varepsilon \cdot x - -1 \cdot x}}}}{2} \]
    9. Step-by-step derivation
      1. cancel-sign-sub-inv100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x + \left(--1\right) \cdot x}}}}{2} \]
      2. exp-sum100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{e^{\varepsilon \cdot x} \cdot e^{\left(--1\right) \cdot x}}}}{2} \]
      3. metadata-eval100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\varepsilon \cdot x} \cdot e^{\color{blue}{1} \cdot x}}}{2} \]
      4. *-lft-identity100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\varepsilon \cdot x} \cdot e^{\color{blue}{x}}}}{2} \]
      5. exp-sum100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{e^{\varepsilon \cdot x + x}}}}{2} \]
      6. *-commutative100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon} + x}}}{2} \]
      7. fma-undefine100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right)}}}}{2} \]
      8. *-rgt-identity100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right) \cdot 1}}}}{2} \]
      9. exp-neg100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{e^{-\mathsf{fma}\left(x, \varepsilon, x\right) \cdot 1}}}{2} \]
      10. *-rgt-identity100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right)}}}{2} \]
      11. neg-mul-1100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{-1 \cdot \mathsf{fma}\left(x, \varepsilon, x\right)}}}{2} \]
      12. fma-undefine100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon + x\right)}}}{2} \]
      13. *-commutative100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \left(\color{blue}{\varepsilon \cdot x} + x\right)}}{2} \]
      14. +-commutative100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \color{blue}{\left(x + \varepsilon \cdot x\right)}}}{2} \]
      15. distribute-lft-in100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}}}{2} \]
      16. associate-*r*100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}}{2} \]
      17. neg-mul-1100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x}}{2} \]
      18. distribute-rgt-in100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}}}{2} \]
      19. unsub-neg100.0%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}}}{2} \]
    10. Simplified100.0%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{e^{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification: 78.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq 15:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(e^{-x} \cdot \left(1 + x\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + e^{\varepsilon \cdot x}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 63.6% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -2.6 \cdot 10^{+77}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq -1.05 \cdot 10^{-244}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + \left(1 + \varepsilon \cdot x\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{1 + x \cdot \left(1 + \varepsilon\right)}}{2}\\ \end{array} \end{array} \]
;; Herbie alternative 5 (binary64): three x-regimes for the NMSE 6.1 expression.
(FPCore (x eps)
 :precision binary64
 (if (<= x -2.6e+77)
   ;; very negative x: Taylor polynomial of expm1(-x), divided by eps
   (/
    (/
     (*
      x
      (+
       -1.0
       (* x (+ 0.5 (* x (- (* x 0.041666666666666664) 0.16666666666666666))))))
     eps)
    2.0)
   (if (<= x -1.05e-244)
     ;; moderately negative x: exponential plus linearized second term
     (/ (+ (exp (* x (- -1.0 eps))) (+ 1.0 (* eps x))) 2.0)
     ;; remaining x: exponential plus reciprocal-linear term
     (/ (+ (exp (* eps x)) (/ 1.0 (+ 1.0 (* x (+ 1.0 eps))))) 2.0))))
/* Herbie alternative 5: three x-regimes approximating
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2. */
double code(double x, double eps) {
	double tmp;
	if (x <= -2.6e+77) {
		/* Very negative x: Taylor polynomial of expm1(-x), over eps. */
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= -1.05e-244) {
		/* Moderately negative x: exponential plus linearized second term. */
		tmp = (exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0;
	} else {
		/* Remaining x: exponential plus reciprocal-linear term. */
		tmp = (exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0;
	}
	return tmp;
}
! Herbie alternative 5: three x-regimes for the NMSE 6.1 expression.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= (-2.6d+77)) then
        ! Very negative x: Taylor polynomial of expm1(-x), over eps.
        tmp = ((x * ((-1.0d0) + (x * (0.5d0 + (x * ((x * 0.041666666666666664d0) - 0.16666666666666666d0)))))) / eps) / 2.0d0
    else if (x <= (-1.05d-244)) then
        ! Moderately negative x: exponential plus linearized second term.
        tmp = (exp((x * ((-1.0d0) - eps))) + (1.0d0 + (eps * x))) / 2.0d0
    else
        ! Remaining x: exponential plus reciprocal-linear term.
        tmp = (exp((eps * x)) + (1.0d0 / (1.0d0 + (x * (1.0d0 + eps))))) / 2.0d0
    end if
    code = tmp
end function
/** Herbie alternative 5: three x-regimes for the NMSE 6.1 expression. */
public static double code(double x, double eps) {
	double tmp;
	if (x <= -2.6e+77) {
		// Very negative x: Taylor polynomial of expm1(-x), over eps.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= -1.05e-244) {
		// Moderately negative x: exponential plus linearized second term.
		tmp = (Math.exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0;
	} else {
		// Remaining x: exponential plus reciprocal-linear term.
		tmp = (Math.exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Herbie alternative 5: three x-regimes for the NMSE 6.1 expression.

	Each branch reproduces the generated FPCore expression's
	floating-point operation order exactly.
	"""
	if x <= -2.6e+77:
		# Very negative x: Taylor polynomial of expm1(-x), divided by eps.
		return ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0
	if x <= -1.05e-244:
		# Moderately negative x: exponential plus linearized second term.
		return (math.exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0
	# Remaining x: exponential plus reciprocal-linear term.
	return (math.exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0
# Herbie alternative 5: three x-regimes for the NMSE 6.1 expression.
function code(x, eps)
	tmp = 0.0
	if (x <= -2.6e+77)
		# Very negative x: Taylor polynomial of expm1(-x), over eps.
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0);
	elseif (x <= -1.05e-244)
		# Moderately negative x: exponential plus linearized second term.
		tmp = Float64(Float64(exp(Float64(x * Float64(-1.0 - eps))) + Float64(1.0 + Float64(eps * x))) / 2.0);
	else
		# Remaining x: exponential plus reciprocal-linear term.
		tmp = Float64(Float64(exp(Float64(eps * x)) + Float64(1.0 / Float64(1.0 + Float64(x * Float64(1.0 + eps))))) / 2.0);
	end
	return tmp
end
% Herbie alternative 5: three x-regimes for the NMSE 6.1 expression.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= -2.6e+77)
		% Very negative x: Taylor polynomial of expm1(-x), over eps.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	elseif (x <= -1.05e-244)
		% Moderately negative x: exponential plus linearized second term.
		tmp = (exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0;
	else
		% Remaining x: exponential plus reciprocal-linear term.
		tmp = (exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 5: three x-regimes (thresholds -2.6e77 and -1.05e-244) for the NMSE 6.1 expression. *)
code[x_, eps_] := If[LessEqual[x, -2.6e+77], N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, -1.05e-244], N[(N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(1.0 + N[(eps * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(eps * x), $MachinePrecision]], $MachinePrecision] + N[(1.0 / N[(1.0 + N[(x * N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.6 \cdot 10^{+77}:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{elif}\;x \leq -1.05 \cdot 10^{-244}:\\
\;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + \left(1 + \varepsilon \cdot x\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{1 + x \cdot \left(1 + \varepsilon\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.6000000000000002e77

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 51.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 50.0%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define50.0%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-150.0%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified50.0%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 50.0%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(0.041666666666666664 \cdot x - 0.16666666666666666\right)\right) - 1\right)}}{\varepsilon}}{2} \]

    if -2.6000000000000002e77 < x < -1.05000000000000001e-244

    1. Initial program 55.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified47.6%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 94.8%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 94.9%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative94.9%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified94.9%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around -inf 94.9%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{\frac{1}{e^{\varepsilon \cdot x - -1 \cdot x}}}}{2} \]
    9. Step-by-step derivation
      1. cancel-sign-sub-inv94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x + \left(--1\right) \cdot x}}}}{2} \]
      2. exp-sum86.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{e^{\varepsilon \cdot x} \cdot e^{\left(--1\right) \cdot x}}}}{2} \]
      3. metadata-eval86.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\varepsilon \cdot x} \cdot e^{\color{blue}{1} \cdot x}}}{2} \]
      4. *-lft-identity86.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\varepsilon \cdot x} \cdot e^{\color{blue}{x}}}}{2} \]
      5. exp-sum94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{e^{\varepsilon \cdot x + x}}}}{2} \]
      6. *-commutative94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon} + x}}}{2} \]
      7. fma-undefine94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right)}}}}{2} \]
      8. *-rgt-identity94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right) \cdot 1}}}}{2} \]
      9. exp-neg94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{e^{-\mathsf{fma}\left(x, \varepsilon, x\right) \cdot 1}}}{2} \]
      10. *-rgt-identity94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right)}}}{2} \]
      11. neg-mul-194.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{-1 \cdot \mathsf{fma}\left(x, \varepsilon, x\right)}}}{2} \]
      12. fma-undefine94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon + x\right)}}}{2} \]
      13. *-commutative94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \left(\color{blue}{\varepsilon \cdot x} + x\right)}}{2} \]
      14. +-commutative94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \color{blue}{\left(x + \varepsilon \cdot x\right)}}}{2} \]
      15. distribute-lft-in94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}}}{2} \]
      16. associate-*r*94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}}{2} \]
      17. neg-mul-194.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x}}{2} \]
      18. distribute-rgt-in94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}}}{2} \]
      19. unsub-neg94.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}}}{2} \]
    10. Simplified94.9%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{e^{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]
    11. Taylor expanded in x around 0 76.4%

      \[\leadsto \frac{\color{blue}{\left(1 + \varepsilon \cdot x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \]

    if -1.05000000000000001e-244 < x

    1. Initial program 74.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified67.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.3%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 86.2%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative86.2%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified86.2%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 64.0%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{1 + x \cdot \left(1 + \varepsilon\right)}}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification: 66.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.6 \cdot 10^{+77}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq -1.05 \cdot 10^{-244}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + \left(1 + \varepsilon \cdot x\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{1 + x \cdot \left(1 + \varepsilon\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 63.5% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -2.6 \cdot 10^{+77}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 10^{-287}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + \left(1 + \varepsilon \cdot x\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \end{array} \end{array} \]
;; Herbie alternative 6 (binary64): three x-regimes for the NMSE 6.1 expression.
(FPCore (x eps)
 :precision binary64
 (if (<= x -2.6e+77)
   ;; very negative x: Taylor polynomial of expm1(-x), divided by eps
   (/
    (/
     (*
      x
      (+
       -1.0
       (* x (+ 0.5 (* x (- (* x 0.041666666666666664) 0.16666666666666666))))))
     eps)
    2.0)
   (if (<= x 1e-287)
     ;; small or negative x: exponential plus linearized second term
     (/ (+ (exp (* x (- -1.0 eps))) (+ 1.0 (* eps x))) 2.0)
     ;; positive x: one exponential plus the Taylor constant 1
     (/ (+ 1.0 (exp (* x (+ eps -1.0)))) 2.0))))
/* Herbie alternative 6: three x-regimes approximating
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2. */
double code(double x, double eps) {
	double tmp;
	if (x <= -2.6e+77) {
		/* Very negative x: Taylor polynomial of expm1(-x), over eps. */
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= 1e-287) {
		/* Small or negative x: exponential plus linearized second term. */
		tmp = (exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0;
	} else {
		/* Positive x: one exponential plus the Taylor constant 1. */
		tmp = (1.0 + exp((x * (eps + -1.0)))) / 2.0;
	}
	return tmp;
}
! Herbie alternative 6: three x-regimes for the NMSE 6.1 expression.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= (-2.6d+77)) then
        ! Very negative x: Taylor polynomial of expm1(-x), over eps.
        tmp = ((x * ((-1.0d0) + (x * (0.5d0 + (x * ((x * 0.041666666666666664d0) - 0.16666666666666666d0)))))) / eps) / 2.0d0
    else if (x <= 1d-287) then
        ! Small or negative x: exponential plus linearized second term.
        tmp = (exp((x * ((-1.0d0) - eps))) + (1.0d0 + (eps * x))) / 2.0d0
    else
        ! Positive x: one exponential plus the Taylor constant 1.
        tmp = (1.0d0 + exp((x * (eps + (-1.0d0))))) / 2.0d0
    end if
    code = tmp
end function
/** Herbie alternative 6: three x-regimes for the NMSE 6.1 expression. */
public static double code(double x, double eps) {
	double tmp;
	if (x <= -2.6e+77) {
		// Very negative x: Taylor polynomial of expm1(-x), over eps.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= 1e-287) {
		// Small or negative x: exponential plus linearized second term.
		tmp = (Math.exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0;
	} else {
		// Positive x: one exponential plus the Taylor constant 1.
		tmp = (1.0 + Math.exp((x * (eps + -1.0)))) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Herbie alternative 6: three x-regimes for the NMSE 6.1 expression.

	Each branch reproduces the generated FPCore expression's
	floating-point operation order exactly.
	"""
	if x <= -2.6e+77:
		# Very negative x: Taylor polynomial of expm1(-x), divided by eps.
		return ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0
	if x <= 1e-287:
		# Small or negative x: exponential plus linearized second term.
		return (math.exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0
	# Positive x: one exponential plus the Taylor constant 1.
	return (1.0 + math.exp((x * (eps + -1.0)))) / 2.0
# Herbie alternative 6: three x-regimes for the NMSE 6.1 expression.
function code(x, eps)
	tmp = 0.0
	if (x <= -2.6e+77)
		# Very negative x: Taylor polynomial of expm1(-x), over eps.
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0);
	elseif (x <= 1e-287)
		# Small or negative x: exponential plus linearized second term.
		tmp = Float64(Float64(exp(Float64(x * Float64(-1.0 - eps))) + Float64(1.0 + Float64(eps * x))) / 2.0);
	else
		# Positive x: one exponential plus the Taylor constant 1.
		tmp = Float64(Float64(1.0 + exp(Float64(x * Float64(eps + -1.0)))) / 2.0);
	end
	return tmp
end
% Herbie alternative 6: three x-regimes for the NMSE 6.1 expression.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= -2.6e+77)
		% Very negative x: Taylor polynomial of expm1(-x), over eps.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	elseif (x <= 1e-287)
		% Small or negative x: exponential plus linearized second term.
		tmp = (exp((x * (-1.0 - eps))) + (1.0 + (eps * x))) / 2.0;
	else
		% Positive x: one exponential plus the Taylor constant 1.
		tmp = (1.0 + exp((x * (eps + -1.0)))) / 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 6: three x-regimes (thresholds -2.6e77 and 1e-287) for the NMSE 6.1 expression. *)
code[x_, eps_] := If[LessEqual[x, -2.6e+77], N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1e-287], N[(N[(N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[(1.0 + N[(eps * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.6 \cdot 10^{+77}:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{elif}\;x \leq 10^{-287}:\\
\;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + \left(1 + \varepsilon \cdot x\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -2.6000000000000002e77

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 51.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 50.0%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define50.0%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-150.0%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified50.0%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 50.0%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(0.041666666666666664 \cdot x - 0.16666666666666666\right)\right) - 1\right)}}{\varepsilon}}{2} \]

    if -2.6000000000000002e77 < x < 1.00000000000000002e-287

    1. Initial program 55.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified49.1%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 96.0%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 96.1%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative96.1%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified96.1%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around -inf 96.1%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{\frac{1}{e^{\varepsilon \cdot x - -1 \cdot x}}}}{2} \]
    9. Step-by-step derivation
      1. cancel-sign-sub-inv96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\varepsilon \cdot x + \left(--1\right) \cdot x}}}}{2} \]
      2. exp-sum89.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{e^{\varepsilon \cdot x} \cdot e^{\left(--1\right) \cdot x}}}}{2} \]
      3. metadata-eval89.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\varepsilon \cdot x} \cdot e^{\color{blue}{1} \cdot x}}}{2} \]
      4. *-lft-identity89.9%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\varepsilon \cdot x} \cdot e^{\color{blue}{x}}}}{2} \]
      5. exp-sum96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{e^{\varepsilon \cdot x + x}}}}{2} \]
      6. *-commutative96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{x \cdot \varepsilon} + x}}}{2} \]
      7. fma-undefine96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right)}}}}{2} \]
      8. *-rgt-identity96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{e^{\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right) \cdot 1}}}}{2} \]
      9. exp-neg96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{e^{-\mathsf{fma}\left(x, \varepsilon, x\right) \cdot 1}}}{2} \]
      10. *-rgt-identity96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-\color{blue}{\mathsf{fma}\left(x, \varepsilon, x\right)}}}{2} \]
      11. neg-mul-196.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{-1 \cdot \mathsf{fma}\left(x, \varepsilon, x\right)}}}{2} \]
      12. fma-undefine96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon + x\right)}}}{2} \]
      13. *-commutative96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \left(\color{blue}{\varepsilon \cdot x} + x\right)}}{2} \]
      14. +-commutative96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot \color{blue}{\left(x + \varepsilon \cdot x\right)}}}{2} \]
      15. distribute-lft-in96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{-1 \cdot x + -1 \cdot \left(\varepsilon \cdot x\right)}}}{2} \]
      16. associate-*r*96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot x + \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}}{2} \]
      17. neg-mul-196.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{-1 \cdot x + \color{blue}{\left(-\varepsilon\right)} \cdot x}}{2} \]
      18. distribute-rgt-in96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{\color{blue}{x \cdot \left(-1 + \left(-\varepsilon\right)\right)}}}{2} \]
      19. unsub-neg96.1%

        \[\leadsto \frac{e^{x \cdot \varepsilon} + e^{x \cdot \color{blue}{\left(-1 - \varepsilon\right)}}}{2} \]
    10. Simplified96.1%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{e^{x \cdot \left(-1 - \varepsilon\right)}}}{2} \]
    11. Taylor expanded in x around 0 81.7%

      \[\leadsto \frac{\color{blue}{\left(1 + \varepsilon \cdot x\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \]

    if 1.00000000000000002e-287 < x

    1. Initial program 77.8%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified69.7%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.1%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in x around 0 58.1%

      \[\leadsto \frac{e^{x \cdot \left(\varepsilon - 1\right)} + \color{blue}{1}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification: 66.3%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -2.6 \cdot 10^{+77}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 10^{-287}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 - \varepsilon\right)} + \left(1 + \varepsilon \cdot x\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 7: 67.4% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -1.1 \cdot 10^{-242}:\\ \;\;\;\;\frac{\varepsilon \cdot \left(x + \frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{\varepsilon}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{1 + x \cdot \left(1 + \varepsilon\right)}}{2}\\ \end{array} \end{array} \]
;; Herbie alternative 7 (binary64): two x-regimes for the NMSE 6.1 expression.
(FPCore (x eps)
 :precision binary64
 (if (<= x -1.1e-242)
   ;; negative x: factored form eps*(x + (1 + e^{x*(-1-eps)})/eps)/2
   (/ (* eps (+ x (/ (+ 1.0 (exp (* x (- -1.0 eps)))) eps))) 2.0)
   ;; remaining x: exponential plus reciprocal-linear term
   (/ (+ (exp (* eps x)) (/ 1.0 (+ 1.0 (* x (+ 1.0 eps))))) 2.0)))
/* Herbie "Alternative 7" (67.4% accurate, 1.9x speedup per report) for
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
 * Machine-generated: the exact evaluation order determines the rounding
 * behavior being reported -- do not re-associate. */
double code(double x, double eps) {
	double tmp;
	if (x <= -1.1e-242) {
		/* x <= -1.1e-242: eps factored out of the sum (see derivation step
		 * "Taylor expanded in eps around inf"). */
		tmp = (eps * (x + ((1.0 + exp((x * (-1.0 - eps)))) / eps))) / 2.0;
	} else {
		/* otherwise: second exponential replaced by its Taylor form
		 * 1/(1 + x*(1+eps)) (derivation step "Taylor expanded in x around 0"). */
		tmp = (exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0;
	}
	return tmp;
}
! Herbie "Alternative 7" rewrite of
! ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
! Machine-generated; evaluation order is significant for rounding.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= (-1.1d-242)) then
        ! x <= -1.1e-242: eps factored out of the sum.
        tmp = (eps * (x + ((1.0d0 + exp((x * ((-1.0d0) - eps)))) / eps))) / 2.0d0
    else
        ! otherwise: 1/(1 + x*(1+eps)) replaces the second exponential.
        tmp = (exp((eps * x)) + (1.0d0 / (1.0d0 + (x * (1.0d0 + eps))))) / 2.0d0
    end if
    code = tmp
end function
/**
 * Herbie "Alternative 7" rewrite of
 * ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
 * Machine-generated; exact evaluation order determines rounding.
 */
public static double code(double x, double eps) {
	double tmp;
	if (x <= -1.1e-242) {
		// x <= -1.1e-242: eps factored out of the sum.
		tmp = (eps * (x + ((1.0 + Math.exp((x * (-1.0 - eps)))) / eps))) / 2.0;
	} else {
		// otherwise: 1/(1 + x*(1+eps)) replaces the second exponential.
		tmp = (Math.exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	# Herbie "Alternative 7" rewrite of
	# ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
	# Machine-generated; exact evaluation order determines rounding.
	tmp = 0
	if x <= -1.1e-242:
		# x <= -1.1e-242: eps factored out of the sum.
		tmp = (eps * (x + ((1.0 + math.exp((x * (-1.0 - eps)))) / eps))) / 2.0
	else:
		# otherwise: 1/(1 + x*(1+eps)) replaces the second exponential.
		tmp = (math.exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0
	return tmp
# Herbie "Alternative 7" rewrite of
# ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
# Float64(...) wrappers pin every intermediate to binary64 rounding.
function code(x, eps)
	tmp = 0.0
	if (x <= -1.1e-242)
		# x <= -1.1e-242: eps factored out of the sum.
		tmp = Float64(Float64(eps * Float64(x + Float64(Float64(1.0 + exp(Float64(x * Float64(-1.0 - eps)))) / eps))) / 2.0);
	else
		# otherwise: 1/(1 + x*(1+eps)) replaces the second exponential.
		tmp = Float64(Float64(exp(Float64(eps * x)) + Float64(1.0 / Float64(1.0 + Float64(x * Float64(1.0 + eps))))) / 2.0);
	end
	return tmp
end
% Herbie "Alternative 7" rewrite of
% ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
% Machine-generated; exact evaluation order determines rounding.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= -1.1e-242)
		% x <= -1.1e-242: eps factored out of the sum.
		tmp = (eps * (x + ((1.0 + exp((x * (-1.0 - eps)))) / eps))) / 2.0;
	else
		% otherwise: 1/(1 + x*(1+eps)) replaces the second exponential.
		tmp = (exp((eps * x)) + (1.0 / (1.0 + (x * (1.0 + eps))))) / 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie "Alternative 7": branch at x <= -1.1*10^-242; the N[..., $MachinePrecision] wrappers force each intermediate to machine precision. *)
code[x_, eps_] := If[LessEqual[x, -1.1e-242], N[(N[(eps * N[(x + N[(N[(1.0 + N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(eps * x), $MachinePrecision]], $MachinePrecision] + N[(1.0 / N[(1.0 + N[(x * N[(1.0 + eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.1 \cdot 10^{-242}:\\
\;\;\;\;\frac{\varepsilon \cdot \left(x + \frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{\varepsilon}\right)}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{1 + x \cdot \left(1 + \varepsilon\right)}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -1.10000000000000001e-242

    1. Initial program 66.4%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 66.4%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 39.4%

      \[\leadsto \frac{\color{blue}{\left(1 + \left(-1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right) + \frac{1}{\varepsilon}\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    5. Step-by-step derivation
      1. +-commutative 39.4%

        \[\leadsto \frac{\left(1 + \color{blue}{\left(\frac{1}{\varepsilon} + -1 \cdot \left(x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)\right)}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      2. mul-1-neg 39.4%

        \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} + \color{blue}{\left(-x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      3. unsub-neg 39.4%

        \[\leadsto \frac{\left(1 + \color{blue}{\left(\frac{1}{\varepsilon} - x \cdot \left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right)\right)}\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
      4. *-commutative 39.4%

        \[\leadsto \frac{\left(1 + \left(\frac{1}{\varepsilon} - x \cdot \color{blue}{\left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)}\right)\right) - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    6. Simplified 39.4%

      \[\leadsto \frac{\color{blue}{\left(1 + \left(\frac{1}{\varepsilon} - x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(1 + \frac{1}{\varepsilon}\right)\right)\right)\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2} \]
    7. Taylor expanded in eps around inf 78.3%

      \[\leadsto \frac{\color{blue}{\varepsilon \cdot \left(\frac{1}{\varepsilon} - \left(-1 \cdot x + -1 \cdot \frac{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}{\varepsilon}\right)\right)}}{2} \]
    8. Simplified 78.3%

      \[\leadsto \frac{\color{blue}{\varepsilon \cdot \left(\frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{\varepsilon} - \left(-x\right)\right)}}{2} \]

    if -1.10000000000000001e-242 < x

    1. Initial program 74.7%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 67.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 99.3%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 86.2%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative 86.2%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified 86.2%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 64.0%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \frac{1}{\color{blue}{1 + x \cdot \left(1 + \varepsilon\right)}}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 69.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -1.1 \cdot 10^{-242}:\\ \;\;\;\;\frac{\varepsilon \cdot \left(x + \frac{1 + e^{x \cdot \left(-1 - \varepsilon\right)}}{\varepsilon}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{\varepsilon \cdot x} + \frac{1}{1 + x \cdot \left(1 + \varepsilon\right)}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 8: 57.3% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -10000:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 2900000:\\ \;\;\;\;e^{-x} \cdot \left(1 + x\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\ \end{array} \end{array} \]
; Herbie "Alternative 8": three-regime rewrite (57.3% accurate, 2.0x speedup
; per report). The middle regime drops eps entirely (Taylor expansion in eps
; around 0 in the derivation below).
(FPCore (x eps)
 :precision binary64
 (if (<= x -10000.0)
   (/ (/ (expm1 (- x)) eps) 2.0)
   (if (<= x 2900000.0) (* (exp (- x)) (+ 1.0 x)) (/ (/ (expm1 x) eps) 2.0))))
/* Herbie "Alternative 8" (57.3% accurate, 2.0x speedup per report),
 * three regimes split at x = -1e4 and x = 2.9e6. Machine-generated;
 * do not re-associate. Note the middle regime does not use eps. */
double code(double x, double eps) {
	double tmp;
	if (x <= -10000.0) {
		/* x <= -1e4: expm1 form of (exp(-x) - 1)/eps. */
		tmp = (expm1(-x) / eps) / 2.0;
	} else if (x <= 2900000.0) {
		/* -1e4 < x <= 2.9e6: eps-free form from Taylor expansion in eps. */
		tmp = exp(-x) * (1.0 + x);
	} else {
		/* x > 2.9e6: mirrored expm1 form (exp(x) - 1)/eps. */
		tmp = (expm1(x) / eps) / 2.0;
	}
	return tmp;
}
/**
 * Herbie "Alternative 8": three regimes split at x = -1e4 and x = 2.9e6.
 * Machine-generated; exact evaluation order determines rounding.
 * Note the middle regime does not use eps.
 */
public static double code(double x, double eps) {
	double tmp;
	if (x <= -10000.0) {
		// x <= -1e4: expm1 form of (exp(-x) - 1)/eps.
		tmp = (Math.expm1(-x) / eps) / 2.0;
	} else if (x <= 2900000.0) {
		// -1e4 < x <= 2.9e6: eps-free form from Taylor expansion in eps.
		tmp = Math.exp(-x) * (1.0 + x);
	} else {
		// x > 2.9e6: mirrored expm1 form (exp(x) - 1)/eps.
		tmp = (Math.expm1(x) / eps) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	# Herbie "Alternative 8": three regimes split at x = -1e4 and x = 2.9e6.
	# Machine-generated; exact evaluation order determines rounding.
	# Note the middle regime does not use eps.
	tmp = 0
	if x <= -10000.0:
		# x <= -1e4: expm1 form of (exp(-x) - 1)/eps.
		tmp = (math.expm1(-x) / eps) / 2.0
	elif x <= 2900000.0:
		# -1e4 < x <= 2.9e6: eps-free form from Taylor expansion in eps.
		tmp = math.exp(-x) * (1.0 + x)
	else:
		# x > 2.9e6: mirrored expm1 form (exp(x) - 1)/eps.
		tmp = (math.expm1(x) / eps) / 2.0
	return tmp
# Herbie "Alternative 8": three regimes split at x = -1e4 and x = 2.9e6.
# Float64(...) wrappers pin every intermediate to binary64 rounding.
function code(x, eps)
	tmp = 0.0
	if (x <= -10000.0)
		# x <= -1e4: expm1 form of (exp(-x) - 1)/eps.
		tmp = Float64(Float64(expm1(Float64(-x)) / eps) / 2.0);
	elseif (x <= 2900000.0)
		# -1e4 < x <= 2.9e6: eps-free form from Taylor expansion in eps.
		tmp = Float64(exp(Float64(-x)) * Float64(1.0 + x));
	else
		# x > 2.9e6: mirrored expm1 form (exp(x) - 1)/eps.
		tmp = Float64(Float64(expm1(x) / eps) / 2.0);
	end
	return tmp
end
(* Herbie "Alternative 8": expm1 has no Wolfram builtin here, so it is emitted as Exp[...] - 1; every intermediate is forced to $MachinePrecision. *)
code[x_, eps_] := If[LessEqual[x, -10000.0], N[(N[(N[(Exp[(-x)] - 1), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 2900000.0], N[(N[Exp[(-x)], $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision], N[(N[(N[(Exp[x] - 1), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -10000:\\
\;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\

\mathbf{elif}\;x \leq 2900000:\\
\;\;\;\;e^{-x} \cdot \left(1 + x\right)\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -1e4

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 52.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 48.6%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define48.6%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-1 48.6%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified 48.6%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]

    if -1e4 < x < 2.9e6

    1. Initial program 52.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 31.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 31.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+80.1%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg80.1%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg80.1%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses80.1%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified 80.1%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in eps around 0 80.1%

      \[\leadsto \color{blue}{e^{-x} \cdot \left(1 + x\right)} \]

    if 2.9e6 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 28.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 1.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define1.9%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-1 1.9%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified 1.9%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Step-by-step derivation
      1. expm1-undefine1.9%

        \[\leadsto \frac{\frac{\color{blue}{e^{-x} - 1}}{\varepsilon}}{2} \]
      2. div-sub1.9%

        \[\leadsto \frac{\color{blue}{\frac{e^{-x}}{\varepsilon} - \frac{1}{\varepsilon}}}{2} \]
      3. add-sqr-sqrt0.0%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      4. sqrt-unprod27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      5. sqr-neg27.4%

        \[\leadsto \frac{\frac{e^{\sqrt{\color{blue}{x \cdot x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      6. sqrt-unprod27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      7. add-sqr-sqrt27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{x}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
    9. Applied egg-rr27.4%

      \[\leadsto \frac{\color{blue}{\frac{e^{x}}{\varepsilon} - \frac{1}{\varepsilon}}}{2} \]
    10. Step-by-step derivation
      1. div-sub27.4%

        \[\leadsto \frac{\color{blue}{\frac{e^{x} - 1}{\varepsilon}}}{2} \]
      2. expm1-define27.4%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(x\right)}}{\varepsilon}}{2} \]
    11. Simplified 27.4%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Add Preprocessing

Alternative 9: 55.6% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -16000:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 1000000:\\ \;\;\;\;e^{-x} \cdot \left(1 + x\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\ \end{array} \end{array} \]
; Herbie "Alternative 9": three-regime rewrite (55.6% accurate, 2.0x speedup
; per report). First regime is the degree-4 Taylor polynomial of expm1(-x)
; in Horner form (coefficients -1, 1/2, -1/6, 1/24).
(FPCore (x eps)
 :precision binary64
 (if (<= x -16000.0)
   (/
    (/
     (*
      x
      (+
       -1.0
       (* x (+ 0.5 (* x (- (* x 0.041666666666666664) 0.16666666666666666))))))
     eps)
    2.0)
   (if (<= x 1000000.0) (* (exp (- x)) (+ 1.0 x)) (/ (/ (expm1 x) eps) 2.0))))
/* Herbie "Alternative 9" (55.6% accurate, 2.0x speedup per report),
 * three regimes split at x = -16000 and x = 1e6. Machine-generated;
 * do not re-associate. */
double code(double x, double eps) {
	double tmp;
	if (x <= -16000.0) {
		/* x <= -16000: degree-4 Taylor polynomial of expm1(-x) in Horner
		 * form (coefficients -1, 1/2, -1/6, 1/24), as generated. */
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= 1000000.0) {
		/* -16000 < x <= 1e6: eps-free form from Taylor expansion in eps. */
		tmp = exp(-x) * (1.0 + x);
	} else {
		/* x > 1e6: expm1 form (exp(x) - 1)/eps. */
		tmp = (expm1(x) / eps) / 2.0;
	}
	return tmp;
}
/**
 * Herbie "Alternative 9": three regimes split at x = -16000 and x = 1e6.
 * Machine-generated; exact evaluation order determines rounding.
 */
public static double code(double x, double eps) {
	double tmp;
	if (x <= -16000.0) {
		// x <= -16000: degree-4 Taylor polynomial of expm1(-x), Horner form.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= 1000000.0) {
		// -16000 < x <= 1e6: eps-free form from Taylor expansion in eps.
		tmp = Math.exp(-x) * (1.0 + x);
	} else {
		// x > 1e6: expm1 form (exp(x) - 1)/eps.
		tmp = (Math.expm1(x) / eps) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	# Herbie "Alternative 9": three regimes split at x = -16000 and x = 1e6.
	# Machine-generated; exact evaluation order determines rounding.
	tmp = 0
	if x <= -16000.0:
		# x <= -16000: degree-4 Taylor polynomial of expm1(-x), Horner form.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0
	elif x <= 1000000.0:
		# -16000 < x <= 1e6: eps-free form from Taylor expansion in eps.
		tmp = math.exp(-x) * (1.0 + x)
	else:
		# x > 1e6: expm1 form (exp(x) - 1)/eps.
		tmp = (math.expm1(x) / eps) / 2.0
	return tmp
# Herbie "Alternative 9": three regimes split at x = -16000 and x = 1e6.
# Float64(...) wrappers pin every intermediate to binary64 rounding.
function code(x, eps)
	tmp = 0.0
	if (x <= -16000.0)
		# x <= -16000: degree-4 Taylor polynomial of expm1(-x), Horner form.
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0);
	elseif (x <= 1000000.0)
		# -16000 < x <= 1e6: eps-free form from Taylor expansion in eps.
		tmp = Float64(exp(Float64(-x)) * Float64(1.0 + x));
	else
		# x > 1e6: expm1 form (exp(x) - 1)/eps.
		tmp = Float64(Float64(expm1(x) / eps) / 2.0);
	end
	return tmp
end
(* Herbie "Alternative 9": expm1 is emitted as Exp[x] - 1; every intermediate is forced to $MachinePrecision. *)
code[x_, eps_] := If[LessEqual[x, -16000.0], N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 1000000.0], N[(N[Exp[(-x)], $MachinePrecision] * N[(1.0 + x), $MachinePrecision]), $MachinePrecision], N[(N[(N[(Exp[x] - 1), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -16000:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{elif}\;x \leq 1000000:\\
\;\;\;\;e^{-x} \cdot \left(1 + x\right)\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -16000

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 52.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 48.6%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define48.6%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-1 48.6%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified 48.6%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 34.9%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(0.041666666666666664 \cdot x - 0.16666666666666666\right)\right) - 1\right)}}{\varepsilon}}{2} \]

    if -16000 < x < 1e6

    1. Initial program 52.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 31.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 31.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+80.1%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg80.1%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg80.1%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses80.1%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified 80.1%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in eps around 0 80.1%

      \[\leadsto \color{blue}{e^{-x} \cdot \left(1 + x\right)} \]

    if 1e6 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 28.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 1.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define1.9%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-1 1.9%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified 1.9%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Step-by-step derivation
      1. expm1-undefine1.9%

        \[\leadsto \frac{\frac{\color{blue}{e^{-x} - 1}}{\varepsilon}}{2} \]
      2. div-sub1.9%

        \[\leadsto \frac{\color{blue}{\frac{e^{-x}}{\varepsilon} - \frac{1}{\varepsilon}}}{2} \]
      3. add-sqr-sqrt0.0%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      4. sqrt-unprod27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      5. sqr-neg27.4%

        \[\leadsto \frac{\frac{e^{\sqrt{\color{blue}{x \cdot x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      6. sqrt-unprod27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      7. add-sqr-sqrt27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{x}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
    9. Applied egg-rr27.4%

      \[\leadsto \frac{\color{blue}{\frac{e^{x}}{\varepsilon} - \frac{1}{\varepsilon}}}{2} \]
    10. Step-by-step derivation
      1. div-sub27.4%

        \[\leadsto \frac{\color{blue}{\frac{e^{x} - 1}{\varepsilon}}}{2} \]
      2. expm1-define27.4%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(x\right)}}{\varepsilon}}{2} \]
    11. Simplified 27.4%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 60.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -16000:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 1000000:\\ \;\;\;\;e^{-x} \cdot \left(1 + x\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 10: 55.0% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -15000:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 600000:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(1 + x\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\ \end{array} \end{array} \]
; Herbie "Alternative 10" (55.0% accurate, 2.0x speedup per report), three
; regimes split at x = -15000 and x = 600000. In the middle regime eps is
; multiplied in and then divided out -- kept exactly as generated, since the
; written evaluation order is the reported behavior.
(FPCore (x eps)
 :precision binary64
 (if (<= x -15000.0)
   (/
    (/
     (*
      x
      (+
       -1.0
       (* x (+ 0.5 (* x (- (* x 0.041666666666666664) 0.16666666666666666))))))
     eps)
    2.0)
   (if (<= x 600000.0)
     (/
      (/
       (*
        eps
        (*
         2.0
         (*
          (+ 1.0 x)
          (+ 1.0 (* x (+ -1.0 (* x (+ 0.5 (* x -0.16666666666666666)))))))))
       eps)
      2.0)
     (/ (/ (expm1 x) eps) 2.0))))
/* Herbie "Alternative 10" (55.0% accurate, 2.0x speedup per report),
 * three regimes split at x = -15000 and x = 600000. Machine-generated;
 * do not re-associate. */
double code(double x, double eps) {
	double tmp;
	if (x <= -15000.0) {
		/* x <= -15000: degree-4 Taylor polynomial of expm1(-x), Horner form. */
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= 600000.0) {
		/* -15000 < x <= 6e5: eps multiplies and then divides back out;
		 * kept as generated since the written order is what was measured. */
		tmp = ((eps * (2.0 * ((1.0 + x) * (1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666))))))))) / eps) / 2.0;
	} else {
		/* x > 6e5: expm1 form (exp(x) - 1)/eps. */
		tmp = (expm1(x) / eps) / 2.0;
	}
	return tmp;
}
/**
 * Herbie "Alternative 10": three regimes split at x = -15000 and x = 600000.
 * Machine-generated; exact evaluation order determines rounding.
 */
public static double code(double x, double eps) {
	double tmp;
	if (x <= -15000.0) {
		// x <= -15000: degree-4 Taylor polynomial of expm1(-x), Horner form.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	} else if (x <= 600000.0) {
		// -15000 < x <= 6e5: eps multiplies and then divides back out; kept as generated.
		tmp = ((eps * (2.0 * ((1.0 + x) * (1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666))))))))) / eps) / 2.0;
	} else {
		// x > 6e5: expm1 form (exp(x) - 1)/eps.
		tmp = (Math.expm1(x) / eps) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	# Herbie "Alternative 10": three regimes split at x = -15000 and x = 600000.
	# Machine-generated; exact evaluation order determines rounding.
	tmp = 0
	if x <= -15000.0:
		# x <= -15000: degree-4 Taylor polynomial of expm1(-x), Horner form.
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0
	elif x <= 600000.0:
		# -15000 < x <= 6e5: eps multiplies and then divides back out; kept as generated.
		tmp = ((eps * (2.0 * ((1.0 + x) * (1.0 + (x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666))))))))) / eps) / 2.0
	else:
		# x > 6e5: expm1 form (exp(x) - 1)/eps.
		tmp = (math.expm1(x) / eps) / 2.0
	return tmp
# Herbie-generated three-regime approximation of the specification expression;
# Float64(...) wrappers pin every intermediate to binary64 rounding.
function code(x, eps)
	tmp = 0.0
	if (x <= -15000.0)
		# Degree-4 Taylor polynomial of expm1(-x), Horner form.
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0);
	elseif (x <= 600000.0)
		# Series around x = 0; the eps*.../eps factor is generator output, kept verbatim.
		tmp = Float64(Float64(Float64(eps * Float64(2.0 * Float64(Float64(1.0 + x) * Float64(1.0 + Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))))))))) / eps) / 2.0);
	else
		# Large x: expm1(x) / (2 * eps).
		tmp = Float64(Float64(expm1(x) / eps) / 2.0);
	end
	return tmp
end
code[x_, eps_] := If[LessEqual[x, -15000.0], N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 600000.0], N[(N[(N[(eps * N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] * N[(1.0 + N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(Exp[x] - 1), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -15000:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{elif}\;x \leq 600000:\\
\;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(1 + x\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -15000

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 52.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 48.6%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define48.6%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-148.6%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified48.6%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 34.9%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(0.041666666666666664 \cdot x - 0.16666666666666666\right)\right) - 1\right)}}{\varepsilon}}{2} \]

    if -15000 < x < 6e5

    1. Initial program 52.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 31.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 31.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+80.1%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg80.1%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg80.1%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses80.1%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg80.1%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified80.1%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in x around 0 77.8%

      \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \color{blue}{\left(1 + x \cdot \left(x \cdot \left(0.5 + -0.16666666666666666 \cdot x\right) - 1\right)\right)}\right)\right)}{\varepsilon}}{2} \]

    if 6e5 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 28.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 1.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define1.9%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-11.9%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified1.9%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Step-by-step derivation
      1. expm1-undefine1.9%

        \[\leadsto \frac{\frac{\color{blue}{e^{-x} - 1}}{\varepsilon}}{2} \]
      2. div-sub1.9%

        \[\leadsto \frac{\color{blue}{\frac{e^{-x}}{\varepsilon} - \frac{1}{\varepsilon}}}{2} \]
      3. add-sqr-sqrt0.0%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{-x} \cdot \sqrt{-x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      4. sqrt-unprod27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{\left(-x\right) \cdot \left(-x\right)}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      5. sqr-neg27.4%

        \[\leadsto \frac{\frac{e^{\sqrt{\color{blue}{x \cdot x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      6. sqrt-unprod27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{\sqrt{x} \cdot \sqrt{x}}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
      7. add-sqr-sqrt27.4%

        \[\leadsto \frac{\frac{e^{\color{blue}{x}}}{\varepsilon} - \frac{1}{\varepsilon}}{2} \]
    9. Applied egg-rr27.4%

      \[\leadsto \frac{\color{blue}{\frac{e^{x}}{\varepsilon} - \frac{1}{\varepsilon}}}{2} \]
    10. Step-by-step derivation
      1. div-sub27.4%

        \[\leadsto \frac{\color{blue}{\frac{e^{x} - 1}{\varepsilon}}}{2} \]
      2. expm1-define27.4%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(x\right)}}{\varepsilon}}{2} \]
    11. Simplified27.4%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 58.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -15000:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 600000:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(1 + x\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(x\right)}{\varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 11: 63.9% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -550:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\ \end{array} \end{array} \]
;; Alternative 11: two regimes — expm1(-x)/(2*eps) for x <= -550,
;; (1 + e^(eps*x))/2 elsewhere.
(FPCore (x eps)
 :precision binary64
 (if (<= x -550.0)
   (/ (/ (expm1 (- x)) eps) 2.0)
   (/ (+ 1.0 (exp (* eps x))) 2.0)))
double code(double x, double eps) {
	double tmp;
	if (x <= -550.0) {
		tmp = (expm1(-x) / eps) / 2.0;
	} else {
		tmp = (1.0 + exp((eps * x))) / 2.0;
	}
	return tmp;
}
// Alternative 11: two regimes — expm1(-x)/(2*eps) for x <= -550,
// (1 + e^(eps*x))/2 elsewhere.
public static double code(double x, double eps) {
	double tmp;
	if (x <= -550.0) {
		tmp = (Math.expm1(-x) / eps) / 2.0;
	} else {
		tmp = (1.0 + Math.exp((eps * x))) / 2.0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 11: expm1(-x)/(2*eps) for x <= -550, else (1 + e^(eps*x))/2.

	Behaviorally identical to the generated two-branch form.
	"""
	if x <= -550.0:
		return (math.expm1(-x) / eps) / 2.0
	return (1.0 + math.exp(eps * x)) / 2.0
# Alternative 11: expm1(-x)/(2*eps) for x <= -550, else (1 + e^(eps*x))/2.
function code(x, eps)
	tmp = 0.0
	if (x <= -550.0)
		tmp = Float64(Float64(expm1(Float64(-x)) / eps) / 2.0);
	else
		tmp = Float64(Float64(1.0 + exp(Float64(eps * x))) / 2.0);
	end
	return tmp
end
code[x_, eps_] := If[LessEqual[x, -550.0], N[(N[(N[(Exp[(-x)] - 1), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(1.0 + N[Exp[N[(eps * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -550:\\
\;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -550

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 52.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 48.6%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define48.6%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-148.6%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified48.6%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]

    if -550 < x

    1. Initial program 67.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified59.1%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.7%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 88.5%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative88.5%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified88.5%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 69.5%

      \[\leadsto \frac{e^{x \cdot \varepsilon} + \color{blue}{1}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification66.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -550:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{1 + e^{\varepsilon \cdot x}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 12: 57.1% accurate, 6.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{if}\;x \leq -3 \cdot 10^{+37}:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;x \leq 520:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(\frac{1}{\varepsilon} - \left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right) - \varepsilon\right)}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+79}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
;; Alternative 12: shared quartic Taylor tail t_0 = x*(-1 + x*(1/2 + x*(x/24 - 1/6)))/eps/2
;; for extreme |x|; linear-in-x form for moderate x; zero for large positive x.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0
         (/
          (/
           (*
            x
            (+
             -1.0
             (*
              x
              (+
               0.5
               (* x (- (* x 0.041666666666666664) 0.16666666666666666))))))
           eps)
          2.0)))
   (if (<= x -3e+37)
     t_0
     (if (<= x 520.0)
       (/
        (+
         2.0
         (* x (- (- (/ 1.0 eps) (* (+ 1.0 (/ 1.0 eps)) (- 1.0 eps))) eps)))
        2.0)
       (if (<= x 2e+79) 0.0 t_0)))))
/*
 * Alternative 12: t_0 is the shared quartic Taylor tail of expm1(-x)/(2*eps),
 * used for both extreme-|x| regimes; the x <= 520 branch is linear in x and
 * the large-positive gap returns 0. t_0 is computed unconditionally, exactly
 * as generated.
 */
double code(double x, double eps) {
	double t_0 = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	double tmp;
	if (x <= -3e+37) {
		tmp = t_0;
	} else if (x <= 520.0) {
		tmp = (2.0 + (x * (((1.0 / eps) - ((1.0 + (1.0 / eps)) * (1.0 - eps))) - eps))) / 2.0;
	} else if (x <= 2e+79) {
		tmp = 0.0;
	} else {
		tmp = t_0;
	}
	return tmp;
}
! Alternative 12: t_0 = x*(-1 + x*(1/2 + x*(x/24 - 1/6)))/eps/2 (quartic Taylor
! tail of expm1(-x)/(2*eps)) for extreme |x|; linear form for x <= 520; zero
! in the large-positive gap.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    ! Computed unconditionally, exactly as generated.
    t_0 = ((x * ((-1.0d0) + (x * (0.5d0 + (x * ((x * 0.041666666666666664d0) - 0.16666666666666666d0)))))) / eps) / 2.0d0
    if (x <= (-3d+37)) then
        tmp = t_0
    else if (x <= 520.0d0) then
        tmp = (2.0d0 + (x * (((1.0d0 / eps) - ((1.0d0 + (1.0d0 / eps)) * (1.0d0 - eps))) - eps))) / 2.0d0
    else if (x <= 2d+79) then
        tmp = 0.0d0
    else
        tmp = t_0
    end if
    code = tmp
end function
// Alternative 12: t_0 is the quartic Taylor tail of expm1(-x)/(2*eps), shared
// by both extreme-|x| regimes; linear form for x <= 520; zero in the gap.
public static double code(double x, double eps) {
	double t_0 = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	double tmp;
	if (x <= -3e+37) {
		tmp = t_0;
	} else if (x <= 520.0) {
		tmp = (2.0 + (x * (((1.0 / eps) - ((1.0 + (1.0 / eps)) * (1.0 - eps))) - eps))) / 2.0;
	} else if (x <= 2e+79) {
		tmp = 0.0;
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 12: four-regime approximation.

	`tail` is the quartic Taylor polynomial of expm1(-x), scaled by
	1/(2*eps); it serves both extreme-|x| regimes. It is evaluated
	unconditionally, exactly as in the generated code (so eps == 0 raises
	ZeroDivisionError on every path, as before).
	"""
	tail = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0
	if x <= -3e+37:
		return tail
	if x <= 520.0:
		# Linear-in-x regime around moderate x.
		return (2.0 + (x * (((1.0 / eps) - ((1.0 + (1.0 / eps)) * (1.0 - eps))) - eps))) / 2.0
	if x <= 2e+79:
		return 0.0
	return tail
# Alternative 12: t_0 is the quartic Taylor tail of expm1(-x)/(2*eps), shared
# by both extreme-|x| regimes; computed unconditionally, exactly as generated.
function code(x, eps)
	t_0 = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0)
	tmp = 0.0
	if (x <= -3e+37)
		tmp = t_0;
	elseif (x <= 520.0)
		tmp = Float64(Float64(2.0 + Float64(x * Float64(Float64(Float64(1.0 / eps) - Float64(Float64(1.0 + Float64(1.0 / eps)) * Float64(1.0 - eps))) - eps))) / 2.0);
	elseif (x <= 2e+79)
		tmp = 0.0;
	else
		tmp = t_0;
	end
	return tmp
end
% Alternative 12: t_0 is the quartic Taylor tail of expm1(-x)/(2*eps), shared
% by both extreme-|x| regimes; computed unconditionally, exactly as generated.
function tmp_2 = code(x, eps)
	t_0 = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	tmp = 0.0;
	if (x <= -3e+37)
		tmp = t_0;
	elseif (x <= 520.0)
		tmp = (2.0 + (x * (((1.0 / eps) - ((1.0 + (1.0 / eps)) * (1.0 - eps))) - eps))) / 2.0;
	elseif (x <= 2e+79)
		tmp = 0.0;
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
code[x_, eps_] := Block[{t$95$0 = N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[x, -3e+37], t$95$0, If[LessEqual[x, 520.0], N[(N[(2.0 + N[(x * N[(N[(N[(1.0 / eps), $MachinePrecision] - N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[(1.0 - eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 2e+79], 0.0, t$95$0]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\
\mathbf{if}\;x \leq -3 \cdot 10^{+37}:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;x \leq 520:\\
\;\;\;\;\frac{2 + x \cdot \left(\left(\frac{1}{\varepsilon} - \left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right) - \varepsilon\right)}{2}\\

\mathbf{elif}\;x \leq 2 \cdot 10^{+79}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -3.00000000000000022e37 or 1.99999999999999993e79 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 39.7%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 18.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define18.9%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-118.9%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified18.9%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 33.5%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(0.041666666666666664 \cdot x - 0.16666666666666666\right)\right) - 1\right)}}{\varepsilon}}{2} \]

    if -3.00000000000000022e37 < x < 520

    1. Initial program 53.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified42.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 75.5%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]

    if 520 < x < 1.99999999999999993e79

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 40.9%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub40.9%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg40.9%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp40.9%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses40.9%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval40.9%

        \[\leadsto \color{blue}{0} \]
    6. Simplified40.9%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification59.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -3 \cdot 10^{+37}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 520:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(\frac{1}{\varepsilon} - \left(1 + \frac{1}{\varepsilon}\right) \cdot \left(1 - \varepsilon\right)\right) - \varepsilon\right)}{2}\\ \mathbf{elif}\;x \leq 2 \cdot 10^{+79}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 13: 57.2% accurate, 6.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{if}\;x \leq -3.5 \cdot 10^{+37}:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;x \leq 520:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+82}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
;; Alternative 13: quartic Taylor tail t_0 for extreme |x|; constants 1 and 0
;; for the moderate and large-positive regimes.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0
         (/
          (/
           (*
            x
            (+
             -1.0
             (*
              x
              (+
               0.5
               (* x (- (* x 0.041666666666666664) 0.16666666666666666))))))
           eps)
          2.0)))
   (if (<= x -3.5e+37) t_0 (if (<= x 520.0) 1.0 (if (<= x 5e+82) 0.0 t_0)))))
/*
 * Alternative 13: quartic Taylor tail t_0 (computed unconditionally, exactly
 * as generated) for extreme |x|; constant 1 for x <= 520 and 0 for the
 * large-positive gap.
 */
double code(double x, double eps) {
	double t_0 = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	double tmp;
	if (x <= -3.5e+37) {
		tmp = t_0;
	} else if (x <= 520.0) {
		tmp = 1.0;
	} else if (x <= 5e+82) {
		tmp = 0.0;
	} else {
		tmp = t_0;
	}
	return tmp;
}
! Alternative 13: quartic Taylor tail t_0 (computed unconditionally) for
! extreme |x|; constant 1 for x <= 520 and 0 for the large-positive gap.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: tmp
    t_0 = ((x * ((-1.0d0) + (x * (0.5d0 + (x * ((x * 0.041666666666666664d0) - 0.16666666666666666d0)))))) / eps) / 2.0d0
    if (x <= (-3.5d+37)) then
        tmp = t_0
    else if (x <= 520.0d0) then
        tmp = 1.0d0
    else if (x <= 5d+82) then
        tmp = 0.0d0
    else
        tmp = t_0
    end if
    code = tmp
end function
// Alternative 13: quartic Taylor tail t_0 (computed unconditionally) for
// extreme |x|; constant 1 for x <= 520 and 0 for the large-positive gap.
public static double code(double x, double eps) {
	double t_0 = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	double tmp;
	if (x <= -3.5e+37) {
		tmp = t_0;
	} else if (x <= 520.0) {
		tmp = 1.0;
	} else if (x <= 5e+82) {
		tmp = 0.0;
	} else {
		tmp = t_0;
	}
	return tmp;
}
def code(x, eps):
	"""Alternative 13: quartic Taylor tail for extreme |x|, constants 1/0
	in between.

	`tail` is evaluated unconditionally, exactly as in the generated code
	(so eps == 0 raises ZeroDivisionError on every path, as before).
	"""
	tail = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0
	if x <= -3.5e+37:
		return tail
	if x <= 520.0:
		return 1.0
	if x <= 5e+82:
		return 0.0
	return tail
# Alternative 13: quartic Taylor tail t_0 (computed unconditionally) for
# extreme |x|; constant 1 for x <= 520 and 0 for the large-positive gap.
function code(x, eps)
	t_0 = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0)
	tmp = 0.0
	if (x <= -3.5e+37)
		tmp = t_0;
	elseif (x <= 520.0)
		tmp = 1.0;
	elseif (x <= 5e+82)
		tmp = 0.0;
	else
		tmp = t_0;
	end
	return tmp
end
% Alternative 13: quartic Taylor tail t_0 (computed unconditionally) for
% extreme |x|; constant 1 for x <= 520 and 0 for the large-positive gap.
function tmp_2 = code(x, eps)
	t_0 = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	tmp = 0.0;
	if (x <= -3.5e+37)
		tmp = t_0;
	elseif (x <= 520.0)
		tmp = 1.0;
	elseif (x <= 5e+82)
		tmp = 0.0;
	else
		tmp = t_0;
	end
	tmp_2 = tmp;
end
code[x_, eps_] := Block[{t$95$0 = N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]}, If[LessEqual[x, -3.5e+37], t$95$0, If[LessEqual[x, 520.0], 1.0, If[LessEqual[x, 5e+82], 0.0, t$95$0]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\
\mathbf{if}\;x \leq -3.5 \cdot 10^{+37}:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;x \leq 520:\\
\;\;\;\;1\\

\mathbf{elif}\;x \leq 5 \cdot 10^{+82}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -3.5e37 or 5.00000000000000015e82 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 39.7%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 18.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define18.9%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-118.9%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified18.9%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 33.5%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(0.041666666666666664 \cdot x - 0.16666666666666666\right)\right) - 1\right)}}{\varepsilon}}{2} \]

    if -3.5e37 < x < 520

    1. Initial program 53.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified42.5%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 96.8%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 96.9%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative96.9%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified96.9%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 75.5%

      \[\leadsto \color{blue}{1} \]

    if 520 < x < 5.00000000000000015e82

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 40.9%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub40.9%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg40.9%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp40.9%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses40.9%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval40.9%

        \[\leadsto \color{blue}{0} \]
    6. Simplified40.9%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification59.8%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -3.5 \cdot 10^{+37}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 520:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 5 \cdot 10^{+82}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 14: 58.0% accurate, 8.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -8.4 \cdot 10^{+113}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 120:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 2.2 \cdot 10^{+152}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot 0.5\right)}{\varepsilon}}{2}\\ \end{array} \end{array} \]
;; Alternative 14: cubic Taylor tail for very negative x, (2 - eps*x)/2 for
;; moderate x, zero in the gap, quadratic tail for very large positive x.
(FPCore (x eps)
 :precision binary64
 (if (<= x -8.4e+113)
   (/ (/ (* x (+ -1.0 (* x (+ 0.5 (* x -0.16666666666666666))))) eps) 2.0)
   (if (<= x 120.0)
     (/ (- 2.0 (* eps x)) 2.0)
     (if (<= x 2.2e+152) 0.0 (/ (/ (* x (+ -1.0 (* x 0.5))) eps) 2.0)))))
/* Herbie alternative 14: piecewise approximation of
 * ((1+1/eps)*e^(-(1-eps)x) - (1/eps-1)*e^(-(1+eps)x)) / 2,
 * with the regime selected by the magnitude of x. */
double code(double x, double eps) {
	if (x <= -8.4e+113) {
		/* Cubic Taylor polynomial of expm1(-x), scaled by 1/(2*eps). */
		double cubic = -1.0 + x * (0.5 + x * -0.16666666666666666);
		return ((x * cubic) / eps) / 2.0;
	}
	if (x <= 120.0) {
		return (2.0 - eps * x) / 2.0;  /* linear regime near x = 0 */
	}
	if (x <= 2.2e+152) {
		return 0.0;                    /* value rounds to zero here */
	}
	/* Quadratic tail for extremely large x. */
	return ((x * (-1.0 + x * 0.5)) / eps) / 2.0;
}
! Herbie alternative 14: piecewise approximation selected by the range of x.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= (-8.4d+113)) then
        ! Cubic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
        tmp = ((x * ((-1.0d0) + (x * (0.5d0 + (x * (-0.16666666666666666d0)))))) / eps) / 2.0d0
    else if (x <= 120.0d0) then
        ! Linear regime near x = 0.
        tmp = (2.0d0 - (eps * x)) / 2.0d0
    else if (x <= 2.2d+152) then
        ! The approximation is identically zero in this band.
        tmp = 0.0d0
    else
        ! Quadratic tail for extremely large x.
        tmp = ((x * ((-1.0d0) + (x * 0.5d0))) / eps) / 2.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 14: piecewise approximation of the original
 * expression, with the regime selected by the magnitude of x.
 */
public static double code(double x, double eps) {
	if (x <= -8.4e+113) {
		// Cubic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
		final double cubic = -1.0 + x * (0.5 + x * -0.16666666666666666);
		return x * cubic / eps / 2.0;
	}
	if (x <= 120.0) {
		return (2.0 - eps * x) / 2.0;  // linear regime near x = 0
	}
	if (x <= 2.2e+152) {
		return 0.0;                    // value rounds to zero here
	}
	// Quadratic tail for extremely large x.
	return x * (-1.0 + x * 0.5) / eps / 2.0;
}
def code(x, eps):
	"""Herbie alternative 14: piecewise approximation of the original
	expression, with the regime selected by the magnitude of x."""
	if x <= -8.4e+113:
		# Cubic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
		cubic = -1.0 + (x * (0.5 + (x * -0.16666666666666666)))
		return ((x * cubic) / eps) / 2.0
	if x <= 120.0:
		# Linear regime near x = 0.
		return (2.0 - (eps * x)) / 2.0
	if x <= 2.2e+152:
		# The approximation is identically zero in this band.
		return 0.0
	# Quadratic tail for extremely large x.
	return ((x * (-1.0 + (x * 0.5))) / eps) / 2.0
# Herbie alternative 14: piecewise approximation selected by the range of x.
function code(x, eps)
	tmp = 0.0
	if (x <= -8.4e+113)
		# Cubic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * -0.16666666666666666))))) / eps) / 2.0);
	elseif (x <= 120.0)
		# Linear regime near x = 0.
		tmp = Float64(Float64(2.0 - Float64(eps * x)) / 2.0);
	elseif (x <= 2.2e+152)
		# The approximation is identically zero in this band.
		tmp = 0.0;
	else
		# Quadratic tail for extremely large x.
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * 0.5))) / eps) / 2.0);
	end
	return tmp
end
% Herbie alternative 14: piecewise approximation selected by the range of x.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= -8.4e+113)
		% Cubic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
		tmp = ((x * (-1.0 + (x * (0.5 + (x * -0.16666666666666666))))) / eps) / 2.0;
	elseif (x <= 120.0)
		% Linear regime near x = 0.
		tmp = (2.0 - (eps * x)) / 2.0;
	elseif (x <= 2.2e+152)
		% The approximation is identically zero in this band.
		tmp = 0.0;
	else
		% Quadratic tail for extremely large x.
		tmp = ((x * (-1.0 + (x * 0.5))) / eps) / 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 14: piecewise approximation selected by the range of x. *)
code[x_, eps_] := If[LessEqual[x, -8.4e+113], N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 120.0], N[(N[(2.0 - N[(eps * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 2.2e+152], 0.0, N[(N[(N[(x * N[(-1.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -8.4 \cdot 10^{+113}:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)}{\varepsilon}}{2}\\

\mathbf{elif}\;x \leq 120:\\
\;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\

\mathbf{elif}\;x \leq 2.2 \cdot 10^{+152}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot 0.5\right)}{\varepsilon}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 4 regimes
  2. if x < -8.3999999999999996e113

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 51.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 50.0%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define50.0%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-150.0%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified50.0%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 50.0%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + -0.16666666666666666 \cdot x\right) - 1\right)}}{\varepsilon}}{2} \]

    if -8.3999999999999996e113 < x < 120

    1. Initial program 56.3%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified45.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 71.2%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 71.1%

      \[\leadsto \frac{2 + x \cdot \left(\left(\color{blue}{\frac{-1}{\varepsilon}} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Taylor expanded in x around 0 71.1%

      \[\leadsto \frac{\color{blue}{2 + -1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    7. Step-by-step derivation
      1. mul-1-neg71.1%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon \cdot x\right)}}{2} \]
      2. *-commutative71.1%

        \[\leadsto \frac{2 + \left(-\color{blue}{x \cdot \varepsilon}\right)}{2} \]
      3. unsub-neg71.1%

        \[\leadsto \frac{\color{blue}{2 - x \cdot \varepsilon}}{2} \]
      4. *-commutative71.1%

        \[\leadsto \frac{2 - \color{blue}{\varepsilon \cdot x}}{2} \]
    8. Simplified71.1%

      \[\leadsto \frac{\color{blue}{2 - \varepsilon \cdot x}}{2} \]

    if 120 < x < 2.1999999999999998e152

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 38.5%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub38.5%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg38.5%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp38.5%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses38.5%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval38.5%

        \[\leadsto \color{blue}{0} \]
    6. Simplified38.5%

      \[\leadsto \color{blue}{0} \]

    if 2.1999999999999998e152 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 31.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 1.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define1.9%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-11.9%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified1.9%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 30.4%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(0.5 \cdot x - 1\right)}}{\varepsilon}}{2} \]
  3. Recombined 4 regimes into one program.
  4. Final simplification59.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -8.4 \cdot 10^{+113}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot -0.16666666666666666\right)\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 120:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 2.2 \cdot 10^{+152}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot 0.5\right)}{\varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 15: 59.6% accurate, 8.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -9500:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(1 + x\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot 0.5\right)\right)\right)\right)}{\varepsilon}}{2}\\ \end{array} \end{array} \]
;; Herbie alternative 15: quartic Taylor tail of expm1(-x)/(2*eps) for
;; x <= -9500, otherwise a cubic polynomial whose eps factor cancels
;; (kept exactly as derived by the tool).
(FPCore (x eps)
 :precision binary64
 (if (<= x -9500.0)
   (/
    (/
     (*
      x
      (+
       -1.0
       (* x (+ 0.5 (* x (- (* x 0.041666666666666664) 0.16666666666666666))))))
     eps)
    2.0)
   (/
    (/ (* eps (* 2.0 (* (+ 1.0 x) (+ 1.0 (* x (+ -1.0 (* x 0.5))))))) eps)
    2.0)))
/* Herbie alternative 15: quartic Taylor tail of expm1(-x)/(2*eps) for
 * x <= -9500; otherwise a cubic polynomial whose eps factor cancels
 * algebraically but is kept to mirror the derivation exactly. */
double code(double x, double eps) {
	if (x <= -9500.0) {
		double quartic = -1.0 + x * (0.5 + x * (x * 0.041666666666666664 - 0.16666666666666666));
		return (x * quartic / eps) / 2.0;
	}
	double cubic = 1.0 + x * (-1.0 + x * 0.5);
	return (eps * (2.0 * ((1.0 + x) * cubic)) / eps) / 2.0;
}
! Herbie alternative 15: quartic Taylor tail for x <= -9500, cubic otherwise.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= (-9500.0d0)) then
        ! Quartic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
        tmp = ((x * ((-1.0d0) + (x * (0.5d0 + (x * ((x * 0.041666666666666664d0) - 0.16666666666666666d0)))))) / eps) / 2.0d0
    else
        ! Cubic polynomial; the eps factor cancels but is kept as derived.
        tmp = ((eps * (2.0d0 * ((1.0d0 + x) * (1.0d0 + (x * ((-1.0d0) + (x * 0.5d0))))))) / eps) / 2.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 15: quartic Taylor tail of expm1(-x)/(2*eps) for
 * x <= -9500; otherwise a cubic polynomial whose eps factor cancels
 * algebraically but is kept to mirror the derivation exactly.
 */
public static double code(double x, double eps) {
	if (x <= -9500.0) {
		double quartic = -1.0 + x * (0.5 + x * (x * 0.041666666666666664 - 0.16666666666666666));
		return x * quartic / eps / 2.0;
	}
	double cubic = 1.0 + x * (-1.0 + x * 0.5);
	return eps * (2.0 * ((1.0 + x) * cubic)) / eps / 2.0;
}
def code(x, eps):
	"""Herbie alternative 15: quartic Taylor tail of expm1(-x)/(2*eps) for
	x <= -9500, otherwise a cubic polynomial whose eps factor cancels
	algebraically but is kept to mirror the derivation exactly."""
	if x <= -9500.0:
		quartic = 0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666))
		return ((x * (-1.0 + (x * quartic))) / eps) / 2.0
	cubic = 1.0 + (x * (-1.0 + (x * 0.5)))
	return ((eps * (2.0 * ((1.0 + x) * cubic))) / eps) / 2.0
# Herbie alternative 15: quartic Taylor tail for x <= -9500, cubic otherwise.
function code(x, eps)
	tmp = 0.0
	if (x <= -9500.0)
		# Quartic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * Float64(0.5 + Float64(x * Float64(Float64(x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0);
	else
		# Cubic polynomial; the eps factor cancels but is kept as derived.
		tmp = Float64(Float64(Float64(eps * Float64(2.0 * Float64(Float64(1.0 + x) * Float64(1.0 + Float64(x * Float64(-1.0 + Float64(x * 0.5))))))) / eps) / 2.0);
	end
	return tmp
end
% Herbie alternative 15: quartic Taylor tail for x <= -9500, cubic otherwise.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= -9500.0)
		% Quartic Taylor polynomial of expm1(-x), scaled by 1/(2*eps).
		tmp = ((x * (-1.0 + (x * (0.5 + (x * ((x * 0.041666666666666664) - 0.16666666666666666)))))) / eps) / 2.0;
	else
		% Cubic polynomial; the eps factor cancels but is kept as derived.
		tmp = ((eps * (2.0 * ((1.0 + x) * (1.0 + (x * (-1.0 + (x * 0.5))))))) / eps) / 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 15: quartic Taylor tail for x <= -9500, cubic otherwise. *)
code[x_, eps_] := If[LessEqual[x, -9500.0], N[(N[(N[(x * N[(-1.0 + N[(x * N[(0.5 + N[(x * N[(N[(x * 0.041666666666666664), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[(eps * N[(2.0 * N[(N[(1.0 + x), $MachinePrecision] * N[(1.0 + N[(x * N[(-1.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -9500:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(1 + x\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot 0.5\right)\right)\right)\right)}{\varepsilon}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < -9500

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 52.9%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 48.6%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define48.6%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-148.6%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified48.6%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 34.9%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(x \cdot \left(0.5 + x \cdot \left(0.041666666666666664 \cdot x - 0.16666666666666666\right)\right) - 1\right)}}{\varepsilon}}{2} \]

    if -9500 < x

    1. Initial program 67.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified52.3%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, e^{x \cdot \left(\varepsilon + -1\right)}, {\left(e^{1 + \varepsilon}\right)}^{\left(-x\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right)\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 33.8%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} + \left(-1 \cdot e^{-1 \cdot x} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)\right)}{\varepsilon}}}{2} \]
    5. Step-by-step derivation
      1. associate-+r+67.4%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} + -1 \cdot e^{-1 \cdot x}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      2. mul-1-neg67.4%

        \[\leadsto \frac{\frac{\left(e^{-1 \cdot x} + \color{blue}{\left(-e^{-1 \cdot x}\right)}\right) + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      3. sub-neg67.4%

        \[\leadsto \frac{\frac{\color{blue}{\left(e^{-1 \cdot x} - e^{-1 \cdot x}\right)} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      4. +-inverses67.4%

        \[\leadsto \frac{\frac{\color{blue}{0} + \varepsilon \cdot \left(2 \cdot e^{-1 \cdot x} + 2 \cdot \left(x \cdot e^{-1 \cdot x}\right)\right)}{\varepsilon}}{2} \]
      5. distribute-lft-out67.4%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \color{blue}{\left(2 \cdot \left(e^{-1 \cdot x} + x \cdot e^{-1 \cdot x}\right)\right)}}{\varepsilon}}{2} \]
      6. distribute-rgt1-in67.4%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \color{blue}{\left(\left(x + 1\right) \cdot e^{-1 \cdot x}\right)}\right)}{\varepsilon}}{2} \]
      7. mul-1-neg67.4%

        \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)\right)}{\varepsilon}}{2} \]
    6. Simplified67.4%

      \[\leadsto \frac{\color{blue}{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot e^{-x}\right)\right)}{\varepsilon}}}{2} \]
    7. Taylor expanded in x around 0 70.9%

      \[\leadsto \frac{\frac{0 + \varepsilon \cdot \left(2 \cdot \left(\left(x + 1\right) \cdot \color{blue}{\left(1 + x \cdot \left(0.5 \cdot x - 1\right)\right)}\right)\right)}{\varepsilon}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification66.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -9500:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot \left(0.5 + x \cdot \left(x \cdot 0.041666666666666664 - 0.16666666666666666\right)\right)\right)}{\varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{\varepsilon \cdot \left(2 \cdot \left(\left(1 + x\right) \cdot \left(1 + x \cdot \left(-1 + x \cdot 0.5\right)\right)\right)\right)}{\varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 16: 56.7% accurate, 10.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 210:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 2.2 \cdot 10^{+152}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot 0.5\right)}{\varepsilon}}{2}\\ \end{array} \end{array} \]
;; Herbie alternative 16: three x-regimes (linear, zero, quadratic tail).
(FPCore (x eps)
 :precision binary64
 (if (<= x 210.0)
   (/ (- 2.0 (* eps x)) 2.0)
   (if (<= x 2.2e+152) 0.0 (/ (/ (* x (+ -1.0 (* x 0.5))) eps) 2.0))))
/* Herbie alternative 16: linear regime near zero, identically zero in the
 * underflow band, quadratic tail for very large x. */
double code(double x, double eps) {
	if (x <= 210.0)
		return (2.0 - eps * x) / 2.0;  /* linear regime near x = 0 */
	if (x <= 2.2e+152)
		return 0.0;                    /* value rounds to zero here */
	/* Quadratic tail for extremely large x. */
	return x * (-1.0 + x * 0.5) / eps / 2.0;
}
! Herbie alternative 16: three regimes (linear, zero, quadratic tail).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= 210.0d0) then
        ! Linear regime near x = 0.
        tmp = (2.0d0 - (eps * x)) / 2.0d0
    else if (x <= 2.2d+152) then
        ! The approximation is identically zero in this band.
        tmp = 0.0d0
    else
        ! Quadratic tail for extremely large x.
        tmp = ((x * ((-1.0d0) + (x * 0.5d0))) / eps) / 2.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 16: linear regime near zero, identically zero in the
 * underflow band, quadratic tail for very large x.
 */
public static double code(double x, double eps) {
	if (x <= 210.0) {
		return (2.0 - eps * x) / 2.0;  // linear regime near x = 0
	}
	if (x <= 2.2e+152) {
		return 0.0;                    // value rounds to zero here
	}
	// Quadratic tail for extremely large x.
	return x * (-1.0 + x * 0.5) / eps / 2.0;
}
def code(x, eps):
	"""Herbie alternative 16: linear regime near zero, identically zero in
	the underflow band, quadratic tail for very large x."""
	if x <= 210.0:
		return (2.0 - (eps * x)) / 2.0
	if x <= 2.2e+152:
		return 0.0
	return ((x * (-1.0 + (x * 0.5))) / eps) / 2.0
# Herbie alternative 16: three regimes (linear, zero, quadratic tail).
function code(x, eps)
	tmp = 0.0
	if (x <= 210.0)
		# Linear regime near x = 0.
		tmp = Float64(Float64(2.0 - Float64(eps * x)) / 2.0);
	elseif (x <= 2.2e+152)
		# The approximation is identically zero in this band.
		tmp = 0.0;
	else
		# Quadratic tail for extremely large x.
		tmp = Float64(Float64(Float64(x * Float64(-1.0 + Float64(x * 0.5))) / eps) / 2.0);
	end
	return tmp
end
% Herbie alternative 16: three regimes (linear, zero, quadratic tail).
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= 210.0)
		% Linear regime near x = 0.
		tmp = (2.0 - (eps * x)) / 2.0;
	elseif (x <= 2.2e+152)
		% The approximation is identically zero in this band.
		tmp = 0.0;
	else
		% Quadratic tail for extremely large x.
		tmp = ((x * (-1.0 + (x * 0.5))) / eps) / 2.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 16: three regimes (linear, zero, quadratic tail). *)
code[x_, eps_] := If[LessEqual[x, 210.0], N[(N[(2.0 - N[(eps * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[x, 2.2e+152], 0.0, N[(N[(N[(x * N[(-1.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / eps), $MachinePrecision] / 2.0), $MachinePrecision]]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 210:\\
\;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\

\mathbf{elif}\;x \leq 2.2 \cdot 10^{+152}:\\
\;\;\;\;0\\

\mathbf{else}:\\
\;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot 0.5\right)}{\varepsilon}}{2}\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < 210

    1. Initial program 60.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified51.7%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 63.9%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 67.3%

      \[\leadsto \frac{2 + x \cdot \left(\left(\color{blue}{\frac{-1}{\varepsilon}} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Taylor expanded in x around 0 67.3%

      \[\leadsto \frac{\color{blue}{2 + -1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    7. Step-by-step derivation
      1. mul-1-neg67.3%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon \cdot x\right)}}{2} \]
      2. *-commutative67.3%

        \[\leadsto \frac{2 + \left(-\color{blue}{x \cdot \varepsilon}\right)}{2} \]
      3. unsub-neg67.3%

        \[\leadsto \frac{\color{blue}{2 - x \cdot \varepsilon}}{2} \]
      4. *-commutative67.3%

        \[\leadsto \frac{2 - \color{blue}{\varepsilon \cdot x}}{2} \]
    8. Simplified67.3%

      \[\leadsto \frac{\color{blue}{2 - \varepsilon \cdot x}}{2} \]

    if 210 < x < 2.1999999999999998e152

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 38.5%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub38.5%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg38.5%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp38.5%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses38.5%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval38.5%

        \[\leadsto \color{blue}{0} \]
    6. Simplified38.5%

      \[\leadsto \color{blue}{0} \]

    if 2.1999999999999998e152 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 31.6%

      \[\leadsto \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} - 1\right)}}{2} \]
    5. Taylor expanded in eps around 0 1.9%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - 1}{\varepsilon}}}{2} \]
    6. Step-by-step derivation
      1. expm1-define1.9%

        \[\leadsto \frac{\frac{\color{blue}{\mathsf{expm1}\left(-1 \cdot x\right)}}{\varepsilon}}{2} \]
      2. neg-mul-11.9%

        \[\leadsto \frac{\frac{\mathsf{expm1}\left(\color{blue}{-x}\right)}{\varepsilon}}{2} \]
    7. Simplified1.9%

      \[\leadsto \frac{\color{blue}{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}}{2} \]
    8. Taylor expanded in x around 0 30.4%

      \[\leadsto \frac{\frac{\color{blue}{x \cdot \left(0.5 \cdot x - 1\right)}}{\varepsilon}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification58.4%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 210:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{elif}\;x \leq 2.2 \cdot 10^{+152}:\\ \;\;\;\;0\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{x \cdot \left(-1 + x \cdot 0.5\right)}{\varepsilon}}{2}\\ \end{array} \]
  5. Add Preprocessing

Alternative 17: 60.7% accurate, 18.9× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 200:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
;; Herbie alternative 17: linear (2 - eps*x)/2 below x = 200, zero above.
(FPCore (x eps)
 :precision binary64
 (if (<= x 200.0) (/ (- 2.0 (* eps x)) 2.0) 0.0))
/* Herbie alternative 17: linear approximation (2 - eps*x)/2 below the
 * cutoff x = 200; the true value rounds to zero above it. */
double code(double x, double eps) {
	return (x <= 200.0) ? (2.0 - eps * x) / 2.0 : 0.0;
}
! Herbie alternative 17: linear approximation below x = 200, zero above.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= 200.0d0) then
        tmp = (2.0d0 - (eps * x)) / 2.0d0
    else
        ! The approximation is identically zero beyond the cutoff.
        tmp = 0.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 17: linear approximation (2 - eps*x)/2 below the
 * cutoff x = 200; the true value rounds to zero above it.
 */
public static double code(double x, double eps) {
	return (x <= 200.0) ? (2.0 - eps * x) / 2.0 : 0.0;
}
def code(x, eps):
	"""Herbie alternative 17: linear approximation (2 - eps*x)/2 below the
	cutoff x = 200; the true value rounds to zero above it."""
	return (2.0 - (eps * x)) / 2.0 if x <= 200.0 else 0.0
# Herbie alternative 17: linear approximation below x = 200, zero above.
function code(x, eps)
	tmp = 0.0
	if (x <= 200.0)
		tmp = Float64(Float64(2.0 - Float64(eps * x)) / 2.0);
	else
		# The approximation is identically zero beyond the cutoff.
		tmp = 0.0;
	end
	return tmp
end
% Herbie alternative 17: linear approximation below x = 200, zero above.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= 200.0)
		tmp = (2.0 - (eps * x)) / 2.0;
	else
		% The approximation is identically zero beyond the cutoff.
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 17: linear approximation below x = 200, zero above. *)
code[x_, eps_] := If[LessEqual[x, 200.0], N[(N[(2.0 - N[(eps * x), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 200:\\
\;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 200

    1. Initial program 60.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified51.7%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 63.9%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 67.3%

      \[\leadsto \frac{2 + x \cdot \left(\left(\color{blue}{\frac{-1}{\varepsilon}} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Taylor expanded in x around 0 67.3%

      \[\leadsto \frac{\color{blue}{2 + -1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    7. Step-by-step derivation
      1. mul-1-neg67.3%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon \cdot x\right)}}{2} \]
      2. *-commutative67.3%

        \[\leadsto \frac{2 + \left(-\color{blue}{x \cdot \varepsilon}\right)}{2} \]
      3. unsub-neg67.3%

        \[\leadsto \frac{\color{blue}{2 - x \cdot \varepsilon}}{2} \]
      4. *-commutative67.3%

        \[\leadsto \frac{2 - \color{blue}{\varepsilon \cdot x}}{2} \]
    8. Simplified67.3%

      \[\leadsto \frac{\color{blue}{2 - \varepsilon \cdot x}}{2} \]

    if 200 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 40.1%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub40.1%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg40.1%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp40.1%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses40.1%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval40.1%

        \[\leadsto \color{blue}{0} \]
    6. Simplified40.1%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 18: 61.1% accurate, 20.6× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq -0.32:\\ \;\;\;\;x \cdot \left(\varepsilon \cdot -0.5\right)\\ \mathbf{elif}\;x \leq 600:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
;; Herbie alternative 18: x*(-eps/2) for x <= -0.32, 1 in the middle band, 0 beyond.
(FPCore (x eps)
 :precision binary64
 (if (<= x -0.32) (* x (* eps -0.5)) (if (<= x 600.0) 1.0 0.0)))
/* Herbie alternative 18: x * (-eps/2) for x <= -0.32, constant 1 in the
 * middle band, 0 beyond x = 600. */
double code(double x, double eps) {
	if (x <= -0.32)
		return x * (eps * -0.5);
	return (x <= 600.0) ? 1.0 : 0.0;
}
! Herbie alternative 18: x*(-eps/2) for x <= -0.32, then 1, then 0.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= (-0.32d0)) then
        tmp = x * (eps * (-0.5d0))
    else if (x <= 600.0d0) then
        ! Constant regime around x = 0.
        tmp = 1.0d0
    else
        ! The approximation is identically zero beyond the cutoff.
        tmp = 0.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 18: x * (-eps/2) for x <= -0.32, constant 1 in the
 * middle band, 0 beyond x = 600.
 */
public static double code(double x, double eps) {
	if (x <= -0.32) {
		return x * (eps * -0.5);
	}
	return (x <= 600.0) ? 1.0 : 0.0;
}
def code(x, eps):
	"""Herbie alternative 18: x * (-eps/2) for x <= -0.32, constant 1 in
	the middle band, 0 beyond x = 600."""
	if x <= -0.32:
		return x * (eps * -0.5)
	return 1.0 if x <= 600.0 else 0.0
# Herbie alternative 18: x*(-eps/2) for x <= -0.32, then 1, then 0.
function code(x, eps)
	tmp = 0.0
	if (x <= -0.32)
		tmp = Float64(x * Float64(eps * -0.5));
	elseif (x <= 600.0)
		# Constant regime around x = 0.
		tmp = 1.0;
	else
		# The approximation is identically zero beyond the cutoff.
		tmp = 0.0;
	end
	return tmp
end
% Herbie alternative 18: piecewise approximation of the NMSE 6.1
% expression; regimes taken from the report's derivation.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= -0.32)
		% Taylor-expanded regime: (-0.5*eps)*x
		tmp = x * (eps * -0.5);
	elseif (x <= 600.0)
		tmp = 1.0;
	else
		% x > 600: report derivation reduces this regime to 0
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 18: piecewise approximation — (-0.5*eps)*x for x <= -0.32, 1 on (-0.32, 600], 0 otherwise. *)
code[x_, eps_] := If[LessEqual[x, -0.32], N[(x * N[(eps * -0.5), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 600.0], 1.0, 0.0]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.32:\\
\;\;\;\;x \cdot \left(\varepsilon \cdot -0.5\right)\\

\mathbf{elif}\;x \leq 600:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if x < -0.320000000000000007

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in x around 0 3.1%

      \[\leadsto \frac{\color{blue}{2 + x \cdot \left(\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot \left(\varepsilon - 1\right) + \frac{1}{\varepsilon}\right) - \varepsilon\right)}}{2} \]
    5. Taylor expanded in eps around 0 26.2%

      \[\leadsto \frac{2 + x \cdot \left(\left(\color{blue}{\frac{-1}{\varepsilon}} + \frac{1}{\varepsilon}\right) - \varepsilon\right)}{2} \]
    6. Taylor expanded in x around 0 26.2%

      \[\leadsto \frac{\color{blue}{2 + -1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]
    7. Step-by-step derivation
      1. mul-1-neg26.2%

        \[\leadsto \frac{2 + \color{blue}{\left(-\varepsilon \cdot x\right)}}{2} \]
      2. *-commutative26.2%

        \[\leadsto \frac{2 + \left(-\color{blue}{x \cdot \varepsilon}\right)}{2} \]
      3. unsub-neg26.2%

        \[\leadsto \frac{\color{blue}{2 - x \cdot \varepsilon}}{2} \]
      4. *-commutative26.2%

        \[\leadsto \frac{2 - \color{blue}{\varepsilon \cdot x}}{2} \]
    8. Simplified26.2%

      \[\leadsto \frac{\color{blue}{2 - \varepsilon \cdot x}}{2} \]
    9. Taylor expanded in eps around inf 26.2%

      \[\leadsto \color{blue}{-0.5 \cdot \left(\varepsilon \cdot x\right)} \]
    10. Step-by-step derivation
      1. associate-*r*26.2%

        \[\leadsto \color{blue}{\left(-0.5 \cdot \varepsilon\right) \cdot x} \]
    11. Simplified26.2%

      \[\leadsto \color{blue}{\left(-0.5 \cdot \varepsilon\right) \cdot x} \]

    if -0.320000000000000007 < x < 600

    1. Initial program 51.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified40.2%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 96.6%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 96.7%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative96.7%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified96.7%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 78.4%

      \[\leadsto \color{blue}{1} \]

    if 600 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 40.1%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub40.1%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg40.1%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp40.1%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses40.1%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval40.1%

        \[\leadsto \color{blue}{0} \]
    6. Simplified40.1%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 60.7%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq -0.32:\\ \;\;\;\;x \cdot \left(\varepsilon \cdot -0.5\right)\\ \mathbf{elif}\;x \leq 600:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
  5. Add Preprocessing

Alternative 19: 57.7% accurate, 37.7× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 520:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \end{array} \]
;; Herbie alternative 19 (57.7% accurate): step approximation, 1 for x <= 520 else 0.
(FPCore (x eps) :precision binary64 (if (<= x 520.0) 1.0 0.0))
/* Herbie alternative 19: step approximation of the NMSE 6.1
 * expression — 1 for x <= 520, 0 beyond; eps does not affect the
 * result in this approximation.
 */
double code(double x, double eps) {
	return (x <= 520.0) ? 1.0 : 0.0;
}
! Herbie alternative 19: step approximation — 1 for x <= 520, else 0.
! eps does not affect the result in this approximation.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: tmp
    if (x <= 520.0d0) then
        tmp = 1.0d0
    else
        tmp = 0.0d0
    end if
    code = tmp
end function
/**
 * Herbie alternative 19: step approximation of the NMSE 6.1
 * expression — 1 for x <= 520, 0 for larger x; eps is unused.
 */
public static double code(double x, double eps) {
	return (x <= 520.0) ? 1.0 : 0.0;
}
def code(x, eps):
	"""Herbie alternative 19: step approximation — 1.0 for x <= 520,
	else 0.0; eps is unused in this approximation."""
	return 1.0 if x <= 520.0 else 0.0
# Herbie alternative 19: step approximation — 1 for x <= 520, else 0.
# eps does not affect the result in this approximation.
function code(x, eps)
	tmp = 0.0
	if (x <= 520.0)
		tmp = 1.0;
	else
		tmp = 0.0;
	end
	return tmp
end
% Herbie alternative 19: step approximation — 1 for x <= 520, else 0.
% eps does not affect the result in this approximation.
function tmp_2 = code(x, eps)
	tmp = 0.0;
	if (x <= 520.0)
		tmp = 1.0;
	else
		tmp = 0.0;
	end
	tmp_2 = tmp;
end
(* Herbie alternative 19: step approximation — 1 for x <= 520, else 0. *)
code[x_, eps_] := If[LessEqual[x, 520.0], 1.0, 0.0]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;x \leq 520:\\
\;\;\;\;1\\

\mathbf{else}:\\
\;\;\;\;0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if x < 520

    1. Initial program 60.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified51.7%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around inf 97.3%

      \[\leadsto \frac{\color{blue}{e^{x \cdot \left(\varepsilon - 1\right)} + \frac{1}{e^{x + \varepsilon \cdot x}}}}{2} \]
    5. Taylor expanded in eps around inf 97.4%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    6. Step-by-step derivation
      1. *-commutative97.4%

        \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    7. Simplified97.4%

      \[\leadsto \frac{e^{\color{blue}{x \cdot \varepsilon}} + \frac{1}{e^{x + \varepsilon \cdot x}}}{2} \]
    8. Taylor expanded in x around 0 63.9%

      \[\leadsto \color{blue}{1} \]

    if 520 < x

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified100.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
    3. Add Preprocessing
    4. Taylor expanded in eps around 0 40.1%

      \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
    5. Step-by-step derivation
      1. div-sub40.1%

        \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
      2. mul-1-neg40.1%

        \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      3. rec-exp40.1%

        \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
      4. +-inverses40.1%

        \[\leadsto 0.5 \cdot \color{blue}{0} \]
      5. metadata-eval40.1%

        \[\leadsto \color{blue}{0} \]
    6. Simplified40.1%

      \[\leadsto \color{blue}{0} \]
  3. Recombined 2 regimes into one program.
  4. Add Preprocessing

Alternative 20: 16.9% accurate, 227.0× speedup?

\[\begin{array}{l} \\ 0 \end{array} \]
;; Herbie alternative 20 (16.9% accurate): constant-0 approximation.
(FPCore (x eps) :precision binary64 0.0)
/* Herbie alternative 20: constant approximation — always 0; both
 * arguments are ignored. */
double code(double x, double eps) {
	return 0.0;
}
! Herbie alternative 20: constant approximation — always returns 0.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = 0.0d0
end function
/** Herbie alternative 20: constant approximation — always 0; both arguments are ignored. */
public static double code(double x, double eps) {
	return 0.0;
}
def code(x, eps):
	# Herbie alternative 20: constant approximation — always 0.0;
	# both arguments are ignored.
	return 0.0
# Herbie alternative 20: constant approximation — always returns 0.0.
function code(x, eps)
	return 0.0
end
% Herbie alternative 20: constant approximation — always returns 0.
function tmp = code(x, eps)
	tmp = 0.0;
end
(* Herbie alternative 20: constant approximation — always 0. *)
code[x_, eps_] := 0.0
\begin{array}{l}

\\
0
\end{array}
Derivation
  1. Initial program 71.5%

    \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
  2. Simplified64.7%

    \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{x}\right)}^{\left(\varepsilon + -1\right)}, \frac{1 + \frac{-1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
  3. Add Preprocessing
  4. Taylor expanded in eps around 0 12.5%

    \[\leadsto \color{blue}{0.5 \cdot \frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}} \]
  5. Step-by-step derivation
    1. div-sub12.5%

      \[\leadsto 0.5 \cdot \color{blue}{\left(\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right)} \]
    2. mul-1-neg12.5%

      \[\leadsto 0.5 \cdot \left(\frac{e^{\color{blue}{-x}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
    3. rec-exp12.5%

      \[\leadsto 0.5 \cdot \left(\frac{\color{blue}{\frac{1}{e^{x}}}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}\right) \]
    4. +-inverses12.7%

      \[\leadsto 0.5 \cdot \color{blue}{0} \]
    5. metadata-eval12.7%

      \[\leadsto \color{blue}{0} \]
  6. Simplified12.7%

    \[\leadsto \color{blue}{0} \]
  7. Add Preprocessing

Reproduce

?
herbie shell --seed 2024152 
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))