NMSE Section 6.1 mentioned, A

?

Percentage Accurate: 63.2% → 99.2%
Time: 21.0s
Precision: binary64
Cost: 28100

?

\[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
\[\begin{array}{l} t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\ t_1 := e^{x \cdot \left(\varepsilon + -1\right)}\\ t_2 := \left(1 + x\right) \cdot e^{-x}\\ \mathbf{if}\;t_1 \cdot \left(1 - \frac{-1}{\varepsilon}\right) + t_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\ \;\;\;\;\frac{t_1 + t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t_2 + t_2}{2}\\ \end{array} \]
; FPCore for the original program "NMSE Section 6.1 mentioned, A":
; ((1 + 1/eps) * e^(-(1 - eps)*x) - (1/eps - 1) * e^(-(1 + eps)*x)) / 2
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
; FPCore for Herbie's suggested rewrite: average the two exponentials
; directly; when the overflow probe is not <= +inf (i.e. it evaluated to
; NaN via inf - inf), fall back to the Taylor form (1 + x) * e^(-x).
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (exp (* x (- -1.0 eps))))
        (t_1 (exp (* x (+ eps -1.0))))
        (t_2 (* (+ 1.0 x) (exp (- x)))))
   (if (<=
        (+ (* t_1 (- 1.0 (/ -1.0 eps))) (* t_0 (+ 1.0 (/ -1.0 eps))))
        INFINITY)
     (/ (+ t_1 t_0) 2.0)
     (/ (+ t_2 t_2) 2.0))))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
double code(double x, double eps) {
	double t_0 = exp((x * (-1.0 - eps)));
	double t_1 = exp((x * (eps + -1.0)));
	double t_2 = (1.0 + x) * exp(-x);
	double tmp;
	if (((t_1 * (1.0 - (-1.0 / eps))) + (t_0 * (1.0 + (-1.0 / eps)))) <= ((double) INFINITY)) {
		tmp = (t_1 + t_0) / 2.0;
	} else {
		tmp = (t_2 + t_2) / 2.0;
	}
	return tmp;
}
/** Original expression: ((1 + 1/eps) e^{-(1-eps)x} - (1/eps - 1) e^{-(1+eps)x}) / 2. */
public static double code(double x, double eps) {
	double invEps = 1.0 / eps;
	double grow = (1.0 + invEps) * Math.exp(-((1.0 - eps) * x));
	double decay = (invEps - 1.0) * Math.exp(-((1.0 + eps) * x));
	return (grow - decay) / 2.0;
}
/**
 * Herbie alternative: mean of the two exponentials, with a Taylor-based
 * fallback (1 + x) e^{-x} used when the overflow probe is not <= +inf
 * (the probe becomes NaN once both exponentials overflow to infinity).
 */
public static double code(double x, double eps) {
	double slow = Math.exp(x * (-1.0 - eps));
	double fast = Math.exp(x * (eps + -1.0));
	double series = (1.0 + x) * Math.exp(-x);
	double negInvEps = -1.0 / eps;
	double probe = fast * (1.0 - negInvEps) + slow * (1.0 + negInvEps);
	if (probe <= Double.POSITIVE_INFINITY) {
		return (fast + slow) / 2.0;
	}
	return (series + series) / 2.0;
}
def code(x, eps):
	return (((1.0 + (1.0 / eps)) * math.exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * math.exp(-((1.0 + eps) * x)))) / 2.0
def code(x, eps):
	"""Herbie alternative: mean of the two exponentials, with a Taylor-based
	fallback (1 + x)*e^{-x} used when the overflow probe is not <= inf
	(the probe becomes NaN once both exponentials overflow to infinity)."""
	slow = math.exp(x * (-1.0 - eps))
	fast = math.exp(x * (eps + -1.0))
	series = (1.0 + x) * math.exp(-x)
	neg_inv_eps = -1.0 / eps
	probe = fast * (1.0 - neg_inv_eps) + slow * (1.0 + neg_inv_eps)
	if probe <= math.inf:
		return (fast + slow) / 2.0
	return (series + series) / 2.0
function code(x, eps)
	# Original expression, with every intermediate rounded to Float64
	# in the same order as Herbie emitted it.
	inv_eps = Float64(1.0 / eps)
	grow = Float64(Float64(1.0 + inv_eps) * exp(Float64(-Float64(Float64(1.0 - eps) * x))))
	decay = Float64(Float64(inv_eps - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))
	return Float64(Float64(grow - decay) / 2.0)
end
function code(x, eps)
	# Herbie alternative: mean of the two exponentials, with a Taylor
	# fallback (1 + x) * e^(-x) used when the overflow probe is not <= Inf
	# (the probe becomes NaN once both exponentials overflow to Inf).
	slow = exp(Float64(x * Float64(-1.0 - eps)))
	fast = exp(Float64(x * Float64(eps + -1.0)))
	series = Float64(Float64(1.0 + x) * exp(Float64(-x)))
	probe = Float64(Float64(fast * Float64(1.0 - Float64(-1.0 / eps))) + Float64(slow * Float64(1.0 + Float64(-1.0 / eps))))
	if probe <= Inf
		return Float64(Float64(fast + slow) / 2.0)
	end
	return Float64(Float64(series + series) / 2.0)
end
function tmp = code(x, eps)
	% Original expression: ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2.
	inv_eps = 1.0 / eps;
	grow = (1.0 + inv_eps) * exp(-((1.0 - eps) * x));
	decay = (inv_eps - 1.0) * exp(-((1.0 + eps) * x));
	tmp = (grow - decay) / 2.0;
end
function tmp_2 = code(x, eps)
	% Herbie alternative: mean of the two exponentials, with a Taylor
	% fallback (1 + x)*e^(-x) used when the overflow probe is not <= Inf
	% (the probe becomes NaN once both exponentials overflow to Inf).
	slow = exp(x * (-1.0 - eps));
	fast = exp(x * (eps + -1.0));
	series = (1.0 + x) * exp(-x);
	probe = fast * (1.0 - (-1.0 / eps)) + slow * (1.0 + (-1.0 / eps));
	if (probe <= Inf)
		tmp_2 = (fast + slow) / 2.0;
	else
		tmp_2 = (series + series) / 2.0;
	end
end
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
code[x_, eps_] := Block[{t$95$0 = N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[(N[(1.0 + x), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(t$95$1 * N[(1.0 - N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(t$95$0 * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], Infinity], N[(N[(t$95$1 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$2 + t$95$2), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\begin{array}{l}
t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\
t_1 := e^{x \cdot \left(\varepsilon + -1\right)}\\
t_2 := \left(1 + x\right) \cdot e^{-x}\\
\mathbf{if}\;t_1 \cdot \left(1 - \frac{-1}{\varepsilon}\right) + t_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\
\;\;\;\;\frac{t_1 + t_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{t_2 + t_2}{2}\\


\end{array}

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while dots represent individual samples.

Herbie found 12 alternatives:

Alternative | Accuracy | Speedup

Accuracy vs Speed

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed–accuracy tradeoffs.

Bogosity?

Bogosity

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 1 (/.f64 1 eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 1 eps) x)))) (*.f64 (-.f64 (/.f64 1 eps) 1) (exp.f64 (neg.f64 (*.f64 (+.f64 1 eps) x))))) < +inf.0

    1. Initial program 73.5%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 73.5%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Step-by-step derivation

      [Start]73.5%

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [=>]73.5%

      \[ \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]

      +-rgt-identity [<=]73.5%

      \[ \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [<=]73.5%

      \[ \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Taylor expanded in eps around inf 99.5%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    4. Simplified 99.5%

      \[\leadsto \frac{\color{blue}{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \left(\varepsilon + 1\right)}\right)}}{2} \]
      Step-by-step derivation

      [Start]99.5%

      \[ \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]

      mul-1-neg [=>]99.5%

      \[ \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]

      *-commutative [=>]99.5%

      \[ \frac{e^{-\color{blue}{x \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]

      mul-1-neg [=>]99.5%

      \[ \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]

      mul-1-neg [=>]99.5%

      \[ \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-\left(\varepsilon + 1\right) \cdot x}}\right)}{2} \]

      distribute-rgt1-in [<=]99.5%

      \[ \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{\left(x + \varepsilon \cdot x\right)}}\right)}{2} \]

      *-commutative [<=]99.5%

      \[ \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\left(x + \color{blue}{x \cdot \varepsilon}\right)}\right)}{2} \]

      *-rgt-identity [<=]99.5%

      \[ \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\left(\color{blue}{x \cdot 1} + x \cdot \varepsilon\right)}\right)}{2} \]

      distribute-lft-in [<=]99.5%

      \[ \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-\color{blue}{x \cdot \left(1 + \varepsilon\right)}}\right)}{2} \]

      +-commutative [=>]99.5%

      \[ \frac{e^{-x \cdot \left(1 - \varepsilon\right)} - \left(-e^{-x \cdot \color{blue}{\left(\varepsilon + 1\right)}}\right)}{2} \]

    if +inf.0 < (-.f64 (*.f64 (+.f64 1 (/.f64 1 eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 1 eps) x)))) (*.f64 (-.f64 (/.f64 1 eps) 1) (exp.f64 (neg.f64 (*.f64 (+.f64 1 eps) x)))))

    1. Initial program 0.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 0.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Step-by-step derivation

      [Start]0.0%

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [=>]0.0%

      \[ \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]

      +-rgt-identity [<=]0.0%

      \[ \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [<=]0.0%

      \[ \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Taylor expanded in eps around 0 0.0%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}}{2} \]
    4. Simplified 100.0%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - \left(-\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
      Step-by-step derivation

      [Start]0.0%

      \[ \frac{\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      *-commutative [=>]0.0%

      \[ \frac{\left(\color{blue}{x \cdot e^{-1 \cdot x}} + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      distribute-lft1-in [=>]0.0%

      \[ \frac{\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      mul-1-neg [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{\color{blue}{-x}} - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      distribute-lft-out [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \color{blue}{-1 \cdot \left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right)}}{2} \]

      mul-1-neg [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \color{blue}{\left(-\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right)\right)}}{2} \]

      *-commutative [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\left(\color{blue}{x \cdot e^{-1 \cdot x}} + e^{-1 \cdot x}\right)\right)}{2} \]

      distribute-lft1-in [=>]100.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}}\right)}{2} \]

      mul-1-neg [=>]100.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.6%

    \[\leadsto \begin{array}{l} \mathbf{if}\;e^{x \cdot \left(\varepsilon + -1\right)} \cdot \left(1 - \frac{-1}{\varepsilon}\right) + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + x\right) \cdot e^{-x} + \left(1 + x\right) \cdot e^{-x}}{2}\\ \end{array} \]

Alternatives

Alternative 1
Accuracy: 99.2%
Cost: 28100
\[\begin{array}{l} t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\ t_1 := e^{x \cdot \left(\varepsilon + -1\right)}\\ t_2 := \left(1 + x\right) \cdot e^{-x}\\ \mathbf{if}\;t_1 \cdot \left(1 - \frac{-1}{\varepsilon}\right) + t_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\ \;\;\;\;\frac{t_1 + t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t_2 + t_2}{2}\\ \end{array} \]
Alternative 2
Accuracy: 86.9%
Cost: 14028
\[\begin{array}{l} t_0 := e^{x \cdot \left(\varepsilon + -1\right)}\\ t_1 := e^{x \cdot \left(-1 - \varepsilon\right)}\\ t_2 := 1 + \frac{-1}{\varepsilon}\\ \mathbf{if}\;x \leq -8.2 \cdot 10^{+82}:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq -5.1 \cdot 10^{+28}:\\ \;\;\;\;\frac{\left(1 + \varepsilon \cdot x\right) + t_1 \cdot t_2}{2}\\ \mathbf{elif}\;x \leq -62:\\ \;\;\;\;\frac{t_0 \cdot \left(1 - \frac{-1}{\varepsilon}\right) + t_2}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t_0 + t_1}{2}\\ \end{array} \]
Alternative 3
Accuracy: 68.0%
Cost: 8012
\[\begin{array}{l} t_0 := e^{x \cdot \left(\varepsilon + -1\right)}\\ t_1 := 1 + \frac{-1}{\varepsilon}\\ \mathbf{if}\;x \leq -6.2 \cdot 10^{+82}:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq -3 \cdot 10^{+28}:\\ \;\;\;\;\frac{\left(1 + \varepsilon \cdot x\right) + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot t_1}{2}\\ \mathbf{elif}\;x \leq -2.05:\\ \;\;\;\;\frac{t_0 \cdot \left(1 - \frac{-1}{\varepsilon}\right) + t_1}{2}\\ \mathbf{elif}\;x \leq 1.15 \cdot 10^{+42}:\\ \;\;\;\;\frac{1 + t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 4
Accuracy: 68.0%
Cost: 7504
\[\begin{array}{l} t_0 := \frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{if}\;x \leq -6 \cdot 10^{+38}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq -3.4 \cdot 10^{+17}:\\ \;\;\;\;\frac{\frac{\left(1 - x\right) - e^{-x}}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq -1950:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq 7.8 \cdot 10^{+42}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 5
Accuracy: 67.9%
Cost: 7240
\[\begin{array}{l} \mathbf{if}\;x \leq -700:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 8.4 \cdot 10^{+48}:\\ \;\;\;\;\frac{1 + e^{x \cdot \left(\varepsilon + -1\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 6
Accuracy: 63.2%
Cost: 6916
\[\begin{array}{l} \mathbf{if}\;x \leq -95:\\ \;\;\;\;\frac{\frac{\mathsf{expm1}\left(-x\right)}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 445:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 7
Accuracy: 58.5%
Cost: 1228
\[\begin{array}{l} t_0 := \frac{0.5 \cdot \frac{x}{\frac{\varepsilon}{x}} - \frac{x}{\varepsilon}}{2}\\ \mathbf{if}\;x \leq -1.12 \cdot 10^{+218}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq -3.6 \cdot 10^{+146}:\\ \;\;\;\;\frac{-0.5 \cdot \frac{x \cdot x}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq -6.3:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq 540:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 8
Accuracy: 57.7%
Cost: 964
\[\begin{array}{l} \mathbf{if}\;x \leq -2.1:\\ \;\;\;\;\frac{x \cdot \left(\left(1 - \varepsilon\right) \cdot \left(-1 + \frac{-1}{\varepsilon}\right)\right)}{2}\\ \mathbf{elif}\;x \leq 600:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 9
Accuracy: 58.3%
Cost: 708
\[\begin{array}{l} \mathbf{if}\;x \leq -7.5 \cdot 10^{+16}:\\ \;\;\;\;\frac{-0.5 \cdot \frac{x \cdot x}{\varepsilon}}{2}\\ \mathbf{elif}\;x \leq 485:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 10
Accuracy: 53.8%
Cost: 452
\[\begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;\left(\varepsilon \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 460:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 11
Accuracy: 51.0%
Cost: 196
\[\begin{array}{l} \mathbf{if}\;x \leq 490:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 12
Accuracy: 14.4%
Cost: 64
\[0 \]

Reproduce?

herbie shell --seed 2023272 
; Input FPCore used to reproduce this Herbie run (pass to the shell command above).
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))