NMSE Section 6.1 mentioned, A

?

Percentage Accurate: 64.3% → 99.2%
Time: 21.4s
Precision: binary64
Cost: 28100

?

\[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
\[\begin{array}{l} t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\ t_1 := e^{x \cdot \left(\varepsilon + -1\right)}\\ t_2 := \left(1 + x\right) \cdot e^{-x}\\ \mathbf{if}\;t_1 \cdot \left(1 + \frac{1}{\varepsilon}\right) + t_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\ \;\;\;\;\frac{t_1 + t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t_2 + t_2}{2}\\ \end{array} \]
;; Original program: ((1 + 1/eps) * e^(-(1-eps)*x) - (1/eps - 1) * e^(-(1+eps)*x)) / 2,
;; evaluated directly in binary64. Loses accuracy when 1/eps overwhelms the exponentials.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
;; Herbie's rewrite: when the eps-rescaled sum stays finite (the `if` guard),
;; average the two exponentials directly; otherwise fall back to the Taylor-series
;; form (1 + x) * e^-x (taken around eps = 0).
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (exp (* x (- -1.0 eps))))     ; e^(-(1+eps)x)
        (t_1 (exp (* x (+ eps -1.0))))     ; e^(-(1-eps)x)
        (t_2 (* (+ 1.0 x) (exp (- x)))))   ; Taylor fallback (1+x)e^-x
   (if (<=
        (+ (* t_1 (+ 1.0 (/ 1.0 eps))) (* t_0 (+ 1.0 (/ -1.0 eps))))
        INFINITY)
     (/ (+ t_1 t_0) 2.0)
     (/ (+ t_2 t_2) 2.0))))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
double code(double x, double eps) {
	double t_0 = exp((x * (-1.0 - eps)));
	double t_1 = exp((x * (eps + -1.0)));
	double t_2 = (1.0 + x) * exp(-x);
	double tmp;
	if (((t_1 * (1.0 + (1.0 / eps))) + (t_0 * (1.0 + (-1.0 / eps)))) <= ((double) INFINITY)) {
		tmp = (t_1 + t_0) / 2.0;
	} else {
		tmp = (t_2 + t_2) / 2.0;
	}
	return tmp;
}
/**
 * Direct binary64 evaluation of
 * ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2.
 */
public static double code(double x, double eps) {
	double invEps = 1.0 / eps;
	double grow = (1.0 + invEps) * Math.exp(-((1.0 - eps) * x));
	double decay = (invEps - 1.0) * Math.exp(-((1.0 + eps) * x));
	return (grow - decay) / 2.0;
}
/**
 * Herbie's rewrite: average e^(-(1-eps)x) and e^(-(1+eps)x) directly when the
 * eps-rescaled sum stays finite; otherwise use the Taylor fallback (1+x)e^-x.
 */
public static double code(double x, double eps) {
	double fastDecay = Math.exp(x * (-1.0 - eps));  // e^(-(1+eps)x)
	double slowDecay = Math.exp(x * (eps + -1.0));  // e^(-(1-eps)x)
	double series = (1.0 + x) * Math.exp(-x);       // Taylor fallback
	double probe = slowDecay * (1.0 + 1.0 / eps) + fastDecay * (1.0 + -1.0 / eps);
	if (probe <= Double.POSITIVE_INFINITY) {
		return (slowDecay + fastDecay) / 2.0;
	}
	return (series + series) / 2.0;
}
def code(x, eps):
	"""Directly evaluate ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2."""
	inv_eps = 1.0 / eps
	grow = (1.0 + inv_eps) * math.exp(-((1.0 - eps) * x))
	decay = (inv_eps - 1.0) * math.exp(-((1.0 + eps) * x))
	return (grow - decay) / 2.0
def code(x, eps):
	"""Herbie's rewrite: average e^(-(1-eps)x) and e^(-(1+eps)x) directly when the
	eps-rescaled sum stays finite; otherwise use the Taylor fallback (1+x)e^-x."""
	fast_decay = math.exp(x * (-1.0 - eps))  # e^(-(1+eps)x)
	slow_decay = math.exp(x * (eps + -1.0))  # e^(-(1-eps)x)
	series = (1.0 + x) * math.exp(-x)        # Taylor fallback
	probe = slow_decay * (1.0 + 1.0 / eps) + fast_decay * (1.0 + -1.0 / eps)
	if probe <= math.inf:
		return (slow_decay + fast_decay) / 2.0
	return (series + series) / 2.0
# Direct binary64 evaluation of
# ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2.
# The explicit Float64(...) wrappers from the generated code are kept so that
# non-Float64 arguments round exactly as in the original.
function code(x, eps)
	inv_eps = Float64(1.0 / eps)
	grow = Float64(Float64(1.0 + inv_eps) * exp(Float64(-Float64(Float64(1.0 - eps) * x))))
	decay = Float64(Float64(inv_eps - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))
	return Float64(Float64(grow - decay) / 2.0)
end
# Herbie's rewrite: average e^(-(1-eps)x) and e^(-(1+eps)x) directly when the
# eps-rescaled sum stays finite; otherwise use the Taylor fallback (1+x)e^-x.
# Explicit Float64(...) wrappers are kept to match the original rounding exactly.
function code(x, eps)
	fast_decay = exp(Float64(x * Float64(-1.0 - eps)))   # e^(-(1+eps)x)
	slow_decay = exp(Float64(x * Float64(eps + -1.0)))   # e^(-(1-eps)x)
	series = Float64(Float64(1.0 + x) * exp(Float64(-x)))
	probe = Float64(Float64(slow_decay * Float64(1.0 + Float64(1.0 / eps))) + Float64(fast_decay * Float64(1.0 + Float64(-1.0 / eps))))
	if probe <= Inf
		return Float64(Float64(slow_decay + fast_decay) / 2.0)
	else
		return Float64(Float64(series + series) / 2.0)
	end
end
% Direct evaluation of ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2.
function tmp = code(x, eps)
	inv_eps = 1.0 / eps;
	grow = (1.0 + inv_eps) * exp(-((1.0 - eps) * x));
	decay = (inv_eps - 1.0) * exp(-((1.0 + eps) * x));
	tmp = (grow - decay) / 2.0;
end
% Herbie's rewrite: average exp(-(1-eps)*x) and exp(-(1+eps)*x) directly when the
% eps-rescaled sum stays finite; otherwise use the Taylor fallback (1+x)*exp(-x).
function tmp_2 = code(x, eps)
	fast_decay = exp(x * (-1.0 - eps));  % exp(-(1+eps)*x)
	slow_decay = exp(x * (eps + -1.0));  % exp(-(1-eps)*x)
	series = (1.0 + x) * exp(-x);        % Taylor fallback
	probe = (slow_decay * (1.0 + (1.0 / eps))) + (fast_decay * (1.0 + (-1.0 / eps)));
	if (probe <= Inf)
		tmp_2 = (slow_decay + fast_decay) / 2.0;
	else
		tmp_2 = (series + series) / 2.0;
	end
end
(* Direct $MachinePrecision evaluation of ((1 + 1/eps) e^(-(1-eps)x) - (1/eps - 1) e^(-(1+eps)x)) / 2; each N[..., $MachinePrecision] wrapper models one binary64 rounding step. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
(* Herbie's rewrite: t$95$0 = e^(-(1+eps)x), t$95$1 = e^(-(1-eps)x), t$95$2 = (1+x)e^-x (Taylor fallback). Averages the two exponentials when the eps-rescaled sum stays finite, else returns the fallback. *)
code[x_, eps_] := Block[{t$95$0 = N[Exp[N[(x * N[(-1.0 - eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[Exp[N[(x * N[(eps + -1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[(N[(1.0 + x), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(N[(t$95$1 * N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(t$95$0 * N[(1.0 + N[(-1.0 / eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], Infinity], N[(N[(t$95$1 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(t$95$2 + t$95$2), $MachinePrecision] / 2.0), $MachinePrecision]]]]]
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\begin{array}{l}
t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\
t_1 := e^{x \cdot \left(\varepsilon + -1\right)}\\
t_2 := \left(1 + x\right) \cdot e^{-x}\\
\mathbf{if}\;t_1 \cdot \left(1 + \frac{1}{\varepsilon}\right) + t_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\
\;\;\;\;\frac{t_1 + t_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{t_2 + t_2}{2}\\


\end{array}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Herbie found 13 alternatives:

Alternative · Accuracy · Speedup

Accuracy vs Speed

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Bogosity?

Bogosity

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Split input into 2 regimes
  2. if (-.f64 (*.f64 (+.f64 1 (/.f64 1 eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 1 eps) x)))) (*.f64 (-.f64 (/.f64 1 eps) 1) (exp.f64 (neg.f64 (*.f64 (+.f64 1 eps) x))))) < +inf.0

    1. Initial program 81.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified81.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Step-by-step derivation

      [Start]81.0%

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [=>]81.0%

      \[ \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]

      +-rgt-identity [<=]81.0%

      \[ \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [<=]81.0%

      \[ \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Taylor expanded in eps around inf 98.8%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    4. Simplified98.8%

      \[\leadsto \frac{\color{blue}{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-e^{x \cdot \left(-\left(\varepsilon + 1\right)\right)}\right)}}{2} \]
      Step-by-step derivation

      [Start]98.8%

      \[ \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]

      neg-mul-1 [<=]98.8%

      \[ \frac{e^{\color{blue}{-\left(1 - \varepsilon\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]

      distribute-lft-neg-in [=>]98.8%

      \[ \frac{e^{\color{blue}{\left(-\left(1 - \varepsilon\right)\right) \cdot x}} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}{2} \]

      mul-1-neg [=>]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \color{blue}{\left(-e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}\right)}}{2} \]

      exp-prod [=>]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-\color{blue}{{\left(e^{-1}\right)}^{\left(\left(\varepsilon + 1\right) \cdot x\right)}}\right)}{2} \]

      +-commutative [<=]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-{\left(e^{-1}\right)}^{\left(\color{blue}{\left(1 + \varepsilon\right)} \cdot x\right)}\right)}{2} \]

      *-commutative [<=]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-{\left(e^{-1}\right)}^{\color{blue}{\left(x \cdot \left(1 + \varepsilon\right)\right)}}\right)}{2} \]

      exp-prod [<=]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-\color{blue}{e^{-1 \cdot \left(x \cdot \left(1 + \varepsilon\right)\right)}}\right)}{2} \]

      neg-mul-1 [<=]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-e^{\color{blue}{-x \cdot \left(1 + \varepsilon\right)}}\right)}{2} \]

      distribute-rgt-neg-in [=>]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-e^{\color{blue}{x \cdot \left(-\left(1 + \varepsilon\right)\right)}}\right)}{2} \]

      +-commutative [=>]98.8%

      \[ \frac{e^{\left(-\left(1 - \varepsilon\right)\right) \cdot x} - \left(-e^{x \cdot \left(-\color{blue}{\left(\varepsilon + 1\right)}\right)}\right)}{2} \]

    if +inf.0 < (-.f64 (*.f64 (+.f64 1 (/.f64 1 eps)) (exp.f64 (neg.f64 (*.f64 (-.f64 1 eps) x)))) (*.f64 (-.f64 (/.f64 1 eps) 1) (exp.f64 (neg.f64 (*.f64 (+.f64 1 eps) x)))))

    1. Initial program 0.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified0.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Step-by-step derivation

      [Start]0.0%

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [=>]0.0%

      \[ \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]

      +-rgt-identity [<=]0.0%

      \[ \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [<=]0.0%

      \[ \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Taylor expanded in eps around 0 0.0%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}}{2} \]
    4. Simplified100.0%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - \left(-\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
      Step-by-step derivation

      [Start]0.0%

      \[ \frac{\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      *-commutative [=>]0.0%

      \[ \frac{\left(\color{blue}{x \cdot e^{-1 \cdot x}} + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      distribute-lft1-in [=>]0.0%

      \[ \frac{\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      mul-1-neg [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{\color{blue}{-x}} - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      distribute-lft-out [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \color{blue}{-1 \cdot \left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right)}}{2} \]

      mul-1-neg [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \color{blue}{\left(-\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right)\right)}}{2} \]

      *-commutative [=>]0.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\left(\color{blue}{x \cdot e^{-1 \cdot x}} + e^{-1 \cdot x}\right)\right)}{2} \]

      distribute-lft1-in [=>]100.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}}\right)}{2} \]

      mul-1-neg [=>]100.0%

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification98.9%

    \[\leadsto \begin{array}{l} \mathbf{if}\;e^{x \cdot \left(\varepsilon + -1\right)} \cdot \left(1 + \frac{1}{\varepsilon}\right) + e^{x \cdot \left(-1 - \varepsilon\right)} \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\left(1 + x\right) \cdot e^{-x} + \left(1 + x\right) \cdot e^{-x}}{2}\\ \end{array} \]

Alternatives

Alternative 1
Accuracy99.2%
Cost28100
\[\begin{array}{l} t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\ t_1 := e^{x \cdot \left(\varepsilon + -1\right)}\\ t_2 := \left(1 + x\right) \cdot e^{-x}\\ \mathbf{if}\;t_1 \cdot \left(1 + \frac{1}{\varepsilon}\right) + t_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right) \leq \infty:\\ \;\;\;\;\frac{t_1 + t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t_2 + t_2}{2}\\ \end{array} \]
Alternative 2
Accuracy86.7%
Cost13764
\[\begin{array}{l} t_0 := e^{x \cdot \left(-1 - \varepsilon\right)}\\ \mathbf{if}\;x \leq -8.4 \cdot 10^{+134}:\\ \;\;\;\;\frac{\left(1 + \frac{1}{\varepsilon}\right) + t_0 \cdot \left(1 + \frac{-1}{\varepsilon}\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(\varepsilon + -1\right)} + t_0}{2}\\ \end{array} \]
Alternative 3
Accuracy86.9%
Cost13705
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -1 \lor \neg \left(\varepsilon \leq 1\right):\\ \;\;\;\;\frac{e^{\varepsilon \cdot \left(-x\right)} + e^{\varepsilon \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 \cdot e^{-x}}{2}\\ \end{array} \]
Alternative 4
Accuracy64.5%
Cost7314
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -1.7 \cdot 10^{+255} \lor \neg \left(\varepsilon \leq -2.15 \cdot 10^{+198} \lor \neg \left(\varepsilon \leq -2.7 \cdot 10^{+188}\right) \land \varepsilon \leq 6.4 \cdot 10^{+240}\right):\\ \;\;\;\;\frac{2 + \left(1 + \frac{-1}{\varepsilon}\right) \cdot \frac{x \cdot \left(-1 + \varepsilon \cdot \varepsilon\right)}{1 - \varepsilon}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 \cdot e^{-x}}{2}\\ \end{array} \]
Alternative 5
Accuracy75.8%
Cost7177
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -320 \lor \neg \left(\varepsilon \leq 1.9 \cdot 10^{+17}\right):\\ \;\;\;\;\frac{2 + {\left(\varepsilon \cdot x\right)}^{2}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 \cdot e^{-x}}{2}\\ \end{array} \]
Alternative 6
Accuracy58.1%
Cost1476
\[\begin{array}{l} \mathbf{if}\;x \leq -3.2 \cdot 10^{+43}:\\ \;\;\;\;\frac{2 + \left(1 + \frac{-1}{\varepsilon}\right) \cdot \frac{x \cdot \left(-1 + \varepsilon \cdot \varepsilon\right)}{1 - \varepsilon}}{2}\\ \mathbf{elif}\;x \leq 540:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 7
Accuracy57.2%
Cost1476
\[\begin{array}{l} \mathbf{if}\;x \leq -3.2 \cdot 10^{+43}:\\ \;\;\;\;\frac{2 + \left(\frac{1}{\varepsilon} + -1\right) \cdot \frac{1 - \varepsilon \cdot \varepsilon}{\frac{1 - \varepsilon}{x}}}{2}\\ \mathbf{elif}\;x \leq 540:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 8
Accuracy54.9%
Cost964
\[\begin{array}{l} \mathbf{if}\;x \leq -2.1 \cdot 10^{+158}:\\ \;\;\;\;\frac{2 + \left(1 + \varepsilon\right) \cdot \left(x + \frac{x}{\varepsilon}\right)}{2}\\ \mathbf{elif}\;x \leq 3.4 \cdot 10^{+23}:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 9
Accuracy52.5%
Cost712
\[\begin{array}{l} \mathbf{if}\;x \leq -4.4 \cdot 10^{+158}:\\ \;\;\;\;\frac{x \cdot 0.5}{\varepsilon}\\ \mathbf{elif}\;x \leq 3.4 \cdot 10^{+23}:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 10
Accuracy53.7%
Cost584
\[\begin{array}{l} \mathbf{if}\;x \leq -3.5 \cdot 10^{+158}:\\ \;\;\;\;\frac{x \cdot 0.5}{\varepsilon}\\ \mathbf{elif}\;x \leq -3.2 \cdot 10^{+43}:\\ \;\;\;\;\left(\varepsilon \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 490:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 11
Accuracy53.9%
Cost452
\[\begin{array}{l} \mathbf{if}\;x \leq -1.5 \cdot 10^{+44}:\\ \;\;\;\;\left(\varepsilon \cdot x\right) \cdot -0.5\\ \mathbf{elif}\;x \leq 600:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 12
Accuracy50.7%
Cost196
\[\begin{array}{l} \mathbf{if}\;x \leq 510:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 13
Accuracy39.0%
Cost64
\[1 \]

Reproduce?

herbie shell --seed 2023263 
;; Input FPCore passed to `herbie shell` (seed above) to reproduce this report.
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))