NMSE Section 6.1 mentioned, A

?

Percentage Accurate: 73.8% → 99.9%
Time: 17.3s
Precision: binary64
Cost: 14024

?

\[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
\[\begin{array}{l} t_0 := \left(x + 1\right) \cdot e^{-x}\\ \mathbf{if}\;\varepsilon \leq -1:\\ \;\;\;\;\frac{e^{x - \varepsilon \cdot x} + e^{\varepsilon \cdot x}}{2}\\ \mathbf{elif}\;\varepsilon \leq 2 \cdot 10^{-18}:\\ \;\;\;\;\frac{t_0 + t_0}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 + \varepsilon\right)} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \end{array} \]
;; Original program: ((1 + 1/eps) * e^(-(1-eps)x) - (1/eps - 1) * e^(-(1+eps)x)) / 2
;; Reported accuracy 73.8%: the 1/eps factors magnify rounding error for small eps.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
;; Herbie rewrite (99.9% accurate): three regimes on eps.
;;   eps <= -1      : exponent products regrouped exactly
;;   |eps| <= 2e-18 : Taylor expansion around eps = 0, t_0 = (x+1)e^(-x)
;;   otherwise      : exponents factored as x*(-1+eps) and x*(-eps)
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (* (+ x 1.0) (exp (- x)))))
   (if (<= eps -1.0)
     (/ (+ (exp (- x (* eps x))) (exp (* eps x))) 2.0)
     (if (<= eps 2e-18)
       (/ (+ t_0 t_0) 2.0)
       (/ (+ (exp (* x (+ -1.0 eps))) (exp (* x (- eps)))) 2.0)))))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
double code(double x, double eps) {
	double t_0 = (x + 1.0) * exp(-x);
	double tmp;
	if (eps <= -1.0) {
		tmp = (exp((x - (eps * x))) + exp((eps * x))) / 2.0;
	} else if (eps <= 2e-18) {
		tmp = (t_0 + t_0) / 2.0;
	} else {
		tmp = (exp((x * (-1.0 + eps))) + exp((x * -eps))) / 2.0;
	}
	return tmp;
}
real(8) function code(x, eps)
    ! Original expression: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: inv_eps, lhs, rhs
    inv_eps = 1.0d0 / eps
    lhs = (1.0d0 + inv_eps) * exp(-((1.0d0 - eps) * x))
    rhs = (inv_eps - 1.0d0) * exp(-((1.0d0 + eps) * x))
    code = (lhs - rhs) / 2.0d0
end function
real(8) function code(x, eps)
    ! Herbie rewrite: three regimes on eps for numerical stability.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    if (eps <= (-1.0d0)) then
        code = (exp(x - eps * x) + exp(eps * x)) / 2.0d0
    else if (eps <= 2d-18) then
        ! Taylor expansion around eps = 0
        t_0 = (x + 1.0d0) * exp(-x)
        code = (t_0 + t_0) / 2.0d0
    else
        code = (exp(x * ((-1.0d0) + eps)) + exp(x * (-eps))) / 2.0d0
    end if
end function
// Original expression: ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2.
public static double code(double x, double eps) {
	double invEps = 1.0 / eps;
	double lhs = (1.0 + invEps) * Math.exp(-((1.0 - eps) * x));
	double rhs = (invEps - 1.0) * Math.exp(-((1.0 + eps) * x));
	return (lhs - rhs) / 2.0;
}
// Herbie rewrite: three regimes on eps for numerical stability.
public static double code(double x, double eps) {
	if (eps <= -1.0) {
		return (Math.exp(x - eps * x) + Math.exp(eps * x)) / 2.0;
	}
	if (eps <= 2e-18) {
		// Taylor expansion around eps = 0.
		double t = (x + 1.0) * Math.exp(-x);
		return (t + t) / 2.0;
	}
	return (Math.exp(x * (-1.0 + eps)) + Math.exp(x * -eps)) / 2.0;
}
def code(x, eps):
	"""Original expression: ((1 + 1/eps)*e^(-(1-eps)x) - (1/eps - 1)*e^(-(1+eps)x)) / 2."""
	inv_eps = 1.0 / eps
	lhs = (1.0 + inv_eps) * math.exp(-((1.0 - eps) * x))
	rhs = (inv_eps - 1.0) * math.exp(-((1.0 + eps) * x))
	return (lhs - rhs) / 2.0
def code(x, eps):
	"""Herbie rewrite: three regimes on eps for numerical stability."""
	if eps <= -1.0:
		return (math.exp(x - eps * x) + math.exp(eps * x)) / 2.0
	if eps <= 2e-18:
		# Taylor expansion around eps = 0.
		t = (x + 1.0) * math.exp(-x)
		return (t + t) / 2.0
	return (math.exp(x * (-1.0 + eps)) + math.exp(x * -eps)) / 2.0
# Original expression; explicit Float64 casts mirror the binary64 FPCore spec.
function code(x, eps)
	inv_eps = Float64(1.0 / eps)
	lhs = Float64(Float64(1.0 + inv_eps) * exp(Float64(-Float64(Float64(1.0 - eps) * x))))
	rhs = Float64(Float64(inv_eps - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))
	return Float64(Float64(lhs - rhs) / 2.0)
end
# Herbie rewrite: three regimes on eps for numerical stability.
function code(x, eps)
	if eps <= -1.0
		return Float64(Float64(exp(Float64(x - Float64(eps * x))) + exp(Float64(eps * x))) / 2.0)
	elseif eps <= 2e-18
		# Taylor expansion around eps = 0.
		t = Float64(Float64(x + 1.0) * exp(Float64(-x)))
		return Float64(Float64(t + t) / 2.0)
	else
		return Float64(Float64(exp(Float64(x * Float64(-1.0 + eps))) + exp(Float64(x * Float64(-eps)))) / 2.0)
	end
end
function out = code(x, eps)
	% Original expression: ((1 + 1/eps)*exp(-(1-eps)*x) - (1/eps - 1)*exp(-(1+eps)*x)) / 2
	inv_eps = 1.0 / eps;
	lhs = (1.0 + inv_eps) * exp(-((1.0 - eps) * x));
	rhs = (inv_eps - 1.0) * exp(-((1.0 + eps) * x));
	out = (lhs - rhs) / 2.0;
end
function out = code(x, eps)
	% Herbie rewrite: three regimes on eps for numerical stability.
	if (eps <= -1.0)
		out = (exp(x - eps * x) + exp(eps * x)) / 2.0;
	elseif (eps <= 2e-18)
		% Taylor expansion around eps = 0.
		t = (x + 1.0) * exp(-x);
		out = (t + t) / 2.0;
	else
		out = (exp(x * (-1.0 + eps)) + exp(x * -eps)) / 2.0;
	end
end
(* Original expression, each subexpression forced to $MachinePrecision with N[]:
   ((1 + 1/eps)*E^(-(1-eps)x) - (1/eps - 1)*E^(-(1+eps)x)) / 2 *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
(* Herbie rewrite: three regimes on eps (eps <= -1, eps <= 2e-18, else),
   with t$95$0 = (x+1)*E^(-x) shared by the middle branch. *)
code[x_, eps_] := Block[{t$95$0 = N[(N[(x + 1.0), $MachinePrecision] * N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[eps, -1.0], N[(N[(N[Exp[N[(x - N[(eps * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(eps * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], If[LessEqual[eps, 2e-18], N[(N[(t$95$0 + t$95$0), $MachinePrecision] / 2.0), $MachinePrecision], N[(N[(N[Exp[N[(x * N[(-1.0 + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * (-eps)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]]]]
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\begin{array}{l}
t_0 := \left(x + 1\right) \cdot e^{-x}\\
\mathbf{if}\;\varepsilon \leq -1:\\
\;\;\;\;\frac{e^{x - \varepsilon \cdot x} + e^{\varepsilon \cdot x}}{2}\\

\mathbf{elif}\;\varepsilon \leq 2 \cdot 10^{-18}:\\
\;\;\;\;\frac{t_0 + t_0}{2}\\

\mathbf{else}:\\
\;\;\;\;\frac{e^{x \cdot \left(-1 + \varepsilon\right)} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\


\end{array}

Local Percentage Accuracy?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Herbie found 17 alternatives:

Alternative | Accuracy | Speedup

Accuracy vs Speed

The accuracy (vertical axis) and speed (horizontal axis) of each of Herbie's proposed alternatives. Up and to the right is better. Each dot represents an alternative program; the red square represents the initial program.

Bogosity?

Bogosity

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Split input into 3 regimes
  2. if eps < -1

    1. Initial program 100.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 100.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Step-by-step derivation

      [Start]100.0

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [=>]100.0

      \[ \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]

      +-rgt-identity [<=]100.0

      \[ \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [<=]100.0

      \[ \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(\varepsilon \cdot x\right)}}}{2} \]
    5. Simplified 100.0%

      \[\leadsto \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
      Step-by-step derivation

      [Start]100.0

      \[ \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]

      *-commutative [=>]100.0

      \[ \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
    6. Applied egg-rr 61.3%

      \[\leadsto \frac{\color{blue}{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)} + {\left(e^{\varepsilon}\right)}^{x}}}{2} \]
      Step-by-step derivation

      [Start]100.0

      \[ \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}}{2} \]

      sub-neg [=>]100.0

      \[ \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}}{2} \]

      add-sqr-sqrt [=>]43.5

      \[ \frac{e^{\color{blue}{\sqrt{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} \cdot \sqrt{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)}}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      sqrt-unprod [=>]100.0

      \[ \frac{e^{\color{blue}{\sqrt{\left(-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)\right) \cdot \left(-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)\right)}}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      mul-1-neg [=>]100.0

      \[ \frac{e^{\sqrt{\color{blue}{\left(-\left(1 - \varepsilon\right) \cdot x\right)} \cdot \left(-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)\right)}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      mul-1-neg [=>]100.0

      \[ \frac{e^{\sqrt{\left(-\left(1 - \varepsilon\right) \cdot x\right) \cdot \color{blue}{\left(-\left(1 - \varepsilon\right) \cdot x\right)}}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      sqr-neg [=>]100.0

      \[ \frac{e^{\sqrt{\color{blue}{\left(\left(1 - \varepsilon\right) \cdot x\right) \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)}}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      sqrt-unprod [<=]56.5

      \[ \frac{e^{\color{blue}{\sqrt{\left(1 - \varepsilon\right) \cdot x} \cdot \sqrt{\left(1 - \varepsilon\right) \cdot x}}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      add-sqr-sqrt [<=]64.7

      \[ \frac{e^{\color{blue}{\left(1 - \varepsilon\right) \cdot x}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      *-commutative [=>]64.7

      \[ \frac{e^{\color{blue}{x \cdot \left(1 - \varepsilon\right)}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      exp-prod [=>]64.9

      \[ \frac{\color{blue}{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)}} + \left(--1 \cdot e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}{2} \]

      mul-1-neg [=>]64.9

      \[ \frac{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)} + \left(-\color{blue}{\left(-e^{-1 \cdot \left(x \cdot \varepsilon\right)}\right)}\right)}{2} \]

      remove-double-neg [=>]64.9

      \[ \frac{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)} + \color{blue}{e^{-1 \cdot \left(x \cdot \varepsilon\right)}}}{2} \]

      add-sqr-sqrt [=>]56.5

      \[ \frac{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)} + e^{\color{blue}{\sqrt{-1 \cdot \left(x \cdot \varepsilon\right)} \cdot \sqrt{-1 \cdot \left(x \cdot \varepsilon\right)}}}}{2} \]

      sqrt-unprod [=>]99.0

      \[ \frac{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)} + e^{\color{blue}{\sqrt{\left(-1 \cdot \left(x \cdot \varepsilon\right)\right) \cdot \left(-1 \cdot \left(x \cdot \varepsilon\right)\right)}}}}{2} \]

      mul-1-neg [=>]99.0

      \[ \frac{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)} + e^{\sqrt{\color{blue}{\left(-x \cdot \varepsilon\right)} \cdot \left(-1 \cdot \left(x \cdot \varepsilon\right)\right)}}}{2} \]
    7. Simplified 100.0%

      \[\leadsto \frac{\color{blue}{e^{x - x \cdot \varepsilon} + e^{x \cdot \varepsilon}}}{2} \]
      Step-by-step derivation

      [Start]61.3

      \[ \frac{{\left(e^{x}\right)}^{\left(1 - \varepsilon\right)} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      exp-prod [<=]81.0

      \[ \frac{\color{blue}{e^{x \cdot \left(1 - \varepsilon\right)}} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      sub-neg [=>]81.0

      \[ \frac{e^{x \cdot \color{blue}{\left(1 + \left(-\varepsilon\right)\right)}} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      distribute-lft-in [=>]81.0

      \[ \frac{e^{\color{blue}{x \cdot 1 + x \cdot \left(-\varepsilon\right)}} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      distribute-rgt-neg-in [<=]81.0

      \[ \frac{e^{x \cdot 1 + \color{blue}{\left(-x \cdot \varepsilon\right)}} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      *-commutative [<=]81.0

      \[ \frac{e^{x \cdot 1 + \left(-\color{blue}{\varepsilon \cdot x}\right)} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      unsub-neg [=>]81.0

      \[ \frac{e^{\color{blue}{x \cdot 1 - \varepsilon \cdot x}} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      *-rgt-identity [=>]81.0

      \[ \frac{e^{\color{blue}{x} - \varepsilon \cdot x} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      *-commutative [=>]81.0

      \[ \frac{e^{x - \color{blue}{x \cdot \varepsilon}} + {\left(e^{\varepsilon}\right)}^{x}}{2} \]

      exp-prod [<=]100.0

      \[ \frac{e^{x - x \cdot \varepsilon} + \color{blue}{e^{\varepsilon \cdot x}}}{2} \]

      *-commutative [=>]100.0

      \[ \frac{e^{x - x \cdot \varepsilon} + e^{\color{blue}{x \cdot \varepsilon}}}{2} \]

    if -1 < eps < 2.0000000000000001e-18

    1. Initial program 40.4%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 40.4%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Step-by-step derivation

      [Start]40.4

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [=>]40.4

      \[ \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]

      +-rgt-identity [<=]40.4

      \[ \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [<=]40.4

      \[ \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{\color{blue}{\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}}{2} \]
    4. Simplified 100.0%

      \[\leadsto \frac{\color{blue}{\left(x + 1\right) \cdot e^{-x} - \left(-\left(x + 1\right) \cdot e^{-x}\right)}}{2} \]
      Step-by-step derivation

      [Start]100.0

      \[ \frac{\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      *-commutative [=>]100.0

      \[ \frac{\left(\color{blue}{x \cdot e^{-1 \cdot x}} + e^{-1 \cdot x}\right) - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      distribute-lft1-in [=>]100.0

      \[ \frac{\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}} - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      mul-1-neg [=>]100.0

      \[ \frac{\left(x + 1\right) \cdot e^{\color{blue}{-x}} - \left(-1 \cdot \left(e^{-1 \cdot x} \cdot x\right) + -1 \cdot e^{-1 \cdot x}\right)}{2} \]

      distribute-lft-out [=>]100.0

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \color{blue}{-1 \cdot \left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right)}}{2} \]

      mul-1-neg [=>]100.0

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \color{blue}{\left(-\left(e^{-1 \cdot x} \cdot x + e^{-1 \cdot x}\right)\right)}}{2} \]

      *-commutative [=>]100.0

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\left(\color{blue}{x \cdot e^{-1 \cdot x}} + e^{-1 \cdot x}\right)\right)}{2} \]

      distribute-lft1-in [=>]100.0

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\color{blue}{\left(x + 1\right) \cdot e^{-1 \cdot x}}\right)}{2} \]

      mul-1-neg [=>]100.0

      \[ \frac{\left(x + 1\right) \cdot e^{-x} - \left(-\left(x + 1\right) \cdot e^{\color{blue}{-x}}\right)}{2} \]

    if 2.0000000000000001e-18 < eps

    1. Initial program 98.6%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 98.6%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Step-by-step derivation

      [Start]98.6

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [=>]98.6

      \[ \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]

      +-rgt-identity [<=]98.6

      \[ \frac{\color{blue}{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0}}{2} - \frac{\left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      div-sub [<=]98.6

      \[ \color{blue}{\frac{\left(\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} + 0\right) - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}} \]
    3. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(\varepsilon + 1\right) \cdot x\right)}}}{2} \]
    4. Taylor expanded in eps around inf 100.0%

      \[\leadsto \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(\varepsilon \cdot x\right)}}}{2} \]
    5. Simplified 100.0%

      \[\leadsto \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
      Step-by-step derivation

      [Start]100.0

      \[ \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\varepsilon \cdot x\right)}}{2} \]

      *-commutative [=>]100.0

      \[ \frac{e^{-1 \cdot \left(\left(1 - \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \color{blue}{\left(x \cdot \varepsilon\right)}}}{2} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;\varepsilon \leq -1:\\ \;\;\;\;\frac{e^{x - \varepsilon \cdot x} + e^{\varepsilon \cdot x}}{2}\\ \mathbf{elif}\;\varepsilon \leq 2 \cdot 10^{-18}:\\ \;\;\;\;\frac{\left(x + 1\right) \cdot e^{-x} + \left(x + 1\right) \cdot e^{-x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{x \cdot \left(-1 + \varepsilon\right)} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \end{array} \]

Alternatives

Alternative 1
Accuracy100.0%
Cost14025
\[\begin{array}{l} t_0 := \left(x + 1\right) \cdot e^{-x}\\ \mathbf{if}\;\varepsilon \leq -1 \lor \neg \left(\varepsilon \leq 1\right):\\ \;\;\;\;\frac{e^{x - \varepsilon \cdot x} + e^{\varepsilon \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{t_0 + t_0}{2}\\ \end{array} \]
Alternative 2
Accuracy100.0%
Cost13833
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -1 \lor \neg \left(\varepsilon \leq 1\right):\\ \;\;\;\;\frac{e^{x - \varepsilon \cdot x} + e^{\varepsilon \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{e^{-x} \cdot \left(x + 2\right) + \frac{x}{e^{x}}}{2}\\ \end{array} \]
Alternative 3
Accuracy98.7%
Cost13769
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -40000 \lor \neg \left(\varepsilon \leq 1\right):\\ \;\;\;\;\frac{e^{x - \varepsilon \cdot x} + e^{\varepsilon \cdot x}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \end{array} \]
Alternative 4
Accuracy98.8%
Cost13632
\[\frac{e^{x \cdot \left(-1 + \varepsilon\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \]
Alternative 5
Accuracy86.4%
Cost8076
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -1:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(-2 - \varepsilon\right) + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)}{2}\\ \mathbf{elif}\;\varepsilon \leq 1.05:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;\varepsilon \leq 6.4 \cdot 10^{+168}:\\ \;\;\;\;\frac{2 + x \cdot \left(x - 2\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(-2 - \varepsilon\right) + x \cdot \left(0.5 + 0.5 \cdot {\left(\varepsilon + 1\right)}^{2}\right)\right)}{2}\\ \end{array} \]
Alternative 6
Accuracy88.7%
Cost8068
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -1.45:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(-1 + \left(\varepsilon - \varepsilon\right)\right) + x \cdot \left(0.5 \cdot \left(\varepsilon \cdot \varepsilon + {\left(1 - \varepsilon\right)}^{2}\right)\right)\right)}{2}\\ \mathbf{elif}\;\varepsilon \leq 6.2:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 + x \cdot \left(-2 - \left(\varepsilon \cdot \left(1 - x\right) - \mathsf{fma}\left(0.5, \varepsilon \cdot \left(\varepsilon \cdot x\right), x\right)\right)\right)}{2}\\ \end{array} \]
Alternative 7
Accuracy88.5%
Cost8008
\[\begin{array}{l} \mathbf{if}\;\varepsilon \leq -1:\\ \;\;\;\;\frac{2 + x \cdot \left(\left(-2 - \varepsilon\right) + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)}{2}\\ \mathbf{elif}\;\varepsilon \leq 1:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{else}:\\ \;\;\;\;\frac{2 + x \cdot \left(-2 - \left(\varepsilon \cdot \left(1 - x\right) - \mathsf{fma}\left(0.5, \varepsilon \cdot \left(\varepsilon \cdot x\right), x\right)\right)\right)}{2}\\ \end{array} \]
Alternative 8
Accuracy86.5%
Cost6984
\[\begin{array}{l} t_0 := \frac{2 + x \cdot \left(\left(-2 - \varepsilon\right) + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)}{2}\\ \mathbf{if}\;\varepsilon \leq -1.05:\\ \;\;\;\;t_0\\ \mathbf{elif}\;\varepsilon \leq 1.9:\\ \;\;\;\;\frac{\frac{2}{e^{x}}}{2}\\ \mathbf{elif}\;\varepsilon \leq 2 \cdot 10^{+164}:\\ \;\;\;\;\frac{2 + x \cdot \left(x - 2\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;t_0\\ \end{array} \]
Alternative 9
Accuracy77.2%
Cost1484
\[\begin{array}{l} t_0 := \frac{2 + x \cdot \left(\left(-2 - \varepsilon\right) + x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.5\right)\right)\right)}{2}\\ \mathbf{if}\;x \leq -7.5 \cdot 10^{-211}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq 3.2 \cdot 10^{-221}:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 7.8 \cdot 10^{+25}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 10
Accuracy76.2%
Cost1228
\[\begin{array}{l} t_0 := \frac{2 + \left(\varepsilon \cdot \varepsilon\right) \cdot \left(0.5 \cdot \left(x \cdot x\right)\right)}{2}\\ \mathbf{if}\;x \leq -1.9 \cdot 10^{-153}:\\ \;\;\;\;t_0\\ \mathbf{elif}\;x \leq 2.7 \cdot 10^{-162}:\\ \;\;\;\;1\\ \mathbf{elif}\;x \leq 2.8 \cdot 10^{+30}:\\ \;\;\;\;t_0\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 11
Accuracy62.5%
Cost836
\[\begin{array}{l} \mathbf{if}\;x \leq 550000000000:\\ \;\;\;\;\frac{2 + x \cdot \left(x + \left(-2 - \varepsilon\right)\right)}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 12
Accuracy63.3%
Cost712
\[\begin{array}{l} \mathbf{if}\;x \leq -9 \cdot 10^{+115}:\\ \;\;\;\;\frac{2 + x \cdot \left(x - 2\right)}{2}\\ \mathbf{elif}\;x \leq 550000000000:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 13
Accuracy59.1%
Cost580
\[\begin{array}{l} \mathbf{if}\;x \leq 3 \cdot 10^{+15}:\\ \;\;\;\;\frac{2 + \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 14
Accuracy59.2%
Cost580
\[\begin{array}{l} \mathbf{if}\;x \leq 550000000000:\\ \;\;\;\;\frac{2 - \varepsilon \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 15
Accuracy56.2%
Cost196
\[\begin{array}{l} \mathbf{if}\;x \leq 3 \cdot 10^{+15}:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 16
Accuracy15.8%
Cost64
\[0 \]

Reproduce?

herbie shell --seed 2023161 
;; Input program for the reproduction command above (seed pins the sampled points).
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))