NMSE Section 6.1 mentioned, A

Average Accuracy: 52.9% → 99.1%
Time: 18.1s
Precision: binary64
Cost: 13572

?

\[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
\[\begin{array}{l} \mathbf{if}\;x \leq 350:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
; Original program: ((1 + 1/eps)·e^{-(1-eps)x} - (1/eps - 1)·e^{-(1+eps)x}) / 2
; Cancels catastrophically for small eps, where both terms are ~1/eps.
(FPCore (x eps)
 :precision binary64
 (/
  (-
   (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x))))
   (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x)))))
  2.0))
; Herbie's rewrite: averages exp(x*eps) and exp(-x*eps) below the x=350 cutoff,
; and returns 0 beyond it (the true value has underflowed by then).
(FPCore (x eps)
 :precision binary64
 (if (<= x 350.0) (/ (+ (exp (* x eps)) (exp (* x (- eps)))) 2.0) 0.0))
double code(double x, double eps) {
	return (((1.0 + (1.0 / eps)) * exp(-((1.0 - eps) * x))) - (((1.0 / eps) - 1.0) * exp(-((1.0 + eps) * x)))) / 2.0;
}
double code(double x, double eps) {
	double tmp;
	if (x <= 350.0) {
		tmp = (exp((x * eps)) + exp((x * -eps))) / 2.0;
	} else {
		tmp = 0.0;
	}
	return tmp;
}
real(8) function code(x, eps)
    ! ((1 + 1/eps)*e^{-(1-eps)x} - (1/eps - 1)*e^{-(1+eps)x}) / 2,
    ! translated directly from the original program.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: inv, grow, decay
    inv = 1.0d0 / eps
    grow = (1.0d0 + inv) * exp(-((1.0d0 - eps) * x))
    decay = (inv - 1.0d0) * exp(-((1.0d0 + eps) * x))
    code = (grow - decay) / 2.0d0
end function
real(8) function code(x, eps)
    ! Herbie's rewrite: average the two exponentials below the cutoff;
    ! past x = 350 the true value has underflowed to zero.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    if (x <= 350.0d0) then
        code = (exp(x * eps) + exp(x * (-eps))) / 2.0d0
    else
        code = 0.0d0
    end if
end function
public static double code(double x, double eps) {
	// ((1 + 1/eps)*e^{-(1-eps)x} - (1/eps - 1)*e^{-(1+eps)x}) / 2,
	// translated directly from the original program.
	double inv = 1.0 / eps;
	double grow = (1.0 + inv) * Math.exp(-((1.0 - eps) * x));
	double decay = (inv - 1.0) * Math.exp(-((1.0 + eps) * x));
	return (grow - decay) / 2.0;
}
public static double code(double x, double eps) {
	// Herbie's rewrite: average the two exponentials below the cutoff;
	// past x = 350 the true value has underflowed to zero.
	if (x <= 350.0) {
		return (Math.exp(x * eps) + Math.exp(x * -eps)) / 2.0;
	}
	return 0.0;
}
def code(x, eps):
	"""((1 + 1/eps)*e^{-(1-eps)x} - (1/eps - 1)*e^{-(1+eps)x}) / 2.

	Direct translation of the original program; cancels badly for small eps.
	"""
	inv = 1.0 / eps
	grow = (1.0 + inv) * math.exp(-((1.0 - eps) * x))
	decay = (inv - 1.0) * math.exp(-((1.0 + eps) * x))
	return (grow - decay) / 2.0
def code(x, eps):
	"""Herbie's rewrite: average exp(x*eps) and exp(-x*eps) below the cutoff.

	Past x = 350 the true value has underflowed, so return 0 there.
	"""
	if x <= 350.0:
		return (math.exp(x * eps) + math.exp(x * -eps)) / 2.0
	return 0.0
function code(x, eps)
	# ((1 + 1/eps)·e^{-(1-eps)x} − (1/eps − 1)·e^{-(1+eps)x}) / 2, translated
	# directly from the original program. Every intermediate is forced to
	# Float64, mirroring the generated code's explicit conversions.
	inv = Float64(1.0 / eps)
	grow = Float64(Float64(1.0 + inv) * exp(Float64(-Float64(Float64(1.0 - eps) * x))))
	decay = Float64(Float64(inv - 1.0) * exp(Float64(-Float64(Float64(1.0 + eps) * x))))
	return Float64(Float64(grow - decay) / 2.0)
end
function code(x, eps)
	# Herbie's rewrite: average the two exponentials below the cutoff;
	# past x = 350 the true value has underflowed to zero.
	if (x <= 350.0)
		return Float64(Float64(exp(Float64(x * eps)) + exp(Float64(x * Float64(-eps)))) / 2.0)
	end
	return 0.0
end
function tmp = code(x, eps)
	% ((1 + 1/eps)*e^{-(1-eps)x} - (1/eps - 1)*e^{-(1+eps)x}) / 2,
	% translated directly from the original program.
	inv = 1.0 / eps;
	grow = (1.0 + inv) * exp(-((1.0 - eps) * x));
	decay = (inv - 1.0) * exp(-((1.0 + eps) * x));
	tmp = (grow - decay) / 2.0;
end
function tmp_2 = code(x, eps)
	% Herbie's rewrite: average the two exponentials below the cutoff;
	% past x = 350 the true value has underflowed to zero.
	if (x <= 350.0)
		tmp_2 = (exp((x * eps)) + exp((x * -eps))) / 2.0;
	else
		tmp_2 = 0.0;
	end
end
(* Direct translation of the original program; each N[..., $MachinePrecision]
   mirrors a binary64 rounding step of the generated code. *)
code[x_, eps_] := N[(N[(N[(N[(1.0 + N[(1.0 / eps), $MachinePrecision]), $MachinePrecision] * N[Exp[(-N[(N[(1.0 - eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(1.0 / eps), $MachinePrecision] - 1.0), $MachinePrecision] * N[Exp[(-N[(N[(1.0 + eps), $MachinePrecision] * x), $MachinePrecision])], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]
(* Herbie's rewrite: average Exp[x*eps] and Exp[-x*eps] below the x = 350
   cutoff; beyond it the true value has underflowed to zero. *)
code[x_, eps_] := If[LessEqual[x, 350.0], N[(N[(N[Exp[N[(x * eps), $MachinePrecision]], $MachinePrecision] + N[Exp[N[(x * (-eps)), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision], 0.0]
\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2}
\begin{array}{l}
\mathbf{if}\;x \leq 350:\\
\;\;\;\;\frac{e^{x \cdot \varepsilon} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}

Error

Try it out

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation

  1. Split input into 2 regimes
  2. if x < 350

    1. Initial program 38.0%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 38.0%

      \[\leadsto \color{blue}{\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}{2}} \]
      Proof

      [Start]38.0

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      distribute-rgt-neg-in [=>]38.0

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\color{blue}{\left(1 - \varepsilon\right) \cdot \left(-x\right)}} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      sub-neg [=>]38.0

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \color{blue}{\left(\frac{1}{\varepsilon} + \left(-1\right)\right)} \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      metadata-eval [=>]38.0

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + \color{blue}{-1}\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]

      distribute-rgt-neg-in [=>]38.0

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{\left(1 - \varepsilon\right) \cdot \left(-x\right)} - \left(\frac{1}{\varepsilon} + -1\right) \cdot e^{\color{blue}{\left(1 + \varepsilon\right) \cdot \left(-x\right)}}}{2} \]
    3. Taylor expanded in eps around -inf 98.3%

      \[\leadsto \frac{\color{blue}{e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}}{2} \]
    4. Simplified 98.3%

      \[\leadsto \frac{\color{blue}{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\left(-x\right) \cdot \left(1 - \left(-\varepsilon\right)\right)}\right)}}{2} \]
      Proof

      [Start]98.3

      \[ \frac{e^{-1 \cdot \left(\left(1 + -1 \cdot \varepsilon\right) \cdot x\right)} - -1 \cdot e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}{2} \]

      *-commutative [=>]98.3

      \[ \frac{e^{-1 \cdot \color{blue}{\left(x \cdot \left(1 + -1 \cdot \varepsilon\right)\right)}} - -1 \cdot e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}{2} \]

      mul-1-neg [=>]98.3

      \[ \frac{e^{-1 \cdot \left(x \cdot \left(1 + \color{blue}{\left(-\varepsilon\right)}\right)\right)} - -1 \cdot e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}{2} \]

      sub-neg [<=]98.3

      \[ \frac{e^{-1 \cdot \left(x \cdot \color{blue}{\left(1 - \varepsilon\right)}\right)} - -1 \cdot e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}{2} \]

      associate-*r* [=>]98.3

      \[ \frac{e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 - \varepsilon\right)}} - -1 \cdot e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}{2} \]

      mul-1-neg [=>]98.3

      \[ \frac{e^{\color{blue}{\left(-x\right)} \cdot \left(1 - \varepsilon\right)} - -1 \cdot e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}}{2} \]

      mul-1-neg [=>]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \color{blue}{\left(-e^{-1 \cdot \left(\left(1 - -1 \cdot \varepsilon\right) \cdot x\right)}\right)}}{2} \]

      *-commutative [=>]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{-1 \cdot \color{blue}{\left(x \cdot \left(1 - -1 \cdot \varepsilon\right)\right)}}\right)}{2} \]

      associate-*r* [=>]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot x\right) \cdot \left(1 - -1 \cdot \varepsilon\right)}}\right)}{2} \]

      mul-1-neg [=>]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-x\right)} \cdot \left(1 - -1 \cdot \varepsilon\right)}\right)}{2} \]

      mul-1-neg [=>]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\left(-x\right) \cdot \left(1 - \color{blue}{\left(-\varepsilon\right)}\right)}\right)}{2} \]
    5. Taylor expanded in eps around inf 98.3%

      \[\leadsto \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)}}\right)}{2} \]
    6. Simplified 98.3%

      \[\leadsto \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right) \cdot x}}\right)}{2} \]
      Proof

      [Start]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{-1 \cdot \left(\varepsilon \cdot x\right)}\right)}{2} \]

      associate-*r* [=>]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x}}\right)}{2} \]

      mul-1-neg [=>]98.3

      \[ \frac{e^{\left(-x\right) \cdot \left(1 - \varepsilon\right)} - \left(-e^{\color{blue}{\left(-\varepsilon\right)} \cdot x}\right)}{2} \]
    7. Taylor expanded in eps around inf 98.9%

      \[\leadsto \frac{e^{\color{blue}{\varepsilon \cdot x}} - \left(-e^{\left(-\varepsilon\right) \cdot x}\right)}{2} \]

    if 350 < x

    1. Initial program 99.9%

      \[\frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    2. Simplified 99.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(1 + \frac{1}{\varepsilon}, {\left(e^{\varepsilon + -1}\right)}^{x}, \frac{1 - \frac{1}{\varepsilon}}{e^{\mathsf{fma}\left(\varepsilon, x, x\right)}}\right)}{2}} \]
      Proof

      [Start]99.9

      \[ \frac{\left(1 + \frac{1}{\varepsilon}\right) \cdot e^{-\left(1 - \varepsilon\right) \cdot x} - \left(\frac{1}{\varepsilon} - 1\right) \cdot e^{-\left(1 + \varepsilon\right) \cdot x}}{2} \]
    3. Taylor expanded in eps around 0 100.0%

      \[\leadsto \frac{\color{blue}{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}}{2} \]
    4. Simplified 100.0%

      \[\leadsto \frac{\color{blue}{0}}{2} \]
      Proof

      [Start]100.0

      \[ \frac{\frac{e^{-1 \cdot x} - \frac{1}{e^{x}}}{\varepsilon}}{2} \]

      div-sub [=>]100.0

      \[ \frac{\color{blue}{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\frac{1}{e^{x}}}{\varepsilon}}}{2} \]

      rec-exp [=>]100.0

      \[ \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{\color{blue}{e^{-x}}}{\varepsilon}}{2} \]

      mul-1-neg [<=]100.0

      \[ \frac{\frac{e^{-1 \cdot x}}{\varepsilon} - \frac{e^{\color{blue}{-1 \cdot x}}}{\varepsilon}}{2} \]

      +-inverses [=>]100.0

      \[ \frac{\color{blue}{0}}{2} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 99.1%

    \[\leadsto \begin{array}{l} \mathbf{if}\;x \leq 350:\\ \;\;\;\;\frac{e^{x \cdot \varepsilon} + e^{x \cdot \left(-\varepsilon\right)}}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]

Alternatives

Alternative 1
Accuracy 99.1%
Cost 26688
\[\begin{array}{l} t_0 := e^{-x}\\ t_1 := t_0 + x \cdot t_0\\ \frac{t_1 + t_1}{2} \end{array} \]
Alternative 2
Accuracy 99.1%
Cost 20096
\[\begin{array}{l} t_0 := \frac{x}{e^{x}}\\ \frac{t_0 + \left(t_0 + e^{-x} \cdot 2\right)}{2} \end{array} \]
Alternative 3
Accuracy 98.7%
Cost 13632
\[\frac{e^{x \cdot \left(-1 + \varepsilon\right)} + e^{x \cdot \left(-1 - \varepsilon\right)}}{2} \]
Alternative 4
Accuracy 98.5%
Cost 580
\[\begin{array}{l} \mathbf{if}\;x \leq 1.4:\\ \;\;\;\;\frac{2 - x \cdot x}{2}\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 5
Accuracy 98.3%
Cost 196
\[\begin{array}{l} \mathbf{if}\;x \leq 350:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;0\\ \end{array} \]
Alternative 6
Accuracy 26.5%
Cost 64
\[0 \]

Error

Reproduce

herbie shell --seed 2023137 
; Input program fed to Herbie to reproduce this report (with the shell
; command above).
(FPCore (x eps)
  :name "NMSE Section 6.1 mentioned, A"
  :precision binary64
  (/ (- (* (+ 1.0 (/ 1.0 eps)) (exp (- (* (- 1.0 eps) x)))) (* (- (/ 1.0 eps) 1.0) (exp (- (* (+ 1.0 eps) x))))) 2.0))