?

Average Accuracy: 54.4% → 99.6%
Time: 10.3s
Precision: binary64
Cost: 14024

?

\[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
\[\begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -1:\\ \;\;\;\;-1 + \frac{2}{1 + e^{-2 \cdot x}}\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;-0.3333333333333333 \cdot {x}^{3} + \left(x + 0.13333333333333333 \cdot {x}^{5}\right)\\ \mathbf{else}:\\ \;\;\;\;-1\\ \end{array} \]
; Original program: 2/(1 + e^(-2x)) - 1, i.e. tanh(x); y is unused.
(FPCore (x y) :precision binary64 (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))
; Herbie-improved program: piecewise evaluation of 2/(1 + e^(-2x)) - 1.
(FPCore (x y)
 :precision binary64
 ; regime 1 (-2x <= -1, i.e. x >= 0.5): direct sigmoid form
 (if (<= (* -2.0 x) -1.0)
   (+ -1.0 (/ 2.0 (+ 1.0 (exp (* -2.0 x)))))
   ; regime 2 (-2x <= 0.002, i.e. x >= -0.001): odd Taylor polynomial around 0
   (if (<= (* -2.0 x) 0.002)
     (+
      (* -0.3333333333333333 (pow x 3.0))
      (+ x (* 0.13333333333333333 (pow x 5.0))))
     ; regime 3 (x < -0.001): saturates to -1
     -1.0)))
double code(double x, double y) {
	return (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
}
double code(double x, double y) {
	double tmp;
	if ((-2.0 * x) <= -1.0) {
		tmp = -1.0 + (2.0 / (1.0 + exp((-2.0 * x))));
	} else if ((-2.0 * x) <= 0.002) {
		tmp = (-0.3333333333333333 * pow(x, 3.0)) + (x + (0.13333333333333333 * pow(x, 5.0)));
	} else {
		tmp = -1.0;
	}
	return tmp;
}
! tanh(x) via the logistic sigmoid 2/(1 + exp(-2x)) - 1; y is unused.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: denom
    denom = 1.0d0 + exp((-2.0d0) * x)
    code = (2.0d0 / denom) - 1.0d0
end function
! Piecewise, accuracy-oriented evaluation of 2/(1 + exp(-2x)) - 1; y is unused.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: t
    t = (-2.0d0) * x
    if (t <= (-1.0d0)) then
        ! x >= 0.5: direct sigmoid form
        code = (-1.0d0) + (2.0d0 / (1.0d0 + exp(t)))
    else if (t <= 0.002d0) then
        ! x >= -0.001: odd Taylor polynomial around 0
        code = ((-0.3333333333333333d0) * (x ** 3.0d0)) + (x + (0.13333333333333333d0 * (x ** 5.0d0)))
    else
        ! x < -0.001: saturates to -1
        code = -1.0d0
    end if
end function
/** tanh(x) via the logistic sigmoid: 2/(1 + e^(-2x)) - 1; y is unused. */
public static double code(double x, double y) {
	double denom = 1.0 + Math.exp(-2.0 * x);
	return 2.0 / denom - 1.0;
}
/**
 * Piecewise, accuracy-oriented evaluation of 2/(1 + e^(-2x)) - 1 (= tanh(x)).
 * With t = -2x: t <= -1 uses the direct sigmoid form, t <= 0.002 an odd
 * Taylor polynomial around 0, and otherwise the value saturates to -1.
 * The parameter y is unused.
 */
public static double code(double x, double y) {
	double t = -2.0 * x;
	if (t <= -1.0) {
		return -1.0 + 2.0 / (1.0 + Math.exp(t));
	}
	if (t <= 0.002) {
		return -0.3333333333333333 * Math.pow(x, 3.0)
		     + (x + 0.13333333333333333 * Math.pow(x, 5.0));
	}
	return -1.0;
}
def code(x, y):
	"""tanh(x) via the logistic sigmoid: 2/(1 + e^(-2x)) - 1; y is unused."""
	denom = 1.0 + math.exp(-2.0 * x)
	return 2.0 / denom - 1.0
def code(x, y):
	"""Piecewise, accuracy-oriented evaluation of 2/(1 + e^(-2x)) - 1.

	Equivalent to tanh(x); y is unused.  With t = -2x:
	t <= -1 uses the direct sigmoid form, t <= 0.002 an odd Taylor
	polynomial around 0, and otherwise the value saturates to -1.
	"""
	t = -2.0 * x
	if t <= -1.0:
		# x >= 0.5: direct sigmoid form
		return -1.0 + 2.0 / (1.0 + math.exp(t))
	if t <= 0.002:
		# x >= -0.001: odd Taylor polynomial around 0
		return -0.3333333333333333 * math.pow(x, 3.0) + (x + 0.13333333333333333 * math.pow(x, 5.0))
	# x < -0.001: saturates to -1
	return -1.0
# tanh(x) via the logistic sigmoid: 2/(1 + e^(-2x)) - 1; y is unused.
function code(x, y)
	denom = Float64(1.0 + exp(Float64(-2.0 * x)))
	return Float64(Float64(2.0 / denom) - 1.0)
end
# Piecewise, accuracy-oriented evaluation of 2/(1 + e^(-2x)) - 1 (= tanh(x));
# y is unused.  With t = -2x: t <= -1 uses the direct sigmoid form,
# t <= 0.002 an odd Taylor polynomial around 0, otherwise saturates to -1.
function code(x, y)
	t = Float64(-2.0 * x)
	if t <= -1.0
		# x >= 0.5: direct sigmoid form
		return Float64(-1.0 + Float64(2.0 / Float64(1.0 + exp(t))))
	elseif t <= 0.002
		# x >= -0.001: odd Taylor polynomial around 0
		return Float64(Float64(-0.3333333333333333 * (x ^ 3.0)) + Float64(x + Float64(0.13333333333333333 * (x ^ 5.0))))
	end
	# x < -0.001: saturates to -1
	return -1.0
end
% tanh(x) via the logistic sigmoid: 2/(1 + exp(-2x)) - 1; y is unused.
function out = code(x, y)
	denom = 1.0 + exp(-2.0 * x);
	out = (2.0 / denom) - 1.0;
end
% Piecewise, accuracy-oriented evaluation of 2/(1 + exp(-2x)) - 1 (= tanh(x));
% y is unused.  With t = -2x: t <= -1 uses the direct sigmoid form,
% t <= 0.002 an odd Taylor polynomial around 0, otherwise saturates to -1.
function out = code(x, y)
	t = -2.0 * x;
	if (t <= -1.0)
		% x >= 0.5: direct sigmoid form
		out = -1.0 + (2.0 / (1.0 + exp(t)));
	elseif (t <= 0.002)
		% x >= -0.001: odd Taylor polynomial around 0
		out = (-0.3333333333333333 * (x ^ 3.0)) + (x + (0.13333333333333333 * (x ^ 5.0)));
	else
		% x < -0.001: saturates to -1
		out = -1.0;
	end
end
(* tanh(x) via the logistic sigmoid 2/(1 + e^(-2x)) - 1, evaluated at machine precision; y is unused. *)
code[x_, y_] := N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
(* Piecewise, accuracy-oriented evaluation of 2/(1 + e^(-2x)) - 1; y is unused.
   With t = -2x: t <= -1 uses the direct sigmoid form, t <= 0.002 an odd Taylor
   polynomial around 0, otherwise the value saturates to -1. *)
code[x_, y_] := If[LessEqual[N[(-2.0 * x), $MachinePrecision], -1.0], N[(-1.0 + N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.002], N[(N[(-0.3333333333333333 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(0.13333333333333333 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0]]
\frac{2}{1 + e^{-2 \cdot x}} - 1
\begin{array}{l}
\mathbf{if}\;-2 \cdot x \leq -1:\\
\;\;\;\;-1 + \frac{2}{1 + e^{-2 \cdot x}}\\

\mathbf{elif}\;-2 \cdot x \leq 0.002:\\
\;\;\;\;-0.3333333333333333 \cdot {x}^{3} + \left(x + 0.13333333333333333 \cdot {x}^{5}\right)\\

\mathbf{else}:\\
\;\;\;\;-1\\


\end{array}

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation?

  1. Split input into 3 regimes
  2. if (*.f64 -2 x) < -1

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]

    if -1 < (*.f64 -2 x) < 2e-3

    1. Initial program 9.2%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Taylor expanded in x around 0 100.0%

      \[\leadsto \color{blue}{-0.3333333333333333 \cdot {x}^{3} + \left(0.13333333333333333 \cdot {x}^{5} + x\right)} \]

    if 2e-3 < (*.f64 -2 x)

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Taylor expanded in x around 0 96.1%

      \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
    3. Simplified 96.1%

      \[\leadsto \frac{2}{\color{blue}{2 + x \cdot -2}} - 1 \]
      Step-by-step derivation

      [Start] 96.1

      \[ \frac{2}{2 + -2 \cdot x} - 1 \]

      *-commutative [=>] 96.1

      \[ \frac{2}{2 + \color{blue}{x \cdot -2}} - 1 \]
    4. Taylor expanded in x around inf 100.0%

      \[\leadsto \color{blue}{-1} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification 100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -1:\\ \;\;\;\;-1 + \frac{2}{1 + e^{-2 \cdot x}}\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;-0.3333333333333333 \cdot {x}^{3} + \left(x + 0.13333333333333333 \cdot {x}^{5}\right)\\ \mathbf{else}:\\ \;\;\;\;-1\\ \end{array} \]

Alternatives

Alternative 1
Accuracy 99.9%
Cost 7497
\[\begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -1 \lor \neg \left(-2 \cdot x \leq 5 \cdot 10^{-5}\right):\\ \;\;\;\;-1 + \frac{2}{1 + e^{-2 \cdot x}}\\ \mathbf{else}:\\ \;\;\;\;x + -0.3333333333333333 \cdot {x}^{3}\\ \end{array} \]
Alternative 2
Accuracy 79.0%
Cost 584
\[\begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;-1\\ \mathbf{elif}\;x \leq 2.6:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;2 - \frac{4}{x}\\ \end{array} \]
Alternative 3
Accuracy 78.5%
Cost 580
\[\begin{array}{l} \mathbf{if}\;x \leq -0.66:\\ \;\;\;\;-1\\ \mathbf{else}:\\ \;\;\;\;x \cdot \frac{2}{x + 2}\\ \end{array} \]
Alternative 4
Accuracy 79.0%
Cost 328
\[\begin{array}{l} \mathbf{if}\;x \leq -1:\\ \;\;\;\;-1\\ \mathbf{elif}\;x \leq 2:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;2\\ \end{array} \]
Alternative 5
Accuracy 32.4%
Cost 196
\[\begin{array}{l} \mathbf{if}\;x \leq 1.1 \cdot 10^{-308}:\\ \;\;\;\;-1\\ \mathbf{else}:\\ \;\;\;\;2\\ \end{array} \]
Alternative 6
Accuracy 27.3%
Cost 64
\[-1 \]

Error

Reproduce?

herbie shell --seed 2023157 
(FPCore (x y)
  :name "Logistic function from Lakshay Garg"
  :precision binary64
  ; 2/(1 + e^(-2x)) - 1, i.e. tanh(x); y is unused
  (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))