Logistic function from Lakshay Garg

Percentage Accurate: 53.2% → 99.9%
Time: 10.9s
Alternatives: 11
Speedup: 5.3×

Specification

?
\[\begin{array}{l} \\ \frac{2}{1 + e^{-2 \cdot x}} - 1 \end{array} \]
; Specification: 2/(1 + e^(-2x)) - 1, mathematically tanh(x); y is unused.
(FPCore (x y) :precision binary64 (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))
double code(double x, double y) {
	return (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
}
! Evaluates 2/(1 + exp(-2x)) - 1, mathematically tanh(x).
! The argument y is unused; it is kept to match the FPCore signature.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (2.0d0 / (1.0d0 + exp(((-2.0d0) * x)))) - 1.0d0
end function
// Logistic form 2 / (1 + e^(-2x)) - 1, mathematically equal to tanh(x).
// The parameter y is unused; it is kept to match the FPCore signature.
public static double code(double x, double y) {
	final double expTerm = Math.exp(-2.0 * x);
	final double denominator = 1.0 + expTerm;
	return 2.0 / denominator - 1.0;
}
def code(x, y):
	"""Logistic form 2 / (1 + e**(-2x)) - 1, mathematically tanh(x).

	The parameter y is unused; it is kept to match the FPCore signature.
	"""
	exp_term = math.exp(-2.0 * x)
	denominator = 1.0 + exp_term
	return 2.0 / denominator - 1.0
# Evaluates 2/(1 + exp(-2x)) - 1, mathematically tanh(x); y is unused.
# The Float64(...) wrappers pin every intermediate to binary64.
function code(x, y)
	return Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0)
end
% Evaluates 2/(1 + exp(-2x)) - 1, mathematically tanh(x); y is unused.
function tmp = code(x, y)
	tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
end
(* Evaluates 2/(1 + Exp[-2x]) - 1, mathematically Tanh[x], at machine precision; y is unused. *)
code[x_, y_] := N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{2}{1 + e^{-2 \cdot x}} - 1
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line shows the average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 11 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{2}{1 + e^{-2 \cdot x}} - 1 \end{array} \]
; Initial program: 2/(1 + e^(-2x)) - 1, mathematically tanh(x); y is unused.
(FPCore (x y) :precision binary64 (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))
// Initial program: 2/(1 + exp(-2x)) - 1, mathematically tanh(x); y is unused.
double code(double x, double y) {
	return (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
}
! Initial program: 2/(1 + exp(-2x)) - 1, mathematically tanh(x).
! The argument y is unused; it is kept to match the FPCore signature.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (2.0d0 / (1.0d0 + exp(((-2.0d0) * x)))) - 1.0d0
end function
// Initial program: 2/(1 + exp(-2x)) - 1, mathematically tanh(x); y is unused.
public static double code(double x, double y) {
	return (2.0 / (1.0 + Math.exp((-2.0 * x)))) - 1.0;
}
# Initial program: 2/(1 + exp(-2x)) - 1, mathematically tanh(x); y is unused.
def code(x, y):
	return (2.0 / (1.0 + math.exp((-2.0 * x)))) - 1.0
# Initial program: 2/(1 + exp(-2x)) - 1, mathematically tanh(x); y is unused.
# The Float64(...) wrappers pin every intermediate to binary64.
function code(x, y)
	return Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0)
end
% Initial program: 2/(1 + exp(-2x)) - 1, mathematically tanh(x); y is unused.
function tmp = code(x, y)
	tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
end
(* Initial program: 2/(1 + Exp[-2x]) - 1, mathematically Tanh[x], at machine precision; y is unused. *)
code[x_, y_] := N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{2}{1 + e^{-2 \cdot x}} - 1
\end{array}

Alternative 1: 99.9% accurate, 0.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{-2 \cdot x}\\ t_1 := 1 + t\_0\\ t_2 := \frac{2}{t\_1}\\ t_3 := 1 + t\_2\\ t_4 := t\_1 \cdot 0.5\\ \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;t\_2 + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\frac{{t\_4}^{-3}}{\mathsf{fma}\left(64, {t\_1}^{-6}, {t\_3}^{3}\right)}, \mathsf{fma}\left(t\_3, t\_3 - {t\_4}^{-2}, {t\_4}^{-4}\right), \frac{1}{\frac{2}{-1 - t\_0} - \mathsf{fma}\left(4, {t\_1}^{-2}, 1\right)}\right)\\ \end{array} \end{array} \]
; Alternative 1: regime split on -2*x.
;   -2*x <= -20   : original formula (accurate in this regime per the report)
;   -2*x <= 0.002 : Taylor polynomial x + x^3*(2/15*x^2 - 1/3) in fma form
;   otherwise     : fma/pow rearrangement found by Herbie's rewrite search
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (exp (* -2.0 x)))
        (t_1 (+ 1.0 t_0))
        (t_2 (/ 2.0 t_1))
        (t_3 (+ 1.0 t_2))
        (t_4 (* t_1 0.5)))
   (if (<= (* -2.0 x) -20.0)
     (+ t_2 -1.0)
     (if (<= (* -2.0 x) 0.002)
       (fma
        (fma (* x x) 0.13333333333333333 -0.3333333333333333)
        (* x (* x x))
        x)
       (fma
        (/ (pow t_4 -3.0) (fma 64.0 (pow t_1 -6.0) (pow t_3 3.0)))
        (fma t_3 (- t_3 (pow t_4 -2.0)) (pow t_4 -4.0))
        (/ 1.0 (- (/ 2.0 (- -1.0 t_0)) (fma 4.0 (pow t_1 -2.0) 1.0))))))))
// Alternative 1: piecewise evaluation of 2/(1+exp(-2x)) - 1 (i.e. tanh(x)).
// Branch on -2*x:
//   <= -20   : original formula (accurate in this regime per the report)
//   <= 0.002 : Taylor polynomial x + x^3*(2/15*x^2 - 1/3) in fma form
//   else     : fma/pow rearrangement found by Herbie's rewrite search
// Evaluation order is precision-significant; do not re-associate.
// The parameter y is unused; it is kept to match the FPCore signature.
double code(double x, double y) {
	// Shared subexpressions of the original formula.
	double t_0 = exp((-2.0 * x));
	double t_1 = 1.0 + t_0;
	double t_2 = 2.0 / t_1;
	double t_3 = 1.0 + t_2;
	double t_4 = t_1 * 0.5;
	double tmp;
	if ((-2.0 * x) <= -20.0) {
		tmp = t_2 + -1.0;
	} else if ((-2.0 * x) <= 0.002) {
		// 0.13333... = 2/15 and -0.33333... = -1/3 (Taylor coefficients).
		tmp = fma(fma((x * x), 0.13333333333333333, -0.3333333333333333), (x * (x * x)), x);
	} else {
		tmp = fma((pow(t_4, -3.0) / fma(64.0, pow(t_1, -6.0), pow(t_3, 3.0))), fma(t_3, (t_3 - pow(t_4, -2.0)), pow(t_4, -4.0)), (1.0 / ((2.0 / (-1.0 - t_0)) - fma(4.0, pow(t_1, -2.0), 1.0))));
	}
	return tmp;
}
# Alternative 1: piecewise evaluation of 2/(1+exp(-2x)) - 1 (i.e. tanh(x)).
# Regimes on -2*x: original formula (<= -20), Taylor polynomial in fma form
# (<= 0.002), fma/pow rearrangement otherwise. Float64(...) pins each
# intermediate to binary64; evaluation order is precision-significant.
# The parameter y is unused; it is kept to match the FPCore signature.
function code(x, y)
	t_0 = exp(Float64(-2.0 * x))
	t_1 = Float64(1.0 + t_0)
	t_2 = Float64(2.0 / t_1)
	t_3 = Float64(1.0 + t_2)
	t_4 = Float64(t_1 * 0.5)
	tmp = 0.0
	if (Float64(-2.0 * x) <= -20.0)
		tmp = Float64(t_2 + -1.0);
	elseif (Float64(-2.0 * x) <= 0.002)
		# Taylor polynomial x + x^3*(2/15*x^2 - 1/3) via fused multiply-adds.
		tmp = fma(fma(Float64(x * x), 0.13333333333333333, -0.3333333333333333), Float64(x * Float64(x * x)), x);
	else
		tmp = fma(Float64((t_4 ^ -3.0) / fma(64.0, (t_1 ^ -6.0), (t_3 ^ 3.0))), fma(t_3, Float64(t_3 - (t_4 ^ -2.0)), (t_4 ^ -4.0)), Float64(1.0 / Float64(Float64(2.0 / Float64(-1.0 - t_0)) - fma(4.0, (t_1 ^ -2.0), 1.0))));
	end
	return tmp
end
(* Alternative 1: piecewise form of 2/(1 + Exp[-2x]) - 1 (regimes on -2x); y is unused. *)
code[x_, y_] := Block[{t$95$0 = N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(1.0 + t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(2.0 / t$95$1), $MachinePrecision]}, Block[{t$95$3 = N[(1.0 + t$95$2), $MachinePrecision]}, Block[{t$95$4 = N[(t$95$1 * 0.5), $MachinePrecision]}, If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], N[(t$95$2 + -1.0), $MachinePrecision], If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.002], N[(N[(N[(x * x), $MachinePrecision] * 0.13333333333333333 + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(N[(N[Power[t$95$4, -3.0], $MachinePrecision] / N[(64.0 * N[Power[t$95$1, -6.0], $MachinePrecision] + N[Power[t$95$3, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(t$95$3 * N[(t$95$3 - N[Power[t$95$4, -2.0], $MachinePrecision]), $MachinePrecision] + N[Power[t$95$4, -4.0], $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[(N[(2.0 / N[(-1.0 - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(4.0 * N[Power[t$95$1, -2.0], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := e^{-2 \cdot x}\\
t_1 := 1 + t\_0\\
t_2 := \frac{2}{t\_1}\\
t_3 := 1 + t\_2\\
t_4 := t\_1 \cdot 0.5\\
\mathbf{if}\;-2 \cdot x \leq -20:\\
\;\;\;\;t\_2 + -1\\

\mathbf{elif}\;-2 \cdot x \leq 0.002:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{{t\_4}^{-3}}{\mathsf{fma}\left(64, {t\_1}^{-6}, {t\_3}^{3}\right)}, \mathsf{fma}\left(t\_3, t\_3 - {t\_4}^{-2}, {t\_4}^{-4}\right), \frac{1}{\frac{2}{-1 - t\_0} - \mathsf{fma}\left(4, {t\_1}^{-2}, 1\right)}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 #s(literal -2 binary64) x) < -20

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing

    if -20 < (*.f64 #s(literal -2 binary64) x) < 2e-3

    1. Initial program 9.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{1 \cdot x + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x} \]
      2. *-lft-identityN/A

        \[\leadsto \color{blue}{x} + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x + x} \]
      4. *-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} + x \]
      5. associate-*r*N/A

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)} + x \]
      6. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right)} + x \]
      7. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}, x \cdot {x}^{2}, x\right)} \]
      8. sub-negN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{2}{15} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)}, x \cdot {x}^{2}, x\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot \frac{2}{15}} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right), x \cdot {x}^{2}, x\right) \]
      10. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left({x}^{2} \cdot \frac{2}{15} + \color{blue}{\frac{-1}{3}}, x \cdot {x}^{2}, x\right) \]
      11. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left({x}^{2}, \frac{2}{15}, \frac{-1}{3}\right)}, x \cdot {x}^{2}, x\right) \]
      12. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      13. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      14. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), \color{blue}{x \cdot {x}^{2}}, x\right) \]
      15. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
      16. *-lowering-*.f64100.0

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
    5. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]

    if 2e-3 < (*.f64 #s(literal -2 binary64) x)

    1. Initial program 99.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{{\left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right)}^{-3}}{\mathsf{fma}\left(64, {\left(1 + e^{-2 \cdot x}\right)}^{-6}, {\left(1 + \frac{2}{1 + e^{-2 \cdot x}}\right)}^{3}\right)}, \mathsf{fma}\left(1 + \frac{2}{1 + e^{-2 \cdot x}}, \left(1 + \frac{2}{1 + e^{-2 \cdot x}}\right) - {\left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right)}^{-2}, {\left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right)}^{-4}\right), -\frac{1}{\frac{2}{1 + e^{-2 \cdot x}} + \mathsf{fma}\left(4, {\left(1 + e^{-2 \cdot x}\right)}^{-2}, 1\right)}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\frac{{\left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right)}^{-3}}{\mathsf{fma}\left(64, {\left(1 + e^{-2 \cdot x}\right)}^{-6}, {\left(1 + \frac{2}{1 + e^{-2 \cdot x}}\right)}^{3}\right)}, \mathsf{fma}\left(1 + \frac{2}{1 + e^{-2 \cdot x}}, \left(1 + \frac{2}{1 + e^{-2 \cdot x}}\right) - {\left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right)}^{-2}, {\left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right)}^{-4}\right), \frac{1}{\frac{2}{-1 - e^{-2 \cdot x}} - \mathsf{fma}\left(4, {\left(1 + e^{-2 \cdot x}\right)}^{-2}, 1\right)}\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 2: 99.9% accurate, 0.2× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := 1 + e^{-2 \cdot x}\\ t_1 := \frac{2}{t\_0}\\ t_2 := 1 + t\_1\\ \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;t\_1 + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\frac{{t\_0}^{-2}}{t\_2}, 4, \frac{-1}{t\_2}\right)\\ \end{array} \end{array} \]
; Alternative 2: regime split on -2*x.
;   -2*x <= -20   : original formula
;   -2*x <= 0.002 : Taylor polynomial x + x^3*(2/15*x^2 - 1/3) in fma form
;   otherwise     : 4*t_0^-2/t_2 - 1/t_2 rearrangement (from (f^2-1)/(f+1))
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (+ 1.0 (exp (* -2.0 x)))) (t_1 (/ 2.0 t_0)) (t_2 (+ 1.0 t_1)))
   (if (<= (* -2.0 x) -20.0)
     (+ t_1 -1.0)
     (if (<= (* -2.0 x) 0.002)
       (fma
        (fma (* x x) 0.13333333333333333 -0.3333333333333333)
        (* x (* x x))
        x)
       (fma (/ (pow t_0 -2.0) t_2) 4.0 (/ -1.0 t_2))))))
// Alternative 2: piecewise evaluation of 2/(1+exp(-2x)) - 1 (i.e. tanh(x)).
// Branch on -2*x:
//   <= -20   : original formula
//   <= 0.002 : Taylor polynomial x + x^3*(2/15*x^2 - 1/3) in fma form
//   else     : 4*t_0^-2/t_2 - 1/t_2, a rearrangement derived via (f^2-1)/(f+1)
// Evaluation order is precision-significant; do not re-associate.
// The parameter y is unused; it is kept to match the FPCore signature.
double code(double x, double y) {
	double t_0 = 1.0 + exp((-2.0 * x));
	double t_1 = 2.0 / t_0;
	double t_2 = 1.0 + t_1;
	double tmp;
	if ((-2.0 * x) <= -20.0) {
		tmp = t_1 + -1.0;
	} else if ((-2.0 * x) <= 0.002) {
		// 0.13333... = 2/15 and -0.33333... = -1/3 (Taylor coefficients).
		tmp = fma(fma((x * x), 0.13333333333333333, -0.3333333333333333), (x * (x * x)), x);
	} else {
		tmp = fma((pow(t_0, -2.0) / t_2), 4.0, (-1.0 / t_2));
	}
	return tmp;
}
# Alternative 2: piecewise evaluation of 2/(1+exp(-2x)) - 1 (i.e. tanh(x)).
# Regimes on -2*x: original formula (<= -20), Taylor polynomial in fma form
# (<= 0.002), 4*t_0^-2/t_2 - 1/t_2 rearrangement otherwise.
# Float64(...) pins intermediates to binary64; order is precision-significant.
# The parameter y is unused; it is kept to match the FPCore signature.
function code(x, y)
	t_0 = Float64(1.0 + exp(Float64(-2.0 * x)))
	t_1 = Float64(2.0 / t_0)
	t_2 = Float64(1.0 + t_1)
	tmp = 0.0
	if (Float64(-2.0 * x) <= -20.0)
		tmp = Float64(t_1 + -1.0);
	elseif (Float64(-2.0 * x) <= 0.002)
		# Taylor polynomial x + x^3*(2/15*x^2 - 1/3) via fused multiply-adds.
		tmp = fma(fma(Float64(x * x), 0.13333333333333333, -0.3333333333333333), Float64(x * Float64(x * x)), x);
	else
		tmp = fma(Float64((t_0 ^ -2.0) / t_2), 4.0, Float64(-1.0 / t_2));
	end
	return tmp
end
(* Alternative 2: piecewise form of 2/(1 + Exp[-2x]) - 1 (regimes on -2x); y is unused. *)
code[x_, y_] := Block[{t$95$0 = N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(2.0 / t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 + t$95$1), $MachinePrecision]}, If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], N[(t$95$1 + -1.0), $MachinePrecision], If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.002], N[(N[(N[(x * x), $MachinePrecision] * 0.13333333333333333 + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(N[(N[Power[t$95$0, -2.0], $MachinePrecision] / t$95$2), $MachinePrecision] * 4.0 + N[(-1.0 / t$95$2), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := 1 + e^{-2 \cdot x}\\
t_1 := \frac{2}{t\_0}\\
t_2 := 1 + t\_1\\
\mathbf{if}\;-2 \cdot x \leq -20:\\
\;\;\;\;t\_1 + -1\\

\mathbf{elif}\;-2 \cdot x \leq 0.002:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{{t\_0}^{-2}}{t\_2}, 4, \frac{-1}{t\_2}\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 #s(literal -2 binary64) x) < -20

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing

    if -20 < (*.f64 #s(literal -2 binary64) x) < 2e-3

    1. Initial program 9.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{1 \cdot x + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x} \]
      2. *-lft-identityN/A

        \[\leadsto \color{blue}{x} + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x + x} \]
      4. *-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} + x \]
      5. associate-*r*N/A

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)} + x \]
      6. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right)} + x \]
      7. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}, x \cdot {x}^{2}, x\right)} \]
      8. sub-negN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{2}{15} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)}, x \cdot {x}^{2}, x\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot \frac{2}{15}} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right), x \cdot {x}^{2}, x\right) \]
      10. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left({x}^{2} \cdot \frac{2}{15} + \color{blue}{\frac{-1}{3}}, x \cdot {x}^{2}, x\right) \]
      11. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left({x}^{2}, \frac{2}{15}, \frac{-1}{3}\right)}, x \cdot {x}^{2}, x\right) \]
      12. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      13. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      14. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), \color{blue}{x \cdot {x}^{2}}, x\right) \]
      15. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
      16. *-lowering-*.f64100.0

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
    5. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]

    if 2e-3 < (*.f64 #s(literal -2 binary64) x)

    1. Initial program 99.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. flip--N/A

        \[\leadsto \color{blue}{\frac{\frac{2}{1 + e^{-2 \cdot x}} \cdot \frac{2}{1 + e^{-2 \cdot x}} - 1 \cdot 1}{\frac{2}{1 + e^{-2 \cdot x}} + 1}} \]
      2. metadata-evalN/A

        \[\leadsto \frac{\frac{2}{1 + e^{-2 \cdot x}} \cdot \frac{2}{1 + e^{-2 \cdot x}} - \color{blue}{1}}{\frac{2}{1 + e^{-2 \cdot x}} + 1} \]
      3. div-subN/A

        \[\leadsto \color{blue}{\frac{\frac{2}{1 + e^{-2 \cdot x}} \cdot \frac{2}{1 + e^{-2 \cdot x}}}{\frac{2}{1 + e^{-2 \cdot x}} + 1} - \frac{1}{\frac{2}{1 + e^{-2 \cdot x}} + 1}} \]
      4. sub-negN/A

        \[\leadsto \color{blue}{\frac{\frac{2}{1 + e^{-2 \cdot x}} \cdot \frac{2}{1 + e^{-2 \cdot x}}}{\frac{2}{1 + e^{-2 \cdot x}} + 1} + \left(\mathsf{neg}\left(\frac{1}{\frac{2}{1 + e^{-2 \cdot x}} + 1}\right)\right)} \]
    4. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(4, \frac{{\left(1 + e^{-2 \cdot x}\right)}^{-2}}{1 + \frac{2}{1 + e^{-2 \cdot x}}}, -\frac{1}{1 + \frac{2}{1 + e^{-2 \cdot x}}}\right)} \]
    5. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{\frac{{\left(1 + e^{-2 \cdot x}\right)}^{-2}}{1 + \frac{2}{1 + e^{-2 \cdot x}}} \cdot 4} + \left(\mathsf{neg}\left(\frac{1}{1 + \frac{2}{1 + e^{-2 \cdot x}}}\right)\right) \]
      2. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{{\left(1 + e^{-2 \cdot x}\right)}^{-2}}{1 + \frac{2}{1 + e^{-2 \cdot x}}}, 4, \mathsf{neg}\left(\frac{1}{1 + \frac{2}{1 + e^{-2 \cdot x}}}\right)\right)} \]
    6. Applied egg-rr100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{{\left(1 + e^{-2 \cdot x}\right)}^{-2}}{1 + \frac{2}{1 + e^{-2 \cdot x}}}, 4, \frac{-1}{1 + \frac{2}{1 + e^{-2 \cdot x}}}\right)} \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\frac{{\left(1 + e^{-2 \cdot x}\right)}^{-2}}{1 + \frac{2}{1 + e^{-2 \cdot x}}}, 4, \frac{-1}{1 + \frac{2}{1 + e^{-2 \cdot x}}}\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 3: 99.9% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{-2 \cdot x}\\ \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;\frac{2}{1 + t\_0} + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left(t\_0\right)\right)\\ \end{array} \end{array} \]
; Alternative 3: regime split on -2*x.
;   -2*x <= -20   : original formula
;   -2*x <= 0.002 : Taylor polynomial x + x^3*(2/15*x^2 - 1/3) in fma form
;   otherwise     : identity 2/(1+t) - 1 = expm1(log 2 - log1p(t)), t = e^(-2x)
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (exp (* -2.0 x))))
   (if (<= (* -2.0 x) -20.0)
     (+ (/ 2.0 (+ 1.0 t_0)) -1.0)
     (if (<= (* -2.0 x) 0.002)
       (fma
        (fma (* x x) 0.13333333333333333 -0.3333333333333333)
        (* x (* x x))
        x)
       (expm1 (- (log 2.0) (log1p t_0)))))))
// Alternative 3: piecewise evaluation of 2/(1+exp(-2x)) - 1 (i.e. tanh(x)).
// Branch on -2*x:
//   <= -20   : original formula
//   <= 0.002 : Taylor polynomial x + x^3*(2/15*x^2 - 1/3) in fma form
//   else     : identity 2/(1+t) - 1 = expm1(log(2) - log1p(t)) with t = e^(-2x)
// The parameter y is unused; it is kept to match the FPCore signature.
double code(double x, double y) {
	double t_0 = exp((-2.0 * x));
	double tmp;
	if ((-2.0 * x) <= -20.0) {
		tmp = (2.0 / (1.0 + t_0)) + -1.0;
	} else if ((-2.0 * x) <= 0.002) {
		// 0.13333... = 2/15 and -0.33333... = -1/3 (Taylor coefficients).
		tmp = fma(fma((x * x), 0.13333333333333333, -0.3333333333333333), (x * (x * x)), x);
	} else {
		tmp = expm1((log(2.0) - log1p(t_0)));
	}
	return tmp;
}
# Alternative 3: piecewise evaluation of 2/(1+exp(-2x)) - 1 (i.e. tanh(x)).
# Regimes on -2*x: original formula (<= -20), Taylor polynomial in fma form
# (<= 0.002), and expm1(log(2) - log1p(exp(-2x))) otherwise.
# The parameter y is unused; it is kept to match the FPCore signature.
function code(x, y)
	t_0 = exp(Float64(-2.0 * x))
	tmp = 0.0
	if (Float64(-2.0 * x) <= -20.0)
		tmp = Float64(Float64(2.0 / Float64(1.0 + t_0)) + -1.0);
	elseif (Float64(-2.0 * x) <= 0.002)
		# Taylor polynomial x + x^3*(2/15*x^2 - 1/3) via fused multiply-adds.
		tmp = fma(fma(Float64(x * x), 0.13333333333333333, -0.3333333333333333), Float64(x * Float64(x * x)), x);
	else
		tmp = expm1(Float64(log(2.0) - log1p(t_0)));
	end
	return tmp
end
(* Alternative 3: piecewise form of 2/(1 + Exp[-2x]) - 1 (regimes on -2x); y is unused. *)
code[x_, y_] := Block[{t$95$0 = N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], N[(N[(2.0 / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision], If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.002], N[(N[(N[(x * x), $MachinePrecision] * 0.13333333333333333 + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(Exp[N[(N[Log[2.0], $MachinePrecision] - N[Log[1 + t$95$0], $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := e^{-2 \cdot x}\\
\mathbf{if}\;-2 \cdot x \leq -20:\\
\;\;\;\;\frac{2}{1 + t\_0} + -1\\

\mathbf{elif}\;-2 \cdot x \leq 0.002:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left(t\_0\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 #s(literal -2 binary64) x) < -20

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing

    if -20 < (*.f64 #s(literal -2 binary64) x) < 2e-3

    1. Initial program 9.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{1 \cdot x + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x} \]
      2. *-lft-identityN/A

        \[\leadsto \color{blue}{x} + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x + x} \]
      4. *-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} + x \]
      5. associate-*r*N/A

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)} + x \]
      6. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right)} + x \]
      7. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}, x \cdot {x}^{2}, x\right)} \]
      8. sub-negN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{2}{15} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)}, x \cdot {x}^{2}, x\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot \frac{2}{15}} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right), x \cdot {x}^{2}, x\right) \]
      10. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left({x}^{2} \cdot \frac{2}{15} + \color{blue}{\frac{-1}{3}}, x \cdot {x}^{2}, x\right) \]
      11. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left({x}^{2}, \frac{2}{15}, \frac{-1}{3}\right)}, x \cdot {x}^{2}, x\right) \]
      12. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      13. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      14. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), \color{blue}{x \cdot {x}^{2}}, x\right) \]
      15. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
      16. *-lowering-*.f64100.0

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
    5. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]

    if 2e-3 < (*.f64 #s(literal -2 binary64) x)

    1. Initial program 99.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. clear-numN/A

        \[\leadsto \color{blue}{\frac{1}{\frac{1 + e^{-2 \cdot x}}{2}}} - 1 \]
      2. inv-powN/A

        \[\leadsto \color{blue}{{\left(\frac{1 + e^{-2 \cdot x}}{2}\right)}^{-1}} - 1 \]
      3. metadata-evalN/A

        \[\leadsto {\left(\frac{1 + e^{-2 \cdot x}}{2}\right)}^{\color{blue}{\left(\mathsf{neg}\left(1\right)\right)}} - 1 \]
      4. pow-to-expN/A

        \[\leadsto \color{blue}{e^{\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)}} - 1 \]
      5. accelerator-lowering-expm1.f64N/A

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right)} \]
      6. *-lowering-*.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)}\right) \]
      7. log-lowering-log.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right)} \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      8. div-invN/A

        \[\leadsto \mathsf{expm1}\left(\log \color{blue}{\left(\left(1 + e^{-2 \cdot x}\right) \cdot \frac{1}{2}\right)} \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \color{blue}{\left(\left(1 + e^{-2 \cdot x}\right) \cdot \frac{1}{2}\right)} \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      10. +-lowering-+.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\color{blue}{\left(1 + e^{-2 \cdot x}\right)} \cdot \frac{1}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      11. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + \color{blue}{e^{-2 \cdot x}}\right) \cdot \frac{1}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      12. *-lowering-*.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + e^{\color{blue}{-2 \cdot x}}\right) \cdot \frac{1}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      13. metadata-evalN/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + e^{-2 \cdot x}\right) \cdot \color{blue}{\frac{1}{2}}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      14. metadata-eval99.9

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right) \cdot \color{blue}{-1}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\log \left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right) \cdot -1\right)} \]
    5. Taylor expanded in x around inf

      \[\leadsto \mathsf{expm1}\left(\color{blue}{-1 \cdot \log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)}\right) \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\mathsf{neg}\left(\log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)\right)}\right) \]
      2. neg-lowering-neg.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\mathsf{neg}\left(\log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)\right)}\right) \]
      3. log-lowering-log.f64N/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\color{blue}{\log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)}\right)\right) \]
      4. +-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\frac{1}{2} \cdot \color{blue}{\left(e^{-2 \cdot x} + 1\right)}\right)\right)\right) \]
      5. distribute-rgt-inN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \color{blue}{\left(e^{-2 \cdot x} \cdot \frac{1}{2} + 1 \cdot \frac{1}{2}\right)}\right)\right) \]
      6. metadata-evalN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(e^{-2 \cdot x} \cdot \frac{1}{2} + \color{blue}{\frac{1}{2}}\right)\right)\right) \]
      7. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \color{blue}{\left(\mathsf{fma}\left(e^{-2 \cdot x}, \frac{1}{2}, \frac{1}{2}\right)\right)}\right)\right) \]
      8. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\mathsf{fma}\left(\color{blue}{e^{-2 \cdot x}}, \frac{1}{2}, \frac{1}{2}\right)\right)\right)\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\mathsf{fma}\left(e^{\color{blue}{x \cdot -2}}, \frac{1}{2}, \frac{1}{2}\right)\right)\right)\right) \]
      10. *-lowering-*.f64 — 99.9%

        \[\leadsto \mathsf{expm1}\left(-\log \left(\mathsf{fma}\left(e^{\color{blue}{x \cdot -2}}, 0.5, 0.5\right)\right)\right) \]
    7. Simplified99.9%

      \[\leadsto \mathsf{expm1}\left(\color{blue}{-\log \left(\mathsf{fma}\left(e^{x \cdot -2}, 0.5, 0.5\right)\right)}\right) \]
    8. Step-by-step derivation
      1. distribute-lft1-inN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \color{blue}{\left(\left(e^{x \cdot -2} + 1\right) \cdot \frac{1}{2}\right)}\right)\right) \]
      2. *-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\left(e^{\color{blue}{-2 \cdot x}} + 1\right) \cdot \frac{1}{2}\right)\right)\right) \]
      3. +-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\color{blue}{\left(1 + e^{-2 \cdot x}\right)} \cdot \frac{1}{2}\right)\right)\right) \]
      4. metadata-evalN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\left(1 + e^{-2 \cdot x}\right) \cdot \color{blue}{\frac{1}{2}}\right)\right)\right) \]
      5. div-invN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \color{blue}{\left(\frac{1 + e^{-2 \cdot x}}{2}\right)}\right)\right) \]
      6. neg-logN/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log \left(\frac{1}{\frac{1 + e^{-2 \cdot x}}{2}}\right)}\right) \]
      7. clear-numN/A

        \[\leadsto \mathsf{expm1}\left(\log \color{blue}{\left(\frac{2}{1 + e^{-2 \cdot x}}\right)}\right) \]
      8. log-divN/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log 2 - \log \left(1 + e^{-2 \cdot x}\right)}\right) \]
      9. --lowering--.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log 2 - \log \left(1 + e^{-2 \cdot x}\right)}\right) \]
      10. log-lowering-log.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log 2} - \log \left(1 + e^{-2 \cdot x}\right)\right) \]
      11. accelerator-lowering-log1p.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log 2 - \color{blue}{\mathsf{log1p}\left(e^{-2 \cdot x}\right)}\right) \]
      12. *-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left(e^{\color{blue}{x \cdot -2}}\right)\right) \]
      13. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left(\color{blue}{e^{x \cdot -2}}\right)\right) \]
      14. *-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left(e^{\color{blue}{-2 \cdot x}}\right)\right) \]
      15. *-lowering-*.f64 — 100.0%

        \[\leadsto \mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left(e^{\color{blue}{-2 \cdot x}}\right)\right) \]
    9. Applied egg-rr100.0%

      \[\leadsto \mathsf{expm1}\left(\color{blue}{\log 2 - \mathsf{log1p}\left(e^{-2 \cdot x}\right)}\right) \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left(e^{-2 \cdot x}\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 4: 99.9% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{-2 \cdot x}\\ \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;\frac{2}{1 + t\_0} + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(-\log \left(\mathsf{fma}\left(t\_0, 0.5, 0.5\right)\right)\right)\\ \end{array} \end{array} \]
; Herbie alternative 4: three-regime rewrite of (- (/ 2 (+ 1 (exp (* -2 x)))) 1).
; t_0 caches exp(-2x); the expm1/log branch avoids the cancellation of the
; direct formula for moderate x, and a Taylor polynomial covers small |x|.
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (exp (* -2.0 x))))
   (if (<= (* -2.0 x) -20.0)
     (+ (/ 2.0 (+ 1.0 t_0)) -1.0)
     (if (<= (* -2.0 x) 0.002)
       (fma
        (fma (* x x) 0.13333333333333333 -0.3333333333333333)
        (* x (* x x))
        x)
       (expm1 (- (log (fma t_0 0.5 0.5))))))))
double code(double x, double y) {
	double t_0 = exp((-2.0 * x));
	double tmp;
	if ((-2.0 * x) <= -20.0) {
		tmp = (2.0 / (1.0 + t_0)) + -1.0;
	} else if ((-2.0 * x) <= 0.002) {
		tmp = fma(fma((x * x), 0.13333333333333333, -0.3333333333333333), (x * (x * x)), x);
	} else {
		tmp = expm1(-log(fma(t_0, 0.5, 0.5)));
	}
	return tmp;
}
function code(x, y)
	# exp(-2x) is shared by the saturated and expm1/log regimes below.
	t_0 = exp(Float64(-2.0 * x))
	tmp = 0.0
	# Regime 1: -2x <= -20 (x >= 10): direct formula is accurate here.
	if (Float64(-2.0 * x) <= -20.0)
		tmp = Float64(Float64(2.0 / Float64(1.0 + t_0)) + -1.0);
	# Regime 2: small |x|: odd Taylor polynomial avoids cancellation near 0.
	elseif (Float64(-2.0 * x) <= 0.002)
		tmp = fma(fma(Float64(x * x), 0.13333333333333333, -0.3333333333333333), Float64(x * Float64(x * x)), x);
	# Regime 3: expm1/log form sidesteps the 2/(1+t) - 1 cancellation.
	else
		tmp = expm1(Float64(-log(fma(t_0, 0.5, 0.5))));
	end
	return tmp
end
code[x_, y_] := Block[{t$95$0 = N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], N[(N[(2.0 / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision], If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.002], N[(N[(N[(x * x), $MachinePrecision] * 0.13333333333333333 + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(Exp[(-N[Log[N[(t$95$0 * 0.5 + 0.5), $MachinePrecision]], $MachinePrecision])] - 1), $MachinePrecision]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := e^{-2 \cdot x}\\
\mathbf{if}\;-2 \cdot x \leq -20:\\
\;\;\;\;\frac{2}{1 + t\_0} + -1\\

\mathbf{elif}\;-2 \cdot x \leq 0.002:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\

\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(-\log \left(\mathsf{fma}\left(t\_0, 0.5, 0.5\right)\right)\right)\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 #s(literal -2 binary64) x) < -20

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing

    if -20 < (*.f64 #s(literal -2 binary64) x) < 2e-3

    1. Initial program 9.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{1 \cdot x + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x} \]
      2. *-lft-identityN/A

        \[\leadsto \color{blue}{x} + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x + x} \]
      4. *-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} + x \]
      5. associate-*r*N/A

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)} + x \]
      6. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right)} + x \]
      7. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}, x \cdot {x}^{2}, x\right)} \]
      8. sub-negN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{2}{15} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)}, x \cdot {x}^{2}, x\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot \frac{2}{15}} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right), x \cdot {x}^{2}, x\right) \]
      10. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left({x}^{2} \cdot \frac{2}{15} + \color{blue}{\frac{-1}{3}}, x \cdot {x}^{2}, x\right) \]
      11. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left({x}^{2}, \frac{2}{15}, \frac{-1}{3}\right)}, x \cdot {x}^{2}, x\right) \]
      12. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      13. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      14. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), \color{blue}{x \cdot {x}^{2}}, x\right) \]
      15. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
      16. *-lowering-*.f64100.0

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
    5. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]

    if 2e-3 < (*.f64 #s(literal -2 binary64) x)

    1. Initial program 99.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. clear-numN/A

        \[\leadsto \color{blue}{\frac{1}{\frac{1 + e^{-2 \cdot x}}{2}}} - 1 \]
      2. inv-powN/A

        \[\leadsto \color{blue}{{\left(\frac{1 + e^{-2 \cdot x}}{2}\right)}^{-1}} - 1 \]
      3. metadata-evalN/A

        \[\leadsto {\left(\frac{1 + e^{-2 \cdot x}}{2}\right)}^{\color{blue}{\left(\mathsf{neg}\left(1\right)\right)}} - 1 \]
      4. pow-to-expN/A

        \[\leadsto \color{blue}{e^{\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)}} - 1 \]
      5. accelerator-lowering-expm1.f64N/A

        \[\leadsto \color{blue}{\mathsf{expm1}\left(\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right)} \]
      6. *-lowering-*.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)}\right) \]
      7. log-lowering-log.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\log \left(\frac{1 + e^{-2 \cdot x}}{2}\right)} \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      8. div-invN/A

        \[\leadsto \mathsf{expm1}\left(\log \color{blue}{\left(\left(1 + e^{-2 \cdot x}\right) \cdot \frac{1}{2}\right)} \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      9. *-lowering-*.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \color{blue}{\left(\left(1 + e^{-2 \cdot x}\right) \cdot \frac{1}{2}\right)} \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      10. +-lowering-+.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\color{blue}{\left(1 + e^{-2 \cdot x}\right)} \cdot \frac{1}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      11. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + \color{blue}{e^{-2 \cdot x}}\right) \cdot \frac{1}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      12. *-lowering-*.f64N/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + e^{\color{blue}{-2 \cdot x}}\right) \cdot \frac{1}{2}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      13. metadata-evalN/A

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + e^{-2 \cdot x}\right) \cdot \color{blue}{\frac{1}{2}}\right) \cdot \left(\mathsf{neg}\left(1\right)\right)\right) \]
      14. metadata-eval99.9

        \[\leadsto \mathsf{expm1}\left(\log \left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right) \cdot \color{blue}{-1}\right) \]
    4. Applied egg-rr99.9%

      \[\leadsto \color{blue}{\mathsf{expm1}\left(\log \left(\left(1 + e^{-2 \cdot x}\right) \cdot 0.5\right) \cdot -1\right)} \]
    5. Taylor expanded in x around inf

      \[\leadsto \mathsf{expm1}\left(\color{blue}{-1 \cdot \log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)}\right) \]
    6. Step-by-step derivation
      1. mul-1-negN/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\mathsf{neg}\left(\log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)\right)}\right) \]
      2. neg-lowering-neg.f64N/A

        \[\leadsto \mathsf{expm1}\left(\color{blue}{\mathsf{neg}\left(\log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)\right)}\right) \]
      3. log-lowering-log.f64N/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\color{blue}{\log \left(\frac{1}{2} \cdot \left(1 + e^{-2 \cdot x}\right)\right)}\right)\right) \]
      4. +-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\frac{1}{2} \cdot \color{blue}{\left(e^{-2 \cdot x} + 1\right)}\right)\right)\right) \]
      5. distribute-rgt-inN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \color{blue}{\left(e^{-2 \cdot x} \cdot \frac{1}{2} + 1 \cdot \frac{1}{2}\right)}\right)\right) \]
      6. metadata-evalN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(e^{-2 \cdot x} \cdot \frac{1}{2} + \color{blue}{\frac{1}{2}}\right)\right)\right) \]
      7. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \color{blue}{\left(\mathsf{fma}\left(e^{-2 \cdot x}, \frac{1}{2}, \frac{1}{2}\right)\right)}\right)\right) \]
      8. exp-lowering-exp.f64N/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\mathsf{fma}\left(\color{blue}{e^{-2 \cdot x}}, \frac{1}{2}, \frac{1}{2}\right)\right)\right)\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{expm1}\left(\mathsf{neg}\left(\log \left(\mathsf{fma}\left(e^{\color{blue}{x \cdot -2}}, \frac{1}{2}, \frac{1}{2}\right)\right)\right)\right) \]
      10. *-lowering-*.f6499.9

        \[\leadsto \mathsf{expm1}\left(-\log \left(\mathsf{fma}\left(e^{\color{blue}{x \cdot -2}}, 0.5, 0.5\right)\right)\right) \]
    7. Simplified99.9%

      \[\leadsto \mathsf{expm1}\left(\color{blue}{-\log \left(\mathsf{fma}\left(e^{x \cdot -2}, 0.5, 0.5\right)\right)}\right) \]
  3. Recombined 3 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{expm1}\left(-\log \left(\mathsf{fma}\left(e^{-2 \cdot x}, 0.5, 0.5\right)\right)\right)\\ \end{array} \]
  5. Add Preprocessing

Alternative 5: 99.9% accurate, 0.8× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \frac{2}{1 + e^{-2 \cdot x}} + -1\\ \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;t\_0\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;t\_0\\ \end{array} \end{array} \]
; Herbie alternative 5: use the direct formula everywhere except near 0,
; where a Taylor polynomial replaces the badly-cancelling 2/(1+exp(-2x)) - 1.
(FPCore (x y)
 :precision binary64
 (let* ((t_0 (+ (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) -1.0)))
   (if (<= (* -2.0 x) -20.0)
     t_0
     (if (<= (* -2.0 x) 0.002)
       (fma
        (fma (* x x) 0.13333333333333333 -0.3333333333333333)
        (* x (* x x))
        x)
       t_0))))
double code(double x, double y) {
	double t_0 = (2.0 / (1.0 + exp((-2.0 * x)))) + -1.0;
	double tmp;
	if ((-2.0 * x) <= -20.0) {
		tmp = t_0;
	} else if ((-2.0 * x) <= 0.002) {
		tmp = fma(fma((x * x), 0.13333333333333333, -0.3333333333333333), (x * (x * x)), x);
	} else {
		tmp = t_0;
	}
	return tmp;
}
function code(x, y)
	# Direct formula 2/(1+exp(-2x)) - 1, reused by both outer regimes.
	t_0 = Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) + -1.0)
	tmp = 0.0
	if (Float64(-2.0 * x) <= -20.0)
		tmp = t_0;
	# Near 0 the direct formula cancels badly; use an odd Taylor polynomial.
	elseif (Float64(-2.0 * x) <= 0.002)
		tmp = fma(fma(Float64(x * x), 0.13333333333333333, -0.3333333333333333), Float64(x * Float64(x * x)), x);
	else
		tmp = t_0;
	end
	return tmp
end
code[x_, y_] := Block[{t$95$0 = N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]}, If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], t$95$0, If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.002], N[(N[(N[(x * x), $MachinePrecision] * 0.13333333333333333 + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], t$95$0]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \frac{2}{1 + e^{-2 \cdot x}} + -1\\
\mathbf{if}\;-2 \cdot x \leq -20:\\
\;\;\;\;t\_0\\

\mathbf{elif}\;-2 \cdot x \leq 0.002:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\

\mathbf{else}:\\
\;\;\;\;t\_0\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 #s(literal -2 binary64) x) < -20 or 2e-3 < (*.f64 #s(literal -2 binary64) x)

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing

    if -20 < (*.f64 #s(literal -2 binary64) x) < 2e-3

    1. Initial program 9.9%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} \]
    4. Step-by-step derivation
      1. distribute-rgt-inN/A

        \[\leadsto \color{blue}{1 \cdot x + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x} \]
      2. *-lft-identityN/A

        \[\leadsto \color{blue}{x} + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x \]
      3. +-commutativeN/A

        \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x + x} \]
      4. *-commutativeN/A

        \[\leadsto \color{blue}{x \cdot \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} + x \]
      5. associate-*r*N/A

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)} + x \]
      6. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right)} + x \]
      7. accelerator-lowering-fma.f64N/A

        \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}, x \cdot {x}^{2}, x\right)} \]
      8. sub-negN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{2}{15} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)}, x \cdot {x}^{2}, x\right) \]
      9. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot \frac{2}{15}} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right), x \cdot {x}^{2}, x\right) \]
      10. metadata-evalN/A

        \[\leadsto \mathsf{fma}\left({x}^{2} \cdot \frac{2}{15} + \color{blue}{\frac{-1}{3}}, x \cdot {x}^{2}, x\right) \]
      11. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left({x}^{2}, \frac{2}{15}, \frac{-1}{3}\right)}, x \cdot {x}^{2}, x\right) \]
      12. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      13. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
      14. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), \color{blue}{x \cdot {x}^{2}}, x\right) \]
      15. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
      16. *-lowering-*.f64100.0

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
    5. Simplified100.0%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification100.0%

    \[\leadsto \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} + -1\\ \mathbf{elif}\;-2 \cdot x \leq 0.002:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} + -1\\ \end{array} \]
  5. Add Preprocessing

Alternative 6: 99.8% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;1\\ \mathbf{elif}\;-2 \cdot x \leq 0.2:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.05396825396825397, 0.13333333333333333\right), -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;-1\\ \end{array} \end{array} \]
; Herbie alternative 6: approximate by the constants 1 (x >= 10) and
; -1 (x < -0.1); a degree-5 odd Taylor polynomial about 0 covers the middle.
(FPCore (x y)
 :precision binary64
 (if (<= (* -2.0 x) -20.0)
   1.0
   (if (<= (* -2.0 x) 0.2)
     (fma
      (fma
       (* x x)
       (fma (* x x) -0.05396825396825397 0.13333333333333333)
       -0.3333333333333333)
      (* x (* x x))
      x)
     -1.0)))
double code(double x, double y) {
	double tmp;
	if ((-2.0 * x) <= -20.0) {
		tmp = 1.0;
	} else if ((-2.0 * x) <= 0.2) {
		tmp = fma(fma((x * x), fma((x * x), -0.05396825396825397, 0.13333333333333333), -0.3333333333333333), (x * (x * x)), x);
	} else {
		tmp = -1.0;
	}
	return tmp;
}
function code(x, y)
	tmp = 0.0
	# x >= 10: approximate the saturated tail by the constant 1.
	if (Float64(-2.0 * x) <= -20.0)
		tmp = 1.0;
	# Central interval: degree-5 odd Taylor polynomial about 0.
	elseif (Float64(-2.0 * x) <= 0.2)
		tmp = fma(fma(Float64(x * x), fma(Float64(x * x), -0.05396825396825397, 0.13333333333333333), -0.3333333333333333), Float64(x * Float64(x * x)), x);
	# x < -0.1: approximate by the constant -1.
	else
		tmp = -1.0;
	end
	return tmp
end
code[x_, y_] := If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], 1.0, If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.2], N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.05396825396825397 + 0.13333333333333333), $MachinePrecision] + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], -1.0]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;-2 \cdot x \leq -20:\\
\;\;\;\;1\\

\mathbf{elif}\;-2 \cdot x \leq 0.2:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.05396825396825397, 0.13333333333333333\right), -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\

\mathbf{else}:\\
\;\;\;\;-1\\


\end{array}
\end{array}
Derivation
  1. Split input into 3 regimes
  2. if (*.f64 #s(literal -2 binary64) x) < -20

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
    4. Step-by-step derivation
      1. metadata-evalN/A

        \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
      2. cancel-sign-sub-invN/A

        \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
      3. --lowering--.f64N/A

        \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
      4. count-2N/A

        \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
      5. +-lowering-+.f64 — 1.6%

        \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
    5. Simplified1.6%

      \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
    6. Applied egg-rr97.4%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{\mathsf{fma}\left(x, 2, 4\right)}, \mathsf{fma}\left(x, 2, 2\right), -1\right)} \]
    7. Taylor expanded in x around inf

      \[\leadsto \color{blue}{1} \]
    8. Step-by-step derivation
      1. Simplified99.4%

        \[\leadsto \color{blue}{1} \]

      if -20 < (*.f64 #s(literal -2 binary64) x) < 0.20000000000000001

      1. Initial program 10.6%

        \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
      2. Add Preprocessing
      3. Taylor expanded in x around 0

        \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}\right)\right)} \]
      4. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}\right) + 1\right)} \]
        2. distribute-rgt-inN/A

          \[\leadsto \color{blue}{\left({x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}\right)\right) \cdot x + 1 \cdot x} \]
        3. *-commutativeN/A

          \[\leadsto \color{blue}{x \cdot \left({x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}\right)\right)} + 1 \cdot x \]
        4. associate-*r*N/A

          \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}\right)} + 1 \cdot x \]
        5. *-commutativeN/A

          \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right)} + 1 \cdot x \]
        6. *-lft-identityN/A

          \[\leadsto \left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right) + \color{blue}{x} \]
        7. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2} \cdot \left(\frac{2}{15} + \frac{-17}{315} \cdot {x}^{2}\right) - \frac{1}{3}, x \cdot {x}^{2}, x\right)} \]
      5. Simplified99.8%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.05396825396825397, 0.13333333333333333\right), -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]

      if 0.20000000000000001 < (*.f64 #s(literal -2 binary64) x)

      1. Initial program 100.0%

        \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
      2. Add Preprocessing
      3. Taylor expanded in x around 0

        \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
      4. Step-by-step derivation
        1. metadata-evalN/A

          \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
        2. cancel-sign-sub-invN/A

          \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
        3. --lowering--.f64N/A

          \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
        4. count-2N/A

          \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
        5. +-lowering-+.f6496.0

          \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
      5. Simplified96.0%

        \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
      6. Taylor expanded in x around inf

        \[\leadsto \color{blue}{-1} \]
      7. Step-by-step derivation
        1. Simplified100.0%

          \[\leadsto \color{blue}{-1} \]
      8. Recombined 3 regimes into one program.
      9. Add Preprocessing

      Alternative 7: 99.7% accurate, 2.5× speedup?

      \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;1\\ \mathbf{elif}\;-2 \cdot x \leq 0.2:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;-1\\ \end{array} \end{array} \]
      ; Herbie alternative 7: saturate to the constants 1 (x >= 10) and
      ; -1 (x < -0.1); a degree-3 odd Taylor polynomial covers the middle.
      (FPCore (x y)
       :precision binary64
       (if (<= (* -2.0 x) -20.0)
         1.0
         (if (<= (* -2.0 x) 0.2)
           (fma
            (fma (* x x) 0.13333333333333333 -0.3333333333333333)
            (* x (* x x))
            x)
           -1.0)))
      double code(double x, double y) {
      	double tmp;
      	if ((-2.0 * x) <= -20.0) {
      		tmp = 1.0;
      	} else if ((-2.0 * x) <= 0.2) {
      		tmp = fma(fma((x * x), 0.13333333333333333, -0.3333333333333333), (x * (x * x)), x);
      	} else {
      		tmp = -1.0;
      	}
      	return tmp;
      }
      
      function code(x, y)
      	tmp = 0.0
      	# x >= 10: approximate the saturated tail by the constant 1.
      	if (Float64(-2.0 * x) <= -20.0)
      		tmp = 1.0;
      	# Central interval: degree-3 odd Taylor polynomial about 0.
      	elseif (Float64(-2.0 * x) <= 0.2)
      		tmp = fma(fma(Float64(x * x), 0.13333333333333333, -0.3333333333333333), Float64(x * Float64(x * x)), x);
      	# x < -0.1: approximate by the constant -1.
      	else
      		tmp = -1.0;
      	end
      	return tmp
      end
      
      code[x_, y_] := If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], 1.0, If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.2], N[(N[(N[(x * x), $MachinePrecision] * 0.13333333333333333 + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], -1.0]]
      
      \begin{array}{l}
      
      \\
      \begin{array}{l}
      \mathbf{if}\;-2 \cdot x \leq -20:\\
      \;\;\;\;1\\
      
      \mathbf{elif}\;-2 \cdot x \leq 0.2:\\
      \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)\\
      
      \mathbf{else}:\\
      \;\;\;\;-1\\
      
      
      \end{array}
      \end{array}
      
      Derivation
      1. Split input into 3 regimes
      2. if (*.f64 #s(literal -2 binary64) x) < -20

        1. Initial program 100.0%

          \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
        2. Add Preprocessing
        3. Taylor expanded in x around 0

          \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
        4. Step-by-step derivation
          1. metadata-evalN/A

            \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
          2. cancel-sign-sub-invN/A

            \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
          3. --lowering--.f64N/A

            \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
          4. count-2N/A

            \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
          5. +-lowering-+.f641.6

            \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
        5. Simplified1.6%

          \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
        6. Applied egg-rr97.4%

          \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{\mathsf{fma}\left(x, 2, 4\right)}, \mathsf{fma}\left(x, 2, 2\right), -1\right)} \]
        7. Taylor expanded in x around inf

          \[\leadsto \color{blue}{1} \]
        8. Step-by-step derivation
          1. Simplified99.4%

            \[\leadsto \color{blue}{1} \]

          if -20 < (*.f64 #s(literal -2 binary64) x) < 0.20000000000000001

          1. Initial program 10.6%

            \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
          2. Add Preprocessing
          3. Taylor expanded in x around 0

            \[\leadsto \color{blue}{x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} \]
          4. Step-by-step derivation
            1. distribute-rgt-inN/A

              \[\leadsto \color{blue}{1 \cdot x + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x} \]
            2. *-lft-identityN/A

              \[\leadsto \color{blue}{x} + \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x \]
            3. +-commutativeN/A

              \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right) \cdot x + x} \]
            4. *-commutativeN/A

              \[\leadsto \color{blue}{x \cdot \left({x}^{2} \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)\right)} + x \]
            5. associate-*r*N/A

              \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right)} + x \]
            6. *-commutativeN/A

              \[\leadsto \color{blue}{\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}\right) \cdot \left(x \cdot {x}^{2}\right)} + x \]
            7. accelerator-lowering-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{15} \cdot {x}^{2} - \frac{1}{3}, x \cdot {x}^{2}, x\right)} \]
            8. sub-negN/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{\frac{2}{15} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right)}, x \cdot {x}^{2}, x\right) \]
            9. *-commutativeN/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot \frac{2}{15}} + \left(\mathsf{neg}\left(\frac{1}{3}\right)\right), x \cdot {x}^{2}, x\right) \]
            10. metadata-evalN/A

              \[\leadsto \mathsf{fma}\left({x}^{2} \cdot \frac{2}{15} + \color{blue}{\frac{-1}{3}}, x \cdot {x}^{2}, x\right) \]
            11. accelerator-lowering-fma.f64N/A

              \[\leadsto \mathsf{fma}\left(\color{blue}{\mathsf{fma}\left({x}^{2}, \frac{2}{15}, \frac{-1}{3}\right)}, x \cdot {x}^{2}, x\right) \]
            12. unpow2N/A

              \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
            13. *-lowering-*.f64N/A

              \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{15}, \frac{-1}{3}\right), x \cdot {x}^{2}, x\right) \]
            14. *-lowering-*.f64N/A

              \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), \color{blue}{x \cdot {x}^{2}}, x\right) \]
            15. unpow2N/A

              \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \frac{2}{15}, \frac{-1}{3}\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
            16. *-lowering-*.f64 (99.7%)

              \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
          5. Simplified (99.7%)

            \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)} \]

          if 0.20000000000000001 < (*.f64 #s(literal -2 binary64) x)

          1. Initial program 100.0%

            \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
          2. Add Preprocessing
          3. Taylor expanded in x around 0

            \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
          4. Step-by-step derivation
            1. metadata-evalN/A

              \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
            2. cancel-sign-sub-invN/A

              \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
            3. --lowering--.f64N/A

              \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
            4. count-2N/A

              \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
            5. +-lowering-+.f6496.0

              \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
          5. Simplified96.0%

            \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
          6. Taylor expanded in x around inf

            \[\leadsto \color{blue}{-1} \]
          7. Step-by-step derivation
            1. Simplified100.0%

              \[\leadsto \color{blue}{-1} \]
          8. Recombined 3 regimes into one program.
          9. Add Preprocessing

          Alternative 8: 99.7% accurate, 3.2× speedup?

          \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;1\\ \mathbf{elif}\;-2 \cdot x \leq 0.2:\\ \;\;\;\;\mathsf{fma}\left(-0.3333333333333333, x \cdot \left(x \cdot x\right), x\right)\\ \mathbf{else}:\\ \;\;\;\;-1\\ \end{array} \end{array} \]
          ; Herbie alternative 8 for tanh(x) = 2/(1+exp(-2x)) - 1:
          ; saturate to +/-1 in the tails, cubic term x - x^3/3 near zero (y is unused).
          (FPCore (x y)
           :precision binary64
           (if (<= (* -2.0 x) -20.0)
             1.0
             (if (<= (* -2.0 x) 0.2) (fma -0.3333333333333333 (* x (* x x)) x) -1.0)))
          double code(double x, double y) {
          	double tmp;
          	if ((-2.0 * x) <= -20.0) {
          		tmp = 1.0;
          	} else if ((-2.0 * x) <= 0.2) {
          		tmp = fma(-0.3333333333333333, (x * (x * x)), x);
          	} else {
          		tmp = -1.0;
          	}
          	return tmp;
          }
          
          # Piecewise tanh(x) approximation (Herbie alternative 8); y is unused.
          function code(x, y)
          	tmp = 0.0
          	# Tail: -2x <= -20, i.e. x >= 10 — saturate at 1.
          	if (Float64(-2.0 * x) <= -20.0)
          		tmp = 1.0;
          	# Near zero: x - x^3/3 computed with a fused multiply-add.
          	elseif (Float64(-2.0 * x) <= 0.2)
          		tmp = fma(-0.3333333333333333, Float64(x * Float64(x * x)), x);
          	else
          		tmp = -1.0;
          	end
          	return tmp
          end
          
          (* Herbie alternative 8: piecewise tanh(x) — 1 for -2x <= -20, cubic x - x^3/3 for -2x <= 0.2, else -1; y is unused. *)
          code[x_, y_] := If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], 1.0, If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.2], N[(-0.3333333333333333 * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], -1.0]]
          
          \begin{array}{l}
          
          \\
          \begin{array}{l}
          \mathbf{if}\;-2 \cdot x \leq -20:\\
          \;\;\;\;1\\
          
          \mathbf{elif}\;-2 \cdot x \leq 0.2:\\
          \;\;\;\;\mathsf{fma}\left(-0.3333333333333333, x \cdot \left(x \cdot x\right), x\right)\\
          
          \mathbf{else}:\\
          \;\;\;\;-1\\
          
          
          \end{array}
          \end{array}
          
          Derivation
          1. Split input into 3 regimes
          2. if (*.f64 #s(literal -2 binary64) x) < -20

            1. Initial program 100.0%

              \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
            2. Add Preprocessing
            3. Taylor expanded in x around 0

              \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
            4. Step-by-step derivation
              1. metadata-evalN/A

                \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
              2. cancel-sign-sub-invN/A

                \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
              3. --lowering--.f64N/A

                \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
              4. count-2N/A

                \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
              5. +-lowering-+.f641.6

                \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
            5. Simplified1.6%

              \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
            6. Applied egg-rr97.4%

              \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{\mathsf{fma}\left(x, 2, 4\right)}, \mathsf{fma}\left(x, 2, 2\right), -1\right)} \]
            7. Taylor expanded in x around inf

              \[\leadsto \color{blue}{1} \]
            8. Step-by-step derivation
              1. Simplified99.4%

                \[\leadsto \color{blue}{1} \]

              if -20 < (*.f64 #s(literal -2 binary64) x) < 0.20000000000000001

              1. Initial program 10.6%

                \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
              2. Add Preprocessing
              3. Taylor expanded in x around 0

                \[\leadsto \color{blue}{x \cdot \left(1 + \frac{-1}{3} \cdot {x}^{2}\right)} \]
              4. Step-by-step derivation
                1. distribute-lft-inN/A

                  \[\leadsto \color{blue}{x \cdot 1 + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right)} \]
                2. *-rgt-identityN/A

                  \[\leadsto \color{blue}{x} + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) \]
                3. +-commutativeN/A

                  \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) + x} \]
                4. *-commutativeN/A

                  \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \frac{-1}{3}\right)} + x \]
                5. associate-*r*N/A

                  \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \frac{-1}{3}} + x \]
                6. *-commutativeN/A

                  \[\leadsto \color{blue}{\frac{-1}{3} \cdot \left(x \cdot {x}^{2}\right)} + x \]
                7. accelerator-lowering-fma.f64N/A

                  \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{-1}{3}, x \cdot {x}^{2}, x\right)} \]
                8. *-lowering-*.f64N/A

                  \[\leadsto \mathsf{fma}\left(\frac{-1}{3}, \color{blue}{x \cdot {x}^{2}}, x\right) \]
                9. unpow2N/A

                  \[\leadsto \mathsf{fma}\left(\frac{-1}{3}, x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
                10. *-lowering-*.f6499.3

                  \[\leadsto \mathsf{fma}\left(-0.3333333333333333, x \cdot \color{blue}{\left(x \cdot x\right)}, x\right) \]
              5. Simplified99.3%

                \[\leadsto \color{blue}{\mathsf{fma}\left(-0.3333333333333333, x \cdot \left(x \cdot x\right), x\right)} \]

              if 0.20000000000000001 < (*.f64 #s(literal -2 binary64) x)

              1. Initial program 100.0%

                \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
              2. Add Preprocessing
              3. Taylor expanded in x around 0

                \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
              4. Step-by-step derivation
                1. metadata-evalN/A

                  \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
                2. cancel-sign-sub-invN/A

                  \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                3. --lowering--.f64N/A

                  \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                4. count-2N/A

                  \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                5. +-lowering-+.f6496.0

                  \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
              5. Simplified96.0%

                \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
              6. Taylor expanded in x around inf

                \[\leadsto \color{blue}{-1} \]
              7. Step-by-step derivation
                1. Simplified100.0%

                  \[\leadsto \color{blue}{-1} \]
              8. Recombined 3 regimes into one program.
              9. Add Preprocessing

              Alternative 9: 99.4% accurate, 5.3× speedup?

              \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -20:\\ \;\;\;\;1\\ \mathbf{elif}\;-2 \cdot x \leq 0.2:\\ \;\;\;\;x\\ \mathbf{else}:\\ \;\;\;\;-1\\ \end{array} \end{array} \]
              ; Herbie alternative 9 for tanh(x): saturate to +/-1 in the tails,
              ; identity (just x) near zero; y is unused.
              (FPCore (x y)
               :precision binary64
               (if (<= (* -2.0 x) -20.0) 1.0 (if (<= (* -2.0 x) 0.2) x -1.0)))
              double code(double x, double y) {
              	/* Piecewise tanh(x) approximation (Herbie alternative 9):
              	 * 1 for -2x <= -20, x itself for -2x <= 0.2, otherwise -1;
              	 * y is unused.  NaN x falls through to -1.0, as before. */
              	const double t = -2.0 * x;
              	if (t <= -20.0) {
              		return 1.0;
              	}
              	return (t <= 0.2) ? x : -1.0;
              }
              
              ! Piecewise tanh(x) approximation (Herbie alternative 9); y is unused.
              real(8) function code(x, y)
                  real(8), intent (in) :: x
                  real(8), intent (in) :: y
                  real(8) :: tmp
                  ! Tail: -2x <= -20 — saturate at 1.
                  if (((-2.0d0) * x) <= (-20.0d0)) then
                      tmp = 1.0d0
                  ! Near zero: the identity approximation tanh(x) ~ x.
                  else if (((-2.0d0) * x) <= 0.2d0) then
                      tmp = x
                  else
                      tmp = -1.0d0
                  end if
                  code = tmp
              end function
              
              // Piecewise tanh(x) approximation (Herbie alternative 9):
              // 1 for -2x <= -20, x itself for -2x <= 0.2, otherwise -1; y is unused.
              public static double code(double x, double y) {
              	final double t = -2.0 * x;
              	if (t <= -20.0) {
              		return 1.0;
              	}
              	// NaN comparisons are false, so NaN input yields -1.0, as before.
              	return (t <= 0.2) ? x : -1.0;
              }
              
              def code(x, y):
              	"""Piecewise tanh(x) approximation (Herbie alternative 9).
              
              	Returns 1.0 when -2*x <= -20, x itself when -2*x <= 0.2,
              	and -1.0 otherwise; ``y`` is unused.
              	"""
              	t = -2.0 * x
              	if t <= -20.0:
              		return 1.0
              	return x if t <= 0.2 else -1.0
              
              # Piecewise tanh(x) approximation (Herbie alternative 9); y is unused.
              function code(x, y)
              	tmp = 0.0
              	# Tail: -2x <= -20 — saturate at 1.
              	if (Float64(-2.0 * x) <= -20.0)
              		tmp = 1.0;
              	# Near zero: tanh(x) ~ x.
              	elseif (Float64(-2.0 * x) <= 0.2)
              		tmp = x;
              	else
              		tmp = -1.0;
              	end
              	return tmp
              end
              
              % Piecewise tanh(x) approximation (Herbie alternative 9); y is unused.
              function tmp_2 = code(x, y)
              	tmp = 0.0;
              	% Tail: -2x <= -20 — saturate at 1.
              	if ((-2.0 * x) <= -20.0)
              		tmp = 1.0;
              	% Near zero: tanh(x) ~ x.
              	elseif ((-2.0 * x) <= 0.2)
              		tmp = x;
              	else
              		tmp = -1.0;
              	end
              	tmp_2 = tmp;
              end
              
              (* Herbie alternative 9: piecewise tanh(x) — 1 for -2x <= -20, x for -2x <= 0.2, else -1; y is unused. *)
              code[x_, y_] := If[LessEqual[N[(-2.0 * x), $MachinePrecision], -20.0], 1.0, If[LessEqual[N[(-2.0 * x), $MachinePrecision], 0.2], x, -1.0]]
              
              \begin{array}{l}
              
              \\
              \begin{array}{l}
              \mathbf{if}\;-2 \cdot x \leq -20:\\
              \;\;\;\;1\\
              
              \mathbf{elif}\;-2 \cdot x \leq 0.2:\\
              \;\;\;\;x\\
              
              \mathbf{else}:\\
              \;\;\;\;-1\\
              
              
              \end{array}
              \end{array}
              
              Derivation
              1. Split input into 3 regimes
              2. if (*.f64 #s(literal -2 binary64) x) < -20

                1. Initial program 100.0%

                  \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
                2. Add Preprocessing
                3. Taylor expanded in x around 0

                  \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
                4. Step-by-step derivation
                  1. metadata-evalN/A

                    \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
                  2. cancel-sign-sub-invN/A

                    \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                  3. --lowering--.f64N/A

                    \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                  4. count-2N/A

                    \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                  5. +-lowering-+.f641.6

                    \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                5. Simplified1.6%

                  \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
                6. Applied egg-rr97.4%

                  \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{\mathsf{fma}\left(x, 2, 4\right)}, \mathsf{fma}\left(x, 2, 2\right), -1\right)} \]
                7. Taylor expanded in x around inf

                  \[\leadsto \color{blue}{1} \]
                8. Step-by-step derivation
                  1. Simplified99.4%

                    \[\leadsto \color{blue}{1} \]

                  if -20 < (*.f64 #s(literal -2 binary64) x) < 0.20000000000000001

                  1. Initial program 10.6%

                    \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
                  2. Add Preprocessing
                  3. Taylor expanded in x around 0

                    \[\leadsto \color{blue}{x} \]
                  4. Step-by-step derivation
                    1. Simplified98.1%

                      \[\leadsto \color{blue}{x} \]

                    if 0.20000000000000001 < (*.f64 #s(literal -2 binary64) x)

                    1. Initial program 100.0%

                      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
                    2. Add Preprocessing
                    3. Taylor expanded in x around 0

                      \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
                    4. Step-by-step derivation
                      1. metadata-evalN/A

                        \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
                      2. cancel-sign-sub-invN/A

                        \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                      3. --lowering--.f64N/A

                        \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                      4. count-2N/A

                        \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                      5. +-lowering-+.f6496.0

                        \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                    5. Simplified96.0%

                      \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
                    6. Taylor expanded in x around inf

                      \[\leadsto \color{blue}{-1} \]
                    7. Step-by-step derivation
                      1. Simplified100.0%

                        \[\leadsto \color{blue}{-1} \]
                    8. Recombined 3 regimes into one program.
                    9. Add Preprocessing

                    Alternative 10: 51.6% accurate, 10.2× speedup?

                    \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -5 \cdot 10^{-311}:\\ \;\;\;\;1\\ \mathbf{else}:\\ \;\;\;\;-1\\ \end{array} \end{array} \]
                    ; Herbie alternative 10: step approximation of tanh(x) — +/-1 by the sign test -2x <= -5e-311; y is unused.
                    (FPCore (x y) :precision binary64 (if (<= (* -2.0 x) -5e-311) 1.0 -1.0))
                    double code(double x, double y) {
                    	/* Step approximation of tanh(x) (Herbie alternative 10):
                    	 * 1 when -2x <= -5e-311, otherwise -1; y is unused.
                    	 * NaN comparisons are false, so NaN input yields -1.0. */
                    	return ((-2.0 * x) <= -5e-311) ? 1.0 : -1.0;
                    }
                    
                    ! Step approximation of tanh(x) (Herbie alternative 10); y is unused.
                    real(8) function code(x, y)
                        real(8), intent (in) :: x
                        real(8), intent (in) :: y
                        real(8) :: tmp
                        ! Effectively a sign test on x with a tiny subnormal threshold.
                        if (((-2.0d0) * x) <= (-5d-311)) then
                            tmp = 1.0d0
                        else
                            tmp = -1.0d0
                        end if
                        code = tmp
                    end function
                    
                    // Step approximation of tanh(x) (Herbie alternative 10):
                    // 1 when -2x <= -5e-311, otherwise -1; y is unused.
                    public static double code(double x, double y) {
                    	// NaN comparisons are false, so NaN input yields -1.0, as before.
                    	return ((-2.0 * x) <= -5e-311) ? 1.0 : -1.0;
                    }
                    
                    def code(x, y):
                    	"""Step approximation of tanh(x) (Herbie alternative 10).
                    
                    	Returns 1.0 when -2.0*x <= -5e-311 and -1.0 otherwise;
                    	``y`` is unused.
                    	"""
                    	return 1.0 if (-2.0 * x) <= -5e-311 else -1.0
                    
                    # Step approximation of tanh(x) (Herbie alternative 10); y is unused.
                    function code(x, y)
                    	tmp = 0.0
                    	# Effectively a sign test on x with a tiny subnormal threshold.
                    	if (Float64(-2.0 * x) <= -5e-311)
                    		tmp = 1.0;
                    	else
                    		tmp = -1.0;
                    	end
                    	return tmp
                    end
                    
                    % Step approximation of tanh(x) (Herbie alternative 10); y is unused.
                    function tmp_2 = code(x, y)
                    	tmp = 0.0;
                    	% Effectively a sign test on x with a tiny subnormal threshold.
                    	if ((-2.0 * x) <= -5e-311)
                    		tmp = 1.0;
                    	else
                    		tmp = -1.0;
                    	end
                    	tmp_2 = tmp;
                    end
                    
                    (* Herbie alternative 10: step approximation of tanh(x) — 1 when -2x <= -5e-311, else -1; y is unused. *)
                    code[x_, y_] := If[LessEqual[N[(-2.0 * x), $MachinePrecision], -5e-311], 1.0, -1.0]
                    
                    \begin{array}{l}
                    
                    \\
                    \begin{array}{l}
                    \mathbf{if}\;-2 \cdot x \leq -5 \cdot 10^{-311}:\\
                    \;\;\;\;1\\
                    
                    \mathbf{else}:\\
                    \;\;\;\;-1\\
                    
                    
                    \end{array}
                    \end{array}
                    
                    Derivation
                    1. Split input into 2 regimes
                    2. if (*.f64 #s(literal -2 binary64) x) < -5.00000000000023e-311

                      1. Initial program 56.4%

                        \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
                      2. Add Preprocessing
                      3. Taylor expanded in x around 0

                        \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
                      4. Step-by-step derivation
                        1. metadata-evalN/A

                          \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
                        2. cancel-sign-sub-invN/A

                          \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                        3. --lowering--.f64N/A

                          \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                        4. count-2N/A

                          \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                        5. +-lowering-+.f645.4

                          \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                      5. Simplified5.4%

                        \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
                      6. Applied egg-rr52.6%

                        \[\leadsto \color{blue}{\mathsf{fma}\left(\frac{2}{\mathsf{fma}\left(x, 2, 4\right)}, \mathsf{fma}\left(x, 2, 2\right), -1\right)} \]
                      7. Taylor expanded in x around inf

                        \[\leadsto \color{blue}{1} \]
                      8. Step-by-step derivation
                        1. Simplified53.0%

                          \[\leadsto \color{blue}{1} \]

                        if -5.00000000000023e-311 < (*.f64 #s(literal -2 binary64) x)

                        1. Initial program 49.6%

                          \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
                        2. Add Preprocessing
                        3. Taylor expanded in x around 0

                          \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
                        4. Step-by-step derivation
                          1. metadata-evalN/A

                            \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
                          2. cancel-sign-sub-invN/A

                            \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                          3. --lowering--.f64N/A

                            \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                          4. count-2N/A

                            \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                          5. +-lowering-+.f6446.9

                            \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                        5. Simplified46.9%

                          \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
                        6. Taylor expanded in x around inf

                          \[\leadsto \color{blue}{-1} \]
                        7. Step-by-step derivation
                          1. Simplified47.3%

                            \[\leadsto \color{blue}{-1} \]
                        8. Recombined 2 regimes into one program.
                        9. Add Preprocessing

                        Alternative 11: 27.1% accurate, 123.0× speedup?

                        \[\begin{array}{l} \\ -1 \end{array} \]
                        ; Herbie alternative 11: constant approximation of tanh(x); both inputs are ignored.
                        (FPCore (x y) :precision binary64 -1.0)
                        double code(double x, double y) {
                        	/* Constant approximation of tanh(x) (Herbie alternative 11);
                        	 * both inputs are ignored. */
                        	const double result = -1.0;
                        	return result;
                        }
                        
                        ! Constant approximation of tanh(x) (Herbie alternative 11);
                        ! both inputs are ignored.
                        real(8) function code(x, y)
                            real(8), intent (in) :: x
                            real(8), intent (in) :: y
                            code = -1.0d0
                        end function
                        
                        // Constant approximation of tanh(x) (Herbie alternative 11);
                        // both inputs are ignored.
                        public static double code(double x, double y) {
                        	final double result = -1.0;
                        	return result;
                        }
                        
                        def code(x, y):
                        	"""Constant approximation of tanh(x) (Herbie alternative 11); both arguments are ignored."""
                        	result = -1.0
                        	return result
                        
                        # Constant approximation of tanh(x) (Herbie alternative 11); both inputs are ignored.
                        function code(x, y)
                        	return -1.0
                        end
                        
                        % Constant approximation of tanh(x) (Herbie alternative 11); both inputs are ignored.
                        function tmp = code(x, y)
                        	tmp = -1.0;
                        end
                        
                        (* Herbie alternative 11: constant approximation of tanh(x); both inputs are ignored. *)
                        code[x_, y_] := -1.0
                        
                        \begin{array}{l}
                        
                        \\
                        -1
                        \end{array}
                        
                        Derivation
                        1. Initial program 52.8%

                          \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
                        2. Add Preprocessing
                        3. Taylor expanded in x around 0

                          \[\leadsto \frac{2}{\color{blue}{2 + -2 \cdot x}} - 1 \]
                        4. Step-by-step derivation
                          1. metadata-evalN/A

                            \[\leadsto \frac{2}{2 + \color{blue}{\left(\mathsf{neg}\left(2\right)\right)} \cdot x} - 1 \]
                          2. cancel-sign-sub-invN/A

                            \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                          3. --lowering--.f64N/A

                            \[\leadsto \frac{2}{\color{blue}{2 - 2 \cdot x}} - 1 \]
                          4. count-2N/A

                            \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                          5. +-lowering-+.f6427.3

                            \[\leadsto \frac{2}{2 - \color{blue}{\left(x + x\right)}} - 1 \]
                        5. Simplified27.3%

                          \[\leadsto \frac{2}{\color{blue}{2 - \left(x + x\right)}} - 1 \]
                        6. Taylor expanded in x around inf

                          \[\leadsto \color{blue}{-1} \]
                        7. Step-by-step derivation
                          1. Simplified25.8%

                            \[\leadsto \color{blue}{-1} \]
                          2. Add Preprocessing

                          Reproduce

                          ?
                          herbie shell --seed 2024204 
                          ; Original input program: logistic-derived tanh(x) = 2/(1+exp(-2x)) - 1; y is unused.
                          (FPCore (x y)
                            :name "Logistic function from Lakshay Garg"
                            :precision binary64
                            (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))