Logistic function from Lakshay Garg

Percentage Accurate: 53.6% → 99.1%
Time: 6.8s
Alternatives: 8
Speedup: 0.9×

Specification

\[\begin{array}{l} \\ \frac{2}{1 + e^{-2 \cdot x}} - 1 \end{array} \]
(FPCore (x y) :precision binary64 (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))
double code(double x, double y) {
	return (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (2.0d0 / (1.0d0 + exp(((-2.0d0) * x)))) - 1.0d0
end function
public static double code(double x, double y) {
	return (2.0 / (1.0 + Math.exp((-2.0 * x)))) - 1.0;
}
def code(x, y):
	return (2.0 / (1.0 + math.exp((-2.0 * x)))) - 1.0
function code(x, y)
	return Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0)
end
function tmp = code(x, y)
	tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
end
code[x_, y_] := N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{2}{1 + e^{-2 \cdot x}} - 1
\end{array}
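
In exact arithmetic the specification is just tanh x, since 2/(1 + e^{-2x}) - 1 = (1 - e^{-2x})/(1 + e^{-2x}). The poor initial accuracy comes from the trailing subtraction: near x = 0 the quotient is approximately 1 + x, and subtracting 1 cancels most of the significand. A minimal Python sketch (not part of the Herbie report) makes this visible by using math.tanh as the reference:

import math

# The specification, transcribed directly: it loses accuracy as x -> 0
# because 2/(1 + exp(-2x)) rounds to a value near 1 before the subtraction.
def naive(x):
    return 2.0 / (1.0 + math.exp(-2.0 * x)) - 1.0

for x in [1e-3, 1e-8, 1e-12]:
    approx = naive(x)
    exact = math.tanh(x)  # equals the spec in exact arithmetic
    rel = abs(approx - exact) / abs(exact)
    print(f"x={x:g}  naive={approx:.17g}  tanh={exact:.17g}  rel_err={rel:.1e}")

The relative error grows as x shrinks, which is why the alternatives below switch to polynomial or rational forms near zero.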

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (the variable is chosen in the title); the vertical axis shows accuracy, and higher is better. Red represents the original program, while blue represents Herbie's suggestion; these can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 8 alternatives:

Alternative   Accuracy   Speedup
1             99.1%      0.8×
2             74.6%      0.5×
3             74.5%      0.5×
4             74.6%      0.7×
5             74.3%      0.9×
6             50.9%      7.2×
7             6.6%       17.6×
8             4.3%       30.8×
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \frac{2}{1 + e^{-2 \cdot x}} - 1 \end{array} \]
(FPCore (x y) :precision binary64 (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))
double code(double x, double y) {
	return (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (2.0d0 / (1.0d0 + exp(((-2.0d0) * x)))) - 1.0d0
end function
public static double code(double x, double y) {
	return (2.0 / (1.0 + Math.exp((-2.0 * x)))) - 1.0;
}
def code(x, y):
	return (2.0 / (1.0 + math.exp((-2.0 * x)))) - 1.0
function code(x, y)
	return Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0)
end
function tmp = code(x, y)
	tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
end
code[x_, y_] := N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}

\\
\frac{2}{1 + e^{-2 \cdot x}} - 1
\end{array}

Alternative 1: 99.1% accurate, 0.8× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -400000 \lor \neg \left(-2 \cdot x \leq 10^{-8}\right):\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (or (<= (* -2.0 x) -400000.0) (not (<= (* -2.0 x) 1e-8)))
   (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0)
   (fma (* (* x x) x) -0.3333333333333333 x)))
double code(double x, double y) {
	double tmp;
	if (((-2.0 * x) <= -400000.0) || !((-2.0 * x) <= 1e-8)) {
		tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
	} else {
		tmp = fma(((x * x) * x), -0.3333333333333333, x);
	}
	return tmp;
}
function code(x, y)
	tmp = 0.0
	if ((Float64(-2.0 * x) <= -400000.0) || !(Float64(-2.0 * x) <= 1e-8))
		tmp = Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0);
	else
		tmp = fma(Float64(Float64(x * x) * x), -0.3333333333333333, x);
	end
	return tmp
end
code[x_, y_] := If[Or[LessEqual[N[(-2.0 * x), $MachinePrecision], -400000.0], N[Not[LessEqual[N[(-2.0 * x), $MachinePrecision], 1e-8]], $MachinePrecision]], N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * -0.3333333333333333 + x), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;-2 \cdot x \leq -400000 \lor \neg \left(-2 \cdot x \leq 10^{-8}\right):\\
\;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} - 1\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\


\end{array}
\end{array}
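
Read in terms of x alone (a restatement for reference, not part of Herbie's output), the branch condition is:

\[ -2 \cdot x \leq -4 \times 10^{5} \iff x \geq 2 \times 10^{5}, \qquad \neg\left(-2 \cdot x \leq 10^{-8}\right) \iff x < -5 \times 10^{-9}, \]

so the original expression is kept on the outer regimes, and the fma polynomial covers the middle regime between -5e-9 and 2e5.
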
Derivation
  1. Split input into 2 regimes
  2. if (*.f64 #s(literal -2 binary64) x) < -4e5 or 1e-8 < (*.f64 #s(literal -2 binary64) x)

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing

    if -4e5 < (*.f64 #s(literal -2 binary64) x) < 1e-8

    1. Initial program 7.8%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0 (series shown after this derivation)

      \[\leadsto \color{blue}{x \cdot \left(1 + \frac{-1}{3} \cdot {x}^{2}\right)} \]
    4. Step-by-step derivation
      1. distribute-lft-in (N/A)

        \[\leadsto \color{blue}{x \cdot 1 + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right)} \]
      2. *-rgt-identity (N/A)

        \[\leadsto \color{blue}{x} + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) \]
      3. +-commutative (N/A)

        \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) + x} \]
      4. *-commutative (N/A)

        \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \frac{-1}{3}\right)} + x \]
      5. associate-*r* (N/A)

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \frac{-1}{3}} + x \]
      6. lower-fma.f64 (N/A)

        \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot {x}^{2}, \frac{-1}{3}, x\right)} \]
      7. *-commutative (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot x}, \frac{-1}{3}, x\right) \]
      8. pow-plus (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      9. lower-pow.f64 (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      10. metadata-eval (100.0)

        \[\leadsto \mathsf{fma}\left({x}^{\color{blue}{3}}, -0.3333333333333333, x\right) \]
    5. Applied rewrites (100.0%)

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, -0.3333333333333333, x\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites (100.0%)

        \[\leadsto \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \]
    7. Recombined 2 regimes into one program.
    8. Final simplification (100.0%)

      \[\leadsto \begin{array}{l} \mathbf{if}\;-2 \cdot x \leq -400000 \lor \neg \left(-2 \cdot x \leq 10^{-8}\right):\\ \;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \]
    9. Add Preprocessing
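
For reference, the series behind the Taylor step above (standard mathematics, not part of Herbie's output): in exact arithmetic the specification equals tanh x, whose Maclaurin series is

\[ \tanh x = x - \frac{{x}^{3}}{3} + \frac{2{x}^{5}}{15} - \cdots \qquad \left(|x| < \frac{\pi}{2}\right) \]

Truncating after the cubic term gives exactly the polynomial the else branch evaluates as fma((x · x) · x, -0.3333333333333333, x).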

Alternative 2: 74.6% accurate, 0.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\ \;\;\;\;{\left(\mathsf{fma}\left(x, x, 1\right) \cdot \left(1 - x\right)\right)}^{-1} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 0.1)
   (- (pow (* (fma x x 1.0) (- 1.0 x)) -1.0) 1.0)
   (fma (* (* x x) x) -0.3333333333333333 x)))
double code(double x, double y) {
	double tmp;
	if ((2.0 / (1.0 + exp((-2.0 * x)))) <= 0.1) {
		tmp = pow((fma(x, x, 1.0) * (1.0 - x)), -1.0) - 1.0;
	} else {
		tmp = fma(((x * x) * x), -0.3333333333333333, x);
	}
	return tmp;
}

function code(x, y)
	tmp = 0.0
	if (Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) <= 0.1)
		tmp = Float64((Float64(fma(x, x, 1.0) * Float64(1.0 - x)) ^ -1.0) - 1.0);
	else
		tmp = fma(Float64(Float64(x * x) * x), -0.3333333333333333, x);
	end
	return tmp
end

code[x_, y_] := If[LessEqual[N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.1], N[(N[Power[N[(N[(x * x + 1.0), $MachinePrecision] * N[(1.0 - x), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * -0.3333333333333333 + x), $MachinePrecision]]

\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\
\;\;\;\;{\left(\mathsf{fma}\left(x, x, 1\right) \cdot \left(1 - x\right)\right)}^{-1} - 1\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\


\end{array}
\end{array}

Derivation
  1. Split input into 2 regimes
  2. if (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x)))) < 0.10000000000000001

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    4. Step-by-step derivation
      1. lower-+.f64 (5.6)

        \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    5. Applied rewrites (5.6%)

      \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    6. Step-by-step derivation
      1. Applied rewrites (5.1%)

        \[\leadsto \frac{1}{\color{blue}{\frac{1 - x}{1 - x \cdot x}}} - 1 \]
      2. Taylor expanded in x around 0

        \[\leadsto \frac{1}{1 + \color{blue}{x \cdot \left(x \cdot \left(1 + -1 \cdot x\right) - 1\right)}} - 1 \]
      3. Step-by-step derivation
        1. Applied rewrites (97.9%)

          \[\leadsto \frac{1}{\mathsf{fma}\left(x, x, 1\right) \cdot \color{blue}{\left(1 - x\right)}} - 1 \]

    if 0.10000000000000001 < (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x))))

    1. Initial program 36.1%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + \frac{-1}{3} \cdot {x}^{2}\right)} \]
    4. Step-by-step derivation
      1. distribute-lft-in (N/A)

        \[\leadsto \color{blue}{x \cdot 1 + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right)} \]
      2. *-rgt-identity (N/A)

        \[\leadsto \color{blue}{x} + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) \]
      3. +-commutative (N/A)

        \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) + x} \]
      4. *-commutative (N/A)

        \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \frac{-1}{3}\right)} + x \]
      5. associate-*r* (N/A)

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \frac{-1}{3}} + x \]
      6. lower-fma.f64 (N/A)

        \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot {x}^{2}, \frac{-1}{3}, x\right)} \]
      7. *-commutative (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot x}, \frac{-1}{3}, x\right) \]
      8. pow-plus (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      9. lower-pow.f64 (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      10. metadata-eval (69.6)

        \[\leadsto \mathsf{fma}\left({x}^{\color{blue}{3}}, -0.3333333333333333, x\right) \]
    5. Applied rewrites (69.6%)

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, -0.3333333333333333, x\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites (69.6%)

        \[\leadsto \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \]
    7. Recombined 2 regimes into one program.
    8. Final simplification (77.3%)

      \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\ \;\;\;\;{\left(\mathsf{fma}\left(x, x, 1\right) \cdot \left(1 - x\right)\right)}^{-1} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \]
    9. Add Preprocessing
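
For reference, multiplying out the first branch's denominator (plain algebra, not part of Herbie's output):

\[ \mathsf{fma}\left(x, x, 1\right) \cdot \left(1 - x\right) = \left(1 + {x}^{2}\right)\left(1 - x\right) = 1 - x + {x}^{2} - {x}^{3}, \]

which is exactly the expanded form 1 + x · (x · (1 + -1 · x) - 1) produced by the Taylor step in the derivation above.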

Alternative 3: 74.5% accurate, 0.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\ \;\;\;\;{\left(\mathsf{fma}\left(x - 1, x, 1\right)\right)}^{-1} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 0.1)
   (- (pow (fma (- x 1.0) x 1.0) -1.0) 1.0)
   (fma (* (* x x) x) -0.3333333333333333 x)))
double code(double x, double y) {
	double tmp;
	if ((2.0 / (1.0 + exp((-2.0 * x)))) <= 0.1) {
		tmp = pow(fma((x - 1.0), x, 1.0), -1.0) - 1.0;
	} else {
		tmp = fma(((x * x) * x), -0.3333333333333333, x);
	}
	return tmp;
}

function code(x, y)
	tmp = 0.0
	if (Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) <= 0.1)
		tmp = Float64((fma(Float64(x - 1.0), x, 1.0) ^ -1.0) - 1.0);
	else
		tmp = fma(Float64(Float64(x * x) * x), -0.3333333333333333, x);
	end
	return tmp
end

code[x_, y_] := If[LessEqual[N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.1], N[(N[Power[N[(N[(x - 1.0), $MachinePrecision] * x + 1.0), $MachinePrecision], -1.0], $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * -0.3333333333333333 + x), $MachinePrecision]]

\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\
\;\;\;\;{\left(\mathsf{fma}\left(x - 1, x, 1\right)\right)}^{-1} - 1\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\


\end{array}
\end{array}

Derivation
  1. Split input into 2 regimes
  2. if (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x)))) < 0.10000000000000001

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    4. Step-by-step derivation
      1. lower-+.f64 (5.6)

        \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    5. Applied rewrites (5.6%)

      \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    6. Step-by-step derivation
      1. Applied rewrites (5.1%)

        \[\leadsto \frac{1}{\color{blue}{\frac{1 - x}{1 - x \cdot x}}} - 1 \]
      2. Taylor expanded in x around 0

        \[\leadsto \frac{1}{1 + \color{blue}{x \cdot \left(x - 1\right)}} - 1 \]
      3. Step-by-step derivation
        1. Applied rewrites (97.8%)

          \[\leadsto \frac{1}{\mathsf{fma}\left(x - 1, \color{blue}{x}, 1\right)} - 1 \]

    if 0.10000000000000001 < (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x))))

    1. Initial program 36.1%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + \frac{-1}{3} \cdot {x}^{2}\right)} \]
    4. Step-by-step derivation
      1. distribute-lft-in (N/A)

        \[\leadsto \color{blue}{x \cdot 1 + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right)} \]
      2. *-rgt-identity (N/A)

        \[\leadsto \color{blue}{x} + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) \]
      3. +-commutative (N/A)

        \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) + x} \]
      4. *-commutative (N/A)

        \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \frac{-1}{3}\right)} + x \]
      5. associate-*r* (N/A)

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \frac{-1}{3}} + x \]
      6. lower-fma.f64 (N/A)

        \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot {x}^{2}, \frac{-1}{3}, x\right)} \]
      7. *-commutative (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot x}, \frac{-1}{3}, x\right) \]
      8. pow-plus (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      9. lower-pow.f64 (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      10. metadata-eval (69.6)

        \[\leadsto \mathsf{fma}\left({x}^{\color{blue}{3}}, -0.3333333333333333, x\right) \]
    5. Applied rewrites (69.6%)

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, -0.3333333333333333, x\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites (69.6%)

        \[\leadsto \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \]
    7. Recombined 2 regimes into one program.
    8. Final simplification (77.3%)

      \[\leadsto \begin{array}{l} \mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\ \;\;\;\;{\left(\mathsf{fma}\left(x - 1, x, 1\right)\right)}^{-1} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \]
    9. Add Preprocessing
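
For reference, the first branch here differs from Alternative 2 only in its denominator (plain algebra, not part of Herbie's output):

\[ \mathsf{fma}\left(x - 1, x, 1\right) = {x}^{2} - x + 1, \]

which drops the cubic term of Alternative 2's 1 - x + x² - x³; the nearly identical accuracy (74.5% vs 74.6%) is consistent with that term mattering little in this regime.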

Alternative 4: 74.6% accurate, 0.7× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\ \;\;\;\;\frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(x \cdot x\right) \cdot -1.7777777777777777}{2 - -1.3333333333333333 \cdot x}, x, -2\right), x, 2\right)} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 0.1)
   (-
    (/
     2.0
     (fma
      (fma
       (/ (* (* x x) -1.7777777777777777) (- 2.0 (* -1.3333333333333333 x)))
       x
       -2.0)
      x
      2.0))
    1.0)
   (fma (* (* x x) x) -0.3333333333333333 x)))
double code(double x, double y) {
	double tmp;
	if ((2.0 / (1.0 + exp((-2.0 * x)))) <= 0.1) {
		tmp = (2.0 / fma(fma((((x * x) * -1.7777777777777777) / (2.0 - (-1.3333333333333333 * x))), x, -2.0), x, 2.0)) - 1.0;
	} else {
		tmp = fma(((x * x) * x), -0.3333333333333333, x);
	}
	return tmp;
}

function code(x, y)
	tmp = 0.0
	if (Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) <= 0.1)
		tmp = Float64(Float64(2.0 / fma(fma(Float64(Float64(Float64(x * x) * -1.7777777777777777) / Float64(2.0 - Float64(-1.3333333333333333 * x))), x, -2.0), x, 2.0)) - 1.0);
	else
		tmp = fma(Float64(Float64(x * x) * x), -0.3333333333333333, x);
	end
	return tmp
end

code[x_, y_] := If[LessEqual[N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.1], N[(N[(2.0 / N[(N[(N[(N[(N[(x * x), $MachinePrecision] * -1.7777777777777777), $MachinePrecision] / N[(2.0 - N[(-1.3333333333333333 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + -2.0), $MachinePrecision] * x + 2.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * -0.3333333333333333 + x), $MachinePrecision]]

\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\
\;\;\;\;\frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(x \cdot x\right) \cdot -1.7777777777777777}{2 - -1.3333333333333333 \cdot x}, x, -2\right), x, 2\right)} - 1\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\


\end{array}
\end{array}

Derivation
  1. Split input into 2 regimes
  2. if (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x)))) < 0.10000000000000001

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \frac{2}{\color{blue}{2 + x \cdot \left(x \cdot \left(2 + \frac{-4}{3} \cdot x\right) - 2\right)}} - 1 \]
    4. Step-by-step derivation
      1. +-commutative (N/A)

        \[\leadsto \frac{2}{\color{blue}{x \cdot \left(x \cdot \left(2 + \frac{-4}{3} \cdot x\right) - 2\right) + 2}} - 1 \]
      2. *-commutative (N/A)

        \[\leadsto \frac{2}{\color{blue}{\left(x \cdot \left(2 + \frac{-4}{3} \cdot x\right) - 2\right) \cdot x} + 2} - 1 \]
      3. lower-fma.f64 (N/A)

        \[\leadsto \frac{2}{\color{blue}{\mathsf{fma}\left(x \cdot \left(2 + \frac{-4}{3} \cdot x\right) - 2, x, 2\right)}} - 1 \]
      4. sub-neg (N/A)

        \[\leadsto \frac{2}{\mathsf{fma}\left(\color{blue}{x \cdot \left(2 + \frac{-4}{3} \cdot x\right) + \left(\mathsf{neg}\left(2\right)\right)}, x, 2\right)} - 1 \]
      5. metadata-eval (N/A)

        \[\leadsto \frac{2}{\mathsf{fma}\left(x \cdot \left(2 + \frac{-4}{3} \cdot x\right) + \color{blue}{-2}, x, 2\right)} - 1 \]
      6. *-commutative (N/A)

        \[\leadsto \frac{2}{\mathsf{fma}\left(\color{blue}{\left(2 + \frac{-4}{3} \cdot x\right) \cdot x} + -2, x, 2\right)} - 1 \]
      7. lower-fma.f64 (N/A)

        \[\leadsto \frac{2}{\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(2 + \frac{-4}{3} \cdot x, x, -2\right)}, x, 2\right)} - 1 \]
      8. +-commutative (N/A)

        \[\leadsto \frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{\frac{-4}{3} \cdot x + 2}, x, -2\right), x, 2\right)} - 1 \]
      9. lower-fma.f64 (97.8)

        \[\leadsto \frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(\color{blue}{\mathsf{fma}\left(-1.3333333333333333, x, 2\right)}, x, -2\right), x, 2\right)} - 1 \]
    5. Applied rewrites (97.8%)

      \[\leadsto \frac{2}{\color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-1.3333333333333333, x, 2\right), x, -2\right), x, 2\right)}} - 1 \]
    6. Step-by-step derivation
      1. Applied rewrites (97.8%)

        \[\leadsto \frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(\frac{4 - 1.7777777777777777 \cdot \left(x \cdot x\right)}{2 - -1.3333333333333333 \cdot x}, x, -2\right), x, 2\right)} - 1 \]
      2. Taylor expanded in x around inf

        \[\leadsto \frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(\frac{\frac{-16}{9} \cdot {x}^{2}}{2 - \frac{-4}{3} \cdot x}, x, -2\right), x, 2\right)} - 1 \]
      3. Step-by-step derivation
        1. Applied rewrites (97.9%)

          \[\leadsto \frac{2}{\mathsf{fma}\left(\mathsf{fma}\left(\frac{\left(x \cdot x\right) \cdot -1.7777777777777777}{2 - -1.3333333333333333 \cdot x}, x, -2\right), x, 2\right)} - 1 \]

    if 0.10000000000000001 < (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x))))

    1. Initial program 36.1%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + \frac{-1}{3} \cdot {x}^{2}\right)} \]
    4. Step-by-step derivation
      1. distribute-lft-in (N/A)

        \[\leadsto \color{blue}{x \cdot 1 + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right)} \]
      2. *-rgt-identity (N/A)

        \[\leadsto \color{blue}{x} + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) \]
      3. +-commutative (N/A)

        \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) + x} \]
      4. *-commutative (N/A)

        \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \frac{-1}{3}\right)} + x \]
      5. associate-*r* (N/A)

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \frac{-1}{3}} + x \]
      6. lower-fma.f64 (N/A)

        \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot {x}^{2}, \frac{-1}{3}, x\right)} \]
      7. *-commutative (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot x}, \frac{-1}{3}, x\right) \]
      8. pow-plus (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      9. lower-pow.f64 (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      10. metadata-eval (69.6)

        \[\leadsto \mathsf{fma}\left({x}^{\color{blue}{3}}, -0.3333333333333333, x\right) \]
    5. Applied rewrites (69.6%)

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, -0.3333333333333333, x\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites (69.6%)

        \[\leadsto \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \]
    7. Recombined 2 regimes into one program.
    8. Add Preprocessing
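
For reference, the Horner-form intermediate in the derivation above expands (plain algebra, not part of Herbie's output) to the degree-3 Maclaurin polynomial of the denominator 1 + e^{-2x}:

\[ \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-\tfrac{4}{3}, x, 2\right), x, -2\right), x, 2\right) = 2 - 2x + 2{x}^{2} - \tfrac{4}{3}{x}^{3} \approx 1 + e^{-2 \cdot x}. \]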

Alternative 5: 74.3% accurate, 0.9× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\ \;\;\;\;\frac{-1}{x - 1} - 1\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\ \end{array} \end{array} \]
(FPCore (x y)
 :precision binary64
 (if (<= (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 0.1)
   (- (/ -1.0 (- x 1.0)) 1.0)
   (fma (* (* x x) x) -0.3333333333333333 x)))
double code(double x, double y) {
	double tmp;
	if ((2.0 / (1.0 + exp((-2.0 * x)))) <= 0.1) {
		tmp = (-1.0 / (x - 1.0)) - 1.0;
	} else {
		tmp = fma(((x * x) * x), -0.3333333333333333, x);
	}
	return tmp;
}

function code(x, y)
	tmp = 0.0
	if (Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) <= 0.1)
		tmp = Float64(Float64(-1.0 / Float64(x - 1.0)) - 1.0);
	else
		tmp = fma(Float64(Float64(x * x) * x), -0.3333333333333333, x);
	end
	return tmp
end

code[x_, y_] := If[LessEqual[N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.1], N[(N[(-1.0 / N[(x - 1.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * -0.3333333333333333 + x), $MachinePrecision]]

\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;\frac{2}{1 + e^{-2 \cdot x}} \leq 0.1:\\
\;\;\;\;\frac{-1}{x - 1} - 1\\

\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)\\


\end{array}
\end{array}

Derivation
  1. Split input into 2 regimes
  2. if (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x)))) < 0.10000000000000001

    1. Initial program 100.0%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    4. Step-by-step derivation
      1. lower-+.f64 (5.6)

        \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    5. Applied rewrites (5.6%)

      \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
    6. Step-by-step derivation
      1. Applied rewrites (5.1%)

        \[\leadsto \frac{x \cdot x - 1}{\color{blue}{x - 1}} - 1 \]
      2. Taylor expanded in x around 0

        \[\leadsto \frac{-1}{\color{blue}{x} - 1} - 1 \]
      3. Step-by-step derivation
        1. Applied rewrites (97.0%)

          \[\leadsto \frac{-1}{\color{blue}{x} - 1} - 1 \]

    if 0.10000000000000001 < (/.f64 #s(literal 2 binary64) (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x))))

    1. Initial program 36.1%

      \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
    2. Add Preprocessing
    3. Taylor expanded in x around 0

      \[\leadsto \color{blue}{x \cdot \left(1 + \frac{-1}{3} \cdot {x}^{2}\right)} \]
    4. Step-by-step derivation
      1. distribute-lft-in (N/A)

        \[\leadsto \color{blue}{x \cdot 1 + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right)} \]
      2. *-rgt-identity (N/A)

        \[\leadsto \color{blue}{x} + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) \]
      3. +-commutative (N/A)

        \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) + x} \]
      4. *-commutative (N/A)

        \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \frac{-1}{3}\right)} + x \]
      5. associate-*r* (N/A)

        \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \frac{-1}{3}} + x \]
      6. lower-fma.f64 (N/A)

        \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot {x}^{2}, \frac{-1}{3}, x\right)} \]
      7. *-commutative (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot x}, \frac{-1}{3}, x\right) \]
      8. pow-plus (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      9. lower-pow.f64 (N/A)

        \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
      10. metadata-eval (69.6)

        \[\leadsto \mathsf{fma}\left({x}^{\color{blue}{3}}, -0.3333333333333333, x\right) \]
    5. Applied rewrites (69.6%)

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, -0.3333333333333333, x\right)} \]
    6. Step-by-step derivation
      1. Applied rewrites (69.6%)

        \[\leadsto \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \]
    7. Recombined 2 regimes into one program.
    8. Add Preprocessing
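
For reference, the first branch simplifies algebraically (not part of Herbie's output):

\[ \frac{-1}{x - 1} - 1 = \frac{1}{1 - x} - 1 = \frac{x}{1 - x}, \]

a single-division rational stand-in for the regime where 2/(1 + e^{-2x}) ≤ 0.1.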

Alternative 6: 50.9% accurate, 7.2× speedup

\[\begin{array}{l} \\ \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \end{array} \]
(FPCore (x y) :precision binary64 (fma (* (* x x) x) -0.3333333333333333 x))
double code(double x, double y) {
	return fma(((x * x) * x), -0.3333333333333333, x);
}

function code(x, y)
	return fma(Float64(Float64(x * x) * x), -0.3333333333333333, x)
end

code[x_, y_] := N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * -0.3333333333333333 + x), $MachinePrecision]

\begin{array}{l}

\\
\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right)
\end{array}

Derivation
  1. Initial program 53.6%

    \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{x \cdot \left(1 + \frac{-1}{3} \cdot {x}^{2}\right)} \]
  4. Step-by-step derivation
    1. distribute-lft-in (N/A)

      \[\leadsto \color{blue}{x \cdot 1 + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right)} \]
    2. *-rgt-identity (N/A)

      \[\leadsto \color{blue}{x} + x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) \]
    3. +-commutative (N/A)

      \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{3} \cdot {x}^{2}\right) + x} \]
    4. *-commutative (N/A)

      \[\leadsto x \cdot \color{blue}{\left({x}^{2} \cdot \frac{-1}{3}\right)} + x \]
    5. associate-*r* (N/A)

      \[\leadsto \color{blue}{\left(x \cdot {x}^{2}\right) \cdot \frac{-1}{3}} + x \]
    6. lower-fma.f64 (N/A)

      \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot {x}^{2}, \frac{-1}{3}, x\right)} \]
    7. *-commutative (N/A)

      \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{2} \cdot x}, \frac{-1}{3}, x\right) \]
    8. pow-plus (N/A)

      \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
    9. lower-pow.f64 (N/A)

      \[\leadsto \mathsf{fma}\left(\color{blue}{{x}^{\left(2 + 1\right)}}, \frac{-1}{3}, x\right) \]
    10. metadata-eval (50.8)

      \[\leadsto \mathsf{fma}\left({x}^{\color{blue}{3}}, -0.3333333333333333, x\right) \]
  5. Applied rewrites (50.8%)

    \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{3}, -0.3333333333333333, x\right)} \]
  6. Step-by-step derivation
    1. Applied rewrites (50.8%)

      \[\leadsto \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \]
  7. Add Preprocessing
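
Since Alternative 6 is the cubic alone, a quick sanity check shows where it is and is not trustworthy. This is a hedged Python sketch, not part of the Herbie report; it compares the cubic against math.tanh, which the specification equals in exact arithmetic. math.fma requires Python 3.13+, so a fallback (without the fused rounding) is used on older versions.

import math

# Use the real fused multiply-add when available (Python 3.13+); otherwise
# fall back to an unfused a*b + c, which evaluates the same polynomial.
fma = getattr(math, "fma", lambda a, b, c: a * b + c)

def alt6(x):
    # Alternative 6: x - x^3/3, evaluated as fma((x*x)*x, -1/3, x)
    return fma((x * x) * x, -0.3333333333333333, x)

for x in [1e-6, 0.1, 0.5, 2.0]:
    print(f"x={x:<5g} cubic={alt6(x): .12g} tanh={math.tanh(x): .12g}")

The agreement is excellent for small x but falls apart by x = 2, matching the drop to 50.9% overall accuracy when the cubic is used everywhere.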

Alternative 7: 6.6% accurate, 17.6× speedup

\[\begin{array}{l} \\ \left(1 + x\right) - 1 \end{array} \]
(FPCore (x y) :precision binary64 (- (+ 1.0 x) 1.0))
double code(double x, double y) {
	return (1.0 + x) - 1.0;
}

real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (1.0d0 + x) - 1.0d0
end function

public static double code(double x, double y) {
	return (1.0 + x) - 1.0;
}

def code(x, y):
	return (1.0 + x) - 1.0

function code(x, y)
	return Float64(Float64(1.0 + x) - 1.0)
end

function tmp = code(x, y)
	tmp = (1.0 + x) - 1.0;
end

code[x_, y_] := N[(N[(1.0 + x), $MachinePrecision] - 1.0), $MachinePrecision]

\begin{array}{l}

\\
\left(1 + x\right) - 1
\end{array}

Derivation
  1. Initial program 53.6%

    \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
  4. Step-by-step derivation
    1. lower-+.f64 (6.7)

      \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
  5. Applied rewrites (6.7%)

    \[\leadsto \color{blue}{\left(1 + x\right)} - 1 \]
  6. Add Preprocessing

Alternative 8: 4.3% accurate, 30.8× speedup

\[\begin{array}{l} \\ 1 - 1 \end{array} \]
(FPCore (x y) :precision binary64 (- 1.0 1.0))
double code(double x, double y) {
	return 1.0 - 1.0;
}

real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = 1.0d0 - 1.0d0
end function

public static double code(double x, double y) {
	return 1.0 - 1.0;
}

def code(x, y):
	return 1.0 - 1.0

function code(x, y)
	return Float64(1.0 - 1.0)
end

function tmp = code(x, y)
	tmp = 1.0 - 1.0;
end

code[x_, y_] := N[(1.0 - 1.0), $MachinePrecision]

\begin{array}{l}

\\
1 - 1
\end{array}

Derivation
  1. Initial program 53.6%

    \[\frac{2}{1 + e^{-2 \cdot x}} - 1 \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{1} - 1 \]
  4. Step-by-step derivation
    1. Applied rewrites (4.5%)

      \[\leadsto \color{blue}{1} - 1 \]
  5. Add Preprocessing

Reproduce

herbie shell --seed 2024313
(FPCore (x y)
  :name "Logistic function from Lakshay Garg"
  :precision binary64
  (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))