Jmat.Real.lambertw, newton loop step

Percentage Accurate: 77.6% → 98.3%
Time: 5.8s
Alternatives: 12
Speedup: 25.5×

Specification

\[\begin{array}{l} \\ \begin{array}{l} t_0 := wj \cdot e^{wj}\\ wj - \frac{t\_0 - x}{e^{wj} + t\_0} \end{array} \end{array} \]
(FPCore (wj x)
 :precision binary64
 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
	double t_0 = wj * exp(wj);
	return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = wj * exp(wj)
    code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
	double t_0 = wj * Math.exp(wj);
	return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
	t_0 = wj * math.exp(wj)
	return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
	t_0 = Float64(wj * exp(wj))
	return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
	t_0 = wj * exp(wj);
	tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
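
This update is a single Newton iteration for solving wj · e^wj = x, i.e. for evaluating the Lambert W function W(x): the numerator is the residual wj · e^wj - x and the denominator is its derivative e^wj + wj · e^wj. As a rough illustration of where the step sits, here is a minimal Python sketch of the surrounding loop; the starting guess, stopping test, and iteration cap are assumptions made for the example, not Jmat's actual choices.

import math

def lambertw_newton(x, w0=0.0, tol=1e-15, max_iter=50):
    # Illustrative Newton loop for W(x); the loop body is exactly the step above.
    wj = w0
    for _ in range(max_iter):
        t_0 = wj * math.exp(wj)
        wj_next = wj - (t_0 - x) / (math.exp(wj) + t_0)
        # Stop once successive iterates agree to the requested relative tolerance.
        if abs(wj_next - wj) <= tol * max(1.0, abs(wj_next)):
            return wj_next
        wj = wj_next
    return wj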

Sampling outcomes in binary64 precision:

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (the variable is chosen in the plot title); the vertical axis is accuracy, and higher is better. Red represents the original program, while blue represents Herbie's suggestion; the two can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 12 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 77.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := wj \cdot e^{wj}\\ wj - \frac{t\_0 - x}{e^{wj} + t\_0} \end{array} \end{array} \]
(FPCore (wj x)
 :precision binary64
 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
	double t_0 = wj * exp(wj);
	return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = wj * exp(wj)
    code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
	double t_0 = wj * Math.exp(wj);
	return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
	t_0 = wj * math.exp(wj)
	return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
	t_0 = Float64(wj * exp(wj))
	return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
	t_0 = wj * exp(wj);
	tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]

Alternative 1: 98.3% accurate, 0.7× speedup

\[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{wj} \cdot wj\\ \mathbf{if}\;wj - \frac{t\_0 - x}{t\_0 + e^{wj}} \leq 4 \cdot 10^{-14}:\\ \;\;\;\;\mathsf{fma}\left(\left(1 - wj\right) \cdot wj, wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \left(\frac{\frac{wj}{wj - -1}}{x} - \frac{e^{-wj}}{wj - -1}\right) \cdot x\\ \end{array} \end{array} \]
(FPCore (wj x)
 :precision binary64
 (let* ((t_0 (* (exp wj) wj)))
   (if (<= (- wj (/ (- t_0 x) (+ t_0 (exp wj)))) 4e-14)
     (fma (* (- 1.0 wj) wj) wj x)
     (- wj (* (- (/ (/ wj (- wj -1.0)) x) (/ (exp (- wj)) (- wj -1.0))) x)))))
double code(double wj, double x) {
	double t_0 = exp(wj) * wj;
	double tmp;
	if ((wj - ((t_0 - x) / (t_0 + exp(wj)))) <= 4e-14) {
		tmp = fma(((1.0 - wj) * wj), wj, x);
	} else {
		tmp = wj - ((((wj / (wj - -1.0)) / x) - (exp(-wj) / (wj - -1.0))) * x);
	}
	return tmp;
}
function code(wj, x)
	t_0 = Float64(exp(wj) * wj)
	tmp = 0.0
	if (Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj)))) <= 4e-14)
		tmp = fma(Float64(Float64(1.0 - wj) * wj), wj, x);
	else
		tmp = Float64(wj - Float64(Float64(Float64(Float64(wj / Float64(wj - -1.0)) / x) - Float64(exp(Float64(-wj)) / Float64(wj - -1.0))) * x));
	end
	return tmp
end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4e-14], N[(N[(N[(1.0 - wj), $MachinePrecision] * wj), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(N[(N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] - N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]]]
Derivation
  1. Split input into 2 regimes
  2. if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 4e-14

    1. Initial program 69.0%

      \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
    2. Add Preprocessing
    3. Taylor expanded in wj around 0

      \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
    4. Applied rewrites 97.8%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
    5. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma}\left(wj \cdot \left(1 - wj\right), wj, x\right) \]
    6. Step-by-step derivation
      1. Applied rewrites 98.0%

        \[\leadsto \mathsf{fma}\left(\left(1 - wj\right) \cdot wj, wj, x\right) \]

      if 4e-14 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))

      1. Initial program 94.0%

        \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
      2. Add Preprocessing
      3. Taylor expanded in x around inf

        \[\leadsto wj - \color{blue}{x \cdot \left(\frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)} - \frac{1}{e^{wj} + wj \cdot e^{wj}}\right)} \]
      4. Step-by-step derivation
        1. sub-neg N/A

          \[\leadsto wj - x \cdot \color{blue}{\left(\frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)} + \left(\mathsf{neg}\left(\frac{1}{e^{wj} + wj \cdot e^{wj}}\right)\right)\right)} \]
        2. +-commutative N/A

          \[\leadsto wj - x \cdot \color{blue}{\left(\left(\mathsf{neg}\left(\frac{1}{e^{wj} + wj \cdot e^{wj}}\right)\right) + \frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)}\right)} \]
        3. neg-sub0 N/A

          \[\leadsto wj - x \cdot \left(\color{blue}{\left(0 - \frac{1}{e^{wj} + wj \cdot e^{wj}}\right)} + \frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)}\right) \]
        4. associate-+l- N/A

          \[\leadsto wj - x \cdot \color{blue}{\left(0 - \left(\frac{1}{e^{wj} + wj \cdot e^{wj}} - \frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)}\right)\right)} \]
        5. unsub-neg N/A

          \[\leadsto wj - x \cdot \left(0 - \color{blue}{\left(\frac{1}{e^{wj} + wj \cdot e^{wj}} + \left(\mathsf{neg}\left(\frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)}\right)\right)\right)}\right) \]
        6. mul-1-neg N/A

          \[\leadsto wj - x \cdot \left(0 - \left(\frac{1}{e^{wj} + wj \cdot e^{wj}} + \color{blue}{-1 \cdot \frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)}}\right)\right) \]
        7. +-commutative N/A

          \[\leadsto wj - x \cdot \left(0 - \color{blue}{\left(-1 \cdot \frac{wj \cdot e^{wj}}{x \cdot \left(e^{wj} + wj \cdot e^{wj}\right)} + \frac{1}{e^{wj} + wj \cdot e^{wj}}\right)}\right) \]
      5. Applied rewrites 99.8%

        \[\leadsto wj - \color{blue}{\left(\frac{\frac{wj}{1 + wj}}{x} - \frac{e^{-wj}}{1 + wj}\right) \cdot x} \]
    7. Recombined 2 regimes into one program.
    8. Final simplification 98.5%

      \[\leadsto \begin{array}{l} \mathbf{if}\;wj - \frac{e^{wj} \cdot wj - x}{e^{wj} \cdot wj + e^{wj}} \leq 4 \cdot 10^{-14}:\\ \;\;\;\;\mathsf{fma}\left(\left(1 - wj\right) \cdot wj, wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \left(\frac{\frac{wj}{wj - -1}}{x} - \frac{e^{-wj}}{wj - -1}\right) \cdot x\\ \end{array} \]
    9. Add Preprocessing
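
The repeated "Taylor expanded in wj around 0" steps in this and the later derivations all start from the same series for the Newton update; writing out its first terms (they agree with the fma polynomial Herbie derives above) makes the small-result branch easier to read:

\[ wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = x - 2 x \cdot wj + \left(1 + \frac{5}{2} x\right) \cdot {wj}^2 + O\left({wj}^3\right) \]

When x is itself small this collapses to x + (1 - wj) · wj², which is exactly the fma((1 - wj) · wj, wj, x) branch used when the recomputed step is at most 4e-14.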

    Alternative 2: 84.7% accurate, 0.5× speedup

    \[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{wj} \cdot wj\\ t_1 := wj - \frac{t\_0 - x}{t\_0 + e^{wj}}\\ \mathbf{if}\;t\_1 \leq -4 \cdot 10^{-266}:\\ \;\;\;\;\mathsf{fma}\left(-2, wj, 1\right) \cdot x\\ \mathbf{elif}\;t\_1 \leq 0:\\ \;\;\;\;wj \cdot wj\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(-2 \cdot x, wj, x\right)\\ \end{array} \end{array} \]
    (FPCore (wj x)
     :precision binary64
     (let* ((t_0 (* (exp wj) wj)) (t_1 (- wj (/ (- t_0 x) (+ t_0 (exp wj))))))
       (if (<= t_1 -4e-266)
         (* (fma -2.0 wj 1.0) x)
         (if (<= t_1 0.0) (* wj wj) (fma (* -2.0 x) wj x)))))
    double code(double wj, double x) {
    	double t_0 = exp(wj) * wj;
    	double t_1 = wj - ((t_0 - x) / (t_0 + exp(wj)));
    	double tmp;
    	if (t_1 <= -4e-266) {
    		tmp = fma(-2.0, wj, 1.0) * x;
    	} else if (t_1 <= 0.0) {
    		tmp = wj * wj;
    	} else {
    		tmp = fma((-2.0 * x), wj, x);
    	}
    	return tmp;
    }
    
    function code(wj, x)
    	t_0 = Float64(exp(wj) * wj)
    	t_1 = Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj))))
    	tmp = 0.0
    	if (t_1 <= -4e-266)
    		tmp = Float64(fma(-2.0, wj, 1.0) * x);
    	elseif (t_1 <= 0.0)
    		tmp = Float64(wj * wj);
    	else
    		tmp = fma(Float64(-2.0 * x), wj, x);
    	end
    	return tmp
    end
    
    code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, Block[{t$95$1 = N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -4e-266], N[(N[(-2.0 * wj + 1.0), $MachinePrecision] * x), $MachinePrecision], If[LessEqual[t$95$1, 0.0], N[(wj * wj), $MachinePrecision], N[(N[(-2.0 * x), $MachinePrecision] * wj + x), $MachinePrecision]]]]]
    
    
    Derivation
    1. Split input into 3 regimes
    2. if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -3.9999999999999999e-266

      1. Initial program 96.4%

        \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
      2. Add Preprocessing
      3. Taylor expanded in wj around 0

        \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
      4. Applied rewrites 95.9%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
      5. Taylor expanded in wj around 0

        \[\leadsto \color{blue}{x + -2 \cdot \left(wj \cdot x\right)} \]
      6. Step-by-step derivation
        1. associate-*r* N/A

          \[\leadsto x + \color{blue}{\left(-2 \cdot wj\right) \cdot x} \]
        2. distribute-rgt1-in N/A

          \[\leadsto \color{blue}{\left(-2 \cdot wj + 1\right) \cdot x} \]
        3. +-commutative N/A

          \[\leadsto \color{blue}{\left(1 + -2 \cdot wj\right)} \cdot x \]
        4. lower-*.f64 N/A

          \[\leadsto \color{blue}{\left(1 + -2 \cdot wj\right) \cdot x} \]
        5. +-commutative N/A

          \[\leadsto \color{blue}{\left(-2 \cdot wj + 1\right)} \cdot x \]
        6. lower-fma.f64 96.8%

          \[\leadsto \color{blue}{\mathsf{fma}\left(-2, wj, 1\right)} \cdot x \]
      7. Applied rewrites 96.8%

        \[\leadsto \color{blue}{\mathsf{fma}\left(-2, wj, 1\right) \cdot x} \]

      if -3.9999999999999999e-266 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.0

      1. Initial program 5.0%

        \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
      2. Add Preprocessing
      3. Taylor expanded in wj around 0

        \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
      4. Applied rewrites 100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
      5. Taylor expanded in wj around 0

        \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
      6. Step-by-step derivation
        1. +-commutative N/A

          \[\leadsto \color{blue}{wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right) + x} \]
      7. Applied rewrites 100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(wj + x \cdot \mathsf{fma}\left(2.5, wj, -2\right), wj, x\right)} \]
      8. Taylor expanded in x around 0

        \[\leadsto {wj}^{\color{blue}{2}} \]
      9. Step-by-step derivation
        1. Applied rewrites 58.0%

          \[\leadsto wj \cdot \color{blue}{wj} \]

        if 0.0 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))

        1. Initial program 94.3%

          \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
        2. Add Preprocessing
        3. Taylor expanded in wj around 0

          \[\leadsto \color{blue}{x + -2 \cdot \left(wj \cdot x\right)} \]
        4. Step-by-step derivation
          1. +-commutative N/A

            \[\leadsto \color{blue}{-2 \cdot \left(wj \cdot x\right) + x} \]
          2. *-commutative N/A

            \[\leadsto \color{blue}{\left(wj \cdot x\right) \cdot -2} + x \]
          3. lower-fma.f64 N/A

            \[\leadsto \color{blue}{\mathsf{fma}\left(wj \cdot x, -2, x\right)} \]
          4. *-commutative N/A

            \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot wj}, -2, x\right) \]
          5. lower-*.f64 86.1%

            \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot wj}, -2, x\right) \]
        5. Applied rewrites 86.1%

          \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot wj, -2, x\right)} \]
        6. Step-by-step derivation
          1. Applied rewrites 86.1%

            \[\leadsto \mathsf{fma}\left(-2 \cdot x, \color{blue}{wj}, x\right) \]
        7. Recombined 3 regimes into one program.
        8. Final simplification 84.1%

          \[\leadsto \begin{array}{l} \mathbf{if}\;wj - \frac{e^{wj} \cdot wj - x}{e^{wj} \cdot wj + e^{wj}} \leq -4 \cdot 10^{-266}:\\ \;\;\;\;\mathsf{fma}\left(-2, wj, 1\right) \cdot x\\ \mathbf{elif}\;wj - \frac{e^{wj} \cdot wj - x}{e^{wj} \cdot wj + e^{wj}} \leq 0:\\ \;\;\;\;wj \cdot wj\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(-2 \cdot x, wj, x\right)\\ \end{array} \]
        9. Add Preprocessing
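
The accuracy figures quoted for each alternative come from Herbie's own sampling of binary64 inputs. As a rough independent spot check, any of these rewrites can be compared against the same expression evaluated in high precision; the sketch below does this for the original step using mpmath and an illustrative input range, both of which are assumptions made for the example rather than part of this report.

import math
import random
from mpmath import mp, mpf, exp as mp_exp

mp.prec = 200  # bits of precision for the reference evaluation

def step_f64(wj, x):
    # Original double-precision Newton step.
    t_0 = wj * math.exp(wj)
    return wj - (t_0 - x) / (math.exp(wj) + t_0)

def step_ref(wj, x):
    # Same expression, evaluated in 200-bit arithmetic.
    wj, x = mpf(wj), mpf(x)
    t_0 = wj * mp_exp(wj)
    return wj - (t_0 - x) / (mp_exp(wj) + t_0)

worst = mpf(0)
for _ in range(10000):
    wj = random.uniform(0.0, 2.0)  # illustrative range only
    x = random.uniform(0.0, 2.0)
    ref = step_ref(wj, x)
    if ref != 0:
        worst = max(worst, abs((step_f64(wj, x) - ref) / ref))
print("worst relative error seen:", worst)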

        Alternative 3: 84.7% accurate, 0.5× speedup

        \[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{wj} \cdot wj\\ t_1 := wj - \frac{t\_0 - x}{t\_0 + e^{wj}}\\ t_2 := \mathsf{fma}\left(-2, wj, 1\right) \cdot x\\ \mathbf{if}\;t\_1 \leq -4 \cdot 10^{-266}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 0:\\ \;\;\;\;wj \cdot wj\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
        (FPCore (wj x)
         :precision binary64
         (let* ((t_0 (* (exp wj) wj))
                (t_1 (- wj (/ (- t_0 x) (+ t_0 (exp wj)))))
                (t_2 (* (fma -2.0 wj 1.0) x)))
           (if (<= t_1 -4e-266) t_2 (if (<= t_1 0.0) (* wj wj) t_2))))
        double code(double wj, double x) {
        	double t_0 = exp(wj) * wj;
        	double t_1 = wj - ((t_0 - x) / (t_0 + exp(wj)));
        	double t_2 = fma(-2.0, wj, 1.0) * x;
        	double tmp;
        	if (t_1 <= -4e-266) {
        		tmp = t_2;
        	} else if (t_1 <= 0.0) {
        		tmp = wj * wj;
        	} else {
        		tmp = t_2;
        	}
        	return tmp;
        }
        
        function code(wj, x)
        	t_0 = Float64(exp(wj) * wj)
        	t_1 = Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj))))
        	t_2 = Float64(fma(-2.0, wj, 1.0) * x)
        	tmp = 0.0
        	if (t_1 <= -4e-266)
        		tmp = t_2;
        	elseif (t_1 <= 0.0)
        		tmp = Float64(wj * wj);
        	else
        		tmp = t_2;
        	end
        	return tmp
        end
        
        code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, Block[{t$95$1 = N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(-2.0 * wj + 1.0), $MachinePrecision] * x), $MachinePrecision]}, If[LessEqual[t$95$1, -4e-266], t$95$2, If[LessEqual[t$95$1, 0.0], N[(wj * wj), $MachinePrecision], t$95$2]]]]]
        
        
        Derivation
        1. Split input into 2 regimes
        2. if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -3.9999999999999999e-266 or 0.0 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))

          1. Initial program 95.3%

            \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
          2. Add Preprocessing
          3. Taylor expanded in wj around 0

            \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
          4. Applied rewrites 92.9%

            \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
          5. Taylor expanded in wj around 0

            \[\leadsto \color{blue}{x + -2 \cdot \left(wj \cdot x\right)} \]
          6. Step-by-step derivation
            1. associate-*r* N/A

              \[\leadsto x + \color{blue}{\left(-2 \cdot wj\right) \cdot x} \]
            2. distribute-rgt1-in N/A

              \[\leadsto \color{blue}{\left(-2 \cdot wj + 1\right) \cdot x} \]
            3. +-commutative N/A

              \[\leadsto \color{blue}{\left(1 + -2 \cdot wj\right)} \cdot x \]
            4. lower-*.f64 N/A

              \[\leadsto \color{blue}{\left(1 + -2 \cdot wj\right) \cdot x} \]
            5. +-commutative N/A

              \[\leadsto \color{blue}{\left(-2 \cdot wj + 1\right)} \cdot x \]
            6. lower-fma.f64 91.2%

              \[\leadsto \color{blue}{\mathsf{fma}\left(-2, wj, 1\right)} \cdot x \]
          7. Applied rewrites 91.2%

            \[\leadsto \color{blue}{\mathsf{fma}\left(-2, wj, 1\right) \cdot x} \]

          if -3.9999999999999999e-266 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.0

          1. Initial program 5.0%

            \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
          2. Add Preprocessing
          3. Taylor expanded in wj around 0

            \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
          4. Applied rewrites 100.0%

            \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
          5. Taylor expanded in wj around 0

            \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
          6. Step-by-step derivation
            1. +-commutative N/A

              \[\leadsto \color{blue}{wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right) + x} \]
          7. Applied rewrites 100.0%

            \[\leadsto \color{blue}{\mathsf{fma}\left(wj + x \cdot \mathsf{fma}\left(2.5, wj, -2\right), wj, x\right)} \]
          8. Taylor expanded in x around 0

            \[\leadsto {wj}^{\color{blue}{2}} \]
          9. Step-by-step derivation
            1. Applied rewrites 58.0%

              \[\leadsto wj \cdot \color{blue}{wj} \]
          10. Recombined 2 regimes into one program.
          11. Final simplification 84.1%

            \[\leadsto \begin{array}{l} \mathbf{if}\;wj - \frac{e^{wj} \cdot wj - x}{e^{wj} \cdot wj + e^{wj}} \leq -4 \cdot 10^{-266}:\\ \;\;\;\;\mathsf{fma}\left(-2, wj, 1\right) \cdot x\\ \mathbf{elif}\;wj - \frac{e^{wj} \cdot wj - x}{e^{wj} \cdot wj + e^{wj}} \leq 0:\\ \;\;\;\;wj \cdot wj\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(-2, wj, 1\right) \cdot x\\ \end{array} \]
          12. Add Preprocessing

          Alternative 4: 82.0% accurate, 0.5× speedup

          \[\begin{array}{l} \\ \begin{array}{l} t_0 := e^{wj} \cdot wj\\ t_1 := wj - \frac{t\_0 - x}{t\_0 + e^{wj}}\\ t_2 := wj - \left(-x\right)\\ \mathbf{if}\;t\_1 \leq -4 \cdot 10^{-266}:\\ \;\;\;\;t\_2\\ \mathbf{elif}\;t\_1 \leq 0:\\ \;\;\;\;wj \cdot wj\\ \mathbf{else}:\\ \;\;\;\;t\_2\\ \end{array} \end{array} \]
          (FPCore (wj x)
           :precision binary64
           (let* ((t_0 (* (exp wj) wj))
                  (t_1 (- wj (/ (- t_0 x) (+ t_0 (exp wj)))))
                  (t_2 (- wj (- x))))
             (if (<= t_1 -4e-266) t_2 (if (<= t_1 0.0) (* wj wj) t_2))))
          double code(double wj, double x) {
          	double t_0 = exp(wj) * wj;
          	double t_1 = wj - ((t_0 - x) / (t_0 + exp(wj)));
          	double t_2 = wj - -x;
          	double tmp;
          	if (t_1 <= -4e-266) {
          		tmp = t_2;
          	} else if (t_1 <= 0.0) {
          		tmp = wj * wj;
          	} else {
          		tmp = t_2;
          	}
          	return tmp;
          }
          
          real(8) function code(wj, x)
              real(8), intent (in) :: wj
              real(8), intent (in) :: x
              real(8) :: t_0
              real(8) :: t_1
              real(8) :: t_2
              real(8) :: tmp
              t_0 = exp(wj) * wj
              t_1 = wj - ((t_0 - x) / (t_0 + exp(wj)))
              t_2 = wj - -x
              if (t_1 <= (-4d-266)) then
                  tmp = t_2
              else if (t_1 <= 0.0d0) then
                  tmp = wj * wj
              else
                  tmp = t_2
              end if
              code = tmp
          end function
          
          public static double code(double wj, double x) {
          	double t_0 = Math.exp(wj) * wj;
          	double t_1 = wj - ((t_0 - x) / (t_0 + Math.exp(wj)));
          	double t_2 = wj - -x;
          	double tmp;
          	if (t_1 <= -4e-266) {
          		tmp = t_2;
          	} else if (t_1 <= 0.0) {
          		tmp = wj * wj;
          	} else {
          		tmp = t_2;
          	}
          	return tmp;
          }
          
          def code(wj, x):
          	t_0 = math.exp(wj) * wj
          	t_1 = wj - ((t_0 - x) / (t_0 + math.exp(wj)))
          	t_2 = wj - -x
          	tmp = 0
          	if t_1 <= -4e-266:
          		tmp = t_2
          	elif t_1 <= 0.0:
          		tmp = wj * wj
          	else:
          		tmp = t_2
          	return tmp
          
          function code(wj, x)
          	t_0 = Float64(exp(wj) * wj)
          	t_1 = Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj))))
          	t_2 = Float64(wj - Float64(-x))
          	tmp = 0.0
          	if (t_1 <= -4e-266)
          		tmp = t_2;
          	elseif (t_1 <= 0.0)
          		tmp = Float64(wj * wj);
          	else
          		tmp = t_2;
          	end
          	return tmp
          end
          
          function tmp_2 = code(wj, x)
          	t_0 = exp(wj) * wj;
          	t_1 = wj - ((t_0 - x) / (t_0 + exp(wj)));
          	t_2 = wj - -x;
          	tmp = 0.0;
          	if (t_1 <= -4e-266)
          		tmp = t_2;
          	elseif (t_1 <= 0.0)
          		tmp = wj * wj;
          	else
          		tmp = t_2;
          	end
          	tmp_2 = tmp;
          end
          
          code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, Block[{t$95$1 = N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(wj - (-x)), $MachinePrecision]}, If[LessEqual[t$95$1, -4e-266], t$95$2, If[LessEqual[t$95$1, 0.0], N[(wj * wj), $MachinePrecision], t$95$2]]]]]
          
          
          Derivation
          1. Split input into 2 regimes
          2. if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -3.9999999999999999e-266 or 0.0 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))

            1. Initial program 95.3%

              \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
            2. Add Preprocessing
            3. Taylor expanded in wj around 0

              \[\leadsto wj - \color{blue}{-1 \cdot x} \]
            4. Step-by-step derivation
              1. mul-1-neg N/A

                \[\leadsto wj - \color{blue}{\left(\mathsf{neg}\left(x\right)\right)} \]
              2. lower-neg.f64 87.3%

                \[\leadsto wj - \color{blue}{\left(-x\right)} \]
            5. Applied rewrites 87.3%

              \[\leadsto wj - \color{blue}{\left(-x\right)} \]

            if -3.9999999999999999e-266 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.0

            1. Initial program 5.0%

              \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
            2. Add Preprocessing
            3. Taylor expanded in wj around 0

              \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
            4. Applied rewrites 100.0%

              \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
            5. Taylor expanded in wj around 0

              \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
            6. Step-by-step derivation
              1. +-commutative N/A

                \[\leadsto \color{blue}{wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right) + x} \]
            7. Applied rewrites 100.0%

              \[\leadsto \color{blue}{\mathsf{fma}\left(wj + x \cdot \mathsf{fma}\left(2.5, wj, -2\right), wj, x\right)} \]
            8. Taylor expanded in x around 0

              \[\leadsto {wj}^{\color{blue}{2}} \]
            9. Step-by-step derivation
              1. Applied rewrites 58.0%

                \[\leadsto wj \cdot \color{blue}{wj} \]
            10. Recombined 2 regimes into one program.
            11. Final simplification 81.0%

              \[\leadsto \begin{array}{l} \mathbf{if}\;wj - \frac{e^{wj} \cdot wj - x}{e^{wj} \cdot wj + e^{wj}} \leq -4 \cdot 10^{-266}:\\ \;\;\;\;wj - \left(-x\right)\\ \mathbf{elif}\;wj - \frac{e^{wj} \cdot wj - x}{e^{wj} \cdot wj + e^{wj}} \leq 0:\\ \;\;\;\;wj \cdot wj\\ \mathbf{else}:\\ \;\;\;\;wj - \left(-x\right)\\ \end{array} \]
            12. Add Preprocessing
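
The branch used outside the middle regime comes from keeping only the zeroth-order term of the quotient: at wj = 0 the numerator is -x and the denominator is 1, so the fraction reduces to -1 · x and the update becomes wj - (-x), as in step 3 of the derivation above.

\[ wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \approx wj - \left(-x\right) \quad \text{for small } wj \]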

            Alternative 5: 97.6% accurate, 6.6× speedup

            \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;wj \leq 0.0185:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \end{array} \]
            (FPCore (wj x)
             :precision binary64
             (if (<= wj 0.0185)
               (fma
                (fma
                 (fma 2.5 x (- 1.0 (* (fma 0.6666666666666666 x (fma 2.0 x 1.0)) wj)))
                 wj
                 (* -2.0 x))
                wj
                x)
               (- wj (/ wj (- wj -1.0)))))
            double code(double wj, double x) {
            	double tmp;
            	if (wj <= 0.0185) {
            		tmp = fma(fma(fma(2.5, x, (1.0 - (fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, (-2.0 * x)), wj, x);
            	} else {
            		tmp = wj - (wj / (wj - -1.0));
            	}
            	return tmp;
            }
            
            function code(wj, x)
            	tmp = 0.0
            	if (wj <= 0.0185)
            		tmp = fma(fma(fma(2.5, x, Float64(1.0 - Float64(fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, Float64(-2.0 * x)), wj, x);
            	else
            		tmp = Float64(wj - Float64(wj / Float64(wj - -1.0)));
            	end
            	return tmp
            end
            
            code[wj_, x_] := If[LessEqual[wj, 0.0185], N[(N[(N[(2.5 * x + N[(1.0 - N[(N[(0.6666666666666666 * x + N[(2.0 * x + 1.0), $MachinePrecision]), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
            
            
            Derivation
            1. Split input into 2 regimes
            2. if wj < 0.0184999999999999991

              1. Initial program 76.5%

                \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
              2. Add Preprocessing
              3. Taylor expanded in wj around 0

                \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
              4. Applied rewrites 96.9%

                \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]

              if 0.0184999999999999991 < wj

              1. Initial program 55.8%

                \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
              2. Add Preprocessing
              3. Taylor expanded in x around 0

                \[\leadsto wj - \color{blue}{\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}} \]
              4. Step-by-step derivation
                1. distribute-rgt1-in N/A

                  \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(wj + 1\right) \cdot e^{wj}}} \]
                2. +-commutative N/A

                  \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(1 + wj\right)} \cdot e^{wj}} \]
                3. times-frac N/A

                  \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj} \cdot \frac{e^{wj}}{e^{wj}}} \]
                4. *-inverses N/A

                  \[\leadsto wj - \frac{wj}{1 + wj} \cdot \color{blue}{1} \]
                5. associate-*l/ N/A

                  \[\leadsto wj - \color{blue}{\frac{wj \cdot 1}{1 + wj}} \]
                6. *-rgt-identity N/A

                  \[\leadsto wj - \frac{\color{blue}{wj}}{1 + wj} \]
                7. lower-/.f64 N/A

                  \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
                8. lower-+.f64 84.7%

                  \[\leadsto wj - \frac{wj}{\color{blue}{1 + wj}} \]
              5. Applied rewrites 84.7%

                \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
            3. Recombined 2 regimes into one program.
            4. Final simplification 96.6%

              \[\leadsto \begin{array}{l} \mathbf{if}\;wj \leq 0.0185:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \]
            5. Add Preprocessing
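
For reference, the else branch of this alternative has a simple closed form: subtracting wj/(wj + 1) from wj leaves wj²/(1 + wj), the x → 0 limit of the Newton step derived for the second regime above.

\[ wj - \frac{wj}{wj + 1} = \frac{wj \cdot \left(wj + 1\right) - wj}{wj + 1} = \frac{{wj}^2}{1 + wj} \]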

            Alternative 6: 97.5% accurate, 7.0× speedup

            \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;wj \leq 0.0185:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(wj, \frac{1 - wj}{x} + \mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right), -2\right) \cdot x, wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \end{array} \]
            (FPCore (wj x)
             :precision binary64
             (if (<= wj 0.0185)
               (fma
                (* (fma wj (+ (/ (- 1.0 wj) x) (fma -2.6666666666666665 wj 2.5)) -2.0) x)
                wj
                x)
               (- wj (/ wj (- wj -1.0)))))
            double code(double wj, double x) {
            	double tmp;
            	if (wj <= 0.0185) {
            		tmp = fma((fma(wj, (((1.0 - wj) / x) + fma(-2.6666666666666665, wj, 2.5)), -2.0) * x), wj, x);
            	} else {
            		tmp = wj - (wj / (wj - -1.0));
            	}
            	return tmp;
            }
            
            function code(wj, x)
            	tmp = 0.0
            	if (wj <= 0.0185)
            		tmp = fma(Float64(fma(wj, Float64(Float64(Float64(1.0 - wj) / x) + fma(-2.6666666666666665, wj, 2.5)), -2.0) * x), wj, x);
            	else
            		tmp = Float64(wj - Float64(wj / Float64(wj - -1.0)));
            	end
            	return tmp
            end
            
            code[wj_, x_] := If[LessEqual[wj, 0.0185], N[(N[(N[(wj * N[(N[(N[(1.0 - wj), $MachinePrecision] / x), $MachinePrecision] + N[(-2.6666666666666665 * wj + 2.5), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] * x), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
            
            
            Derivation
            1. Split input into 2 regimes
            2. if wj < 0.0184999999999999991

              1. Initial program 76.5%

                \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
              2. Add Preprocessing
              3. Taylor expanded in wj around 0

                \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
              4. Applied rewrites 96.9%

                \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
              5. Taylor expanded in x around inf

                \[\leadsto \mathsf{fma}\left(x \cdot \left(\left(wj \cdot \left(\frac{5}{2} - \frac{8}{3} \cdot wj\right) + \frac{wj \cdot \left(1 - wj\right)}{x}\right) - 2\right), wj, x\right) \]
              6. Step-by-step derivation
                1. Applied rewrites 96.9%

                  \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(wj, \mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right) + \frac{1 - wj}{x}, -2\right) \cdot x, wj, x\right) \]

                if 0.0184999999999999991 < wj

                1. Initial program 55.8%

                  \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                2. Add Preprocessing
                3. Taylor expanded in x around 0

                  \[\leadsto wj - \color{blue}{\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}} \]
                4. Step-by-step derivation
                  1. distribute-rgt1-in N/A

                    \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(wj + 1\right) \cdot e^{wj}}} \]
                  2. +-commutative N/A

                    \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(1 + wj\right)} \cdot e^{wj}} \]
                  3. times-frac N/A

                    \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj} \cdot \frac{e^{wj}}{e^{wj}}} \]
                  4. *-inverses N/A

                    \[\leadsto wj - \frac{wj}{1 + wj} \cdot \color{blue}{1} \]
                  5. associate-*l/ N/A

                    \[\leadsto wj - \color{blue}{\frac{wj \cdot 1}{1 + wj}} \]
                  6. *-rgt-identity N/A

                    \[\leadsto wj - \frac{\color{blue}{wj}}{1 + wj} \]
                  7. lower-/.f64 N/A

                    \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
                  8. lower-+.f64 84.7%

                    \[\leadsto wj - \frac{wj}{\color{blue}{1 + wj}} \]
                5. Applied rewrites 84.7%

                  \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
              7. Recombined 2 regimes into one program.
              8. Final simplification 96.5%

                \[\leadsto \begin{array}{l} \mathbf{if}\;wj \leq 0.0185:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(wj, \frac{1 - wj}{x} + \mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right), -2\right) \cdot x, wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \]
              9. Add Preprocessing
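
Despite the division by x inside the fma, the small-wj branch of this alternative is algebraically the same cubic as Alternative 5's branch, just regrouped so that one factor of x is divided out and multiplied back in; expanding either form in exact arithmetic gives

\[ x - 2 x \cdot wj + \left(1 + \frac{5}{2} x\right) \cdot {wj}^2 - \left(1 + \frac{8}{3} x\right) \cdot {wj}^3 \]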

              Alternative 7: 96.9% accurate, 13.2× speedup

              \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;wj \leq 0.015:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, wj, -2\right), x, wj\right), wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \end{array} \]
              (FPCore (wj x)
               :precision binary64
               (if (<= wj 0.015)
                 (fma (fma (fma 2.5 wj -2.0) x wj) wj x)
                 (- wj (/ wj (- wj -1.0)))))
              double code(double wj, double x) {
              	double tmp;
              	if (wj <= 0.015) {
              		tmp = fma(fma(fma(2.5, wj, -2.0), x, wj), wj, x);
              	} else {
              		tmp = wj - (wj / (wj - -1.0));
              	}
              	return tmp;
              }
              
              function code(wj, x)
              	tmp = 0.0
              	if (wj <= 0.015)
              		tmp = fma(fma(fma(2.5, wj, -2.0), x, wj), wj, x);
              	else
              		tmp = Float64(wj - Float64(wj / Float64(wj - -1.0)));
              	end
              	return tmp
              end
              
              code[wj_, x_] := If[LessEqual[wj, 0.015], N[(N[(N[(2.5 * wj + -2.0), $MachinePrecision] * x + wj), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
              
              
              Derivation
              1. Split input into 2 regimes
              2. if wj < 0.014999999999999999

                1. Initial program 76.5%

                  \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                2. Add Preprocessing
                3. Taylor expanded in wj around 0

                  \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                4. Applied rewrites 96.9%

                  \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
                5. Taylor expanded in x around inf

                  \[\leadsto \mathsf{fma}\left(x \cdot \left(\left(wj \cdot \left(\frac{5}{2} - \frac{8}{3} \cdot wj\right) + \frac{wj \cdot \left(1 - wj\right)}{x}\right) - 2\right), wj, x\right) \]
                6. Step-by-step derivation
                  1. Applied rewrites 96.9%

                    \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(wj, \mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right) + \frac{1 - wj}{x}, -2\right) \cdot x, wj, x\right) \]
                  2. Taylor expanded in wj around 0

                    \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                  3. Applied rewrites 96.3%

                    \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, wj, -2\right), x, wj\right), wj, x\right)} \]

                  if 0.014999999999999999 < wj

                  1. Initial program 55.8%

                    \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                  2. Add Preprocessing
                  3. Taylor expanded in x around 0

                    \[\leadsto wj - \color{blue}{\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}} \]
                  4. Step-by-step derivation
                    1. distribute-rgt1-in N/A

                      \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(wj + 1\right) \cdot e^{wj}}} \]
                    2. +-commutative N/A

                      \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(1 + wj\right)} \cdot e^{wj}} \]
                    3. times-frac N/A

                      \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj} \cdot \frac{e^{wj}}{e^{wj}}} \]
                    4. *-inverses N/A

                      \[\leadsto wj - \frac{wj}{1 + wj} \cdot \color{blue}{1} \]
                    5. associate-*l/ N/A

                      \[\leadsto wj - \color{blue}{\frac{wj \cdot 1}{1 + wj}} \]
                    6. *-rgt-identity N/A

                      \[\leadsto wj - \frac{\color{blue}{wj}}{1 + wj} \]
                    7. lower-/.f64 N/A

                      \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
                    8. lower-+.f64 84.7%

                      \[\leadsto wj - \frac{wj}{\color{blue}{1 + wj}} \]
                  5. Applied rewrites 84.7%

                    \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
                7. Recombined 2 regimes into one program.
                8. Final simplification 96.0%

                  \[\leadsto \begin{array}{l} \mathbf{if}\;wj \leq 0.015:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, wj, -2\right), x, wj\right), wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \]
                9. Add Preprocessing

                Alternative 8: 96.8% accurate, 13.8× speedup

                \[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;wj \leq 0.0125:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \end{array} \]
                (FPCore (wj x)
                 :precision binary64
                 (if (<= wj 0.0125) (fma (fma -2.0 x wj) wj x) (- wj (/ wj (- wj -1.0)))))
                double code(double wj, double x) {
                	double tmp;
                	if (wj <= 0.0125) {
                		tmp = fma(fma(-2.0, x, wj), wj, x);
                	} else {
                		tmp = wj - (wj / (wj - -1.0));
                	}
                	return tmp;
                }
                
                function code(wj, x)
                	tmp = 0.0
                	if (wj <= 0.0125)
                		tmp = fma(fma(-2.0, x, wj), wj, x);
                	else
                		tmp = Float64(wj - Float64(wj / Float64(wj - -1.0)));
                	end
                	return tmp
                end
                
                code[wj_, x_] := If[LessEqual[wj, 0.0125], N[(N[(-2.0 * x + wj), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
                
                
                Derivation
                1. Split input into 2 regimes
                2. if wj < 0.012500000000000001

                  1. Initial program 76.5%

                    \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                  2. Add Preprocessing
                  3. Taylor expanded in wj around 0

                    \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                  4. Applied rewrites 96.9%

                    \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
                  5. Taylor expanded in x around inf

                    \[\leadsto \mathsf{fma}\left(x \cdot \left(\left(wj \cdot \left(\frac{5}{2} - \frac{8}{3} \cdot wj\right) + \frac{wj \cdot \left(1 - wj\right)}{x}\right) - 2\right), wj, x\right) \]
                  6. Step-by-step derivation
                    1. Applied rewrites 96.9%

                      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(wj, \mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right) + \frac{1 - wj}{x}, -2\right) \cdot x, wj, x\right) \]
                    2. Taylor expanded in wj around 0

                      \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                    3. Applied rewrites 96.3%

                      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, wj, -2\right), x, wj\right), wj, x\right)} \]
                    4. Taylor expanded in wj around 0

                      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right) \]
                    5. Step-by-step derivation
                      1. Applied rewrites 95.9%

                        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right) \]

                      if 0.012500000000000001 < wj

                      1. Initial program 55.8%

                        \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                      2. Add Preprocessing
                      3. Taylor expanded in x around 0

                        \[\leadsto wj - \color{blue}{\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}} \]
                      4. Step-by-step derivation
                        1. distribute-rgt1-in N/A

                          \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(wj + 1\right) \cdot e^{wj}}} \]
                        2. +-commutative N/A

                          \[\leadsto wj - \frac{wj \cdot e^{wj}}{\color{blue}{\left(1 + wj\right)} \cdot e^{wj}} \]
                        3. times-frac N/A

                          \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj} \cdot \frac{e^{wj}}{e^{wj}}} \]
                        4. *-inverses N/A

                          \[\leadsto wj - \frac{wj}{1 + wj} \cdot \color{blue}{1} \]
                        5. associate-*l/ N/A

                          \[\leadsto wj - \color{blue}{\frac{wj \cdot 1}{1 + wj}} \]
                        6. *-rgt-identity N/A

                          \[\leadsto wj - \frac{\color{blue}{wj}}{1 + wj} \]
                        7. lower-/.f64 N/A

                          \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
                        8. lower-+.f64 84.7%

                          \[\leadsto wj - \frac{wj}{\color{blue}{1 + wj}} \]
                      5. Applied rewrites 84.7%

                        \[\leadsto wj - \color{blue}{\frac{wj}{1 + wj}} \]
                    6. Recombined 2 regimes into one program.
                    7. Final simplification 95.6%

                      \[\leadsto \begin{array}{l} \mathbf{if}\;wj \leq 0.0125:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right)\\ \mathbf{else}:\\ \;\;\;\;wj - \frac{wj}{wj - -1}\\ \end{array} \]
                    8. Add Preprocessing

                    Alternative 9: 95.5% accurate, 25.5× speedup

                    \[\begin{array}{l} \\ \mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right) \end{array} \]
                    (FPCore (wj x) :precision binary64 (fma (fma -2.0 x wj) wj x))
                    double code(double wj, double x) {
                    	return fma(fma(-2.0, x, wj), wj, x);
                    }
                    
                    function code(wj, x)
                    	return fma(fma(-2.0, x, wj), wj, x)
                    end
                    
                    code[wj_, x_] := N[(N[(-2.0 * x + wj), $MachinePrecision] * wj + x), $MachinePrecision]
                    
                    
                    Derivation
                    1. Initial program 75.9%

                      \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                    2. Add Preprocessing
                    3. Taylor expanded in wj around 0

                      \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                    4. Applied rewrites 94.4%

                      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
                    5. Taylor expanded in x around inf

                      \[\leadsto \mathsf{fma}\left(x \cdot \left(\left(wj \cdot \left(\frac{5}{2} - \frac{8}{3} \cdot wj\right) + \frac{wj \cdot \left(1 - wj\right)}{x}\right) - 2\right), wj, x\right) \]
                    6. Step-by-step derivation
                      1. Applied rewrites 94.4%

                        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(wj, \mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right) + \frac{1 - wj}{x}, -2\right) \cdot x, wj, x\right) \]
                      2. Taylor expanded in wj around 0

                        \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                      3. Applied rewrites 94.0%

                        \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, wj, -2\right), x, wj\right), wj, x\right)} \]
                      4. Taylor expanded in wj around 0

                        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right) \]
                      5. Step-by-step derivation
                        1. Applied rewrites 93.6%

                          \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right) \]
                        2. Add Preprocessing

                        Alternative 10: 14.7% accurate, 55.2× speedup?

                        \[\begin{array}{l} \\ wj \cdot wj \end{array} \]
                        (FPCore (wj x) :precision binary64 (* wj wj))
                        double code(double wj, double x) {
                        	return wj * wj;
                        }
                        
                        real(8) function code(wj, x)
                            real(8), intent (in) :: wj
                            real(8), intent (in) :: x
                            code = wj * wj
                        end function
                        
                        public static double code(double wj, double x) {
                        	return wj * wj;
                        }
                        
                        def code(wj, x):
                        	return wj * wj
                        
                        function code(wj, x)
                        	return Float64(wj * wj)
                        end
                        
                        function tmp = code(wj, x)
                        	tmp = wj * wj;
                        end
                        
                        code[wj_, x_] := N[(wj * wj), $MachinePrecision]
                        
                        \begin{array}{l}
                        
                        \\
                        wj \cdot wj
                        \end{array}
                        
                        Derivation
                        1. Initial program 75.9%

                          \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                        2. Add Preprocessing
                        3. Taylor expanded in wj around 0

                          \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(\left(1 + -1 \cdot \left(wj \cdot \left(1 + \left(-3 \cdot x + \left(-2 \cdot \left(-4 \cdot x + \frac{3}{2} \cdot x\right) + \frac{2}{3} \cdot x\right)\right)\right)\right)\right) - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                        4. Applied rewrites 94.4%

                          \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)} \]
                        5. Taylor expanded in wj around 0

                          \[\leadsto \color{blue}{x + wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right)} \]
                        6. Step-by-step derivation
                          1. +-commutative N/A

                            \[\leadsto \color{blue}{wj \cdot \left(wj \cdot \left(1 - \left(-4 \cdot x + \frac{3}{2} \cdot x\right)\right) - 2 \cdot x\right) + x} \]
                        7. Applied rewrites 94.0%

                          \[\leadsto \color{blue}{\mathsf{fma}\left(wj + x \cdot \mathsf{fma}\left(2.5, wj, -2\right), wj, x\right)} \]
                        8. Taylor expanded in x around 0

                          \[\leadsto {wj}^{\color{blue}{2}} \]
                        9. Step-by-step derivation
                          1. Applied rewrites 15.8%

                            \[\leadsto wj \cdot \color{blue}{wj} \]
                          2. Add Preprocessing
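
                        For intuition (this note is not part of the generated derivation): at x = 0 the initial program collapses exactly to wj²/(1 + wj), which agrees with the Taylor result wj · wj for small wj:

                          \[wj - \frac{wj \cdot e^{wj} - 0}{e^{wj} + wj \cdot e^{wj}} = wj - \frac{wj}{1 + wj} = \frac{{wj}^{2}}{1 + wj} \approx {wj}^{2} \]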

                          Alternative 11: 4.2% accurate, 82.8× speedup?

                          \[\begin{array}{l} \\ -1 + wj \end{array} \]
                          (FPCore (wj x) :precision binary64 (+ -1.0 wj))
                          double code(double wj, double x) {
                          	return -1.0 + wj;
                          }
                          
                          real(8) function code(wj, x)
                              real(8), intent (in) :: wj
                              real(8), intent (in) :: x
                              code = (-1.0d0) + wj
                          end function
                          
                          public static double code(double wj, double x) {
                          	return -1.0 + wj;
                          }
                          
                          def code(wj, x):
                          	return -1.0 + wj
                          
                          function code(wj, x)
                          	return Float64(-1.0 + wj)
                          end
                          
                          function tmp = code(wj, x)
                          	tmp = -1.0 + wj;
                          end
                          
                          code[wj_, x_] := N[(-1.0 + wj), $MachinePrecision]
                          
                          \begin{array}{l}
                          
                          \\
                          -1 + wj
                          \end{array}
                          
                          Derivation
                          1. Initial program 75.9%

                            \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                          2. Add Preprocessing
                          3. Taylor expanded in wj around inf

                            \[\leadsto \color{blue}{wj \cdot \left(1 - \frac{1}{wj}\right)} \]
                          4. Step-by-step derivation
                            1. sub-neg N/A

                              \[\leadsto wj \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\frac{1}{wj}\right)\right)\right)} \]
                            2. +-commutative N/A

                              \[\leadsto wj \cdot \color{blue}{\left(\left(\mathsf{neg}\left(\frac{1}{wj}\right)\right) + 1\right)} \]
                            3. distribute-lft-in N/A

                              \[\leadsto \color{blue}{wj \cdot \left(\mathsf{neg}\left(\frac{1}{wj}\right)\right) + wj \cdot 1} \]
                            4. distribute-rgt-neg-out N/A

                              \[\leadsto \color{blue}{\left(\mathsf{neg}\left(wj \cdot \frac{1}{wj}\right)\right)} + wj \cdot 1 \]
                            5. rgt-mult-inverse N/A

                              \[\leadsto \left(\mathsf{neg}\left(\color{blue}{1}\right)\right) + wj \cdot 1 \]
                            6. metadata-eval N/A

                              \[\leadsto \color{blue}{-1} + wj \cdot 1 \]
                            7. *-rgt-identity N/A

                              \[\leadsto -1 + \color{blue}{wj} \]
                            8. lower-+.f64 4.3%

                              \[\leadsto \color{blue}{-1 + wj} \]
                          5. Applied rewrites 4.3%

                            \[\leadsto \color{blue}{-1 + wj} \]
                          6. Add Preprocessing
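
                          For intuition (this note is not part of the generated derivation): when wj is large, the x term in the numerator is negligible next to wj · e^{wj}, so the Newton step behaves like

                            \[wj - \frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}} = wj - \frac{wj}{1 + wj} = \frac{{wj}^{2}}{1 + wj} \approx wj - 1 \]

                          which is how the Taylor expansion around infinity arrives at -1 + wj.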

                          Alternative 12: 3.3% accurate, 331.0× speedup?

                          \[\begin{array}{l} \\ -1 \end{array} \]
                          (FPCore (wj x) :precision binary64 -1.0)
                          double code(double wj, double x) {
                          	return -1.0;
                          }
                          
                          real(8) function code(wj, x)
                              real(8), intent (in) :: wj
                              real(8), intent (in) :: x
                              code = -1.0d0
                          end function
                          
                          public static double code(double wj, double x) {
                          	return -1.0;
                          }
                          
                          def code(wj, x):
                          	return -1.0
                          
                          function code(wj, x)
                          	return -1.0
                          end
                          
                          function tmp = code(wj, x)
                          	tmp = -1.0;
                          end
                          
                          code[wj_, x_] := -1.0
                          
                          \begin{array}{l}
                          
                          \\
                          -1
                          \end{array}
                          
                          Derivation
                          1. Initial program 75.9%

                            \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
                          2. Add Preprocessing
                          3. Taylor expanded in wj around inf

                            \[\leadsto \color{blue}{wj \cdot \left(1 - \frac{1}{wj}\right)} \]
                          4. Step-by-step derivation
                            1. sub-neg N/A

                              \[\leadsto wj \cdot \color{blue}{\left(1 + \left(\mathsf{neg}\left(\frac{1}{wj}\right)\right)\right)} \]
                            2. +-commutative N/A

                              \[\leadsto wj \cdot \color{blue}{\left(\left(\mathsf{neg}\left(\frac{1}{wj}\right)\right) + 1\right)} \]
                            3. distribute-lft-in N/A

                              \[\leadsto \color{blue}{wj \cdot \left(\mathsf{neg}\left(\frac{1}{wj}\right)\right) + wj \cdot 1} \]
                            4. distribute-rgt-neg-out N/A

                              \[\leadsto \color{blue}{\left(\mathsf{neg}\left(wj \cdot \frac{1}{wj}\right)\right)} + wj \cdot 1 \]
                            5. rgt-mult-inverse N/A

                              \[\leadsto \left(\mathsf{neg}\left(\color{blue}{1}\right)\right) + wj \cdot 1 \]
                            6. metadata-eval N/A

                              \[\leadsto \color{blue}{-1} + wj \cdot 1 \]
                            7. *-rgt-identity N/A

                              \[\leadsto -1 + \color{blue}{wj} \]
                            8. lower-+.f64 4.3%

                              \[\leadsto \color{blue}{-1 + wj} \]
                          5. Applied rewrites 4.3%

                            \[\leadsto \color{blue}{-1 + wj} \]
                          6. Taylor expanded in wj around 0

                            \[\leadsto -1 \]
                          7. Step-by-step derivation
                            1. Applied rewrites 3.1%

                              \[\leadsto -1 \]
                            2. Add Preprocessing

                            Developer Target 1: 78.6% accurate, 1.4× speedup?

                            \[\begin{array}{l} \\ wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right) \end{array} \]
                            (FPCore (wj x)
                             :precision binary64
                             (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
                            double code(double wj, double x) {
                            	return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
                            }
                            
                            real(8) function code(wj, x)
                                real(8), intent (in) :: wj
                                real(8), intent (in) :: x
                                code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
                            end function
                            
                            public static double code(double wj, double x) {
                            	return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
                            }
                            
                            def code(wj, x):
                            	return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
                            
                            function code(wj, x)
                            	return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj))))))
                            end
                            
                            function tmp = code(wj, x)
                            	tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
                            end
                            
                            code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
                            
                            \begin{array}{l}
                            
                            \\
                            wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
                            \end{array}
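
                            A note on the target (not part of the generated report): the developer's expression is an exact algebraic rearrangement of the initial program, obtained by splitting the fraction over the common denominator e^{wj} · (1 + wj):

                              \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = wj - \left(\frac{wj \cdot e^{wj}}{e^{wj} \cdot \left(1 + wj\right)} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right) = wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right) \]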
                            

                            Reproduce

                            ?
                            herbie shell --seed 2024332 
                            (FPCore (wj x)
                              :name "Jmat.Real.lambertw, newton loop step"
                              :precision binary64
                            
                              :alt
                              (! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
                            
                              (- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))
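
                            To rerun this case locally, one option (a sketch; it assumes Herbie is installed and on PATH, and the file name is mine) is to save the FPCore above to a file and feed it to the shell on standard input with the same seed:

                            herbie shell --seed 2024332 < lambertw-newton-step.fpcore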