
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
import math

def code(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
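For context, the :name metadata in the Reproduce section at the end ("Jmat.Real.lambertw, newton loop step") indicates that this expression is one Newton step for the Lambert W equation w * exp(w) = x. With f(w) = w e^w - x and f'(w) = e^w + w e^w, the Newton update reproduces the program above exactly:
\begin{array}{l}
w_{j+1} = w_j - \frac{f(w_j)}{f'(w_j)} = w_j - \frac{w_j e^{w_j} - x}{e^{w_j} + w_j e^{w_j}}
\end{array}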
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 4e-18)
(+
x
(*
wj
(+
(* x -2.0)
(* wj (- 1.0 (+ wj (* x (+ -2.5 (* wj 2.6666666666666665)))))))))
(+ (/ x (* (exp wj) (+ wj 1.0))) (+ wj (/ wj (- -1.0 wj)))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4e-18) {
tmp = x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))));
} else {
tmp = (x / (exp(wj) * (wj + 1.0))) + (wj + (wj / (-1.0 - wj)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = wj * exp(wj)
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4d-18) then
tmp = x + (wj * ((x * (-2.0d0)) + (wj * (1.0d0 - (wj + (x * ((-2.5d0) + (wj * 2.6666666666666665d0))))))))
else
tmp = (x / (exp(wj) * (wj + 1.0d0))) + (wj + (wj / ((-1.0d0) - wj)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double tmp;
if ((wj + ((x - t_0) / (Math.exp(wj) + t_0))) <= 4e-18) {
tmp = x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))));
} else {
tmp = (x / (Math.exp(wj) * (wj + 1.0))) + (wj + (wj / (-1.0 - wj)));
}
return tmp;
}
import math

def code(wj, x):
    t_0 = wj * math.exp(wj)
    tmp = 0
    if (wj + ((x - t_0) / (math.exp(wj) + t_0))) <= 4e-18:
        tmp = x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))))
    else:
        tmp = (x / (math.exp(wj) * (wj + 1.0))) + (wj + (wj / (-1.0 - wj)))
    return tmp
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    tmp = 0.0
    if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 4e-18)
        tmp = Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(1.0 - Float64(wj + Float64(x * Float64(-2.5 + Float64(wj * 2.6666666666666665)))))))))
    else
        tmp = Float64(Float64(x / Float64(exp(wj) * Float64(wj + 1.0))) + Float64(wj + Float64(wj / Float64(-1.0 - wj))))
    end
    return tmp
end
function tmp_2 = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = 0.0;
    if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4e-18)
        tmp = x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))));
    else
        tmp = (x / (exp(wj) * (wj + 1.0))) + (wj + (wj / (-1.0 - wj)));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4e-18], N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(1.0 - N[(wj + N[(x * N[(-2.5 + N[(wj * 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t_0}{e^{wj} + t_0} \leq 4 \cdot 10^{-18}:\\
\;\;\;\;x + wj \cdot \left(x \cdot -2 + wj \cdot \left(1 - \left(wj + x \cdot \left(-2.5 + wj \cdot 2.6666666666666665\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)} + \left(wj + \frac{wj}{-1 - wj}\right)
\end{array}
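Each alternative computes a single Newton step, so a caller would iterate it until the iterates stop moving. The driver below is a hypothetical sketch, not part of the Herbie output: the starting guess, tolerance, and iteration cap are illustrative assumptions, and code is the double-precision step defined above.
#include <math.h>

double code(double wj, double x);  /* one Newton step, as defined above */

/* Hypothetical driver: iterate the Newton step to approximate the
   Lambert W function, i.e. solve w * exp(w) = x for w. */
double lambertw(double x) {
    double wj = (x > 1.0) ? log(x) : x;  /* crude starting guess (assumption) */
    for (int i = 0; i < 64; i++) {
        double next = code(wj, x);
        if (fabs(next - wj) <= 1e-15 * fabs(next))  /* iterates agree to ~1 ulp */
            return next;
        wj = next;
    }
    return wj;  /* iteration cap reached; return the last iterate */
}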
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 4.0000000000000003e-18
Initial program 68.3%
Taylor expanded in wj around 0
Simplified 99.4%
Taylor expanded in wj around 0
distribute-rgt-in N/A
*-lft-identity N/A
associate--l+ N/A
sub-neg N/A
*-commutative N/A
distribute-rgt-out N/A
metadata-eval N/A
associate-*r* N/A
*-commutative N/A
metadata-eval N/A
distribute-lft-neg-in N/A
associate-*l* N/A
distribute-neg-in N/A
distribute-rgt-in N/A
+-commutative N/A
mul-1-neg N/A
+-lowering-+.f64 N/A
mul-1-neg N/A
neg-sub0 N/A
Simplified 99.4%
if 4.0000000000000003e-18 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))
Initial program 93.5%
Taylor expanded in x around 0
+-commutative N/A
associate--l+ N/A
+-lowering-+.f64 N/A
/-lowering-/.f64 N/A
distribute-rgt1-in N/A
+-commutative N/A
*-commutative N/A
*-lowering-*.f64 N/A
exp-lowering-exp.f64 N/A
+-commutative N/A
+-lowering-+.f64 N/A
distribute-rgt1-in N/A
+-commutative N/A
times-frac N/A
*-inverses N/A
associate-*l/ N/A
Simplified 99.8%
Final simplification 99.5%
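The else branch on this path is an exact algebraic rearrangement of the original step, presumably what the times-frac and associate-*l/ rewrites above establish: factoring e^{wj} out of the denominator and splitting the fraction gives
\begin{array}{l}
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = wj - \frac{wj}{wj + 1} + \frac{x}{e^{wj} \cdot \left(wj + 1\right)}
\end{array}
where wj - wj/(wj + 1) appears in the code as wj + wj/(-1 - wj).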
(FPCore (wj x)
:precision binary64
(+
x
(*
wj
(+
(* x -2.0)
(* wj (- 1.0 (+ wj (* x (+ -2.5 (* wj 2.6666666666666665))))))))))
double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((x * (-2.0d0)) + (wj * (1.0d0 - (wj + (x * ((-2.5d0) + (wj * 2.6666666666666665d0))))))))
end function
public static double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))));
}
import math

def code(wj, x):
    return x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(1.0 - Float64(wj + Float64(x * Float64(-2.5 + Float64(wj * 2.6666666666666665)))))))))
end
function tmp = code(wj, x)
    tmp = x + (wj * ((x * -2.0) + (wj * (1.0 - (wj + (x * (-2.5 + (wj * 2.6666666666666665))))))));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(1.0 - N[(wj + N[(x * N[(-2.5 + N[(wj * 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(x \cdot -2 + wj \cdot \left(1 - \left(wj + x \cdot \left(-2.5 + wj \cdot 2.6666666666666665\right)\right)\right)\right)
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
Simplified 97.4%
Taylor expanded in wj around 0
distribute-rgt-in N/A
*-lft-identity N/A
associate--l+ N/A
sub-neg N/A
*-commutative N/A
distribute-rgt-out N/A
metadata-eval N/A
associate-*r* N/A
*-commutative N/A
metadata-eval N/A
distribute-lft-neg-in N/A
associate-*l* N/A
distribute-neg-in N/A
distribute-rgt-in N/A
+-commutative N/A
mul-1-neg N/A
+-lowering-+.f64 N/A
mul-1-neg N/A
neg-sub0 N/A
Simplified 97.4%
Final simplification 97.4%
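The Taylor step here can be checked by hand. Writing the original step as g(w) = w - (w e^w - x) e^{-w}/(1 + w) and expanding e^{-w}/(1 + w) = 1 - 2w + (5/2)w^2 - (8/3)w^3 + O(w^4) around w = 0 gives
\begin{array}{l}
g(w) = x - 2xw + \left(1 + \tfrac{5}{2}x\right)w^{2} - \left(1 + \tfrac{8}{3}x\right)w^{3} + O(w^{4})
\end{array}
which is exactly the Horner form above; the constant 2.6666666666666665 is 8/3 rounded to binary64.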
(FPCore (wj x) :precision binary64 (+ x (* wj (+ (* x -2.0) (* wj (- 1.0 (- wj (* x 2.5))))))))
double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj * (1.0 - (wj - (x * 2.5))))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((x * (-2.0d0)) + (wj * (1.0d0 - (wj - (x * 2.5d0))))))
end function
public static double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj * (1.0 - (wj - (x * 2.5))))));
}
def code(wj, x):
    return x + (wj * ((x * -2.0) + (wj * (1.0 - (wj - (x * 2.5))))))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(1.0 - Float64(wj - Float64(x * 2.5)))))))
end
function tmp = code(wj, x)
    tmp = x + (wj * ((x * -2.0) + (wj * (1.0 - (wj - (x * 2.5))))));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(1.0 - N[(wj - N[(x * 2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(x \cdot -2 + wj \cdot \left(1 - \left(wj - x \cdot 2.5\right)\right)\right)
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
Simplified 97.4%
Taylor expanded in x around 0
Simplified 97.2%
(FPCore (wj x) :precision binary64 (+ x (* wj (+ (* x -2.0) (- wj (* wj wj))))))
double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj - (wj * wj))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((x * (-2.0d0)) + (wj - (wj * wj))))
end function
public static double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj - (wj * wj))));
}
def code(wj, x):
    return x + (wj * ((x * -2.0) + (wj - (wj * wj))))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj - Float64(wj * wj)))))
end
function tmp = code(wj, x)
    tmp = x + (wj * ((x * -2.0) + (wj - (wj * wj))));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj - N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(x \cdot -2 + \left(wj - wj \cdot wj\right)\right)
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
Simplified 97.4%
Taylor expanded in x around 0
Simplified 97.1%
sub-neg N/A
distribute-rgt-in N/A
*-lft-identity N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
neg-sub0 N/A
--lowering--.f64 97.1%
Applied egg-rr 97.1%
Final simplification 97.1%
(FPCore (wj x) :precision binary64 (+ x (* wj (+ (* x -2.0) (* wj (- 1.0 wj))))))
double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj * (1.0 - wj))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((x * (-2.0d0)) + (wj * (1.0d0 - wj))))
end function
public static double code(double wj, double x) {
return x + (wj * ((x * -2.0) + (wj * (1.0 - wj))));
}
def code(wj, x):
    return x + (wj * ((x * -2.0) + (wj * (1.0 - wj))))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(1.0 - wj)))))
end
function tmp = code(wj, x)
    tmp = x + (wj * ((x * -2.0) + (wj * (1.0 - wj))));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(x \cdot -2 + wj \cdot \left(1 - wj\right)\right)
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
Simplified 97.4%
Taylor expanded in x around 0
Simplified 97.1%
(FPCore (wj x) :precision binary64 (+ x (* wj (- wj (* wj wj)))))
double code(double wj, double x) {
return x + (wj * (wj - (wj * wj)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (wj - (wj * wj)))
end function
public static double code(double wj, double x) {
return x + (wj * (wj - (wj * wj)));
}
def code(wj, x):
    return x + (wj * (wj - (wj * wj)))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(wj - Float64(wj * wj))))
end
function tmp = code(wj, x)
    tmp = x + (wj * (wj - (wj * wj)));
end
code[wj_, x_] := N[(x + N[(wj * N[(wj - N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(wj - wj \cdot wj\right)
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
Simplified 97.4%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
--lowering--.f64 96.6%
Simplified 96.6%
sub-neg N/A
distribute-rgt-in N/A
*-lft-identity N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
neg-sub0 N/A
--lowering--.f64 96.6%
Applied egg-rr 96.6%
Final simplification 96.6%
(FPCore (wj x) :precision binary64 (+ x (* wj (* wj (- 1.0 wj)))))
double code(double wj, double x) {
return x + (wj * (wj * (1.0 - wj)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (wj * (1.0d0 - wj)))
end function
public static double code(double wj, double x) {
return x + (wj * (wj * (1.0 - wj)));
}
def code(wj, x):
    return x + (wj * (wj * (1.0 - wj)))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(wj * Float64(1.0 - wj))))
end
function tmp = code(wj, x)
    tmp = x + (wj * (wj * (1.0 - wj)));
end
code[wj_, x_] := N[(x + N[(wj * N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(wj \cdot \left(1 - wj\right)\right)
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
Simplified 97.4%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
--lowering--.f64 96.6%
Simplified 96.6%
(FPCore (wj x) :precision binary64 (+ x (* wj wj)))
double code(double wj, double x) {
return x + (wj * wj);
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * wj)
end function
public static double code(double wj, double x) {
return x + (wj * wj);
}
def code(wj, x):
    return x + (wj * wj)
function code(wj, x)
    return Float64(x + Float64(wj * wj))
end
function tmp = code(wj, x)
    tmp = x + (wj * wj);
end
code[wj_, x_] := N[(x + N[(wj * wj), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot wj
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
cancel-sign-sub-inv N/A
metadata-eval N/A
+-commutative N/A
+-lowering-+.f64 N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
sub-neg N/A
+-lowering-+.f64 N/A
distribute-rgt-out N/A
distribute-rgt-neg-in N/A
*-lowering-*.f64 N/A
metadata-eval N/A
metadata-eval 96.8%
Simplified 96.8%
Taylor expanded in x around 0
Simplified 96.3%
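Expanded, the polynomial alternatives above are successively cruder truncations of the same degree-3 series, trading accuracy for speed:
\begin{array}{l}
x - 2xw + \left(1 + \tfrac{5}{2}x\right)w^{2} - w^{3}\\
x - 2xw + w^{2} - w^{3}\\
x + w^{2} - w^{3}\\
x + w^{2}
\end{array}
The two alternatives reported at 97.1% are the same polynomial factored two ways, as are the two at 96.6%.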
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x):
    return x
function code(wj, x)
    return x
end
function tmp = code(wj, x)
    tmp = x;
end
code[wj_, x_] := x
\begin{array}{l}
x
\end{array}
Initial program 76.1%
Taylor expanded in wj around 0
Simplified 84.7%
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x):
    return wj
function code(wj, x)
    return wj
end
function tmp = code(wj, x)
    tmp = wj;
end
code[wj_, x_] := wj
\begin{array}{l}
wj
\end{array}
Initial program 76.1%
Taylor expanded in wj around inf
Simplified 4.7%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
import math

def code(wj, x):
    return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x)
    return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj))))))
end
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
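This last program matches the :alt annotation in the FPCore below, i.e. the developer's own rewrite. It is algebraically identical to the initial program; the quotient is simply split so that the exponential can be hoisted into a temporary:
\begin{array}{l}
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}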
To reproduce this result, run herbie shell --seed 2024191 and paste in the FPCore below:
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))