; Original Newton-iteration step for the Lambert W function (solve w*e^w = x):
; wj - (wj*e^wj - x) / (e^wj + wj*e^wj), evaluated in binary64.
(FPCore (wj x) :precision binary64 (- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))
; Herbie-optimized FPCore: branches on how close the iterate already is to the
; root; near the root (residual <= ~6.46e-9) it uses a Taylor polynomial in wj
; around 0, otherwise an algebraically rearranged Newton step.
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))) (t_1 (/ x (* (exp wj) (+ wj 1.0)))))
(if (<= (- wj (/ (- t_0 x) (+ (exp wj) t_0))) 6.460728134164632e-9)
(fma 1.0 t_1 (- (- (fma wj wj (pow wj 4.0)) (pow wj 5.0)) (pow wj 3.0)))
(fma 1.0 t_1 (- (fma wj (/ 1.0 (+ wj 1.0)) (- wj)))))))
/* C translation of the original (unoptimized) Newton step above (L1). */
double code(double wj, double x) {
return wj - (((wj * exp(wj)) - x) / (exp(wj) + (wj * exp(wj))));
}
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = x / (exp(wj) * (wj + 1.0));
double tmp;
if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= 6.460728134164632e-9) {
tmp = fma(1.0, t_1, ((fma(wj, wj, pow(wj, 4.0)) - pow(wj, 5.0)) - pow(wj, 3.0)));
} else {
tmp = fma(1.0, t_1, -fma(wj, (1.0 / (wj + 1.0)), -wj));
}
return tmp;
}
"""
    code(wj, x)

Original (unoptimized) Newton step for the Lambert W function in binary64:
`wj - (wj*e^wj - x) / (e^wj + wj*e^wj)`.
"""
function code(wj, x)
    numer = Float64(Float64(wj * exp(wj)) - x)
    denom = Float64(exp(wj) + Float64(wj * exp(wj)))
    return Float64(wj - Float64(numer / denom))
end
"""
    code(wj, x)

Herbie-optimized Newton step for the Lambert W function. Near the root
(residual <= ~6.46e-9) a Taylor polynomial in `wj` is used; otherwise the
algebraically rearranged Newton step is taken.
"""
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    t_1 = Float64(x / Float64(exp(wj) * Float64(wj + 1.0)))
    residual = Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
    if residual <= 6.460728134164632e-9
        # Taylor branch: wj^2 + wj^4 - wj^5 - wj^3, plus t_1.
        poly = Float64(Float64(fma(wj, wj, (wj ^ 4.0)) - (wj ^ 5.0)) - (wj ^ 3.0))
        return fma(1.0, t_1, poly)
    else
        # Rearranged Newton step: t_1 - (wj/(wj+1) - wj), fused.
        step = Float64(-fma(wj, Float64(1.0 / Float64(wj + 1.0)), Float64(-wj)))
        return fma(1.0, t_1, step)
    end
end
(* Mathematica translation of the original Newton step:
   wj - (wj*E^wj - x)/(E^wj + wj*E^wj), each operation rounded to $MachinePrecision. *)
code[wj_, x_] := N[(wj - N[(N[(N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
(* Mathematica translation of the Herbie-optimized step: near the root
   (residual <= ~6.46e-9) use a Taylor polynomial in wj; otherwise the
   rearranged Newton step.  Each operation rounded to $MachinePrecision. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 6.460728134164632e-9], N[(1.0 * t$95$1 + N[(N[(N[(wj * wj + N[Power[wj, 4.0], $MachinePrecision]), $MachinePrecision] - N[Power[wj, 5.0], $MachinePrecision]), $MachinePrecision] - N[Power[wj, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(1.0 * t$95$1 + (-N[(wj * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + (-wj)), $MachinePrecision])), $MachinePrecision]]]]
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}}
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := \frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{if}\;wj - \frac{t_0 - x}{e^{wj} + t_0} \leq 6.460728134164632 \cdot 10^{-9}:\\
\;\;\;\;\mathsf{fma}\left(1, t_1, \left(\mathsf{fma}\left(wj, wj, {wj}^{4}\right) - {wj}^{5}\right) - {wj}^{3}\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(1, t_1, -\mathsf{fma}\left(wj, \frac{1}{wj + 1}, -wj\right)\right)\\
\end{array}




Bits error versus wj




Bits error versus x
| Implementation | Average bits of error |
|---|---|
| Original | 13.5 |
| Target | 12.9 |
| Herbie | 0.2 |
Branch: if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 6.4607281341646e-9

Initial program 17.8
Simplified 17.8
Applied egg-rr 9.3
Taylor expanded in wj around 0 0.2
Simplified 0.2

Branch: if 6.4607281341646e-9 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))

Initial program 2.2
Simplified 0.2
Applied egg-rr 0.2
Applied egg-rr 0.2
Final simplification 0.2
herbie shell --seed 2022150
; Herbie job specification: the input expression (last form) together with the
; hand-written accuracy target (:herbie-target) for the Lambert W Newton-loop
; step from Jmat.Real.lambertw.
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:herbie-target
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))