
; Input program: one Newton step for the Lambert W equation w*e^w = x, binary64.
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for solving w*exp(w) = x (Lambert W).
! wj: current iterate; x: target value. Returns the updated iterate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = w * e^w, the function value being driven toward x
t_0 = wj * exp(wj)
! Newton update: w - (w*e^w - x) / (e^w + w*e^w), the denominator being d/dw(w*e^w)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton step for the Lambert W equation w*e^w = x.
 *
 * @param wj current iterate
 * @param x  target value
 * @return the updated iterate wj - (wj*e^wj - x) / (e^wj + wj*e^wj)
 */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    double wew = wj * ew;
    return wj - (wew - x) / (ew + wew);
}
def code(wj, x):
    """One Newton step for the Lambert W equation w*exp(w) = x.

    The original single-line form placed an assignment and a ``return``
    after the ``def`` colon with no separators, which is a SyntaxError.

    Args:
        wj: Current iterate.
        x: Target value.

    Returns:
        The updated iterate wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton step for the Lambert W equation w*exp(w) = x.
# Reformatted: the original single-line form had multiple statements with no
# separators after the signature, which does not parse.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton step for the Lambert W equation w*exp(w) = x.
% Reformatted onto separate lines: MATLAB does not allow statements on the
% same line as the function declaration.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* One Newton step for the Lambert W equation w*E^w = x at machine precision;
   t$95$0 caches wj*E^wj. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Input program (repeated): one Newton step for Lambert W (w*e^w = x), binary64.
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for solving w*exp(w) = x (Lambert W).
! wj: current iterate; x: target value. Returns the updated iterate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = w * e^w, the function value being driven toward x
t_0 = wj * exp(wj)
! Newton update: w - (w*e^w - x) / (e^w + w*e^w), the denominator being d/dw(w*e^w)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton step for the Lambert W equation w*e^w = x.
 *
 * @param wj current iterate
 * @param x  target value
 * @return the updated iterate wj - (wj*e^wj - x) / (e^wj + wj*e^wj)
 */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    double wew = wj * ew;
    return wj - (wew - x) / (ew + wew);
}
def code(wj, x):
    """One Newton step for the Lambert W equation w*exp(w) = x.

    The original single-line form placed an assignment and a ``return``
    after the ``def`` colon with no separators, which is a SyntaxError.

    Args:
        wj: Current iterate.
        x: Target value.

    Returns:
        The updated iterate wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton step for the Lambert W equation w*exp(w) = x.
# Reformatted: the original single-line form had multiple statements with no
# separators after the signature, which does not parse.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton step for the Lambert W equation w*exp(w) = x.
% Reformatted onto separate lines: MATLAB does not allow statements on the
% same line as the function declaration.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* One Newton step for the Lambert W equation w*E^w = x at machine precision;
   t$95$0 caches wj*E^wj. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
; Herbie alternative: branch on the size of the Newton-step result. When it is
; tiny (<= 5e-24) use a Taylor-polynomial (fma) form in wj; otherwise use a
; rearrangement expanded in x.
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ t_0 (exp wj)))) 5e-24)
(fma
wj
(fma
wj
(- (fma x 2.5 1.0) (fma wj (fma x 0.6666666666666666 (* x 2.0)) wj))
(* x -2.0))
x)
(* x (- (- (/ wj x) (/ (exp (- wj)) (- -1.0 wj))) (/ wj (fma wj x x)))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (t_0 + exp(wj)))) <= 5e-24) {
tmp = fma(wj, fma(wj, (fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, (x * 2.0)), wj)), (x * -2.0)), x);
} else {
tmp = x * (((wj / x) - (exp(-wj) / (-1.0 - wj))) - (wj / fma(wj, x, x)));
}
return tmp;
}
# Herbie alternative for the Lambert W Newton step: Taylor-polynomial (fma)
# form when the step result is tiny, series-in-x rearrangement otherwise.
# Reformatted: the original single-line form had multiple statements with no
# separators, which does not parse.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    tmp = 0.0
    if (Float64(wj + Float64(Float64(x - t_0) / Float64(t_0 + exp(wj)))) <= 5e-24)
        tmp = fma(wj, fma(wj, Float64(fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, Float64(x * 2.0)), wj)), Float64(x * -2.0)), x)
    else
        tmp = Float64(x * Float64(Float64(Float64(wj / x) - Float64(exp(Float64(-wj)) / Float64(-1.0 - wj))) - Float64(wj / fma(wj, x, x))))
    end
    return tmp
end
(* Herbie alternative: Taylor-polynomial form when the Newton-step result is
   tiny (<= 5*10^-24), series-in-x rearrangement otherwise. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e-24], N[(wj * N[(wj * N[(N[(x * 2.5 + 1.0), $MachinePrecision] - N[(wj * N[(x * 0.6666666666666666 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(x * N[(N[(N[(wj / x), $MachinePrecision] - N[(N[Exp[(-wj)], $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(wj / N[(wj * x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t_0}{t_0 + e^{wj}} \leq 5 \cdot 10^{-24}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 2.5, 1\right) - \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 0.6666666666666666, x \cdot 2\right), wj\right), x \cdot -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\left(\frac{wj}{x} - \frac{e^{-wj}}{-1 - wj}\right) - \frac{wj}{\mathsf{fma}\left(wj, x, x\right)}\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 4.9999999999999998e-24
Initial program 66.0%
Taylor expanded in wj around 0
Applied rewrites 98.9%
if 4.9999999999999998e-24 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))
Initial program 97.1%
Applied rewrites 39.1%
Taylor expanded in x around inf
lower-*.f64 N/A
+-commutative N/A
mul-1-neg N/A
unsub-neg N/A
lower--.f64 N/A
+-commutative N/A
lower-+.f64 N/A
lower-/.f64 N/A
associate-/r* N/A
lower-/.f64 N/A
rec-exp N/A
lower-exp.f64 N/A
lower-neg.f64 N/A
lower-+.f64 N/A
lower-/.f64 N/A
+-commutative N/A
distribute-lft-in N/A
*-commutative N/A
*-rgt-identity N/A
Applied rewrites 99.4%
Final simplification 99.0%
; Herbie alternative: unconditional Taylor-polynomial (fma) form in wj.
(FPCore (wj x) :precision binary64 (fma wj (fma wj (- (fma x 2.5 1.0) (fma wj (fma x 0.6666666666666666 (* x 2.0)) wj)) (* x -2.0)) x))
double code(double wj, double x) {
return fma(wj, fma(wj, (fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, (x * 2.0)), wj)), (x * -2.0)), x);
}
# Taylor-polynomial (fma) form of the Lambert W Newton step, for wj near 0.
function code(wj, x)
    quadratic = Float64(fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, Float64(x * 2.0)), wj))
    return fma(wj, fma(wj, quadratic, Float64(x * -2.0)), x)
end
(* Taylor-polynomial form of the Lambert W Newton step, for wj near 0. *)
code[wj_, x_] := N[(wj * N[(wj * N[(N[(x * 2.5 + 1.0), $MachinePrecision] - N[(wj * N[(x * 0.6666666666666666 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 2.5, 1\right) - \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 0.6666666666666666, x \cdot 2\right), wj\right), x \cdot -2\right), x\right)
\end{array}
Initial program 77.9%
Taylor expanded in wj around 0
Applied rewrites 96.2%
; Herbie alternative: rearranged step wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Rearranged Lambert W Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
! wj: current iterate; x: target value. Returns the updated iterate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/**
 * Rearranged Lambert W Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
 *
 * @param wj current iterate
 * @param x  target value
 * @return the updated iterate
 */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    return wj - (wj / (wj + 1.0) - x / (ew + wj * ew));
}
def code(wj, x):
    """Rearranged Lambert W Newton step.

    Computes wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)) for iterate ``wj``
    and target value ``x``.
    """
    ew = math.exp(wj)
    rational_term = wj / (wj + 1.0)
    correction = x / (ew + wj * ew)
    return wj - (rational_term - correction)
# Rearranged Lambert W Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
function code(wj, x)
    ew = exp(wj)
    return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(ew + Float64(wj * ew)))))
end
% Rearranged Lambert W Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
% Reformatted onto separate lines: MATLAB does not allow statements on the
% same line as the function declaration.
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
(* Rearranged Lambert W Newton step: wj - (wj/(wj+1) - x/(E^wj + wj*E^wj)). *)
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024219
; Problem statement with an :alt annotation giving the platform-preferred
; rewrite that caches e^wj in a local binding.
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))