
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for the Lambert W equation w*exp(w) = x:
! code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 caches wj*exp(wj); note exp(wj) is re-evaluated in the next line.
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton-iteration step for the Lambert W equation w*exp(w) = x.
 * Math.exp(wj) was evaluated twice in the original; hoisted into a local.
 *
 * @param wj current iterate
 * @param x  target value of w*exp(w)
 * @return the next Newton iterate
 */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    double t_0 = wj * ew;
    return wj - ((t_0 - x) / (ew + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for the Lambert W equation w*exp(w) = x:
! code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 caches wj*exp(wj); note exp(wj) is re-evaluated in the next line.
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton-iteration step for the Lambert W equation w*exp(w) = x.
 * Math.exp(wj) was evaluated twice in the original; hoisted into a local.
 *
 * @param wj current iterate
 * @param x  target value of w*exp(w)
 * @return the next Newton iterate
 */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    double t_0 = wj * ew;
    return wj - ((t_0 - x) / (ew + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 4e-26)
(fma
wj
(* x (fma wj (+ (/ (- 1.0 wj) x) (fma wj -2.6666666666666665 2.5)) -2.0))
x)
(fma (- (/ wj (fma x wj x)) (/ (exp (- wj)) (+ wj 1.0))) (- x) wj))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4e-26) {
tmp = fma(wj, (x * fma(wj, (((1.0 - wj) / x) + fma(wj, -2.6666666666666665, 2.5)), -2.0)), x);
} else {
tmp = fma(((wj / fma(x, wj, x)) - (exp(-wj) / (wj + 1.0))), -x, wj);
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 4e-26) tmp = fma(wj, Float64(x * fma(wj, Float64(Float64(Float64(1.0 - wj) / x) + fma(wj, -2.6666666666666665, 2.5)), -2.0)), x); else tmp = fma(Float64(Float64(wj / fma(x, wj, x)) - Float64(exp(Float64(-wj)) / Float64(wj + 1.0))), Float64(-x), wj); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4e-26], N[(wj * N[(x * N[(wj * N[(N[(N[(1.0 - wj), $MachinePrecision] / x), $MachinePrecision] + N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(N[(N[(wj / N[(x * wj + x), $MachinePrecision]), $MachinePrecision] - N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * (-x) + wj), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t\_0}{e^{wj} + t\_0} \leq 4 \cdot 10^{-26}:\\
\;\;\;\;\mathsf{fma}\left(wj, x \cdot \mathsf{fma}\left(wj, \frac{1 - wj}{x} + \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{wj}{\mathsf{fma}\left(x, wj, x\right)} - \frac{e^{-wj}}{wj + 1}, -x, wj\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 4.0000000000000002e-26Initial program 64.4%
Taylor expanded in wj around 0
Simplified98.2%
Taylor expanded in x around inf
*-lowering-*.f64N/A
sub-negN/A
+-commutativeN/A
associate-/l*N/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-lft-outN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f6498.7
Simplified98.7%
if 4.0000000000000002e-26 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 93.8%
Taylor expanded in x around inf
sub-negN/A
+-commutativeN/A
neg-sub0N/A
associate-+l-N/A
unsub-negN/A
mul-1-negN/A
+-commutativeN/A
Simplified99.3%
sub-negN/A
+-commutativeN/A
*-commutativeN/A
distribute-rgt-neg-inN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.3%
Final simplification98.9%
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj))) (t_1 (+ wj (/ (- x t_0) (+ (exp wj) t_0))))) (if (<= t_1 -1e-303) x (if (<= t_1 0.0) (* wj wj) x))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = wj + ((x - t_0) / (exp(wj) + t_0));
double tmp;
if (t_1 <= -1e-303) {
tmp = x;
} else if (t_1 <= 0.0) {
tmp = wj * wj;
} else {
tmp = x;
}
return tmp;
}
! Piecewise Herbie alternative: returns x unless the updated iterate t_1
! lies in the narrow band -1e-303 < t_1 <= 0, where wj*wj is used.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
! t_1 is the fully-updated Newton iterate wj + (x - t_0)/(exp(wj) + t_0)
t_0 = wj * exp(wj)
t_1 = wj + ((x - t_0) / (exp(wj) + t_0))
if (t_1 <= (-1d-303)) then
tmp = x
else if (t_1 <= 0.0d0) then
tmp = wj * wj
else
tmp = x
end if
code = tmp
end function
/**
 * Piecewise Herbie alternative on the updated Newton iterate t_1.
 * Both outer branches of the generated FPCore return x (including NaN);
 * only the narrow band -1e-303 < t_1 <= 0 selects wj * wj.
 */
public static double code(double wj, double x) {
    double t_0 = wj * Math.exp(wj);
    double t_1 = wj + ((x - t_0) / (Math.exp(wj) + t_0));
    return (t_1 > -1e-303 && t_1 <= 0.0) ? (wj * wj) : x;
}
def code(wj, x): t_0 = wj * math.exp(wj) t_1 = wj + ((x - t_0) / (math.exp(wj) + t_0)) tmp = 0 if t_1 <= -1e-303: tmp = x elif t_1 <= 0.0: tmp = wj * wj else: tmp = x return tmp
function code(wj, x) t_0 = Float64(wj * exp(wj)) t_1 = Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) tmp = 0.0 if (t_1 <= -1e-303) tmp = x; elseif (t_1 <= 0.0) tmp = Float64(wj * wj); else tmp = x; end return tmp end
function tmp_2 = code(wj, x) t_0 = wj * exp(wj); t_1 = wj + ((x - t_0) / (exp(wj) + t_0)); tmp = 0.0; if (t_1 <= -1e-303) tmp = x; elseif (t_1 <= 0.0) tmp = wj * wj; else tmp = x; end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -1e-303], x, If[LessEqual[t$95$1, 0.0], N[(wj * wj), $MachinePrecision], x]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := wj + \frac{x - t\_0}{e^{wj} + t\_0}\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{-303}:\\
\;\;\;\;x\\
\mathbf{elif}\;t\_1 \leq 0:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -9.99999999999999931e-304 or 0.0 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 94.7%
Taylor expanded in wj around 0
Simplified91.4%
if -9.99999999999999931e-304 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.0Initial program 5.6%
Taylor expanded in x around 0
distribute-rgt1-inN/A
+-commutativeN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f645.6
Simplified5.6%
Taylor expanded in wj around 0
unpow2N/A
*-lowering-*.f6458.4
Simplified58.4%
Final simplification84.1%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 4e-26)
(fma
wj
(* x (fma wj (+ (/ (- 1.0 wj) x) (fma wj -2.6666666666666665 2.5)) -2.0))
x)
(- wj (fma wj (/ 1.0 (+ wj 1.0)) (/ x (* (exp wj) (- -1.0 wj))))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4e-26) {
tmp = fma(wj, (x * fma(wj, (((1.0 - wj) / x) + fma(wj, -2.6666666666666665, 2.5)), -2.0)), x);
} else {
tmp = wj - fma(wj, (1.0 / (wj + 1.0)), (x / (exp(wj) * (-1.0 - wj))));
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 4e-26) tmp = fma(wj, Float64(x * fma(wj, Float64(Float64(Float64(1.0 - wj) / x) + fma(wj, -2.6666666666666665, 2.5)), -2.0)), x); else tmp = Float64(wj - fma(wj, Float64(1.0 / Float64(wj + 1.0)), Float64(x / Float64(exp(wj) * Float64(-1.0 - wj))))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4e-26], N[(wj * N[(x * N[(wj * N[(N[(N[(1.0 - wj), $MachinePrecision] / x), $MachinePrecision] + N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(wj - N[(wj * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t\_0}{e^{wj} + t\_0} \leq 4 \cdot 10^{-26}:\\
\;\;\;\;\mathsf{fma}\left(wj, x \cdot \mathsf{fma}\left(wj, \frac{1 - wj}{x} + \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \mathsf{fma}\left(wj, \frac{1}{wj + 1}, \frac{x}{e^{wj} \cdot \left(-1 - wj\right)}\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 4.0000000000000002e-26Initial program 64.4%
Taylor expanded in wj around 0
Simplified98.2%
Taylor expanded in x around inf
*-lowering-*.f64N/A
sub-negN/A
+-commutativeN/A
associate-/l*N/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-lft-outN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f6498.7
Simplified98.7%
if 4.0000000000000002e-26 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 93.8%
div-subN/A
sub-negN/A
associate-/l*N/A
distribute-rgt1-inN/A
*-commutativeN/A
associate-/r*N/A
*-inversesN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
neg-lowering-neg.f64N/A
distribute-rgt1-inN/A
*-commutativeN/A
*-lowering-*.f64N/A
exp-lowering-exp.f64N/A
+-lowering-+.f6499.3
Applied egg-rr99.3%
Final simplification98.9%
(FPCore (wj x)
:precision binary64
(if (<= wj 0.00086)
(fma
wj
(* x (fma wj (+ (/ (- 1.0 wj) x) (fma wj -2.6666666666666665 2.5)) -2.0))
x)
(fma (- wj) (/ 1.0 (+ wj 1.0)) wj)))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00086) {
tmp = fma(wj, (x * fma(wj, (((1.0 - wj) / x) + fma(wj, -2.6666666666666665, 2.5)), -2.0)), x);
} else {
tmp = fma(-wj, (1.0 / (wj + 1.0)), wj);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 0.00086) tmp = fma(wj, Float64(x * fma(wj, Float64(Float64(Float64(1.0 - wj) / x) + fma(wj, -2.6666666666666665, 2.5)), -2.0)), x); else tmp = fma(Float64(-wj), Float64(1.0 / Float64(wj + 1.0)), wj); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 0.00086], N[(wj * N[(x * N[(wj * N[(N[(N[(1.0 - wj), $MachinePrecision] / x), $MachinePrecision] + N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[((-wj) * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00086:\\
\;\;\;\;\mathsf{fma}\left(wj, x \cdot \mathsf{fma}\left(wj, \frac{1 - wj}{x} + \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-wj, \frac{1}{wj + 1}, wj\right)\\
\end{array}
\end{array}
if wj < 8.59999999999999979e-4Initial program 75.9%
Taylor expanded in wj around 0
Simplified97.0%
Taylor expanded in x around inf
*-lowering-*.f64N/A
sub-negN/A
+-commutativeN/A
associate-/l*N/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-lft-outN/A
metadata-evalN/A
accelerator-lowering-fma.f64N/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f6497.3
Simplified97.3%
if 8.59999999999999979e-4 < wj Initial program 30.0%
Taylor expanded in x around 0
distribute-rgt1-inN/A
+-commutativeN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f6496.9
Simplified96.9%
sub-negN/A
+-commutativeN/A
div-invN/A
distribute-lft-neg-inN/A
accelerator-lowering-fma.f64N/A
neg-lowering-neg.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f6497.4
Applied egg-rr97.4%
Final simplification97.3%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00038) (fma wj (- wj (* wj wj)) x) (fma (- wj) (/ 1.0 (+ wj 1.0)) wj)))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00038) {
tmp = fma(wj, (wj - (wj * wj)), x);
} else {
tmp = fma(-wj, (1.0 / (wj + 1.0)), wj);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 0.00038) tmp = fma(wj, Float64(wj - Float64(wj * wj)), x); else tmp = fma(Float64(-wj), Float64(1.0 / Float64(wj + 1.0)), wj); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 0.00038], N[(wj * N[(wj - N[(wj * wj), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[((-wj) * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00038:\\
\;\;\;\;\mathsf{fma}\left(wj, wj - wj \cdot wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-wj, \frac{1}{wj + 1}, wj\right)\\
\end{array}
\end{array}
if wj < 3.8000000000000002e-4Initial program 75.9%
Taylor expanded in wj around 0
Simplified97.0%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
distribute-rgt-inN/A
*-lft-identityN/A
neg-mul-1N/A
distribute-lft-neg-inN/A
unpow2N/A
unsub-negN/A
--lowering--.f64N/A
unpow2N/A
*-lowering-*.f6496.8
Simplified96.8%
if 3.8000000000000002e-4 < wj Initial program 30.0%
Taylor expanded in x around 0
distribute-rgt1-inN/A
+-commutativeN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f6496.9
Simplified96.9%
sub-negN/A
+-commutativeN/A
div-invN/A
distribute-lft-neg-inN/A
accelerator-lowering-fma.f64N/A
neg-lowering-neg.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f6497.4
Applied egg-rr97.4%
Final simplification96.8%
(FPCore (wj x) :precision binary64 (if (<= wj 0.000165) (fma wj (- wj (* wj wj)) x) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.000165) {
tmp = fma(wj, (wj - (wj * wj)), x);
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 0.000165) tmp = fma(wj, Float64(wj - Float64(wj * wj)), x); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 0.000165], N[(wj * N[(wj - N[(wj * wj), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.000165:\\
\;\;\;\;\mathsf{fma}\left(wj, wj - wj \cdot wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 1.65e-4Initial program 75.9%
Taylor expanded in wj around 0
Simplified97.0%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
distribute-rgt-inN/A
*-lft-identityN/A
neg-mul-1N/A
distribute-lft-neg-inN/A
unpow2N/A
unsub-negN/A
--lowering--.f64N/A
unpow2N/A
*-lowering-*.f6496.8
Simplified96.8%
if 1.65e-4 < wj Initial program 30.0%
Taylor expanded in x around 0
distribute-rgt1-inN/A
+-commutativeN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f6496.9
Simplified96.9%
Final simplification96.8%
(FPCore (wj x) :precision binary64 (fma wj (- wj (* wj wj)) x))
double code(double wj, double x) {
return fma(wj, (wj - (wj * wj)), x);
}
function code(wj, x) return fma(wj, Float64(wj - Float64(wj * wj)), x) end
code[wj_, x_] := N[(wj * N[(wj - N[(wj * wj), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, wj - wj \cdot wj, x\right)
\end{array}
Initial program 74.8%
Taylor expanded in wj around 0
Simplified95.1%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
distribute-rgt-inN/A
*-lft-identityN/A
neg-mul-1N/A
distribute-lft-neg-inN/A
unpow2N/A
unsub-negN/A
--lowering--.f64N/A
unpow2N/A
*-lowering-*.f6494.9
Simplified94.9%
(FPCore (wj x) :precision binary64 (fma wj wj x))
double code(double wj, double x) {
return fma(wj, wj, x);
}
function code(wj, x) return fma(wj, wj, x) end
code[wj_, x_] := N[(wj * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, wj, x\right)
\end{array}
Initial program 74.8%
Taylor expanded in wj around 0
Simplified95.1%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
distribute-rgt-inN/A
*-lft-identityN/A
neg-mul-1N/A
distribute-lft-neg-inN/A
unpow2N/A
unsub-negN/A
--lowering--.f64N/A
unpow2N/A
*-lowering-*.f6494.9
Simplified94.9%
Taylor expanded in wj around 0
+-commutativeN/A
unpow2N/A
accelerator-lowering-fma.f6494.6
Simplified94.6%
(FPCore (wj x) :precision binary64 x)
// Degenerate Herbie alternative: zeroth-order approximation, returns x.
double code(double wj, double x) {
return x;
}
! Degenerate Herbie alternative: zeroth-order approximation, returns x.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
// Degenerate Herbie alternative: zeroth-order approximation, returns x.
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 74.8%
Taylor expanded in wj around 0
Simplified80.5%
(FPCore (wj x) :precision binary64 wj)
// Degenerate Herbie alternative: returns the current iterate wj unchanged.
double code(double wj, double x) {
return wj;
}
! Degenerate Herbie alternative: returns the current iterate wj unchanged.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
// Degenerate Herbie alternative: returns the current iterate wj unchanged.
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 74.8%
Taylor expanded in wj around inf
Simplified4.3%
(FPCore (wj x) :precision binary64 -1.0)
// Degenerate Herbie alternative: constant -1 (very low reported accuracy).
double code(double wj, double x) {
return -1.0;
}
! Degenerate Herbie alternative: constant -1 (very low reported accuracy).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
// Degenerate Herbie alternative: constant -1 (very low reported accuracy).
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x): return -1.0
function code(wj, x) return -1.0 end
function tmp = code(wj, x) tmp = -1.0; end
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 74.8%
Taylor expanded in wj around inf
Simplified4.1%
Taylor expanded in wj around 0
Simplified3.0%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Rearranged Newton step: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
! Note exp(wj) is evaluated twice in this generated form.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/**
 * Rearranged Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
 * Math.exp(wj) was evaluated twice in the original; hoisted into a local.
 */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    return wj - ((wj / (wj + 1.0)) - (x / (ew + (wj * ew))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024205
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))