
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton step for solving w*exp(w) = x (Lambert W iteration):
!   code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj))
! Herbie-generated translation of the FPCore expression above.
! NOTE(review): exp(wj) is evaluated twice; left as generated.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
! t_0 holds wj*exp(wj), shared by numerator and denominator
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton step for the Lambert W equation w*exp(w) = x:
 * next = w - (w*e^w - x) / (e^w + w*e^w).
 * Same arithmetic as the original; Math.exp(wj) is evaluated once.
 */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);
    final double wTimesEw = wj * ew;
    return wj - (wTimesEw - x) / (ew + wTimesEw);
}
def code(wj, x):
    """One Newton step for the Lambert W equation w*exp(w) = x.

    Returns wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
    Reformatted: the generated one-line body was invalid Python syntax
    (assignment and return fused on a single line).
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton step for the Lambert W equation w*exp(w) = x.

    Returns wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(if (<= wj -4e-5)
(+ wj (/ (- wj (/ x (exp wj))) (- -1.0 wj)))
(if (<= wj 0.0017)
(+
x
(*
wj
(+
(* x -2.0)
(*
wj
(+
(+
1.0
(*
wj
(- (- -1.0 (* x 0.6666666666666666)) (+ (* x -3.0) (* x 5.0)))))
(* x 2.5))))))
(+ wj (/ wj (- -1.0 wj))))))
// Piecewise Herbie rewrite of the Lambert W Newton step, three wj regimes.
// Middle regime (-4e-5 < wj <= 0.0017): Taylor polynomial of the step
// around wj = 0 (per the derivation trace below). Outer regimes are
// algebraic rearrangements; the wj > 0.0017 branch drops x entirely
// (Taylor expansion in x around 0 per the trace).
double code(double wj, double x) {
double tmp;
if (wj <= -4e-5) {
tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj));
} else if (wj <= 0.0017) {
tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
! Piecewise Herbie rewrite of the Lambert W Newton step,
! split into three regimes of wj (thresholds chosen by Herbie):
!   wj <= -4e-5          : algebraic rearrangement of the exact step
!   -4e-5 < wj <= 0.0017 : Taylor polynomial of the step around wj = 0
!   wj > 0.0017          : x-independent form wj + wj/(-1 - wj)
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-4d-5)) then
tmp = wj + ((wj - (x / exp(wj))) / ((-1.0d0) - wj))
else if (wj <= 0.0017d0) then
tmp = x + (wj * ((x * (-2.0d0)) + (wj * ((1.0d0 + (wj * (((-1.0d0) - (x * 0.6666666666666666d0)) - ((x * (-3.0d0)) + (x * 5.0d0))))) + (x * 2.5d0)))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -4e-5) {
tmp = wj + ((wj - (x / Math.exp(wj))) / (-1.0 - wj));
} else if (wj <= 0.0017) {
tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Lambert W Newton step.

    Three regimes in wj: an algebraic rearrangement for wj <= -4e-5,
    a Taylor polynomial around wj = 0 for the middle band, and an
    x-independent limit form for wj > 0.0017.
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    if wj <= -4e-5:
        tmp = wj + ((wj - (x / math.exp(wj))) / (-1.0 - wj))
    elif wj <= 0.0017:
        tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -4e-5) tmp = Float64(wj + Float64(Float64(wj - Float64(x / exp(wj))) / Float64(-1.0 - wj))); elseif (wj <= 0.0017) tmp = Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(Float64(-1.0 - Float64(x * 0.6666666666666666)) - Float64(Float64(x * -3.0) + Float64(x * 5.0))))) + Float64(x * 2.5)))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -4e-5) tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj)); elseif (wj <= 0.0017) tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5))))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -4e-5], N[(wj + N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 0.0017], N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(N[(1.0 + N[(wj * N[(N[(-1.0 - N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision] - N[(N[(x * -3.0), $MachinePrecision] + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * 2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4 \cdot 10^{-5}:\\
\;\;\;\;wj + \frac{wj - \frac{x}{e^{wj}}}{-1 - wj}\\
\mathbf{elif}\;wj \leq 0.0017:\\
\;\;\;\;x + wj \cdot \left(x \cdot -2 + wj \cdot \left(\left(1 + wj \cdot \left(\left(-1 - x \cdot 0.6666666666666666\right) - \left(x \cdot -3 + x \cdot 5\right)\right)\right) + x \cdot 2.5\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < -4.00000000000000033e-5: Initial program 60.4%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified93.7%
if -4.00000000000000033e-5 < wj < 0.00169999999999999991Initial program 75.2%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified99.9%
if 0.00169999999999999991 < wj Initial program 20.0%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 1e-11)
(+
x
(*
wj
(+
(* x -2.0)
(*
wj
(+
(+
1.0
(*
wj
(- (- -1.0 (* x 0.6666666666666666)) (+ (* x -3.0) (* x 5.0)))))
(* x 2.5))))))
(+ wj (* (/ (- wj (/ x (exp wj))) (- 1.0 (* wj wj))) (+ wj -1.0))))))
// Herbie rewrite: the Newton step's own value, wj + (x - t_0)/(e^wj + t_0),
// is used as the regime selector. Near-zero steps (<= 1e-11) take the
// Taylor polynomial around wj = 0; otherwise an algebraically
// rearranged exact step is used.
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 1e-11) {
tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))));
} else {
tmp = wj + (((wj - (x / exp(wj))) / (1.0 - (wj * wj))) * (wj + -1.0));
}
return tmp;
}
! Herbie rewrite: the Newton step's own value wj + (x - t_0)/(e^wj + t_0)
! selects the regime. Near-zero steps (<= 1d-11) use the Taylor
! polynomial around wj = 0; otherwise an algebraically rearranged
! exact step is used.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
! t_0 = wj*exp(wj), shared by the selector and both branches
real(8) :: t_0
real(8) :: tmp
t_0 = wj * exp(wj)
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 1d-11) then
tmp = x + (wj * ((x * (-2.0d0)) + (wj * ((1.0d0 + (wj * (((-1.0d0) - (x * 0.6666666666666666d0)) - ((x * (-3.0d0)) + (x * 5.0d0))))) + (x * 2.5d0)))))
else
tmp = wj + (((wj - (x / exp(wj))) / (1.0d0 - (wj * wj))) * (wj + (-1.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double tmp;
if ((wj + ((x - t_0) / (Math.exp(wj) + t_0))) <= 1e-11) {
tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))));
} else {
tmp = wj + (((wj - (x / Math.exp(wj))) / (1.0 - (wj * wj))) * (wj + -1.0));
}
return tmp;
}
def code(wj, x):
    """Herbie rewrite of the Lambert W Newton step.

    The step's own value, wj + (x - t_0)/(e^wj + t_0), selects the
    regime: near-zero steps use the Taylor polynomial around wj = 0,
    otherwise an algebraically rearranged exact step.
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    t_0 = wj * math.exp(wj)
    if (wj + ((x - t_0) / (math.exp(wj) + t_0))) <= 1e-11:
        tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))))
    else:
        tmp = wj + (((wj - (x / math.exp(wj))) / (1.0 - (wj * wj))) * (wj + -1.0))
    return tmp
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 1e-11) tmp = Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(Float64(-1.0 - Float64(x * 0.6666666666666666)) - Float64(Float64(x * -3.0) + Float64(x * 5.0))))) + Float64(x * 2.5)))))); else tmp = Float64(wj + Float64(Float64(Float64(wj - Float64(x / exp(wj))) / Float64(1.0 - Float64(wj * wj))) * Float64(wj + -1.0))); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj * exp(wj); tmp = 0.0; if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 1e-11) tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5))))); else tmp = wj + (((wj - (x / exp(wj))) / (1.0 - (wj * wj))) * (wj + -1.0)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1e-11], N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(N[(1.0 + N[(wj * N[(N[(-1.0 - N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision] - N[(N[(x * -3.0), $MachinePrecision] + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * 2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(wj + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t\_0}{e^{wj} + t\_0} \leq 10^{-11}:\\
\;\;\;\;x + wj \cdot \left(x \cdot -2 + wj \cdot \left(\left(1 + wj \cdot \left(\left(-1 - x \cdot 0.6666666666666666\right) - \left(x \cdot -3 + x \cdot 5\right)\right)\right) + x \cdot 2.5\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - \frac{x}{e^{wj}}}{1 - wj \cdot wj} \cdot \left(wj + -1\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 9.99999999999999939e-12Initial program 66.8%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified98.2%
if 9.99999999999999939e-12 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 93.4%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified99.3%
flip--N/A
associate-/r/N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
metadata-evalN/A
*-lowering-*.f64N/A
+-commutativeN/A
+-lowering-+.f6499.4%
Applied egg-rr99.4%
Final simplification98.5%
(FPCore (wj x)
:precision binary64
(if (<= wj 0.0146)
(+
x
(*
wj
(+
(* x -2.0)
(*
wj
(+
(+
1.0
(* wj (- (- -1.0 (* x 0.6666666666666666)) (+ (* x -3.0) (* x 5.0)))))
(* x 2.5))))))
(+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.0146) {
tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
! Herbie rewrite: for wj <= 0.0146 use the Taylor polynomial of the
! Newton step around wj = 0; otherwise the x-independent form
! wj + wj/(-1 - wj) (Taylor in x around 0 per the derivation trace).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.0146d0) then
tmp = x + (wj * ((x * (-2.0d0)) + (wj * ((1.0d0 + (wj * (((-1.0d0) - (x * 0.6666666666666666d0)) - ((x * (-3.0d0)) + (x * 5.0d0))))) + (x * 2.5d0)))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.0146) {
tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Lambert W Newton step.

    For wj <= 0.0146 use the Taylor polynomial of the step around
    wj = 0; otherwise the x-independent form wj + wj / (-1 - wj).
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    if wj <= 0.0146:
        tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5)))))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.0146) tmp = Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(Float64(-1.0 - Float64(x * 0.6666666666666666)) - Float64(Float64(x * -3.0) + Float64(x * 5.0))))) + Float64(x * 2.5)))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.0146) tmp = x + (wj * ((x * -2.0) + (wj * ((1.0 + (wj * ((-1.0 - (x * 0.6666666666666666)) - ((x * -3.0) + (x * 5.0))))) + (x * 2.5))))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.0146], N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(N[(1.0 + N[(wj * N[(N[(-1.0 - N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision] - N[(N[(x * -3.0), $MachinePrecision] + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * 2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.0146:\\
\;\;\;\;x + wj \cdot \left(x \cdot -2 + wj \cdot \left(\left(1 + wj \cdot \left(\left(-1 - x \cdot 0.6666666666666666\right) - \left(x \cdot -3 + x \cdot 5\right)\right)\right) + x \cdot 2.5\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.0146000000000000001Initial program 74.8%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified98.0%
if 0.0146000000000000001 < wj Initial program 20.0%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
Final simplification98.1%
(FPCore (wj x)
:precision binary64
(if (<= wj 0.038)
(/
1.0
(/
1.0
(+
x
(*
wj
(+
(* x -2.0)
(*
wj
(- 1.0 (- (* wj (+ 1.0 (* x 2.6666666666666665))) (* x 2.5)))))))))
(+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.038) {
tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - ((wj * (1.0 + (x * 2.6666666666666665))) - (x * 2.5))))))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.038d0) then
tmp = 1.0d0 / (1.0d0 / (x + (wj * ((x * (-2.0d0)) + (wj * (1.0d0 - ((wj * (1.0d0 + (x * 2.6666666666666665d0))) - (x * 2.5d0))))))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.038) {
tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - ((wj * (1.0 + (x * 2.6666666666666665))) - (x * 2.5))))))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Lambert W Newton step.

    For wj <= 0.038 a truncated polynomial wrapped in a double
    reciprocal 1/(1/expr) (an artifact of the rewrite search);
    otherwise the x-independent form wj + wj / (-1 - wj).
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    if wj <= 0.038:
        tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - ((wj * (1.0 + (x * 2.6666666666666665))) - (x * 2.5))))))))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.038) tmp = Float64(1.0 / Float64(1.0 / Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(1.0 - Float64(Float64(wj * Float64(1.0 + Float64(x * 2.6666666666666665))) - Float64(x * 2.5))))))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.038) tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - ((wj * (1.0 + (x * 2.6666666666666665))) - (x * 2.5)))))))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.038], N[(1.0 / N[(1.0 / N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(1.0 - N[(N[(wj * N[(1.0 + N[(x * 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.038:\\
\;\;\;\;\frac{1}{\frac{1}{x + wj \cdot \left(x \cdot -2 + wj \cdot \left(1 - \left(wj \cdot \left(1 + x \cdot 2.6666666666666665\right) - x \cdot 2.5\right)\right)\right)}}\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.0379999999999999991Initial program 74.8%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified98.0%
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr58.3%
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
Applied egg-rr97.8%
if 0.0379999999999999991 < wj Initial program 20.0%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
Final simplification97.9%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00039) (/ 1.0 (/ 1.0 (+ x (* wj (+ (* x -2.0) (* wj (- 1.0 wj))))))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00039) {
tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - wj))))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
! Herbie rewrite: for wj <= 0.00039 a truncated polynomial wrapped in a
! double reciprocal 1/(1/expr) (an artifact of the rewrite search);
! otherwise the x-independent form wj + wj/(-1 - wj).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00039d0) then
tmp = 1.0d0 / (1.0d0 / (x + (wj * ((x * (-2.0d0)) + (wj * (1.0d0 - wj))))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00039) {
tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - wj))))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Lambert W Newton step.

    For wj <= 0.00039 a truncated polynomial wrapped in a double
    reciprocal 1/(1/expr); otherwise wj + wj / (-1 - wj).
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    if wj <= 0.00039:
        tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - wj))))))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00039) tmp = Float64(1.0 / Float64(1.0 / Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(wj * Float64(1.0 - wj))))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00039) tmp = 1.0 / (1.0 / (x + (wj * ((x * -2.0) + (wj * (1.0 - wj)))))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00039], N[(1.0 / N[(1.0 / N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00039:\\
\;\;\;\;\frac{1}{\frac{1}{x + wj \cdot \left(x \cdot -2 + wj \cdot \left(1 - wj\right)\right)}}\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 3.89999999999999993e-4Initial program 74.8%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified98.0%
flip-+N/A
/-lowering-/.f64N/A
Applied egg-rr58.3%
clear-numN/A
/-lowering-/.f64N/A
clear-numN/A
Applied egg-rr97.8%
Taylor expanded in x around 0
*-lowering-*.f64N/A
--lowering--.f6497.6%
Simplified97.6%
if 3.89999999999999993e-4 < wj Initial program 20.0%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
Final simplification97.6%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00037) (+ x (* wj (* wj (- 1.0 wj)))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00037) {
tmp = x + (wj * (wj * (1.0 - wj)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00037d0) then
tmp = x + (wj * (wj * (1.0d0 - wj)))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00037) {
tmp = x + (wj * (wj * (1.0 - wj)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Lambert W Newton step.

    For wj <= 0.00037 the truncated series x + wj^2*(1 - wj);
    otherwise the x-independent form wj + wj / (-1 - wj).
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    if wj <= 0.00037:
        tmp = x + (wj * (wj * (1.0 - wj)))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00037) tmp = Float64(x + Float64(wj * Float64(wj * Float64(1.0 - wj)))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00037) tmp = x + (wj * (wj * (1.0 - wj))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00037], N[(x + N[(wj * N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00037:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(1 - wj\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 3.6999999999999999e-4Initial program 74.8%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified98.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
--lowering--.f6497.2%
Simplified97.2%
if 3.6999999999999999e-4 < wj Initial program 20.0%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
Final simplification97.3%
(FPCore (wj x) :precision binary64 (if (<= wj 1.2e-5) (+ x (* wj wj)) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 1.2e-5) {
tmp = x + (wj * wj);
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
! Herbie rewrite: for wj <= 1.2e-5 the step collapses to x + wj**2;
! otherwise the x-independent form wj + wj/(-1 - wj).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 1.2d-5) then
tmp = x + (wj * wj)
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 1.2e-5) {
tmp = x + (wj * wj);
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Lambert W Newton step.

    For wj <= 1.2e-5 the step collapses to x + wj**2; otherwise the
    x-independent form wj + wj / (-1 - wj).
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    if wj <= 1.2e-5:
        tmp = x + (wj * wj)
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 1.2e-5) tmp = Float64(x + Float64(wj * wj)); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 1.2e-5) tmp = x + (wj * wj); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 1.2e-5], N[(x + N[(wj * wj), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1.2 \cdot 10^{-5}:\\
\;\;\;\;x + wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 1.2e-5Initial program 74.8%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified98.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
--lowering--.f6497.2%
Simplified97.2%
Taylor expanded in wj around 0
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
*-lowering-*.f6496.1%
Simplified96.1%
if 1.2e-5 < wj Initial program 20.0%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified100.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
+-commutativeN/A
+-lowering-+.f64100.0%
Simplified100.0%
Final simplification96.2%
(FPCore (wj x) :precision binary64 (if (<= wj -1.25e-29) (* wj wj) x))
// Aggressively simplified Herbie alternative: wj^2 for wj <= -1.25e-29,
// otherwise just x. (Low reported accuracy in the trace below.)
double code(double wj, double x) {
double tmp;
if (wj <= -1.25e-29) {
tmp = wj * wj;
} else {
tmp = x;
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-1.25d-29)) then
tmp = wj * wj
else
tmp = x
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -1.25e-29) {
tmp = wj * wj;
} else {
tmp = x;
}
return tmp;
}
def code(wj, x):
    """Aggressively simplified Herbie alternative.

    Returns wj**2 for wj <= -1.25e-29, otherwise x.
    Reformatted: the generated one-line body was invalid Python syntax.
    """
    if wj <= -1.25e-29:
        tmp = wj * wj
    else:
        tmp = x
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -1.25e-29) tmp = Float64(wj * wj); else tmp = x; end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -1.25e-29) tmp = wj * wj; else tmp = x; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -1.25e-29], N[(wj * wj), $MachinePrecision], x]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1.25 \cdot 10^{-29}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if wj < -1.24999999999999996e-29Initial program 60.4%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified77.9%
Taylor expanded in x around 0
*-lowering-*.f64N/A
--lowering--.f6469.3%
Simplified69.3%
Taylor expanded in wj around inf
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
/-lowering-/.f6449.8%
Simplified49.8%
Taylor expanded in wj around 0
unpow2N/A
*-lowering-*.f6439.6%
Simplified39.6%
if -1.24999999999999996e-29 < wj Initial program 75.0%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified76.7%
Taylor expanded in wj around 0
Simplified88.7%
(FPCore (wj x) :precision binary64 (+ x (* wj wj)))
// Herbie alternative: fully Taylor-expanded step, x + wj^2.
double code(double wj, double x) {
return x + (wj * wj);
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * wj)
end function
public static double code(double wj, double x) {
return x + (wj * wj);
}
def code(wj, x): return x + (wj * wj)
function code(wj, x) return Float64(x + Float64(wj * wj)) end
function tmp = code(wj, x) tmp = x + (wj * wj); end
code[wj_, x_] := N[(x + N[(wj * wj), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot wj
\end{array}
Initial program 73.8%
Taylor expanded in wj around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
Simplified96.2%
Taylor expanded in x around 0
*-lowering-*.f64N/A
--lowering--.f6495.3%
Simplified95.3%
Taylor expanded in wj around 0
+-commutativeN/A
+-lowering-+.f64N/A
unpow2N/A
*-lowering-*.f6494.4%
Simplified94.4%
Final simplification94.4%
(FPCore (wj x) :precision binary64 x)
// Degenerate Herbie alternative: returns x unchanged.
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 73.8%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified76.1%
Taylor expanded in wj around 0
Simplified82.8%
(FPCore (wj x) :precision binary64 wj)
// Degenerate Herbie alternative: returns wj unchanged.
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 73.8%
sub-negN/A
+-lowering-+.f64N/A
distribute-rgt1-inN/A
associate-/l/N/A
distribute-neg-frac2N/A
/-lowering-/.f64N/A
div-subN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
exp-lowering-exp.f64N/A
+-commutativeN/A
*-inversesN/A
distribute-neg-inN/A
Simplified76.1%
Taylor expanded in wj around inf
Simplified4.5%
(FPCore (wj x) :precision binary64 -1.0)
// Degenerate Herbie alternative: constant -1.0 regardless of inputs.
double code(double wj, double x) {
return -1.0;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x): return -1.0
function code(wj, x) return -1.0 end
function tmp = code(wj, x) tmp = -1.0; end
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 73.8%
Taylor expanded in wj around inf
Simplified4.5%
Taylor expanded in wj around 0
Simplified3.3%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
// Rearranged Newton step (matches the :alt clause in the FPCore below):
// wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Rearranged Newton step (matches the :alt clause in the FPCore below):
! code = wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj)))
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024139
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))