Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX
\[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}}
\]
↓
\[\begin{array}{l}
\mathbf{if}\;wj \leq 1.1 \cdot 10^{-13}:\\
\;\;\;\;\left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(x \cdot wj\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj}\\
\end{array}
\]
;; Original FPCore expression: wj - (wj*e^wj - x) / (e^wj + wj*e^wj), binary64.
;; NOTE(review): numerator/denominator match f/f' for f(w) = w*e^w - x, i.e. one
;; Newton-style update (Lambert-W inversion) — confirm intended use.
(FPCore (wj x)
:precision binary64
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj)))))) ↓
;; Rewritten FPCore expression, branching on tiny wj:
;;  - small-wj branch: quadratic polynomial (1 + 2.5x)*wj^2 + x - 2x*wj,
;;    presumably a series expansion to avoid cancellation near wj = 0 — confirm
;;  - else branch: algebraically simplified step wj + (x/e^wj - wj)/(1 + wj)
(FPCore (wj x)
:precision binary64
(if (<= wj 1.1e-13)
(+
(* (- 1.0 (+ (* x -4.0) (* x 1.5))) (pow wj 2.0))
(+ x (* -2.0 (* x wj))))
(+ wj (/ (- (/ x (exp wj)) wj) (+ 1.0 wj))))) double code(double wj, double x) {
// Original C translation: single update wj - (wj*e^wj - x)/(e^wj + wj*e^wj).
return wj - (((wj * exp(wj)) - x) / (exp(wj) + (wj * exp(wj))));
}
↓
double code(double wj, double x) {
double tmp;
if (wj <= 1.1e-13) {
tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * pow(wj, 2.0)) + (x + (-2.0 * (x * wj)));
} else {
tmp = wj + (((x / exp(wj)) - wj) / (1.0 + wj));
}
return tmp;
}
! Original update: code = wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
! NOTE(review): looks like one Newton-style step for w*exp(w) = x — confirm.
real(8) function code(wj, x)
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: ew
    ! hoist the common exponential; arithmetic is otherwise unchanged
    ew = exp(wj)
    code = wj - ((wj * ew - x) / (ew + wj * ew))
end function
↓
! Rewritten update: quadratic branch for tiny wj, simplified step otherwise.
real(8) function code(wj, x)
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: quad, lin
    if (wj <= 1.1d-13) then
        ! (1 + 2.5*x)*wj**2 + x - 2*x*wj, grouped as generated
        quad = 1.0d0 - ((x * (-4.0d0)) + (x * 1.5d0))
        lin = x + ((-2.0d0) * (x * wj))
        code = quad * (wj ** 2.0d0) + lin
    else
        code = wj + (((x / exp(wj)) - wj) / (1.0d0 + wj))
    end if
end function
/**
 * Original update: wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
 * NOTE(review): matches one Newton-style step for w*exp(w) = x — confirm.
 *
 * @param wj current estimate
 * @param x  target value
 * @return the updated estimate
 */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);
    return wj - ((wj * ew - x) / (ew + wj * ew));
}
↓
/**
 * Rewritten update: quadratic branch for tiny wj (avoids cancellation near 0),
 * simplified step wj + (x/e^wj - wj)/(1 + wj) otherwise.
 *
 * @param wj current estimate
 * @param x  target value
 * @return the updated estimate
 */
public static double code(double wj, double x) {
    if (wj <= 1.1e-13) {
        // (1 + 2.5*x)*wj^2 + x - 2*x*wj, grouped as generated
        double quad = 1.0 - ((x * -4.0) + (x * 1.5));
        double lin = x + (-2.0 * (x * wj));
        return quad * Math.pow(wj, 2.0) + lin;
    }
    double ew = Math.exp(wj);
    return wj + (((x / ew) - wj) / (1.0 + wj));
}
def code(wj, x):
    """Original update: wj - (wj*e^wj - x) / (e^wj + wj*e^wj).

    NOTE(review): matches one Newton-style step for w*exp(w) = x — confirm.
    """
    ew = math.exp(wj)
    return wj - ((wj * ew - x) / (ew + wj * ew))
↓
def code(wj, x):
    """Rewritten update: quadratic branch for tiny wj, simplified step otherwise.

    Small-wj branch evaluates (1 + 2.5*x)*wj**2 + x - 2*x*wj; the general
    branch evaluates wj + (x/e^wj - wj)/(1 + wj).
    """
    if wj <= 1.1e-13:
        quad = 1.0 - ((x * -4.0) + (x * 1.5))
        lin = x + (-2.0 * (x * wj))
        return quad * math.pow(wj, 2.0) + lin
    return wj + (((x / math.exp(wj)) - wj) / (1.0 + wj))
# Original update: wj - (wj*e^wj - x) / (e^wj + wj*e^wj), with every
# intermediate forced to Float64 (wrappers preserved for non-Float64 inputs).
function code(wj, x)
    ew = exp(wj)
    num = Float64(Float64(wj * ew) - x)
    den = Float64(ew + Float64(wj * ew))
    return Float64(wj - Float64(num / den))
end
↓
# Rewritten update: quadratic branch for tiny wj, simplified step otherwise.
# Float64 wrappers preserved exactly as generated.
function code(wj, x)
    if wj <= 1.1e-13
        quad = Float64(1.0 - Float64(Float64(x * -4.0) + Float64(x * 1.5)))
        lin = Float64(x + Float64(-2.0 * Float64(x * wj)))
        return Float64(Float64(quad * (wj ^ 2.0)) + lin)
    end
    return Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(1.0 + wj)))
end
function y = code(wj, x)
    % Original update: wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
    % NOTE(review): matches one Newton-style step for w*exp(w) = x -- confirm.
    ew = exp(wj);
    y = wj - (((wj * ew) - x) / (ew + (wj * ew)));
end
↓
function y = code(wj, x)
    % Rewritten update: quadratic branch for tiny wj, simplified step otherwise.
    if (wj <= 1.1e-13)
        % (1 + 2.5*x)*wj^2 + x - 2*x*wj, grouped as generated
        quad = 1.0 - ((x * -4.0) + (x * 1.5));
        y = quad * (wj ^ 2.0) + (x + (-2.0 * (x * wj)));
    else
        y = wj + (((x / exp(wj)) - wj) / (1.0 + wj));
    end
end
(* Original expression: wj - (wj*E^wj - x)/(E^wj + wj*E^wj), with every intermediate rounded via N[..., $MachinePrecision]. NOTE(review): matches one Newton-style step for w*E^w = x -- confirm. *)
code[wj_, x_] := N[(wj - N[(N[(N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
↓
(* Rewritten expression: for wj <= 1.1e-13 use the quadratic (1 + 2.5 x) wj^2 + x - 2 x wj; otherwise wj + (x/E^wj - wj)/(1 + wj). All intermediates rounded via N[..., $MachinePrecision], exactly mirroring the FPCore rewrite. *)
code[wj_, x_] := If[LessEqual[wj, 1.1e-13], N[(N[(N[(1.0 - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(-2.0 * N[(x * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(1.0 + wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}}
↓
\begin{array}{l}
\mathbf{if}\;wj \leq 1.1 \cdot 10^{-13}:\\
\;\;\;\;\left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(x \cdot wj\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj}\\
\end{array}