
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-Raphson refinement step for solving w*exp(w) = x
! (Lambert W): w - (w*e^w - x) / (e^w + w*e^w).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = w*e^w, the quantity being driven toward x
t_0 = wj * exp(wj)
! denominator exp(wj) + t_0 is the derivative of w*e^w at wj
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** One Newton-Raphson refinement step for w*e^w = x (Lambert W). */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    double wew = wj * ew;
    return wj - (wew - x) / (ew + wew);
}
def code(wj, x):
    """One Newton-Raphson refinement step for solving w*exp(w) = x (Lambert W).

    The original report line collapsed the body onto the ``def`` line,
    which is a SyntaxError; semantics are unchanged here.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton-Raphson refinement step for w*exp(w) = x (Lambert W).
# Original report line lacked statement separators (unparseable); reformatted.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton-Raphson refinement step for w*exp(w) = x (Lambert W).
% Original report line fused the signature and body (invalid MATLAB); reformatted.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 20 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
// One Newton-Raphson step for w*e^w = x (Lambert W refinement);
// exp(wj) + t_0 = e^wj*(1 + wj) is d/dw of w*e^w.
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-Raphson refinement step for w*exp(w) = x (Lambert W).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = w*e^w; denominator below is its derivative e^w*(1 + w)
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
// One Newton-Raphson step for w*e^w = x (Lambert W refinement).
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton-Raphson refinement step for w*exp(w) = x (Lambert W).

    Reformatted from the report's collapsed (syntactically invalid) one-liner.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton-Raphson refinement step for w*exp(w) = x (Lambert W).
# Reformatted from the report's collapsed (unparseable) one-liner.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton-Raphson refinement step for w*exp(w) = x (Lambert W).
% Reformatted from the report's collapsed (invalid) one-liner.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (exp (- wj))) (t_1 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_1) (+ (exp wj) t_1))) 4e-9)
(*
x
(+
(/ t_0 (+ wj 1.0))
(*
(pow wj 2.0)
(- (/ 1.0 x) (* wj (+ (/ 1.0 x) (* wj (+ (/ wj x) (/ -1.0 x)))))))))
(+ wj (/ (- wj (* x t_0)) (- -1.0 wj))))))
// Herbie-rewritten Newton step for w*e^w = x: branches on the size of the
// refined iterate (threshold 4e-9 produced by Herbie's regime inference).
double code(double wj, double x) {
double t_0 = exp(-wj);
double t_1 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_1) / (exp(wj) + t_1))) <= 4e-9) {
// Tiny-result regime: series-style polynomial form in wj.
tmp = x * ((t_0 / (wj + 1.0)) + (pow(wj, 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))));
} else {
// General regime: closed form wj + (wj - x*e^-wj)/(-1 - wj).
tmp = wj + ((wj - (x * t_0)) / (-1.0 - wj));
}
return tmp;
}
! Herbie-rewritten Newton step for w*exp(w) = x with a regime split at 4e-9
! (threshold produced by Herbie's sampling).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = exp(-wj)
t_1 = wj * exp(wj)
! branch on the refined estimate itself
if ((wj + ((x - t_1) / (exp(wj) + t_1))) <= 4d-9) then
! tiny-result regime: series-style polynomial form
tmp = x * ((t_0 / (wj + 1.0d0)) + ((wj ** 2.0d0) * ((1.0d0 / x) - (wj * ((1.0d0 / x) + (wj * ((wj / x) + ((-1.0d0) / x))))))))
else
! general regime: wj + (wj - x*e^-wj)/(-1 - wj)
tmp = wj + ((wj - (x * t_0)) / ((-1.0d0) - wj))
end if
code = tmp
end function
// Herbie-rewritten Newton step for w*e^w = x; regime split at 4e-9 on the
// refined iterate (threshold produced by Herbie).
public static double code(double wj, double x) {
double t_0 = Math.exp(-wj);
double t_1 = wj * Math.exp(wj);
double tmp;
if ((wj + ((x - t_1) / (Math.exp(wj) + t_1))) <= 4e-9) {
// tiny-result regime: series-style polynomial form
tmp = x * ((t_0 / (wj + 1.0)) + (Math.pow(wj, 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))));
} else {
// general regime: wj + (wj - x*e^-wj)/(-1 - wj)
tmp = wj + ((wj - (x * t_0)) / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Newton step for w*exp(w) = x with a 4e-9 regime split.

    Reformatted from the report's collapsed (syntactically invalid) one-liner;
    expressions and threshold are unchanged.
    """
    t_0 = math.exp(-wj)
    t_1 = wj * math.exp(wj)
    tmp = 0
    if (wj + ((x - t_1) / (math.exp(wj) + t_1))) <= 4e-9:
        # tiny-result regime: series-style polynomial form in wj
        tmp = x * ((t_0 / (wj + 1.0)) + (math.pow(wj, 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))))
    else:
        # general regime: closed form wj + (wj - x*e^-wj)/(-1 - wj)
        tmp = wj + ((wj - (x * t_0)) / (-1.0 - wj))
    return tmp
function code(wj, x) t_0 = exp(Float64(-wj)) t_1 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_1) / Float64(exp(wj) + t_1))) <= 4e-9) tmp = Float64(x * Float64(Float64(t_0 / Float64(wj + 1.0)) + Float64((wj ^ 2.0) * Float64(Float64(1.0 / x) - Float64(wj * Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(wj / x) + Float64(-1.0 / x))))))))); else tmp = Float64(wj + Float64(Float64(wj - Float64(x * t_0)) / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) t_0 = exp(-wj); t_1 = wj * exp(wj); tmp = 0.0; if ((wj + ((x - t_1) / (exp(wj) + t_1))) <= 4e-9) tmp = x * ((t_0 / (wj + 1.0)) + ((wj ^ 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x)))))))); else tmp = wj + ((wj - (x * t_0)) / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[Exp[(-wj)], $MachinePrecision]}, Block[{t$95$1 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$1), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4e-9], N[(x * N[(N[(t$95$0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[Power[wj, 2.0], $MachinePrecision] * N[(N[(1.0 / x), $MachinePrecision] - N[(wj * N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(wj / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(wj - N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{-wj}\\
t_1 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t\_1}{e^{wj} + t\_1} \leq 4 \cdot 10^{-9}:\\
\;\;\;\;x \cdot \left(\frac{t\_0}{wj + 1} + {wj}^{2} \cdot \left(\frac{1}{x} - wj \cdot \left(\frac{1}{x} + wj \cdot \left(\frac{wj}{x} + \frac{-1}{x}\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - x \cdot t\_0}{-1 - wj}\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 4.00000000000000025e-9Initial program 72.7%
distribute-rgt1-in73.9%
associate-/l/73.9%
div-sub72.8%
associate-/l*72.8%
*-inverses73.9%
*-rgt-identity73.9%
Simplified73.9%
Taylor expanded in x around inf 74.5%
associate--l+88.2%
associate-/r*88.2%
exp-neg88.2%
+-commutative88.2%
+-commutative88.2%
Simplified88.2%
Taylor expanded in wj around 0 99.9%
if 4.00000000000000025e-9 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 92.8%
distribute-rgt1-in97.4%
associate-/l/97.5%
div-sub92.9%
associate-/l*92.9%
*-inverses99.8%
*-rgt-identity99.8%
Simplified99.8%
clear-num99.7%
associate-/r/99.7%
rec-exp99.8%
Applied egg-rr99.8%
Final simplification99.9%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ (exp (- wj)) (+ wj 1.0))))
(if (<= wj 8.8e-5)
(*
x
(+ t_0 (* (pow wj 2.0) (+ (/ 1.0 x) (* wj (+ (/ wj x) (/ -1.0 x)))))))
(* x (+ t_0 (/ (+ wj (/ wj (- -1.0 wj))) x))))))
// Herbie variant: shared term t_0 = e^-wj/(wj+1); series form for small wj
// (split at 8.8e-5, chosen by Herbie), closed form otherwise.
double code(double wj, double x) {
double t_0 = exp(-wj) / (wj + 1.0);
double tmp;
if (wj <= 8.8e-5) {
// small-wj regime: polynomial correction terms
tmp = x * (t_0 + (pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))));
} else {
// general regime
tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
! Herbie variant: t_0 = e^-wj/(wj+1); series form for wj <= 8.8e-5
! (split chosen by Herbie), closed form otherwise.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = exp(-wj) / (wj + 1.0d0)
if (wj <= 8.8d-5) then
! small-wj regime: polynomial correction terms
tmp = x * (t_0 + ((wj ** 2.0d0) * ((1.0d0 / x) + (wj * ((wj / x) + ((-1.0d0) / x))))))
else
! general regime
tmp = x * (t_0 + ((wj + (wj / ((-1.0d0) - wj))) / x))
end if
code = tmp
end function
// Herbie variant: t_0 = e^-wj/(wj+1); series form for wj <= 8.8e-5
// (split chosen by Herbie), closed form otherwise.
public static double code(double wj, double x) {
double t_0 = Math.exp(-wj) / (wj + 1.0);
double tmp;
if (wj <= 8.8e-5) {
// small-wj regime: polynomial correction terms
tmp = x * (t_0 + (Math.pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))));
} else {
// general regime
tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
def code(wj, x):
    """Herbie variant with t_0 = exp(-wj)/(wj+1) and a split at wj = 8.8e-5.

    Reformatted from the report's collapsed (syntactically invalid) one-liner;
    expressions and threshold are unchanged.
    """
    t_0 = math.exp(-wj) / (wj + 1.0)
    tmp = 0
    if wj <= 8.8e-5:
        # small-wj regime: polynomial correction terms
        tmp = x * (t_0 + (math.pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))
    else:
        # general regime
        tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x))
    return tmp
function code(wj, x) t_0 = Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) tmp = 0.0 if (wj <= 8.8e-5) tmp = Float64(x * Float64(t_0 + Float64((wj ^ 2.0) * Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(wj / x) + Float64(-1.0 / x))))))); else tmp = Float64(x * Float64(t_0 + Float64(Float64(wj + Float64(wj / Float64(-1.0 - wj))) / x))); end return tmp end
function tmp_2 = code(wj, x) t_0 = exp(-wj) / (wj + 1.0); tmp = 0.0; if (wj <= 8.8e-5) tmp = x * (t_0 + ((wj ^ 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x)))))); else tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, 8.8e-5], N[(x * N[(t$95$0 + N[(N[Power[wj, 2.0], $MachinePrecision] * N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(wj / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(t$95$0 + N[(N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{e^{-wj}}{wj + 1}\\
\mathbf{if}\;wj \leq 8.8 \cdot 10^{-5}:\\
\;\;\;\;x \cdot \left(t\_0 + {wj}^{2} \cdot \left(\frac{1}{x} + wj \cdot \left(\frac{wj}{x} + \frac{-1}{x}\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(t\_0 + \frac{wj + \frac{wj}{-1 - wj}}{x}\right)\\
\end{array}
\end{array}
if wj < 8.7999999999999998e-5Initial program 79.7%
distribute-rgt1-in82.1%
associate-/l/82.2%
div-sub79.7%
associate-/l*79.7%
*-inverses82.2%
*-rgt-identity82.2%
Simplified82.2%
Taylor expanded in x around inf 82.5%
associate--l+91.9%
associate-/r*91.9%
exp-neg91.9%
+-commutative91.9%
+-commutative91.9%
Simplified91.9%
Taylor expanded in wj around 0 99.2%
if 8.7999999999999998e-5 < wj Initial program 74.8%
distribute-rgt1-in74.8%
associate-/l/75.7%
div-sub75.7%
associate-/l*75.7%
*-inverses97.9%
*-rgt-identity97.9%
Simplified97.9%
Taylor expanded in x around inf 97.6%
associate--l+97.8%
associate-/r*97.8%
exp-neg97.8%
+-commutative97.8%
+-commutative97.8%
Simplified97.8%
Taylor expanded in x around 0 98.3%
+-commutative98.3%
Simplified98.3%
Final simplification99.1%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ (exp (- wj)) (+ wj 1.0))))
(if (<= wj -1.2e-5)
(* x (+ t_0 (- (* wj (/ 1.0 x)) (/ wj (fma wj x x)))))
(if (<= wj 8.9e-26)
(*
x
(+
(*
(pow wj 2.0)
(- (/ 1.0 x) (* wj (+ (/ 1.0 x) (* wj (+ (/ wj x) (/ -1.0 x)))))))
(+ 1.0 (* wj -2.0))))
(* x (+ t_0 (/ (+ wj (/ wj (- -1.0 wj))) x)))))))
// Herbie variant with three wj regimes (-1.2e-5 and 8.9e-26 splits chosen by
// Herbie); the negative regime uses fma(wj, x, x) = wj*x + x for accuracy.
double code(double wj, double x) {
double t_0 = exp(-wj) / (wj + 1.0);
double tmp;
if (wj <= -1.2e-5) {
// negative regime: fused multiply-add keeps wj*x + x accurate
tmp = x * (t_0 + ((wj * (1.0 / x)) - (wj / fma(wj, x, x))));
} else if (wj <= 8.9e-26) {
// near-zero regime: polynomial (series-style) form
tmp = x * ((pow(wj, 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))) + (1.0 + (wj * -2.0)));
} else {
// positive regime
tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
# Herbie variant with three wj regimes (-1.2e-5, 8.9e-26); negative regime
# uses fma(wj, x, x). Reformatted from the report's collapsed one-liner,
# which lacked statement separators; expressions are unchanged.
function code(wj, x)
    t_0 = Float64(exp(Float64(-wj)) / Float64(wj + 1.0))
    tmp = 0.0
    if (wj <= -1.2e-5)
        tmp = Float64(x * Float64(t_0 + Float64(Float64(wj * Float64(1.0 / x)) - Float64(wj / fma(wj, x, x)))))
    elseif (wj <= 8.9e-26)
        tmp = Float64(x * Float64(Float64((wj ^ 2.0) * Float64(Float64(1.0 / x) - Float64(wj * Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(wj / x) + Float64(-1.0 / x))))))) + Float64(1.0 + Float64(wj * -2.0))))
    else
        tmp = Float64(x * Float64(t_0 + Float64(Float64(wj + Float64(wj / Float64(-1.0 - wj))) / x)))
    end
    return tmp
end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -1.2e-5], N[(x * N[(t$95$0 + N[(N[(wj * N[(1.0 / x), $MachinePrecision]), $MachinePrecision] - N[(wj / N[(wj * x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 8.9e-26], N[(x * N[(N[(N[Power[wj, 2.0], $MachinePrecision] * N[(N[(1.0 / x), $MachinePrecision] - N[(wj * N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(wj / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(wj * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(t$95$0 + N[(N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{e^{-wj}}{wj + 1}\\
\mathbf{if}\;wj \leq -1.2 \cdot 10^{-5}:\\
\;\;\;\;x \cdot \left(t\_0 + \left(wj \cdot \frac{1}{x} - \frac{wj}{\mathsf{fma}\left(wj, x, x\right)}\right)\right)\\
\mathbf{elif}\;wj \leq 8.9 \cdot 10^{-26}:\\
\;\;\;\;x \cdot \left({wj}^{2} \cdot \left(\frac{1}{x} - wj \cdot \left(\frac{1}{x} + wj \cdot \left(\frac{wj}{x} + \frac{-1}{x}\right)\right)\right) + \left(1 + wj \cdot -2\right)\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(t\_0 + \frac{wj + \frac{wj}{-1 - wj}}{x}\right)\\
\end{array}
\end{array}
if wj < -1.2e-5Initial program 13.8%
distribute-rgt1-in99.6%
associate-/l/99.8%
div-sub14.1%
associate-/l*14.1%
*-inverses99.8%
*-rgt-identity99.8%
Simplified99.8%
Taylor expanded in x around inf 99.6%
associate--l+99.6%
associate-/r*99.6%
exp-neg99.6%
+-commutative99.6%
+-commutative99.6%
Simplified99.6%
div-inv100.0%
fma-neg100.0%
distribute-rgt-in100.0%
*-un-lft-identity100.0%
fma-define100.0%
Applied egg-rr100.0%
fma-undefine100.0%
unsub-neg100.0%
Simplified100.0%
if -1.2e-5 < wj < 8.9000000000000001e-26Initial program 81.1%
distribute-rgt1-in81.1%
associate-/l/81.1%
div-sub81.1%
associate-/l*81.1%
*-inverses81.1%
*-rgt-identity81.1%
Simplified81.1%
Taylor expanded in x around inf 81.5%
associate--l+91.5%
associate-/r*91.5%
exp-neg91.5%
+-commutative91.5%
+-commutative91.5%
Simplified91.5%
Taylor expanded in wj around 0 99.9%
Taylor expanded in wj around 0 99.9%
*-commutative99.9%
Simplified99.9%
if 8.9000000000000001e-26 < wj Initial program 85.3%
distribute-rgt1-in85.3%
associate-/l/86.0%
div-sub86.0%
associate-/l*86.0%
*-inverses97.7%
*-rgt-identity97.7%
Simplified97.7%
Taylor expanded in x around inf 97.5%
associate--l+97.6%
associate-/r*97.6%
exp-neg97.7%
+-commutative97.7%
+-commutative97.7%
Simplified97.7%
Taylor expanded in x around 0 98.0%
+-commutative98.0%
Simplified98.0%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(if (<= wj -1.2e-5)
(- wj (/ (- (/ x (exp wj)) wj) (- -1.0 wj)))
(if (<= wj 8.9e-26)
(*
x
(+
(*
(pow wj 2.0)
(- (/ 1.0 x) (* wj (+ (/ 1.0 x) (* wj (+ (/ wj x) (/ -1.0 x)))))))
(+ 1.0 (* wj -2.0))))
(* x (+ (/ (exp (- wj)) (+ wj 1.0)) (/ (+ wj (/ wj (- -1.0 wj))) x))))))
// Herbie variant, three regimes (splits -1.2e-5 and 8.9e-26 chosen by Herbie):
// rearranged closed form for negative wj, series near zero, shared form above.
double code(double wj, double x) {
double tmp;
if (wj <= -1.2e-5) {
// negative regime: divide x by e^wj instead of multiplying by e^-wj
tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj));
} else if (wj <= 8.9e-26) {
// near-zero regime: polynomial (series-style) form
tmp = x * ((pow(wj, 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))) + (1.0 + (wj * -2.0)));
} else {
// positive regime
tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
! Herbie variant, three regimes (splits -1.2e-5 and 8.9e-26 chosen by Herbie).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-1.2d-5)) then
! negative regime: x/e^wj form
tmp = wj - (((x / exp(wj)) - wj) / ((-1.0d0) - wj))
else if (wj <= 8.9d-26) then
! near-zero regime: polynomial (series-style) form
tmp = x * (((wj ** 2.0d0) * ((1.0d0 / x) - (wj * ((1.0d0 / x) + (wj * ((wj / x) + ((-1.0d0) / x))))))) + (1.0d0 + (wj * (-2.0d0))))
else
! positive regime
tmp = x * ((exp(-wj) / (wj + 1.0d0)) + ((wj + (wj / ((-1.0d0) - wj))) / x))
end if
code = tmp
end function
// Herbie variant, three regimes (splits -1.2e-5 and 8.9e-26 chosen by Herbie).
public static double code(double wj, double x) {
double tmp;
if (wj <= -1.2e-5) {
// negative regime: x/e^wj form
tmp = wj - (((x / Math.exp(wj)) - wj) / (-1.0 - wj));
} else if (wj <= 8.9e-26) {
// near-zero regime: polynomial (series-style) form
tmp = x * ((Math.pow(wj, 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))) + (1.0 + (wj * -2.0)));
} else {
// positive regime
tmp = x * ((Math.exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
def code(wj, x):
    """Herbie variant with three wj regimes (splits -1.2e-5 and 8.9e-26).

    Reformatted from the report's collapsed (syntactically invalid) one-liner;
    expressions and thresholds are unchanged.
    """
    tmp = 0
    if wj <= -1.2e-5:
        # negative regime: x/e^wj form
        tmp = wj - (((x / math.exp(wj)) - wj) / (-1.0 - wj))
    elif wj <= 8.9e-26:
        # near-zero regime: polynomial (series-style) form
        tmp = x * ((math.pow(wj, 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))) + (1.0 + (wj * -2.0)))
    else:
        # positive regime
        tmp = x * ((math.exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -1.2e-5) tmp = Float64(wj - Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(-1.0 - wj))); elseif (wj <= 8.9e-26) tmp = Float64(x * Float64(Float64((wj ^ 2.0) * Float64(Float64(1.0 / x) - Float64(wj * Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(wj / x) + Float64(-1.0 / x))))))) + Float64(1.0 + Float64(wj * -2.0)))); else tmp = Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64(Float64(wj + Float64(wj / Float64(-1.0 - wj))) / x))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -1.2e-5) tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj)); elseif (wj <= 8.9e-26) tmp = x * (((wj ^ 2.0) * ((1.0 / x) - (wj * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))) + (1.0 + (wj * -2.0))); else tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -1.2e-5], N[(wj - N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 8.9e-26], N[(x * N[(N[(N[Power[wj, 2.0], $MachinePrecision] * N[(N[(1.0 / x), $MachinePrecision] - N[(wj * N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(wj / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(wj * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1.2 \cdot 10^{-5}:\\
\;\;\;\;wj - \frac{\frac{x}{e^{wj}} - wj}{-1 - wj}\\
\mathbf{elif}\;wj \leq 8.9 \cdot 10^{-26}:\\
\;\;\;\;x \cdot \left({wj}^{2} \cdot \left(\frac{1}{x} - wj \cdot \left(\frac{1}{x} + wj \cdot \left(\frac{wj}{x} + \frac{-1}{x}\right)\right)\right) + \left(1 + wj \cdot -2\right)\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{e^{-wj}}{wj + 1} + \frac{wj + \frac{wj}{-1 - wj}}{x}\right)\\
\end{array}
\end{array}
if wj < -1.2e-5Initial program 13.8%
distribute-rgt1-in99.6%
associate-/l/99.8%
div-sub14.1%
associate-/l*14.1%
*-inverses99.8%
*-rgt-identity99.8%
Simplified99.8%
if -1.2e-5 < wj < 8.9000000000000001e-26Initial program 81.1%
distribute-rgt1-in81.1%
associate-/l/81.1%
div-sub81.1%
associate-/l*81.1%
*-inverses81.1%
*-rgt-identity81.1%
Simplified81.1%
Taylor expanded in x around inf 81.5%
associate--l+91.5%
associate-/r*91.5%
exp-neg91.5%
+-commutative91.5%
+-commutative91.5%
Simplified91.5%
Taylor expanded in wj around 0 99.9%
Taylor expanded in wj around 0 99.9%
*-commutative99.9%
Simplified99.9%
if 8.9000000000000001e-26 < wj Initial program 85.3%
distribute-rgt1-in85.3%
associate-/l/86.0%
div-sub86.0%
associate-/l*86.0%
*-inverses97.7%
*-rgt-identity97.7%
Simplified97.7%
Taylor expanded in x around inf 97.5%
associate--l+97.6%
associate-/r*97.6%
exp-neg97.7%
+-commutative97.7%
+-commutative97.7%
Simplified97.7%
Taylor expanded in x around 0 98.0%
+-commutative98.0%
Simplified98.0%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(if (<= wj -4.2e-6)
(- wj (/ (- (/ x (exp wj)) wj) (- -1.0 wj)))
(if (<= wj 8.9e-26)
(-
x
(*
wj
(+
x
(+
x
(*
wj
(+
(* x -2.0)
(+ -1.0 (- (* wj (+ x (+ 1.0 (+ x (* x 0.5))))) (* x 0.5)))))))))
(* x (+ (/ (exp (- wj)) (+ wj 1.0)) (/ (+ wj (/ wj (- -1.0 wj))) x))))))
// Herbie variant, three regimes (splits -4.2e-6 and 8.9e-26 chosen by Herbie):
// closed forms on the outside, a nested Horner-style polynomial near zero.
double code(double wj, double x) {
double tmp;
if (wj <= -4.2e-6) {
// negative regime: x/e^wj form
tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj));
} else if (wj <= 8.9e-26) {
// near-zero regime: nested polynomial in wj and x
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
// positive regime
tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
! Herbie variant, three regimes (splits -4.2e-6 and 8.9e-26 chosen by Herbie).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-4.2d-6)) then
! negative regime: x/e^wj form
tmp = wj - (((x / exp(wj)) - wj) / ((-1.0d0) - wj))
else if (wj <= 8.9d-26) then
! near-zero regime: nested polynomial in wj and x
tmp = x - (wj * (x + (x + (wj * ((x * (-2.0d0)) + ((-1.0d0) + ((wj * (x + (1.0d0 + (x + (x * 0.5d0))))) - (x * 0.5d0))))))))
else
! positive regime
tmp = x * ((exp(-wj) / (wj + 1.0d0)) + ((wj + (wj / ((-1.0d0) - wj))) / x))
end if
code = tmp
end function
// Herbie variant, three regimes (splits -4.2e-6 and 8.9e-26 chosen by Herbie).
public static double code(double wj, double x) {
double tmp;
if (wj <= -4.2e-6) {
// negative regime: x/e^wj form
tmp = wj - (((x / Math.exp(wj)) - wj) / (-1.0 - wj));
} else if (wj <= 8.9e-26) {
// near-zero regime: nested polynomial in wj and x
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
// positive regime
tmp = x * ((Math.exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
def code(wj, x):
    """Herbie variant with three wj regimes (splits -4.2e-6 and 8.9e-26).

    Reformatted from the report's collapsed (syntactically invalid) one-liner;
    expressions and thresholds are unchanged.
    """
    tmp = 0
    if wj <= -4.2e-6:
        # negative regime: x/e^wj form
        tmp = wj - (((x / math.exp(wj)) - wj) / (-1.0 - wj))
    elif wj <= 8.9e-26:
        # near-zero regime: nested polynomial in wj and x
        tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))))
    else:
        # positive regime
        tmp = x * ((math.exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -4.2e-6) tmp = Float64(wj - Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(-1.0 - wj))); elseif (wj <= 8.9e-26) tmp = Float64(x - Float64(wj * Float64(x + Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(-1.0 + Float64(Float64(wj * Float64(x + Float64(1.0 + Float64(x + Float64(x * 0.5))))) - Float64(x * 0.5))))))))); else tmp = Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64(Float64(wj + Float64(wj / Float64(-1.0 - wj))) / x))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -4.2e-6) tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj)); elseif (wj <= 8.9e-26) tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5)))))))); else tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj + (wj / (-1.0 - wj))) / x)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -4.2e-6], N[(wj - N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 8.9e-26], N[(x - N[(wj * N[(x + N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(-1.0 + N[(N[(wj * N[(x + N[(1.0 + N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.2 \cdot 10^{-6}:\\
\;\;\;\;wj - \frac{\frac{x}{e^{wj}} - wj}{-1 - wj}\\
\mathbf{elif}\;wj \leq 8.9 \cdot 10^{-26}:\\
\;\;\;\;x - wj \cdot \left(x + \left(x + wj \cdot \left(x \cdot -2 + \left(-1 + \left(wj \cdot \left(x + \left(1 + \left(x + x \cdot 0.5\right)\right)\right) - x \cdot 0.5\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{e^{-wj}}{wj + 1} + \frac{wj + \frac{wj}{-1 - wj}}{x}\right)\\
\end{array}
\end{array}
if wj < -4.1999999999999996e-6Initial program 21.6%
distribute-rgt1-in96.6%
associate-/l/97.0%
div-sub22.0%
associate-/l*22.0%
*-inverses97.0%
*-rgt-identity97.0%
Simplified97.0%
if -4.1999999999999996e-6 < wj < 8.9000000000000001e-26Initial program 81.1%
distribute-rgt1-in81.1%
associate-/l/81.1%
div-sub81.1%
associate-/l*81.1%
*-inverses81.1%
*-rgt-identity81.1%
Simplified81.1%
Taylor expanded in wj around 0 81.1%
associate-*r*81.1%
neg-mul-181.1%
distribute-rgt-out81.1%
metadata-eval81.1%
Simplified81.1%
Taylor expanded in wj around 0 99.9%
if 8.9000000000000001e-26 < wj Initial program 85.3%
distribute-rgt1-in85.3%
associate-/l/86.0%
div-sub86.0%
associate-/l*86.0%
*-inverses97.7%
*-rgt-identity97.7%
Simplified97.7%
Taylor expanded in x around inf 97.5%
associate--l+97.6%
associate-/r*97.6%
exp-neg97.7%
+-commutative97.7%
+-commutative97.7%
Simplified97.7%
Taylor expanded in x around 0 98.0%
+-commutative98.0%
Simplified98.0%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ (exp (- wj)) (+ wj 1.0))))
(if (<= wj -1.5e-8)
(* x (+ t_0 (+ (/ wj x) (/ wj (* x (- -1.0 wj))))))
(if (<= wj 8.9e-26)
(+ x (* wj (+ (* wj (- 1.0 (* x -2.5))) (* x -2.0))))
(* x (+ t_0 (/ (+ wj (/ wj (- -1.0 wj))) x)))))))
// Herbie variant, three regimes (splits -1.5e-8 and 8.9e-26 chosen by Herbie);
// t_0 = e^-wj/(wj+1) is shared by the outer regimes.
double code(double wj, double x) {
double t_0 = exp(-wj) / (wj + 1.0);
double tmp;
if (wj <= -1.5e-8) {
// negative regime
tmp = x * (t_0 + ((wj / x) + (wj / (x * (-1.0 - wj)))));
} else if (wj <= 8.9e-26) {
// near-zero regime: quadratic polynomial correction
tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)));
} else {
// positive regime
tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
! Herbie variant, three regimes (splits -1.5e-8 and 8.9e-26 chosen by Herbie);
! t_0 = e^-wj/(wj+1) is shared by the outer regimes.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = exp(-wj) / (wj + 1.0d0)
if (wj <= (-1.5d-8)) then
! negative regime
tmp = x * (t_0 + ((wj / x) + (wj / (x * ((-1.0d0) - wj)))))
else if (wj <= 8.9d-26) then
! near-zero regime: quadratic polynomial correction
tmp = x + (wj * ((wj * (1.0d0 - (x * (-2.5d0)))) + (x * (-2.0d0))))
else
! positive regime
tmp = x * (t_0 + ((wj + (wj / ((-1.0d0) - wj))) / x))
end if
code = tmp
end function
// Herbie variant, three regimes (splits -1.5e-8 and 8.9e-26 chosen by Herbie);
// t_0 = e^-wj/(wj+1) is shared by the outer regimes.
public static double code(double wj, double x) {
double t_0 = Math.exp(-wj) / (wj + 1.0);
double tmp;
if (wj <= -1.5e-8) {
// negative regime
tmp = x * (t_0 + ((wj / x) + (wj / (x * (-1.0 - wj)))));
} else if (wj <= 8.9e-26) {
// near-zero regime: quadratic polynomial correction
tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)));
} else {
// positive regime
tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x));
}
return tmp;
}
def code(wj, x):
    """Herbie variant with three wj regimes (splits -1.5e-8 and 8.9e-26).

    Reformatted from the report's collapsed (syntactically invalid) one-liner;
    expressions and thresholds are unchanged.
    """
    t_0 = math.exp(-wj) / (wj + 1.0)
    tmp = 0
    if wj <= -1.5e-8:
        # negative regime
        tmp = x * (t_0 + ((wj / x) + (wj / (x * (-1.0 - wj)))))
    elif wj <= 8.9e-26:
        # near-zero regime: quadratic polynomial correction
        tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)))
    else:
        # positive regime
        tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x))
    return tmp
function code(wj, x) t_0 = Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) tmp = 0.0 if (wj <= -1.5e-8) tmp = Float64(x * Float64(t_0 + Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj)))))); elseif (wj <= 8.9e-26) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - Float64(x * -2.5))) + Float64(x * -2.0)))); else tmp = Float64(x * Float64(t_0 + Float64(Float64(wj + Float64(wj / Float64(-1.0 - wj))) / x))); end return tmp end
function tmp_2 = code(wj, x) t_0 = exp(-wj) / (wj + 1.0); tmp = 0.0; if (wj <= -1.5e-8) tmp = x * (t_0 + ((wj / x) + (wj / (x * (-1.0 - wj))))); elseif (wj <= 8.9e-26) tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0))); else tmp = x * (t_0 + ((wj + (wj / (-1.0 - wj))) / x)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -1.5e-8], N[(x * N[(t$95$0 + N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 8.9e-26], N[(x + N[(wj * N[(N[(wj * N[(1.0 - N[(x * -2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(t$95$0 + N[(N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{e^{-wj}}{wj + 1}\\
\mathbf{if}\;wj \leq -1.5 \cdot 10^{-8}:\\
\;\;\;\;x \cdot \left(t\_0 + \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\right)\\
\mathbf{elif}\;wj \leq 8.9 \cdot 10^{-26}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(1 - x \cdot -2.5\right) + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(t\_0 + \frac{wj + \frac{wj}{-1 - wj}}{x}\right)\\
\end{array}
\end{array}
if wj < -1.49999999999999987e-8Initial program 27.1%
distribute-rgt1-in93.8%
associate-/l/93.9%
div-sub27.3%
associate-/l*27.3%
*-inverses93.9%
*-rgt-identity93.9%
Simplified93.9%
Taylor expanded in x around inf 95.4%
associate--l+95.4%
associate-/r*95.4%
exp-neg95.4%
+-commutative95.4%
+-commutative95.4%
Simplified95.4%
if -1.49999999999999987e-8 < wj < 8.9000000000000001e-26Initial program 81.2%
distribute-rgt1-in81.2%
associate-/l/81.2%
div-sub81.2%
associate-/l*81.2%
*-inverses81.2%
*-rgt-identity81.2%
Simplified81.2%
Taylor expanded in wj around 0 100.0%
cancel-sign-sub-inv100.0%
metadata-eval100.0%
distribute-rgt-out100.0%
metadata-eval100.0%
*-commutative100.0%
Simplified100.0%
if 8.9000000000000001e-26 < wj Initial program 85.3%
distribute-rgt1-in85.3%
associate-/l/86.0%
div-sub86.0%
associate-/l*86.0%
*-inverses97.7%
*-rgt-identity97.7%
Simplified97.7%
Taylor expanded in x around inf 97.5%
associate--l+97.6%
associate-/r*97.6%
exp-neg97.7%
+-commutative97.7%
+-commutative97.7%
Simplified97.7%
Taylor expanded in x around 0 98.0%
+-commutative98.0%
Simplified98.0%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(if (<= wj -4.2e-6)
(- wj (/ (- (/ x (exp wj)) wj) (- -1.0 wj)))
(if (<= wj 4.3e-6)
(-
x
(*
wj
(+
x
(+
x
(*
wj
(+
(* x -2.0)
(+ -1.0 (- (* wj (+ x (+ 1.0 (+ x (* x 0.5))))) (* x 0.5)))))))))
(+ wj (/ (- wj (* x (exp (- wj)))) (- -1.0 wj))))))
// Herbie variant, three regimes (splits -4.2e-6 and 4.3e-6 chosen by Herbie):
// closed forms outside a symmetric near-zero band, polynomial inside it.
double code(double wj, double x) {
double tmp;
if (wj <= -4.2e-6) {
// negative regime: x/e^wj form
tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj));
} else if (wj <= 4.3e-6) {
// near-zero band: nested polynomial in wj and x
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
// positive regime: x*e^-wj form
tmp = wj + ((wj - (x * exp(-wj))) / (-1.0 - wj));
}
return tmp;
}
! Herbie variant, three regimes (splits -4.2e-6 and 4.3e-6 chosen by Herbie).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-4.2d-6)) then
! negative regime: x/e^wj form
tmp = wj - (((x / exp(wj)) - wj) / ((-1.0d0) - wj))
else if (wj <= 4.3d-6) then
! near-zero band: nested polynomial in wj and x
tmp = x - (wj * (x + (x + (wj * ((x * (-2.0d0)) + ((-1.0d0) + ((wj * (x + (1.0d0 + (x + (x * 0.5d0))))) - (x * 0.5d0))))))))
else
! positive regime: x*e^-wj form
tmp = wj + ((wj - (x * exp(-wj))) / ((-1.0d0) - wj))
end if
code = tmp
end function
// Herbie variant, three regimes (splits -4.2e-6 and 4.3e-6 chosen by Herbie).
public static double code(double wj, double x) {
double tmp;
if (wj <= -4.2e-6) {
// negative regime: x/e^wj form
tmp = wj - (((x / Math.exp(wj)) - wj) / (-1.0 - wj));
} else if (wj <= 4.3e-6) {
// near-zero band: nested polynomial in wj and x
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
// positive regime: x*e^-wj form
tmp = wj + ((wj - (x * Math.exp(-wj))) / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Herbie variant with three wj regimes (splits -4.2e-6 and 4.3e-6).

    Reformatted from the report's collapsed (syntactically invalid) one-liner;
    expressions and thresholds are unchanged.
    """
    tmp = 0
    if wj <= -4.2e-6:
        # negative regime: x/e^wj form
        tmp = wj - (((x / math.exp(wj)) - wj) / (-1.0 - wj))
    elif wj <= 4.3e-6:
        # near-zero band: nested polynomial in wj and x
        tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))))
    else:
        # positive regime: x*e^-wj form
        tmp = wj + ((wj - (x * math.exp(-wj))) / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -4.2e-6) tmp = Float64(wj - Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(-1.0 - wj))); elseif (wj <= 4.3e-6) tmp = Float64(x - Float64(wj * Float64(x + Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(-1.0 + Float64(Float64(wj * Float64(x + Float64(1.0 + Float64(x + Float64(x * 0.5))))) - Float64(x * 0.5))))))))); else tmp = Float64(wj + Float64(Float64(wj - Float64(x * exp(Float64(-wj)))) / Float64(-1.0 - wj))); end return tmp end
% Accuracy-rewritten update for w*exp(w) = x (Herbie output). Fixed: the
% generated code was flattened onto one line; MATLAB requires the function
% declaration and each statement on separate lines (or comma/semicolon
% separated). Expressions unchanged.
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= -4.2e-6)
        % Negative regime: quotient rescaled by exp(wj).
        tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj));
    elseif (wj <= 4.3e-6)
        % |wj| tiny: Taylor series in wj around 0.
        tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
    else
        % Positive regime: rescaled by exp(-wj).
        tmp = wj + ((wj - (x * exp(-wj))) / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := If[LessEqual[wj, -4.2e-6], N[(wj - N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 4.3e-6], N[(x - N[(wj * N[(x + N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(-1.0 + N[(N[(wj * N[(x + N[(1.0 + N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(wj - N[(x * N[Exp[(-wj)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.2 \cdot 10^{-6}:\\
\;\;\;\;wj - \frac{\frac{x}{e^{wj}} - wj}{-1 - wj}\\
\mathbf{elif}\;wj \leq 4.3 \cdot 10^{-6}:\\
\;\;\;\;x - wj \cdot \left(x + \left(x + wj \cdot \left(x \cdot -2 + \left(-1 + \left(wj \cdot \left(x + \left(1 + \left(x + x \cdot 0.5\right)\right)\right) - x \cdot 0.5\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - x \cdot e^{-wj}}{-1 - wj}\\
\end{array}
\end{array}
if wj < -4.1999999999999996e-6: Initial program 21.6%
distribute-rgt1-in96.6%
associate-/l/97.0%
div-sub22.0%
associate-/l*22.0%
*-inverses97.0%
*-rgt-identity97.0%
Simplified97.0%
if -4.1999999999999996e-6 < wj < 4.30000000000000033e-6Initial program 81.7%
distribute-rgt1-in81.7%
associate-/l/81.7%
div-sub81.7%
associate-/l*81.7%
*-inverses81.7%
*-rgt-identity81.7%
Simplified81.7%
Taylor expanded in wj around 0 81.7%
associate-*r*81.7%
neg-mul-181.7%
distribute-rgt-out81.7%
metadata-eval81.7%
Simplified81.7%
Taylor expanded in wj around 0 99.9%
if 4.30000000000000033e-6 < wj Initial program 75.2%
distribute-rgt1-in75.2%
associate-/l/76.3%
div-sub76.3%
associate-/l*76.3%
*-inverses96.3%
*-rgt-identity96.3%
Simplified96.3%
clear-num96.5%
associate-/r/96.3%
rec-exp96.5%
Applied egg-rr96.5%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(if (or (<= wj -2.95e-6) (not (<= wj 1.85e-6)))
(- wj (/ (- (/ x (exp wj)) wj) (- -1.0 wj)))
(-
x
(*
wj
(+
x
(+
x
(*
wj
(+
(* x -2.0)
(+ -1.0 (- (* wj (+ x (+ 1.0 (+ x (* x 0.5))))) (* x 0.5)))))))))))
/* Two-regime variant of the rewritten update for w*exp(w) = x: the rescaled
 * quotient away from zero, and a Taylor series in wj around 0 inside
 * (-2.95e-6, 1.85e-6]. Operation order is deliberate — do not reassociate. */
double code(double wj, double x) {
double tmp;
if ((wj <= -2.95e-6) || !(wj <= 1.85e-6)) {
/* Away from zero: quotient rescaled by exp(wj). */
tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj));
} else {
/* Near zero: series form avoids cancellation. */
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
}
return tmp;
}
!> Two-regime variant of the rewritten update for w*exp(w) = x: rescaled
!! quotient away from zero, Taylor series in wj around 0 inside
!! (-2.95e-6, 1.85e-6]. Keep the evaluation order exactly as generated.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
! Away from zero: quotient rescaled by exp(wj).
if ((wj <= (-2.95d-6)) .or. (.not. (wj <= 1.85d-6))) then
tmp = wj - (((x / exp(wj)) - wj) / ((-1.0d0) - wj))
else
! Near zero: series form avoids cancellation.
tmp = x - (wj * (x + (x + (wj * ((x * (-2.0d0)) + ((-1.0d0) + ((wj * (x + (1.0d0 + (x + (x * 0.5d0))))) - (x * 0.5d0))))))))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if ((wj <= -2.95e-6) || !(wj <= 1.85e-6)) {
tmp = wj - (((x / Math.exp(wj)) - wj) / (-1.0 - wj));
} else {
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
}
return tmp;
}
def code(wj, x):
    """Two-regime rewritten update for w*exp(w) = x (Herbie output).

    Rescaled quotient away from zero; Taylor series in wj around 0 inside
    (-2.95e-6, 1.85e-6]. Fixed: the generated one-line rendering was invalid
    Python syntax; expressions are unchanged.
    """
    if (wj <= -2.95e-6) or not (wj <= 1.85e-6):
        # Away from zero: quotient rescaled by exp(wj).
        tmp = wj - (((x / math.exp(wj)) - wj) / (-1.0 - wj))
    else:
        # Near zero: series form avoids cancellation.
        tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))))
    return tmp
function code(wj, x) tmp = 0.0 if ((wj <= -2.95e-6) || !(wj <= 1.85e-6)) tmp = Float64(wj - Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(-1.0 - wj))); else tmp = Float64(x - Float64(wj * Float64(x + Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(-1.0 + Float64(Float64(wj * Float64(x + Float64(1.0 + Float64(x + Float64(x * 0.5))))) - Float64(x * 0.5))))))))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if ((wj <= -2.95e-6) || ~((wj <= 1.85e-6))) tmp = wj - (((x / exp(wj)) - wj) / (-1.0 - wj)); else tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5)))))))); end tmp_2 = tmp; end
code[wj_, x_] := If[Or[LessEqual[wj, -2.95e-6], N[Not[LessEqual[wj, 1.85e-6]], $MachinePrecision]], N[(wj - N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x - N[(wj * N[(x + N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(-1.0 + N[(N[(wj * N[(x + N[(1.0 + N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -2.95 \cdot 10^{-6} \lor \neg \left(wj \leq 1.85 \cdot 10^{-6}\right):\\
\;\;\;\;wj - \frac{\frac{x}{e^{wj}} - wj}{-1 - wj}\\
\mathbf{else}:\\
\;\;\;\;x - wj \cdot \left(x + \left(x + wj \cdot \left(x \cdot -2 + \left(-1 + \left(wj \cdot \left(x + \left(1 + \left(x + x \cdot 0.5\right)\right)\right) - x \cdot 0.5\right)\right)\right)\right)\right)\\
\end{array}
\end{array}
if wj < -2.95000000000000013e-6 or 1.8500000000000001e-6 < wj Initial program 53.9%
distribute-rgt1-in85.5%
associate-/l/86.3%
div-sub54.7%
associate-/l*54.7%
*-inverses96.8%
*-rgt-identity96.8%
Simplified96.8%
if -2.95000000000000013e-6 < wj < 1.8500000000000001e-6Initial program 81.6%
distribute-rgt1-in81.6%
associate-/l/81.6%
div-sub81.6%
associate-/l*81.6%
*-inverses81.6%
*-rgt-identity81.6%
Simplified81.6%
Taylor expanded in wj around 0 81.6%
associate-*r*81.6%
neg-mul-181.6%
distribute-rgt-out81.6%
metadata-eval81.6%
Simplified81.6%
Taylor expanded in wj around 0 99.9%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.165)
(/ x (* (exp wj) (+ wj 1.0)))
(if (<= wj 8.9e-26)
(-
x
(*
wj
(+
x
(+
x
(*
wj
(+
(* x -2.0)
(+ -1.0 (- (* wj (+ x (+ 1.0 (+ x (* x 0.5))))) (* x 0.5)))))))))
(+
wj
(/
(-
(-
x
(*
wj
(+
x
(*
wj
(+
(- (* x 0.5) x)
(*
wj
(+
(- x (* x 0.5))
(+ (* x -0.5) (* x 0.16666666666666666)))))))))
wj)
(+ wj 1.0))))))
double code(double wj, double x) {
double tmp;
if (wj <= -0.165) {
tmp = x / (exp(wj) * (wj + 1.0));
} else if (wj <= 8.9e-26) {
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0));
}
return tmp;
}
!> Three-regime rewritten update for w*exp(w) = x with a dedicated branch for
!! strongly negative wj (x / (exp(wj)*(wj+1)), from a Taylor expansion in x
!! around infinity) and series forms elsewhere. Keep the operation order.
!! NOTE(review): (wj + 1) is zero at wj = -1, inside the first branch's range
!! — confirm callers avoid wj = -1.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
! Strongly negative: dominant-term form.
if (wj <= (-0.165d0)) then
tmp = x / (exp(wj) * (wj + 1.0d0))
! wj at or below tiny threshold: Taylor series in wj around 0.
else if (wj <= 8.9d-26) then
tmp = x - (wj * (x + (x + (wj * ((x * (-2.0d0)) + ((-1.0d0) + ((wj * (x + (1.0d0 + (x + (x * 0.5d0))))) - (x * 0.5d0))))))))
else
! Positive regime: cubic series in the numerator over (wj + 1).
tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5d0) - x) + (wj * ((x - (x * 0.5d0)) + ((x * (-0.5d0)) + (x * 0.16666666666666666d0))))))))) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -0.165) {
tmp = x / (Math.exp(wj) * (wj + 1.0));
} else if (wj <= 8.9e-26) {
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Three-regime rewritten update for w*exp(w) = x (Herbie output).

    Dominant-term form for strongly negative wj, Taylor series in wj around 0
    for tiny wj, and a cubic-series numerator over (wj + 1) otherwise. Fixed:
    the generated one-line rendering was invalid Python syntax; expressions
    are unchanged. NOTE(review): (wj + 1.0) is zero at wj == -1.
    """
    if wj <= -0.165:
        # Strongly negative: dominant-term form.
        tmp = x / (math.exp(wj) * (wj + 1.0))
    elif wj <= 8.9e-26:
        # Tiny wj: polynomial series in wj.
        tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))))
    else:
        # Positive regime: cubic-series numerator over (wj + 1).
        tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -0.165) tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0))); elseif (wj <= 8.9e-26) tmp = Float64(x - Float64(wj * Float64(x + Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(-1.0 + Float64(Float64(wj * Float64(x + Float64(1.0 + Float64(x + Float64(x * 0.5))))) - Float64(x * 0.5))))))))); else tmp = Float64(wj + Float64(Float64(Float64(x - Float64(wj * Float64(x + Float64(wj * Float64(Float64(Float64(x * 0.5) - x) + Float64(wj * Float64(Float64(x - Float64(x * 0.5)) + Float64(Float64(x * -0.5) + Float64(x * 0.16666666666666666))))))))) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -0.165) tmp = x / (exp(wj) * (wj + 1.0)); elseif (wj <= 8.9e-26) tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5)))))))); else tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -0.165], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 8.9e-26], N[(x - N[(wj * N[(x + N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(-1.0 + N[(N[(wj * N[(x + N[(1.0 + N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x - N[(wj * N[(x + N[(wj * N[(N[(N[(x * 0.5), $MachinePrecision] - x), $MachinePrecision] + N[(wj * N[(N[(x - N[(x * 0.5), $MachinePrecision]), $MachinePrecision] + N[(N[(x * -0.5), $MachinePrecision] + N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.165:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{elif}\;wj \leq 8.9 \cdot 10^{-26}:\\
\;\;\;\;x - wj \cdot \left(x + \left(x + wj \cdot \left(x \cdot -2 + \left(-1 + \left(wj \cdot \left(x + \left(1 + \left(x + x \cdot 0.5\right)\right)\right) - x \cdot 0.5\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\left(x - wj \cdot \left(x + wj \cdot \left(\left(x \cdot 0.5 - x\right) + wj \cdot \left(\left(x - x \cdot 0.5\right) + \left(x \cdot -0.5 + x \cdot 0.16666666666666666\right)\right)\right)\right)\right) - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < -0.165000000000000008: Initial program 0.0%
distribute-rgt1-in100.0%
associate-/l/100.0%
div-sub0.0%
associate-/l*0.0%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 100.0%
+-commutative100.0%
Simplified100.0%
if -0.165000000000000008 < wj < 8.9000000000000001e-26Initial program 81.2%
distribute-rgt1-in81.2%
associate-/l/81.2%
div-sub81.2%
associate-/l*81.2%
*-inverses81.2%
*-rgt-identity81.2%
Simplified81.2%
Taylor expanded in wj around 0 81.2%
associate-*r*81.2%
neg-mul-181.2%
distribute-rgt-out81.2%
metadata-eval81.2%
Simplified81.2%
Taylor expanded in wj around 0 99.4%
if 8.9000000000000001e-26 < wj Initial program 85.3%
distribute-rgt1-in85.3%
associate-/l/86.0%
div-sub86.0%
associate-/l*86.0%
*-inverses97.7%
*-rgt-identity97.7%
Simplified97.7%
Taylor expanded in wj around 0 87.0%
Final simplification98.6%
(FPCore (wj x)
:precision binary64
(if (<= wj 8.9e-26)
(-
x
(*
wj
(+
x
(+
x
(*
wj
(+
(* x -2.0)
(+ -1.0 (- (* wj (+ x (+ 1.0 (+ x (* x 0.5))))) (* x 0.5)))))))))
(+
wj
(/
(-
(-
x
(*
wj
(+
x
(*
wj
(+
(- (* x 0.5) x)
(*
wj
(+ (- x (* x 0.5)) (+ (* x -0.5) (* x 0.16666666666666666)))))))))
wj)
(+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 8.9e-26) {
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0));
}
return tmp;
}
!> Two-regime rewritten update for w*exp(w) = x: Taylor series in wj around 0
!! for wj at or below the tiny threshold, cubic-series numerator over (wj + 1)
!! otherwise. Keep the evaluation order exactly as generated.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
! Tiny or negative wj: polynomial series in wj.
if (wj <= 8.9d-26) then
tmp = x - (wj * (x + (x + (wj * ((x * (-2.0d0)) + ((-1.0d0) + ((wj * (x + (1.0d0 + (x + (x * 0.5d0))))) - (x * 0.5d0))))))))
else
! Positive regime: series numerator over (wj + 1).
tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5d0) - x) + (wj * ((x - (x * 0.5d0)) + ((x * (-0.5d0)) + (x * 0.16666666666666666d0))))))))) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 8.9e-26) {
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Two-regime rewritten update for w*exp(w) = x (Herbie output).

    Taylor series in wj around 0 for tiny/negative wj, cubic-series numerator
    over (wj + 1) otherwise. Fixed: the generated one-line rendering was
    invalid Python syntax; expressions are unchanged.
    """
    if wj <= 8.9e-26:
        # Tiny or negative wj: polynomial series in wj.
        tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))))
    else:
        # Positive regime: series numerator over (wj + 1).
        tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 8.9e-26) tmp = Float64(x - Float64(wj * Float64(x + Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(-1.0 + Float64(Float64(wj * Float64(x + Float64(1.0 + Float64(x + Float64(x * 0.5))))) - Float64(x * 0.5))))))))); else tmp = Float64(wj + Float64(Float64(Float64(x - Float64(wj * Float64(x + Float64(wj * Float64(Float64(Float64(x * 0.5) - x) + Float64(wj * Float64(Float64(x - Float64(x * 0.5)) + Float64(Float64(x * -0.5) + Float64(x * 0.16666666666666666))))))))) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 8.9e-26) tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5)))))))); else tmp = wj + (((x - (wj * (x + (wj * (((x * 0.5) - x) + (wj * ((x - (x * 0.5)) + ((x * -0.5) + (x * 0.16666666666666666))))))))) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 8.9e-26], N[(x - N[(wj * N[(x + N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(-1.0 + N[(N[(wj * N[(x + N[(1.0 + N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x - N[(wj * N[(x + N[(wj * N[(N[(N[(x * 0.5), $MachinePrecision] - x), $MachinePrecision] + N[(wj * N[(N[(x - N[(x * 0.5), $MachinePrecision]), $MachinePrecision] + N[(N[(x * -0.5), $MachinePrecision] + N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 8.9 \cdot 10^{-26}:\\
\;\;\;\;x - wj \cdot \left(x + \left(x + wj \cdot \left(x \cdot -2 + \left(-1 + \left(wj \cdot \left(x + \left(1 + \left(x + x \cdot 0.5\right)\right)\right) - x \cdot 0.5\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\left(x - wj \cdot \left(x + wj \cdot \left(\left(x \cdot 0.5 - x\right) + wj \cdot \left(\left(x - x \cdot 0.5\right) + \left(x \cdot -0.5 + x \cdot 0.16666666666666666\right)\right)\right)\right)\right) - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 8.9000000000000001e-26Initial program 79.1%
distribute-rgt1-in81.6%
associate-/l/81.6%
div-sub79.1%
associate-/l*79.1%
*-inverses81.6%
*-rgt-identity81.6%
Simplified81.6%
Taylor expanded in wj around 0 79.2%
associate-*r*79.2%
neg-mul-179.2%
distribute-rgt-out79.2%
metadata-eval79.2%
Simplified79.2%
Taylor expanded in wj around 0 97.0%
if 8.9000000000000001e-26 < wj Initial program 85.3%
distribute-rgt1-in85.3%
associate-/l/86.0%
div-sub86.0%
associate-/l*86.0%
*-inverses97.7%
*-rgt-identity97.7%
Simplified97.7%
Taylor expanded in wj around 0 87.0%
Final simplification96.3%
(FPCore (wj x)
:precision binary64
(if (<= wj 4.6e-5)
(-
x
(*
wj
(+
x
(+
x
(*
wj
(+
(* x -2.0)
(+ -1.0 (- (* wj (+ x (+ 1.0 (+ x (* x 0.5))))) (* x 0.5)))))))))
(+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 4.6e-5) {
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
!> Cheaper two-regime variant: Taylor series in wj around 0 below the
!! threshold; above it the x-dependence is dropped entirely (Taylor expansion
!! in x around 0 per the derivation log), leaving wj + wj/(-1 - wj).
!! NOTE(review): the else branch ignores x by construction.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 4.6d-5) then
tmp = x - (wj * (x + (x + (wj * ((x * (-2.0d0)) + ((-1.0d0) + ((wj * (x + (1.0d0 + (x + (x * 0.5d0))))) - (x * 0.5d0))))))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 4.6e-5) {
tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Cheaper two-regime variant of the rewritten update (Herbie output).

    Taylor series in wj around 0 below the threshold; above it the
    x-dependence is dropped (series in x around 0), leaving wj + wj/(-1 - wj).
    Fixed: the generated one-line rendering was invalid Python syntax;
    expressions are unchanged.
    """
    if wj <= 4.6e-5:
        tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5))))))))
    else:
        # Note: x is intentionally unused in this regime.
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 4.6e-5) tmp = Float64(x - Float64(wj * Float64(x + Float64(x + Float64(wj * Float64(Float64(x * -2.0) + Float64(-1.0 + Float64(Float64(wj * Float64(x + Float64(1.0 + Float64(x + Float64(x * 0.5))))) - Float64(x * 0.5))))))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 4.6e-5) tmp = x - (wj * (x + (x + (wj * ((x * -2.0) + (-1.0 + ((wj * (x + (1.0 + (x + (x * 0.5))))) - (x * 0.5)))))))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 4.6e-5], N[(x - N[(wj * N[(x + N[(x + N[(wj * N[(N[(x * -2.0), $MachinePrecision] + N[(-1.0 + N[(N[(wj * N[(x + N[(1.0 + N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 4.6 \cdot 10^{-5}:\\
\;\;\;\;x - wj \cdot \left(x + \left(x + wj \cdot \left(x \cdot -2 + \left(-1 + \left(wj \cdot \left(x + \left(1 + \left(x + x \cdot 0.5\right)\right)\right) - x \cdot 0.5\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 4.6e-5: Initial program 79.7%
distribute-rgt1-in82.2%
associate-/l/82.2%
div-sub79.7%
associate-/l*79.7%
*-inverses82.2%
*-rgt-identity82.2%
Simplified82.2%
Taylor expanded in wj around 0 79.8%
associate-*r*79.8%
neg-mul-179.8%
distribute-rgt-out79.8%
metadata-eval79.8%
Simplified79.8%
Taylor expanded in wj around 0 97.0%
if 4.6e-5 < wj Initial program 75.2%
distribute-rgt1-in75.2%
associate-/l/76.3%
div-sub76.3%
associate-/l*76.3%
*-inverses96.3%
*-rgt-identity96.3%
Simplified96.3%
Taylor expanded in x around 0 76.9%
+-commutative76.9%
Simplified76.9%
Final simplification96.3%
(FPCore (wj x) :precision binary64 (if (<= wj 8.9e-26) (- x (* wj (+ (* x 2.0) (* wj (+ wj -1.0))))) (+ wj (/ (- wj (+ x (* wj (* x (+ -1.0 (* wj 0.5)))))) (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 8.9e-26) {
tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0))));
} else {
tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj));
}
return tmp;
}
!> Low-order two-regime variant: quadratic series in wj (and linear in x) for
!! tiny/negative wj; a series-in-x numerator over (-1 - wj) otherwise. Keep
!! the generated evaluation order.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 8.9d-26) then
tmp = x - (wj * ((x * 2.0d0) + (wj * (wj + (-1.0d0)))))
else
tmp = wj + ((wj - (x + (wj * (x * ((-1.0d0) + (wj * 0.5d0)))))) / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 8.9e-26) {
tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0))));
} else {
tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Low-order two-regime variant of the rewritten update (Herbie output).

    Quadratic series in wj for tiny/negative wj; series-in-x numerator over
    (-1 - wj) otherwise. Fixed: the generated one-line rendering was invalid
    Python syntax; expressions are unchanged.
    """
    if wj <= 8.9e-26:
        tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0))))
    else:
        tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 8.9e-26) tmp = Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * Float64(wj + -1.0))))); else tmp = Float64(wj + Float64(Float64(wj - Float64(x + Float64(wj * Float64(x * Float64(-1.0 + Float64(wj * 0.5)))))) / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 8.9e-26) tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0)))); else tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 8.9e-26], N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * N[(wj + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(wj - N[(x + N[(wj * N[(x * N[(-1.0 + N[(wj * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 8.9 \cdot 10^{-26}:\\
\;\;\;\;x - wj \cdot \left(x \cdot 2 + wj \cdot \left(wj + -1\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - \left(x + wj \cdot \left(x \cdot \left(-1 + wj \cdot 0.5\right)\right)\right)}{-1 - wj}\\
\end{array}
\end{array}
if wj < 8.9000000000000001e-26Initial program 79.1%
distribute-rgt1-in81.6%
associate-/l/81.6%
div-sub79.1%
associate-/l*79.1%
*-inverses81.6%
*-rgt-identity81.6%
Simplified81.6%
Taylor expanded in wj around 0 97.0%
Taylor expanded in x around 0 96.9%
neg-mul-196.9%
unsub-neg96.9%
Simplified96.9%
if 8.9000000000000001e-26 < wj Initial program 85.3%
distribute-rgt1-in85.3%
associate-/l/86.0%
div-sub86.0%
associate-/l*86.0%
*-inverses97.7%
*-rgt-identity97.7%
Simplified97.7%
Taylor expanded in wj around 0 85.5%
associate-*r*85.5%
neg-mul-185.5%
distribute-rgt-out85.5%
metadata-eval85.5%
Simplified85.5%
Taylor expanded in x around 0 85.5%
Final simplification96.2%
(FPCore (wj x) :precision binary64 (if (<= wj 4.6e-5) (- x (* wj (+ (* x 2.0) (* wj (+ wj -1.0))))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 4.6e-5) {
tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
!> Cheapest series pair: quadratic form in wj below the threshold; above it
!! x is dropped entirely, leaving wj + wj/(-1 - wj). Keep the generated
!! evaluation order.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 4.6d-5) then
tmp = x - (wj * ((x * 2.0d0) + (wj * (wj + (-1.0d0)))))
else
! Note: x is intentionally unused in this regime.
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 4.6e-5) {
tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Cheapest series pair of the rewritten update (Herbie output).

    Quadratic form in wj below the threshold; above it x is dropped entirely,
    leaving wj + wj/(-1 - wj). Fixed: the generated one-line rendering was
    invalid Python syntax; expressions are unchanged.
    """
    if wj <= 4.6e-5:
        tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0))))
    else:
        # Note: x is intentionally unused in this regime.
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 4.6e-5) tmp = Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * Float64(wj + -1.0))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 4.6e-5) tmp = x - (wj * ((x * 2.0) + (wj * (wj + -1.0)))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 4.6e-5], N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * N[(wj + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 4.6 \cdot 10^{-5}:\\
\;\;\;\;x - wj \cdot \left(x \cdot 2 + wj \cdot \left(wj + -1\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 4.6e-5Initial program 79.7%
distribute-rgt1-in82.2%
associate-/l/82.2%
div-sub79.7%
associate-/l*79.7%
*-inverses82.2%
*-rgt-identity82.2%
Simplified82.2%
Taylor expanded in wj around 0 97.0%
Taylor expanded in x around 0 96.9%
neg-mul-196.9%
unsub-neg96.9%
Simplified96.9%
if 4.6e-5 < wj Initial program 75.2%
distribute-rgt1-in75.2%
associate-/l/76.3%
div-sub76.3%
associate-/l*76.3%
*-inverses96.3%
*-rgt-identity96.3%
Simplified96.3%
Taylor expanded in x around 0 76.9%
+-commutative76.9%
Simplified76.9%
Final simplification96.1%
(FPCore (wj x) :precision binary64 (if (<= wj 4.6e-5) (* x (- 1.0 (* wj (- 2.0 (* wj 2.5))))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 4.6e-5) {
tmp = x * (1.0 - (wj * (2.0 - (wj * 2.5))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
!> Factored series variant: x * (1 - wj*(2 - 2.5*wj)) below the threshold;
!! x dropped above it. Keep the generated evaluation order.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 4.6d-5) then
tmp = x * (1.0d0 - (wj * (2.0d0 - (wj * 2.5d0))))
else
! Note: x is intentionally unused in this regime.
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 4.6e-5) {
tmp = x * (1.0 - (wj * (2.0 - (wj * 2.5))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Factored series variant of the rewritten update (Herbie output).

    x * (1 - wj*(2 - 2.5*wj)) below the threshold; x dropped above it. Fixed:
    the generated one-line rendering was invalid Python syntax; expressions
    are unchanged.
    """
    if wj <= 4.6e-5:
        tmp = x * (1.0 - (wj * (2.0 - (wj * 2.5))))
    else:
        # Note: x is intentionally unused in this regime.
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 4.6e-5) tmp = Float64(x * Float64(1.0 - Float64(wj * Float64(2.0 - Float64(wj * 2.5))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 4.6e-5) tmp = x * (1.0 - (wj * (2.0 - (wj * 2.5)))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 4.6e-5], N[(x * N[(1.0 - N[(wj * N[(2.0 - N[(wj * 2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 4.6 \cdot 10^{-5}:\\
\;\;\;\;x \cdot \left(1 - wj \cdot \left(2 - wj \cdot 2.5\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 4.6e-5Initial program 79.7%
distribute-rgt1-in82.2%
associate-/l/82.2%
div-sub79.7%
associate-/l*79.7%
*-inverses82.2%
*-rgt-identity82.2%
Simplified82.2%
Taylor expanded in wj around 0 79.8%
associate-*r*79.8%
neg-mul-179.8%
distribute-rgt-out79.8%
metadata-eval79.8%
Simplified79.8%
Taylor expanded in x around inf 88.6%
Taylor expanded in wj around 0 88.5%
if 4.6e-5 < wj Initial program 75.2%
distribute-rgt1-in75.2%
associate-/l/76.3%
div-sub76.3%
associate-/l*76.3%
*-inverses96.3%
*-rgt-identity96.3%
Simplified96.3%
Taylor expanded in x around 0 76.9%
+-commutative76.9%
Simplified76.9%
Final simplification88.1%
(FPCore (wj x) :precision binary64 (if (<= wj 7.8e-6) (* x (- (/ -1.0 (- -1.0 wj)) wj)) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 7.8e-6) {
tmp = x * ((-1.0 / (-1.0 - wj)) - wj);
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
!> Rational-factor variant: x * (-1/(-1 - wj) - wj) below the threshold; x
!! dropped above it. Keep the generated evaluation order.
!! NOTE(review): (-1 - wj) is zero at wj = -1, inside the first branch's range.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 7.8d-6) then
tmp = x * (((-1.0d0) / ((-1.0d0) - wj)) - wj)
else
! Note: x is intentionally unused in this regime.
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 7.8e-6) {
tmp = x * ((-1.0 / (-1.0 - wj)) - wj);
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Rational-factor variant of the rewritten update (Herbie output).

    x * (-1/(-1 - wj) - wj) below the threshold; x dropped above it. Fixed:
    the generated one-line rendering was invalid Python syntax; expressions
    are unchanged. NOTE(review): (-1.0 - wj) is zero at wj == -1.
    """
    if wj <= 7.8e-6:
        tmp = x * ((-1.0 / (-1.0 - wj)) - wj)
    else:
        # Note: x is intentionally unused in this regime.
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 7.8e-6) tmp = Float64(x * Float64(Float64(-1.0 / Float64(-1.0 - wj)) - wj)); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 7.8e-6) tmp = x * ((-1.0 / (-1.0 - wj)) - wj); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 7.8e-6], N[(x * N[(N[(-1.0 / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 7.8 \cdot 10^{-6}:\\
\;\;\;\;x \cdot \left(\frac{-1}{-1 - wj} - wj\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 7.7999999999999999e-6Initial program 79.7%
distribute-rgt1-in82.2%
associate-/l/82.2%
div-sub79.7%
associate-/l*79.7%
*-inverses82.2%
*-rgt-identity82.2%
Simplified82.2%
Taylor expanded in wj around 0 79.8%
associate-*r*79.8%
neg-mul-179.8%
distribute-rgt-out79.8%
metadata-eval79.8%
Simplified79.8%
Taylor expanded in x around inf 88.6%
Taylor expanded in wj around 0 88.4%
neg-mul-188.4%
Simplified88.4%
if 7.7999999999999999e-6 < wj Initial program 75.2%
distribute-rgt1-in75.2%
associate-/l/76.3%
div-sub76.3%
associate-/l*76.3%
*-inverses96.3%
*-rgt-identity96.3%
Simplified96.3%
Taylor expanded in x around 0 76.9%
+-commutative76.9%
Simplified76.9%
Final simplification88.0%
(FPCore (wj x) :precision binary64 (if (<= wj 7e-6) (+ x (* -2.0 (* wj x))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 7e-6) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
!> Linear variant: x + (-2)*(wj*x), i.e. x*(1 - 2*wj), below the threshold;
!! x dropped above it. Keep the generated evaluation order.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 7d-6) then
tmp = x + ((-2.0d0) * (wj * x))
else
! Note: x is intentionally unused in this regime.
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 7e-6) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Linear variant of the rewritten update (Herbie output).

    x + (-2)*(wj*x), i.e. x*(1 - 2*wj), below the threshold; x dropped above
    it. Fixed: the generated one-line rendering was invalid Python syntax;
    expressions are unchanged.
    """
    if wj <= 7e-6:
        tmp = x + (-2.0 * (wj * x))
    else:
        # Note: x is intentionally unused in this regime.
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 7e-6) tmp = Float64(x + Float64(-2.0 * Float64(wj * x))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x)
  % Piecewise Herbie alternative to the Lambert-W Newton step.
  % Statements after the function declaration on one line are invalid
  % MATLAB; the same logic is restored on separate lines.
  tmp = 0.0;
  if (wj <= 7e-6)
    tmp = x + (-2.0 * (wj * x));
  else
    tmp = wj + (wj / (-1.0 - wj));
  end
  tmp_2 = tmp;
end
(* Herbie alternative (threshold 7e-6): small-wj branch x + (-2*wj*x), else wj + wj/(-1-wj). *)
code[wj_, x_] := If[LessEqual[wj, 7e-6], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 7 \cdot 10^{-6}:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 6.99999999999999989e-6: Initial program 79.7%
distribute-rgt1-in 82.2%
associate-/l/ 82.2%
div-sub 79.7%
associate-/l* 79.7%
*-inverses 82.2%
*-rgt-identity 82.2%
Simplified 82.2%
Taylor expanded in wj around 0 88.4%
*-commutative 88.4%
Simplified 88.4%
if 6.99999999999999989e-6 < wj: Initial program 75.2%
distribute-rgt1-in 75.2%
associate-/l/ 76.3%
div-sub 76.3%
associate-/l* 76.3%
*-inverses 96.3%
*-rgt-identity 96.3%
Simplified 96.3%
Taylor expanded in x around 0 76.9%
+-commutative 76.9%
Simplified 76.9%
Final simplification 88.0%
;; Herbie alternative: single-branch linearization x*(1 - 2*wj).
(FPCore (wj x) :precision binary64 (* x (+ 1.0 (* wj -2.0))))
/* Herbie-simplified linearization of the Newton step: x * (1 - 2*wj).
 * The factor is computed first with the same rounding order as the
 * original inline expression. */
double code(double wj, double x) {
    double damp = 1.0 + (wj * -2.0);
    return x * damp;
}
! Herbie-simplified linearization of the Newton step: x * (1 - 2*wj).
real(8) function code(wj, x)
  real(8), intent (in) :: wj
  real(8), intent (in) :: x
  real(8) :: damp
  ! Same rounding order as the original inline expression.
  damp = 1.0d0 + (wj * (-2.0d0))
  code = x * damp
end function code
/** Herbie-simplified linearization of the Newton step: x * (1 - 2*wj). */
public static double code(double wj, double x) {
    final double damp = 1.0 + (wj * -2.0);
    return x * damp;
}
def code(wj, x):
    """Herbie-simplified linearization of the Newton step: x * (1 - 2*wj)."""
    damp = 1.0 + (wj * -2.0)
    return x * damp
# Herbie-simplified linearization of the Newton step: x * (1 - 2*wj).
code(wj, x) = Float64(x * Float64(1.0 + Float64(wj * -2.0)))
function tmp = code(wj, x)
  % Herbie-simplified linearization of the Newton step: x * (1 - 2*wj).
  % The one-line original placed code on the function declaration line,
  % which is invalid MATLAB; layout restored, behavior unchanged.
  tmp = x * (1.0 + (wj * -2.0));
end
(* Herbie alternative: linearization x*(1 - 2*wj). *)
code[wj_, x_] := N[(x * N[(1.0 + N[(wj * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + wj \cdot -2\right)
\end{array}
Initial program 79.5%
distribute-rgt1-in 81.9%
associate-/l/ 81.9%
div-sub 79.6%
associate-/l* 79.6%
*-inverses 82.7%
*-rgt-identity 82.7%
Simplified 82.7%
Taylor expanded in wj around 0 85.4%
*-commutative 85.4%
Simplified 85.4%
Taylor expanded in x around 0 85.3%
*-commutative 85.3%
Simplified 85.3%
Final simplification 85.3%
;; Herbie alternative: x - 2*wj*x with the product wj*x rounded first.
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
/* Herbie alternative: x - 2*wj*x, with the product wj*x rounded before
 * the scale by -2 (same order as the original). */
double code(double wj, double x) {
    double correction = -2.0 * (wj * x);
    return x + correction;
}
! Herbie alternative: x - 2*wj*x, with the product wj*x rounded first.
real(8) function code(wj, x)
  real(8), intent (in) :: wj
  real(8), intent (in) :: x
  real(8) :: corr
  corr = (-2.0d0) * (wj * x)
  code = x + corr
end function code
/** Herbie alternative: x - 2*wj*x, with the product wj*x rounded first. */
public static double code(double wj, double x) {
    final double correction = -2.0 * (wj * x);
    return x + correction;
}
def code(wj, x):
    """Herbie alternative: x - 2*wj*x, with the product wj*x rounded first."""
    correction = -2.0 * (wj * x)
    return x + correction
# Herbie alternative: x - 2*wj*x, with the product wj*x rounded first.
code(wj, x) = Float64(x + Float64(-2.0 * Float64(wj * x)))
function tmp = code(wj, x)
  % Herbie alternative: x - 2*wj*x, with the product wj*x rounded first.
  % Code on the function declaration line is invalid MATLAB; layout
  % restored, behavior unchanged.
  tmp = x + (-2.0 * (wj * x));
end
(* Herbie alternative: x + (-2*wj*x). *)
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Initial program 79.5%
distribute-rgt1-in 81.9%
associate-/l/ 81.9%
div-sub 79.6%
associate-/l* 79.6%
*-inverses 82.7%
*-rgt-identity 82.7%
Simplified 82.7%
Taylor expanded in wj around 0 85.4%
*-commutative 85.4%
Simplified 85.4%
Final simplification 85.4%
;; Degenerate Herbie alternative: keep the current iterate wj (drop the Newton correction entirely).
(FPCore (wj x) :precision binary64 wj)
/* Degenerate Herbie alternative: return the current iterate wj unchanged
 * (the Newton correction term is dropped entirely; x is unused). */
double code(double wj, double x) {
return wj;
}
! Degenerate Herbie alternative: return the current iterate wj unchanged
! (the Newton correction term is dropped entirely; x is unused).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
/** Degenerate Herbie alternative: return the iterate wj unchanged (x is unused). */
public static double code(double wj, double x) {
return wj;
}
def code(wj, x):
    """Degenerate Herbie alternative: return the iterate wj unchanged (x is unused)."""
    return wj
# Degenerate Herbie alternative: return the iterate wj unchanged (x is unused).
code(wj, x) = wj
function tmp = code(wj, x)
  % Degenerate Herbie alternative: return the iterate wj unchanged (x is
  % unused). Code on the function declaration line is invalid MATLAB;
  % layout restored, behavior unchanged.
  tmp = wj;
end
(* Degenerate Herbie alternative: the iterate wj itself. *)
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 79.5%
distribute-rgt1-in 81.9%
associate-/l/ 81.9%
div-sub 79.6%
associate-/l* 79.6%
*-inverses 82.7%
*-rgt-identity 82.7%
Simplified 82.7%
Taylor expanded in wj around inf 4.2%
Final simplification 4.2%
;; Degenerate Herbie alternative: return x itself (wj is unused).
(FPCore (wj x) :precision binary64 x)
/* Degenerate Herbie alternative: return x unchanged (wj is unused). */
double code(double wj, double x) {
return x;
}
! Degenerate Herbie alternative: return x unchanged (wj is unused).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
/** Degenerate Herbie alternative: return x unchanged (wj is unused). */
public static double code(double wj, double x) {
return x;
}
def code(wj, x):
    """Degenerate Herbie alternative: return x unchanged (wj is unused)."""
    return x
# Degenerate Herbie alternative: return x unchanged (wj is unused).
code(wj, x) = x
function tmp = code(wj, x)
  % Degenerate Herbie alternative: return x unchanged (wj is unused).
  % Code on the function declaration line is invalid MATLAB; layout
  % restored, behavior unchanged.
  tmp = x;
end
(* Degenerate Herbie alternative: x itself. *)
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 79.5%
distribute-rgt1-in 81.9%
associate-/l/ 81.9%
div-sub 79.6%
associate-/l* 79.6%
*-inverses 82.7%
*-rgt-identity 82.7%
Simplified 82.7%
Taylor expanded in wj around 0 84.8%
Final simplification 84.8%
;; Herbie's recommended rewrite: the division split as wj/(wj+1) - x/(e^wj + wj*e^wj), avoiding the cancellation in (wj*e^wj - x).
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Herbie's recommended rewrite of the Newton step:
! wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
real(8) function code(wj, x)
  real(8), intent (in) :: wj
  real(8), intent (in) :: x
  real(8) :: ew, lhs, rhs
  ! exp(wj) is evaluated once and reused -- same value, same rounding.
  ew = exp(wj)
  lhs = wj / (wj + 1.0d0)
  rhs = x / (ew + (wj * ew))
  code = wj - (lhs - rhs)
end function code
/**
 * Herbie's recommended rewrite of the Newton step:
 * wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
 * Math.exp(wj) is evaluated once and reused — same value, same rounding.
 */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);
    return wj - ((wj / (wj + 1.0)) - (x / (ew + (wj * ew))));
}
def code(wj, x):
    """Herbie's recommended rewrite: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).

    math.exp(wj) is evaluated once and reused -- same value, same rounding.
    """
    ew = math.exp(wj)
    rational = wj / (wj + 1.0)
    correction = x / (ew + (wj * ew))
    return wj - (rational - correction)
# Herbie's recommended rewrite: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
# exp(wj) is evaluated once and reused -- same value, same rounding.
function code(wj, x)
    ew = exp(wj)
    lhs = Float64(wj / Float64(wj + 1.0))
    rhs = Float64(x / Float64(ew + Float64(wj * ew)))
    return Float64(wj - Float64(lhs - rhs))
end
function tmp = code(wj, x)
  % Herbie's recommended rewrite: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
  % Code on the function declaration line is invalid MATLAB; layout
  % restored, behavior unchanged.
  tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
(* Herbie's recommended rewrite: wj - (wj/(wj+1) - x/(E^wj + wj*E^wj)). *)
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024080
;; Original Newton-step program for Lambert W ("Jmat.Real.lambertw, newton
;; loop step"); the :alt clause records Herbie's recommended rewrite of the
;; same step, kept alongside the original body on the last line.
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))