
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-method step for f(w) = w*exp(w) - x:
! code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)),
! since f'(w) = exp(w)*(1 + w) = exp(wj) + t_0.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 holds wj * e^wj, reused in numerator and denominator
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton-method step for f(w) = w*exp(w) - x: returns
 * wj - f(wj)/f'(wj), with f'(w) = exp(w)*(1 + w) = exp(wj) + t_0.
 * Fix: Math.exp(wj) was evaluated twice; hoisting it is bitwise-identical.
 */
public static double code(double wj, double x) {
    double expWj = Math.exp(wj);   // shared exponential
    double t_0 = wj * expWj;       // wj * e^wj
    return wj - ((t_0 - x) / (expWj + t_0));
}
def code(wj, x):
    """One Newton step for f(w) = w*exp(w) - x.

    Returns wj - f(wj)/f'(wj), where f'(w) = exp(w)*(1 + w).
    Fixes: the original was crammed onto one line (invalid Python syntax)
    and evaluated math.exp(wj) twice; the hoisted form is bitwise-identical.
    """
    e_wj = math.exp(wj)  # shared exponential
    t_0 = wj * e_wj      # wj * e^wj
    return wj - ((t_0 - x) / (e_wj + t_0))
# One Newton step for f(w) = w*exp(w) - x; Float64() wrappers mimic the
# binary64 rounding of the FPCore source. Fix: the original had all
# statements on one line with no separators, which is a Julia syntax error.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton step for f(w) = w*exp(w) - x.
% Fix: the original fused the function declaration and its statements onto a
% single line, which MATLAB does not accept; reformatted onto separate lines.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* Machine-precision model of the Newton step wj - (wj*Exp[wj] - x)/(Exp[wj] + wj*Exp[wj]);
   every intermediate is wrapped in N[..., $MachinePrecision] to mimic binary64 rounding. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
/* One Newton-method step for f(w) = w*exp(w) - x:
 * returns wj - f(wj)/f'(wj), with f'(w) = exp(w)*(1 + w) = exp(wj) + t_0. */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! Newton step for f(w) = w*exp(w) - x:
! code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = wj * e^wj, shared by numerator and denominator
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** Newton step for f(w) = w*exp(w) - x:
 *  returns wj - f(wj)/f'(wj), with f'(w) = exp(w)*(1 + w) = exp(wj) + t_0. */
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton step for f(w) = w*exp(w) - x.

    Fixes: the original one-liner was invalid Python syntax and called
    math.exp(wj) twice; the hoisted form is bitwise-identical.
    """
    e_wj = math.exp(wj)
    t_0 = wj * e_wj
    return wj - ((t_0 - x) / (e_wj + t_0))
# Newton step for f(w) = w*exp(w) - x, binary64 semantics via Float64().
# Fix: reformatted from an unseparated one-liner (Julia syntax error).
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% Newton step for f(w) = w*exp(w) - x.
% Fix: declaration and body were fused on one line; reformatted.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (- wj (/ x (exp wj))))
(t_1 (/ t_0 (+ wj 1.0)))
(t_2 (+ (* x -4.0) (* x 1.5))))
(if (<= wj -3.5e-6)
(*
x
(+ (/ (exp (- wj)) (+ wj 1.0)) (+ (/ wj x) (/ wj (* x (- -1.0 wj))))))
(if (<= wj 3.55e-6)
(-
x
(*
wj
(+
(* x 2.0)
(*
wj
(+
t_2
(+
-1.0
(*
wj
(+
1.0
(+ (* x -3.0) (+ (* -2.0 t_2) (* x 0.6666666666666666)))))))))))
(+ (- wj t_1) (fma (/ -1.0 (+ wj 1.0)) t_0 t_1))))))
/* Herbie-generated piecewise rewrite of the Newton step
 * wj - (wj*exp(wj) - x)/(exp(wj) + wj*exp(wj)), split into three wj regimes
 * (cut points -3.5e-6 and 3.55e-6; tool-chosen, see derivation log below). */
double code(double wj, double x) {
/* t_0, t_1 feed only the wj > 3.55e-6 branch. */
double t_0 = wj - (x / exp(wj));
double t_1 = t_0 / (wj + 1.0);
/* t_2 is mathematically -2.5*x, kept as two products as generated. */
double t_2 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -3.5e-6) {
/* Negative regime: reformulated via exp(-wj); requires x != 0 (divides by x). */
tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj)))));
} else if (wj <= 3.55e-6) {
/* |wj| tiny: polynomial from a Taylor expansion in wj around 0 (per log). */
tmp = x - (wj * ((x * 2.0) + (wj * (t_2 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_2) + (x * 0.6666666666666666))))))))));
} else {
/* Large wj: compensated form; fma rounds the product-sum only once. */
tmp = (wj - t_1) + fma((-1.0 / (wj + 1.0)), t_0, t_1);
}
return tmp;
}
# Piecewise Herbie rewrite of the Newton step for w*e^w = x (three wj
# regimes; Float64() wrappers mimic binary64 rounding).
# Fix: reformatted from an unseparated one-liner, which is invalid Julia.
function code(wj, x)
    t_0 = Float64(wj - Float64(x / exp(wj)))
    t_1 = Float64(t_0 / Float64(wj + 1.0))
    t_2 = Float64(Float64(x * -4.0) + Float64(x * 1.5))
    tmp = 0.0
    if (wj <= -3.5e-6)
        tmp = Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj))))))
    elseif (wj <= 3.55e-6)
        tmp = Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * Float64(t_2 + Float64(-1.0 + Float64(wj * Float64(1.0 + Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_2) + Float64(x * 0.6666666666666666)))))))))))
    else
        tmp = Float64(Float64(wj - t_1) + fma(Float64(-1.0 / Float64(wj + 1.0)), t_0, t_1))
    end
    return tmp
end
code[wj_, x_] := Block[{t$95$0 = N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -3.5e-6], N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 3.55e-6], N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * N[(t$95$2 + N[(-1.0 + N[(wj * N[(1.0 + N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$2), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(wj - t$95$1), $MachinePrecision] + N[(N[(-1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] * t$95$0 + t$95$1), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj - \frac{x}{e^{wj}}\\
t_1 := \frac{t\_0}{wj + 1}\\
t_2 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -3.5 \cdot 10^{-6}:\\
\;\;\;\;x \cdot \left(\frac{e^{-wj}}{wj + 1} + \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\right)\\
\mathbf{elif}\;wj \leq 3.55 \cdot 10^{-6}:\\
\;\;\;\;x - wj \cdot \left(x \cdot 2 + wj \cdot \left(t\_2 + \left(-1 + wj \cdot \left(1 + \left(x \cdot -3 + \left(-2 \cdot t\_2 + x \cdot 0.6666666666666666\right)\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\left(wj - t\_1\right) + \mathsf{fma}\left(\frac{-1}{wj + 1}, t\_0, t\_1\right)\\
\end{array}
\end{array}
if wj < -3.49999999999999995e-6: Initial program 85.3%
distribute-rgt1-in99.8%
associate-/l/99.6%
div-sub85.3%
associate-/l*85.3%
*-inverses99.6%
*-rgt-identity99.6%
Simplified99.6%
Taylor expanded in x around inf 99.6%
associate--l+99.6%
associate-/r*99.8%
rec-exp99.8%
+-commutative99.8%
+-commutative99.8%
Simplified99.8%
if -3.49999999999999995e-6 < wj < 3.5499999999999999e-6Initial program 74.0%
distribute-rgt1-in74.0%
associate-/l/74.0%
div-sub74.0%
associate-/l*74.0%
*-inverses74.0%
*-rgt-identity74.0%
Simplified74.0%
Taylor expanded in wj around 0 99.9%
if 3.5499999999999999e-6 < wj Initial program 44.6%
distribute-rgt1-in44.4%
associate-/l/44.5%
div-sub44.5%
associate-/l*44.5%
*-inverses98.4%
*-rgt-identity98.4%
Simplified98.4%
*-un-lft-identity98.4%
div-inv98.3%
prod-diff98.5%
associate-/r/98.1%
clear-num98.2%
fma-neg98.2%
*-un-lft-identity98.2%
associate-/r/98.4%
clear-num98.5%
Applied egg-rr98.5%
distribute-neg-frac98.5%
metadata-eval98.5%
Simplified98.5%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj -4.3e-6)
(*
x
(+ (/ (exp (- wj)) (+ wj 1.0)) (+ (/ wj x) (/ wj (* x (- -1.0 wj))))))
(if (<= wj 6.2e-6)
(-
x
(*
wj
(+
(* x 2.0)
(*
wj
(+
t_0
(+
-1.0
(*
wj
(+
1.0
(+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666)))))))))))
(+ wj (* (/ (- wj (/ x (exp wj))) (fma wj wj -1.0)) (- 1.0 wj)))))))
/* Herbie alternative: same Newton step as the original program, split into
 * three wj regimes (cut points -4.3e-6 and 6.2e-6, tool-chosen). */
double code(double wj, double x) {
/* t_0 is mathematically -2.5*x, kept as two products as generated. */
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -4.3e-6) {
/* Negative regime: uses exp(-wj); requires x != 0 (divides by x). */
tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj)))));
} else if (wj <= 6.2e-6) {
/* Near zero: Taylor polynomial in wj around 0 (per derivation log). */
tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))));
} else {
/* Large wj: fma(wj, wj, -1.0) computes wj*wj - 1 with a single rounding. */
tmp = wj + (((wj - (x / exp(wj))) / fma(wj, wj, -1.0)) * (1.0 - wj));
}
return tmp;
}
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (wj <= -4.3e-6) tmp = Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj)))))); elseif (wj <= 6.2e-6) tmp = Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * Float64(t_0 + Float64(-1.0 + Float64(wj * Float64(1.0 + Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666))))))))))); else tmp = Float64(wj + Float64(Float64(Float64(wj - Float64(x / exp(wj))) / fma(wj, wj, -1.0)) * Float64(1.0 - wj))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -4.3e-6], N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 6.2e-6], N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * N[(t$95$0 + N[(-1.0 + N[(wj * N[(1.0 + N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(wj * wj + -1.0), $MachinePrecision]), $MachinePrecision] * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -4.3 \cdot 10^{-6}:\\
\;\;\;\;x \cdot \left(\frac{e^{-wj}}{wj + 1} + \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\right)\\
\mathbf{elif}\;wj \leq 6.2 \cdot 10^{-6}:\\
\;\;\;\;x - wj \cdot \left(x \cdot 2 + wj \cdot \left(t\_0 + \left(-1 + wj \cdot \left(1 + \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - \frac{x}{e^{wj}}}{\mathsf{fma}\left(wj, wj, -1\right)} \cdot \left(1 - wj\right)\\
\end{array}
\end{array}
if wj < -4.30000000000000033e-6Initial program 85.3%
distribute-rgt1-in99.8%
associate-/l/99.6%
div-sub85.3%
associate-/l*85.3%
*-inverses99.6%
*-rgt-identity99.6%
Simplified99.6%
Taylor expanded in x around inf 99.6%
associate--l+99.6%
associate-/r*99.8%
rec-exp99.8%
+-commutative99.8%
+-commutative99.8%
Simplified99.8%
if -4.30000000000000033e-6 < wj < 6.1999999999999999e-6Initial program 74.0%
distribute-rgt1-in74.0%
associate-/l/74.0%
div-sub74.0%
associate-/l*74.0%
*-inverses74.0%
*-rgt-identity74.0%
Simplified74.0%
Taylor expanded in wj around 0 99.9%
if 6.1999999999999999e-6 < wj Initial program 44.6%
distribute-rgt1-in44.4%
associate-/l/44.5%
div-sub44.5%
associate-/l*44.5%
*-inverses98.4%
*-rgt-identity98.4%
Simplified98.4%
flip-+98.5%
associate-/r/98.5%
metadata-eval98.5%
fma-neg98.5%
metadata-eval98.5%
sub-neg98.5%
metadata-eval98.5%
Applied egg-rr98.5%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj -3.2e-6)
(*
x
(+ (/ (exp (- wj)) (+ wj 1.0)) (+ (/ wj x) (/ wj (* x (- -1.0 wj))))))
(if (<= wj 3.55e-6)
(-
x
(*
wj
(+
(* x 2.0)
(*
wj
(+
t_0
(+
-1.0
(*
wj
(+
1.0
(+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666)))))))))))
(- wj (/ (- wj (/ x (exp wj))) (+ wj 1.0)))))))
/* Herbie alternative: Newton step split into three wj regimes
 * (cut points -3.2e-6 and 3.55e-6, tool-chosen). */
double code(double wj, double x) {
/* t_0 is mathematically -2.5*x, kept as two products as generated. */
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -3.2e-6) {
/* Negative regime: uses exp(-wj); requires x != 0 (divides by x). */
tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj)))));
} else if (wj <= 3.55e-6) {
/* Near zero: Taylor polynomial in wj around 0 (per derivation log). */
tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))));
} else {
/* Large wj: algebraically wj - (wj - x*e^-wj)/(wj + 1). */
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if (wj <= (-3.2d-6)) then
tmp = x * ((exp(-wj) / (wj + 1.0d0)) + ((wj / x) + (wj / (x * ((-1.0d0) - wj)))))
else if (wj <= 3.55d-6) then
tmp = x - (wj * ((x * 2.0d0) + (wj * (t_0 + ((-1.0d0) + (wj * (1.0d0 + ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0))))))))))
else
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -3.2e-6) {
tmp = x * ((Math.exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj)))));
} else if (wj <= 3.55e-6) {
tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))));
} else {
tmp = wj - ((wj - (x / Math.exp(wj))) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Newton step for w*exp(w) = x.

    Three wj regimes (cut points -3.2e-6 and 3.55e-6).
    Fix: reformatted from an unseparated one-liner (invalid Python syntax).
    """
    t_0 = (x * -4.0) + (x * 1.5)  # mathematically -2.5*x, as generated
    tmp = 0
    if wj <= -3.2e-6:
        # Negative regime: uses exp(-wj); divides by x, so x must be nonzero.
        tmp = x * ((math.exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj)))))
    elif wj <= 3.55e-6:
        # Near zero: Taylor polynomial in wj around 0.
        tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))))
    else:
        tmp = wj - ((wj - (x / math.exp(wj))) / (wj + 1.0))
    return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (wj <= -3.2e-6) tmp = Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj)))))); elseif (wj <= 3.55e-6) tmp = Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * Float64(t_0 + Float64(-1.0 + Float64(wj * Float64(1.0 + Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666))))))))))); else tmp = Float64(wj - Float64(Float64(wj - Float64(x / exp(wj))) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = 0.0; if (wj <= -3.2e-6) tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj))))); elseif (wj <= 3.55e-6) tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))))))); else tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -3.2e-6], N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 3.55e-6], N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * N[(t$95$0 + N[(-1.0 + N[(wj * N[(1.0 + N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -3.2 \cdot 10^{-6}:\\
\;\;\;\;x \cdot \left(\frac{e^{-wj}}{wj + 1} + \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\right)\\
\mathbf{elif}\;wj \leq 3.55 \cdot 10^{-6}:\\
\;\;\;\;x - wj \cdot \left(x \cdot 2 + wj \cdot \left(t\_0 + \left(-1 + wj \cdot \left(1 + \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - \frac{x}{e^{wj}}}{wj + 1}\\
\end{array}
\end{array}
if wj < -3.1999999999999999e-6Initial program 85.3%
distribute-rgt1-in99.8%
associate-/l/99.6%
div-sub85.3%
associate-/l*85.3%
*-inverses99.6%
*-rgt-identity99.6%
Simplified99.6%
Taylor expanded in x around inf 99.6%
associate--l+99.6%
associate-/r*99.8%
rec-exp99.8%
+-commutative99.8%
+-commutative99.8%
Simplified99.8%
if -3.1999999999999999e-6 < wj < 3.5499999999999999e-6Initial program 74.0%
distribute-rgt1-in74.0%
associate-/l/74.0%
div-sub74.0%
associate-/l*74.0%
*-inverses74.0%
*-rgt-identity74.0%
Simplified74.0%
Taylor expanded in wj around 0 99.9%
if 3.5499999999999999e-6 < wj Initial program 44.6%
distribute-rgt1-in44.4%
associate-/l/44.5%
div-sub44.5%
associate-/l*44.5%
*-inverses98.4%
*-rgt-identity98.4%
Simplified98.4%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (or (<= wj -3.5e-6) (not (<= wj 5.5e-6)))
(- wj (/ (- wj (/ x (exp wj))) (+ wj 1.0)))
(-
x
(*
wj
(+
(* x 2.0)
(*
wj
(+
t_0
(+
-1.0
(*
wj
(+
1.0
(+
(* x -3.0)
(+ (* -2.0 t_0) (* x 0.6666666666666666))))))))))))))
/* Herbie alternative: two regimes. Note the guard is written with
 * !(wj <= 5.5e-6) rather than (wj > 5.5e-6), so a NaN wj also takes the
 * first branch. */
double code(double wj, double x) {
/* t_0 is mathematically -2.5*x, kept as two products as generated. */
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj <= -3.5e-6) || !(wj <= 5.5e-6)) {
/* Outside the near-zero band: wj - (wj - x*e^-wj)/(wj + 1). */
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0));
} else {
/* Near zero: Taylor polynomial in wj around 0 (per derivation log). */
tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if ((wj <= (-3.5d-6)) .or. (.not. (wj <= 5.5d-6))) then
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0d0))
else
tmp = x - (wj * ((x * 2.0d0) + (wj * (t_0 + ((-1.0d0) + (wj * (1.0d0 + ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0))))))))))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj <= -3.5e-6) || !(wj <= 5.5e-6)) {
tmp = wj - ((wj - (x / Math.exp(wj))) / (wj + 1.0));
} else {
tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))));
}
return tmp;
}
def code(wj, x):
    """Two-regime Herbie rewrite of the Newton step for w*exp(w) = x.

    The polynomial branch covers -3.5e-6 < wj <= 5.5e-6.
    Fix: reformatted from an unseparated one-liner (invalid Python syntax).
    """
    t_0 = (x * -4.0) + (x * 1.5)  # mathematically -2.5*x, as generated
    tmp = 0
    if (wj <= -3.5e-6) or not (wj <= 5.5e-6):
        tmp = wj - ((wj - (x / math.exp(wj))) / (wj + 1.0))
    else:
        # Near zero: Taylor polynomial in wj around 0.
        tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))))
    return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if ((wj <= -3.5e-6) || !(wj <= 5.5e-6)) tmp = Float64(wj - Float64(Float64(wj - Float64(x / exp(wj))) / Float64(wj + 1.0))); else tmp = Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * Float64(t_0 + Float64(-1.0 + Float64(wj * Float64(1.0 + Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666))))))))))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = 0.0; if ((wj <= -3.5e-6) || ~((wj <= 5.5e-6))) tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0)); else tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))))))); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[wj, -3.5e-6], N[Not[LessEqual[wj, 5.5e-6]], $MachinePrecision]], N[(wj - N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * N[(t$95$0 + N[(-1.0 + N[(wj * N[(1.0 + N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -3.5 \cdot 10^{-6} \lor \neg \left(wj \leq 5.5 \cdot 10^{-6}\right):\\
\;\;\;\;wj - \frac{wj - \frac{x}{e^{wj}}}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;x - wj \cdot \left(x \cdot 2 + wj \cdot \left(t\_0 + \left(-1 + wj \cdot \left(1 + \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right)\right)\right)\\
\end{array}
\end{array}
if wj < -3.49999999999999995e-6 or 5.4999999999999999e-6 < wj Initial program 58.8%
distribute-rgt1-in63.8%
associate-/l/63.8%
div-sub58.8%
associate-/l*58.8%
*-inverses98.8%
*-rgt-identity98.8%
Simplified98.8%
if -3.49999999999999995e-6 < wj < 5.4999999999999999e-6Initial program 74.0%
distribute-rgt1-in74.0%
associate-/l/74.0%
div-sub74.0%
associate-/l*74.0%
*-inverses74.0%
*-rgt-identity74.0%
Simplified74.0%
Taylor expanded in wj around 0 99.9%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj 0.24)
(-
x
(*
wj
(+
(* x 2.0)
(*
wj
(+
t_0
(+
-1.0
(*
wj
(+
1.0
(+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666)))))))))))
(+ wj (/ wj (- -1.0 wj))))))
/* Herbie alternative: Taylor polynomial for wj <= 0.24; beyond that the
 * generated approximation drops the dependence on x entirely. */
double code(double wj, double x) {
/* t_0 is mathematically -2.5*x, kept as two products as generated. */
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 0.24) {
/* Taylor polynomial in wj around 0 (per derivation log). */
tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))));
} else {
/* Large wj: x does not appear in this branch. */
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if (wj <= 0.24d0) then
tmp = x - (wj * ((x * 2.0d0) + (wj * (t_0 + ((-1.0d0) + (wj * (1.0d0 + ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0))))))))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 0.24) {
tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Two-regime Herbie rewrite: Taylor polynomial for wj <= 0.24,
    x-independent form otherwise.

    Fix: reformatted from an unseparated one-liner (invalid Python syntax).
    """
    t_0 = (x * -4.0) + (x * 1.5)  # mathematically -2.5*x, as generated
    tmp = 0
    if wj <= 0.24:
        tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666))))))))))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (wj <= 0.24) tmp = Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * Float64(t_0 + Float64(-1.0 + Float64(wj * Float64(1.0 + Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666))))))))))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = 0.0; if (wj <= 0.24) tmp = x - (wj * ((x * 2.0) + (wj * (t_0 + (-1.0 + (wj * (1.0 + ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))))))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, 0.24], N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * N[(t$95$0 + N[(-1.0 + N[(wj * N[(1.0 + N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq 0.24:\\
\;\;\;\;x - wj \cdot \left(x \cdot 2 + wj \cdot \left(t\_0 + \left(-1 + wj \cdot \left(1 + \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.23999999999999999Initial program 74.7%
distribute-rgt1-in75.1%
associate-/l/75.1%
div-sub74.7%
associate-/l*74.7%
*-inverses75.1%
*-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 96.9%
if 0.23999999999999999 < wj Initial program 22.2%
distribute-rgt1-in22.2%
associate-/l/22.2%
div-sub22.2%
associate-/l*22.2%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
+-commutative100.0%
Simplified100.0%
Final simplification97.0%
(FPCore (wj x)
:precision binary64
(if (<= wj 0.24)
(+
x
(*
wj
(-
(*
x
(* wj (+ 2.5 (+ (/ 1.0 x) (* wj (- (/ -1.0 x) 2.6666666666666665))))))
(* x 2.0))))
(+ wj (/ wj (- -1.0 wj)))))
/* Herbie alternative: polynomial-with-1/x form for wj <= 0.24 (this branch
 * divides by x, so x must be nonzero there); x-independent form otherwise. */
double code(double wj, double x) {
double tmp;
if (wj <= 0.24) {
tmp = x + (wj * ((x * (wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665)))))) - (x * 2.0)));
} else {
/* Large wj: x does not appear in this branch. */
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.24d0) then
tmp = x + (wj * ((x * (wj * (2.5d0 + ((1.0d0 / x) + (wj * (((-1.0d0) / x) - 2.6666666666666665d0)))))) - (x * 2.0d0)))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.24) {
tmp = x + (wj * ((x * (wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665)))))) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Two-regime Herbie rewrite with 1/x terms in the small-wj branch.

    The wj <= 0.24 branch divides by x, so x must be nonzero there.
    Fix: reformatted from an unseparated one-liner (invalid Python syntax).
    """
    tmp = 0
    if wj <= 0.24:
        tmp = x + (wj * ((x * (wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665)))))) - (x * 2.0)))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.24) tmp = Float64(x + Float64(wj * Float64(Float64(x * Float64(wj * Float64(2.5 + Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(-1.0 / x) - 2.6666666666666665)))))) - Float64(x * 2.0)))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.24) tmp = x + (wj * ((x * (wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665)))))) - (x * 2.0))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.24], N[(x + N[(wj * N[(N[(x * N[(wj * N[(2.5 + N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(-1.0 / x), $MachinePrecision] - 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.24:\\
\;\;\;\;x + wj \cdot \left(x \cdot \left(wj \cdot \left(2.5 + \left(\frac{1}{x} + wj \cdot \left(\frac{-1}{x} - 2.6666666666666665\right)\right)\right)\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.23999999999999999Initial program 74.7%
distribute-rgt1-in75.1%
associate-/l/75.1%
div-sub74.7%
associate-/l*74.7%
*-inverses75.1%
*-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 96.9%
Taylor expanded in x around inf 96.9%
fma-define96.9%
*-commutative96.9%
associate-/l*96.8%
mul-1-neg96.8%
sub-neg96.8%
Simplified96.8%
Taylor expanded in wj around 0 96.8%
+-commutative96.8%
mul-1-neg96.8%
unsub-neg96.8%
+-commutative96.8%
Simplified96.8%
if 0.23999999999999999 < wj Initial program 22.2%
distribute-rgt1-in22.2%
associate-/l/22.2%
div-sub22.2%
associate-/l*22.2%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
+-commutative100.0%
Simplified100.0%
Final simplification96.9%
(FPCore (wj x) :precision binary64 (if (<= wj 0.24) (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.24) {
tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.24d0) then
tmp = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.24) {
tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.24: tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
# Two-regime polynomial approximation (binary64 semantics via Float64()).
# Fix: reformatted from an unseparated one-liner (invalid Julia syntax).
function code(wj, x)
    tmp = 0.0
    if (wj <= 0.24)
        tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0))))
    else
        tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj)))
    end
    return tmp
end
% Two-regime polynomial approximation.
% Fix: declaration and statements were fused on one line; reformatted.
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 0.24)
        tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
    else
        tmp = wj + (wj / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := If[LessEqual[wj, 0.24], N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.24:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.23999999999999999Initial program 74.7%
distribute-rgt1-in75.1%
associate-/l/75.1%
div-sub74.7%
associate-/l*74.7%
*-inverses75.1%
*-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 96.9%
Taylor expanded in x around 0 96.4%
mul-1-neg96.4%
sub-neg96.4%
Simplified96.4%
if 0.23999999999999999 < wj Initial program 22.2%
distribute-rgt1-in22.2%
associate-/l/22.2%
div-sub22.2%
associate-/l*22.2%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
+-commutative100.0%
Simplified100.0%
Final simplification96.6%
(FPCore (wj x) :precision binary64 (if (<= wj 0.24) (+ x (* wj (+ wj (* x -2.0)))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.24) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.24d0) then
tmp = x + (wj * (wj + (x * (-2.0d0))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.24) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.24: tmp = x + (wj * (wj + (x * -2.0))) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
# Lowest-order two-regime approximation (binary64 semantics via Float64()).
# Fix: reformatted from an unseparated one-liner (invalid Julia syntax).
function code(wj, x)
    tmp = 0.0
    if (wj <= 0.24)
        tmp = Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0))))
    else
        tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj)))
    end
    return tmp
end
% Herbie alternative; restored from a collapsed one-line rendering
% (MATLAB requires the function declaration on its own line).
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 0.24)
        tmp = x + (wj * (wj + (x * -2.0)));
    else
        tmp = wj + (wj / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
(* Herbie alternative: x + wj*(wj - 2x) for wj <= 0.24, else wj + wj/(-1-wj). *)
code[wj_, x_] := If[LessEqual[wj, 0.24], N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.24:\\
\;\;\;\;x + wj \cdot \left(wj + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.23999999999999999Initial program 74.7%
distribute-rgt1-in75.1%
associate-/l/75.1%
div-sub74.7%
associate-/l*74.7%
*-inverses75.1%
*-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 96.9%
Taylor expanded in x around 0 96.4%
mul-1-neg96.4%
sub-neg96.4%
Simplified96.4%
Taylor expanded in wj around 0 96.0%
*-commutative96.0%
Simplified96.0%
if 0.23999999999999999 < wj Initial program 22.2%
distribute-rgt1-in22.2%
associate-/l/22.2%
div-sub22.2%
associate-/l*22.2%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
+-commutative100.0%
Simplified100.0%
Final simplification96.1%
(FPCore (wj x) :precision binary64 (if (<= wj 1.4e-7) (/ x (+ 1.0 (* wj 2.0))) (+ wj (/ wj (- -1.0 wj)))))
/* Herbie alternative: rational x/(1 + 2*wj) for tiny wj, else wj + wj/(-1 - wj). */
double code(double wj, double x) {
    return (wj <= 1.4e-7)
        ? x / (1.0 + wj * 2.0)
        : wj + wj / (-1.0 - wj);
}
! Herbie alternative: x/(1 + 2*wj) for tiny wj, else wj + wj/(-1 - wj).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 1.4d-7) then
tmp = x / (1.0d0 + (wj * 2.0d0))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
/** Herbie alternative: x/(1 + 2*wj) for tiny wj, else wj + wj/(-1 - wj). */
public static double code(double wj, double x) {
double tmp;
if (wj <= 1.4e-7) {
tmp = x / (1.0 + (wj * 2.0));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Herbie alternative: x/(1 + 2*wj) for tiny wj, else wj + wj/(-1 - wj).

    Restored from an invalid one-line rendering (tokens unchanged).
    """
    tmp = 0
    if wj <= 1.4e-7:
        tmp = x / (1.0 + (wj * 2.0))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
# Herbie alternative; restored from a collapsed one-line rendering that
# Julia cannot parse (missing statement separators).
function code(wj, x)
    tmp = 0.0
    if wj <= 1.4e-7
        tmp = Float64(x / Float64(1.0 + Float64(wj * 2.0)))
    else
        tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj)))
    end
    return tmp
end
% Herbie alternative; restored from a collapsed one-line rendering
% (MATLAB requires the function declaration on its own line).
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 1.4e-7)
        tmp = x / (1.0 + (wj * 2.0));
    else
        tmp = wj + (wj / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
(* Herbie alternative: x/(1 + 2*wj) for wj <= 1.4e-7, else wj + wj/(-1-wj). *)
code[wj_, x_] := If[LessEqual[wj, 1.4e-7], N[(x / N[(1.0 + N[(wj * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1.4 \cdot 10^{-7}:\\
\;\;\;\;\frac{x}{1 + wj \cdot 2}\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 1.4000000000000001e-7Initial program 74.4%
distribute-rgt1-in74.8%
associate-/l/74.8%
div-sub74.4%
associate-/l*74.4%
*-inverses74.8%
*-rgt-identity74.8%
Simplified74.8%
Taylor expanded in x around inf 84.4%
+-commutative84.4%
rem-exp-log83.1%
+-commutative83.1%
log1p-undefine83.1%
exp-sum83.1%
Simplified83.1%
Taylor expanded in wj around 0 82.8%
*-commutative82.8%
Simplified82.8%
if 1.4000000000000001e-7 < wj Initial program 46.1%
distribute-rgt1-in46.0%
associate-/l/46.1%
div-sub46.1%
associate-/l*46.1%
*-inverses96.1%
*-rgt-identity96.1%
Simplified96.1%
Taylor expanded in x around 0 75.6%
+-commutative75.6%
Simplified75.6%
Final simplification82.4%
(FPCore (wj x) :precision binary64 (if (<= wj 1.4e-7) (+ x (* -2.0 (* wj x))) (+ wj (/ wj (- -1.0 wj)))))
/* Herbie alternative: linearization x - 2*wj*x for tiny wj, else wj + wj/(-1 - wj). */
double code(double wj, double x) {
    if (wj <= 1.4e-7) {
        return x + -2.0 * (wj * x);
    }
    return wj + wj / (-1.0 - wj);
}
! Herbie alternative: x - 2*wj*x for tiny wj, else wj + wj/(-1 - wj).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 1.4d-7) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
/** Herbie alternative: x - 2*wj*x for tiny wj, else wj + wj/(-1 - wj). */
public static double code(double wj, double x) {
double tmp;
if (wj <= 1.4e-7) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Herbie alternative: x - 2*wj*x for tiny wj, else wj + wj/(-1 - wj).

    Restored from an invalid one-line rendering (tokens unchanged).
    """
    tmp = 0
    if wj <= 1.4e-7:
        tmp = x + (-2.0 * (wj * x))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
# Herbie alternative; restored from a collapsed one-line rendering that
# Julia cannot parse (missing statement separators).
function code(wj, x)
    tmp = 0.0
    if wj <= 1.4e-7
        tmp = Float64(x + Float64(-2.0 * Float64(wj * x)))
    else
        tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj)))
    end
    return tmp
end
% Herbie alternative; restored from a collapsed one-line rendering
% (MATLAB requires the function declaration on its own line).
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 1.4e-7)
        tmp = x + (-2.0 * (wj * x));
    else
        tmp = wj + (wj / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
(* Herbie alternative: x - 2*wj*x for wj <= 1.4e-7, else wj + wj/(-1-wj). *)
code[wj_, x_] := If[LessEqual[wj, 1.4e-7], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1.4 \cdot 10^{-7}:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 1.4000000000000001e-7Initial program 74.4%
distribute-rgt1-in74.8%
associate-/l/74.8%
div-sub74.4%
associate-/l*74.4%
*-inverses74.8%
*-rgt-identity74.8%
Simplified74.8%
Taylor expanded in wj around 0 82.6%
*-commutative82.6%
Simplified82.6%
if 1.4000000000000001e-7 < wj Initial program 46.1%
distribute-rgt1-in46.0%
associate-/l/46.1%
div-sub46.1%
associate-/l*46.1%
*-inverses96.1%
*-rgt-identity96.1%
Simplified96.1%
Taylor expanded in x around 0 75.6%
+-commutative75.6%
Simplified75.6%
Final simplification82.3%
(FPCore (wj x) :precision binary64 (if (<= wj 0.95) (+ x (* -2.0 (* wj x))) (+ wj -1.0)))
/* Herbie alternative: x - 2*wj*x for wj <= 0.95, else wj - 1.
 * (wj + -1.0 rewritten as wj - 1.0; IEEE-identical.) */
double code(double wj, double x) {
    if (wj <= 0.95) {
        return x + -2.0 * (wj * x);
    }
    return wj - 1.0;
}
! Herbie alternative: x - 2*wj*x for wj <= 0.95, else wj - 1.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.95d0) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj + (-1.0d0)
end if
code = tmp
end function
/** Herbie alternative: x - 2*wj*x for wj <= 0.95, else wj - 1. */
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.95) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + -1.0;
}
return tmp;
}
def code(wj, x):
    """Herbie alternative: x - 2*wj*x for wj <= 0.95, else wj - 1.

    Restored from an invalid one-line rendering (tokens unchanged).
    """
    tmp = 0
    if wj <= 0.95:
        tmp = x + (-2.0 * (wj * x))
    else:
        tmp = wj + -1.0
    return tmp
# Herbie alternative; restored from a collapsed one-line rendering that
# Julia cannot parse (missing statement separators).
function code(wj, x)
    tmp = 0.0
    if wj <= 0.95
        tmp = Float64(x + Float64(-2.0 * Float64(wj * x)))
    else
        tmp = Float64(wj + -1.0)
    end
    return tmp
end
% Herbie alternative; restored from a collapsed one-line rendering
% (MATLAB requires the function declaration on its own line).
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 0.95)
        tmp = x + (-2.0 * (wj * x));
    else
        tmp = wj + -1.0;
    end
    tmp_2 = tmp;
end
(* Herbie alternative: x - 2*wj*x for wj <= 0.95, else wj - 1. *)
code[wj_, x_] := If[LessEqual[wj, 0.95], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + -1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.95:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + -1\\
\end{array}
\end{array}
if wj < 0.94999999999999996Initial program 74.7%
distribute-rgt1-in75.1%
associate-/l/75.1%
div-sub74.7%
associate-/l*74.7%
*-inverses75.1%
*-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 81.4%
*-commutative81.4%
Simplified81.4%
if 0.94999999999999996 < wj Initial program 22.2%
distribute-rgt1-in22.2%
associate-/l/22.2%
div-sub22.2%
associate-/l*22.2%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in wj around inf 75.2%
Final simplification81.2%
(FPCore (wj x) :precision binary64 (if (<= wj 1.8) x (+ wj -1.0)))
/* Herbie alternative: identity x for wj <= 1.8, else wj - 1.
 * (wj + -1.0 rewritten as wj - 1.0; IEEE-identical.) */
double code(double wj, double x) {
    return (wj <= 1.8) ? x : wj - 1.0;
}
! Herbie alternative: returns x for wj <= 1.8, else wj - 1.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 1.8d0) then
tmp = x
else
tmp = wj + (-1.0d0)
end if
code = tmp
end function
/** Herbie alternative: returns x for wj <= 1.8, else wj - 1. */
public static double code(double wj, double x) {
double tmp;
if (wj <= 1.8) {
tmp = x;
} else {
tmp = wj + -1.0;
}
return tmp;
}
def code(wj, x):
    """Herbie alternative: returns x for wj <= 1.8, else wj - 1.

    Restored from an invalid one-line rendering (tokens unchanged).
    """
    tmp = 0
    if wj <= 1.8:
        tmp = x
    else:
        tmp = wj + -1.0
    return tmp
# Herbie alternative; restored from a collapsed one-line rendering that
# Julia cannot parse (missing statement separators).
function code(wj, x)
    tmp = 0.0
    if wj <= 1.8
        tmp = x
    else
        tmp = Float64(wj + -1.0)
    end
    return tmp
end
% Herbie alternative; restored from a collapsed one-line rendering
% (MATLAB requires the function declaration on its own line).
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 1.8)
        tmp = x;
    else
        tmp = wj + -1.0;
    end
    tmp_2 = tmp;
end
(* Herbie alternative: returns x for wj <= 1.8, else wj - 1. *)
code[wj_, x_] := If[LessEqual[wj, 1.8], x, N[(wj + -1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1.8:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;wj + -1\\
\end{array}
\end{array}
if wj < 1.80000000000000004Initial program 74.7%
distribute-rgt1-in75.1%
associate-/l/75.1%
div-sub74.7%
associate-/l*74.7%
*-inverses75.1%
*-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 80.6%
if 1.80000000000000004 < wj Initial program 22.2%
distribute-rgt1-in22.2%
associate-/l/22.2%
div-sub22.2%
associate-/l*22.2%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in wj around inf 75.2%
Final simplification80.4%
(FPCore (wj x) :precision binary64 x)
/* Degenerate Herbie alternative: ignores wj, returns x unchanged. */
double code(double wj, double x) {
return x;
}
! Degenerate Herbie alternative: ignores wj, returns x unchanged.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
/** Degenerate Herbie alternative: ignores wj, returns x unchanged. */
public static double code(double wj, double x) {
return x;
}
def code(wj, x):
    """Degenerate Herbie alternative: ignores wj and returns x unchanged."""
    return x
# Degenerate Herbie alternative: ignores wj and returns x unchanged.
function code(wj, x)
    return x
end
% Degenerate Herbie alternative: returns x unchanged; restored from a
% collapsed one-line rendering (declaration must be on its own line).
function tmp = code(wj, x)
    tmp = x;
end
(* Degenerate Herbie alternative: ignores wj, returns x. *)
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 72.9%
distribute-rgt1-in73.2%
associate-/l/73.2%
div-sub72.9%
associate-/l*72.9%
*-inverses76.0%
*-rgt-identity76.0%
Simplified76.0%
Taylor expanded in wj around 0 77.9%
(FPCore (wj x) :precision binary64 wj)
/* Degenerate Herbie alternative: ignores x, returns wj unchanged. */
double code(double wj, double x) {
return wj;
}
! Degenerate Herbie alternative: ignores x, returns wj unchanged.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
/** Degenerate Herbie alternative: ignores x, returns wj unchanged. */
public static double code(double wj, double x) {
return wj;
}
def code(wj, x):
    """Degenerate Herbie alternative: ignores x and returns wj unchanged."""
    return wj
# Degenerate Herbie alternative: ignores x and returns wj unchanged.
function code(wj, x)
    return wj
end
% Degenerate Herbie alternative: returns wj unchanged; restored from a
% collapsed one-line rendering (declaration must be on its own line).
function tmp = code(wj, x)
    tmp = wj;
end
(* Degenerate Herbie alternative: ignores x, returns wj. *)
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 72.9%
distribute-rgt1-in73.2%
associate-/l/73.2%
div-sub72.9%
associate-/l*72.9%
*-inverses76.0%
*-rgt-identity76.0%
Simplified76.0%
Taylor expanded in wj around inf 5.4%
(FPCore (wj x) :precision binary64 -1.0)
/* Degenerate Herbie alternative: constant -1.0 regardless of inputs. */
double code(double wj, double x) {
return -1.0;
}
! Degenerate Herbie alternative: constant -1.0 regardless of inputs.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
/** Degenerate Herbie alternative: constant -1.0 regardless of inputs. */
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x):
    """Degenerate Herbie alternative: constant -1.0 regardless of inputs."""
    return -1.0
# Degenerate Herbie alternative: constant -1.0 regardless of inputs.
function code(wj, x)
    return -1.0
end
% Degenerate Herbie alternative: constant -1.0; restored from a
% collapsed one-line rendering (declaration must be on its own line).
function tmp = code(wj, x)
    tmp = -1.0;
end
(* Degenerate Herbie alternative: constant -1.0. *)
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 72.9%
distribute-rgt1-in73.2%
associate-/l/73.2%
div-sub72.9%
associate-/l*72.9%
*-inverses76.0%
*-rgt-identity76.0%
Simplified76.0%
Taylor expanded in wj around inf 5.9%
Taylor expanded in wj around 0 3.2%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Herbie alternative: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/** Herbie alternative: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)). */
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x):
    """Herbie alternative: wj - (wj/(wj+1) - x/(e**wj + wj*e**wj))."""
    ew = math.exp(wj)
    return wj - (wj / (wj + 1.0) - x / (ew + wj * ew))
# Herbie alternative: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
function code(wj, x)
    ratio = Float64(wj / Float64(wj + 1.0))
    corr = Float64(x / Float64(exp(wj) + Float64(wj * exp(wj))))
    return Float64(wj - Float64(ratio - corr))
end
% Herbie alternative: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj)));
% restored from a collapsed one-line rendering.
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
(* Herbie alternative: wj - (wj/(wj+1) - x/(E^wj + wj*E^wj)) at machine precision. *)
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024123
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))