
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
!> One Newton-Raphson step toward solving wj * exp(wj) = x
!> (Lambert-W style iteration): wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
real(8) function code(wj, x)
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: ew
real(8) :: t_0
! Hoist exp(wj): the original evaluated it twice.
ew = exp(wj)
t_0 = wj * ew
code = wj - ((t_0 - x) / (ew + t_0))
end function
/**
 * One Newton-Raphson step toward solving wj * exp(wj) = x (Lambert-W
 * style iteration): wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
 *
 * @param wj current iterate
 * @param x  target value of w * exp(w)
 * @return the refined iterate
 */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj); // hoisted: original called Math.exp(wj) twice
    final double t0 = wj * ew;
    return wj - ((t0 - x) / (ew + t0));
}
def code(wj, x):
    """One Newton-Raphson step toward solving wj * exp(wj) == x.

    Fixes the original one-line form, which was syntactically invalid
    (two statements collapsed onto one line with no separator).
    """
    ew = math.exp(wj)  # hoisted: original evaluated math.exp(wj) twice
    t_0 = wj * ew
    return wj - ((t_0 - x) / (ew + t_0))
# One Newton-Raphson step toward solving wj * exp(wj) == x.
# The original collapsed all statements onto one line with no
# separators, which is invalid Julia; reformatted onto separate lines.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton-Raphson step toward solving wj * exp(wj) == x.
% The original put statements on the function declaration line, which
% MATLAB rejects; reformatted onto separate lines.
function tmp = code(wj, x)
  t_0 = wj * exp(wj);
  tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))) (t_1 (+ (* x -4.0) (* x 1.5))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 2e-18)
(+
x
(+
(* -2.0 (* wj x))
(+
(*
(pow wj 3.0)
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_1) (* x 0.6666666666666666)))))
(* (pow wj 2.0) (- 1.0 t_1)))))
(- wj (/ (- wj (/ x (exp wj))) (/ (fma wj wj -1.0) (+ wj -1.0)))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 2e-18) {
tmp = x + ((-2.0 * (wj * x)) + ((pow(wj, 3.0) * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666))))) + (pow(wj, 2.0) * (1.0 - t_1))));
} else {
tmp = wj - ((wj - (x / exp(wj))) / (fma(wj, wj, -1.0) / (wj + -1.0)));
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj * exp(wj)) t_1 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 2e-18) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + Float64(Float64((wj ^ 3.0) * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_1) + Float64(x * 0.6666666666666666))))) + Float64((wj ^ 2.0) * Float64(1.0 - t_1))))); else tmp = Float64(wj - Float64(Float64(wj - Float64(x / exp(wj))) / Float64(fma(wj, wj, -1.0) / Float64(wj + -1.0)))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e-18], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[(N[(N[Power[wj, 3.0], $MachinePrecision] * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$1), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[Power[wj, 2.0], $MachinePrecision] * N[(1.0 - t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(wj * wj + -1.0), $MachinePrecision] / N[(wj + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj + \frac{x - t_0}{e^{wj} + t_0} \leq 2 \cdot 10^{-18}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + \left({wj}^{3} \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t_1 + x \cdot 0.6666666666666666\right)\right)\right) + {wj}^{2} \cdot \left(1 - t_1\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - \frac{x}{e^{wj}}}{\frac{\mathsf{fma}\left(wj, wj, -1\right)}{wj + -1}}\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 2.0000000000000001e-18Initial program 76.1%
div-sub76.1%
distribute-rgt1-in76.1%
times-frac76.1%
*-inverses76.1%
associate-*l/76.1%
*-rgt-identity76.1%
distribute-rgt1-in76.1%
associate-/l/76.1%
div-sub76.1%
Simplified76.1%
Taylor expanded in wj around 0 99.3%
if 2.0000000000000001e-18 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 92.3%
div-sub92.3%
distribute-rgt1-in92.4%
times-frac92.2%
*-inverses98.1%
associate-*l/98.1%
*-rgt-identity98.1%
distribute-rgt1-in99.3%
associate-/l/99.3%
div-sub99.3%
Simplified99.3%
flip-+99.3%
associate-/r/99.4%
metadata-eval99.4%
fma-neg99.4%
metadata-eval99.4%
sub-neg99.4%
metadata-eval99.4%
Applied egg-rr99.4%
associate-*l/99.4%
associate-/l*99.3%
Simplified99.3%
Final simplification99.3%
(FPCore (wj x)
:precision binary64
(if (<= wj 3.7e-6)
(+
x
(+
(* -2.0 (* wj x))
(- (* (pow wj 2.0) (- 1.0 (+ (* x -4.0) (* x 1.5)))) (pow wj 3.0))))
(+ wj (/ (- (* x (exp (- wj))) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 3.7e-6) {
tmp = x + ((-2.0 * (wj * x)) + ((pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))) - pow(wj, 3.0)));
} else {
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
!> Herbie-generated alternative: for small wj use a Taylor-series
!> polynomial in wj; otherwise a rearranged Newton step based on
!> x * exp(-wj).  The branch threshold was chosen by Herbie's regime
!> inference (see the trace below this block).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
! Small-wj regime: series expansion around wj = 0
if (wj <= 3.7d-6) then
tmp = x + (((-2.0d0) * (wj * x)) + (((wj ** 2.0d0) * (1.0d0 - ((x * (-4.0d0)) + (x * 1.5d0)))) - (wj ** 3.0d0)))
else
! General regime: wj + (x*e^{-wj} - wj) / (wj + 1)
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
/**
 * Herbie alternative: Taylor polynomial for small wj, otherwise a
 * rearranged Newton step using x * exp(-wj).  Same behavior as the
 * original; the branch is inverted for an early return.
 */
public static double code(double wj, double x) {
    if (wj > 3.7e-6) {
        return wj + (((x * Math.exp(-wj)) - wj) / (wj + 1.0));
    }
    double series = (-2.0 * (wj * x))
            + ((Math.pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))) - Math.pow(wj, 3.0));
    return x + series;
}
def code(wj, x):
    """Herbie alternative: Taylor polynomial for small wj, otherwise a
    rearranged Newton step using x * exp(-wj).

    Fixes the original one-line form, which was not valid Python
    (multiple statements collapsed onto one line).
    """
    if wj <= 3.7e-6:
        # Series expansion around wj = 0
        return x + ((-2.0 * (wj * x))
                    + ((math.pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5))))
                       - math.pow(wj, 3.0)))
    return wj + (((x * math.exp(-wj)) - wj) / (wj + 1.0))
function code(wj, x) tmp = 0.0 if (wj <= 3.7e-6) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + Float64(Float64((wj ^ 2.0) * Float64(1.0 - Float64(Float64(x * -4.0) + Float64(x * 1.5)))) - (wj ^ 3.0)))); else tmp = Float64(wj + Float64(Float64(Float64(x * exp(Float64(-wj))) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 3.7e-6) tmp = x + ((-2.0 * (wj * x)) + (((wj ^ 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))) - (wj ^ 3.0))); else tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 3.7e-6], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[(N[(N[Power[wj, 2.0], $MachinePrecision] * N[(1.0 - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Power[wj, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x * N[Exp[(-wj)], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 3.7 \cdot 10^{-6}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + \left({wj}^{2} \cdot \left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) - {wj}^{3}\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{x \cdot e^{-wj} - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 3.7000000000000002e-6Initial program 82.8%
div-sub82.8%
distribute-rgt1-in82.8%
times-frac82.8%
*-inverses82.8%
associate-*l/82.8%
*-rgt-identity82.8%
distribute-rgt1-in83.2%
associate-/l/83.2%
div-sub83.2%
Simplified83.2%
Taylor expanded in wj around 0 99.2%
Taylor expanded in x around 0 99.0%
if 3.7000000000000002e-6 < wj Initial program 55.9%
div-sub55.9%
distribute-rgt1-in56.1%
times-frac55.4%
*-inverses97.1%
associate-*l/97.1%
*-rgt-identity97.1%
distribute-rgt1-in96.7%
associate-/l/96.7%
div-sub97.0%
Simplified97.0%
clear-num97.1%
associate-/r/97.1%
rec-exp97.2%
Applied egg-rr97.2%
Final simplification98.9%
(FPCore (wj x)
:precision binary64
(if (<= wj 5.2e-9)
(+
x
(+ (* -2.0 (* wj x)) (* (pow wj 2.0) (- 1.0 (+ (* x -4.0) (* x 1.5))))))
(+ wj (/ (- (* x (exp (- wj))) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 5.2e-9) {
tmp = x + ((-2.0 * (wj * x)) + (pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))));
} else {
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 5.2d-9) then
tmp = x + (((-2.0d0) * (wj * x)) + ((wj ** 2.0d0) * (1.0d0 - ((x * (-4.0d0)) + (x * 1.5d0)))))
else
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 5.2e-9) {
tmp = x + ((-2.0 * (wj * x)) + (Math.pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))));
} else {
tmp = wj + (((x * Math.exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 5.2e-9: tmp = x + ((-2.0 * (wj * x)) + (math.pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5))))) else: tmp = wj + (((x * math.exp(-wj)) - wj) / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 5.2e-9) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + Float64((wj ^ 2.0) * Float64(1.0 - Float64(Float64(x * -4.0) + Float64(x * 1.5)))))); else tmp = Float64(wj + Float64(Float64(Float64(x * exp(Float64(-wj))) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 5.2e-9) tmp = x + ((-2.0 * (wj * x)) + ((wj ^ 2.0) * (1.0 - ((x * -4.0) + (x * 1.5))))); else tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 5.2e-9], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[(N[Power[wj, 2.0], $MachinePrecision] * N[(1.0 - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x * N[Exp[(-wj)], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 5.2 \cdot 10^{-9}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + {wj}^{2} \cdot \left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{x \cdot e^{-wj} - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 5.2000000000000002e-9Initial program 82.8%
div-sub82.8%
distribute-rgt1-in82.8%
times-frac82.8%
*-inverses82.8%
associate-*l/82.8%
*-rgt-identity82.8%
distribute-rgt1-in83.2%
associate-/l/83.2%
div-sub83.2%
Simplified83.2%
Taylor expanded in wj around 0 98.7%
if 5.2000000000000002e-9 < wj Initial program 55.9%
div-sub55.9%
distribute-rgt1-in56.1%
times-frac55.4%
*-inverses97.1%
associate-*l/97.1%
*-rgt-identity97.1%
distribute-rgt1-in96.7%
associate-/l/96.7%
div-sub97.0%
Simplified97.0%
clear-num97.1%
associate-/r/97.1%
rec-exp97.2%
Applied egg-rr97.2%
Final simplification98.6%
(FPCore (wj x) :precision binary64 (if (<= wj 5.1e-9) (+ x (+ (* -2.0 (* wj x)) (pow wj 2.0))) (+ wj (/ (- (* x (exp (- wj))) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 5.1e-9) {
tmp = x + ((-2.0 * (wj * x)) + pow(wj, 2.0));
} else {
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 5.1d-9) then
tmp = x + (((-2.0d0) * (wj * x)) + (wj ** 2.0d0))
else
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 5.1e-9) {
tmp = x + ((-2.0 * (wj * x)) + Math.pow(wj, 2.0));
} else {
tmp = wj + (((x * Math.exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 5.1e-9: tmp = x + ((-2.0 * (wj * x)) + math.pow(wj, 2.0)) else: tmp = wj + (((x * math.exp(-wj)) - wj) / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 5.1e-9) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + (wj ^ 2.0))); else tmp = Float64(wj + Float64(Float64(Float64(x * exp(Float64(-wj))) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 5.1e-9) tmp = x + ((-2.0 * (wj * x)) + (wj ^ 2.0)); else tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 5.1e-9], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x * N[Exp[(-wj)], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 5.1 \cdot 10^{-9}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + {wj}^{2}\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{x \cdot e^{-wj} - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 5.10000000000000017e-9Initial program 82.8%
div-sub82.8%
distribute-rgt1-in82.8%
times-frac82.8%
*-inverses82.8%
associate-*l/82.8%
*-rgt-identity82.8%
distribute-rgt1-in83.2%
associate-/l/83.2%
div-sub83.2%
Simplified83.2%
Taylor expanded in wj around 0 98.7%
Taylor expanded in x around 0 98.5%
if 5.10000000000000017e-9 < wj Initial program 55.9%
div-sub55.9%
distribute-rgt1-in56.1%
times-frac55.4%
*-inverses97.1%
associate-*l/97.1%
*-rgt-identity97.1%
distribute-rgt1-in96.7%
associate-/l/96.7%
div-sub97.0%
Simplified97.0%
clear-num97.1%
associate-/r/97.1%
rec-exp97.2%
Applied egg-rr97.2%
Final simplification98.4%
(FPCore (wj x) :precision binary64 (if (<= wj 5.2e-9) (+ x (+ (* -2.0 (* wj x)) (pow wj 2.0))) (+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 5.2e-9) {
tmp = x + ((-2.0 * (wj * x)) + pow(wj, 2.0));
} else {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 5.2d-9) then
tmp = x + (((-2.0d0) * (wj * x)) + (wj ** 2.0d0))
else
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 5.2e-9) {
tmp = x + ((-2.0 * (wj * x)) + Math.pow(wj, 2.0));
} else {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 5.2e-9: tmp = x + ((-2.0 * (wj * x)) + math.pow(wj, 2.0)) else: tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 5.2e-9) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + (wj ^ 2.0))); else tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 5.2e-9) tmp = x + ((-2.0 * (wj * x)) + (wj ^ 2.0)); else tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 5.2e-9], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 5.2 \cdot 10^{-9}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + {wj}^{2}\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 5.2000000000000002e-9Initial program 82.8%
div-sub82.8%
distribute-rgt1-in82.8%
times-frac82.8%
*-inverses82.8%
associate-*l/82.8%
*-rgt-identity82.8%
distribute-rgt1-in83.2%
associate-/l/83.2%
div-sub83.2%
Simplified83.2%
Taylor expanded in wj around 0 98.7%
Taylor expanded in x around 0 98.5%
if 5.2000000000000002e-9 < wj Initial program 55.9%
div-sub55.9%
distribute-rgt1-in56.1%
times-frac55.4%
*-inverses97.1%
associate-*l/97.1%
*-rgt-identity97.1%
distribute-rgt1-in96.7%
associate-/l/96.7%
div-sub97.0%
Simplified97.0%
Final simplification98.4%
(FPCore (wj x) :precision binary64 (if (<= wj 5.2e-9) (+ x (+ (* -2.0 (* wj x)) (pow wj 2.0))) (+ wj (/ (- (- x (* wj x)) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 5.2e-9) {
tmp = x + ((-2.0 * (wj * x)) + pow(wj, 2.0));
} else {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 5.2d-9) then
tmp = x + (((-2.0d0) * (wj * x)) + (wj ** 2.0d0))
else
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 5.2e-9) {
tmp = x + ((-2.0 * (wj * x)) + Math.pow(wj, 2.0));
} else {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 5.2e-9: tmp = x + ((-2.0 * (wj * x)) + math.pow(wj, 2.0)) else: tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 5.2e-9) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + (wj ^ 2.0))); else tmp = Float64(wj + Float64(Float64(Float64(x - Float64(wj * x)) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 5.2e-9) tmp = x + ((-2.0 * (wj * x)) + (wj ^ 2.0)); else tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 5.2e-9], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x - N[(wj * x), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 5.2 \cdot 10^{-9}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + {wj}^{2}\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\left(x - wj \cdot x\right) - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 5.2000000000000002e-9Initial program 82.8%
div-sub82.8%
distribute-rgt1-in82.8%
times-frac82.8%
*-inverses82.8%
associate-*l/82.8%
*-rgt-identity82.8%
distribute-rgt1-in83.2%
associate-/l/83.2%
div-sub83.2%
Simplified83.2%
Taylor expanded in wj around 0 98.7%
Taylor expanded in x around 0 98.5%
if 5.2000000000000002e-9 < wj Initial program 55.9%
div-sub55.9%
distribute-rgt1-in56.1%
times-frac55.4%
*-inverses97.1%
associate-*l/97.1%
*-rgt-identity97.1%
distribute-rgt1-in96.7%
associate-/l/96.7%
div-sub97.0%
Simplified97.0%
Taylor expanded in wj around 0 81.0%
mul-1-neg81.0%
unsub-neg81.0%
*-commutative81.0%
Simplified81.0%
Final simplification97.6%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00145) (/ x (* (exp wj) (+ wj 1.0))) (+ wj (/ (- (- x (* wj x)) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00145) {
tmp = x / (exp(wj) * (wj + 1.0));
} else {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00145d0) then
tmp = x / (exp(wj) * (wj + 1.0d0))
else
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00145) {
tmp = x / (Math.exp(wj) * (wj + 1.0));
} else {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00145: tmp = x / (math.exp(wj) * (wj + 1.0)) else: tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00145) tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0))); else tmp = Float64(wj + Float64(Float64(Float64(x - Float64(wj * x)) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00145) tmp = x / (exp(wj) * (wj + 1.0)); else tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00145], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x - N[(wj * x), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00145:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\left(x - wj \cdot x\right) - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 0.00145Initial program 83.0%
div-sub83.0%
distribute-rgt1-in83.0%
times-frac83.0%
*-inverses83.0%
associate-*l/83.0%
*-rgt-identity83.0%
distribute-rgt1-in83.3%
associate-/l/83.3%
div-sub83.3%
Simplified83.3%
Taylor expanded in x around inf 91.0%
+-commutative91.0%
Simplified91.0%
if 0.00145 < wj Initial program 41.6%
div-sub41.6%
distribute-rgt1-in41.9%
times-frac40.9%
*-inverses96.4%
associate-*l/96.4%
*-rgt-identity96.4%
distribute-rgt1-in96.3%
associate-/l/96.3%
div-sub96.6%
Simplified96.6%
Taylor expanded in wj around 0 90.6%
mul-1-neg90.6%
unsub-neg90.6%
*-commutative90.6%
Simplified90.6%
Final simplification91.0%
(FPCore (wj x) :precision binary64 (if (<= wj 6.1e-34) (/ x (+ 1.0 (* wj 2.0))) (+ wj (/ (- (- x (* wj x)) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 6.1e-34) {
tmp = x / (1.0 + (wj * 2.0));
} else {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 6.1d-34) then
tmp = x / (1.0d0 + (wj * 2.0d0))
else
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 6.1e-34) {
tmp = x / (1.0 + (wj * 2.0));
} else {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 6.1e-34: tmp = x / (1.0 + (wj * 2.0)) else: tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 6.1e-34) tmp = Float64(x / Float64(1.0 + Float64(wj * 2.0))); else tmp = Float64(wj + Float64(Float64(Float64(x - Float64(wj * x)) - wj) / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 6.1e-34) tmp = x / (1.0 + (wj * 2.0)); else tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 6.1e-34], N[(x / N[(1.0 + N[(wj * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x - N[(wj * x), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 6.1 \cdot 10^{-34}:\\
\;\;\;\;\frac{x}{1 + wj \cdot 2}\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\left(x - wj \cdot x\right) - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 6.0999999999999998e-34Initial program 82.8%
div-sub82.8%
distribute-rgt1-in82.8%
times-frac82.8%
*-inverses82.8%
associate-*l/82.8%
*-rgt-identity82.8%
distribute-rgt1-in83.2%
associate-/l/83.2%
div-sub83.2%
Simplified83.2%
Taylor expanded in x around inf 91.8%
+-commutative91.8%
Simplified91.8%
Taylor expanded in wj around 0 90.7%
*-commutative90.7%
Simplified90.7%
if 6.0999999999999998e-34 < wj Initial program 67.5%
div-sub67.5%
distribute-rgt1-in67.6%
times-frac67.2%
*-inverses89.9%
associate-*l/89.9%
*-rgt-identity89.9%
distribute-rgt1-in89.7%
associate-/l/89.7%
div-sub89.8%
Simplified89.8%
Taylor expanded in wj around 0 81.1%
mul-1-neg81.1%
unsub-neg81.1%
*-commutative81.1%
Simplified81.1%
Final simplification89.9%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00175) (/ x (+ 1.0 (* wj 2.0))) (+ wj (/ -1.0 (+ 1.0 (/ 1.0 wj))))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00175) {
tmp = x / (1.0 + (wj * 2.0));
} else {
tmp = wj + (-1.0 / (1.0 + (1.0 / wj)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00175d0) then
tmp = x / (1.0d0 + (wj * 2.0d0))
else
tmp = wj + ((-1.0d0) / (1.0d0 + (1.0d0 / wj)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00175) {
tmp = x / (1.0 + (wj * 2.0));
} else {
tmp = wj + (-1.0 / (1.0 + (1.0 / wj)));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00175: tmp = x / (1.0 + (wj * 2.0)) else: tmp = wj + (-1.0 / (1.0 + (1.0 / wj))) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00175) tmp = Float64(x / Float64(1.0 + Float64(wj * 2.0))); else tmp = Float64(wj + Float64(-1.0 / Float64(1.0 + Float64(1.0 / wj)))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00175) tmp = x / (1.0 + (wj * 2.0)); else tmp = wj + (-1.0 / (1.0 + (1.0 / wj))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00175], N[(x / N[(1.0 + N[(wj * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(-1.0 / N[(1.0 + N[(1.0 / wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00175:\\
\;\;\;\;\frac{x}{1 + wj \cdot 2}\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{-1}{1 + \frac{1}{wj}}\\
\end{array}
\end{array}
if wj < 0.00175000000000000004Initial program 83.0%
div-sub83.0%
distribute-rgt1-in83.0%
times-frac83.0%
*-inverses83.0%
associate-*l/83.0%
*-rgt-identity83.0%
distribute-rgt1-in83.3%
associate-/l/83.3%
div-sub83.3%
Simplified83.3%
Taylor expanded in x around inf 91.0%
+-commutative91.0%
Simplified91.0%
Taylor expanded in wj around 0 89.4%
*-commutative89.4%
Simplified89.4%
if 0.00175000000000000004 < wj Initial program 41.6%
div-sub41.6%
distribute-rgt1-in41.9%
times-frac40.9%
*-inverses96.4%
associate-*l/96.4%
*-rgt-identity96.4%
distribute-rgt1-in96.3%
associate-/l/96.3%
div-sub96.6%
Simplified96.6%
flip-+96.3%
associate-/r/97.0%
metadata-eval97.0%
fma-neg97.0%
metadata-eval97.0%
sub-neg97.0%
metadata-eval97.0%
Applied egg-rr97.0%
associate-*l/97.4%
associate-/l*96.3%
Simplified96.3%
Taylor expanded in x around 0 84.7%
sub-neg84.7%
metadata-eval84.7%
unpow284.7%
fma-neg84.7%
metadata-eval84.7%
Simplified84.7%
associate-/l*83.9%
metadata-eval83.9%
fma-neg83.9%
metadata-eval83.9%
*-un-lft-identity83.9%
fma-def83.9%
metadata-eval83.9%
fma-neg83.9%
*-un-lft-identity83.9%
flip-+84.1%
clear-num84.1%
inv-pow84.1%
+-commutative84.1%
Applied egg-rr84.1%
unpow-184.1%
*-lft-identity84.1%
associate-*l/84.2%
+-commutative84.2%
distribute-lft-in84.8%
lft-mult-inverse84.8%
*-rgt-identity84.8%
Simplified84.8%
Final simplification89.2%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00146) (+ x (* -2.0 (* wj x))) (- wj (/ wj (+ wj 1.0)))))
/* Herbie alternative (threshold 0.00146): first-order Taylor form for
 * small wj, otherwise an x-independent wj - wj/(wj+1).  Behaviorally
 * identical rewrite using early returns instead of a temp variable. */
double code(double wj, double x) {
	if (wj <= 0.00146) {
		return x + (-2.0 * (wj * x));
	}
	return wj - (wj / (wj + 1.0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00146d0) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00146) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00146: tmp = x + (-2.0 * (wj * x)) else: tmp = wj - (wj / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00146) tmp = Float64(x + Float64(-2.0 * Float64(wj * x))); else tmp = Float64(wj - Float64(wj / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00146) tmp = x + (-2.0 * (wj * x)); else tmp = wj - (wj / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00146], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00146:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 0.0014599999999999999Initial program 83.0%
div-sub83.0%
distribute-rgt1-in83.0%
times-frac83.0%
*-inverses83.0%
associate-*l/83.0%
*-rgt-identity83.0%
distribute-rgt1-in83.3%
associate-/l/83.3%
div-sub83.3%
Simplified83.3%
Taylor expanded in wj around 0 89.4%
*-commutative89.4%
Simplified89.4%
if 0.0014599999999999999 < wj Initial program 41.6%
div-sub41.6%
distribute-rgt1-in41.9%
times-frac40.9%
*-inverses96.4%
associate-*l/96.4%
*-rgt-identity96.4%
distribute-rgt1-in96.3%
associate-/l/96.3%
div-sub96.6%
Simplified96.6%
Taylor expanded in x around 0 84.1%
+-commutative84.1%
Simplified84.1%
Final simplification89.2%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00145) (/ x (+ 1.0 (* wj 2.0))) (- wj (/ wj (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00145) {
tmp = x / (1.0 + (wj * 2.0));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00145d0) then
tmp = x / (1.0d0 + (wj * 2.0d0))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00145) {
tmp = x / (1.0 + (wj * 2.0));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00145: tmp = x / (1.0 + (wj * 2.0)) else: tmp = wj - (wj / (wj + 1.0)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00145) tmp = Float64(x / Float64(1.0 + Float64(wj * 2.0))); else tmp = Float64(wj - Float64(wj / Float64(wj + 1.0))); end return tmp end
% Piecewise Newton-step update for w*e^w = x (Herbie alternative, split at wj = 0.00145).
function tmp_2 = code(wj, x)
    if (wj <= 0.00145)
        tmp_2 = x / (1.0 + (wj * 2.0));
    else
        tmp_2 = wj - (wj / (wj + 1.0));
    end
end
(* Herbie alternative (split at wj = 0.00145): small-wj form x/(1 + 2*wj), else wj - wj/(wj+1). *)
code[wj_, x_] := If[LessEqual[wj, 0.00145], N[(x / N[(1.0 + N[(wj * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00145:\\
\;\;\;\;\frac{x}{1 + wj \cdot 2}\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 0.00145: Initial program 83.0%
div-sub 83.0%
distribute-rgt1-in 83.0%
times-frac 83.0%
*-inverses 83.0%
associate-*l/ 83.0%
*-rgt-identity 83.0%
distribute-rgt1-in 83.3%
associate-/l/ 83.3%
div-sub 83.3%
Simplified 83.3%
Taylor expanded in x around inf 91.0%
+-commutative 91.0%
Simplified 91.0%
Taylor expanded in wj around 0 89.4%
*-commutative 89.4%
Simplified 89.4%
if 0.00145 < wj: Initial program 41.6%
div-sub 41.6%
distribute-rgt1-in 41.9%
times-frac 40.9%
*-inverses 96.4%
associate-*l/ 96.4%
*-rgt-identity 96.4%
distribute-rgt1-in 96.3%
associate-/l/ 96.3%
div-sub 96.6%
Simplified 96.6%
Taylor expanded in x around 0 84.1%
+-commutative 84.1%
Simplified 84.1%
Final simplification 89.2%
; Herbie alternative: Taylor-truncated form x - 2*wj*x of the Newton step (valid near wj = 0).
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
/* Taylor-truncated alternative of the Newton step: x - 2*wj*x (Herbie; valid near wj = 0). */
double code(double wj, double x) {
    const double wx = wj * x;
    return x + -2.0 * wx;
}
!> Taylor-truncated alternative of the Newton step: x - 2*wj*x (Herbie; valid near wj = 0).
real(8) function code(wj, x)
  real(8), intent (in) :: wj
  real(8), intent (in) :: x
  real(8) :: wx
  wx = wj * x
  code = x + (-2.0d0) * wx
end function
/** Taylor-truncated alternative of the Newton step: x - 2*wj*x (Herbie; valid near wj = 0). */
public static double code(double wj, double x) {
    final double wx = wj * x;
    return x + -2.0 * wx;
}
def code(wj, x):
    """Taylor-truncated alternative of the Newton step: x - 2*wj*x (Herbie; valid near wj = 0)."""
    wx = wj * x
    return x + -2.0 * wx
# Taylor-truncated alternative of the Newton step: x - 2*wj*x (Herbie; valid near wj = 0).
function code(wj, x)
    wx = Float64(wj * x)
    return Float64(x + Float64(-2.0 * wx))
end
% Taylor-truncated alternative of the Newton step: x - 2*wj*x (Herbie; valid near wj = 0).
function tmp = code(wj, x)
    wx = wj * x;
    tmp = x + (-2.0 * wx);
end
(* Taylor-truncated alternative of the Newton step: x - 2*wj*x (Herbie; valid near wj = 0). *)
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Initial program 81.5%
div-sub 81.5%
distribute-rgt1-in 81.5%
times-frac 81.5%
*-inverses 83.4%
associate-*l/ 83.4%
*-rgt-identity 83.4%
distribute-rgt1-in 83.8%
associate-/l/ 83.8%
div-sub 83.8%
Simplified 83.8%
Taylor expanded in wj around 0 86.5%
*-commutative 86.5%
Simplified 86.5%
Final simplification 86.5%
; Degenerate alternative: identity in wj (x unused); from Taylor expansion of the step around infinity.
(FPCore (wj x) :precision binary64 wj)
/* Degenerate Herbie alternative: returns wj unchanged; x is unused. */
double code(double wj, double x) {
return wj;
}
!> Degenerate Herbie alternative: returns wj unchanged; x is unused.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
/** Degenerate Herbie alternative: returns wj unchanged; x is unused. */
public static double code(double wj, double x) {
return wj;
}
def code(wj, x):
    """Degenerate Herbie alternative: return wj unchanged; x is unused."""
    return wj
# Degenerate Herbie alternative: returns wj unchanged; x is unused.
function code(wj, x) return wj end
% Degenerate Herbie alternative: returns wj unchanged; x is unused.
function tmp = code(wj, x) tmp = wj; end
(* Degenerate Herbie alternative: returns wj unchanged; x is unused. *)
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 81.5%
div-sub 81.5%
distribute-rgt1-in 81.5%
times-frac 81.5%
*-inverses 83.4%
associate-*l/ 83.4%
*-rgt-identity 83.4%
distribute-rgt1-in 83.8%
associate-/l/ 83.8%
div-sub 83.8%
Simplified 83.8%
Taylor expanded in wj around inf 4.8%
Final simplification 4.8%
; Degenerate alternative: identity in x (wj unused); from Taylor expansion of the step around wj = 0.
(FPCore (wj x) :precision binary64 x)
/* Degenerate Herbie alternative: returns x unchanged; wj is unused. */
double code(double wj, double x) {
return x;
}
!> Degenerate Herbie alternative: returns x unchanged; wj is unused.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
/** Degenerate Herbie alternative: returns x unchanged; wj is unused. */
public static double code(double wj, double x) {
return x;
}
def code(wj, x):
    """Degenerate Herbie alternative: return x unchanged; wj is unused."""
    return x
# Degenerate Herbie alternative: returns x unchanged; wj is unused.
function code(wj, x) return x end
% Degenerate Herbie alternative: returns x unchanged; wj is unused.
function tmp = code(wj, x) tmp = x; end
(* Degenerate Herbie alternative: returns x unchanged; wj is unused. *)
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 81.5%
div-sub 81.5%
distribute-rgt1-in 81.5%
times-frac 81.5%
*-inverses 83.4%
associate-*l/ 83.4%
*-rgt-identity 83.4%
distribute-rgt1-in 83.8%
associate-/l/ 83.8%
div-sub 83.8%
Simplified 83.8%
Taylor expanded in wj around 0 85.8%
Final simplification 85.8%
; Rearranged Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)); same shape as the :herbie-target below.
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
!> Rearranged Newton step for w*e^w = x: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
!> exp(wj) is evaluated once and reused; arithmetic is otherwise identical.
real(8) function code(wj, x)
  real(8), intent (in) :: wj
  real(8), intent (in) :: x
  real(8) :: ew, drift, pull
  ew = exp(wj)
  drift = wj / (wj + 1.0d0)
  pull = x / (ew + wj * ew)
  code = wj - (drift - pull)
end function
/** Rearranged Newton step for w*e^w = x: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)). */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);
    final double drift = wj / (wj + 1.0);
    final double pull = x / (ew + wj * ew);
    return wj - (drift - pull);
}
def code(wj, x):
    """Rearranged Newton step for w*e^w = x: wj - (wj/(wj+1) - x/(e**wj + wj*e**wj))."""
    ew = math.exp(wj)
    return wj - (wj / (wj + 1.0) - x / (ew + wj * ew))
# Rearranged Newton step for w*e^w = x: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
% Rearranged Newton step for w*e^w = x: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
(* Rearranged Newton step for w*e^w = x: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)). *)
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2023333
; Input problem: one Newton-iteration step for solving w*e^w = x (Lambert W),
; with the rearranged :herbie-target form Herbie was asked to reach.
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:herbie-target
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))