
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
/* One Newton step for solving w*exp(w) = x (Lambert-W style iteration):
 * returns wj - (f(wj) - x) / f'(wj) with f(w) = w*exp(w).  The
 * denominator exp(wj) + t_0 equals exp(wj)*(1 + wj) = f'(wj). */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
! The denominator exp(wj) + t_0 equals exp(wj)*(1 + wj), the derivative
! of w*exp(w) evaluated at wj.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)                              ! f(wj) = wj*exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))       ! wj - (f(wj)-x)/f'(wj)
end function
/**
 * One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
 * The denominator Math.exp(wj) + t_0 equals exp(wj)*(1 + wj), the
 * derivative of w*exp(w) at wj.
 */
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton step for solving w*exp(w) = x (Lambert-W style iteration).

    The denominator math.exp(wj) + t_0 equals exp(wj)*(1 + wj), the
    derivative of w*exp(w) at wj.
    """
    # NOTE(review): the generated source collapsed this definition onto a
    # single line, which is not valid Python; only layout is changed here —
    # the expression and its evaluation order are preserved exactly.
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
# NOTE(review): the generated source put this on one line with no statement
# separators, which is not valid Julia; only the layout is changed here.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
% NOTE(review): the generated source put the whole function on one line with
% no separator after the signature, which is invalid; only layout changed.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* One Newton step for solving w*Exp[w] == x (Lambert-W style iteration);
   every subexpression is rounded with N[..., $MachinePrecision] to mimic
   binary64 evaluation. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
/* One Newton step for solving w*exp(w) = x (Lambert-W style iteration):
 * returns wj - (f(wj) - x) / f'(wj) with f(w) = w*exp(w).  The
 * denominator exp(wj) + t_0 equals exp(wj)*(1 + wj) = f'(wj). */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
! The denominator exp(wj) + t_0 equals exp(wj)*(1 + wj), the derivative
! of w*exp(w) evaluated at wj.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)                              ! f(wj) = wj*exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))       ! wj - (f(wj)-x)/f'(wj)
end function
/**
 * One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
 * The denominator Math.exp(wj) + t_0 equals exp(wj)*(1 + wj), the
 * derivative of w*exp(w) at wj.
 */
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton step for solving w*exp(w) = x (Lambert-W style iteration).

    The denominator math.exp(wj) + t_0 equals exp(wj)*(1 + wj), the
    derivative of w*exp(w) at wj.
    """
    # NOTE(review): the generated source collapsed this definition onto a
    # single line, which is not valid Python; only layout is changed here.
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
# NOTE(review): the generated source put this on one line with no statement
# separators, which is not valid Julia; only the layout is changed here.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton step for solving w*exp(w) = x (Lambert-W style iteration).
% NOTE(review): reformatted from an invalid single-line definition; the
% formula and evaluation order are unchanged.
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* One Newton step for solving w*Exp[w] == x (Lambert-W style iteration);
   every subexpression is rounded with N[..., $MachinePrecision] to mimic
   binary64 evaluation. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ x (exp wj))))
(if (<= wj -2.3e-6)
(+ wj (/ (- x (* wj (exp wj))) (* (exp wj) (+ wj 1.0))))
(if (<= wj 2.6e-6)
(+
x
(-
(* -2.0 (* wj x))
(+ (pow wj 3.0) (* wj (* wj (+ -1.0 (* x -2.5)))))))
(+
(+ wj (/ (- t_0 wj) (+ wj 1.0)))
(fma (- (exp (- (log1p wj)))) (- wj t_0) (/ wj (+ wj 1.0))))))))
/* Herbie-rewritten variant of the Lambert-W Newton step, split into three
 * wj regimes chosen by Herbie's accuracy sampling:
 *   wj <= -2.3e-6 : algebraically expanded Newton step,
 *   wj <=  2.6e-6 : polynomial (Taylor) approximation around wj = 0,
 *   otherwise     : fma/log1p-based reformulation (1/(1+wj) via exp(-log1p)).
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double t_0 = x / exp(wj);
double tmp;
if (wj <= -2.3e-6) {
tmp = wj + ((x - (wj * exp(wj))) / (exp(wj) * (wj + 1.0)));
} else if (wj <= 2.6e-6) {
tmp = x + ((-2.0 * (wj * x)) - (pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))));
} else {
tmp = (wj + ((t_0 - wj) / (wj + 1.0))) + fma(-exp(-log1p(wj)), (wj - t_0), (wj / (wj + 1.0)));
}
return tmp;
}
function code(wj, x) t_0 = Float64(x / exp(wj)) tmp = 0.0 if (wj <= -2.3e-6) tmp = Float64(wj + Float64(Float64(x - Float64(wj * exp(wj))) / Float64(exp(wj) * Float64(wj + 1.0)))); elseif (wj <= 2.6e-6) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) - Float64((wj ^ 3.0) + Float64(wj * Float64(wj * Float64(-1.0 + Float64(x * -2.5))))))); else tmp = Float64(Float64(wj + Float64(Float64(t_0 - wj) / Float64(wj + 1.0))) + fma(Float64(-exp(Float64(-log1p(wj)))), Float64(wj - t_0), Float64(wj / Float64(wj + 1.0)))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -2.3e-6], N[(wj + N[(N[(x - N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.6e-6], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] - N[(N[Power[wj, 3.0], $MachinePrecision] + N[(wj * N[(wj * N[(-1.0 + N[(x * -2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(wj + N[(N[(t$95$0 - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[((-N[Exp[(-N[Log[1 + wj], $MachinePrecision])], $MachinePrecision]) * N[(wj - t$95$0), $MachinePrecision] + N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{e^{wj}}\\
\mathbf{if}\;wj \leq -2.3 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{x - wj \cdot e^{wj}}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-6}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) - \left({wj}^{3} + wj \cdot \left(wj \cdot \left(-1 + x \cdot -2.5\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\left(wj + \frac{t_0 - wj}{wj + 1}\right) + \mathsf{fma}\left(-e^{-\mathsf{log1p}\left(wj\right)}, wj - t_0, \frac{wj}{wj + 1}\right)\\
\end{array}
\end{array}
if wj < -2.3e-6Initial program 43.5%
distribute-rgt1-in99.0%
*-commutative99.0%
Simplified99.0%
if -2.3e-6 < wj < 2.60000000000000009e-6Initial program 73.7%
distribute-rgt1-in73.7%
associate-/l/73.7%
div-sub73.7%
associate-/l*73.7%
*-inverses73.7%
/-rgt-identity73.7%
Simplified73.7%
Taylor expanded in wj around 0 99.9%
Taylor expanded in x around 0 99.9%
add-cbrt-cube91.2%
pow1/384.0%
pow384.0%
distribute-rgt-out84.0%
metadata-eval84.0%
Applied egg-rr84.0%
unpow1/391.2%
rem-cbrt-cube99.9%
*-commutative99.9%
unpow299.9%
associate-*r*99.9%
Applied egg-rr99.9%
if 2.60000000000000009e-6 < wj Initial program 28.1%
distribute-rgt1-in28.1%
associate-/l/28.1%
div-sub28.1%
associate-/l*28.1%
*-inverses94.8%
/-rgt-identity94.8%
Simplified94.8%
*-un-lft-identity94.8%
div-inv94.8%
prod-diff94.5%
associate-/r/94.5%
clear-num94.5%
fma-neg94.5%
*-un-lft-identity94.5%
+-commutative94.5%
add-exp-log94.5%
log1p-udef94.3%
rec-exp95.8%
associate-/r/95.8%
clear-num95.8%
Applied egg-rr95.8%
Taylor expanded in x around 0 95.8%
+-commutative95.8%
Simplified95.8%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ wj (+ wj 1.0))))
(if (<= wj -2.3e-6)
(+ wj (/ (- x (* wj (exp wj))) (* (exp wj) (+ wj 1.0))))
(if (<= wj 0.00092)
(+
x
(-
(* -2.0 (* wj x))
(+ (pow wj 3.0) (* wj (* wj (+ -1.0 (* x -2.5)))))))
(/ (- (pow wj 2.0) (* t_0 t_0)) (+ wj t_0))))))
/* Herbie-rewritten Lambert-W Newton step, three wj regimes:
 *   wj <= -2.3e-6 : expanded Newton step,
 *   wj <= 0.00092 : polynomial (Taylor) approximation around wj = 0,
 *   otherwise     : rational form using t_0 = wj/(wj+1).
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double t_0 = wj / (wj + 1.0);
double tmp;
if (wj <= -2.3e-6) {
tmp = wj + ((x - (wj * exp(wj))) / (exp(wj) * (wj + 1.0)));
} else if (wj <= 0.00092) {
tmp = x + ((-2.0 * (wj * x)) - (pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))));
} else {
tmp = (pow(wj, 2.0) - (t_0 * t_0)) / (wj + t_0);
}
return tmp;
}
! Herbie-rewritten Lambert-W Newton step, three wj regimes
! (wj <= -2.3e-6 expanded step; wj <= 0.00092 Taylor polynomial;
! otherwise rational form with t_0 = wj/(wj+1)).  Expression order is
! accuracy-tuned; do not reassociate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = wj / (wj + 1.0d0)
if (wj <= (-2.3d-6)) then
tmp = wj + ((x - (wj * exp(wj))) / (exp(wj) * (wj + 1.0d0)))
else if (wj <= 0.00092d0) then
tmp = x + (((-2.0d0) * (wj * x)) - ((wj ** 3.0d0) + (wj * (wj * ((-1.0d0) + (x * (-2.5d0)))))))
else
tmp = ((wj ** 2.0d0) - (t_0 * t_0)) / (wj + t_0)
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj / (wj + 1.0);
double tmp;
if (wj <= -2.3e-6) {
tmp = wj + ((x - (wj * Math.exp(wj))) / (Math.exp(wj) * (wj + 1.0)));
} else if (wj <= 0.00092) {
tmp = x + ((-2.0 * (wj * x)) - (Math.pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))));
} else {
tmp = (Math.pow(wj, 2.0) - (t_0 * t_0)) / (wj + t_0);
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Lambert-W Newton step, three wj regimes.

    wj <= -2.3e-6: expanded Newton step; wj <= 0.00092: Taylor polynomial
    around wj = 0; otherwise a rational form using t_0 = wj/(wj+1).
    NOTE(review): reformatted from an invalid collapsed one-liner; the
    expressions and their evaluation order are preserved exactly.
    """
    t_0 = wj / (wj + 1.0)
    tmp = 0
    if wj <= -2.3e-6:
        tmp = wj + ((x - (wj * math.exp(wj))) / (math.exp(wj) * (wj + 1.0)))
    elif wj <= 0.00092:
        tmp = x + ((-2.0 * (wj * x)) - (math.pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))))
    else:
        tmp = (math.pow(wj, 2.0) - (t_0 * t_0)) / (wj + t_0)
    return tmp
function code(wj, x) t_0 = Float64(wj / Float64(wj + 1.0)) tmp = 0.0 if (wj <= -2.3e-6) tmp = Float64(wj + Float64(Float64(x - Float64(wj * exp(wj))) / Float64(exp(wj) * Float64(wj + 1.0)))); elseif (wj <= 0.00092) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) - Float64((wj ^ 3.0) + Float64(wj * Float64(wj * Float64(-1.0 + Float64(x * -2.5))))))); else tmp = Float64(Float64((wj ^ 2.0) - Float64(t_0 * t_0)) / Float64(wj + t_0)); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj / (wj + 1.0); tmp = 0.0; if (wj <= -2.3e-6) tmp = wj + ((x - (wj * exp(wj))) / (exp(wj) * (wj + 1.0))); elseif (wj <= 0.00092) tmp = x + ((-2.0 * (wj * x)) - ((wj ^ 3.0) + (wj * (wj * (-1.0 + (x * -2.5)))))); else tmp = ((wj ^ 2.0) - (t_0 * t_0)) / (wj + t_0); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -2.3e-6], N[(wj + N[(N[(x - N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 0.00092], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] - N[(N[Power[wj, 3.0], $MachinePrecision] + N[(wj * N[(wj * N[(-1.0 + N[(x * -2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[Power[wj, 2.0], $MachinePrecision] - N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision] / N[(wj + t$95$0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{wj}{wj + 1}\\
\mathbf{if}\;wj \leq -2.3 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{x - wj \cdot e^{wj}}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{elif}\;wj \leq 0.00092:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) - \left({wj}^{3} + wj \cdot \left(wj \cdot \left(-1 + x \cdot -2.5\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{{wj}^{2} - t_0 \cdot t_0}{wj + t_0}\\
\end{array}
\end{array}
if wj < -2.3e-6Initial program 43.5%
distribute-rgt1-in99.0%
*-commutative99.0%
Simplified99.0%
if -2.3e-6 < wj < 9.2000000000000003e-4Initial program 73.7%
distribute-rgt1-in73.7%
associate-/l/73.7%
div-sub73.7%
associate-/l*73.7%
*-inverses73.7%
/-rgt-identity73.7%
Simplified73.7%
Taylor expanded in wj around 0 99.8%
Taylor expanded in x around 0 99.8%
add-cbrt-cube91.1%
pow1/384.0%
pow384.0%
distribute-rgt-out84.0%
metadata-eval84.0%
Applied egg-rr84.0%
unpow1/391.1%
rem-cbrt-cube99.8%
*-commutative99.8%
unpow299.8%
associate-*r*99.8%
Applied egg-rr99.8%
if 9.2000000000000003e-4 < wj Initial program 19.4%
distribute-rgt1-in19.4%
associate-/l/19.4%
div-sub19.4%
associate-/l*19.4%
*-inverses99.4%
/-rgt-identity99.4%
Simplified99.4%
Taylor expanded in x around 0 99.4%
+-commutative99.4%
Simplified99.4%
sub-neg99.4%
flip-+99.7%
unpow299.7%
Applied egg-rr99.7%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ wj (+ wj 1.0))))
(if (<= wj -2.3e-6)
(+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))
(if (<= wj 0.0026)
(+
x
(-
(* -2.0 (* wj x))
(+ (pow wj 3.0) (* wj (* wj (+ -1.0 (* x -2.5)))))))
(/ (- (pow wj 2.0) (* t_0 t_0)) (+ wj t_0))))))
/* Herbie-rewritten Lambert-W Newton step, three wj regimes:
 *   wj <= -2.3e-6 : division-based expanded step (x/exp(wj)),
 *   wj <= 0.0026  : polynomial (Taylor) approximation around wj = 0,
 *   otherwise     : rational form using t_0 = wj/(wj+1).
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double t_0 = wj / (wj + 1.0);
double tmp;
if (wj <= -2.3e-6) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 0.0026) {
tmp = x + ((-2.0 * (wj * x)) - (pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))));
} else {
tmp = (pow(wj, 2.0) - (t_0 * t_0)) / (wj + t_0);
}
return tmp;
}
! Herbie-rewritten Lambert-W Newton step, three wj regimes
! (wj <= -2.3e-6 division-based step; wj <= 0.0026 Taylor polynomial;
! otherwise rational form with t_0 = wj/(wj+1)).  Expression order is
! accuracy-tuned; do not reassociate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = wj / (wj + 1.0d0)
if (wj <= (-2.3d-6)) then
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
else if (wj <= 0.0026d0) then
tmp = x + (((-2.0d0) * (wj * x)) - ((wj ** 3.0d0) + (wj * (wj * ((-1.0d0) + (x * (-2.5d0)))))))
else
tmp = ((wj ** 2.0d0) - (t_0 * t_0)) / (wj + t_0)
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj / (wj + 1.0);
double tmp;
if (wj <= -2.3e-6) {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 0.0026) {
tmp = x + ((-2.0 * (wj * x)) - (Math.pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))));
} else {
tmp = (Math.pow(wj, 2.0) - (t_0 * t_0)) / (wj + t_0);
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Lambert-W Newton step, three wj regimes.

    wj <= -2.3e-6: division-based step; wj <= 0.0026: Taylor polynomial
    around wj = 0; otherwise a rational form using t_0 = wj/(wj+1).
    NOTE(review): reformatted from an invalid collapsed one-liner; the
    expressions and their evaluation order are preserved exactly.
    """
    t_0 = wj / (wj + 1.0)
    tmp = 0
    if wj <= -2.3e-6:
        tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0))
    elif wj <= 0.0026:
        tmp = x + ((-2.0 * (wj * x)) - (math.pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))))
    else:
        tmp = (math.pow(wj, 2.0) - (t_0 * t_0)) / (wj + t_0)
    return tmp
function code(wj, x) t_0 = Float64(wj / Float64(wj + 1.0)) tmp = 0.0 if (wj <= -2.3e-6) tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); elseif (wj <= 0.0026) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) - Float64((wj ^ 3.0) + Float64(wj * Float64(wj * Float64(-1.0 + Float64(x * -2.5))))))); else tmp = Float64(Float64((wj ^ 2.0) - Float64(t_0 * t_0)) / Float64(wj + t_0)); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj / (wj + 1.0); tmp = 0.0; if (wj <= -2.3e-6) tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); elseif (wj <= 0.0026) tmp = x + ((-2.0 * (wj * x)) - ((wj ^ 3.0) + (wj * (wj * (-1.0 + (x * -2.5)))))); else tmp = ((wj ^ 2.0) - (t_0 * t_0)) / (wj + t_0); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -2.3e-6], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 0.0026], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] - N[(N[Power[wj, 3.0], $MachinePrecision] + N[(wj * N[(wj * N[(-1.0 + N[(x * -2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[Power[wj, 2.0], $MachinePrecision] - N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision] / N[(wj + t$95$0), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{wj}{wj + 1}\\
\mathbf{if}\;wj \leq -2.3 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{elif}\;wj \leq 0.0026:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) - \left({wj}^{3} + wj \cdot \left(wj \cdot \left(-1 + x \cdot -2.5\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{{wj}^{2} - t_0 \cdot t_0}{wj + t_0}\\
\end{array}
\end{array}
if wj < -2.3e-6Initial program 43.5%
distribute-rgt1-in99.0%
associate-/l/98.9%
div-sub43.3%
associate-/l*43.3%
*-inverses98.9%
/-rgt-identity98.9%
Simplified98.9%
if -2.3e-6 < wj < 0.0025999999999999999Initial program 73.7%
distribute-rgt1-in73.7%
associate-/l/73.7%
div-sub73.7%
associate-/l*73.7%
*-inverses73.7%
/-rgt-identity73.7%
Simplified73.7%
Taylor expanded in wj around 0 99.8%
Taylor expanded in x around 0 99.8%
add-cbrt-cube91.1%
pow1/384.0%
pow384.0%
distribute-rgt-out84.0%
metadata-eval84.0%
Applied egg-rr84.0%
unpow1/391.1%
rem-cbrt-cube99.8%
*-commutative99.8%
unpow299.8%
associate-*r*99.8%
Applied egg-rr99.8%
if 0.0025999999999999999 < wj Initial program 19.4%
distribute-rgt1-in19.4%
associate-/l/19.4%
div-sub19.4%
associate-/l*19.4%
*-inverses99.4%
/-rgt-identity99.4%
Simplified99.4%
Taylor expanded in x around 0 99.4%
+-commutative99.4%
Simplified99.4%
sub-neg99.4%
flip-+99.7%
unpow299.7%
Applied egg-rr99.7%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(if (<= wj -4.2e-8)
(+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))
(if (<= wj 2.6e-6)
(+
x
(+ (* -2.0 (* wj x)) (* (pow wj 2.0) (- 1.0 (+ (* x -4.0) (* x 1.5))))))
(- wj (/ wj (+ wj 1.0))))))
/* Herbie-rewritten Lambert-W Newton step, three wj regimes:
 *   wj <= -4.2e-8 : division-based expanded step,
 *   wj <=  2.6e-6 : quadratic (Taylor) approximation around wj = 0,
 *   otherwise     : wj - wj/(wj+1), i.e. wj^2/(wj+1), x dropped.
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double tmp;
if (wj <= -4.2e-8) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 2.6e-6) {
tmp = x + ((-2.0 * (wj * x)) + (pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
! Herbie-rewritten Lambert-W Newton step, three wj regimes
! (wj <= -4.2e-8 division-based step; wj <= 2.6e-6 quadratic Taylor
! polynomial; otherwise wj - wj/(wj+1)).  Expression order is
! accuracy-tuned; do not reassociate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-4.2d-8)) then
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
else if (wj <= 2.6d-6) then
tmp = x + (((-2.0d0) * (wj * x)) + ((wj ** 2.0d0) * (1.0d0 - ((x * (-4.0d0)) + (x * 1.5d0)))))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -4.2e-8) {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 2.6e-6) {
tmp = x + ((-2.0 * (wj * x)) + (Math.pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Lambert-W Newton step, three wj regimes.

    wj <= -4.2e-8: division-based step; wj <= 2.6e-6: quadratic Taylor
    polynomial around wj = 0; otherwise wj - wj/(wj+1).
    NOTE(review): reformatted from an invalid collapsed one-liner; the
    expressions and their evaluation order are preserved exactly.
    """
    tmp = 0
    if wj <= -4.2e-8:
        tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0))
    elif wj <= 2.6e-6:
        tmp = x + ((-2.0 * (wj * x)) + (math.pow(wj, 2.0) * (1.0 - ((x * -4.0) + (x * 1.5)))))
    else:
        tmp = wj - (wj / (wj + 1.0))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -4.2e-8) tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); elseif (wj <= 2.6e-6) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + Float64((wj ^ 2.0) * Float64(1.0 - Float64(Float64(x * -4.0) + Float64(x * 1.5)))))); else tmp = Float64(wj - Float64(wj / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -4.2e-8) tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); elseif (wj <= 2.6e-6) tmp = x + ((-2.0 * (wj * x)) + ((wj ^ 2.0) * (1.0 - ((x * -4.0) + (x * 1.5))))); else tmp = wj - (wj / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -4.2e-8], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.6e-6], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[(N[Power[wj, 2.0], $MachinePrecision] * N[(1.0 - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.2 \cdot 10^{-8}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-6}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + {wj}^{2} \cdot \left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < -4.19999999999999989e-8Initial program 46.4%
distribute-rgt1-in96.4%
associate-/l/96.2%
div-sub46.2%
associate-/l*46.2%
*-inverses96.2%
/-rgt-identity96.2%
Simplified96.2%
if -4.19999999999999989e-8 < wj < 2.60000000000000009e-6Initial program 73.7%
distribute-rgt1-in73.7%
associate-/l/73.7%
div-sub73.7%
associate-/l*73.7%
*-inverses73.7%
/-rgt-identity73.7%
Simplified73.7%
Taylor expanded in wj around 0 99.8%
if 2.60000000000000009e-6 < wj Initial program 28.1%
distribute-rgt1-in28.1%
associate-/l/28.1%
div-sub28.1%
associate-/l*28.1%
*-inverses94.8%
/-rgt-identity94.8%
Simplified94.8%
Taylor expanded in x around 0 94.8%
+-commutative94.8%
Simplified94.8%
Final simplification99.6%
(FPCore (wj x)
:precision binary64
(if (<= wj -2.3e-6)
(+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))
(if (<= wj 0.0052)
(+
x
(- (* -2.0 (* wj x)) (+ (pow wj 3.0) (* wj (* wj (+ -1.0 (* x -2.5)))))))
(- wj (/ wj (+ wj 1.0))))))
/* Herbie-rewritten Lambert-W Newton step, three wj regimes:
 *   wj <= -2.3e-6 : division-based expanded step,
 *   wj <= 0.0052  : cubic (Taylor) approximation around wj = 0,
 *   otherwise     : wj - wj/(wj+1), x dropped.
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double tmp;
if (wj <= -2.3e-6) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 0.0052) {
tmp = x + ((-2.0 * (wj * x)) - (pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
! Herbie-rewritten Lambert-W Newton step, three wj regimes
! (wj <= -2.3e-6 division-based step; wj <= 0.0052 cubic Taylor
! polynomial; otherwise wj - wj/(wj+1)).  Expression order is
! accuracy-tuned; do not reassociate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-2.3d-6)) then
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
else if (wj <= 0.0052d0) then
tmp = x + (((-2.0d0) * (wj * x)) - ((wj ** 3.0d0) + (wj * (wj * ((-1.0d0) + (x * (-2.5d0)))))))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -2.3e-6) {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 0.0052) {
tmp = x + ((-2.0 * (wj * x)) - (Math.pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Lambert-W Newton step, three wj regimes.

    wj <= -2.3e-6: division-based step; wj <= 0.0052: cubic Taylor
    polynomial around wj = 0; otherwise wj - wj/(wj+1).
    NOTE(review): reformatted from an invalid collapsed one-liner; the
    expressions and their evaluation order are preserved exactly.
    """
    tmp = 0
    if wj <= -2.3e-6:
        tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0))
    elif wj <= 0.0052:
        tmp = x + ((-2.0 * (wj * x)) - (math.pow(wj, 3.0) + (wj * (wj * (-1.0 + (x * -2.5))))))
    else:
        tmp = wj - (wj / (wj + 1.0))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -2.3e-6) tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); elseif (wj <= 0.0052) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) - Float64((wj ^ 3.0) + Float64(wj * Float64(wj * Float64(-1.0 + Float64(x * -2.5))))))); else tmp = Float64(wj - Float64(wj / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -2.3e-6) tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); elseif (wj <= 0.0052) tmp = x + ((-2.0 * (wj * x)) - ((wj ^ 3.0) + (wj * (wj * (-1.0 + (x * -2.5)))))); else tmp = wj - (wj / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -2.3e-6], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 0.0052], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] - N[(N[Power[wj, 3.0], $MachinePrecision] + N[(wj * N[(wj * N[(-1.0 + N[(x * -2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -2.3 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{elif}\;wj \leq 0.0052:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) - \left({wj}^{3} + wj \cdot \left(wj \cdot \left(-1 + x \cdot -2.5\right)\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < -2.3e-6Initial program 43.5%
distribute-rgt1-in99.0%
associate-/l/98.9%
div-sub43.3%
associate-/l*43.3%
*-inverses98.9%
/-rgt-identity98.9%
Simplified98.9%
if -2.3e-6 < wj < 0.0051999999999999998Initial program 73.7%
distribute-rgt1-in73.7%
associate-/l/73.7%
div-sub73.7%
associate-/l*73.7%
*-inverses73.7%
/-rgt-identity73.7%
Simplified73.7%
Taylor expanded in wj around 0 99.8%
Taylor expanded in x around 0 99.8%
add-cbrt-cube91.1%
pow1/384.0%
pow384.0%
distribute-rgt-out84.0%
metadata-eval84.0%
Applied egg-rr84.0%
unpow1/391.1%
rem-cbrt-cube99.8%
*-commutative99.8%
unpow299.8%
associate-*r*99.8%
Applied egg-rr99.8%
if 0.0051999999999999998 < wj Initial program 19.4%
distribute-rgt1-in19.4%
associate-/l/19.4%
div-sub19.4%
associate-/l*19.4%
*-inverses99.4%
/-rgt-identity99.4%
Simplified99.4%
Taylor expanded in x around 0 99.4%
+-commutative99.4%
Simplified99.4%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.0037)
(/ x (* (exp wj) (+ wj 1.0)))
(if (<= wj 2.55e-6)
(+ x (+ (* -2.0 (* wj x)) (pow wj 2.0)))
(- wj (/ wj (+ wj 1.0))))))
/* Herbie-rewritten Lambert-W Newton step, three wj regimes:
 *   wj <= -0.0037 : asymptotic form x / (exp(wj)*(wj+1)),
 *   wj <= 2.55e-6 : quadratic (Taylor) approximation around wj = 0,
 *   otherwise     : wj - wj/(wj+1), x dropped.
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double tmp;
if (wj <= -0.0037) {
tmp = x / (exp(wj) * (wj + 1.0));
} else if (wj <= 2.55e-6) {
tmp = x + ((-2.0 * (wj * x)) + pow(wj, 2.0));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
! Herbie-rewritten Lambert-W Newton step, three wj regimes
! (wj <= -0.0037 asymptotic form x/(exp(wj)*(wj+1)); wj <= 2.55e-6
! quadratic Taylor polynomial; otherwise wj - wj/(wj+1)).  Expression
! order is accuracy-tuned; do not reassociate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-0.0037d0)) then
tmp = x / (exp(wj) * (wj + 1.0d0))
else if (wj <= 2.55d-6) then
tmp = x + (((-2.0d0) * (wj * x)) + (wj ** 2.0d0))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -0.0037) {
tmp = x / (Math.exp(wj) * (wj + 1.0));
} else if (wj <= 2.55e-6) {
tmp = x + ((-2.0 * (wj * x)) + Math.pow(wj, 2.0));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Lambert-W Newton step, three wj regimes.

    wj <= -0.0037: asymptotic form x/(exp(wj)*(wj+1)); wj <= 2.55e-6:
    quadratic Taylor polynomial around wj = 0; otherwise wj - wj/(wj+1).
    NOTE(review): reformatted from an invalid collapsed one-liner; the
    expressions and their evaluation order are preserved exactly.
    """
    tmp = 0
    if wj <= -0.0037:
        tmp = x / (math.exp(wj) * (wj + 1.0))
    elif wj <= 2.55e-6:
        tmp = x + ((-2.0 * (wj * x)) + math.pow(wj, 2.0))
    else:
        tmp = wj - (wj / (wj + 1.0))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -0.0037) tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0))); elseif (wj <= 2.55e-6) tmp = Float64(x + Float64(Float64(-2.0 * Float64(wj * x)) + (wj ^ 2.0))); else tmp = Float64(wj - Float64(wj / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -0.0037) tmp = x / (exp(wj) * (wj + 1.0)); elseif (wj <= 2.55e-6) tmp = x + ((-2.0 * (wj * x)) + (wj ^ 2.0)); else tmp = wj - (wj / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -0.0037], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.55e-6], N[(x + N[(N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision] + N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.0037:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{elif}\;wj \leq 2.55 \cdot 10^{-6}:\\
\;\;\;\;x + \left(-2 \cdot \left(wj \cdot x\right) + {wj}^{2}\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < -0.0037000000000000002Initial program 37.5%
distribute-rgt1-in100.0%
associate-/l/99.8%
div-sub37.3%
associate-/l*37.3%
*-inverses99.8%
/-rgt-identity99.8%
Simplified99.8%
Taylor expanded in x around inf 100.0%
+-commutative100.0%
Simplified100.0%
if -0.0037000000000000002 < wj < 2.5500000000000001e-6Initial program 73.8%
distribute-rgt1-in73.8%
associate-/l/73.8%
div-sub73.8%
associate-/l*73.8%
*-inverses73.8%
/-rgt-identity73.8%
Simplified73.8%
Taylor expanded in wj around 0 99.3%
Taylor expanded in x around 0 99.3%
if 2.5500000000000001e-6 < wj Initial program 28.1%
distribute-rgt1-in28.1%
associate-/l/28.1%
div-sub28.1%
associate-/l*28.1%
*-inverses94.8%
/-rgt-identity94.8%
Simplified94.8%
Taylor expanded in x around 0 94.8%
+-commutative94.8%
Simplified94.8%
Final simplification99.2%
(FPCore (wj x)
:precision binary64
(if (<= wj -5.4e-9)
(+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))
(if (<= wj 2.6e-6)
(+ x (* wj (+ wj (* x -2.0))))
(- wj (/ wj (+ wj 1.0))))))
/* Herbie-rewritten Lambert-W Newton step, three wj regimes:
 *   wj <= -5.4e-9 : division-based expanded step,
 *   wj <=  2.6e-6 : compact quadratic x + wj*(wj - 2x),
 *   otherwise     : wj - wj/(wj+1), x dropped.
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double tmp;
if (wj <= -5.4e-9) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 2.6e-6) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
! Herbie-rewritten Lambert-W Newton step, three wj regimes
! (wj <= -5.4e-9 division-based step; wj <= 2.6e-6 compact quadratic
! x + wj*(wj - 2x); otherwise wj - wj/(wj+1)).  Expression order is
! accuracy-tuned; do not reassociate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-5.4d-9)) then
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
else if (wj <= 2.6d-6) then
tmp = x + (wj * (wj + (x * (-2.0d0))))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -5.4e-9) {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
} else if (wj <= 2.6e-6) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Lambert-W Newton step, three wj regimes.

    wj <= -5.4e-9: division-based step; wj <= 2.6e-6: compact quadratic
    x + wj*(wj - 2x); otherwise wj - wj/(wj+1).
    NOTE(review): reformatted from an invalid collapsed one-liner; the
    expressions and their evaluation order are preserved exactly.
    """
    tmp = 0
    if wj <= -5.4e-9:
        tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0))
    elif wj <= 2.6e-6:
        tmp = x + (wj * (wj + (x * -2.0)))
    else:
        tmp = wj - (wj / (wj + 1.0))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -5.4e-9) tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); elseif (wj <= 2.6e-6) tmp = Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0)))); else tmp = Float64(wj - Float64(wj / Float64(wj + 1.0))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -5.4e-9) tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); elseif (wj <= 2.6e-6) tmp = x + (wj * (wj + (x * -2.0))); else tmp = wj - (wj / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -5.4e-9], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.6e-6], N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -5.4 \cdot 10^{-9}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-6}:\\
\;\;\;\;x + wj \cdot \left(wj + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < -5.4000000000000004e-9Initial program 55.3%
distribute-rgt1-in97.0%
associate-/l/96.9%
div-sub55.2%
associate-/l*55.2%
*-inverses96.9%
/-rgt-identity96.9%
Simplified96.9%
if -5.4000000000000004e-9 < wj < 2.60000000000000009e-6Initial program 73.5%
distribute-rgt1-in73.5%
associate-/l/73.5%
div-sub73.5%
associate-/l*73.5%
*-inverses73.5%
/-rgt-identity73.5%
Simplified73.5%
Taylor expanded in wj around 0 100.0%
Taylor expanded in x around 0 100.0%
Taylor expanded in x around 0 100.0%
Taylor expanded in wj around 0 99.8%
+-commutative99.8%
unpow299.8%
*-commutative99.8%
associate-*r*99.8%
distribute-lft-out99.8%
Simplified99.8%
if 2.60000000000000009e-6 < wj Initial program 28.1%
distribute-rgt1-in28.1%
associate-/l/28.1%
div-sub28.1%
associate-/l*28.1%
*-inverses94.8%
/-rgt-identity94.8%
Simplified94.8%
Taylor expanded in x around 0 94.8%
+-commutative94.8%
Simplified94.8%
Final simplification99.6%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.00375)
(/ x (* (exp wj) (+ wj 1.0)))
(if (<= wj 2.6e-6)
(+ x (* wj (+ wj (* x -2.0))))
(- wj (/ wj (+ wj 1.0))))))
/* Herbie-rewritten Lambert-W Newton step, three wj regimes:
 *   wj <= -0.00375 : asymptotic form x / (exp(wj)*(wj+1)),
 *   wj <=  2.6e-6  : compact quadratic x + wj*(wj - 2x),
 *   otherwise      : wj - wj/(wj+1), x dropped.
 * Do not reassociate: the exact expression order is accuracy-tuned. */
double code(double wj, double x) {
double tmp;
if (wj <= -0.00375) {
tmp = x / (exp(wj) * (wj + 1.0));
} else if (wj <= 2.6e-6) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
! Herbie-rewritten Lambert-W Newton step, three wj regimes
! (wj <= -0.00375 asymptotic form x/(exp(wj)*(wj+1)); wj <= 2.6e-6
! compact quadratic x + wj*(wj - 2x); otherwise wj - wj/(wj+1)).
! Expression order is accuracy-tuned; do not reassociate.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-0.00375d0)) then
tmp = x / (exp(wj) * (wj + 1.0d0))
else if (wj <= 2.6d-6) then
tmp = x + (wj * (wj + (x * (-2.0d0))))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -0.00375) {
tmp = x / (Math.exp(wj) * (wj + 1.0));
} else if (wj <= 2.6e-6) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj - (wj / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Lambert-W Newton step, three wj regimes.

    wj <= -0.00375: asymptotic form x/(exp(wj)*(wj+1)); wj <= 2.6e-6:
    compact quadratic x + wj*(wj - 2x); otherwise wj - wj/(wj+1).
    NOTE(review): reformatted from an invalid collapsed one-liner; the
    expressions and their evaluation order are preserved exactly.
    """
    tmp = 0
    if wj <= -0.00375:
        tmp = x / (math.exp(wj) * (wj + 1.0))
    elif wj <= 2.6e-6:
        tmp = x + (wj * (wj + (x * -2.0)))
    else:
        tmp = wj - (wj / (wj + 1.0))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -0.00375) tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0))); elseif (wj <= 2.6e-6) tmp = Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0)))); else tmp = Float64(wj - Float64(wj / Float64(wj + 1.0))); end return tmp end
% Piecewise approximation of one Lambert-W Newton step (Herbie output).
% Statements on the `function` declaration line are invalid MATLAB;
% reformatted with identical arithmetic.
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= -0.00375)
        tmp = x / (exp(wj) * (wj + 1.0));
    elseif (wj <= 2.6e-6)
        tmp = x + (wj * (wj + (x * -2.0)));
    else
        tmp = wj - (wj / (wj + 1.0));
    end
    tmp_2 = tmp;
end
(* Piecewise approximation of one Lambert-W Newton step in three wj regimes (Herbie-generated). *)
code[wj_, x_] := If[LessEqual[wj, -0.00375], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.6e-6], N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.00375:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-6}:\\
\;\;\;\;x + wj \cdot \left(wj + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < -0.0037499999999999999Initial program 37.5%
distribute-rgt1-in100.0%
associate-/l/99.8%
div-sub37.3%
associate-/l*37.3%
*-inverses99.8%
/-rgt-identity99.8%
Simplified99.8%
Taylor expanded in x around inf 100.0%
+-commutative100.0%
Simplified100.0%
if -0.0037499999999999999 < wj < 2.60000000000000009e-6Initial program 73.8%
distribute-rgt1-in73.8%
associate-/l/73.8%
div-sub73.8%
associate-/l*73.8%
*-inverses73.8%
/-rgt-identity73.8%
Simplified73.8%
Taylor expanded in wj around 0 99.7%
Taylor expanded in x around 0 99.7%
Taylor expanded in x around 0 99.7%
Taylor expanded in wj around 0 99.3%
+-commutative99.3%
unpow299.3%
*-commutative99.3%
associate-*r*99.3%
distribute-lft-out99.3%
Simplified99.3%
if 2.60000000000000009e-6 < wj Initial program 28.1%
distribute-rgt1-in28.1%
associate-/l/28.1%
div-sub28.1%
associate-/l*28.1%
*-inverses94.8%
/-rgt-identity94.8%
Simplified94.8%
Taylor expanded in x around 0 94.8%
+-commutative94.8%
Simplified94.8%
Final simplification → 99.2% accuracy
(FPCore (wj x) :precision binary64 (if (<= wj 2.6e-6) (+ x (* wj (+ wj (* x -2.0)))) (- wj (/ wj (+ wj 1.0)))))
/* Two-regime approximation of the Lambert-W Newton step (Herbie):
 * series form near/below zero, rational form for larger wj. */
double code(double wj, double x) {
    if (wj <= 2.6e-6) {
        return x + (wj * (wj + (x * -2.0)));
    }
    return wj - (wj / (wj + 1.0));
}
!> Two-regime approximation of the Lambert-W Newton step (Herbie-generated):
!> series form x + wj*(wj - 2x) near/below zero, rational form above.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 2.6d-6) then
tmp = x + (wj * (wj + (x * (-2.0d0))))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
/** Two-regime approximation of the Lambert-W Newton step (Herbie). */
public static double code(double wj, double x) {
    if (wj <= 2.6e-6) {
        // series form x + wj*(wj - 2x)
        return x + (wj * (wj + (x * -2.0)));
    }
    // rational form
    return wj - (wj / (wj + 1.0));
}
def code(wj, x):
    """Two-regime approximation of the Lambert-W Newton step (Herbie output).

    Original report line was collapsed to a single, syntactically invalid
    line; reformatted to valid Python with identical arithmetic.
    """
    if wj <= 2.6e-6:
        tmp = x + (wj * (wj + (x * -2.0)))
    else:
        tmp = wj - (wj / (wj + 1.0))
    return tmp
# Two-regime approximation of the Lambert-W Newton step (Herbie output).
# Collapsed one-line form was invalid Julia; reformatted, arithmetic unchanged.
function code(wj, x)
    tmp = 0.0
    if (wj <= 2.6e-6)
        tmp = Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0))))
    else
        tmp = Float64(wj - Float64(wj / Float64(wj + 1.0)))
    end
    return tmp
end
% Two-regime approximation of the Lambert-W Newton step (Herbie output).
% Statements on the `function` line are invalid MATLAB; reformatted.
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 2.6e-6)
        tmp = x + (wj * (wj + (x * -2.0)));
    else
        tmp = wj - (wj / (wj + 1.0));
    end
    tmp_2 = tmp;
end
(* Two-regime approximation of the Lambert-W Newton step (Herbie-generated). *)
code[wj_, x_] := If[LessEqual[wj, 2.6e-6], N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 2.6 \cdot 10^{-6}:\\
\;\;\;\;x + wj \cdot \left(wj + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 2.60000000000000009e-6Initial program 72.6%
distribute-rgt1-in74.6%
associate-/l/74.6%
div-sub72.6%
associate-/l*72.6%
*-inverses74.6%
/-rgt-identity74.6%
Simplified74.6%
Taylor expanded in wj around 0 96.7%
Taylor expanded in x around 0 96.7%
Taylor expanded in x around 0 96.6%
Taylor expanded in wj around 0 96.2%
+-commutative96.2%
unpow296.2%
*-commutative96.2%
associate-*r*96.2%
distribute-lft-out96.2%
Simplified96.2%
if 2.60000000000000009e-6 < wj Initial program 28.1%
distribute-rgt1-in28.1%
associate-/l/28.1%
div-sub28.1%
associate-/l*28.1%
*-inverses94.8%
/-rgt-identity94.8%
Simplified94.8%
Taylor expanded in x around 0 94.8%
+-commutative94.8%
Simplified94.8%
Final simplification → 96.2% accuracy
(FPCore (wj x) :precision binary64 (if (<= wj 3.4e-7) (+ x (* -2.0 (* wj x))) (- wj (/ wj (+ wj 1.0)))))
/* Two-regime approximation (Herbie): linear series x - 2*wj*x below the
 * 3.4e-7 threshold, rational form wj - wj/(wj+1) above it. */
double code(double wj, double x) {
    if (wj <= 3.4e-7) {
        return x + (-2.0 * (wj * x));
    }
    return wj - (wj / (wj + 1.0));
}
!> Two-regime approximation of the Lambert-W Newton step (Herbie-generated):
!> linear series x - 2*wj*x below 3.4e-7, rational form above.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 3.4d-7) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj - (wj / (wj + 1.0d0))
end if
code = tmp
end function
/** Two-regime approximation (Herbie): linear series below 3.4e-7, rational form above. */
public static double code(double wj, double x) {
    if (wj <= 3.4e-7) {
        return x + (-2.0 * (wj * x));
    }
    return wj - (wj / (wj + 1.0));
}
def code(wj, x):
    """Two-regime approximation of the Lambert-W Newton step (Herbie output).

    Original report line was collapsed to a single, syntactically invalid
    line; reformatted to valid Python with identical arithmetic.
    """
    if wj <= 3.4e-7:
        tmp = x + (-2.0 * (wj * x))
    else:
        tmp = wj - (wj / (wj + 1.0))
    return tmp
# Two-regime approximation of the Lambert-W Newton step (Herbie output).
# Collapsed one-line form was invalid Julia; reformatted, arithmetic unchanged.
function code(wj, x)
    tmp = 0.0
    if (wj <= 3.4e-7)
        tmp = Float64(x + Float64(-2.0 * Float64(wj * x)))
    else
        tmp = Float64(wj - Float64(wj / Float64(wj + 1.0)))
    end
    return tmp
end
% Two-regime approximation of the Lambert-W Newton step (Herbie output).
% Statements on the `function` line are invalid MATLAB; reformatted.
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 3.4e-7)
        tmp = x + (-2.0 * (wj * x));
    else
        tmp = wj - (wj / (wj + 1.0));
    end
    tmp_2 = tmp;
end
(* Two-regime approximation: linear series below 3.4e-7, rational form above (Herbie-generated). *)
code[wj_, x_] := If[LessEqual[wj, 3.4e-7], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 3.4 \cdot 10^{-7}:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 3.39999999999999974e-7Initial program 72.6%
distribute-rgt1-in74.6%
associate-/l/74.6%
div-sub72.6%
associate-/l*72.6%
*-inverses74.6%
/-rgt-identity74.6%
Simplified74.6%
Taylor expanded in wj around 0 85.7%
*-commutative85.7%
Simplified85.7%
if 3.39999999999999974e-7 < wj Initial program 28.1%
distribute-rgt1-in28.1%
associate-/l/28.1%
div-sub28.1%
associate-/l*28.1%
*-inverses94.8%
/-rgt-identity94.8%
Simplified94.8%
Taylor expanded in x around 0 94.8%
+-commutative94.8%
Simplified94.8%
Final simplification → 85.9% accuracy
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
/* First-order series x*(1 - 2*wj), evaluated as x + (-2*(wj*x)) (Herbie). */
double code(double wj, double x) {
    const double correction = -2.0 * (wj * x);
    return x + correction;
}
!> First-order series x*(1 - 2*wj) of the Lambert-W Newton step (Herbie-generated).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (wj * x))
end function
/** First-order series x*(1 - 2*wj), evaluated as x + (-2*(wj*x)) (Herbie). */
public static double code(double wj, double x) {
    double correction = -2.0 * (wj * x);
    return x + correction;
}
def code(wj, x):
    """First-order series x*(1 - 2*wj), evaluated as x + (-2*(wj*x)) (Herbie)."""
    correction = -2.0 * (wj * x)
    return x + correction
# First-order series x*(1 - 2*wj), evaluated as x + (-2*(wj*x)) (Herbie-generated).
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(wj * x))) end
% First-order series x*(1 - 2*wj), evaluated as x + (-2*(wj*x)) (Herbie).
% Statements on the `function` line are invalid MATLAB; reformatted.
function tmp = code(wj, x)
    tmp = x + (-2.0 * (wj * x));
end
(* First-order series x*(1 - 2*wj) of the Lambert-W Newton step (Herbie-generated). *)
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Initial program 71.6%
distribute-rgt1-in73.6%
associate-/l/73.5%
div-sub71.6%
associate-/l*71.6%
*-inverses75.1%
/-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 83.8%
*-commutative83.8%
Simplified83.8%
Final simplification → 83.8% accuracy
(FPCore (wj x) :precision binary64 wj)
/* Degenerate Herbie alternative: approximate the whole step by wj alone (ignores x). */
double code(double wj, double x) {
return wj;
}
!> Degenerate Herbie alternative: returns wj unchanged (ignores x).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
/** Degenerate Herbie alternative: approximates the whole step by wj alone (ignores x). */
public static double code(double wj, double x) {
return wj;
}
# Degenerate Herbie alternative: approximates the whole step by wj alone (ignores x).
def code(wj, x): return wj
# Degenerate Herbie alternative: returns wj unchanged (ignores x).
function code(wj, x) return wj end
% Degenerate Herbie alternative: returns wj unchanged (ignores x).
% Statements on the `function` line are invalid MATLAB; reformatted.
function tmp = code(wj, x)
    tmp = wj;
end
(* Degenerate Herbie alternative: returns wj unchanged (ignores x). *)
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 71.6%
distribute-rgt1-in73.6%
associate-/l/73.5%
div-sub71.6%
associate-/l*71.6%
*-inverses75.1%
/-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around inf 4.7%
Final simplification → 4.7% accuracy
(FPCore (wj x) :precision binary64 x)
/* Degenerate Herbie alternative: approximate the whole step by x alone (ignores wj). */
double code(double wj, double x) {
return x;
}
!> Degenerate Herbie alternative: returns x unchanged (ignores wj).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
/** Degenerate Herbie alternative: approximates the whole step by x alone (ignores wj). */
public static double code(double wj, double x) {
return x;
}
# Degenerate Herbie alternative: approximates the whole step by x alone (ignores wj).
def code(wj, x): return x
# Degenerate Herbie alternative: returns x unchanged (ignores wj).
function code(wj, x) return x end
% Degenerate Herbie alternative: returns x unchanged (ignores wj).
% Statements on the `function` line are invalid MATLAB; reformatted.
function tmp = code(wj, x)
    tmp = x;
end
(* Degenerate Herbie alternative: returns x unchanged (ignores wj). *)
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 71.6%
distribute-rgt1-in73.6%
associate-/l/73.5%
div-sub71.6%
associate-/l*71.6%
*-inverses75.1%
/-rgt-identity75.1%
Simplified75.1%
Taylor expanded in wj around 0 83.1%
Final simplification → 83.1% accuracy
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
!> Herbie target form of the Newton step:
!> wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/** Herbie target form: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)). */
public static double code(double wj, double x) {
    double rational = wj / (wj + 1.0);
    double correction = x / (Math.exp(wj) + (wj * Math.exp(wj)));
    return wj - (rational - correction);
}
def code(wj, x):
    """Herbie target form: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj))."""
    rational = wj / (wj + 1.0)
    correction = x / (math.exp(wj) + (wj * math.exp(wj)))
    return wj - (rational - correction)
# Herbie target form: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
% Herbie target form: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
% Statements on the `function` line are invalid MATLAB; reformatted.
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
(* Herbie target form: wj - (wj/(wj+1) - x/(Exp[wj] + wj*Exp[wj])). *)
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024020
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:herbie-target
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))