
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for Lambert W (solves w * exp(w) = x) in binary64.
! Generated from the FPCore above; keep the expression shape as-is so results
! stay bit-identical to the other language renderings.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = wj * e**wj, shared between numerator and denominator
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** One Newton-iteration step for Lambert W (solves w * e^w = x) in binary64.
 *  Generated from the FPCore above; evaluation order preserved exactly
 *  (Math.exp(wj) is deterministic, so reusing it is bit-safe). */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);  // e^wj
    double we = wj * ew;       // t_0 = wj * e^wj
    return wj - ((we - x) / (ew + we));
}
def code(wj, x):
    """One Newton-iteration step for Lambert W (solves w * e**w = x) in binary64.

    Generated from the FPCore above; the original report line fused several
    statements onto one line and was not valid Python. Expressions and their
    floating-point evaluation order are preserved exactly.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton-iteration step for Lambert W (solves w * exp(w) = x) in binary64.
# The report line fused statements onto one line without separators and did not
# parse; reformatted with the expressions kept exactly as generated.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))) (t_1 (+ (* x -4.0) (* x 1.5))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 5e-19)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_1) (* x 0.6666666666666666))))))
t_1))
(* x 2.0))))
(*
x
(-
(/ wj (* x (- -1.0 wj)))
(- (/ (exp (- wj)) (- -1.0 wj)) (/ wj x)))))))
/* Herbie alternative 1 for the Lambert W Newton step (binary64).
   Branches on the size of the stepped value: a Taylor-series form for the
   tiny-result region, otherwise an algebraically rearranged quotient.
   Expression shape is exactly as generated — do not reassociate; any
   reordering changes the rounding behavior this alternative was tuned for. */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = (x * -4.0) + (x * 1.5); /* mathematically -2.5 * x; split form as generated */
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 5e-19) {
/* Taylor expansion in wj around 0 (per the derivation log below) */
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)));
} else {
/* rearranged quotient form using exp(-wj) */
tmp = x * ((wj / (x * (-1.0 - wj))) - ((exp(-wj) / (-1.0 - wj)) - (wj / x)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = wj * exp(wj)
t_1 = (x * (-4.0d0)) + (x * 1.5d0)
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 5d-19) then
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_1) + (x * 0.6666666666666666d0)))))) - t_1)) - (x * 2.0d0)))
else
tmp = x * ((wj / (x * ((-1.0d0) - wj))) - ((exp(-wj) / ((-1.0d0) - wj)) - (wj / x)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double t_1 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj + ((x - t_0) / (Math.exp(wj) + t_0))) <= 5e-19) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)));
} else {
tmp = x * ((wj / (x * (-1.0 - wj))) - ((Math.exp(-wj) / (-1.0 - wj)) - (wj / x)));
}
return tmp;
}
def code(wj, x):
    """Herbie alternative 1: piecewise rewrite of the Lambert W Newton step.

    A Taylor-series branch handles the tiny-result region; otherwise a
    rearranged quotient form is used. The original report line fused several
    statements onto one line and was not valid Python; expressions and their
    floating-point evaluation order are preserved exactly.
    """
    t_0 = wj * math.exp(wj)
    t_1 = (x * -4.0) + (x * 1.5)  # mathematically -2.5 * x; split form as generated
    if (wj + ((x - t_0) / (math.exp(wj) + t_0))) <= 5e-19:
        # Taylor expansion in wj around 0
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)))
    else:
        # rearranged quotient form using exp(-wj)
        tmp = x * ((wj / (x * (-1.0 - wj))) - ((math.exp(-wj) / (-1.0 - wj)) - (wj / x)))
    return tmp
function code(wj, x) t_0 = Float64(wj * exp(wj)) t_1 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 5e-19) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_1) + Float64(x * 0.6666666666666666)))))) - t_1)) - Float64(x * 2.0)))); else tmp = Float64(x * Float64(Float64(wj / Float64(x * Float64(-1.0 - wj))) - Float64(Float64(exp(Float64(-wj)) / Float64(-1.0 - wj)) - Float64(wj / x)))); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj * exp(wj); t_1 = (x * -4.0) + (x * 1.5); tmp = 0.0; if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 5e-19) tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0))); else tmp = x * ((wj / (x * (-1.0 - wj))) - ((exp(-wj) / (-1.0 - wj)) - (wj / x))); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e-19], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$1), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$1), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(wj / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj + \frac{x - t\_0}{e^{wj} + t\_0} \leq 5 \cdot 10^{-19}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t\_1 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_1\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{wj}{x \cdot \left(-1 - wj\right)} - \left(\frac{e^{-wj}}{-1 - wj} - \frac{wj}{x}\right)\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 5.0000000000000004e-19Initial program 69.7%
distribute-rgt1-in69.7%
associate-/l/69.7%
div-sub69.7%
associate-/l*69.7%
*-inverses69.7%
*-rgt-identity69.7%
Simplified69.7%
Taylor expanded in wj around 0 98.9%
if 5.0000000000000004e-19 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 92.8%
distribute-rgt1-in94.2%
associate-/l/94.3%
div-sub93.0%
associate-/l*93.0%
*-inverses99.5%
*-rgt-identity99.5%
Simplified99.5%
Taylor expanded in x around inf 99.5%
+-commutative99.5%
associate-/r*99.5%
exp-neg99.5%
+-commutative99.5%
+-commutative99.5%
Simplified99.5%
Final simplification99.1%
(FPCore (wj x) :precision binary64 (if (<= wj 2.8e-7) (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))) (+ wj (/ (- wj (/ x (exp wj))) (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 2.8e-7) {
tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 2.8d-7) then
tmp = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
else
tmp = wj + ((wj - (x / exp(wj))) / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 2.8e-7) {
tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x / Math.exp(wj))) / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    """Herbie alternative 2: piecewise rewrite of the Lambert W Newton step.

    Uses a low-order Taylor form when wj is tiny, otherwise a division-based
    rearrangement. The original report line fused several statements onto one
    line and was not valid Python; expressions and their floating-point
    evaluation order are preserved exactly.
    """
    if wj <= 2.8e-7:
        # Taylor expansion in wj around 0
        tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)))
    else:
        # rearranged quotient: divide x by exp(wj) instead of forming wj*exp(wj)
        tmp = wj + ((wj - (x / math.exp(wj))) / (-1.0 - wj))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= 2.8e-7) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0)))); else tmp = Float64(wj + Float64(Float64(wj - Float64(x / exp(wj))) / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 2.8e-7) tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))); else tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 2.8e-7], N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 2.8 \cdot 10^{-7}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - \frac{x}{e^{wj}}}{-1 - wj}\\
\end{array}
\end{array}
if wj < 2.80000000000000019e-7Initial program 77.4%
distribute-rgt1-in77.9%
associate-/l/77.9%
div-sub77.4%
associate-/l*77.4%
*-inverses77.9%
*-rgt-identity77.9%
Simplified77.9%
Taylor expanded in wj around 0 98.3%
Taylor expanded in x around 0 98.2%
Taylor expanded in x around 0 98.4%
neg-mul-198.4%
unsub-neg98.4%
Simplified98.4%
if 2.80000000000000019e-7 < wj Initial program 59.1%
distribute-rgt1-in59.3%
associate-/l/60.0%
div-sub60.0%
associate-/l*60.0%
*-inverses96.4%
*-rgt-identity96.4%
Simplified96.4%
Final simplification98.3%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00023) (+ x (* wj (+ (* wj (- 1.0 (* x -2.5))) (* x -2.0)))) (* x (+ (/ wj x) (/ wj (* x (- -1.0 wj)))))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00023) {
tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)));
} else {
tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj))));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00023d0) then
tmp = x + (wj * ((wj * (1.0d0 - (x * (-2.5d0)))) + (x * (-2.0d0))))
else
tmp = x * ((wj / x) + (wj / (x * ((-1.0d0) - wj))))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00023) {
tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)));
} else {
tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj))));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00023: tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0))) else: tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj)))) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00023) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - Float64(x * -2.5))) + Float64(x * -2.0)))); else tmp = Float64(x * Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj))))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00023) tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0))); else tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj)))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00023], N[(x + N[(wj * N[(N[(wj * N[(1.0 - N[(x * -2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00023:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(1 - x \cdot -2.5\right) + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\\
\end{array}
\end{array}
if wj < 2.3000000000000001e-4Initial program 77.6%
distribute-rgt1-in78.0%
associate-/l/78.0%
div-sub77.6%
associate-/l*77.6%
*-inverses78.0%
*-rgt-identity78.0%
Simplified78.0%
Taylor expanded in wj around 0 98.0%
cancel-sign-sub-inv98.0%
distribute-rgt-out98.4%
metadata-eval98.4%
metadata-eval98.4%
*-commutative98.4%
Simplified98.4%
if 2.3000000000000001e-4 < wj Initial program 50.1%
distribute-rgt1-in50.4%
associate-/l/51.3%
div-sub51.3%
associate-/l*51.3%
*-inverses95.7%
*-rgt-identity95.7%
Simplified95.7%
Taylor expanded in wj around 0 53.6%
associate-*r*53.6%
neg-mul-153.6%
distribute-rgt-out53.6%
metadata-eval53.6%
Simplified53.6%
Taylor expanded in x around inf 54.6%
Taylor expanded in x around 0 91.4%
Final simplification98.1%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00035) (+ x (* wj (+ wj (* x -2.0)))) (* x (+ (/ wj x) (/ wj (* x (- -1.0 wj)))))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00035) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj))));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00035d0) then
tmp = x + (wj * (wj + (x * (-2.0d0))))
else
tmp = x * ((wj / x) + (wj / (x * ((-1.0d0) - wj))))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00035) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj))));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00035: tmp = x + (wj * (wj + (x * -2.0))) else: tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj)))) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00035) tmp = Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0)))); else tmp = Float64(x * Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj))))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00035) tmp = x + (wj * (wj + (x * -2.0))); else tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj)))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00035], N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00035:\\
\;\;\;\;x + wj \cdot \left(wj + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\\
\end{array}
\end{array}
if wj < 3.49999999999999996e-4Initial program 77.6%
distribute-rgt1-in78.0%
associate-/l/78.0%
div-sub77.6%
associate-/l*77.6%
*-inverses78.0%
*-rgt-identity78.0%
Simplified78.0%
Taylor expanded in wj around 0 98.0%
cancel-sign-sub-inv98.0%
distribute-rgt-out98.4%
metadata-eval98.4%
metadata-eval98.4%
*-commutative98.4%
Simplified98.4%
Taylor expanded in x around 0 98.1%
if 3.49999999999999996e-4 < wj Initial program 50.1%
distribute-rgt1-in50.4%
associate-/l/51.3%
div-sub51.3%
associate-/l*51.3%
*-inverses95.7%
*-rgt-identity95.7%
Simplified95.7%
Taylor expanded in wj around 0 53.6%
associate-*r*53.6%
neg-mul-153.6%
distribute-rgt-out53.6%
metadata-eval53.6%
Simplified53.6%
Taylor expanded in x around inf 54.6%
Taylor expanded in x around 0 91.4%
Final simplification97.8%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00039) (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))) (* x (+ (/ wj x) (/ wj (* x (- -1.0 wj)))))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00039) {
tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
} else {
tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj))));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00039d0) then
tmp = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
else
tmp = x * ((wj / x) + (wj / (x * ((-1.0d0) - wj))))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00039) {
tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
} else {
tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj))));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00039: tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))) else: tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj)))) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00039) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0)))); else tmp = Float64(x * Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj))))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00039) tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))); else tmp = x * ((wj / x) + (wj / (x * (-1.0 - wj)))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00039], N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00039:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\\
\end{array}
\end{array}
if wj < 3.89999999999999993e-4Initial program 77.6%
distribute-rgt1-in78.0%
associate-/l/78.0%
div-sub77.6%
associate-/l*77.6%
*-inverses78.0%
*-rgt-identity78.0%
Simplified78.0%
Taylor expanded in wj around 0 98.3%
Taylor expanded in x around 0 98.2%
Taylor expanded in x around 0 98.2%
neg-mul-198.2%
unsub-neg98.2%
Simplified98.2%
if 3.89999999999999993e-4 < wj Initial program 50.1%
distribute-rgt1-in50.4%
associate-/l/51.3%
div-sub51.3%
associate-/l*51.3%
*-inverses95.7%
*-rgt-identity95.7%
Simplified95.7%
Taylor expanded in wj around 0 53.6%
associate-*r*53.6%
neg-mul-153.6%
distribute-rgt-out53.6%
metadata-eval53.6%
Simplified53.6%
Taylor expanded in x around inf 54.6%
Taylor expanded in x around 0 91.4%
Final simplification98.0%
(FPCore (wj x) :precision binary64 (if (<= wj 6.5e-5) (+ x (* wj (+ wj (* x -2.0)))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 6.5e-5) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 6.5d-5) then
tmp = x + (wj * (wj + (x * (-2.0d0))))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 6.5e-5) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 6.5e-5: tmp = x + (wj * (wj + (x * -2.0))) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 6.5e-5) tmp = Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0)))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 6.5e-5) tmp = x + (wj * (wj + (x * -2.0))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 6.5e-5], N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 6.5 \cdot 10^{-5}:\\
\;\;\;\;x + wj \cdot \left(wj + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 6.49999999999999943e-5Initial program 77.6%
distribute-rgt1-in78.0%
associate-/l/78.0%
div-sub77.6%
associate-/l*77.6%
*-inverses78.0%
*-rgt-identity78.0%
Simplified78.0%
Taylor expanded in wj around 0 98.0%
cancel-sign-sub-inv98.0%
distribute-rgt-out98.4%
metadata-eval98.4%
metadata-eval98.4%
*-commutative98.4%
Simplified98.4%
Taylor expanded in x around 0 98.1%
if 6.49999999999999943e-5 < wj Initial program 50.1%
distribute-rgt1-in50.4%
associate-/l/51.3%
div-sub51.3%
associate-/l*51.3%
*-inverses95.7%
*-rgt-identity95.7%
Simplified95.7%
Taylor expanded in x around 0 90.8%
+-commutative90.8%
Simplified90.8%
Final simplification97.8%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00038) (+ x (* wj (+ wj (* x -2.0)))) (+ wj (/ 1.0 (/ (- -1.0 wj) wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00038) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj + (1.0 / ((-1.0 - wj) / wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00038d0) then
tmp = x + (wj * (wj + (x * (-2.0d0))))
else
tmp = wj + (1.0d0 / (((-1.0d0) - wj) / wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00038) {
tmp = x + (wj * (wj + (x * -2.0)));
} else {
tmp = wj + (1.0 / ((-1.0 - wj) / wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00038: tmp = x + (wj * (wj + (x * -2.0))) else: tmp = wj + (1.0 / ((-1.0 - wj) / wj)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00038) tmp = Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0)))); else tmp = Float64(wj + Float64(1.0 / Float64(Float64(-1.0 - wj) / wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00038) tmp = x + (wj * (wj + (x * -2.0))); else tmp = wj + (1.0 / ((-1.0 - wj) / wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00038], N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(1.0 / N[(N[(-1.0 - wj), $MachinePrecision] / wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00038:\\
\;\;\;\;x + wj \cdot \left(wj + x \cdot -2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{1}{\frac{-1 - wj}{wj}}\\
\end{array}
\end{array}
if wj < 3.8000000000000002e-4Initial program 77.6%
distribute-rgt1-in78.0%
associate-/l/78.0%
div-sub77.6%
associate-/l*77.6%
*-inverses78.0%
*-rgt-identity78.0%
Simplified78.0%
Taylor expanded in wj around 0 98.0%
cancel-sign-sub-inv98.0%
distribute-rgt-out98.4%
metadata-eval98.4%
metadata-eval98.4%
*-commutative98.4%
Simplified98.4%
Taylor expanded in x around 0 98.1%
if 3.8000000000000002e-4 < wj Initial program 50.1%
distribute-rgt1-in50.4%
associate-/l/51.3%
div-sub51.3%
associate-/l*51.3%
*-inverses95.7%
*-rgt-identity95.7%
Simplified95.7%
Taylor expanded in x around 0 90.8%
+-commutative90.8%
Simplified90.8%
clear-num90.9%
inv-pow90.9%
Applied egg-rr90.9%
unpow-190.9%
Simplified90.9%
Final simplification97.8%
(FPCore (wj x) :precision binary64 (if (<= wj 0.00039) (+ x (* -2.0 (* wj x))) (- wj (+ 1.0 (/ -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.00039) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj - (1.0 + (-1.0 / wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.00039d0) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj - (1.0d0 + ((-1.0d0) / wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.00039) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj - (1.0 + (-1.0 / wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.00039: tmp = x + (-2.0 * (wj * x)) else: tmp = wj - (1.0 + (-1.0 / wj)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.00039) tmp = Float64(x + Float64(-2.0 * Float64(wj * x))); else tmp = Float64(wj - Float64(1.0 + Float64(-1.0 / wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.00039) tmp = x + (-2.0 * (wj * x)); else tmp = wj - (1.0 + (-1.0 / wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.00039], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(1.0 + N[(-1.0 / wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.00039:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \left(1 + \frac{-1}{wj}\right)\\
\end{array}
\end{array}
if wj < 3.89999999999999993e-4Initial program 77.6%
distribute-rgt1-in78.0%
associate-/l/78.0%
div-sub77.6%
associate-/l*77.6%
*-inverses78.0%
*-rgt-identity78.0%
Simplified78.0%
Taylor expanded in wj around 0 84.8%
*-commutative84.8%
Simplified84.8%
if 3.89999999999999993e-4 < wj Initial program 50.1%
distribute-rgt1-in50.4%
associate-/l/51.3%
div-sub51.3%
associate-/l*51.3%
*-inverses95.7%
*-rgt-identity95.7%
Simplified95.7%
Taylor expanded in x around 0 90.8%
+-commutative90.8%
Simplified90.8%
Taylor expanded in wj around inf 58.8%
Final simplification83.9%
(FPCore (wj x) :precision binary64 (if (<= wj 8.5e-5) (+ x (* -2.0 (* wj x))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 8.5e-5) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 8.5d-5) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 8.5e-5) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 8.5e-5: tmp = x + (-2.0 * (wj * x)) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 8.5e-5) tmp = Float64(x + Float64(-2.0 * Float64(wj * x))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 8.5e-5) tmp = x + (-2.0 * (wj * x)); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 8.5e-5], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 8.5 \cdot 10^{-5}:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 8.500000000000001e-5Initial program 77.6%
distribute-rgt1-in78.0%
associate-/l/78.0%
div-sub77.6%
associate-/l*77.6%
*-inverses78.0%
*-rgt-identity78.0%
Simplified78.0%
Taylor expanded in wj around 0 84.8%
*-commutative84.8%
Simplified84.8%
if 8.500000000000001e-5 < wj Initial program 50.1%
distribute-rgt1-in50.4%
associate-/l/51.3%
div-sub51.3%
associate-/l*51.3%
*-inverses95.7%
*-rgt-identity95.7%
Simplified95.7%
Taylor expanded in x around 0 90.8%
+-commutative90.8%
Simplified90.8%
Final simplification85.0%
(FPCore (wj x) :precision binary64 (* x (+ 1.0 (* wj -2.0))))
/* Herbie alternative: fully linearized step, x * (1 - 2*wj).
   Operation order kept exactly as generated. */
double code(double wj, double x) {
    double factor = 1.0 + (wj * -2.0);
    return x * factor;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * (1.0d0 + (wj * (-2.0d0)))
end function
public static double code(double wj, double x) {
return x * (1.0 + (wj * -2.0));
}
def code(wj, x):
    """Herbie alternative: fully linearized step, x * (1 - 2*wj).

    Operation order kept exactly as generated.
    """
    factor = 1.0 + (wj * -2.0)
    return x * factor
function code(wj, x) return Float64(x * Float64(1.0 + Float64(wj * -2.0))) end
function tmp = code(wj, x) tmp = x * (1.0 + (wj * -2.0)); end
code[wj_, x_] := N[(x * N[(1.0 + N[(wj * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + wj \cdot -2\right)
\end{array}
Initial program 76.7%
distribute-rgt1-in77.1%
associate-/l/77.1%
div-sub76.7%
associate-/l*76.7%
*-inverses78.6%
*-rgt-identity78.6%
Simplified78.6%
Taylor expanded in wj around 0 76.3%
associate-*r*76.3%
neg-mul-176.3%
distribute-rgt-out76.3%
metadata-eval76.3%
Simplified76.3%
Taylor expanded in x around inf 76.7%
Taylor expanded in wj around 0 82.0%
*-commutative82.0%
Simplified82.0%
Final simplification82.0%
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (wj * x))
end function
public static double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
def code(wj, x): return x + (-2.0 * (wj * x))
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(wj * x))) end
function tmp = code(wj, x) tmp = x + (-2.0 * (wj * x)); end
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Initial program 76.7%
distribute-rgt1-in77.1%
associate-/l/77.1%
div-sub76.7%
associate-/l*76.7%
*-inverses78.6%
*-rgt-identity78.6%
Simplified78.6%
Taylor expanded in wj around 0 82.0%
*-commutative82.0%
Simplified82.0%
Final simplification82.0%
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 76.7%
distribute-rgt1-in77.1%
associate-/l/77.1%
div-sub76.7%
associate-/l*76.7%
*-inverses78.6%
*-rgt-identity78.6%
Simplified78.6%
Taylor expanded in wj around inf 5.1%
Final simplification5.1%
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 76.7%
distribute-rgt1-in77.1%
associate-/l/77.1%
div-sub76.7%
associate-/l*76.7%
*-inverses78.6%
*-rgt-identity78.6%
Simplified78.6%
Taylor expanded in wj around 0 81.4%
Final simplification81.4%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024071
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))