
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for the Lambert W function:
!   w_next = w - (w*e^w - x) / (e^w + w*e^w)
! wj : current iterate w; x : target value (solves w*e^w = x).
! Fix: added implicit none (block previously relied on implicit typing rules).
real(8) function code(wj, x)
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton-iteration step for the Lambert W function:
 * w - (w*e^w - x) / (e^w + w*e^w).
 *
 * @param wj current iterate w
 * @param x  target value (solves w*e^w = x)
 * @return the updated iterate
 */
public static double code(double wj, double x) {
    final double expW = Math.exp(wj);   // e^w, shared by numerator and denominator
    final double prod = wj * expW;      // w * e^w
    return wj - (prod - x) / (expW + prod);
}
def code(wj, x):
    """One Newton-iteration step for the Lambert W function.

    Computes w - (w*e^w - x) / (e^w + w*e^w).

    Fix: the original was a single-line ``def`` with an assignment and a
    ``return`` run together, which is a SyntaxError in Python.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton-iteration step for Lambert W, forced to Float64 at each op:
#   w - (w*e^w - x) / (e^w + w*e^w)
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    num = Float64(t_0 - x)
    den = Float64(exp(wj) + t_0)
    return Float64(wj - Float64(num / den))
end
function tmp = code(wj, x)
  % One Newton-iteration step for Lambert W:
  %   w - (w*e^w - x) / (e^w + w*e^w)
  ew = exp(wj);          % e^w (deterministic; hoisting does not change values)
  t_0 = wj * ew;         % w * e^w
  tmp = wj - ((t_0 - x) / (ew + t_0));
end
(* One Newton-iteration step for Lambert W, w - (w*E^w - x)/(E^w + w*E^w), with every intermediate rounded to $MachinePrecision to mimic binary64. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton-iteration step for the Lambert W function.

    Computes w - (w*e^w - x) / (e^w + w*e^w).

    Fix: the original single-line ``def`` combined an assignment and a
    ``return`` without a separator, which is a SyntaxError in Python.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x) :precision binary64 (if (<= wj -3.45e-6) (+ wj (* x (+ (/ (exp (- wj)) (+ wj 1.0)) (/ wj (* x (- -1.0 wj)))))) (+ x (* wj (- (* wj (- (- 1.0 wj) (+ (* x -4.0) (* x 1.5)))) (* x 2.0))))))
/* Herbie-rewritten Newton step for Lambert W, branch-selected on wj.
 * NOTE(review): the expressions are precision-tuned; do not reassociate
 * floating-point operations when editing. */
double code(double wj, double x) {
double tmp;
if (wj <= -3.45e-6) {
/* Rearranged exact step; avoids the catastrophic cancellation of the
 * original (w*e^w - x)/(e^w + w*e^w) form for negative wj. */
tmp = wj + (x * ((exp(-wj) / (wj + 1.0)) + (wj / (x * (-1.0 - wj)))));
} else {
/* Polynomial (Taylor around wj = 0) approximation of the step. */
tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
return tmp;
}
! Herbie-rewritten Newton step for Lambert W, branch-selected on wj.
! For wj <= -3.45e-6 an algebraically rearranged exact step is used;
! otherwise a Taylor-polynomial approximation around wj = 0.
! Fix: added implicit none (block previously relied on implicit typing rules).
! NOTE(review): expressions are precision-tuned; kept byte-identical.
real(8) function code(wj, x)
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-3.45d-6)) then
tmp = wj + (x * ((exp(-wj) / (wj + 1.0d0)) + (wj / (x * ((-1.0d0) - wj)))))
else
tmp = x + (wj * ((wj * ((1.0d0 - wj) - ((x * (-4.0d0)) + (x * 1.5d0)))) - (x * 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -3.45e-6) {
tmp = wj + (x * ((Math.exp(-wj) / (wj + 1.0)) + (wj / (x * (-1.0 - wj)))));
} else {
tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Newton step for Lambert W, branch-selected on wj.

    For wj <= -3.45e-6 an algebraically rearranged exact step is used;
    otherwise a Taylor-polynomial approximation around wj = 0.

    Fix: the original was a single-line ``def`` with statements and an
    ``if``/``else`` run together, which is a SyntaxError in Python.
    Expressions are precision-tuned and kept exactly as generated.
    """
    if wj <= -3.45e-6:
        tmp = wj + (x * ((math.exp(-wj) / (wj + 1.0)) + (wj / (x * (-1.0 - wj)))))
    else:
        tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -3.45e-6) tmp = Float64(wj + Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64(wj / Float64(x * Float64(-1.0 - wj)))))); else tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 - wj) - Float64(Float64(x * -4.0) + Float64(x * 1.5)))) - Float64(x * 2.0)))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -3.45e-6) tmp = wj + (x * ((exp(-wj) / (wj + 1.0)) + (wj / (x * (-1.0 - wj))))); else tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -3.45e-6], N[(wj + N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 - wj), $MachinePrecision] - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.45 \cdot 10^{-6}:\\
\;\;\;\;wj + x \cdot \left(\frac{e^{-wj}}{wj + 1} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\\
\mathbf{else}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 - wj\right) - \left(x \cdot -4 + x \cdot 1.5\right)\right) - x \cdot 2\right)\\
\end{array}
\end{array}
if wj < -3.45e-6: Initial program 65.4%
distribute-rgt1-in98.9%
associate-/l/98.6%
div-sub65.3%
associate-/l*65.3%
*-inverses98.6%
*-rgt-identity98.6%
Simplified98.6%
Taylor expanded in x around inf 99.1%
+-commutative99.1%
associate-/r*98.9%
rec-exp98.9%
+-commutative98.9%
Simplified98.9%
if -3.45e-6 < wj: Initial program 79.4%
distribute-rgt1-in79.4%
associate-/l/79.5%
div-sub79.5%
associate-/l*79.5%
*-inverses80.3%
*-rgt-identity80.3%
Simplified80.3%
Taylor expanded in wj around 0 98.7%
Taylor expanded in x around 0 98.7%
mul-1-neg98.7%
Simplified98.7%
Final simplification98.7%
(FPCore (wj x) :precision binary64 (if (<= wj -2.55e-6) (+ wj (/ 1.0 (/ (+ wj 1.0) (- (/ x (exp wj)) wj)))) (+ x (* wj (- (* wj (- (- 1.0 wj) (+ (* x -4.0) (* x 1.5)))) (* x 2.0))))))
double code(double wj, double x) {
double tmp;
if (wj <= -2.55e-6) {
tmp = wj + (1.0 / ((wj + 1.0) / ((x / exp(wj)) - wj)));
} else {
tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-2.55d-6)) then
tmp = wj + (1.0d0 / ((wj + 1.0d0) / ((x / exp(wj)) - wj)))
else
tmp = x + (wj * ((wj * ((1.0d0 - wj) - ((x * (-4.0d0)) + (x * 1.5d0)))) - (x * 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -2.55e-6) {
tmp = wj + (1.0 / ((wj + 1.0) / ((x / Math.exp(wj)) - wj)));
} else {
tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Newton step for Lambert W, branch-selected on wj.

    For wj <= -2.55e-6 a reciprocal-rearranged exact step is used;
    otherwise a Taylor-polynomial approximation around wj = 0.

    Fix: the original was a single-line ``def`` with statements and an
    ``if``/``else`` run together, which is a SyntaxError in Python.
    Expressions are precision-tuned and kept exactly as generated.
    """
    if wj <= -2.55e-6:
        tmp = wj + (1.0 / ((wj + 1.0) / ((x / math.exp(wj)) - wj)))
    else:
        tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)))
    return tmp
function code(wj, x) tmp = 0.0 if (wj <= -2.55e-6) tmp = Float64(wj + Float64(1.0 / Float64(Float64(wj + 1.0) / Float64(Float64(x / exp(wj)) - wj)))); else tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 - wj) - Float64(Float64(x * -4.0) + Float64(x * 1.5)))) - Float64(x * 2.0)))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -2.55e-6) tmp = wj + (1.0 / ((wj + 1.0) / ((x / exp(wj)) - wj))); else tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -2.55e-6], N[(wj + N[(1.0 / N[(N[(wj + 1.0), $MachinePrecision] / N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 - wj), $MachinePrecision] - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -2.55 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{1}{\frac{wj + 1}{\frac{x}{e^{wj}} - wj}}\\
\mathbf{else}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 - wj\right) - \left(x \cdot -4 + x \cdot 1.5\right)\right) - x \cdot 2\right)\\
\end{array}
\end{array}
if wj < -2.5500000000000001e-6Initial program 65.4%
distribute-rgt1-in98.9%
associate-/l/98.6%
div-sub65.3%
associate-/l*65.3%
*-inverses98.6%
*-rgt-identity98.6%
Simplified98.6%
clear-num98.9%
inv-pow98.9%
Applied egg-rr98.9%
unpow-198.9%
Simplified98.9%
if -2.5500000000000001e-6 < wj Initial program 79.4%
distribute-rgt1-in79.4%
associate-/l/79.5%
div-sub79.5%
associate-/l*79.5%
*-inverses80.3%
*-rgt-identity80.3%
Simplified80.3%
Taylor expanded in wj around 0 98.7%
Taylor expanded in x around 0 98.7%
mul-1-neg98.7%
Simplified98.7%
Final simplification98.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj -1.16e-5)
(+ wj (/ (- wj (/ x (exp wj))) (- -1.0 wj)))
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* t_0 -2.0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0)))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -1.16e-5) {
tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj));
} else {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if (wj <= (-1.16d-5)) then
tmp = wj + ((wj - (x / exp(wj))) / ((-1.0d0) - wj))
else
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + ((t_0 * (-2.0d0)) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -1.16e-5) {
tmp = wj + ((wj - (x / Math.exp(wj))) / (-1.0 - wj));
} else {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
}
return tmp;
}
def code(wj, x):
    """Herbie-rewritten Newton step for Lambert W, branch-selected on wj.

    For wj <= -1.16e-5 a rearranged exact step is used; otherwise a
    higher-order Taylor polynomial around wj = 0 (t_0 is the shared
    subterm x*(-4) + x*1.5).

    Fix: the original was a single-line ``def`` with statements and an
    ``if``/``else`` run together, which is a SyntaxError in Python.
    Expressions are precision-tuned and kept exactly as generated.
    """
    t_0 = (x * -4.0) + (x * 1.5)
    if wj <= -1.16e-5:
        tmp = wj + ((wj - (x / math.exp(wj))) / (-1.0 - wj))
    else:
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)))
    return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (wj <= -1.16e-5) tmp = Float64(wj + Float64(Float64(wj - Float64(x / exp(wj))) / Float64(-1.0 - wj))); else tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(t_0 * -2.0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0)))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = 0.0; if (wj <= -1.16e-5) tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj)); else tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -1.16e-5], N[(wj + N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(t$95$0 * -2.0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -1.16 \cdot 10^{-5}:\\
\;\;\;\;wj + \frac{wj - \frac{x}{e^{wj}}}{-1 - wj}\\
\mathbf{else}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(t\_0 \cdot -2 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_0\right) - x \cdot 2\right)\\
\end{array}
\end{array}
if wj < -1.1600000000000001e-5Initial program 61.0%
distribute-rgt1-in98.7%
associate-/l/98.7%
div-sub61.2%
associate-/l*61.2%
*-inverses98.7%
*-rgt-identity98.7%
Simplified98.7%
if -1.1600000000000001e-5 < wj Initial program 79.5%
distribute-rgt1-in79.5%
associate-/l/79.6%
div-sub79.6%
associate-/l*79.6%
*-inverses80.4%
*-rgt-identity80.4%
Simplified80.4%
Taylor expanded in wj around 0 98.7%
Final simplification98.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* t_0 -2.0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
return x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
code = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + ((t_0 * (-2.0d0)) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
return x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
}
def code(wj, x):
    """Taylor-polynomial approximation (around wj = 0) of the Lambert W
    Newton step; t_0 is the shared subterm x*(-4) + x*1.5.

    Fix: the original was a single-line ``def`` with an assignment and a
    ``return`` run together, which is a SyntaxError in Python.
    Expressions are precision-tuned and kept exactly as generated.
    """
    t_0 = (x * -4.0) + (x * 1.5)
    return x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)))
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(t_0 * -2.0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0)))) end
function tmp = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((t_0 * -2.0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))); end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(t$95$0 * -2.0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(t\_0 \cdot -2 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_0\right) - x \cdot 2\right)
\end{array}
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 96.3%
Final simplification96.3%
(FPCore (wj x)
:precision binary64
(*
x
(+
1.0
(*
wj
(-
(* wj (+ 2.5 (+ (/ 1.0 x) (* wj (- (/ -1.0 x) 2.6666666666666665)))))
2.0)))))
double code(double wj, double x) {
return x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * (1.0d0 + (wj * ((wj * (2.5d0 + ((1.0d0 / x) + (wj * (((-1.0d0) / x) - 2.6666666666666665d0))))) - 2.0d0)))
end function
public static double code(double wj, double x) {
return x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)));
}
def code(wj, x): return x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)))
function code(wj, x) return Float64(x * Float64(1.0 + Float64(wj * Float64(Float64(wj * Float64(2.5 + Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(-1.0 / x) - 2.6666666666666665))))) - 2.0)))) end
function tmp = code(wj, x) tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0))); end
code[wj_, x_] := N[(x * N[(1.0 + N[(wj * N[(N[(wj * N[(2.5 + N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(-1.0 / x), $MachinePrecision] - 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + wj \cdot \left(wj \cdot \left(2.5 + \left(\frac{1}{x} + wj \cdot \left(\frac{-1}{x} - 2.6666666666666665\right)\right)\right) - 2\right)\right)
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in x around inf 82.3%
sub-neg82.3%
+-commutative82.3%
mul-1-neg82.3%
associate-+l+82.3%
+-commutative82.3%
mul-1-neg82.3%
associate-/r*82.3%
+-commutative82.3%
distribute-neg-frac282.3%
neg-sub082.3%
+-commutative82.3%
associate--r+82.3%
metadata-eval82.3%
+-commutative82.3%
rem-exp-log80.8%
+-commutative80.8%
log1p-undefine80.8%
Simplified80.8%
Taylor expanded in wj around 0 96.2%
Final simplification96.2%
(FPCore (wj x) :precision binary64 (+ x (* wj (- (* wj (- (- 1.0 wj) (+ (* x -4.0) (* x 1.5)))) (* x 2.0)))))
double code(double wj, double x) {
return x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * ((1.0d0 - wj) - ((x * (-4.0d0)) + (x * 1.5d0)))) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
def code(wj, x): return x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 - wj) - Float64(Float64(x * -4.0) + Float64(x * 1.5)))) - Float64(x * 2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 - wj), $MachinePrecision] - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(\left(1 - wj\right) - \left(x \cdot -4 + x \cdot 1.5\right)\right) - x \cdot 2\right)
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 96.3%
Taylor expanded in x around 0 96.2%
mul-1-neg96.2%
Simplified96.2%
Final simplification96.2%
(FPCore (wj x) :precision binary64 (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))))
double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
}
def code(wj, x): return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 96.3%
Taylor expanded in x around 0 96.2%
mul-1-neg96.2%
Simplified96.2%
Taylor expanded in x around 0 96.0%
Final simplification96.0%
(FPCore (wj x) :precision binary64 (- x (* wj (+ (* x 2.0) (* wj wj)))))
double code(double wj, double x) {
return x - (wj * ((x * 2.0) + (wj * wj)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x - (wj * ((x * 2.0d0) + (wj * wj)))
end function
public static double code(double wj, double x) {
return x - (wj * ((x * 2.0) + (wj * wj)));
}
def code(wj, x): return x - (wj * ((x * 2.0) + (wj * wj)))
function code(wj, x) return Float64(x - Float64(wj * Float64(Float64(x * 2.0) + Float64(wj * wj)))) end
function tmp = code(wj, x) tmp = x - (wj * ((x * 2.0) + (wj * wj))); end
code[wj_, x_] := N[(x - N[(wj * N[(N[(x * 2.0), $MachinePrecision] + N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x - wj \cdot \left(x \cdot 2 + wj \cdot wj\right)
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 96.3%
Taylor expanded in x around 0 96.2%
mul-1-neg96.2%
Simplified96.2%
Taylor expanded in wj around inf 84.3%
neg-mul-184.3%
Simplified84.3%
Final simplification84.3%
(FPCore (wj x) :precision binary64 (+ x (* wj (* x (- (* wj 2.5) 2.0)))))
double code(double wj, double x) {
return x + (wj * (x * ((wj * 2.5) - 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (x * ((wj * 2.5d0) - 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * (x * ((wj * 2.5) - 2.0)));
}
def code(wj, x): return x + (wj * (x * ((wj * 2.5) - 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(x * Float64(Float64(wj * 2.5) - 2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * (x * ((wj * 2.5) - 2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(x * N[(N[(wj * 2.5), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(x \cdot \left(wj \cdot 2.5 - 2\right)\right)
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 96.3%
Taylor expanded in x around 0 96.2%
mul-1-neg96.2%
Simplified96.2%
Taylor expanded in x around inf 84.3%
Final simplification84.3%
(FPCore (wj x) :precision binary64 (* x (/ (- 1.0 wj) (+ wj 1.0))))
double code(double wj, double x) {
return x * ((1.0 - wj) / (wj + 1.0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * ((1.0d0 - wj) / (wj + 1.0d0))
end function
public static double code(double wj, double x) {
return x * ((1.0 - wj) / (wj + 1.0));
}
def code(wj, x): return x * ((1.0 - wj) / (wj + 1.0))
function code(wj, x) return Float64(x * Float64(Float64(1.0 - wj) / Float64(wj + 1.0))) end
function tmp = code(wj, x) tmp = x * ((1.0 - wj) / (wj + 1.0)); end
code[wj_, x_] := N[(x * N[(N[(1.0 - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{1 - wj}{wj + 1}
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 79.2%
+-commutative79.2%
Simplified79.2%
Taylor expanded in x around inf 84.2%
+-commutative84.2%
+-commutative84.2%
div-sub84.2%
Simplified84.2%
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (wj * x))
end function
public static double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
def code(wj, x): return x + (-2.0 * (wj * x))
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(wj * x))) end
function tmp = code(wj, x) tmp = x + (-2.0 * (wj * x)); end
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 84.1%
*-commutative84.1%
Simplified84.1%
Final simplification84.1%
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around 0 83.5%
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 79.0%
distribute-rgt1-in80.1%
associate-/l/80.2%
div-sub79.0%
associate-/l*79.0%
*-inverses81.0%
*-rgt-identity81.0%
Simplified81.0%
Taylor expanded in wj around inf 4.3%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024118
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))