Jmat.Real.lambertw, newton loop step

Initial program:
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))

function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end

function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
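For context: the input's :name, "Jmat.Real.lambertw, newton loop step", identifies this expression as one Newton step for solving w·e^w = x, i.e. for evaluating the Lambert W function. The sketch below shows how the step might sit inside a driver loop; the starting guess and fixed iteration count are illustrative assumptions, not taken from the original source.

```python
import math

def newton_step(wj, x):
    # One Newton step for f(w) = w*exp(w) - x, as in the initial program above.
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))

def lambertw(x, iters=20):
    # Crude starting guess (an assumption of this sketch, not Jmat's choice).
    w = math.log1p(x) if x >= 0.0 else x
    for _ in range(iters):
        w = newton_step(w, x)
    return w

print(lambertw(1.0))  # ~0.5671432904097838, the omega constant W(1)
```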
Sampling outcomes in binary64 precision (table omitted).

Herbie found 9 alternatives; each is listed below with its derivation.
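Herbie's accuracy percentages come from comparing each candidate against a much higher-precision reference on the sampled points. The sketch below illustrates the idea using plain relative error rather than Herbie's bits-of-error metric; the sampling range, point count, and the third-party mpmath dependency are assumptions of this sketch.

```python
import math
import random
import mpmath

mpmath.mp.prec = 200  # reference precision (an arbitrary, generous choice)

def step_f64(wj, x):
    # The initial program, evaluated in binary64.
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))

def step_ref(wj, x):
    # The same expression, evaluated with 200-bit mpmath arithmetic.
    wj, x = mpmath.mpf(wj), mpmath.mpf(x)
    t_0 = wj * mpmath.exp(wj)
    return wj - ((t_0 - x) / (mpmath.exp(wj) + t_0))

random.seed(0)
worst = 0.0
for _ in range(1000):
    wj = random.uniform(1e-16, 1.0)  # sampling range is an assumption
    x = random.uniform(1e-16, 1.0)
    exact = step_ref(wj, x)
    err = abs((mpmath.mpf(step_f64(wj, x)) - exact) / exact)
    worst = max(worst, float(err))
print("worst relative error:", worst)
```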
Alternative 1:

(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))) (t_1 (+ (* x -4.0) (* x 1.5))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 2e-13)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_1) (* x 0.6666666666666666))))))
t_1))
(* x 2.0))))
(+ wj (/ (- wj (* x (exp (- wj)))) (- -1.0 wj))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 2e-13) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x * exp(-wj))) / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = wj * exp(wj)
t_1 = (x * (-4.0d0)) + (x * 1.5d0)
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 2d-13) then
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_1) + (x * 0.6666666666666666d0)))))) - t_1)) - (x * 2.0d0)))
else
tmp = wj + ((wj - (x * exp(-wj))) / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double t_1 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj + ((x - t_0) / (Math.exp(wj) + t_0))) <= 2e-13) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x * Math.exp(-wj))) / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    t_1 = (x * -4.0) + (x * 1.5)
    tmp = 0
    if (wj + ((x - t_0) / (math.exp(wj) + t_0))) <= 2e-13:
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)))
    else:
        tmp = wj + ((wj - (x * math.exp(-wj))) / (-1.0 - wj))
    return tmp

function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    t_1 = Float64(Float64(x * -4.0) + Float64(x * 1.5))
    tmp = 0.0
    if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 2e-13)
        tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_1) + Float64(x * 0.6666666666666666)))))) - t_1)) - Float64(x * 2.0))));
    else
        tmp = Float64(wj + Float64(Float64(wj - Float64(x * exp(Float64(-wj)))) / Float64(-1.0 - wj)));
    end
    return tmp
end

function tmp_2 = code(wj, x)
    t_0 = wj * exp(wj);
    t_1 = (x * -4.0) + (x * 1.5);
    tmp = 0.0;
    if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 2e-13)
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)));
    else
        tmp = wj + ((wj - (x * exp(-wj))) / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e-13], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$1), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$1), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(wj - N[(x * N[Exp[(-wj)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj + \frac{x - t_0}{e^{wj} + t_0} \leq 2 \cdot 10^{-13}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t_1 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t_1\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - x \cdot e^{-wj}}{-1 - wj}
\end{array}
Derivation:

Branch if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 2.0000000000000001e-13:

- Initial program: 73.3%
- distribute-rgt1-in: 73.8%
- associate-/l/: 73.9%
- div-sub: 73.3%
- associate-/l*: 73.3%
- *-inverses: 73.9%
- *-rgt-identity: 73.9%
- Simplified: 73.9%
- Taylor expanded in wj around 0: 99.4%

Branch if 2.0000000000000001e-13 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))):

- Initial program: 93.7%
- distribute-rgt1-in: 95.2%
- associate-/l/: 95.2%
- div-sub: 93.7%
- associate-/l*: 93.7%
- *-inverses: 99.7%
- *-rgt-identity: 99.7%
- Simplified: 99.7%
- clear-num: 99.5%
- associate-/r/: 99.7%
- rec-exp: 99.7%
- Applied egg-rr: 99.7%
- Final simplification: 99.5%
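The rewrites above leave the else-branch mathematically equal to the original step: dividing the numerator and denominator of (wj·e^wj − x)/(e^wj + wj·e^wj) by e^wj gives (wj − x·e^−wj)/(1 + wj), so only the floating-point behavior changes. A quick numerical spot-check of that identity (the sample points are arbitrary):

```python
import math

def original_step(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))

def rewritten_else(wj, x):
    # The else-branch of Alternative 1 above.
    return wj + ((wj - (x * math.exp(-wj))) / (-1.0 - wj))

for wj, x in [(0.5, 1.0), (2.0, 3.0), (10.0, 0.25)]:
    print(wj, x, original_step(wj, x), rewritten_else(wj, x))
```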
Alternative 2:

(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj 2.4e-5)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0))))
(+ wj (/ (- wj (/ x (exp wj))) (- -1.0 wj))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 2.4e-5) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if (wj <= 2.4d-5) then
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
else
tmp = wj + ((wj - (x / exp(wj))) / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 2.4e-5) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x / Math.exp(wj))) / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    t_0 = (x * -4.0) + (x * 1.5)
    tmp = 0
    if wj <= 2.4e-5:
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)))
    else:
        tmp = wj + ((wj - (x / math.exp(wj))) / (-1.0 - wj))
    return tmp

function code(wj, x)
    t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5))
    tmp = 0.0
    if (wj <= 2.4e-5)
        tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0))));
    else
        tmp = Float64(wj + Float64(Float64(wj - Float64(x / exp(wj))) / Float64(-1.0 - wj)));
    end
    return tmp
end

function tmp_2 = code(wj, x)
    t_0 = (x * -4.0) + (x * 1.5);
    tmp = 0.0;
    if (wj <= 2.4e-5)
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
    else
        tmp = wj + ((wj - (x / exp(wj))) / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, 2.4e-5], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq 2.4 \cdot 10^{-5}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t_0 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t_0\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - \frac{x}{e^{wj}}}{-1 - wj}
\end{array}
Derivation:

Branch if wj < 2.4000000000000001e-5:

- Initial program: 79.4%
- distribute-rgt1-in: 80.2%
- associate-/l/: 80.2%
- div-sub: 79.4%
- associate-/l*: 79.4%
- *-inverses: 80.2%
- *-rgt-identity: 80.2%
- Simplified: 80.2%
- Taylor expanded in wj around 0: 98.7%

Branch if 2.4000000000000001e-5 < wj:

- Initial program: 47.0%
- distribute-rgt1-in: 47.0%
- associate-/l/: 47.0%
- div-sub: 47.0%
- associate-/l*: 47.0%
- *-inverses: 97.0%
- *-rgt-identity: 97.0%
- Simplified: 97.0%
- Final simplification: 98.6%
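The decisive step here is "Taylor expanded in wj around 0": the if-branch evaluates the degree-3 Taylor polynomial of the step in wj, which works out to x − 2x·wj + (1 + 2.5x)·wj² − (1 + (8/3)x)·wj³. The sketch below compares the polynomial branch against the exact step for small wj; the test values are arbitrary.

```python
import math

def exact_step(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))

def series_branch(wj, x):
    # Degree-3 Taylor polynomial of the step in wj around 0
    # (the if-branch of Alternative 2 above).
    t_0 = (x * -4.0) + (x * 1.5)  # == -2.5 * x
    return x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)))

for wj in (1e-8, 1e-6, 1e-5):
    print(wj, exact_step(wj, 0.5), series_branch(wj, 0.5))
```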
Alternative 3:

(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj 2.5e-5)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0))))
(- wj (/ (- wj (+ x (* wj (* x (+ -1.0 (* wj 0.5)))))) (+ wj 1.0))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 2.5e-5) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj - ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if (wj <= 2.5d-5) then
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
else
tmp = wj - ((wj - (x + (wj * (x * ((-1.0d0) + (wj * 0.5d0)))))) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 2.5e-5) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj - ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    t_0 = (x * -4.0) + (x * 1.5)
    tmp = 0
    if wj <= 2.5e-5:
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)))
    else:
        tmp = wj - ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (wj + 1.0))
    return tmp

function code(wj, x)
    t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5))
    tmp = 0.0
    if (wj <= 2.5e-5)
        tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0))));
    else
        tmp = Float64(wj - Float64(Float64(wj - Float64(x + Float64(wj * Float64(x * Float64(-1.0 + Float64(wj * 0.5)))))) / Float64(wj + 1.0)));
    end
    return tmp
end

function tmp_2 = code(wj, x)
    t_0 = (x * -4.0) + (x * 1.5);
    tmp = 0.0;
    if (wj <= 2.5e-5)
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
    else
        tmp = wj - ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (wj + 1.0));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, 2.5e-5], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj - N[(N[(wj - N[(x + N[(wj * N[(x * N[(-1.0 + N[(wj * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq 2.5 \cdot 10^{-5}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t_0 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t_0\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - \left(x + wj \cdot \left(x \cdot \left(-1 + wj \cdot 0.5\right)\right)\right)}{wj + 1}
\end{array}
Derivation:

Branch if wj < 2.50000000000000012e-5:

- Initial program: 79.4%
- distribute-rgt1-in: 80.2%
- associate-/l/: 80.2%
- div-sub: 79.4%
- associate-/l*: 79.4%
- *-inverses: 80.2%
- *-rgt-identity: 80.2%
- Simplified: 80.2%
- Taylor expanded in wj around 0: 98.7%

Branch if 2.50000000000000012e-5 < wj:

- Initial program: 47.0%
- distribute-rgt1-in: 47.0%
- associate-/l/: 47.0%
- div-sub: 47.0%
- associate-/l*: 47.0%
- *-inverses: 97.0%
- *-rgt-identity: 97.0%
- Simplified: 97.0%
- clear-num: 97.0%
- associate-/r/: 97.0%
- rec-exp: 97.0%
- Applied egg-rr: 97.0%
- Taylor expanded in wj around 0: 80.4%
- Taylor expanded in wj around 0: 81.4%
- associate-*r*: 81.4%
- distribute-rgt-out: 81.4%
- Simplified: 81.4%
- Final simplification: 98.3%
Alternative 4:

(FPCore (wj x) :precision binary64 (if (<= wj 3.3e-7) (+ x (* wj (- (- wj (* wj wj)) (* x 2.0)))) (+ wj (/ (- wj (+ x (* wj (* x (+ -1.0 (* wj 0.5)))))) (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 3.3e-7) {
tmp = x + (wj * ((wj - (wj * wj)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 3.3d-7) then
tmp = x + (wj * ((wj - (wj * wj)) - (x * 2.0d0)))
else
tmp = wj + ((wj - (x + (wj * (x * ((-1.0d0) + (wj * 0.5d0)))))) / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 3.3e-7) {
tmp = x + (wj * ((wj - (wj * wj)) - (x * 2.0)));
} else {
tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    tmp = 0
    if wj <= 3.3e-7:
        tmp = x + (wj * ((wj - (wj * wj)) - (x * 2.0)))
    else:
        tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj))
    return tmp

function code(wj, x)
    tmp = 0.0
    if (wj <= 3.3e-7)
        tmp = Float64(x + Float64(wj * Float64(Float64(wj - Float64(wj * wj)) - Float64(x * 2.0))));
    else
        tmp = Float64(wj + Float64(Float64(wj - Float64(x + Float64(wj * Float64(x * Float64(-1.0 + Float64(wj * 0.5)))))) / Float64(-1.0 - wj)));
    end
    return tmp
end

function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 3.3e-7)
        tmp = x + (wj * ((wj - (wj * wj)) - (x * 2.0)));
    else
        tmp = wj + ((wj - (x + (wj * (x * (-1.0 + (wj * 0.5)))))) / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := If[LessEqual[wj, 3.3e-7], N[(x + N[(wj * N[(N[(wj - N[(wj * wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(wj - N[(x + N[(wj * N[(x * N[(-1.0 + N[(wj * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;wj \leq 3.3 \cdot 10^{-7}:\\
\;\;\;\;x + wj \cdot \left(\left(wj - wj \cdot wj\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj - \left(x + wj \cdot \left(x \cdot \left(-1 + wj \cdot 0.5\right)\right)\right)}{-1 - wj}
\end{array}
Derivation:

Branch if wj < 3.3000000000000002e-7:

- Initial program: 79.3%
- distribute-rgt1-in: 80.1%
- associate-/l/: 80.1%
- div-sub: 79.3%
- associate-/l*: 79.3%
- *-inverses: 80.1%
- *-rgt-identity: 80.1%
- Simplified: 80.1%
- Taylor expanded in wj around 0: 98.7%
- Taylor expanded in x around 0: 98.3%
- neg-mul-1: 98.3%
- unsub-neg: 98.3%
- Simplified: 98.3%
- sub-neg: 98.3%
- distribute-rgt-in: 98.3%
- *-un-lft-identity: 98.3%
- Applied egg-rr: 98.3%

Branch if 3.3000000000000002e-7 < wj:

- Initial program: 54.4%
- distribute-rgt1-in: 54.4%
- associate-/l/: 54.2%
- div-sub: 54.2%
- associate-/l*: 54.2%
- *-inverses: 97.1%
- *-rgt-identity: 97.1%
- Simplified: 97.1%
- clear-num: 97.1%
- associate-/r/: 97.1%
- rec-exp: 97.2%
- Applied egg-rr: 97.2%
- Taylor expanded in wj around 0: 83.0%
- Taylor expanded in wj around 0: 83.2%
- associate-*r*: 83.2%
- distribute-rgt-out: 83.2%
- Simplified: 83.2%
- Final simplification: 97.9%
Alternative 5:

(FPCore (wj x) :precision binary64 (+ x (* wj (+ wj (* x (- (* wj 2.5) 2.0))))))
double code(double wj, double x) {
return x + (wj * (wj + (x * ((wj * 2.5) - 2.0))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (wj + (x * ((wj * 2.5d0) - 2.0d0))))
end function
public static double code(double wj, double x) {
return x + (wj * (wj + (x * ((wj * 2.5) - 2.0))));
}
def code(wj, x): return x + (wj * (wj + (x * ((wj * 2.5) - 2.0))))
function code(wj, x) return Float64(x + Float64(wj * Float64(wj + Float64(x * Float64(Float64(wj * 2.5) - 2.0))))) end
function tmp = code(wj, x)
    tmp = x + (wj * (wj + (x * ((wj * 2.5) - 2.0))));
end
code[wj_, x_] := N[(x + N[(wj * N[(wj + N[(x * N[(N[(wj * 2.5), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(wj + x \cdot \left(wj \cdot 2.5 - 2\right)\right)
\end{array}
Derivation:

- Initial program: 78.6%
- distribute-rgt1-in: 79.4%
- associate-/l/: 79.4%
- div-sub: 78.7%
- associate-/l*: 78.7%
- *-inverses: 80.6%
- *-rgt-identity: 80.6%
- Simplified: 80.6%
- Taylor expanded in wj around 0: 96.2%
- cancel-sign-sub-inv: 96.2%
- metadata-eval: 96.2%
- distribute-rgt-out: 96.2%
- metadata-eval: 96.2%
- *-commutative: 96.2%
- Simplified: 96.2%
- Taylor expanded in x around 0: 96.2%
- Final simplification: 96.2%
Alternative 6:

(FPCore (wj x) :precision binary64 (+ x (* wj (+ wj (* x -2.0)))))
double code(double wj, double x) {
return x + (wj * (wj + (x * -2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (wj + (x * (-2.0d0))))
end function
public static double code(double wj, double x) {
return x + (wj * (wj + (x * -2.0)));
}
def code(wj, x): return x + (wj * (wj + (x * -2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0)))) end
function tmp = code(wj, x)
    tmp = x + (wj * (wj + (x * -2.0)));
end
code[wj_, x_] := N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot \left(wj + x \cdot -2\right)
\end{array}
Derivation:

- Initial program: 78.6%
- distribute-rgt1-in: 79.4%
- associate-/l/: 79.4%
- div-sub: 78.7%
- associate-/l*: 78.7%
- *-inverses: 80.6%
- *-rgt-identity: 80.6%
- Simplified: 80.6%
- Taylor expanded in wj around 0: 96.2%
- cancel-sign-sub-inv: 96.2%
- metadata-eval: 96.2%
- distribute-rgt-out: 96.2%
- metadata-eval: 96.2%
- *-commutative: 96.2%
- Simplified: 96.2%
- Taylor expanded in x around 0: 96.2%
- Taylor expanded in wj around 0: 95.9%
- Final simplification: 95.9%
Alternative 7:

(FPCore (wj x) :precision binary64 (+ x (* wj wj)))
double code(double wj, double x) {
return x + (wj * wj);
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * wj)
end function
public static double code(double wj, double x) {
return x + (wj * wj);
}
def code(wj, x): return x + (wj * wj)
function code(wj, x) return Float64(x + Float64(wj * wj)) end
function tmp = code(wj, x)
    tmp = x + (wj * wj);
end
code[wj_, x_] := N[(x + N[(wj * wj), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x + wj \cdot wj
\end{array}
Derivation:

- Initial program: 78.6%
- distribute-rgt1-in: 79.4%
- associate-/l/: 79.4%
- div-sub: 78.7%
- associate-/l*: 78.7%
- *-inverses: 80.6%
- *-rgt-identity: 80.6%
- Simplified: 80.6%
- Taylor expanded in wj around 0: 96.2%
- cancel-sign-sub-inv: 96.2%
- metadata-eval: 96.2%
- distribute-rgt-out: 96.2%
- metadata-eval: 96.2%
- *-commutative: 96.2%
- Simplified: 96.2%
- Taylor expanded in x around 0: 96.2%
- Taylor expanded in x around 0: 95.1%
Alternative 8:

(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x)
    tmp = x;
end
code[wj_, x_] := x
\begin{array}{l}
x
\end{array}
Derivation:

- Initial program: 78.6%
- distribute-rgt1-in: 79.4%
- associate-/l/: 79.4%
- div-sub: 78.7%
- associate-/l*: 78.7%
- *-inverses: 80.6%
- *-rgt-identity: 80.6%
- Simplified: 80.6%
- Taylor expanded in wj around 0: 86.5%
Alternative 9:

(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x)
    tmp = wj;
end
code[wj_, x_] := wj
\begin{array}{l}
wj
\end{array}
Derivation:

- Initial program: 78.6%
- distribute-rgt1-in: 79.4%
- associate-/l/: 79.4%
- div-sub: 78.7%
- associate-/l*: 78.7%
- *-inverses: 80.6%
- *-rgt-identity: 80.6%
- Simplified: 80.6%
- Taylor expanded in wj around inf: 4.4%
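The last few alternatives discard more and more of the expression, trading accuracy for speed; a bare polynomial such as Alternative 7 (x + wj·wj) avoids exp entirely. A crude timing sketch of that trade-off follows; the report's Speedup column comes from Herbie's own benchmarking, and the repetition count and sample point below are arbitrary assumptions.

```python
import math
import timeit

def initial(wj, x):
    # The initial Newton step.
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))

def alt7(wj, x):
    # Alternative 7: no exp calls at all.
    return x + (wj * wj)

for fn in (initial, alt7):
    t = timeit.timeit(lambda: fn(1e-8, 0.5), number=200_000)
    print(fn.__name__, f"{t:.3f}s")
```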
Developer target (from the :alt annotation in the input):

(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
Reproduce:

herbie shell --seed 2024086
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))