
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step refining wj toward a solution of wj*exp(wj) = x
! (initial program of the Lambert-W newton loop step in this report).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = wj * e**wj; the update subtracts residual/derivative, where the
! derivative of w*e**w is e**w * (1 + w) = exp(wj) + t_0.
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** One Newton-iteration step refining wj toward a solution of w*e^w = x. */
public static double code(double wj, double x) {
    double expW = Math.exp(wj);
    double wExpW = wj * expW;
    // Newton update: subtract residual over derivative e^w * (1 + w).
    return wj - ((wExpW - x) / (expW + wExpW));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** Newton-step refinement for wj*e^wj = x (duplicate listing of the initial program). */
public static double code(double wj, double x) {
    double expOfW = Math.exp(wj);
    double prod = wj * expOfW;
    double step = (prod - x) / (expOfW + prod);
    return wj - step;
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 2e-16)
(*
x
(+
1.0
(*
wj
(-
(+ (* wj (fma wj -2.6666666666666665 2.5)) (/ (- wj (pow wj 2.0)) x))
2.0))))
(*
x
(+ (+ (/ wj x) (/ (exp (- wj)) (+ wj 1.0))) (/ wj (* x (- -1.0 wj))))))))
/* Herbie alternative: branch on the size of the corrected Newton step.
 * When wj + (x - t_0)/(exp(wj) + t_0) is tiny (<= 2e-16), a polynomial
 * (Taylor-series) form is used; otherwise a rearranged exact form.
 * Derivation and accuracy figures come from the Herbie log below. */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 2e-16) {
/* Series branch; fma(wj, -2.6666666666666665, 2.5) computes
 * wj*c + 2.5 with a single rounding (c is presumably -8/3 from the
 * expansion -- per the Herbie log, not re-derived here). */
tmp = x * (1.0 + (wj * (((wj * fma(wj, -2.6666666666666665, 2.5)) + ((wj - pow(wj, 2.0)) / x)) - 2.0)));
} else {
/* Rearranged branch: x * (wj/x + e^{-wj}/(wj+1) + wj/(x*(-1-wj))). */
tmp = x * (((wj / x) + (exp(-wj) / (wj + 1.0))) + (wj / (x * (-1.0 - wj))));
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 2e-16) tmp = Float64(x * Float64(1.0 + Float64(wj * Float64(Float64(Float64(wj * fma(wj, -2.6666666666666665, 2.5)) + Float64(Float64(wj - (wj ^ 2.0)) / x)) - 2.0)))); else tmp = Float64(x * Float64(Float64(Float64(wj / x) + Float64(exp(Float64(-wj)) / Float64(wj + 1.0))) + Float64(wj / Float64(x * Float64(-1.0 - wj))))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e-16], N[(x * N[(1.0 + N[(wj * N[(N[(N[(wj * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision]), $MachinePrecision] + N[(N[(wj - N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(N[(wj / x), $MachinePrecision] + N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t\_0}{e^{wj} + t\_0} \leq 2 \cdot 10^{-16}:\\
\;\;\;\;x \cdot \left(1 + wj \cdot \left(\left(wj \cdot \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right) + \frac{wj - {wj}^{2}}{x}\right) - 2\right)\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\left(\frac{wj}{x} + \frac{e^{-wj}}{wj + 1}\right) + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 2e-16

Initial program 71.7%
distribute-rgt1-in72.3%
associate-/l/72.4%
div-sub71.8%
associate-/l*71.8%
*-inverses72.4%
*-rgt-identity72.4%
Simplified72.4%
Taylor expanded in wj around 0 71.6%
Taylor expanded in x around inf 72.2%
Taylor expanded in wj around 0 99.0%
Taylor expanded in x around -inf 99.0%
+-commutative99.0%
mul-1-neg99.0%
unsub-neg99.0%
+-commutative99.0%
*-commutative99.0%
fma-define99.0%
sub-neg99.0%
metadata-eval99.0%
distribute-rgt-in99.1%
unpow299.1%
neg-mul-199.1%
sub-neg99.1%
Simplified99.1%
if 2e-16 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 96.8%
distribute-rgt1-in99.4%
associate-/l/99.4%
div-sub96.8%
associate-/l*96.8%
*-inverses99.4%
*-rgt-identity99.4%
Simplified99.4%
Taylor expanded in x around inf 99.4%
+-commutative99.4%
associate-/r*99.4%
rec-exp99.4%
+-commutative99.4%
+-commutative99.4%
Simplified99.4%
Final simplification99.2%
(FPCore (wj x)
:precision binary64
(if (<= wj -2.9e-6)
(- wj (/ (- wj (/ x (exp wj))) (+ wj 1.0)))
(*
x
(+
1.0
(*
wj
(-
(* wj (+ 2.5 (+ (/ 1.0 x) (* wj (- (/ -1.0 x) 2.6666666666666665)))))
2.0))))))
/* Herbie alternative: piecewise evaluation of the Newton step.
 * wj <= -2.9e-6: algebraically rearranged exact step;
 * otherwise: cubic polynomial expansion around wj = 0 (see Herbie log). */
double code(double wj, double x) {
double tmp;
if (wj <= -2.9e-6) {
/* Rearranged step: wj - (wj - x*e^{-wj}) / (wj + 1). */
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0));
} else {
/* Polynomial branch; 2.6666666666666665 is presumably 8/3 from the
 * series expansion -- per the Herbie derivation log. */
tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)));
}
return tmp;
}
! Herbie alternative: piecewise evaluation of the Lambert-W Newton step.
! For wj <= -2.9e-6 an algebraically rearranged exact form is used;
! otherwise a polynomial (Taylor-series) approximation around wj = 0.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-2.9d-6)) then
! Rearranged step: wj - (wj - x*e**(-wj)) / (wj + 1)
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0d0))
else
! Polynomial branch; 2.6666666666666665 is presumably 8/3 from the
! series expansion -- per the Herbie derivation log, not re-derived here.
tmp = x * (1.0d0 + (wj * ((wj * (2.5d0 + ((1.0d0 / x) + (wj * (((-1.0d0) / x) - 2.6666666666666665d0))))) - 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -2.9e-6) {
tmp = wj - ((wj - (x / Math.exp(wj))) / (wj + 1.0));
} else {
tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= -2.9e-6: tmp = wj - ((wj - (x / math.exp(wj))) / (wj + 1.0)) else: tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0))) return tmp
function code(wj, x) tmp = 0.0 if (wj <= -2.9e-6) tmp = Float64(wj - Float64(Float64(wj - Float64(x / exp(wj))) / Float64(wj + 1.0))); else tmp = Float64(x * Float64(1.0 + Float64(wj * Float64(Float64(wj * Float64(2.5 + Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(-1.0 / x) - 2.6666666666666665))))) - 2.0)))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -2.9e-6) tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0)); else tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -2.9e-6], N[(wj - N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(1.0 + N[(wj * N[(N[(wj * N[(2.5 + N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(-1.0 / x), $MachinePrecision] - 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -2.9 \cdot 10^{-6}:\\
\;\;\;\;wj - \frac{wj - \frac{x}{e^{wj}}}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(1 + wj \cdot \left(wj \cdot \left(2.5 + \left(\frac{1}{x} + wj \cdot \left(\frac{-1}{x} - 2.6666666666666665\right)\right)\right) - 2\right)\right)\\
\end{array}
\end{array}
if wj < -2.9000000000000002e-6

Initial program 46.9%
distribute-rgt1-in96.9%
associate-/l/96.9%
div-sub46.9%
associate-/l*46.9%
*-inverses96.9%
*-rgt-identity96.9%
Simplified96.9%
if -2.9000000000000002e-6 < wj Initial program 80.0%
distribute-rgt1-in80.0%
associate-/l/80.1%
div-sub80.1%
associate-/l*80.1%
*-inverses80.1%
*-rgt-identity80.1%
Simplified80.1%
Taylor expanded in wj around 0 80.1%
Taylor expanded in x around inf 80.6%
Taylor expanded in wj around 0 99.0%
Final simplification99.0%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.002)
(/ x (* (exp wj) (+ wj 1.0)))
(*
x
(+
1.0
(*
wj
(-
(* wj (+ 2.5 (+ (/ 1.0 x) (* wj (- (/ -1.0 x) 2.6666666666666665)))))
2.0))))))
/* Herbie alternative: piecewise Newton-step evaluation with a wider
 * left branch. wj <= -0.002: fully simplified closed form x/(e^wj*(wj+1));
 * otherwise: cubic polynomial expansion around wj = 0 (see Herbie log). */
double code(double wj, double x) {
double tmp;
if (wj <= -0.002) {
tmp = x / (exp(wj) * (wj + 1.0));
} else {
/* Polynomial branch; 2.6666666666666665 is presumably 8/3 from the
 * series expansion -- per the Herbie derivation log. */
tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)));
}
return tmp;
}
! Herbie alternative: piecewise Newton-step evaluation.
! For wj <= -0.002 a fully simplified closed form x/(e**wj*(wj+1)) is used;
! otherwise a polynomial (Taylor-series) approximation around wj = 0.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-0.002d0)) then
tmp = x / (exp(wj) * (wj + 1.0d0))
else
! Polynomial branch; 2.6666666666666665 is presumably 8/3 from the
! series expansion -- per the Herbie derivation log, not re-derived here.
tmp = x * (1.0d0 + (wj * ((wj * (2.5d0 + ((1.0d0 / x) + (wj * (((-1.0d0) / x) - 2.6666666666666665d0))))) - 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -0.002) {
tmp = x / (Math.exp(wj) * (wj + 1.0));
} else {
tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= -0.002: tmp = x / (math.exp(wj) * (wj + 1.0)) else: tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0))) return tmp
function code(wj, x) tmp = 0.0 if (wj <= -0.002) tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0))); else tmp = Float64(x * Float64(1.0 + Float64(wj * Float64(Float64(wj * Float64(2.5 + Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(-1.0 / x) - 2.6666666666666665))))) - 2.0)))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -0.002) tmp = x / (exp(wj) * (wj + 1.0)); else tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -0.002], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(1.0 + N[(wj * N[(N[(wj * N[(2.5 + N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(-1.0 / x), $MachinePrecision] - 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.002:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(1 + wj \cdot \left(wj \cdot \left(2.5 + \left(\frac{1}{x} + wj \cdot \left(\frac{-1}{x} - 2.6666666666666665\right)\right)\right) - 2\right)\right)\\
\end{array}
\end{array}
if wj < -2e-3

Initial program 40.0%
distribute-rgt1-in100.0%
associate-/l/100.0%
div-sub40.0%
associate-/l*40.0%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 100.0%
distribute-rgt-in40.0%
*-lft-identity40.0%
distribute-rgt1-in100.0%
*-commutative100.0%
Simplified100.0%
if -2e-3 < wj Initial program 80.0%
distribute-rgt1-in80.0%
associate-/l/80.1%
div-sub80.1%
associate-/l*80.1%
*-inverses80.1%
*-rgt-identity80.1%
Simplified80.1%
Taylor expanded in wj around 0 80.1%
Taylor expanded in x around inf 80.6%
Taylor expanded in wj around 0 98.9%
Final simplification98.9%
(FPCore (wj x)
:precision binary64
(*
x
(+
1.0
(*
wj
(-
(* wj (+ 2.5 (+ (/ 1.0 x) (* wj (- (/ -1.0 x) 2.6666666666666665)))))
2.0)))))
/* Unconditional cubic-polynomial form of the Newton step (per the Herbie
 * Taylor expansion around wj = 0); operation order preserved exactly. */
double code(double wj, double x) {
    double slope = (-1.0 / x) - 2.6666666666666665;
    double mid = 2.5 + ((1.0 / x) + (wj * slope));
    double corr = (wj * mid) - 2.0;
    return x * (1.0 + (wj * corr));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * (1.0d0 + (wj * ((wj * (2.5d0 + ((1.0d0 / x) + (wj * (((-1.0d0) / x) - 2.6666666666666665d0))))) - 2.0d0)))
end function
public static double code(double wj, double x) {
return x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)));
}
def code(wj, x): return x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0)))
function code(wj, x) return Float64(x * Float64(1.0 + Float64(wj * Float64(Float64(wj * Float64(2.5 + Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(-1.0 / x) - 2.6666666666666665))))) - 2.0)))) end
function tmp = code(wj, x) tmp = x * (1.0 + (wj * ((wj * (2.5 + ((1.0 / x) + (wj * ((-1.0 / x) - 2.6666666666666665))))) - 2.0))); end
code[wj_, x_] := N[(x * N[(1.0 + N[(wj * N[(N[(wj * N[(2.5 + N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(-1.0 / x), $MachinePrecision] - 2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + wj \cdot \left(wj \cdot \left(2.5 + \left(\frac{1}{x} + wj \cdot \left(\frac{-1}{x} - 2.6666666666666665\right)\right)\right) - 2\right)\right)
\end{array}
Initial program 79.3%
distribute-rgt1-in80.4%
associate-/l/80.5%
div-sub79.3%
associate-/l*79.3%
*-inverses80.5%
*-rgt-identity80.5%
Simplified80.5%
Taylor expanded in wj around 0 79.0%
Taylor expanded in x around inf 79.5%
Taylor expanded in wj around 0 97.3%
Final simplification97.3%
(FPCore (wj x) :precision binary64 (* x (- 1.0 (* wj (+ 2.0 (* wj (/ (+ wj -1.0) x)))))))
/* Compact polynomial form of the Newton step (Herbie alternative);
 * operation order preserved exactly. */
double code(double wj, double x) {
    double ratio = (wj + -1.0) / x;
    double inner = 2.0 + (wj * ratio);
    return x * (1.0 - (wj * inner));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * (1.0d0 - (wj * (2.0d0 + (wj * ((wj + (-1.0d0)) / x)))))
end function
public static double code(double wj, double x) {
return x * (1.0 - (wj * (2.0 + (wj * ((wj + -1.0) / x)))));
}
def code(wj, x): return x * (1.0 - (wj * (2.0 + (wj * ((wj + -1.0) / x)))))
function code(wj, x) return Float64(x * Float64(1.0 - Float64(wj * Float64(2.0 + Float64(wj * Float64(Float64(wj + -1.0) / x)))))) end
function tmp = code(wj, x) tmp = x * (1.0 - (wj * (2.0 + (wj * ((wj + -1.0) / x))))); end
code[wj_, x_] := N[(x * N[(1.0 - N[(wj * N[(2.0 + N[(wj * N[(N[(wj + -1.0), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 - wj \cdot \left(2 + wj \cdot \frac{wj + -1}{x}\right)\right)
\end{array}
Initial program 79.3%
distribute-rgt1-in80.4%
associate-/l/80.5%
div-sub79.3%
associate-/l*79.3%
*-inverses80.5%
*-rgt-identity80.5%
Simplified80.5%
Taylor expanded in wj around 0 79.0%
Taylor expanded in x around inf 79.5%
Taylor expanded in wj around 0 97.3%
Taylor expanded in x around 0 97.2%
associate-/l*97.2%
neg-mul-197.2%
sub-neg97.2%
Simplified97.2%
Final simplification97.2%
(FPCore (wj x) :precision binary64 (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))))
/* Low-order polynomial form of the Newton step (Herbie alternative);
 * operation order preserved exactly. */
double code(double wj, double x) {
    double quad = wj * (1.0 - wj);
    double delta = quad - (x * 2.0);
    return x + (wj * delta);
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
}
def code(wj, x): return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)
\end{array}
Initial program 79.3%
distribute-rgt1-in80.4%
associate-/l/80.5%
div-sub79.3%
associate-/l*79.3%
*-inverses80.5%
*-rgt-identity80.5%
Simplified80.5%
Taylor expanded in wj around 0 96.6%
Taylor expanded in x around 0 96.6%
mul-1-neg96.6%
unsub-neg96.6%
Simplified96.6%
Final simplification96.6%
(FPCore (wj x) :precision binary64 (+ x (* wj wj)))
/* Leading-order approximation of the Newton step: x plus wj squared. */
double code(double wj, double x) {
    double squared = wj * wj;
    return x + squared;
}
! Leading-order approximation of the Newton step: x + wj**2
! (per the Herbie Taylor expansion around wj = 0).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * wj)
end function
/** Leading-order approximation of the Newton step: x plus wj squared. */
public static double code(double wj, double x) {
    double squared = wj * wj;
    return x + squared;
}
def code(wj, x): return x + (wj * wj)
function code(wj, x) return Float64(x + Float64(wj * wj)) end
function tmp = code(wj, x) tmp = x + (wj * wj); end
code[wj_, x_] := N[(x + N[(wj * wj), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot wj
\end{array}
Initial program 79.3%
distribute-rgt1-in80.4%
associate-/l/80.5%
div-sub79.3%
associate-/l*79.3%
*-inverses80.5%
*-rgt-identity80.5%
Simplified80.5%
Taylor expanded in wj around 0 95.7%
cancel-sign-sub-inv95.7%
distribute-rgt-out95.8%
metadata-eval95.8%
metadata-eval95.8%
*-commutative95.8%
Simplified95.8%
Taylor expanded in x around 0 95.8%
Taylor expanded in wj around inf 95.8%
(FPCore (wj x) :precision binary64 x)
/* Constant approximation (Herbie alternative): returns x; wj is ignored. */
double code(double wj, double x) {
    (void) wj; /* unused in this approximation */
    return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 79.3%
distribute-rgt1-in80.4%
associate-/l/80.5%
div-sub79.3%
associate-/l*79.3%
*-inverses80.5%
*-rgt-identity80.5%
Simplified80.5%
Taylor expanded in wj around 0 84.5%
(FPCore (wj x) :precision binary64 wj)
/* Identity approximation (Herbie alternative): returns wj; x is ignored. */
double code(double wj, double x) {
    (void) x; /* unused in this approximation */
    return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 79.3%
distribute-rgt1-in80.4%
associate-/l/80.5%
div-sub79.3%
associate-/l*79.3%
*-inverses80.5%
*-rgt-identity80.5%
Simplified80.5%
Taylor expanded in wj around inf 3.8%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024150
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))