
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton step for the Lambert W function: given the current iterate wj
! and the target x (seeking w with w*exp(w) = x), return
!   wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj))
real(8) function code(wj, x)
    implicit none
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: t_0
    ! t_0 = wj * e^wj, reused in both numerator and denominator
    t_0 = wj * exp(wj)
    code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function code
/** One Newton step toward solving w*e^w = x (Lambert W):
 *  returns wj - (wj*e^wj - x) / (e^wj + wj*e^wj). */
public static double code(double wj, double x) {
    final double expW = Math.exp(wj);
    final double wjExpW = wj * expW;
    final double numerator = wjExpW - x;
    final double denominator = expW + wjExpW;
    return wj - numerator / denominator;
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (- (/ x (exp wj)) wj)) (t_1 (+ (* x -4.0) (* x 1.5))))
(if (<= wj -3.65e-6)
(+ wj (/ t_0 (+ wj 1.0)))
(if (<= wj 3.8e-6)
(+
(*
(pow wj 3.0)
(- (- (- -1.0 (* -2.0 t_1)) (* x -3.0)) (* x 0.6666666666666666)))
(+ (* (- 1.0 t_1) (pow wj 2.0)) (+ x (* -2.0 (* wj x)))))
(fma t_0 (/ 1.0 (+ wj 1.0)) wj)))))
/* Herbie-rewritten Newton step for the Lambert W iteration
 * wj - (wj*e^wj - x)/(e^wj + wj*e^wj), split into three regimes of wj
 * to avoid cancellation near wj = 0. */
double code(double wj, double x) {
double t_0 = (x / exp(wj)) - wj; /* x*e^{-wj} - wj: numerator of the rearranged quotient */
double t_1 = (x * -4.0) + (x * 1.5); /* equals -2.5*x; kept split as Herbie emitted it */
double tmp;
if (wj <= -3.65e-6) {
/* well below zero: rearranged quotient form */
tmp = wj + (t_0 / (wj + 1.0));
} else if (wj <= 3.8e-6) {
/* narrow band around zero: cubic Taylor polynomial in wj */
tmp = (pow(wj, 3.0) * (((-1.0 - (-2.0 * t_1)) - (x * -3.0)) - (x * 0.6666666666666666))) + (((1.0 - t_1) * pow(wj, 2.0)) + (x + (-2.0 * (wj * x))));
} else {
/* well above zero: same quotient, finished with a fused multiply-add */
tmp = fma(t_0, (1.0 / (wj + 1.0)), wj);
}
return tmp;
}
function code(wj, x) t_0 = Float64(Float64(x / exp(wj)) - wj) t_1 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (wj <= -3.65e-6) tmp = Float64(wj + Float64(t_0 / Float64(wj + 1.0))); elseif (wj <= 3.8e-6) tmp = Float64(Float64((wj ^ 3.0) * Float64(Float64(Float64(-1.0 - Float64(-2.0 * t_1)) - Float64(x * -3.0)) - Float64(x * 0.6666666666666666))) + Float64(Float64(Float64(1.0 - t_1) * (wj ^ 2.0)) + Float64(x + Float64(-2.0 * Float64(wj * x))))); else tmp = fma(t_0, Float64(1.0 / Float64(wj + 1.0)), wj); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -3.65e-6], N[(wj + N[(t$95$0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 3.8e-6], N[(N[(N[Power[wj, 3.0], $MachinePrecision] * N[(N[(N[(-1.0 - N[(-2.0 * t$95$1), $MachinePrecision]), $MachinePrecision] - N[(x * -3.0), $MachinePrecision]), $MachinePrecision] - N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(1.0 - t$95$1), $MachinePrecision] * N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(t$95$0 * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{e^{wj}} - wj\\
t_1 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -3.65 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{t_0}{wj + 1}\\
\mathbf{elif}\;wj \leq 3.8 \cdot 10^{-6}:\\
\;\;\;\;{wj}^{3} \cdot \left(\left(\left(-1 - -2 \cdot t_1\right) - x \cdot -3\right) - x \cdot 0.6666666666666666\right) + \left(\left(1 - t_1\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(wj \cdot x\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t_0, \frac{1}{wj + 1}, wj\right)\\
\end{array}
\end{array}
if wj < -3.65000000000000021e-6: Initial program 70.8%
sub-neg70.8%
div-sub70.8%
sub-neg70.8%
+-commutative70.8%
distribute-neg-in70.8%
remove-double-neg70.8%
sub-neg70.8%
div-sub70.8%
distribute-rgt1-in99.1%
associate-/l/99.1%
Simplified99.1%
if -3.65000000000000021e-6 < wj < 3.8e-6Initial program 78.5%
sub-neg78.5%
div-sub78.5%
sub-neg78.5%
+-commutative78.5%
distribute-neg-in78.5%
remove-double-neg78.5%
sub-neg78.5%
div-sub78.5%
distribute-rgt1-in78.5%
associate-/l/78.6%
Simplified78.6%
Taylor expanded in wj around 0 100.0%
if 3.8e-6 < wj Initial program 39.7%
sub-neg39.7%
div-sub39.7%
sub-neg39.7%
+-commutative39.7%
distribute-neg-in39.7%
remove-double-neg39.7%
sub-neg39.7%
div-sub39.7%
distribute-rgt1-in39.7%
associate-/l/39.7%
Simplified99.7%
+-commutative99.7%
div-inv99.7%
fma-def100.0%
Applied egg-rr100.0%
Final simplification100.0%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (- (/ x (exp wj)) wj)))
(if (<= wj -4.5e-9)
(+ wj (/ t_0 (+ wj 1.0)))
(if (<= wj 7e-9)
(fma wj wj (fma -2.0 (* wj x) x))
(fma t_0 (/ 1.0 (+ wj 1.0)) wj)))))
double code(double wj, double x) {
double t_0 = (x / exp(wj)) - wj;
double tmp;
if (wj <= -4.5e-9) {
tmp = wj + (t_0 / (wj + 1.0));
} else if (wj <= 7e-9) {
tmp = fma(wj, wj, fma(-2.0, (wj * x), x));
} else {
tmp = fma(t_0, (1.0 / (wj + 1.0)), wj);
}
return tmp;
}
function code(wj, x) t_0 = Float64(Float64(x / exp(wj)) - wj) tmp = 0.0 if (wj <= -4.5e-9) tmp = Float64(wj + Float64(t_0 / Float64(wj + 1.0))); elseif (wj <= 7e-9) tmp = fma(wj, wj, fma(-2.0, Float64(wj * x), x)); else tmp = fma(t_0, Float64(1.0 / Float64(wj + 1.0)), wj); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision]}, If[LessEqual[wj, -4.5e-9], N[(wj + N[(t$95$0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 7e-9], N[(wj * wj + N[(-2.0 * N[(wj * x), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision], N[(t$95$0 * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{e^{wj}} - wj\\
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-9}:\\
\;\;\;\;wj + \frac{t_0}{wj + 1}\\
\mathbf{elif}\;wj \leq 7 \cdot 10^{-9}:\\
\;\;\;\;\mathsf{fma}\left(wj, wj, \mathsf{fma}\left(-2, wj \cdot x, x\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t_0, \frac{1}{wj + 1}, wj\right)\\
\end{array}
\end{array}
if wj < -4.49999999999999976e-9Initial program 74.4%
sub-neg74.4%
div-sub74.4%
sub-neg74.4%
+-commutative74.4%
distribute-neg-in74.4%
remove-double-neg74.4%
sub-neg74.4%
div-sub74.4%
distribute-rgt1-in99.2%
associate-/l/99.2%
Simplified99.2%
if -4.49999999999999976e-9 < wj < 6.9999999999999998e-9Initial program 78.4%
sub-neg78.4%
div-sub78.4%
sub-neg78.4%
+-commutative78.4%
distribute-neg-in78.4%
remove-double-neg78.4%
sub-neg78.4%
div-sub78.4%
distribute-rgt1-in78.4%
associate-/l/78.4%
Simplified78.4%
Taylor expanded in wj around 0 99.7%
Taylor expanded in x around 0 99.7%
unpow299.7%
Simplified99.7%
Taylor expanded in wj around 0 99.7%
unpow299.7%
fma-def99.7%
fma-udef99.7%
*-commutative99.7%
Simplified99.7%
if 6.9999999999999998e-9 < wj Initial program 49.5%
sub-neg49.5%
div-sub49.5%
sub-neg49.5%
+-commutative49.5%
distribute-neg-in49.5%
remove-double-neg49.5%
sub-neg49.5%
div-sub49.5%
distribute-rgt1-in49.5%
associate-/l/49.5%
Simplified99.5%
+-commutative99.5%
div-inv99.5%
fma-def99.7%
Applied egg-rr99.7%
Final simplification99.7%
(FPCore (wj x) :precision binary64 (if (or (<= wj -4.4e-9) (not (<= wj 7.6e-9))) (+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0))) (fma wj wj (fma -2.0 (* wj x) x))))
double code(double wj, double x) {
double tmp;
if ((wj <= -4.4e-9) || !(wj <= 7.6e-9)) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else {
tmp = fma(wj, wj, fma(-2.0, (wj * x), x));
}
return tmp;
}
function code(wj, x) tmp = 0.0 if ((wj <= -4.4e-9) || !(wj <= 7.6e-9)) tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); else tmp = fma(wj, wj, fma(-2.0, Float64(wj * x), x)); end return tmp end
code[wj_, x_] := If[Or[LessEqual[wj, -4.4e-9], N[Not[LessEqual[wj, 7.6e-9]], $MachinePrecision]], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj * wj + N[(-2.0 * N[(wj * x), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.4 \cdot 10^{-9} \lor \neg \left(wj \leq 7.6 \cdot 10^{-9}\right):\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(wj, wj, \mathsf{fma}\left(-2, wj \cdot x, x\right)\right)\\
\end{array}
\end{array}
if wj < -4.3999999999999997e-9 or 7.60000000000000023e-9 < wj Initial program 63.7%
sub-neg63.7%
div-sub63.7%
sub-neg63.7%
+-commutative63.7%
distribute-neg-in63.7%
remove-double-neg63.7%
sub-neg63.7%
div-sub63.7%
distribute-rgt1-in77.9%
associate-/l/77.9%
Simplified99.3%
if -4.3999999999999997e-9 < wj < 7.60000000000000023e-9Initial program 78.4%
sub-neg78.4%
div-sub78.4%
sub-neg78.4%
+-commutative78.4%
distribute-neg-in78.4%
remove-double-neg78.4%
sub-neg78.4%
div-sub78.4%
distribute-rgt1-in78.4%
associate-/l/78.4%
Simplified78.4%
Taylor expanded in wj around 0 99.7%
Taylor expanded in x around 0 99.7%
unpow299.7%
Simplified99.7%
Taylor expanded in wj around 0 99.7%
unpow299.7%
fma-def99.7%
fma-udef99.7%
*-commutative99.7%
Simplified99.7%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(if (or (<= wj -1.52e-6) (not (<= wj 5.8e-8)))
(+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))
(+
(* (- 1.0 (+ (* x -4.0) (* x 1.5))) (pow wj 2.0))
(+ x (* -2.0 (* wj x))))))
double code(double wj, double x) {
double tmp;
if ((wj <= -1.52e-6) || !(wj <= 5.8e-8)) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else {
tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * pow(wj, 2.0)) + (x + (-2.0 * (wj * x)));
}
return tmp;
}
! Accurate Lambert-W Newton step (Herbie alternative).
! Rearranged quotient away from zero; quadratic Taylor model on the
! band -1.52e-6 < wj <= 5.8e-8.
real(8) function code(wj, x)
    implicit none
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: tmp
    if ((wj <= (-1.52d-6)) .or. (.not. (wj <= 5.8d-8))) then
        tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
    else
        tmp = ((1.0d0 - ((x * (-4.0d0)) + (x * 1.5d0))) * (wj ** 2.0d0)) + (x + ((-2.0d0) * (wj * x)))
    end if
    code = tmp
end function code
/** Accurate Lambert-W Newton step (Herbie alternative).
 *  Rearranged quotient away from zero; quadratic Taylor model on the
 *  band -1.52e-6 < wj <= 5.8e-8.  NaN inputs take the quotient branch,
 *  matching the original condition's negation semantics. */
public static double code(double wj, double x) {
    final boolean taylorBand = wj > -1.52e-6 && wj <= 5.8e-8;
    if (!taylorBand) {
        return wj + ((x / Math.exp(wj)) - wj) / (wj + 1.0);
    }
    final double coeff = 1.0 - ((x * -4.0) + (x * 1.5));
    return coeff * Math.pow(wj, 2.0) + (x + -2.0 * (wj * x));
}
def code(wj, x): tmp = 0 if (wj <= -1.52e-6) or not (wj <= 5.8e-8): tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0)) else: tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * math.pow(wj, 2.0)) + (x + (-2.0 * (wj * x))) return tmp
function code(wj, x) tmp = 0.0 if ((wj <= -1.52e-6) || !(wj <= 5.8e-8)) tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); else tmp = Float64(Float64(Float64(1.0 - Float64(Float64(x * -4.0) + Float64(x * 1.5))) * (wj ^ 2.0)) + Float64(x + Float64(-2.0 * Float64(wj * x)))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if ((wj <= -1.52e-6) || ~((wj <= 5.8e-8))) tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); else tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * (wj ^ 2.0)) + (x + (-2.0 * (wj * x))); end tmp_2 = tmp; end
code[wj_, x_] := If[Or[LessEqual[wj, -1.52e-6], N[Not[LessEqual[wj, 5.8e-8]], $MachinePrecision]], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(1.0 - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1.52 \cdot 10^{-6} \lor \neg \left(wj \leq 5.8 \cdot 10^{-8}\right):\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;\left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(wj \cdot x\right)\right)\\
\end{array}
\end{array}
if wj < -1.52000000000000006e-6 or 5.8000000000000003e-8 < wj Initial program 57.8%
sub-neg57.8%
div-sub57.8%
sub-neg57.8%
+-commutative57.8%
distribute-neg-in57.8%
remove-double-neg57.8%
sub-neg57.8%
div-sub57.8%
distribute-rgt1-in74.3%
associate-/l/74.3%
Simplified99.3%
if -1.52000000000000006e-6 < wj < 5.8000000000000003e-8Initial program 78.5%
sub-neg78.5%
div-sub78.5%
sub-neg78.5%
+-commutative78.5%
distribute-neg-in78.5%
remove-double-neg78.5%
sub-neg78.5%
div-sub78.5%
distribute-rgt1-in78.5%
associate-/l/78.6%
Simplified78.6%
Taylor expanded in wj around 0 99.7%
Final simplification99.7%
(FPCore (wj x) :precision binary64 (if (or (<= wj -4.4e-9) (not (<= wj 7.6e-9))) (+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0))) (+ (+ x (* -2.0 (* wj x))) (* wj wj))))
double code(double wj, double x) {
double tmp;
if ((wj <= -4.4e-9) || !(wj <= 7.6e-9)) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else {
tmp = (x + (-2.0 * (wj * x))) + (wj * wj);
}
return tmp;
}
! Accurate Lambert-W Newton step (Herbie alternative).
! Rearranged quotient away from zero; expanded quadratic Taylor model
! x - 2*wj*x + wj**2 on the band -4.4e-9 < wj <= 7.6e-9.
real(8) function code(wj, x)
    implicit none
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: tmp
    if ((wj <= (-4.4d-9)) .or. (.not. (wj <= 7.6d-9))) then
        tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
    else
        tmp = (x + ((-2.0d0) * (wj * x))) + (wj * wj)
    end if
    code = tmp
end function code
public static double code(double wj, double x) {
double tmp;
if ((wj <= -4.4e-9) || !(wj <= 7.6e-9)) {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
} else {
tmp = (x + (-2.0 * (wj * x))) + (wj * wj);
}
return tmp;
}
def code(wj, x): tmp = 0 if (wj <= -4.4e-9) or not (wj <= 7.6e-9): tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0)) else: tmp = (x + (-2.0 * (wj * x))) + (wj * wj) return tmp
function code(wj, x) tmp = 0.0 if ((wj <= -4.4e-9) || !(wj <= 7.6e-9)) tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0))); else tmp = Float64(Float64(x + Float64(-2.0 * Float64(wj * x))) + Float64(wj * wj)); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if ((wj <= -4.4e-9) || ~((wj <= 7.6e-9))) tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); else tmp = (x + (-2.0 * (wj * x))) + (wj * wj); end tmp_2 = tmp; end
code[wj_, x_] := If[Or[LessEqual[wj, -4.4e-9], N[Not[LessEqual[wj, 7.6e-9]], $MachinePrecision]], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * wj), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.4 \cdot 10^{-9} \lor \neg \left(wj \leq 7.6 \cdot 10^{-9}\right):\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;\left(x + -2 \cdot \left(wj \cdot x\right)\right) + wj \cdot wj\\
\end{array}
\end{array}
if wj < -4.3999999999999997e-9 or 7.60000000000000023e-9 < wj Initial program 63.7%
sub-neg63.7%
div-sub63.7%
sub-neg63.7%
+-commutative63.7%
distribute-neg-in63.7%
remove-double-neg63.7%
sub-neg63.7%
div-sub63.7%
distribute-rgt1-in77.9%
associate-/l/77.9%
Simplified99.3%
if -4.3999999999999997e-9 < wj < 7.60000000000000023e-9Initial program 78.4%
sub-neg78.4%
div-sub78.4%
sub-neg78.4%
+-commutative78.4%
distribute-neg-in78.4%
remove-double-neg78.4%
sub-neg78.4%
div-sub78.4%
distribute-rgt1-in78.4%
associate-/l/78.4%
Simplified78.4%
Taylor expanded in wj around 0 99.7%
Taylor expanded in x around 0 99.7%
unpow299.7%
Simplified99.7%
Final simplification99.6%
(FPCore (wj x) :precision binary64 (+ (+ x (* -2.0 (* wj x))) (* wj wj)))
/* Quadratic Taylor model of the Lambert-W Newton step around wj = 0:
 * x - 2*wj*x + wj^2, evaluated left to right as emitted by Herbie. */
double code(double wj, double x) {
    double linear = x + -2.0 * (wj * x);
    return linear + wj * wj;
}
! Quadratic Taylor model of the Lambert-W Newton step around wj = 0:
!   code = x - 2*wj*x + wj**2
real(8) function code(wj, x)
    implicit none
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    code = (x + ((-2.0d0) * (wj * x))) + (wj * wj)
end function code
public static double code(double wj, double x) {
return (x + (-2.0 * (wj * x))) + (wj * wj);
}
def code(wj, x): return (x + (-2.0 * (wj * x))) + (wj * wj)
function code(wj, x) return Float64(Float64(x + Float64(-2.0 * Float64(wj * x))) + Float64(wj * wj)) end
function tmp = code(wj, x) tmp = (x + (-2.0 * (wj * x))) + (wj * wj); end
code[wj_, x_] := N[(N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * wj), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x + -2 \cdot \left(wj \cdot x\right)\right) + wj \cdot wj
\end{array}
Initial program 77.6%
sub-neg77.6%
div-sub77.6%
sub-neg77.6%
+-commutative77.6%
distribute-neg-in77.6%
remove-double-neg77.6%
sub-neg77.6%
div-sub77.6%
distribute-rgt1-in78.3%
associate-/l/78.4%
Simplified79.5%
Taylor expanded in wj around 0 95.5%
Taylor expanded in x around 0 95.4%
unpow295.4%
Simplified95.4%
Final simplification95.4%
(FPCore (wj x) :precision binary64 (+ x (* wj wj)))
/* Leading-order model of the Lambert-W Newton step: x + wj^2. */
double code(double wj, double x) {
    double squared = wj * wj;
    return x + squared;
}
! Leading-order model of the Lambert-W Newton step: code = x + wj**2.
real(8) function code(wj, x)
    implicit none
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    code = x + (wj * wj)
end function code
public static double code(double wj, double x) {
return x + (wj * wj);
}
def code(wj, x): return x + (wj * wj)
function code(wj, x) return Float64(x + Float64(wj * wj)) end
function tmp = code(wj, x) tmp = x + (wj * wj); end
code[wj_, x_] := N[(x + N[(wj * wj), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot wj
\end{array}
Initial program 77.6%
sub-neg77.6%
div-sub77.6%
sub-neg77.6%
+-commutative77.6%
distribute-neg-in77.6%
remove-double-neg77.6%
sub-neg77.6%
div-sub77.6%
distribute-rgt1-in78.3%
associate-/l/78.4%
Simplified79.5%
Taylor expanded in wj around 0 95.5%
Taylor expanded in x around 0 95.4%
unpow295.4%
Simplified95.4%
Taylor expanded in wj around 0 94.8%
Final simplification94.8%
(FPCore (wj x) :precision binary64 wj)
/* Trivial Herbie alternative: approximates the Newton step by returning
 * the current iterate wj unchanged; the argument x is unused. */
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 77.6%
sub-neg77.6%
div-sub77.6%
sub-neg77.6%
+-commutative77.6%
distribute-neg-in77.6%
remove-double-neg77.6%
sub-neg77.6%
div-sub77.6%
distribute-rgt1-in78.3%
associate-/l/78.4%
Simplified79.5%
Taylor expanded in wj around inf 4.6%
Final simplification4.6%
(FPCore (wj x) :precision binary64 x)
/* Trivial Herbie alternative: approximates the Newton step by returning
 * the target value x unchanged; the iterate wj is unused. */
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 77.6%
sub-neg77.6%
div-sub77.6%
sub-neg77.6%
+-commutative77.6%
distribute-neg-in77.6%
remove-double-neg77.6%
sub-neg77.6%
div-sub77.6%
distribute-rgt1-in78.3%
associate-/l/78.4%
Simplified79.5%
Taylor expanded in wj around 0 84.3%
Final simplification84.3%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Herbie target form of the Lambert-W Newton step:
!   code = wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj)))
real(8) function code(wj, x)
    implicit none
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function code
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2023196
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:herbie-target
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))