
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
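For context (an annotation on the report, not part of Herbie's output): the :name at the end of this report, "Jmat.Real.lambertw, newton loop step", identifies this expression as one Newton step for solving wj * e^wj = x, i.e. for evaluating the Lambert W function. With f(wj) = wj e^{wj} - x:
\begin{array}{l}
f(wj) = wj \cdot e^{wj} - x, \qquad f'(wj) = e^{wj} + wj \cdot e^{wj},\\
wj - \frac{f(wj)}{f'(wj)} = wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}},
\end{array}
which is exactly the expression above with t_0 = wj \cdot e^{wj}.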
Herbie found 12 alternatives to the program above. Each alternative is shown below in several target languages, followed by its derivation: the rewrite and Taylor-expansion steps Herbie applied, with the estimated accuracy after each step.
(FPCore (wj x)
:precision binary64
(let* ((t_0 (- (/ x (exp wj)) wj)))
(if (<= wj -4.5e-6)
(+ wj (/ t_0 (+ wj 1.0)))
(if (<= wj 2.8e-6)
(+
x
(*
wj
(-
(+ (* wj (* x (+ 2.5 (* wj -2.6666666666666665)))) (* wj (- 1.0 wj)))
(* x 2.0))))
(fma t_0 (exp (- (log1p wj))) wj)))))
double code(double wj, double x) {
double t_0 = (x / exp(wj)) - wj;
double tmp;
if (wj <= -4.5e-6) {
tmp = wj + (t_0 / (wj + 1.0));
} else if (wj <= 2.8e-6) {
tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
} else {
tmp = fma(t_0, exp(-log1p(wj)), wj);
}
return tmp;
}
function code(wj, x)
    t_0 = Float64(Float64(x / exp(wj)) - wj)
    tmp = 0.0
    if (wj <= -4.5e-6)
        tmp = Float64(wj + Float64(t_0 / Float64(wj + 1.0)));
    elseif (wj <= 2.8e-6)
        tmp = Float64(x + Float64(wj * Float64(Float64(Float64(wj * Float64(x * Float64(2.5 + Float64(wj * -2.6666666666666665)))) + Float64(wj * Float64(1.0 - wj))) - Float64(x * 2.0))));
    else
        tmp = fma(t_0, exp(Float64(-log1p(wj))), wj);
    end
    return tmp
end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision]}, If[LessEqual[wj, -4.5e-6], N[(wj + N[(t$95$0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.8e-6], N[(x + N[(wj * N[(N[(N[(wj * N[(x * N[(2.5 + N[(wj * -2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(t$95$0 * N[Exp[(-N[Log[1 + wj], $MachinePrecision])], $MachinePrecision] + wj), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{e^{wj}} - wj\\
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{t_0}{wj + 1}\\
\mathbf{elif}\;wj \leq 2.8 \cdot 10^{-6}:\\
\;\;\;\;x + wj \cdot \left(\left(wj \cdot \left(x \cdot \left(2.5 + wj \cdot -2.6666666666666665\right)\right) + wj \cdot \left(1 - wj\right)\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t_0, e^{-\mathsf{log1p}\left(wj\right)}, wj\right)\\
\end{array}
\end{array}
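One way to read the else branch (an annotation, not part of Herbie's output): e^{-log1p(wj)} is algebraically 1/(1 + wj), so the branch computes the same quantity as wj + t_0 / (wj + 1), with the final multiply-add fused into a single rounding by fma:
\begin{array}{l}
e^{-\mathsf{log1p}\left(wj\right)} = e^{-\ln\left(1 + wj\right)} = \frac{1}{1 + wj}, \qquad \mathsf{fma}\left(t_0, e^{-\mathsf{log1p}\left(wj\right)}, wj\right) \approx wj + \frac{t_0}{wj + 1}.
\end{array}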
if wj < -4.50000000000000011e-6:
Initial program 59.6%
sub-neg 59.6%
distribute-neg-frac 59.6%
distribute-rgt1-in 97.1%
associate-/l/ 97.1%
sub-neg 97.1%
+-commutative 97.1%
distribute-neg-in 97.1%
remove-double-neg 97.1%
sub-neg 97.1%
div-sub 59.6%
associate-/l* 59.6%
*-inverses 97.1%
*-rgt-identity 97.1%
Simplified 97.1%
if -4.50000000000000011e-6 < wj < 2.79999999999999987e-6:
Initial program 77.2%
sub-neg 77.2%
distribute-neg-frac 77.2%
distribute-rgt1-in 77.2%
associate-/l/ 77.2%
sub-neg 77.2%
+-commutative 77.2%
distribute-neg-in 77.2%
remove-double-neg 77.2%
sub-neg 77.2%
div-sub 77.2%
associate-/l* 77.2%
*-inverses 77.2%
*-rgt-identity 77.2%
Simplified 77.2%
Taylor expanded in wj around 0 99.5%
Taylor expanded in x around 0 99.8%
if 2.79999999999999987e-6 < wj:
Initial program 82.2%
sub-neg 82.2%
distribute-neg-frac 82.2%
distribute-rgt1-in 82.2%
associate-/l/ 81.6%
sub-neg 81.6%
+-commutative 81.6%
distribute-neg-in 81.6%
remove-double-neg 81.6%
sub-neg 81.6%
div-sub 81.6%
associate-/l* 81.6%
*-inverses 98.2%
*-rgt-identity 98.2%
Simplified 98.2%
+-commutative 98.2%
div-inv 98.2%
fma-define 98.2%
add-exp-log 98.2%
rec-exp 98.2%
+-commutative 98.2%
log1p-define 99.2%
Applied egg-rr 99.2%
Final simplification 99.7%
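The middle branch corresponds to the "Taylor expanded in wj around 0" step in the derivation above. A sketch of the algebra (not Herbie's internal computation), starting from the exact step rewritten as a sum of three terms:
\begin{array}{l}
N(wj, x) = wj - \frac{wj}{1 + wj} + \frac{x \cdot e^{-wj}}{1 + wj},\\
\frac{e^{-wj}}{1 + wj} = 1 - 2 wj + \frac{5}{2} wj^{2} - \frac{8}{3} wj^{3} + O\left(wj^{4}\right), \qquad wj - \frac{wj}{1 + wj} = wj^{2} - wj^{3} + O\left(wj^{4}\right),\\
N(wj, x) \approx x + wj \cdot \left( wj \cdot x \cdot \left(\frac{5}{2} - \frac{8}{3} wj\right) + wj \cdot \left(1 - wj\right) - 2 x \right).
\end{array}
The constant -8/3 \approx -2.6666666666666665 is the coefficient that appears in the code.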
(FPCore (wj x)
:precision binary64
(let* ((t_0 (- (/ x (exp wj)) wj)))
(if (<= wj -4.5e-6)
(+ wj (/ t_0 (+ wj 1.0)))
(if (<= wj 4.3e-6)
(+
x
(*
wj
(-
(+ (* wj (* x (+ 2.5 (* wj -2.6666666666666665)))) (* wj (- 1.0 wj)))
(* x 2.0))))
(+ wj (/ 1.0 (/ (+ wj 1.0) t_0)))))))
double code(double wj, double x) {
double t_0 = (x / exp(wj)) - wj;
double tmp;
if (wj <= -4.5e-6) {
tmp = wj + (t_0 / (wj + 1.0));
} else if (wj <= 4.3e-6) {
tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
} else {
tmp = wj + (1.0 / ((wj + 1.0) / t_0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x / exp(wj)) - wj
if (wj <= (-4.5d-6)) then
tmp = wj + (t_0 / (wj + 1.0d0))
else if (wj <= 4.3d-6) then
tmp = x + (wj * (((wj * (x * (2.5d0 + (wj * (-2.6666666666666665d0))))) + (wj * (1.0d0 - wj))) - (x * 2.0d0)))
else
tmp = wj + (1.0d0 / ((wj + 1.0d0) / t_0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x / Math.exp(wj)) - wj;
double tmp;
if (wj <= -4.5e-6) {
tmp = wj + (t_0 / (wj + 1.0));
} else if (wj <= 4.3e-6) {
tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
} else {
tmp = wj + (1.0 / ((wj + 1.0) / t_0));
}
return tmp;
}
def code(wj, x):
    t_0 = (x / math.exp(wj)) - wj
    tmp = 0
    if wj <= -4.5e-6:
        tmp = wj + (t_0 / (wj + 1.0))
    elif wj <= 4.3e-6:
        tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)))
    else:
        tmp = wj + (1.0 / ((wj + 1.0) / t_0))
    return tmp
function code(wj, x)
    t_0 = Float64(Float64(x / exp(wj)) - wj)
    tmp = 0.0
    if (wj <= -4.5e-6)
        tmp = Float64(wj + Float64(t_0 / Float64(wj + 1.0)));
    elseif (wj <= 4.3e-6)
        tmp = Float64(x + Float64(wj * Float64(Float64(Float64(wj * Float64(x * Float64(2.5 + Float64(wj * -2.6666666666666665)))) + Float64(wj * Float64(1.0 - wj))) - Float64(x * 2.0))));
    else
        tmp = Float64(wj + Float64(1.0 / Float64(Float64(wj + 1.0) / t_0)));
    end
    return tmp
end
function tmp_2 = code(wj, x)
    t_0 = (x / exp(wj)) - wj;
    tmp = 0.0;
    if (wj <= -4.5e-6)
        tmp = wj + (t_0 / (wj + 1.0));
    elseif (wj <= 4.3e-6)
        tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
    else
        tmp = wj + (1.0 / ((wj + 1.0) / t_0));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision]}, If[LessEqual[wj, -4.5e-6], N[(wj + N[(t$95$0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 4.3e-6], N[(x + N[(wj * N[(N[(N[(wj * N[(x * N[(2.5 + N[(wj * -2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(1.0 / N[(N[(wj + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{e^{wj}} - wj\\
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6}:\\
\;\;\;\;wj + \frac{t_0}{wj + 1}\\
\mathbf{elif}\;wj \leq 4.3 \cdot 10^{-6}:\\
\;\;\;\;x + wj \cdot \left(\left(wj \cdot \left(x \cdot \left(2.5 + wj \cdot -2.6666666666666665\right)\right) + wj \cdot \left(1 - wj\right)\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{1}{\frac{wj + 1}{t_0}}\\
\end{array}
\end{array}
if wj < -4.50000000000000011e-6:
Initial program 59.6%
sub-neg 59.6%
distribute-neg-frac 59.6%
distribute-rgt1-in 97.1%
associate-/l/ 97.1%
sub-neg 97.1%
+-commutative 97.1%
distribute-neg-in 97.1%
remove-double-neg 97.1%
sub-neg 97.1%
div-sub 59.6%
associate-/l* 59.6%
*-inverses 97.1%
*-rgt-identity 97.1%
Simplified 97.1%
if -4.50000000000000011e-6 < wj < 4.30000000000000033e-6:
Initial program 77.2%
sub-neg 77.2%
distribute-neg-frac 77.2%
distribute-rgt1-in 77.2%
associate-/l/ 77.2%
sub-neg 77.2%
+-commutative 77.2%
distribute-neg-in 77.2%
remove-double-neg 77.2%
sub-neg 77.2%
div-sub 77.2%
associate-/l* 77.2%
*-inverses 77.2%
*-rgt-identity 77.2%
Simplified 77.2%
Taylor expanded in wj around 0 99.5%
Taylor expanded in x around 0 99.8%
if 4.30000000000000033e-6 < wj:
Initial program 82.2%
sub-neg 82.2%
distribute-neg-frac 82.2%
distribute-rgt1-in 82.2%
associate-/l/ 81.6%
sub-neg 81.6%
+-commutative 81.6%
distribute-neg-in 81.6%
remove-double-neg 81.6%
sub-neg 81.6%
div-sub 81.6%
associate-/l* 81.6%
*-inverses 98.2%
*-rgt-identity 98.2%
Simplified 98.2%
clear-num 98.9%
Applied egg-rr 98.9%
Final simplification 99.7%
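Compared with the previous alternative, the else branch here avoids fma, exp, and log1p; the "clear-num" step in the derivation rewrites the quotient t_0 / (wj + 1) into a nested division, which is the same quantity algebraically (an annotation, not part of Herbie's output):
\begin{array}{l}
wj + \frac{1}{\left(wj + 1\right) / t_0} = wj + \frac{t_0}{wj + 1}.
\end{array}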
(FPCore (wj x)
:precision binary64
(if (or (<= wj -4.5e-6) (not (<= wj 4.8e-6)))
(+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))
(+
x
(*
wj
(-
(+ (* wj (* x (+ 2.5 (* wj -2.6666666666666665)))) (* wj (- 1.0 wj)))
(* x 2.0))))))
double code(double wj, double x) {
double tmp;
if ((wj <= -4.5e-6) || !(wj <= 4.8e-6)) {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
} else {
tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if ((wj <= (-4.5d-6)) .or. (.not. (wj <= 4.8d-6))) then
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
else
tmp = x + (wj * (((wj * (x * (2.5d0 + (wj * (-2.6666666666666665d0))))) + (wj * (1.0d0 - wj))) - (x * 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if ((wj <= -4.5e-6) || !(wj <= 4.8e-6)) {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
} else {
tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
}
return tmp;
}
def code(wj, x):
    tmp = 0
    if (wj <= -4.5e-6) or not (wj <= 4.8e-6):
        tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0))
    else:
        tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)))
    return tmp
function code(wj, x)
    tmp = 0.0
    if ((wj <= -4.5e-6) || !(wj <= 4.8e-6))
        tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0)));
    else
        tmp = Float64(x + Float64(wj * Float64(Float64(Float64(wj * Float64(x * Float64(2.5 + Float64(wj * -2.6666666666666665)))) + Float64(wj * Float64(1.0 - wj))) - Float64(x * 2.0))));
    end
    return tmp
end
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if ((wj <= -4.5e-6) || ~((wj <= 4.8e-6)))
        tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
    else
        tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := If[Or[LessEqual[wj, -4.5e-6], N[Not[LessEqual[wj, 4.8e-6]], $MachinePrecision]], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(wj * N[(N[(N[(wj * N[(x * N[(2.5 + N[(wj * -2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6} \lor \neg \left(wj \leq 4.8 \cdot 10^{-6}\right):\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;x + wj \cdot \left(\left(wj \cdot \left(x \cdot \left(2.5 + wj \cdot -2.6666666666666665\right)\right) + wj \cdot \left(1 - wj\right)\right) - x \cdot 2\right)\\
\end{array}
\end{array}
if wj < -4.50000000000000011e-6 or 4.7999999999999998e-6 < wj:
Initial program 69.3%
sub-neg 69.3%
distribute-neg-frac 69.3%
distribute-rgt1-in 90.7%
associate-/l/ 90.4%
sub-neg 90.4%
+-commutative 90.4%
distribute-neg-in 90.4%
remove-double-neg 90.4%
sub-neg 90.4%
div-sub 69.0%
associate-/l* 69.0%
*-inverses 97.6%
*-rgt-identity 97.6%
Simplified 97.6%
if -4.50000000000000011e-6 < wj < 4.7999999999999998e-6:
Initial program 77.2%
sub-neg 77.2%
distribute-neg-frac 77.2%
distribute-rgt1-in 77.2%
associate-/l/ 77.2%
sub-neg 77.2%
+-commutative 77.2%
distribute-neg-in 77.2%
remove-double-neg 77.2%
sub-neg 77.2%
div-sub 77.2%
associate-/l* 77.2%
*-inverses 77.2%
*-rgt-identity 77.2%
Simplified 77.2%
Taylor expanded in wj around 0 99.5%
Taylor expanded in x around 0 99.8%
Final simplification 99.7%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.009)
(/ x (* (exp wj) (+ wj 1.0)))
(+
x
(*
wj
(-
(+ (* wj (* x (+ 2.5 (* wj -2.6666666666666665)))) (* wj (- 1.0 wj)))
(* x 2.0))))))
double code(double wj, double x) {
double tmp;
if (wj <= -0.009) {
tmp = x / (exp(wj) * (wj + 1.0));
} else {
tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-0.009d0)) then
tmp = x / (exp(wj) * (wj + 1.0d0))
else
tmp = x + (wj * (((wj * (x * (2.5d0 + (wj * (-2.6666666666666665d0))))) + (wj * (1.0d0 - wj))) - (x * 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -0.009) {
tmp = x / (Math.exp(wj) * (wj + 1.0));
} else {
tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
}
return tmp;
}
def code(wj, x):
    tmp = 0
    if wj <= -0.009:
        tmp = x / (math.exp(wj) * (wj + 1.0))
    else:
        tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)))
    return tmp
function code(wj, x)
    tmp = 0.0
    if (wj <= -0.009)
        tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0)));
    else
        tmp = Float64(x + Float64(wj * Float64(Float64(Float64(wj * Float64(x * Float64(2.5 + Float64(wj * -2.6666666666666665)))) + Float64(wj * Float64(1.0 - wj))) - Float64(x * 2.0))));
    end
    return tmp
end
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= -0.009)
        tmp = x / (exp(wj) * (wj + 1.0));
    else
        tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := If[LessEqual[wj, -0.009], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(wj * N[(N[(N[(wj * N[(x * N[(2.5 + N[(wj * -2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.009:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{else}:\\
\;\;\;\;x + wj \cdot \left(\left(wj \cdot \left(x \cdot \left(2.5 + wj \cdot -2.6666666666666665\right)\right) + wj \cdot \left(1 - wj\right)\right) - x \cdot 2\right)\\
\end{array}
\end{array}
if wj < -0.00899999999999999932:
Initial program 49.7%
sub-neg 49.7%
distribute-neg-frac 49.7%
distribute-rgt1-in 100.0%
associate-/l/ 99.7%
sub-neg 99.7%
+-commutative 99.7%
distribute-neg-in 99.7%
remove-double-neg 99.7%
sub-neg 99.7%
div-sub 49.7%
associate-/l* 49.7%
*-inverses 99.7%
*-rgt-identity 99.7%
Simplified 99.7%
Taylor expanded in x around inf 83.6%
if -0.00899999999999999932 < wj:
Initial program 77.4%
sub-neg 77.4%
distribute-neg-frac 77.4%
distribute-rgt1-in 77.4%
associate-/l/ 77.4%
sub-neg 77.4%
+-commutative 77.4%
distribute-neg-in 77.4%
remove-double-neg 77.4%
sub-neg 77.4%
div-sub 77.4%
associate-/l* 77.4%
*-inverses 77.8%
*-rgt-identity 77.8%
Simplified 77.8%
Taylor expanded in wj around 0 97.3%
Taylor expanded in x around 0 97.7%
Final simplification 97.4%
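A reading of the wj <= -0.009 branch above (an annotation, not part of Herbie's output): the "Taylor expanded in x around inf" step keeps only the term of the exact step that is proportional to x,
\begin{array}{l}
wj - \frac{wj}{wj + 1} + \frac{x}{e^{wj} \cdot \left(wj + 1\right)} \approx \frac{x}{e^{wj} \cdot \left(wj + 1\right)},
\end{array}
which is a good approximation when the x term dominates the other two.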
(FPCore (wj x)
:precision binary64
(+
x
(*
wj
(-
(+ (* wj (* x (+ 2.5 (* wj -2.6666666666666665)))) (* wj (- 1.0 wj)))
(* x 2.0)))))
double code(double wj, double x) {
return x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (((wj * (x * (2.5d0 + (wj * (-2.6666666666666665d0))))) + (wj * (1.0d0 - wj))) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
}
def code(wj, x): return x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(Float64(wj * Float64(x * Float64(2.5 + Float64(wj * -2.6666666666666665)))) + Float64(wj * Float64(1.0 - wj))) - Float64(x * 2.0)))) end
function tmp = code(wj, x)
    tmp = x + (wj * (((wj * (x * (2.5 + (wj * -2.6666666666666665)))) + (wj * (1.0 - wj))) - (x * 2.0)));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(N[(wj * N[(x * N[(2.5 + N[(wj * -2.6666666666666665), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(\left(wj \cdot \left(x \cdot \left(2.5 + wj \cdot -2.6666666666666665\right)\right) + wj \cdot \left(1 - wj\right)\right) - x \cdot 2\right)
\end{array}
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 77.9%
associate-/l/ 77.9%
sub-neg 77.9%
+-commutative 77.9%
distribute-neg-in 77.9%
remove-double-neg 77.9%
sub-neg 77.9%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.3%
*-rgt-identity 78.3%
Simplified 78.3%
Taylor expanded in wj around 0 95.3%
Taylor expanded in x around 0 95.6%
Final simplification 95.6%
(FPCore (wj x) :precision binary64 (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))))
double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
}
def code(wj, x): return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0)))) end
function tmp = code(wj, x)
    tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)
\end{array}
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 77.9%
associate-/l/ 77.9%
sub-neg 77.9%
+-commutative 77.9%
distribute-neg-in 77.9%
remove-double-neg 77.9%
sub-neg 77.9%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.3%
*-rgt-identity 78.3%
Simplified 78.3%
Taylor expanded in wj around 0 95.3%
Taylor expanded in x around 0 95.3%
neg-mul-1 95.3%
unsub-neg 95.3%
Simplified 95.3%
Final simplification 95.3%
(FPCore (wj x) :precision binary64 (if (<= wj 3.5e-10) (+ x (* -2.0 (* wj x))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
double tmp;
if (wj <= 3.5e-10) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 3.5d-10) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 3.5e-10) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x):
    tmp = 0
    if wj <= 3.5e-10:
        tmp = x + (-2.0 * (wj * x))
    else:
        tmp = wj + (wj / (-1.0 - wj))
    return tmp
function code(wj, x)
    tmp = 0.0
    if (wj <= 3.5e-10)
        tmp = Float64(x + Float64(-2.0 * Float64(wj * x)));
    else
        tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj)));
    end
    return tmp
end
function tmp_2 = code(wj, x)
    tmp = 0.0;
    if (wj <= 3.5e-10)
        tmp = x + (-2.0 * (wj * x));
    else
        tmp = wj + (wj / (-1.0 - wj));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := If[LessEqual[wj, 3.5e-10], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 3.5 \cdot 10^{-10}:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 3.4999999999999998e-10:
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 78.0%
associate-/l/ 78.0%
sub-neg 78.0%
+-commutative 78.0%
distribute-neg-in 78.0%
remove-double-neg 78.0%
sub-neg 78.0%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.0%
*-rgt-identity 78.0%
Simplified 78.0%
Taylor expanded in wj around 0 81.4%
if 3.4999999999999998e-10 < wj:
Initial program 76.9%
sub-neg 76.9%
distribute-neg-frac 76.9%
distribute-rgt1-in 76.7%
associate-/l/ 76.6%
sub-neg 76.6%
+-commutative 76.6%
distribute-neg-in 76.6%
remove-double-neg 76.6%
sub-neg 76.6%
div-sub 76.6%
associate-/l* 76.6%
*-inverses 89.1%
*-rgt-identity 89.1%
Simplified 89.1%
Taylor expanded in x around 0 64.9%
Final simplification 80.8%
(FPCore (wj x) :precision binary64 (+ x (* wj (- wj (* x 2.0)))))
double code(double wj, double x) {
return x + (wj * (wj - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (wj - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * (wj - (x * 2.0)));
}
def code(wj, x): return x + (wj * (wj - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(wj - Float64(x * 2.0)))) end
function tmp = code(wj, x)
    tmp = x + (wj * (wj - (x * 2.0)));
end
code[wj_, x_] := N[(x + N[(wj * N[(wj - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj - x \cdot 2\right)
\end{array}
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 77.9%
associate-/l/ 77.9%
sub-neg 77.9%
+-commutative 77.9%
distribute-neg-in 77.9%
remove-double-neg 77.9%
sub-neg 77.9%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.3%
*-rgt-identity 78.3%
Simplified 78.3%
Taylor expanded in wj around 0 95.3%
Taylor expanded in x around 0 95.3%
neg-mul-1 95.3%
unsub-neg 95.3%
Simplified 95.3%
Taylor expanded in wj around 0 94.7%
Final simplification 94.7%
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (wj * x))
end function
public static double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
def code(wj, x): return x + (-2.0 * (wj * x))
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(wj * x))) end
function tmp = code(wj, x)
    tmp = x + (-2.0 * (wj * x));
end
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 77.9%
associate-/l/ 77.9%
sub-neg 77.9%
+-commutative 77.9%
distribute-neg-in 77.9%
remove-double-neg 77.9%
sub-neg 77.9%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.3%
*-rgt-identity 78.3%
Simplified 78.3%
Taylor expanded in wj around 0 79.1%
(FPCore (wj x) :precision binary64 (* x (+ 1.0 (* wj -2.0))))
double code(double wj, double x) {
return x * (1.0 + (wj * -2.0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * (1.0d0 + (wj * (-2.0d0)))
end function
public static double code(double wj, double x) {
return x * (1.0 + (wj * -2.0));
}
def code(wj, x): return x * (1.0 + (wj * -2.0))
function code(wj, x) return Float64(x * Float64(1.0 + Float64(wj * -2.0))) end
function tmp = code(wj, x)
    tmp = x * (1.0 + (wj * -2.0));
end
code[wj_, x_] := N[(x * N[(1.0 + N[(wj * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + wj \cdot -2\right)
\end{array}
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 77.9%
associate-/l/ 77.9%
sub-neg 77.9%
+-commutative 77.9%
distribute-neg-in 77.9%
remove-double-neg 77.9%
sub-neg 77.9%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.3%
*-rgt-identity 78.3%
Simplified 78.3%
Taylor expanded in wj around 0 79.1%
associate-*r* 79.1%
distribute-rgt1-in 79.1%
Simplified 79.1%
Final simplification 79.1%
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x)
    tmp = x;
end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 77.9%
associate-/l/ 77.9%
sub-neg 77.9%
+-commutative 77.9%
distribute-neg-in 77.9%
remove-double-neg 77.9%
sub-neg 77.9%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.3%
*-rgt-identity 78.3%
Simplified 78.3%
Taylor expanded in wj around 0 78.3%
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x)
    tmp = wj;
end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 76.8%
sub-neg 76.8%
distribute-neg-frac 76.8%
distribute-rgt1-in 77.9%
associate-/l/ 77.9%
sub-neg 77.9%
+-commutative 77.9%
distribute-neg-in 77.9%
remove-double-neg 77.9%
sub-neg 77.9%
div-sub 76.8%
associate-/l* 76.8%
*-inverses 78.3%
*-rgt-identity 78.3%
Simplified 78.3%
Taylor expanded in wj around inf 4.4%
Herbie also reports the following program for comparison; it matches the :alt annotation in the input FPCore at the end of this section, i.e. the alternative supplied with the input:
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
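This target is an algebraic rearrangement of the original program; splitting the fraction shows the two are mathematically equal (an annotation, not part of Herbie's output):
\begin{array}{l}
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = wj - \left( \frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}} \right).
\end{array}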
To reproduce these results, run herbie shell --seed 2024096 and enter the following input program:
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))
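For illustration only, here is a minimal Python sketch of how such a Newton loop step is typically iterated to evaluate the Lambert W function for x >= 0. The wrapper is an assumption for the sketch (the function name, starting guess, and iteration count are not from Jmat); the loop body is the rewritten step wj + ((x / e^wj) - wj) / (wj + 1) from the alternatives above.
import math

def lambertw_newton(x, iters=20):
    # Illustrative sketch, not Jmat's implementation: solve w * exp(w) = x
    # for x >= 0 by iterating the rewritten Newton step from this report.
    w = math.log1p(x)  # assumed starting guess; for x >= 0 it lies above the root
    for _ in range(iters):
        w = w + ((x / math.exp(w)) - w) / (w + 1.0)
    return w

print(lambertw_newton(1.0))  # approx. 0.5671432904097838 (the omega constant)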