
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for Lambert W (solves w*exp(w) = x).
real(8) function code(wj, x)
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: e_wj
! Evaluate exp(wj) once; the original formula used it twice.
e_wj = exp(wj)
t_0 = wj * e_wj
code = wj - ((t_0 - x) / (e_wj + t_0))
end function
/** One Newton step for Lambert W (solves w*exp(w) = x). */
public static double code(double wj, double x) {
    double eWj = Math.exp(wj);   // hoisted: original evaluated Math.exp(wj) twice
    double t_0 = wj * eWj;
    return wj - ((t_0 - x) / (eWj + t_0));
}
def code(wj, x):
    """One Newton step for Lambert W (solves w*exp(w) = x).

    The original generated one-liner fused statements and was a syntax error.
    """
    e_wj = math.exp(wj)  # hoisted: original evaluated math.exp(wj) twice
    t_0 = wj * e_wj
    return wj - ((t_0 - x) / (e_wj + t_0))
# One Newton step for Lambert W (solves w*exp(w) = x).
# Original fused all statements on one line (syntax error); reformatted.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton step for Lambert W (solves w*exp(w) = x).
function tmp = code(wj, x)
    ew = exp(wj);
    t_0 = wj * ew;
    tmp = wj - ((t_0 - x) / (ew + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for Lambert W (solves w*exp(w) = x).
real(8) function code(wj, x)
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: e_wj
! Evaluate exp(wj) once; the original formula used it twice.
e_wj = exp(wj)
t_0 = wj * e_wj
code = wj - ((t_0 - x) / (e_wj + t_0))
end function
/** One Newton step for Lambert W (solves w*exp(w) = x). */
public static double code(double wj, double x) {
    double eWj = Math.exp(wj);   // hoisted: original evaluated Math.exp(wj) twice
    double t_0 = wj * eWj;
    return wj - ((t_0 - x) / (eWj + t_0));
}
def code(wj, x):
    """One Newton step for Lambert W (solves w*exp(w) = x).

    The original generated one-liner fused statements and was a syntax error.
    """
    e_wj = math.exp(wj)  # hoisted: original evaluated math.exp(wj) twice
    t_0 = wj * e_wj
    return wj - ((t_0 - x) / (e_wj + t_0))
# One Newton step for Lambert W (solves w*exp(w) = x).
# Original fused all statements on one line (syntax error); reformatted.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton step for Lambert W (solves w*exp(w) = x).
function tmp = code(wj, x)
    ew = exp(wj);
    t_0 = wj * ew;
    tmp = wj - ((t_0 - x) / (ew + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 2e-13)
(+
(*
(pow wj 3.0)
(-
(- (- -1.0 (* -2.0 (+ (* x -4.0) (* x 1.5)))) (* x -3.0))
(* x 0.6666666666666666)))
(+ (* wj wj) (+ x (* -2.0 (* wj x)))))
(fma (- (/ x (exp wj)) wj) (/ 1.0 (+ wj 1.0)) wj))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 2e-13) {
tmp = (pow(wj, 3.0) * (((-1.0 - (-2.0 * ((x * -4.0) + (x * 1.5)))) - (x * -3.0)) - (x * 0.6666666666666666))) + ((wj * wj) + (x + (-2.0 * (wj * x))));
} else {
tmp = fma(((x / exp(wj)) - wj), (1.0 / (wj + 1.0)), wj);
}
return tmp;
}
# Herbie alternative: branch on the size of the corrected Newton step.
# Small-step branch: bivariate Taylor polynomial; otherwise an fma rewrite.
# Original fused all statements on one line (syntax error); reformatted.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    tmp = 0.0
    if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 2e-13)
        tmp = Float64(Float64((wj ^ 3.0) * Float64(Float64(Float64(-1.0 - Float64(-2.0 * Float64(Float64(x * -4.0) + Float64(x * 1.5)))) - Float64(x * -3.0)) - Float64(x * 0.6666666666666666))) + Float64(Float64(wj * wj) + Float64(x + Float64(-2.0 * Float64(wj * x)))))
    else
        tmp = fma(Float64(Float64(x / exp(wj)) - wj), Float64(1.0 / Float64(wj + 1.0)), wj)
    end
    return tmp
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e-13], N[(N[(N[Power[wj, 3.0], $MachinePrecision] * N[(N[(N[(-1.0 - N[(-2.0 * N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * -3.0), $MachinePrecision]), $MachinePrecision] - N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(wj * wj), $MachinePrecision] + N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t_0}{e^{wj} + t_0} \leq 2 \cdot 10^{-13}:\\
\;\;\;\;{wj}^{3} \cdot \left(\left(\left(-1 - -2 \cdot \left(x \cdot -4 + x \cdot 1.5\right)\right) - x \cdot -3\right) - x \cdot 0.6666666666666666\right) + \left(wj \cdot wj + \left(x + -2 \cdot \left(wj \cdot x\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{x}{e^{wj}} - wj, \frac{1}{wj + 1}, wj\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 2.0000000000000001e-13
Initial program 69.4%
sub-neg69.4%
div-sub69.4%
sub-neg69.4%
+-commutative69.4%
distribute-neg-in69.4%
remove-double-neg69.4%
sub-neg69.4%
div-sub69.4%
distribute-rgt1-in69.4%
associate-/l/69.4%
Simplified69.4%
Taylor expanded in wj around 0 99.9%
Taylor expanded in x around 0 99.9%
unpow299.9%
Simplified99.9%
if 2.0000000000000001e-13 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 96.8%
sub-neg96.8%
div-sub96.8%
sub-neg96.8%
+-commutative96.8%
distribute-neg-in96.8%
remove-double-neg96.8%
sub-neg96.8%
div-sub96.8%
distribute-rgt1-in96.9%
associate-/l/96.9%
Simplified99.6%
+-commutative99.6%
div-inv99.6%
fma-def99.8%
Applied egg-rr99.8%
Final simplification99.9%
(FPCore (wj x)
:precision binary64
(+
(*
(pow wj 3.0)
(-
(- (- -1.0 (* -2.0 (+ (* x -4.0) (* x 1.5)))) (* x -3.0))
(* x 0.6666666666666666)))
(+ (* wj wj) (+ x (* -2.0 (* wj x))))))
double code(double wj, double x) {
return (pow(wj, 3.0) * (((-1.0 - (-2.0 * ((x * -4.0) + (x * 1.5)))) - (x * -3.0)) - (x * 0.6666666666666666))) + ((wj * wj) + (x + (-2.0 * (wj * x))));
}
! Herbie alternative: cubic Taylor polynomial of the Newton step about
! (wj, x) = (0, 0); no exp() calls remain. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = ((wj ** 3.0d0) * ((((-1.0d0) - ((-2.0d0) * ((x * (-4.0d0)) + (x * 1.5d0)))) - (x * (-3.0d0))) - (x * 0.6666666666666666d0))) + ((wj * wj) + (x + ((-2.0d0) * (wj * x))))
end function
public static double code(double wj, double x) {
return (Math.pow(wj, 3.0) * (((-1.0 - (-2.0 * ((x * -4.0) + (x * 1.5)))) - (x * -3.0)) - (x * 0.6666666666666666))) + ((wj * wj) + (x + (-2.0 * (wj * x))));
}
def code(wj, x): return (math.pow(wj, 3.0) * (((-1.0 - (-2.0 * ((x * -4.0) + (x * 1.5)))) - (x * -3.0)) - (x * 0.6666666666666666))) + ((wj * wj) + (x + (-2.0 * (wj * x))))
function code(wj, x) return Float64(Float64((wj ^ 3.0) * Float64(Float64(Float64(-1.0 - Float64(-2.0 * Float64(Float64(x * -4.0) + Float64(x * 1.5)))) - Float64(x * -3.0)) - Float64(x * 0.6666666666666666))) + Float64(Float64(wj * wj) + Float64(x + Float64(-2.0 * Float64(wj * x))))) end
function tmp = code(wj, x) tmp = ((wj ^ 3.0) * (((-1.0 - (-2.0 * ((x * -4.0) + (x * 1.5)))) - (x * -3.0)) - (x * 0.6666666666666666))) + ((wj * wj) + (x + (-2.0 * (wj * x)))); end
code[wj_, x_] := N[(N[(N[Power[wj, 3.0], $MachinePrecision] * N[(N[(N[(-1.0 - N[(-2.0 * N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * -3.0), $MachinePrecision]), $MachinePrecision] - N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(wj * wj), $MachinePrecision] + N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{wj}^{3} \cdot \left(\left(\left(-1 - -2 \cdot \left(x \cdot -4 + x \cdot 1.5\right)\right) - x \cdot -3\right) - x \cdot 0.6666666666666666\right) + \left(wj \cdot wj + \left(x + -2 \cdot \left(wj \cdot x\right)\right)\right)
\end{array}
Initial program 77.0%
sub-neg77.0%
div-sub77.0%
sub-neg77.0%
+-commutative77.0%
distribute-neg-in77.0%
remove-double-neg77.0%
sub-neg77.0%
div-sub77.0%
distribute-rgt1-in77.0%
associate-/l/77.0%
Simplified77.8%
Taylor expanded in wj around 0 97.1%
Taylor expanded in x around 0 97.0%
unpow297.0%
Simplified97.0%
Final simplification97.0%
(FPCore (wj x) :precision binary64 (+ (+ x (* -2.0 (* wj x))) (* (- 1.0 (+ (* x -4.0) (* x 1.5))) (pow wj 2.0))))
double code(double wj, double x) {
return (x + (-2.0 * (wj * x))) + ((1.0 - ((x * -4.0) + (x * 1.5))) * pow(wj, 2.0));
}
! Herbie alternative: quadratic (in wj) Taylor polynomial of the Newton
! step about wj = 0. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = (x + ((-2.0d0) * (wj * x))) + ((1.0d0 - ((x * (-4.0d0)) + (x * 1.5d0))) * (wj ** 2.0d0))
end function
public static double code(double wj, double x) {
return (x + (-2.0 * (wj * x))) + ((1.0 - ((x * -4.0) + (x * 1.5))) * Math.pow(wj, 2.0));
}
def code(wj, x): return (x + (-2.0 * (wj * x))) + ((1.0 - ((x * -4.0) + (x * 1.5))) * math.pow(wj, 2.0))
function code(wj, x) return Float64(Float64(x + Float64(-2.0 * Float64(wj * x))) + Float64(Float64(1.0 - Float64(Float64(x * -4.0) + Float64(x * 1.5))) * (wj ^ 2.0))) end
function tmp = code(wj, x) tmp = (x + (-2.0 * (wj * x))) + ((1.0 - ((x * -4.0) + (x * 1.5))) * (wj ^ 2.0)); end
code[wj_, x_] := N[(N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x + -2 \cdot \left(wj \cdot x\right)\right) + \left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2}
\end{array}
Initial program 77.0%
sub-neg77.0%
div-sub77.0%
sub-neg77.0%
+-commutative77.0%
distribute-neg-in77.0%
remove-double-neg77.0%
sub-neg77.0%
div-sub77.0%
distribute-rgt1-in77.0%
associate-/l/77.0%
Simplified77.8%
Taylor expanded in wj around 0 96.8%
Final simplification96.8%
(FPCore (wj x)
:precision binary64
(if (<= wj -3.4e-30)
(* wj (* wj (- 1.0 wj)))
(if (<= wj 2.6e-55)
x
(if (<= wj 8.9e-45)
(* wj wj)
(- wj (/ (+ wj (* x (+ wj -1.0))) (+ wj 1.0)))))))
/* Herbie alternative: piecewise approximation of the Newton step.
 * Branch thresholds were chosen by Herbie's error sampling -- TODO confirm
 * against the report before reuse. Generated code -- kept verbatim. */
double code(double wj, double x) {
double tmp;
if (wj <= -3.4e-30) {
/* wj very negative: wj^2 - wj^3 (x dropped). */
tmp = wj * (wj * (1.0 - wj));
} else if (wj <= 2.6e-55) {
/* wj essentially zero: the step reduces to x. */
tmp = x;
} else if (wj <= 8.9e-45) {
tmp = wj * wj;
} else {
/* General case: algebraic rewrite dividing by (wj + 1). */
tmp = wj - ((wj + (x * (wj + -1.0))) / (wj + 1.0));
}
return tmp;
}
! Herbie alternative: piecewise approximation of the Newton step.
! Branch thresholds come from Herbie's error sampling -- TODO confirm.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
! wj very negative: wj**2 - wj**3 (x dropped).
if (wj <= (-3.4d-30)) then
tmp = wj * (wj * (1.0d0 - wj))
! wj essentially zero: the step reduces to x.
else if (wj <= 2.6d-55) then
tmp = x
else if (wj <= 8.9d-45) then
tmp = wj * wj
else
! General case: algebraic rewrite dividing by (wj + 1).
tmp = wj - ((wj + (x * (wj + (-1.0d0)))) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -3.4e-30) {
tmp = wj * (wj * (1.0 - wj));
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.9e-45) {
tmp = wj * wj;
} else {
tmp = wj - ((wj + (x * (wj + -1.0))) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Piecewise approximation of the Lambert-W Newton step (Herbie output).

    Branch thresholds come from Herbie's error sampling.
    The original generated one-liner fused statements and was a syntax error.
    """
    tmp = 0
    if wj <= -3.4e-30:
        # wj very negative: wj**2 - wj**3 (x dropped)
        tmp = wj * (wj * (1.0 - wj))
    elif wj <= 2.6e-55:
        tmp = x
    elif wj <= 8.9e-45:
        tmp = wj * wj
    else:
        tmp = wj - ((wj + (x * (wj + -1.0))) / (wj + 1.0))
    return tmp
# Piecewise approximation of the Lambert-W Newton step (Herbie output).
# Original fused all statements on one line (syntax error); reformatted.
function code(wj, x)
    tmp = 0.0
    if (wj <= -3.4e-30)
        tmp = Float64(wj * Float64(wj * Float64(1.0 - wj)))
    elseif (wj <= 2.6e-55)
        tmp = x
    elseif (wj <= 8.9e-45)
        tmp = Float64(wj * wj)
    else
        tmp = Float64(wj - Float64(Float64(wj + Float64(x * Float64(wj + -1.0))) / Float64(wj + 1.0)))
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -3.4e-30) tmp = wj * (wj * (1.0 - wj)); elseif (wj <= 2.6e-55) tmp = x; elseif (wj <= 8.9e-45) tmp = wj * wj; else tmp = wj - ((wj + (x * (wj + -1.0))) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -3.4e-30], N[(wj * N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.6e-55], x, If[LessEqual[wj, 8.9e-45], N[(wj * wj), $MachinePrecision], N[(wj - N[(N[(wj + N[(x * N[(wj + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.4 \cdot 10^{-30}:\\
\;\;\;\;wj \cdot \left(wj \cdot \left(1 - wj\right)\right)\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-55}:\\
\;\;\;\;x\\
\mathbf{elif}\;wj \leq 8.9 \cdot 10^{-45}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj + x \cdot \left(wj + -1\right)}{wj + 1}\\
\end{array}
\end{array}
if wj < -3.4000000000000003e-30Initial program 53.3%
sub-neg53.3%
div-sub53.3%
sub-neg53.3%
+-commutative53.3%
distribute-neg-in53.3%
remove-double-neg53.3%
sub-neg53.3%
div-sub53.3%
distribute-rgt1-in53.4%
associate-/l/53.3%
Simplified53.3%
Taylor expanded in x around 0 20.8%
+-commutative20.8%
Simplified20.8%
Taylor expanded in wj around 0 17.8%
associate-+r+17.8%
+-commutative17.8%
cube-mult17.8%
unpow217.8%
distribute-rgt-out17.8%
unpow217.8%
+-commutative17.8%
Simplified17.8%
associate--r+62.2%
+-inverses62.2%
metadata-eval62.2%
associate-*l*62.3%
cancel-sign-sub-inv62.3%
metadata-eval62.3%
Applied egg-rr62.3%
if -3.4000000000000003e-30 < wj < 2.5999999999999999e-55Initial program 81.3%
sub-neg81.3%
div-sub81.3%
sub-neg81.3%
+-commutative81.3%
distribute-neg-in81.3%
remove-double-neg81.3%
sub-neg81.3%
div-sub81.3%
distribute-rgt1-in81.3%
associate-/l/81.3%
Simplified81.3%
Taylor expanded in wj around 0 92.9%
if 2.5999999999999999e-55 < wj < 8.90000000000000034e-45Initial program 17.7%
sub-neg17.7%
div-sub17.7%
sub-neg17.7%
+-commutative17.7%
distribute-neg-in17.7%
remove-double-neg17.7%
sub-neg17.7%
div-sub17.7%
distribute-rgt1-in17.7%
associate-/l/17.7%
Simplified17.7%
Taylor expanded in x around 0 3.8%
+-commutative3.8%
Simplified3.8%
Taylor expanded in wj around 0 86.0%
unpow286.0%
Simplified86.0%
if 8.90000000000000034e-45 < wj Initial program 65.2%
sub-neg65.2%
div-sub65.2%
sub-neg65.2%
+-commutative65.2%
distribute-neg-in65.2%
remove-double-neg65.2%
sub-neg65.2%
div-sub65.2%
distribute-rgt1-in65.4%
associate-/l/65.6%
Simplified75.9%
Taylor expanded in wj around 0 69.3%
associate-*r*69.3%
neg-mul-169.3%
distribute-lft1-in69.3%
+-commutative69.3%
sub-neg69.3%
Simplified69.3%
Final simplification89.6%
(FPCore (wj x) :precision binary64 (+ (+ (* wj wj) (* 2.0 (* x (* wj wj)))) (- x (* wj (+ x x)))))
double code(double wj, double x) {
return ((wj * wj) + (2.0 * (x * (wj * wj)))) + (x - (wj * (x + x)));
}
! Herbie alternative: quadratic Taylor polynomial of the Newton step,
! keeping the 2*x*wj**2 cross term. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = ((wj * wj) + (2.0d0 * (x * (wj * wj)))) + (x - (wj * (x + x)))
end function
public static double code(double wj, double x) {
return ((wj * wj) + (2.0 * (x * (wj * wj)))) + (x - (wj * (x + x)));
}
def code(wj, x): return ((wj * wj) + (2.0 * (x * (wj * wj)))) + (x - (wj * (x + x)))
function code(wj, x) return Float64(Float64(Float64(wj * wj) + Float64(2.0 * Float64(x * Float64(wj * wj)))) + Float64(x - Float64(wj * Float64(x + x)))) end
function tmp = code(wj, x) tmp = ((wj * wj) + (2.0 * (x * (wj * wj)))) + (x - (wj * (x + x))); end
code[wj_, x_] := N[(N[(N[(wj * wj), $MachinePrecision] + N[(2.0 * N[(x * N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x - N[(wj * N[(x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(wj \cdot wj + 2 \cdot \left(x \cdot \left(wj \cdot wj\right)\right)\right) + \left(x - wj \cdot \left(x + x\right)\right)
\end{array}
Initial program 77.0%
sub-neg77.0%
div-sub77.0%
sub-neg77.0%
+-commutative77.0%
distribute-neg-in77.0%
remove-double-neg77.0%
sub-neg77.0%
div-sub77.0%
distribute-rgt1-in77.0%
associate-/l/77.0%
Simplified77.8%
Taylor expanded in wj around 0 76.1%
associate-*r*76.1%
neg-mul-176.1%
distribute-lft1-in76.1%
+-commutative76.1%
sub-neg76.1%
Simplified76.1%
Taylor expanded in wj around 0 96.7%
Taylor expanded in x around 0 96.7%
unpow296.7%
*-commutative96.7%
unpow296.7%
Simplified96.7%
Final simplification96.7%
(FPCore (wj x) :precision binary64 (+ (* wj wj) (- x (* wj (+ x x)))))
double code(double wj, double x) {
return (wj * wj) + (x - (wj * (x + x)));
}
! Herbie alternative: quadratic Taylor polynomial of the Newton step
! without the x*wj**2 cross term. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = (wj * wj) + (x - (wj * (x + x)))
end function
public static double code(double wj, double x) {
return (wj * wj) + (x - (wj * (x + x)));
}
def code(wj, x): return (wj * wj) + (x - (wj * (x + x)))
function code(wj, x) return Float64(Float64(wj * wj) + Float64(x - Float64(wj * Float64(x + x)))) end
function tmp = code(wj, x) tmp = (wj * wj) + (x - (wj * (x + x))); end
code[wj_, x_] := N[(N[(wj * wj), $MachinePrecision] + N[(x - N[(wj * N[(x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj \cdot wj + \left(x - wj \cdot \left(x + x\right)\right)
\end{array}
Initial program 77.0%
sub-neg77.0%
div-sub77.0%
sub-neg77.0%
+-commutative77.0%
distribute-neg-in77.0%
remove-double-neg77.0%
sub-neg77.0%
div-sub77.0%
distribute-rgt1-in77.0%
associate-/l/77.0%
Simplified77.8%
Taylor expanded in wj around 0 76.1%
associate-*r*76.1%
neg-mul-176.1%
distribute-lft1-in76.1%
+-commutative76.1%
sub-neg76.1%
Simplified76.1%
Taylor expanded in wj around 0 96.7%
Taylor expanded in x around 0 96.7%
unpow296.7%
*-commutative96.7%
unpow296.7%
Simplified96.7%
Taylor expanded in x around 0 96.7%
unpow296.7%
Simplified96.7%
Final simplification96.7%
(FPCore (wj x) :precision binary64 (if (<= wj -1.45e-30) (* wj wj) (if (<= wj 2.6e-55) x (if (<= wj 8.8e-45) (* wj wj) x))))
double code(double wj, double x) {
double tmp;
if (wj <= -1.45e-30) {
tmp = wj * wj;
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = x;
}
return tmp;
}
! Herbie alternative: coarse piecewise approximation selecting between
! wj**2 and x by magnitude bands of wj. Thresholds come from Herbie's
! error sampling -- TODO confirm. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-1.45d-30)) then
tmp = wj * wj
else if (wj <= 2.6d-55) then
tmp = x
else if (wj <= 8.8d-45) then
tmp = wj * wj
else
tmp = x
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -1.45e-30) {
tmp = wj * wj;
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = x;
}
return tmp;
}
def code(wj, x):
    """Coarse piecewise approximation of the Lambert-W Newton step (Herbie).

    Selects between wj**2 and x by magnitude bands of wj.
    The original generated one-liner fused statements and was a syntax error.
    """
    tmp = 0
    if wj <= -1.45e-30:
        tmp = wj * wj
    elif wj <= 2.6e-55:
        tmp = x
    elif wj <= 8.8e-45:
        tmp = wj * wj
    else:
        tmp = x
    return tmp
# Coarse piecewise approximation of the Lambert-W Newton step (Herbie).
# Original fused all statements on one line (syntax error); reformatted.
function code(wj, x)
    tmp = 0.0
    if (wj <= -1.45e-30)
        tmp = Float64(wj * wj)
    elseif (wj <= 2.6e-55)
        tmp = x
    elseif (wj <= 8.8e-45)
        tmp = Float64(wj * wj)
    else
        tmp = x
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -1.45e-30) tmp = wj * wj; elseif (wj <= 2.6e-55) tmp = x; elseif (wj <= 8.8e-45) tmp = wj * wj; else tmp = x; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -1.45e-30], N[(wj * wj), $MachinePrecision], If[LessEqual[wj, 2.6e-55], x, If[LessEqual[wj, 8.8e-45], N[(wj * wj), $MachinePrecision], x]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1.45 \cdot 10^{-30}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-55}:\\
\;\;\;\;x\\
\mathbf{elif}\;wj \leq 8.8 \cdot 10^{-45}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;x\\
\end{array}
\end{array}
if wj < -1.44999999999999995e-30 or 2.5999999999999999e-55 < wj < 8.79999999999999974e-45Initial program 40.2%
sub-neg40.2%
div-sub40.2%
sub-neg40.2%
+-commutative40.2%
distribute-neg-in40.2%
remove-double-neg40.2%
sub-neg40.2%
div-sub40.2%
distribute-rgt1-in40.3%
associate-/l/40.2%
Simplified40.2%
Taylor expanded in x around 0 14.6%
+-commutative14.6%
Simplified14.6%
Taylor expanded in wj around 0 67.1%
unpow267.1%
Simplified67.1%
if -1.44999999999999995e-30 < wj < 2.5999999999999999e-55 or 8.79999999999999974e-45 < wj Initial program 80.0%
sub-neg80.0%
div-sub80.0%
sub-neg80.0%
+-commutative80.0%
distribute-neg-in80.0%
remove-double-neg80.0%
sub-neg80.0%
div-sub80.0%
distribute-rgt1-in80.0%
associate-/l/80.0%
Simplified80.8%
Taylor expanded in wj around 0 89.8%
Final simplification88.1%
(FPCore (wj x) :precision binary64 (if (<= wj -1.45e-30) (* wj wj) (if (<= wj 2.6e-55) x (if (<= wj 8.8e-45) (* wj wj) (+ wj x)))))
double code(double wj, double x) {
double tmp;
if (wj <= -1.45e-30) {
tmp = wj * wj;
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = wj + x;
}
return tmp;
}
! Herbie alternative: piecewise approximation; like the previous variant
! but with wj + x in the large-wj branch. Thresholds come from Herbie's
! error sampling -- TODO confirm. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-1.45d-30)) then
tmp = wj * wj
else if (wj <= 2.6d-55) then
tmp = x
else if (wj <= 8.8d-45) then
tmp = wj * wj
else
tmp = wj + x
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -1.45e-30) {
tmp = wj * wj;
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = wj + x;
}
return tmp;
}
def code(wj, x):
    """Piecewise approximation of the Lambert-W Newton step (Herbie).

    Like the wj**2 / x band variant, but returns wj + x for large wj.
    The original generated one-liner fused statements and was a syntax error.
    """
    tmp = 0
    if wj <= -1.45e-30:
        tmp = wj * wj
    elif wj <= 2.6e-55:
        tmp = x
    elif wj <= 8.8e-45:
        tmp = wj * wj
    else:
        tmp = wj + x
    return tmp
# Piecewise approximation of the Lambert-W Newton step (Herbie); returns
# wj + x for large wj. Original one-liner was a syntax error; reformatted.
function code(wj, x)
    tmp = 0.0
    if (wj <= -1.45e-30)
        tmp = Float64(wj * wj)
    elseif (wj <= 2.6e-55)
        tmp = x
    elseif (wj <= 8.8e-45)
        tmp = Float64(wj * wj)
    else
        tmp = Float64(wj + x)
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -1.45e-30) tmp = wj * wj; elseif (wj <= 2.6e-55) tmp = x; elseif (wj <= 8.8e-45) tmp = wj * wj; else tmp = wj + x; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -1.45e-30], N[(wj * wj), $MachinePrecision], If[LessEqual[wj, 2.6e-55], x, If[LessEqual[wj, 8.8e-45], N[(wj * wj), $MachinePrecision], N[(wj + x), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1.45 \cdot 10^{-30}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-55}:\\
\;\;\;\;x\\
\mathbf{elif}\;wj \leq 8.8 \cdot 10^{-45}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;wj + x\\
\end{array}
\end{array}
if wj < -1.44999999999999995e-30 or 2.5999999999999999e-55 < wj < 8.79999999999999974e-45Initial program 40.2%
sub-neg40.2%
div-sub40.2%
sub-neg40.2%
+-commutative40.2%
distribute-neg-in40.2%
remove-double-neg40.2%
sub-neg40.2%
div-sub40.2%
distribute-rgt1-in40.3%
associate-/l/40.2%
Simplified40.2%
Taylor expanded in x around 0 14.6%
+-commutative14.6%
Simplified14.6%
Taylor expanded in wj around 0 67.1%
unpow267.1%
Simplified67.1%
if -1.44999999999999995e-30 < wj < 2.5999999999999999e-55Initial program 81.3%
sub-neg81.3%
div-sub81.3%
sub-neg81.3%
+-commutative81.3%
distribute-neg-in81.3%
remove-double-neg81.3%
sub-neg81.3%
div-sub81.3%
distribute-rgt1-in81.3%
associate-/l/81.3%
Simplified81.3%
Taylor expanded in wj around 0 92.9%
if 8.79999999999999974e-45 < wj Initial program 65.2%
distribute-rgt1-in65.4%
Simplified65.4%
Taylor expanded in wj around 0 59.0%
neg-mul-159.0%
Simplified59.0%
Final simplification88.5%
(FPCore (wj x) :precision binary64 (if (<= wj -3.2e-30) (* wj (- wj (* wj wj))) (if (<= wj 2.6e-55) x (if (<= wj 8.8e-45) (* wj wj) (+ wj x)))))
double code(double wj, double x) {
double tmp;
if (wj <= -3.2e-30) {
tmp = wj * (wj - (wj * wj));
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = wj + x;
}
return tmp;
}
! Herbie alternative: piecewise approximation; negative branch uses the
! factored cubic wj*(wj - wj**2). Thresholds come from Herbie's error
! sampling -- TODO confirm. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-3.2d-30)) then
tmp = wj * (wj - (wj * wj))
else if (wj <= 2.6d-55) then
tmp = x
else if (wj <= 8.8d-45) then
tmp = wj * wj
else
tmp = wj + x
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -3.2e-30) {
tmp = wj * (wj - (wj * wj));
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = wj + x;
}
return tmp;
}
def code(wj, x):
    """Piecewise approximation of the Lambert-W Newton step (Herbie).

    Negative branch uses the factored cubic wj*(wj - wj**2).
    The original generated one-liner fused statements and was a syntax error.
    """
    tmp = 0
    if wj <= -3.2e-30:
        tmp = wj * (wj - (wj * wj))
    elif wj <= 2.6e-55:
        tmp = x
    elif wj <= 8.8e-45:
        tmp = wj * wj
    else:
        tmp = wj + x
    return tmp
# Piecewise approximation of the Lambert-W Newton step (Herbie); negative
# branch uses wj*(wj - wj^2). Original one-liner was a syntax error.
function code(wj, x)
    tmp = 0.0
    if (wj <= -3.2e-30)
        tmp = Float64(wj * Float64(wj - Float64(wj * wj)))
    elseif (wj <= 2.6e-55)
        tmp = x
    elseif (wj <= 8.8e-45)
        tmp = Float64(wj * wj)
    else
        tmp = Float64(wj + x)
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -3.2e-30) tmp = wj * (wj - (wj * wj)); elseif (wj <= 2.6e-55) tmp = x; elseif (wj <= 8.8e-45) tmp = wj * wj; else tmp = wj + x; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -3.2e-30], N[(wj * N[(wj - N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.6e-55], x, If[LessEqual[wj, 8.8e-45], N[(wj * wj), $MachinePrecision], N[(wj + x), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.2 \cdot 10^{-30}:\\
\;\;\;\;wj \cdot \left(wj - wj \cdot wj\right)\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-55}:\\
\;\;\;\;x\\
\mathbf{elif}\;wj \leq 8.8 \cdot 10^{-45}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;wj + x\\
\end{array}
\end{array}
if wj < -3.2e-30Initial program 53.3%
sub-neg53.3%
div-sub53.3%
sub-neg53.3%
+-commutative53.3%
distribute-neg-in53.3%
remove-double-neg53.3%
sub-neg53.3%
div-sub53.3%
distribute-rgt1-in53.4%
associate-/l/53.3%
Simplified53.3%
Taylor expanded in x around 0 20.8%
+-commutative20.8%
Simplified20.8%
Taylor expanded in wj around 0 62.2%
mul-1-neg62.2%
unsub-neg62.2%
unpow262.2%
Simplified62.2%
cube-mult62.2%
distribute-lft-out--62.2%
Applied egg-rr62.2%
if -3.2e-30 < wj < 2.5999999999999999e-55Initial program 81.3%
sub-neg81.3%
div-sub81.3%
sub-neg81.3%
+-commutative81.3%
distribute-neg-in81.3%
remove-double-neg81.3%
sub-neg81.3%
div-sub81.3%
distribute-rgt1-in81.3%
associate-/l/81.3%
Simplified81.3%
Taylor expanded in wj around 0 92.9%
if 2.5999999999999999e-55 < wj < 8.79999999999999974e-45Initial program 17.7%
sub-neg17.7%
div-sub17.7%
sub-neg17.7%
+-commutative17.7%
distribute-neg-in17.7%
remove-double-neg17.7%
sub-neg17.7%
div-sub17.7%
distribute-rgt1-in17.7%
associate-/l/17.7%
Simplified17.7%
Taylor expanded in x around 0 3.8%
+-commutative3.8%
Simplified3.8%
Taylor expanded in wj around 0 86.0%
unpow286.0%
Simplified86.0%
if 8.79999999999999974e-45 < wj Initial program 65.2%
distribute-rgt1-in65.4%
Simplified65.4%
Taylor expanded in wj around 0 59.0%
neg-mul-159.0%
Simplified59.0%
Final simplification88.8%
(FPCore (wj x) :precision binary64 (if (<= wj -3.1e-30) (* wj (* wj (- 1.0 wj))) (if (<= wj 2.6e-55) x (if (<= wj 8.8e-45) (* wj wj) (+ wj x)))))
double code(double wj, double x) {
double tmp;
if (wj <= -3.1e-30) {
tmp = wj * (wj * (1.0 - wj));
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = wj + x;
}
return tmp;
}
! Herbie alternative: piecewise approximation; negative branch uses the
! factored cubic wj*(wj*(1 - wj)). Thresholds come from Herbie's error
! sampling -- TODO confirm. Generated code -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= (-3.1d-30)) then
tmp = wj * (wj * (1.0d0 - wj))
else if (wj <= 2.6d-55) then
tmp = x
else if (wj <= 8.8d-45) then
tmp = wj * wj
else
tmp = wj + x
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= -3.1e-30) {
tmp = wj * (wj * (1.0 - wj));
} else if (wj <= 2.6e-55) {
tmp = x;
} else if (wj <= 8.8e-45) {
tmp = wj * wj;
} else {
tmp = wj + x;
}
return tmp;
}
def code(wj, x):
    """Piecewise approximation of the Lambert-W Newton step (Herbie).

    Negative branch uses the factored cubic wj*(wj*(1 - wj)).
    The original generated one-liner fused statements and was a syntax error.
    """
    tmp = 0
    if wj <= -3.1e-30:
        tmp = wj * (wj * (1.0 - wj))
    elif wj <= 2.6e-55:
        tmp = x
    elif wj <= 8.8e-45:
        tmp = wj * wj
    else:
        tmp = wj + x
    return tmp
# Piecewise approximation of the Lambert-W Newton step (Herbie); negative
# branch uses wj*(wj*(1 - wj)). Original one-liner was a syntax error.
function code(wj, x)
    tmp = 0.0
    if (wj <= -3.1e-30)
        tmp = Float64(wj * Float64(wj * Float64(1.0 - wj)))
    elseif (wj <= 2.6e-55)
        tmp = x
    elseif (wj <= 8.8e-45)
        tmp = Float64(wj * wj)
    else
        tmp = Float64(wj + x)
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= -3.1e-30) tmp = wj * (wj * (1.0 - wj)); elseif (wj <= 2.6e-55) tmp = x; elseif (wj <= 8.8e-45) tmp = wj * wj; else tmp = wj + x; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, -3.1e-30], N[(wj * N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 2.6e-55], x, If[LessEqual[wj, 8.8e-45], N[(wj * wj), $MachinePrecision], N[(wj + x), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.1 \cdot 10^{-30}:\\
\;\;\;\;wj \cdot \left(wj \cdot \left(1 - wj\right)\right)\\
\mathbf{elif}\;wj \leq 2.6 \cdot 10^{-55}:\\
\;\;\;\;x\\
\mathbf{elif}\;wj \leq 8.8 \cdot 10^{-45}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;wj + x\\
\end{array}
\end{array}
if wj < -3.09999999999999991e-30Initial program 53.3%
sub-neg53.3%
div-sub53.3%
sub-neg53.3%
+-commutative53.3%
distribute-neg-in53.3%
remove-double-neg53.3%
sub-neg53.3%
div-sub53.3%
distribute-rgt1-in53.4%
associate-/l/53.3%
Simplified53.3%
Taylor expanded in x around 0 20.8%
+-commutative20.8%
Simplified20.8%
Taylor expanded in wj around 0 17.8%
associate-+r+17.8%
+-commutative17.8%
cube-mult17.8%
unpow217.8%
distribute-rgt-out17.8%
unpow217.8%
+-commutative17.8%
Simplified17.8%
associate--r+62.2%
+-inverses62.2%
metadata-eval62.2%
associate-*l*62.3%
cancel-sign-sub-inv62.3%
metadata-eval62.3%
Applied egg-rr62.3%
if -3.09999999999999991e-30 < wj < 2.5999999999999999e-55Initial program 81.3%
sub-neg81.3%
div-sub81.3%
sub-neg81.3%
+-commutative81.3%
distribute-neg-in81.3%
remove-double-neg81.3%
sub-neg81.3%
div-sub81.3%
distribute-rgt1-in81.3%
associate-/l/81.3%
Simplified81.3%
Taylor expanded in wj around 0 92.9%
if 2.5999999999999999e-55 < wj < 8.79999999999999974e-45Initial program 17.7%
sub-neg17.7%
div-sub17.7%
sub-neg17.7%
+-commutative17.7%
distribute-neg-in17.7%
remove-double-neg17.7%
sub-neg17.7%
div-sub17.7%
distribute-rgt1-in17.7%
associate-/l/17.7%
Simplified17.7%
Taylor expanded in x around 0 3.8%
+-commutative3.8%
Simplified3.8%
Taylor expanded in wj around 0 86.0%
unpow286.0%
Simplified86.0%
if 8.79999999999999974e-45 < wj Initial program 65.2%
distribute-rgt1-in65.4%
Simplified65.4%
Taylor expanded in wj around 0 59.0%
neg-mul-159.0%
Simplified59.0%
Final simplification88.8%
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
! Degenerate Herbie alternative: returns wj unchanged (x is ignored).
! Low-accuracy throwaway from the report -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 77.0%
sub-neg77.0%
div-sub77.0%
sub-neg77.0%
+-commutative77.0%
distribute-neg-in77.0%
remove-double-neg77.0%
sub-neg77.0%
div-sub77.0%
distribute-rgt1-in77.0%
associate-/l/77.0%
Simplified77.8%
Taylor expanded in wj around inf 4.4%
Final simplification4.4%
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
! Degenerate Herbie alternative: returns x unchanged (wj is ignored).
! Low-accuracy throwaway from the report -- kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 77.0%
sub-neg77.0%
div-sub77.0%
sub-neg77.0%
+-commutative77.0%
distribute-neg-in77.0%
remove-double-neg77.0%
sub-neg77.0%
div-sub77.0%
distribute-rgt1-in77.0%
associate-/l/77.0%
Simplified77.8%
Taylor expanded in wj around 0 83.9%
Final simplification83.9%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Herbie target form of the Newton step:
!   wj - ( wj/(wj+1) - x/(e**wj + wj*e**wj) )
! NOTE(review): exp(wj) is evaluated twice here. Kept verbatim.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2023182
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:herbie-target
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))