
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-style refinement step for the Lambert W relation
! wj*exp(wj) = x: code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
! NOTE(review): Herbie-generated report code; kept byte-identical.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
! t_0 caches wj*exp(wj); exp(wj) is still evaluated a second time below.
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
    // One Newton-style refinement step for w*e^w = x, with arithmetic
    // identical to the reference implementation (Math.exp called twice).
    final double wTimesExp = wj * Math.exp(wj);
    return wj - ((wTimesExp - x) / (Math.exp(wj) + wTimesExp));
}
def code(wj, x):
    """One Newton-style refinement step for the Lambert W relation
    wj * exp(wj) = x (binary64 reference implementation).

    Bug fix: the report emitted this on a single line with two
    statements after the colon, which is a SyntaxError in Python;
    reformatted without changing any arithmetic.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    # One Newton-style refinement step for wj*exp(wj) = x.
    # Bug fix: the report emitted this on one line with no statement
    # separators, which Julia does not parse; reformatted only.
    # Float64() wrappers are kept so every intermediate rounds to binary64.
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
% One Newton-style refinement step for the Lambert W relation
% wj*exp(wj) = x; arithmetic order matches the reference version.
% Bug fix: the report emitted the declaration and statements on one
% line with no separators, which MATLAB does not parse; reformatted.
  t_0 = wj * exp(wj);
  tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* One Newton-style refinement step for wj*Exp[wj] == x, with every
   intermediate rounded via N[..., $MachinePrecision] to emulate the
   binary64 arithmetic of the other language variants in this report. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))) (t_1 (- wj (/ x (exp wj)))))
(if (<= wj -4.2e-5)
(- wj (/ t_1 (+ wj 1.0)))
(if (<= wj 9.2e-8)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(-
-1.0
(+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0))))
(+ wj (* t_1 (/ 1.0 (- -1.0 wj))))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double t_1 = wj - (x / exp(wj));
double tmp;
if (wj <= -4.2e-5) {
tmp = wj - (t_1 / (wj + 1.0));
} else if (wj <= 9.2e-8) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj + (t_1 * (1.0 / (-1.0 - wj)));
}
return tmp;
}
! Piecewise Herbie rewrite of the Lambert-W Newton step.
! Regime thresholds (-4.2d-5 and 9.2d-8) come from Herbie's regime
! inference; the generated arithmetic is kept byte-identical.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
! t_0: series helper, algebraically -2.5*x written as (x*-4)+(x*1.5);
! t_1: wj - x/exp(wj), shared by the first and third branches.
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
t_1 = wj - (x / exp(wj))
if (wj <= (-4.2d-5)) then
! Large-negative regime: rearranged quotient form.
tmp = wj - (t_1 / (wj + 1.0d0))
else if (wj <= 9.2d-8) then
! Narrow band around zero: Taylor expansion of the update in wj.
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
else
! Positive regime: multiply by the reciprocal of -(1 + wj).
tmp = wj + (t_1 * (1.0d0 / ((-1.0d0) - wj)))
end if
code = tmp
end function
public static double code(double wj, double x) {
    // Piecewise Herbie rewrite of the Lambert-W Newton step; the
    // thresholds -4.2e-5 and 9.2e-8 delimit the accuracy regimes.
    // Each branch reproduces the reference arithmetic exactly.
    final double polyX = (x * -4.0) + (x * 1.5);
    final double delta = wj - (x / Math.exp(wj));
    if (wj <= -4.2e-5) {
        // Large-negative regime: rearranged quotient form.
        return wj - (delta / (wj + 1.0));
    }
    if (wj <= 9.2e-8) {
        // Narrow band around zero: Taylor expansion of the update in wj.
        return x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * polyX) + (x * 0.6666666666666666)))))) - polyX)) - (x * 2.0)));
    }
    // Positive regime: multiply by the reciprocal of -(1 + wj).
    return wj + (delta * (1.0 / (-1.0 - wj)));
}
def code(wj, x):
    """Piecewise Herbie rewrite of the Lambert-W Newton step.

    Regimes (thresholds from Herbie's regime inference):
      wj <= -4.2e-5 : rearranged quotient form
      wj <= 9.2e-8  : Taylor expansion of the update around wj = 0
      otherwise     : reciprocal-multiply form

    Bug fix: the report emitted this on a single line, which is not
    valid Python; reformatted without changing any arithmetic.
    """
    t_0 = (x * -4.0) + (x * 1.5)
    t_1 = wj - (x / math.exp(wj))
    if wj <= -4.2e-5:
        tmp = wj - (t_1 / (wj + 1.0))
    elif wj <= 9.2e-8:
        tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)))
    else:
        tmp = wj + (t_1 * (1.0 / (-1.0 - wj)))
    return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) t_1 = Float64(wj - Float64(x / exp(wj))) tmp = 0.0 if (wj <= -4.2e-5) tmp = Float64(wj - Float64(t_1 / Float64(wj + 1.0))); elseif (wj <= 9.2e-8) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0)))); else tmp = Float64(wj + Float64(t_1 * Float64(1.0 / Float64(-1.0 - wj)))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); t_1 = wj - (x / exp(wj)); tmp = 0.0; if (wj <= -4.2e-5) tmp = wj - (t_1 / (wj + 1.0)); elseif (wj <= 9.2e-8) tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))); else tmp = wj + (t_1 * (1.0 / (-1.0 - wj))); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -4.2e-5], N[(wj - N[(t$95$1 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 9.2e-8], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(t$95$1 * N[(1.0 / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
t_1 := wj - \frac{x}{e^{wj}}\\
\mathbf{if}\;wj \leq -4.2 \cdot 10^{-5}:\\
\;\;\;\;wj - \frac{t\_1}{wj + 1}\\
\mathbf{elif}\;wj \leq 9.2 \cdot 10^{-8}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_0\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + t\_1 \cdot \frac{1}{-1 - wj}\\
\end{array}
\end{array}
if wj < -4.19999999999999977e-5: Initial program 47.9%
distribute-rgt1-in 97.9%
associate-/l/ 97.9%
div-sub 47.9%
associate-/l* 47.9%
*-inverses 97.9%
*-rgt-identity 97.9%
Simplified 97.9%
if -4.19999999999999977e-5 < wj < 9.2000000000000003e-8Initial program 77.5%
distribute-rgt1-in77.4%
associate-/l/77.5%
div-sub77.5%
associate-/l*77.5%
*-inverses77.5%
*-rgt-identity77.5%
Simplified77.5%
Taylor expanded in wj around 0 99.9%
if 9.2000000000000003e-8 < wj Initial program 34.2%
distribute-rgt1-in34.2%
associate-/l/34.5%
div-sub34.5%
associate-/l*34.5%
*-inverses97.0%
*-rgt-identity97.0%
Simplified97.0%
clear-num97.0%
associate-/r/97.1%
Applied egg-rr97.1%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))) (t_1 (+ (* x -4.0) (* x 1.5))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) 4e-12)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_1) (* x 0.6666666666666666))))))
t_1))
(* x 2.0))))
(*
x
(+ (/ (exp (- wj)) (+ wj 1.0)) (+ (/ wj x) (/ wj (* x (- -1.0 wj)))))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4e-12) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)));
} else {
tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj)))));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = wj * exp(wj)
t_1 = (x * (-4.0d0)) + (x * 1.5d0)
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4d-12) then
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_1) + (x * 0.6666666666666666d0)))))) - t_1)) - (x * 2.0d0)))
else
tmp = x * ((exp(-wj) / (wj + 1.0d0)) + ((wj / x) + (wj / (x * ((-1.0d0) - wj)))))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double t_1 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj + ((x - t_0) / (Math.exp(wj) + t_0))) <= 4e-12) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0)));
} else {
tmp = x * ((Math.exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj)))));
}
return tmp;
}
def code(wj, x): t_0 = wj * math.exp(wj) t_1 = (x * -4.0) + (x * 1.5) tmp = 0 if (wj + ((x - t_0) / (math.exp(wj) + t_0))) <= 4e-12: tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0))) else: tmp = x * ((math.exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj))))) return tmp
function code(wj, x) t_0 = Float64(wj * exp(wj)) t_1 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= 4e-12) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_1) + Float64(x * 0.6666666666666666)))))) - t_1)) - Float64(x * 2.0)))); else tmp = Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64(Float64(wj / x) + Float64(wj / Float64(x * Float64(-1.0 - wj)))))); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj * exp(wj); t_1 = (x * -4.0) + (x * 1.5); tmp = 0.0; if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= 4e-12) tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_1) + (x * 0.6666666666666666)))))) - t_1)) - (x * 2.0))); else tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj / x) + (wj / (x * (-1.0 - wj))))); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 4e-12], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$1), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$1), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(wj / x), $MachinePrecision] + N[(wj / N[(x * N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj + \frac{x - t\_0}{e^{wj} + t\_0} \leq 4 \cdot 10^{-12}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t\_1 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_1\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{e^{-wj}}{wj + 1} + \left(\frac{wj}{x} + \frac{wj}{x \cdot \left(-1 - wj\right)}\right)\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 3.99999999999999992e-12Initial program 69.7%
distribute-rgt1-in70.7%
associate-/l/70.8%
div-sub69.7%
associate-/l*69.7%
*-inverses70.8%
*-rgt-identity70.8%
Simplified70.8%
Taylor expanded in wj around 0 97.9%
if 3.99999999999999992e-12 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 89.5%
distribute-rgt1-in92.3%
associate-/l/92.3%
div-sub89.5%
associate-/l*89.5%
*-inverses99.3%
*-rgt-identity99.3%
Simplified99.3%
Taylor expanded in x around inf 99.3%
associate--l+99.3%
associate-/r*99.3%
rec-exp99.4%
+-commutative99.4%
+-commutative99.4%
Simplified99.4%
Final simplification98.3%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (or (<= wj -4.2e-5) (not (<= wj 9.2e-8)))
(- wj (/ (- wj (/ x (exp wj))) (+ wj 1.0)))
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0)))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj <= -4.2e-5) || !(wj <= 9.2e-8)) {
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0));
} else {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if ((wj <= (-4.2d-5)) .or. (.not. (wj <= 9.2d-8))) then
tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0d0))
else
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if ((wj <= -4.2e-5) || !(wj <= 9.2e-8)) {
tmp = wj - ((wj - (x / Math.exp(wj))) / (wj + 1.0));
} else {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
}
return tmp;
}
def code(wj, x): t_0 = (x * -4.0) + (x * 1.5) tmp = 0 if (wj <= -4.2e-5) or not (wj <= 9.2e-8): tmp = wj - ((wj - (x / math.exp(wj))) / (wj + 1.0)) else: tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))) return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if ((wj <= -4.2e-5) || !(wj <= 9.2e-8)) tmp = Float64(wj - Float64(Float64(wj - Float64(x / exp(wj))) / Float64(wj + 1.0))); else tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0)))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = 0.0; if ((wj <= -4.2e-5) || ~((wj <= 9.2e-8))) tmp = wj - ((wj - (x / exp(wj))) / (wj + 1.0)); else tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[wj, -4.2e-5], N[Not[LessEqual[wj, 9.2e-8]], $MachinePrecision]], N[(wj - N[(N[(wj - N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -4.2 \cdot 10^{-5} \lor \neg \left(wj \leq 9.2 \cdot 10^{-8}\right):\\
\;\;\;\;wj - \frac{wj - \frac{x}{e^{wj}}}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_0\right) - x \cdot 2\right)\\
\end{array}
\end{array}
if wj < -4.19999999999999977e-5 or 9.2000000000000003e-8 < wj Initial program 41.0%
distribute-rgt1-in66.0%
associate-/l/66.2%
div-sub41.2%
associate-/l*41.2%
*-inverses97.4%
*-rgt-identity97.4%
Simplified97.4%
if -4.19999999999999977e-5 < wj < 9.2000000000000003e-8Initial program 77.5%
distribute-rgt1-in77.4%
associate-/l/77.5%
div-sub77.5%
associate-/l*77.5%
*-inverses77.5%
*-rgt-identity77.5%
Simplified77.5%
Taylor expanded in wj around 0 99.9%
Final simplification99.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj -0.49)
(/ (/ x (exp wj)) (+ wj 1.0))
(if (<= wj 0.165)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(-
-1.0
(+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0))))
(+ wj (/ wj (- -1.0 wj)))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -0.49) {
tmp = (x / exp(wj)) / (wj + 1.0);
} else if (wj <= 0.165) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if (wj <= (-0.49d0)) then
tmp = (x / exp(wj)) / (wj + 1.0d0)
else if (wj <= 0.165d0) then
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= -0.49) {
tmp = (x / Math.exp(wj)) / (wj + 1.0);
} else if (wj <= 0.165) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): t_0 = (x * -4.0) + (x * 1.5) tmp = 0 if wj <= -0.49: tmp = (x / math.exp(wj)) / (wj + 1.0) elif wj <= 0.165: tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (wj <= -0.49) tmp = Float64(Float64(x / exp(wj)) / Float64(wj + 1.0)); elseif (wj <= 0.165) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0)))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = 0.0; if (wj <= -0.49) tmp = (x / exp(wj)) / (wj + 1.0); elseif (wj <= 0.165) tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -0.49], N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 0.165], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq -0.49:\\
\;\;\;\;\frac{\frac{x}{e^{wj}}}{wj + 1}\\
\mathbf{elif}\;wj \leq 0.165:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_0\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < -0.48999999999999999Initial program 33.3%
distribute-rgt1-in100.0%
associate-/l/100.0%
div-sub33.3%
associate-/l*33.3%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 84.4%
associate-/r*84.4%
+-commutative84.4%
Simplified84.4%
if -0.48999999999999999 < wj < 0.165000000000000008Initial program 77.7%
distribute-rgt1-in77.7%
associate-/l/77.8%
div-sub77.8%
associate-/l*77.8%
*-inverses77.8%
*-rgt-identity77.8%
Simplified77.8%
Taylor expanded in wj around 0 99.0%
if 0.165000000000000008 < wj Initial program 0.0%
distribute-rgt1-in0.0%
associate-/l/0.0%
div-sub0.0%
associate-/l*0.0%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
+-commutative100.0%
Simplified100.0%
Final simplification98.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))))
(if (<= wj 0.12)
(+
x
(*
wj
(-
(*
wj
(-
(+
1.0
(*
wj
(- -1.0 (+ (* x -3.0) (+ (* -2.0 t_0) (* x 0.6666666666666666))))))
t_0))
(* x 2.0))))
(+ wj (/ wj (- -1.0 wj))))))
double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 0.12) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = (x * (-4.0d0)) + (x * 1.5d0)
if (wj <= 0.12d0) then
tmp = x + (wj * ((wj * ((1.0d0 + (wj * ((-1.0d0) - ((x * (-3.0d0)) + (((-2.0d0) * t_0) + (x * 0.6666666666666666d0)))))) - t_0)) - (x * 2.0d0)))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = (x * -4.0) + (x * 1.5);
double tmp;
if (wj <= 0.12) {
tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): t_0 = (x * -4.0) + (x * 1.5) tmp = 0 if wj <= 0.12: tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
function code(wj, x) t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5)) tmp = 0.0 if (wj <= 0.12) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 + Float64(wj * Float64(-1.0 - Float64(Float64(x * -3.0) + Float64(Float64(-2.0 * t_0) + Float64(x * 0.6666666666666666)))))) - t_0)) - Float64(x * 2.0)))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) t_0 = (x * -4.0) + (x * 1.5); tmp = 0.0; if (wj <= 0.12) tmp = x + (wj * ((wj * ((1.0 + (wj * (-1.0 - ((x * -3.0) + ((-2.0 * t_0) + (x * 0.6666666666666666)))))) - t_0)) - (x * 2.0))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, 0.12], N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 + N[(wj * N[(-1.0 - N[(N[(x * -3.0), $MachinePrecision] + N[(N[(-2.0 * t$95$0), $MachinePrecision] + N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
\mathbf{if}\;wj \leq 0.12:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(\left(1 + wj \cdot \left(-1 - \left(x \cdot -3 + \left(-2 \cdot t\_0 + x \cdot 0.6666666666666666\right)\right)\right)\right) - t\_0\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.12Initial program 76.7%
distribute-rgt1-in78.3%
associate-/l/78.3%
div-sub76.7%
associate-/l*76.7%
*-inverses78.3%
*-rgt-identity78.3%
Simplified78.3%
Taylor expanded in wj around 0 96.8%
if 0.12 < wj Initial program 0.0%
distribute-rgt1-in0.0%
associate-/l/0.0%
div-sub0.0%
associate-/l*0.0%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
+-commutative100.0%
Simplified100.0%
Final simplification96.8%
(FPCore (wj x) :precision binary64 (if (<= wj 0.12) (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
    // Cubic-in-wj approximation below the 0.12 threshold; above it,
    // the x-independent limit wj + wj/(-1 - wj). Same comparison
    // polarity and arithmetic as the reference.
    return (wj <= 0.12)
        ? x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)))
        : wj + (wj / (-1.0 - wj));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.12d0) then
tmp = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 0.12) {
tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 0.12: tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.12) tmp = Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0)))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.12) tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.12], N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.12:\\
\;\;\;\;x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 0.12Initial program 76.7%
distribute-rgt1-in78.3%
associate-/l/78.3%
div-sub76.7%
associate-/l*76.7%
*-inverses78.3%
*-rgt-identity78.3%
Simplified78.3%
Taylor expanded in wj around 0 96.8%
Taylor expanded in x around 0 96.4%
Simplified96.4%
if 0.12 < wj Initial program 0.0%
distribute-rgt1-in0.0%
associate-/l/0.0%
div-sub0.0%
associate-/l*0.0%
*-inverses100.0%
*-rgt-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
+-commutative100.0%
Simplified100.0%
Final simplification96.5%
(FPCore (wj x) :precision binary64 (if (<= wj 1.8e-5) (+ x (* wj (- wj (* x 2.0)))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
    // Quadratic-in-wj form below the 1.8e-5 threshold; above it, the
    // x-independent limit wj + wj/(-1 - wj).
    if (wj <= 1.8e-5) {
        return x + (wj * (wj - (x * 2.0)));
    }
    return wj + (wj / (-1.0 - wj));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 1.8d-5) then
tmp = x + (wj * (wj - (x * 2.0d0)))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 1.8e-5) {
tmp = x + (wj * (wj - (x * 2.0)));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 1.8e-5: tmp = x + (wj * (wj - (x * 2.0))) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 1.8e-5) tmp = Float64(x + Float64(wj * Float64(wj - Float64(x * 2.0)))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 1.8e-5) tmp = x + (wj * (wj - (x * 2.0))); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 1.8e-5], N[(x + N[(wj * N[(wj - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1.8 \cdot 10^{-5}:\\
\;\;\;\;x + wj \cdot \left(wj - x \cdot 2\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 1.80000000000000005e-5Initial program 76.6%
distribute-rgt1-in78.2%
associate-/l/78.2%
div-sub76.6%
associate-/l*76.6%
*-inverses78.2%
*-rgt-identity78.2%
Simplified78.2%
Taylor expanded in wj around 0 97.1%
Taylor expanded in x around 0 96.8%
Simplified96.8%
Taylor expanded in wj around 0 96.6%
if 1.80000000000000005e-5 < wj Initial program 25.1%
distribute-rgt1-in25.1%
associate-/l/25.2%
div-sub25.2%
associate-/l*25.2%
*-inverses96.6%
*-rgt-identity96.6%
Simplified96.6%
Taylor expanded in x around 0 82.5%
+-commutative82.5%
Simplified82.5%
Final simplification96.2%
(FPCore (wj x) :precision binary64 (if (<= wj 3e-6) (+ x (* -2.0 (* wj x))) (+ wj (/ wj (- -1.0 wj)))))
double code(double wj, double x) {
    // Below the 3e-6 threshold the update reduces to x*(1 - 2*wj),
    // evaluated as x + (-2*(wj*x)); above it, to wj + wj/(-1 - wj).
    if (wj <= 3e-6) {
        return x + (-2.0 * (wj * x));
    }
    return wj + (wj / (-1.0 - wj));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 3d-6) then
tmp = x + ((-2.0d0) * (wj * x))
else
tmp = wj + (wj / ((-1.0d0) - wj))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 3e-6) {
tmp = x + (-2.0 * (wj * x));
} else {
tmp = wj + (wj / (-1.0 - wj));
}
return tmp;
}
def code(wj, x): tmp = 0 if wj <= 3e-6: tmp = x + (-2.0 * (wj * x)) else: tmp = wj + (wj / (-1.0 - wj)) return tmp
function code(wj, x) tmp = 0.0 if (wj <= 3e-6) tmp = Float64(x + Float64(-2.0 * Float64(wj * x))); else tmp = Float64(wj + Float64(wj / Float64(-1.0 - wj))); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 3e-6) tmp = x + (-2.0 * (wj * x)); else tmp = wj + (wj / (-1.0 - wj)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 3e-6], N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 3 \cdot 10^{-6}:\\
\;\;\;\;x + -2 \cdot \left(wj \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{wj}{-1 - wj}\\
\end{array}
\end{array}
if wj < 3.0000000000000001e-6Initial program 76.6%
distribute-rgt1-in78.2%
associate-/l/78.2%
div-sub76.6%
associate-/l*76.6%
*-inverses78.2%
*-rgt-identity78.2%
Simplified78.2%
Taylor expanded in wj around 0 85.3%
*-commutative85.3%
Simplified85.3%
if 3.0000000000000001e-6 < wj Initial program 25.1%
distribute-rgt1-in25.1%
associate-/l/25.2%
div-sub25.2%
associate-/l*25.2%
*-inverses96.6%
*-rgt-identity96.6%
Simplified96.6%
Taylor expanded in x around 0 82.5%
+-commutative82.5%
Simplified82.5%
Final simplification85.2%
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
/* Herbie alternative: degree-1 Taylor form of the step, x - 2*wj*x. */
double code(double wj, double x) {
    double correction = -2.0 * (wj * x);
    return x + correction;
}
!> Herbie alternative: degree-1 Taylor form of the step, x - 2*wj*x.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (wj * x))
end function
// Herbie alternative: degree-1 Taylor form of the step, x - 2*wj*x.
public static double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
def code(wj, x):
    """Degree-1 Taylor form of the Newton step: x - 2*wj*x."""
    correction = -2.0 * (wj * x)
    return x + correction
# Herbie alternative (Julia): degree-1 Taylor form of the step, x - 2*wj*x.
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(wj * x))) end
% Herbie alternative (MATLAB): degree-1 Taylor form of the step, x - 2*wj*x.
function tmp = code(wj, x) tmp = x + (-2.0 * (wj * x)); end
(* Herbie alternative (Mathematica): degree-1 Taylor form of the step, x - 2*wj*x. *)
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Derivation:
  Initial program: 75.2%
  distribute-rgt1-in: 76.7%
  associate-/l/: 76.8%
  div-sub: 75.2%
  associate-/l*: 75.2%
  *-inverses: 78.7%
  *-rgt-identity: 78.7%
  Simplified: 78.7%
  Taylor expanded in wj around 0: 83.2%
  *-commutative: 83.2%
  Simplified: 83.2%
Final simplification: 83.2%
; Herbie alternative (FPCore): degenerate zeroth-order form — just x.
(FPCore (wj x) :precision binary64 x)
/* Degenerate Herbie alternative: zeroth-order approximation, returns x. */
double code(double wj, double x) {
    (void) wj; /* unused by this alternative */
    return x;
}
!> Degenerate Herbie alternative: zeroth-order approximation, returns x (wj unused).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
// Degenerate Herbie alternative: zeroth-order approximation, returns x (wj unused).
public static double code(double wj, double x) {
return x;
}
def code(wj, x):
    """Degenerate zeroth-order alternative: ignore wj, return x unchanged."""
    return x
# Degenerate Herbie alternative (Julia): returns x (wj unused).
function code(wj, x) return x end
% Degenerate Herbie alternative (MATLAB): returns x (wj unused).
function tmp = code(wj, x) tmp = x; end
(* Degenerate Herbie alternative (Mathematica): returns x (wj unused). *)
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Derivation:
  Initial program: 75.2%
  distribute-rgt1-in: 76.7%
  associate-/l/: 76.8%
  div-sub: 75.2%
  associate-/l*: 75.2%
  *-inverses: 78.7%
  *-rgt-identity: 78.7%
  Simplified: 78.7%
  Taylor expanded in wj around 0: 82.5%
; Herbie alternative (FPCore): degenerate form — just wj (x unused).
(FPCore (wj x) :precision binary64 wj)
/* Degenerate Herbie alternative: identity in wj, ignores x. */
double code(double wj, double x) {
    (void) x; /* unused by this alternative */
    return wj;
}
!> Degenerate Herbie alternative: identity in wj (x unused).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
// Degenerate Herbie alternative: identity in wj (x unused).
public static double code(double wj, double x) {
return wj;
}
def code(wj, x):
    """Degenerate alternative: identity in wj; x is ignored."""
    return wj
# Degenerate Herbie alternative (Julia): returns wj (x unused).
function code(wj, x) return wj end
% Degenerate Herbie alternative (MATLAB): returns wj (x unused).
function tmp = code(wj, x) tmp = wj; end
(* Degenerate Herbie alternative (Mathematica): returns wj (x unused). *)
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Derivation:
  Initial program: 75.2%
  distribute-rgt1-in: 76.7%
  associate-/l/: 76.8%
  div-sub: 75.2%
  associate-/l*: 75.2%
  *-inverses: 78.7%
  *-rgt-identity: 78.7%
  Simplified: 78.7%
  Taylor expanded in wj around inf: 4.6%
; Herbie alternative (FPCore): constant -1.0 (limit form; both inputs unused).
(FPCore (wj x) :precision binary64 -1.0)
/* Degenerate Herbie alternative: constant -1.0; both inputs ignored. */
double code(double wj, double x) {
    (void) wj;
    (void) x;
    return -1.0;
}
!> Degenerate Herbie alternative: constant -1.0d0 (both inputs unused).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
// Degenerate Herbie alternative: constant -1.0 (both inputs unused).
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x):
    """Degenerate constant alternative: always -1.0; both inputs ignored."""
    return -1.0
# Degenerate Herbie alternative (Julia): constant -1.0 (both inputs unused).
function code(wj, x) return -1.0 end
% Degenerate Herbie alternative (MATLAB): constant -1.0 (both inputs unused).
function tmp = code(wj, x) tmp = -1.0; end
(* Degenerate Herbie alternative (Mathematica): constant -1.0 (both inputs unused). *)
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Derivation:
  Initial program: 75.2%
  distribute-rgt1-in: 76.7%
  associate-/l/: 76.8%
  div-sub: 75.2%
  associate-/l*: 75.2%
  *-inverses: 78.7%
  *-rgt-identity: 78.7%
  Simplified: 78.7%
  Taylor expanded in wj around inf: 4.8%
  Taylor expanded in wj around 0: 3.3%
; Herbie alternative (FPCore): residual form of the Newton step,
; wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)), which avoids the cancellation
; in (wj*e^wj - x)/(e^wj + wj*e^wj).
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
!> Herbie alternative: residual form of the Newton step,
!> wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
// Herbie alternative: residual form of the Newton step,
// wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x):
    """Residual form of the Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj))."""
    ew = math.exp(wj)
    rational_term = wj / (wj + 1.0)
    residual_term = x / (ew + (wj * ew))
    return wj - (rational_term - residual_term)
# Herbie alternative (Julia): residual form wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
% Herbie alternative (MATLAB): residual form wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
(* Herbie alternative (Mathematica): residual form wj - (wj/(wj+1) - x/(Exp[wj] + wj Exp[wj])). *)
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024130
; Input core: one Newton-iteration step for Lambert W (w*e^w = x),
; wj - (wj*e^wj - x)/(e^wj + wj*e^wj).  The :alt clause records the
; hand-written platform alternative that hoists e^wj into a let binding.
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))