
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step toward solving w * exp(w) = x (Lambert W).
! Returns wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
real(8) function code(wj, x)
real(8), intent (in) :: wj    ! current iterate
real(8), intent (in) :: x     ! target value of w*exp(w)
real(8) :: t_0                ! wj * exp(wj)
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton-iteration step toward solving w * e^w = x (Lambert W).
 *
 * @param wj current iterate
 * @param x  target value of w * e^w
 * @return wj - (wj*e^wj - x) / (e^wj + wj*e^wj)
 */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);      // e^wj, computed once (deterministic)
    final double wjEw = wj * ew;         // wj * e^wj
    final double residual = wjEw - x;    // f(wj)
    final double derivative = ew + wjEw; // f'(wj)
    return wj - residual / derivative;
}
def code(wj, x):
    """One Newton-iteration step toward solving w * e**w = x (Lambert W).

    Returns wj - (wj*e**wj - x) / (e**wj + wj*e**wj).

    The generated snippet had the assignment and return fused onto the
    `def` line, which is a SyntaxError; this is the same body reformatted
    onto separate lines.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton-iteration step toward solving w * exp(w) = x (Lambert W).
# exp(wj) is hoisted into `ew`; all roundings match the generated original.
function code(wj, x)
    ew = exp(wj)
    t_0 = Float64(wj * ew)
    num = Float64(t_0 - x)
    den = Float64(ew + t_0)
    return Float64(wj - Float64(num / den))
end
% One Newton-iteration step toward solving w * exp(w) = x (Lambert W).
function tmp = code(wj, x)
    ew = exp(wj);                      % e^wj, reused in numerator and denominator
    t_0 = wj * ew;
    tmp = wj - ((t_0 - x) / (ew + t_0));
end
(* One Newton step for w*E^w = x, each operation rounded at $MachinePrecision. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
/* Initial program (repeated in the alternatives listing): one Newton step
 * toward solving w*e^w = x, i.e. wj - (wj*e^wj - x)/(e^wj + wj*e^wj). */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! Initial program (repeated listing): one Newton step for w*exp(w) = x.
real(8) function code(wj, x)
real(8), intent (in) :: wj    ! current iterate
real(8), intent (in) :: x     ! target value of w*exp(w)
real(8) :: t_0                ! wj * exp(wj)
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** Initial program (repeated listing): one Newton step for w*e^w = x. */
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton-iteration step toward solving w * e**w = x (Lambert W).

    Returns wj - (wj*e**wj - x) / (e**wj + wj*e**wj).

    Reformatted: the generated one-liner fused the assignment and return
    after the colon, which is invalid Python syntax.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# Initial program (repeated listing): one Newton step for w*exp(w) = x.
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
% Initial program (repeated listing): one Newton step for w*exp(w) = x.
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
(* Initial program (repeated listing): Newton step at $MachinePrecision. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (exp (- wj))))
(if (<= wj -4.5e-6)
(fma (fma wj (exp wj) (- x)) (/ t_0 (- -1.0 wj)) wj)
(if (<= wj 1.65e-7)
(fma
wj
(fma wj (fma x (fma wj -2.6666666666666665 2.5) (- 1.0 wj)) (* x -2.0))
x)
(- wj (* x (- (/ wj (fma x wj x)) (/ t_0 (+ wj 1.0)))))))))
/* Herbie-rewritten Newton step, branch-specialized on wj to avoid the
 * cancellation in (t_0 - x)/(e^wj + t_0) of the initial program:
 *   wj <= -4.5e-6 : fma form scaled by exp(-wj)/(-1 - wj) (see rewrite log)
 *   wj <= 1.65e-7 : Taylor polynomial around wj = 0 (near-zero regime)
 *   otherwise     : series form using exp(-wj)/(wj + 1)
 * Thresholds come from Herbie's regime inference; exact fma/rounding order
 * is deliberate and must not be rearranged. */
double code(double wj, double x) {
double t_0 = exp(-wj);
double tmp;
if (wj <= -4.5e-6) {
tmp = fma(fma(wj, exp(wj), -x), (t_0 / (-1.0 - wj)), wj);
} else if (wj <= 1.65e-7) {
tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), (1.0 - wj)), (x * -2.0)), x);
} else {
tmp = wj - (x * ((wj / fma(x, wj, x)) - (t_0 / (wj + 1.0))));
}
return tmp;
}
function code(wj, x) t_0 = exp(Float64(-wj)) tmp = 0.0 if (wj <= -4.5e-6) tmp = fma(fma(wj, exp(wj), Float64(-x)), Float64(t_0 / Float64(-1.0 - wj)), wj); elseif (wj <= 1.65e-7) tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), Float64(1.0 - wj)), Float64(x * -2.0)), x); else tmp = Float64(wj - Float64(x * Float64(Float64(wj / fma(x, wj, x)) - Float64(t_0 / Float64(wj + 1.0))))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[Exp[(-wj)], $MachinePrecision]}, If[LessEqual[wj, -4.5e-6], N[(N[(wj * N[Exp[wj], $MachinePrecision] + (-x)), $MachinePrecision] * N[(t$95$0 / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision], If[LessEqual[wj, 1.65e-7], N[(wj * N[(wj * N[(x * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision] + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(wj - N[(x * N[(N[(wj / N[(x * wj + x), $MachinePrecision]), $MachinePrecision] - N[(t$95$0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{-wj}\\
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(wj, e^{wj}, -x\right), \frac{t\_0}{-1 - wj}, wj\right)\\
\mathbf{elif}\;wj \leq 1.65 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), 1 - wj\right), x \cdot -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - x \cdot \left(\frac{wj}{\mathsf{fma}\left(x, wj, x\right)} - \frac{t\_0}{wj + 1}\right)\\
\end{array}
\end{array}
if wj < -4.50000000000000011e-6: Initial program 68.9%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-/.f64N/A
distribute-neg-frac2N/A
div-invN/A
lift--.f64N/A
flip--N/A
div-invN/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites97.4%
lift-fma.f64N/A
Applied rewrites97.5%
if -4.50000000000000011e-6 < wj < 1.6500000000000001e-7Initial program 76.4%
Taylor expanded in wj around 0
Applied rewrites99.9%
Taylor expanded in x around 0
Applied rewrites99.9%
if 1.6500000000000001e-7 < wj Initial program 56.9%
Taylor expanded in x around inf
sub-negN/A
+-commutativeN/A
neg-sub0N/A
associate-+l-N/A
unsub-negN/A
mul-1-negN/A
+-commutativeN/A
Applied rewrites100.0%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_0) (+ (exp wj) t_0))) -2e+50)
(/ x (* (exp wj) (+ wj 1.0)))
(fma
wj
(fma wj (fma x (fma wj -2.6666666666666665 2.5) (- 1.0 wj)) (* x -2.0))
x))))
/* Herbie alternative: branches on the magnitude of the Newton update itself.
 * When the update is hugely negative (<= -2e50) it uses the asymptotic form
 * x / (e^wj * (wj + 1)) (Taylor in x around inf, per the rewrite log);
 * otherwise the Taylor polynomial around wj = 0. Note t_0 and the branch
 * condition are always evaluated, including exp(wj). */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj + ((x - t_0) / (exp(wj) + t_0))) <= -2e+50) {
tmp = x / (exp(wj) * (wj + 1.0));
} else {
tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), (1.0 - wj)), (x * -2.0)), x);
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) <= -2e+50) tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0))); else tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), Float64(1.0 - wj)), Float64(x * -2.0)), x); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -2e+50], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj * N[(wj * N[(x * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision] + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t\_0}{e^{wj} + t\_0} \leq -2 \cdot 10^{+50}:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), 1 - wj\right), x \cdot -2\right), x\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -2.0000000000000002e50Initial program 98.3%
Taylor expanded in x around inf
lower-/.f64N/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-*.f64N/A
lower-exp.f64N/A
+-commutativeN/A
lower-+.f6499.9
Applied rewrites99.9%
if -2.0000000000000002e50 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 68.7%
Taylor expanded in wj around 0
Applied rewrites97.5%
Taylor expanded in x around 0
Applied rewrites97.5%
Final simplification98.1%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ (exp (- wj)) (+ wj 1.0))))
(if (<= wj -4.5e-6)
(fma (- t_0 (/ wj (fma wj x x))) x (/ (* wj x) x))
(if (<= wj 1.65e-7)
(fma
wj
(fma wj (fma x (fma wj -2.6666666666666665 2.5) (- 1.0 wj)) (* x -2.0))
x)
(- wj (* x (- (/ wj (fma x wj x)) t_0)))))))
/* Herbie alternative using t_0 = exp(-wj)/(wj + 1), branch-specialized on wj.
 * The (wj * x) / x term in the first branch is Herbie's generated form, not
 * a plain wj — it differs for x = 0/inf/NaN, so it is kept verbatim.
 *   wj <= -4.5e-6 : fma combination of t_0 and wj/(wj*x + x)
 *   wj <= 1.65e-7 : Taylor polynomial around wj = 0
 *   otherwise     : wj - x*(wj/fma(x,wj,x) - t_0) */
double code(double wj, double x) {
double t_0 = exp(-wj) / (wj + 1.0);
double tmp;
if (wj <= -4.5e-6) {
tmp = fma((t_0 - (wj / fma(wj, x, x))), x, ((wj * x) / x));
} else if (wj <= 1.65e-7) {
tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), (1.0 - wj)), (x * -2.0)), x);
} else {
tmp = wj - (x * ((wj / fma(x, wj, x)) - t_0));
}
return tmp;
}
function code(wj, x) t_0 = Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) tmp = 0.0 if (wj <= -4.5e-6) tmp = fma(Float64(t_0 - Float64(wj / fma(wj, x, x))), x, Float64(Float64(wj * x) / x)); elseif (wj <= 1.65e-7) tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), Float64(1.0 - wj)), Float64(x * -2.0)), x); else tmp = Float64(wj - Float64(x * Float64(Float64(wj / fma(x, wj, x)) - t_0))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -4.5e-6], N[(N[(t$95$0 - N[(wj / N[(wj * x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x + N[(N[(wj * x), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 1.65e-7], N[(wj * N[(wj * N[(x * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision] + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(wj - N[(x * N[(N[(wj / N[(x * wj + x), $MachinePrecision]), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{e^{-wj}}{wj + 1}\\
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6}:\\
\;\;\;\;\mathsf{fma}\left(t\_0 - \frac{wj}{\mathsf{fma}\left(wj, x, x\right)}, x, \frac{wj \cdot x}{x}\right)\\
\mathbf{elif}\;wj \leq 1.65 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), 1 - wj\right), x \cdot -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - x \cdot \left(\frac{wj}{\mathsf{fma}\left(x, wj, x\right)} - t\_0\right)\\
\end{array}
\end{array}
if wj < -4.50000000000000011e-6Initial program 68.9%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-/.f64N/A
distribute-neg-frac2N/A
div-invN/A
lift--.f64N/A
flip--N/A
div-invN/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites97.4%
Taylor expanded in x around inf
lower-*.f64N/A
+-commutativeN/A
+-commutativeN/A
associate-+l+N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
Applied rewrites96.9%
Applied rewrites97.5%
if -4.50000000000000011e-6 < wj < 1.6500000000000001e-7Initial program 76.4%
Taylor expanded in wj around 0
Applied rewrites99.9%
Taylor expanded in x around 0
Applied rewrites99.9%
if 1.6500000000000001e-7 < wj Initial program 56.9%
Taylor expanded in x around inf
sub-negN/A
+-commutativeN/A
neg-sub0N/A
associate-+l-N/A
unsub-negN/A
mul-1-negN/A
+-commutativeN/A
Applied rewrites100.0%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(if (<= wj -4.5e-6)
(- wj (fma x (/ -1.0 (* (exp wj) (+ wj 1.0))) (/ wj (+ wj 1.0))))
(if (<= wj 1.65e-7)
(fma
wj
(fma wj (fma x (fma wj -2.6666666666666665 2.5) (- 1.0 wj)) (* x -2.0))
x)
(- wj (* x (- (/ wj (fma x wj x)) (/ (exp (- wj)) (+ wj 1.0))))))))
/* Herbie alternative, branch-specialized on wj (no shared temporary):
 *   wj <= -4.5e-6 : wj - fma(x, -1/(e^wj*(wj+1)), wj/(wj+1))
 *   wj <= 1.65e-7 : Taylor polynomial around wj = 0
 *   otherwise     : wj - x*(wj/fma(x,wj,x) - e^-wj/(wj+1))
 * fma structure and rounding order are deliberate; do not rearrange. */
double code(double wj, double x) {
double tmp;
if (wj <= -4.5e-6) {
tmp = wj - fma(x, (-1.0 / (exp(wj) * (wj + 1.0))), (wj / (wj + 1.0)));
} else if (wj <= 1.65e-7) {
tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), (1.0 - wj)), (x * -2.0)), x);
} else {
tmp = wj - (x * ((wj / fma(x, wj, x)) - (exp(-wj) / (wj + 1.0))));
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -4.5e-6) tmp = Float64(wj - fma(x, Float64(-1.0 / Float64(exp(wj) * Float64(wj + 1.0))), Float64(wj / Float64(wj + 1.0)))); elseif (wj <= 1.65e-7) tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), Float64(1.0 - wj)), Float64(x * -2.0)), x); else tmp = Float64(wj - Float64(x * Float64(Float64(wj / fma(x, wj, x)) - Float64(exp(Float64(-wj)) / Float64(wj + 1.0))))); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -4.5e-6], N[(wj - N[(x * N[(-1.0 / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 1.65e-7], N[(wj * N[(wj * N[(x * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision] + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(wj - N[(x * N[(N[(wj / N[(x * wj + x), $MachinePrecision]), $MachinePrecision] - N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6}:\\
\;\;\;\;wj - \mathsf{fma}\left(x, \frac{-1}{e^{wj} \cdot \left(wj + 1\right)}, \frac{wj}{wj + 1}\right)\\
\mathbf{elif}\;wj \leq 1.65 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), 1 - wj\right), x \cdot -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - x \cdot \left(\frac{wj}{\mathsf{fma}\left(x, wj, x\right)} - \frac{e^{-wj}}{wj + 1}\right)\\
\end{array}
\end{array}
if wj < -4.50000000000000011e-6Initial program 68.9%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lift--.f64N/A
sub-negN/A
+-commutativeN/A
distribute-rgt-inN/A
distribute-lft-neg-inN/A
distribute-rgt-neg-inN/A
distribute-frac-neg2N/A
div-invN/A
lower-fma.f64N/A
Applied rewrites97.5%
if -4.50000000000000011e-6 < wj < 1.6500000000000001e-7Initial program 76.4%
Taylor expanded in wj around 0
Applied rewrites99.9%
Taylor expanded in x around 0
Applied rewrites99.9%
if 1.6500000000000001e-7 < wj Initial program 56.9%
Taylor expanded in x around inf
sub-negN/A
+-commutativeN/A
neg-sub0N/A
associate-+l-N/A
unsub-negN/A
mul-1-negN/A
+-commutativeN/A
Applied rewrites100.0%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* (exp wj) (+ wj 1.0))))
(if (<= wj -4.5e-6)
(- wj (fma x (/ -1.0 t_0) (/ wj (+ wj 1.0))))
(if (<= wj 1.65e-7)
(fma
wj
(fma wj (fma x (fma wj -2.6666666666666665 2.5) (- 1.0 wj)) (* x -2.0))
x)
(+ wj (+ (/ x t_0) (/ wj (- -1.0 wj))))))))
/* Herbie alternative sharing t_0 = e^wj * (wj + 1) across branches:
 *   wj <= -4.5e-6 : wj - fma(x, -1/t_0, wj/(wj+1))
 *   wj <= 1.65e-7 : Taylor polynomial around wj = 0
 *   otherwise     : wj + (x/t_0 + wj/(-1 - wj))
 * t_0 (and so exp(wj)) is computed unconditionally. */
double code(double wj, double x) {
double t_0 = exp(wj) * (wj + 1.0);
double tmp;
if (wj <= -4.5e-6) {
tmp = wj - fma(x, (-1.0 / t_0), (wj / (wj + 1.0)));
} else if (wj <= 1.65e-7) {
tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), (1.0 - wj)), (x * -2.0)), x);
} else {
tmp = wj + ((x / t_0) + (wj / (-1.0 - wj)));
}
return tmp;
}
function code(wj, x) t_0 = Float64(exp(wj) * Float64(wj + 1.0)) tmp = 0.0 if (wj <= -4.5e-6) tmp = Float64(wj - fma(x, Float64(-1.0 / t_0), Float64(wj / Float64(wj + 1.0)))); elseif (wj <= 1.65e-7) tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), Float64(1.0 - wj)), Float64(x * -2.0)), x); else tmp = Float64(wj + Float64(Float64(x / t_0) + Float64(wj / Float64(-1.0 - wj)))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -4.5e-6], N[(wj - N[(x * N[(-1.0 / t$95$0), $MachinePrecision] + N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[wj, 1.65e-7], N[(wj * N[(wj * N[(x * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision] + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(wj + N[(N[(x / t$95$0), $MachinePrecision] + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{wj} \cdot \left(wj + 1\right)\\
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6}:\\
\;\;\;\;wj - \mathsf{fma}\left(x, \frac{-1}{t\_0}, \frac{wj}{wj + 1}\right)\\
\mathbf{elif}\;wj \leq 1.65 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), 1 - wj\right), x \cdot -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \left(\frac{x}{t\_0} + \frac{wj}{-1 - wj}\right)\\
\end{array}
\end{array}
if wj < -4.50000000000000011e-6Initial program 68.9%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lift--.f64N/A
sub-negN/A
+-commutativeN/A
distribute-rgt-inN/A
distribute-lft-neg-inN/A
distribute-rgt-neg-inN/A
distribute-frac-neg2N/A
div-invN/A
lower-fma.f64N/A
Applied rewrites97.5%
if -4.50000000000000011e-6 < wj < 1.6500000000000001e-7Initial program 76.4%
Taylor expanded in wj around 0
Applied rewrites99.9%
Taylor expanded in x around 0
Applied rewrites99.9%
if 1.6500000000000001e-7 < wj Initial program 56.9%
lift-/.f64N/A
lift--.f64N/A
div-subN/A
lower--.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-*.f64N/A
distribute-rgt1-inN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
lower-/.f64N/A
lower-+.f64N/A
lower-/.f6499.8
lift-+.f64N/A
lift-*.f64N/A
distribute-rgt1-inN/A
*-commutativeN/A
lower-*.f64N/A
lower-+.f6499.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ wj (+ (/ x (* (exp wj) (+ wj 1.0))) (/ wj (- -1.0 wj))))))
(if (<= wj -4.5e-6)
t_0
(if (<= wj 1.65e-7)
(fma
wj
(fma wj (fma x (fma wj -2.6666666666666665 2.5) (- 1.0 wj)) (* x -2.0))
x)
t_0))))
/* Herbie alternative: the full rearranged step t_0 is used for both outer
 * regimes; only the middle regime (|wj| tiny) switches to the Taylor
 * polynomial. t_0 is computed unconditionally, even when the middle branch
 * is taken. */
double code(double wj, double x) {
double t_0 = wj + ((x / (exp(wj) * (wj + 1.0))) + (wj / (-1.0 - wj)));
double tmp;
if (wj <= -4.5e-6) {
tmp = t_0;
} else if (wj <= 1.65e-7) {
tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), (1.0 - wj)), (x * -2.0)), x);
} else {
tmp = t_0;
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj + Float64(Float64(x / Float64(exp(wj) * Float64(wj + 1.0))) + Float64(wj / Float64(-1.0 - wj)))) tmp = 0.0 if (wj <= -4.5e-6) tmp = t_0; elseif (wj <= 1.65e-7) tmp = fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), Float64(1.0 - wj)), Float64(x * -2.0)), x); else tmp = t_0; end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj + N[(N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, -4.5e-6], t$95$0, If[LessEqual[wj, 1.65e-7], N[(wj * N[(wj * N[(x * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision] + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj + \left(\frac{x}{e^{wj} \cdot \left(wj + 1\right)} + \frac{wj}{-1 - wj}\right)\\
\mathbf{if}\;wj \leq -4.5 \cdot 10^{-6}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;wj \leq 1.65 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), 1 - wj\right), x \cdot -2\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if wj < -4.50000000000000011e-6 or 1.6500000000000001e-7 < wj Initial program 62.9%
lift-/.f64N/A
lift--.f64N/A
div-subN/A
lower--.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-*.f64N/A
distribute-rgt1-inN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
lower-/.f64N/A
lower-+.f64N/A
lower-/.f6484.2
lift-+.f64N/A
lift-*.f64N/A
distribute-rgt1-inN/A
*-commutativeN/A
lower-*.f64N/A
lower-+.f6498.5
Applied rewrites98.5%
if -4.50000000000000011e-6 < wj < 1.6500000000000001e-7Initial program 76.4%
Taylor expanded in wj around 0
Applied rewrites99.9%
Taylor expanded in x around 0
Applied rewrites99.9%
Final simplification99.8%
(FPCore (wj x) :precision binary64 (fma wj (fma wj (fma x (fma wj -2.6666666666666665 2.5) (- 1.0 wj)) (* x -2.0)) x))
double code(double wj, double x) {
return fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), (1.0 - wj)), (x * -2.0)), x);
}
# Taylor-around-0 polynomial of the Newton step as a nested fma chain.
function code(wj, x) return fma(wj, fma(wj, fma(x, fma(wj, -2.6666666666666665, 2.5), Float64(1.0 - wj)), Float64(x * -2.0)), x) end
code[wj_, x_] := N[(wj * N[(wj * N[(x * N[(wj * -2.6666666666666665 + 2.5), $MachinePrecision] + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, -2.6666666666666665, 2.5\right), 1 - wj\right), x \cdot -2\right), x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
Applied rewrites95.9%
Taylor expanded in x around 0
Applied rewrites95.9%
(FPCore (wj x) :precision binary64 (fma wj wj (fma wj (* x (fma wj 2.5 -2.0)) x)))
/* Herbie alternative: degree-2 Taylor form wj^2 + wj*x*(2.5*wj - 2) + x,
 * evaluated as nested fma; operation order is deliberate. */
double code(double wj, double x) {
return fma(wj, wj, fma(wj, (x * fma(wj, 2.5, -2.0)), x));
}
# Degree-2 Taylor form wj^2 + wj*x*(2.5*wj - 2) + x as nested fma.
function code(wj, x) return fma(wj, wj, fma(wj, Float64(x * fma(wj, 2.5, -2.0)), x)) end
code[wj_, x_] := N[(wj * wj + N[(wj * N[(x * N[(wj * 2.5 + -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, wj, \mathsf{fma}\left(wj, x \cdot \mathsf{fma}\left(wj, 2.5, -2\right), x\right)\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.2%
Taylor expanded in x around 0
Applied rewrites95.2%
(FPCore (wj x) :precision binary64 (fma wj (fma x (fma wj 2.5 -2.0) wj) x))
/* Herbie alternative: wj*(x*(2.5*wj - 2) + wj) + x as nested fma;
 * same polynomial as the previous alternative, different association. */
double code(double wj, double x) {
return fma(wj, fma(x, fma(wj, 2.5, -2.0), wj), x);
}
# wj*(x*(2.5*wj - 2) + wj) + x as nested fma (reassociated polynomial).
function code(wj, x) return fma(wj, fma(x, fma(wj, 2.5, -2.0), wj), x) end
code[wj_, x_] := N[(wj * N[(x * N[(wj * 2.5 + -2.0), $MachinePrecision] + wj), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, \mathsf{fma}\left(x, \mathsf{fma}\left(wj, 2.5, -2\right), wj\right), x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
Applied rewrites95.9%
Taylor expanded in wj around 0
+-commutativeN/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-rgt-inN/A
distribute-rgt-outN/A
metadata-evalN/A
*-commutativeN/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-rgt-inN/A
+-commutativeN/A
Applied rewrites95.2%
(FPCore (wj x) :precision binary64 (fma wj (* wj (- 1.0 wj)) x))
/* Herbie alternative: wj^2*(1 - wj) + x via fma (Taylor around wj = 0,
 * then around x = 0 per the derivation log). */
double code(double wj, double x) {
return fma(wj, (wj * (1.0 - wj)), x);
}
# wj^2*(1 - wj) + x via fma (Taylor truncation of the step).
function code(wj, x) return fma(wj, Float64(wj * Float64(1.0 - wj)), x) end
code[wj_, x_] := N[(wj * N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, wj \cdot \left(1 - wj\right), x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
Applied rewrites95.9%
Taylor expanded in x around 0
Applied rewrites95.1%
(FPCore (wj x) :precision binary64 (fma wj (fma x -2.0 wj) x))
/* Herbie alternative: wj*(wj - 2x) + x via nested fma (further-truncated
 * Taylor form). */
double code(double wj, double x) {
return fma(wj, fma(x, -2.0, wj), x);
}
# wj*(wj - 2x) + x via nested fma (truncated Taylor form).
function code(wj, x) return fma(wj, fma(x, -2.0, wj), x) end
code[wj_, x_] := N[(wj * N[(x * -2.0 + wj), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, \mathsf{fma}\left(x, -2, wj\right), x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
Applied rewrites95.9%
Taylor expanded in wj around 0
+-commutativeN/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-rgt-inN/A
distribute-rgt-outN/A
metadata-evalN/A
*-commutativeN/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-rgt-inN/A
+-commutativeN/A
Applied rewrites95.2%
Taylor expanded in wj around 0
Applied rewrites94.9%
(FPCore (wj x) :precision binary64 (fma x (* wj -2.0) x))
/* Herbie alternative: x*(1 - 2*wj) via fma — first-order truncation in wj. */
double code(double wj, double x) {
return fma(x, (wj * -2.0), x);
}
# x*(1 - 2*wj) via fma — first-order truncation in wj.
function code(wj, x) return fma(x, Float64(wj * -2.0), x) end
code[wj_, x_] := N[(x * N[(wj * -2.0), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, wj \cdot -2, x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6485.0
Applied rewrites85.0%
(FPCore (wj x) :precision binary64 (- (- x)))
/* Herbie alternative: double negation, algebraically just x (wj unused).
 * Kept as two negations to match the generated form's sign behavior. */
double code(double wj, double x) {
    const double negated = -x;
    return -negated;
}
! Herbie alternative: double negation, algebraically just x (wj unused).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -(-x)
end function
/** Herbie alternative: double negation — algebraically the identity on x (wj unused). */
public static double code(double wj, double x) {
    double negated = -x;
    return -negated;
}
def code(wj, x):
    """Herbie alternative: double negation of x — the identity on x (wj unused)."""
    negated = -x
    return -negated
# Double negation of x — the identity on x (wj unused).
function code(wj, x) return Float64(-Float64(-x)) end
% Double negation of x — the identity on x (wj unused).
function tmp = code(wj, x) tmp = -(-x); end
(* Double negation of x — the identity on x (wj unused). *)
code[wj_, x_] := (-(-x))
\begin{array}{l}
\\
-\left(-x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
Applied rewrites95.9%
Taylor expanded in x around 0
Applied rewrites95.9%
Taylor expanded in x around -inf
Applied rewrites95.8%
Taylor expanded in wj around 0
Applied rewrites84.7%
(FPCore (wj x) :precision binary64 (- wj 1.0))
/* Herbie alternative (Taylor in wj around inf): returns wj - 1; x is unused.
 * Herbie reports this variant at only 4.2% accuracy — listed for completeness. */
double code(double wj, double x) {
    const double result = wj - 1.0;
    return result;
}
! Herbie alternative (Taylor in wj around inf): returns wj - 1; x is unused.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - 1.0d0
end function
/** Herbie alternative (Taylor in wj around inf): returns wj - 1; x is unused. */
public static double code(double wj, double x) {
return wj - 1.0;
}
def code(wj, x):
    """Herbie alternative (Taylor in wj around inf): wj - 1; x is unused."""
    result = wj - 1.0
    return result
# Taylor in wj around inf: returns wj - 1; x is unused.
function code(wj, x) return Float64(wj - 1.0) end
% Taylor in wj around inf: returns wj - 1; x is unused.
function tmp = code(wj, x) tmp = wj - 1.0; end
(* Taylor in wj around inf: returns wj - 1; x is unused. *)
code[wj_, x_] := N[(wj - 1.0), $MachinePrecision]
\begin{array}{l}
\\
wj - 1
\end{array}
Initial program 75.6%
Taylor expanded in wj around inf
Applied rewrites4.2%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Rearranged Newton step: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/** Rearranged Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)). */
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x):
    """Rearranged Newton step: wj - (wj/(wj+1) - x/(e**wj + wj*e**wj)).

    math.exp(wj) is hoisted; exp is deterministic so results are identical
    to the generated one-liner.
    """
    ew = math.exp(wj)
    lhs = wj / (wj + 1.0)
    rhs = x / (ew + (wj * ew))
    return wj - (lhs - rhs)
# Rearranged Newton step: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
% Rearranged Newton step: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
(* Rearranged Newton step: wj - (wj/(wj+1) - x/(E^wj + wj*E^wj)). *)
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024233
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))