
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for solving w*exp(w) = x (Lambert W refinement):
!   code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj))
! The denominator exp(wj) + t_0 is the derivative d/dw (w*exp(w)) = exp(w)*(1+w).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
! t_0 = wj * e^wj, shared between numerator and denominator
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton-iteration step for solving w*e^w = x (Lambert W refinement).
 *
 * @param wj current estimate of W(x)
 * @param x  target value of w*e^w
 * @return wj - (wj*e^wj - x) / (e^wj + wj*e^wj)
 */
public static double code(double wj, double x) {
double ew = Math.exp(wj);
double we = wj * ew;
return wj - (we - x) / (ew + we);
}
def code(wj, x):
    """One Newton-iteration step for solving w*e^w = x (Lambert W refinement).

    The generated one-line form jammed two statements after the colon,
    which is a SyntaxError; this is the same computation, validly formatted.

    Args:
        wj: current estimate of W(x).
        x: target value of w * e**w.

    Returns:
        wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
    """
    # t_0 = wj * e^wj; the denominator e^wj + t_0 is the derivative e^w*(1+w).
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
# One Newton-iteration step for solving w*exp(w) = x (Lambert W refinement).
# The generated single-line form juxtaposed two statements without a
# separator, which does not parse; same computation, validly formatted.
# Each intermediate is rounded to Float64 to mirror the binary64 FPCore.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
% One Newton-iteration step for solving w*exp(w) = x (Lambert W refinement):
% tmp = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
% NOTE(review): generated single-line layout kept as-is; confirm it parses in
% the target MATLAB/Octave version, which may require the declaration on its own line.
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
(* One Newton-iteration step for solving w*E^w = x (Lambert W refinement),
   with every operation rounded via N[..., $MachinePrecision] to mirror the
   binary64 FPCore semantics. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(if (<= wj -3.6e-6)
(-
wj
(/ (- (* (exp wj) wj) x) (/ (* (fma wj wj -1.0) (exp wj)) (- wj 1.0))))
(fma
(fma
(fma 2.5 x (- 1.0 (* (fma 0.6666666666666666 x (fma 2.0 x 1.0)) wj)))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: branch on wj to improve accuracy of the Newton step
 * wj - (wj*e^wj - x) / (e^wj*(1 + wj)).
 * For wj <= -3.6e-6 the denominator is algebraically rewritten as
 * ((wj^2 - 1)*e^wj)/(wj - 1), with fma(wj, wj, -1.0) fusing wj^2 - 1;
 * per the derivation log, the other branch is a Taylor expansion in wj
 * around 0, evaluated as a nested-fma polynomial in wj and x. */
double code(double wj, double x) {
double tmp;
if (wj <= -3.6e-6) {
tmp = wj - (((exp(wj) * wj) - x) / ((fma(wj, wj, -1.0) * exp(wj)) / (wj - 1.0)));
} else {
tmp = fma(fma(fma(2.5, x, (1.0 - (fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -3.6e-6) tmp = Float64(wj - Float64(Float64(Float64(exp(wj) * wj) - x) / Float64(Float64(fma(wj, wj, -1.0) * exp(wj)) / Float64(wj - 1.0)))); else tmp = fma(fma(fma(2.5, x, Float64(1.0 - Float64(fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -3.6e-6], N[(wj - N[(N[(N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision] - x), $MachinePrecision] / N[(N[(N[(wj * wj + -1.0), $MachinePrecision] * N[Exp[wj], $MachinePrecision]), $MachinePrecision] / N[(wj - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(2.5 * x + N[(1.0 - N[(N[(0.6666666666666666 * x + N[(2.0 * x + 1.0), $MachinePrecision]), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.6 \cdot 10^{-6}:\\
\;\;\;\;wj - \frac{e^{wj} \cdot wj - x}{\frac{\mathsf{fma}\left(wj, wj, -1\right) \cdot e^{wj}}{wj - 1}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -3.59999999999999984e-6: Initial program 49.7%
lift-+.f64N/A
lift-*.f64N/A
distribute-rgt1-inN/A
flip-+N/A
associate-*l/N/A
lower-/.f64N/A
lower-*.f64N/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
lower--.f6499.2
Applied rewrites99.2%
if -3.59999999999999984e-6 < wj Initial program 76.4%
Taylor expanded in wj around 0
Applied rewrites97.9%
Final simplification97.9%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* (exp wj) wj))
(t_1 (- wj (/ (- t_0 x) (+ t_0 (exp wj)))))
(t_2 (fma (* x wj) -2.0 x)))
(if (<= t_1 -5e-201) t_2 (if (<= t_1 0.0) (* (* (- 1.0 wj) wj) wj) t_2))))
/* Herbie alternative: compute the plain Newton step t_1 once, then select a
 * series-based replacement depending on where t_1 falls.
 * t_2 = fma(x*wj, -2, x) = x - 2*x*wj (per the log, Taylor in wj around 0);
 * the narrow middle band -5e-201 < t_1 <= 0 instead uses (1 - wj)*wj*wj
 * (per the log, Taylor in x around 0). */
double code(double wj, double x) {
double t_0 = exp(wj) * wj;
/* t_1 is the unmodified Newton step, used only for branch selection */
double t_1 = wj - ((t_0 - x) / (t_0 + exp(wj)));
double t_2 = fma((x * wj), -2.0, x);
double tmp;
if (t_1 <= -5e-201) {
tmp = t_2;
} else if (t_1 <= 0.0) {
tmp = ((1.0 - wj) * wj) * wj;
} else {
tmp = t_2;
}
return tmp;
}
function code(wj, x) t_0 = Float64(exp(wj) * wj) t_1 = Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj)))) t_2 = fma(Float64(x * wj), -2.0, x) tmp = 0.0 if (t_1 <= -5e-201) tmp = t_2; elseif (t_1 <= 0.0) tmp = Float64(Float64(Float64(1.0 - wj) * wj) * wj); else tmp = t_2; end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, Block[{t$95$1 = N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(N[(x * wj), $MachinePrecision] * -2.0 + x), $MachinePrecision]}, If[LessEqual[t$95$1, -5e-201], t$95$2, If[LessEqual[t$95$1, 0.0], N[(N[(N[(1.0 - wj), $MachinePrecision] * wj), $MachinePrecision] * wj), $MachinePrecision], t$95$2]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{wj} \cdot wj\\
t_1 := wj - \frac{t\_0 - x}{t\_0 + e^{wj}}\\
t_2 := \mathsf{fma}\left(x \cdot wj, -2, x\right)\\
\mathbf{if}\;t\_1 \leq -5 \cdot 10^{-201}:\\
\;\;\;\;t\_2\\
\mathbf{elif}\;t\_1 \leq 0:\\
\;\;\;\;\left(\left(1 - wj\right) \cdot wj\right) \cdot wj\\
\mathbf{else}:\\
\;\;\;\;t\_2\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -4.9999999999999999e-201 or 0.0 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 94.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f6491.3
Applied rewrites91.3%
if -4.9999999999999999e-201 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.0Initial program 5.3%
Taylor expanded in wj around 0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites60.6%
Final simplification84.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* (exp wj) wj)))
(if (<= (- wj (/ (- t_0 x) (+ t_0 (exp wj)))) 0.05)
(fma
(fma
(fma 2.5 x (- 1.0 (* (fma 0.6666666666666666 x (fma 2.0 x 1.0)) wj)))
wj
(* -2.0 x))
wj
x)
(- wj (/ (/ x (- -1.0 wj)) (exp wj))))))
/* Herbie alternative: when the plain Newton step is <= 0.05, evaluate the
 * nested-fma Taylor polynomial in wj around 0; otherwise use
 * wj - (x / (-1 - wj)) / e^wj, which the derivation log obtains by
 * expanding in x around infinity. */
double code(double wj, double x) {
double t_0 = exp(wj) * wj;
double tmp;
if ((wj - ((t_0 - x) / (t_0 + exp(wj)))) <= 0.05) {
tmp = fma(fma(fma(2.5, x, (1.0 - (fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - ((x / (-1.0 - wj)) / exp(wj));
}
return tmp;
}
function code(wj, x) t_0 = Float64(exp(wj) * wj) tmp = 0.0 if (Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj)))) <= 0.05) tmp = fma(fma(fma(2.5, x, Float64(1.0 - Float64(fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - Float64(Float64(x / Float64(-1.0 - wj)) / exp(wj))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.05], N[(N[(N[(2.5 * x + N[(1.0 - N[(N[(0.6666666666666666 * x + N[(2.0 * x + 1.0), $MachinePrecision]), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(x / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision] / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{wj} \cdot wj\\
\mathbf{if}\;wj - \frac{t\_0 - x}{t\_0 + e^{wj}} \leq 0.05:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{\frac{x}{-1 - wj}}{e^{wj}}\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.050000000000000003Initial program 70.7%
Taylor expanded in wj around 0
Applied rewrites97.5%
if 0.050000000000000003 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 89.5%
Taylor expanded in x around inf
mul-1-negN/A
distribute-rgt1-inN/A
+-commutativeN/A
associate-/r*N/A
distribute-neg-frac2N/A
mul-1-negN/A
lower-/.f64N/A
lower-/.f64N/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-exp.f6496.6
Applied rewrites96.6%
Final simplification97.3%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* (exp wj) wj)))
(if (<= (- wj (/ (- t_0 x) (+ t_0 (exp wj)))) 5e+305)
(fma
(fma
(fma 2.5 x (- 1.0 (* (fma 0.6666666666666666 x (fma 2.0 x 1.0)) wj)))
wj
(* -2.0 x))
wj
x)
(- wj (/ wj (- wj -1.0))))))
/* Herbie alternative: below the near-overflow threshold 5e305 on the plain
 * Newton step, evaluate the nested-fma Taylor polynomial in wj around 0;
 * above it, the step collapses to wj - wj/(wj + 1) (per the log, Taylor
 * expansion in x around 0). Note (wj - -1.0) is the generated spelling
 * of wj + 1. */
double code(double wj, double x) {
double t_0 = exp(wj) * wj;
double tmp;
if ((wj - ((t_0 - x) / (t_0 + exp(wj)))) <= 5e+305) {
tmp = fma(fma(fma(2.5, x, (1.0 - (fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - (wj / (wj - -1.0));
}
return tmp;
}
function code(wj, x) t_0 = Float64(exp(wj) * wj) tmp = 0.0 if (Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj)))) <= 5e+305) tmp = fma(fma(fma(2.5, x, Float64(1.0 - Float64(fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - Float64(wj / Float64(wj - -1.0))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e+305], N[(N[(N[(2.5 * x + N[(1.0 - N[(N[(0.6666666666666666 * x + N[(2.0 * x + 1.0), $MachinePrecision]), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{wj} \cdot wj\\
\mathbf{if}\;wj - \frac{t\_0 - x}{t\_0 + e^{wj}} \leq 5 \cdot 10^{+305}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj - -1}\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 5.00000000000000009e305Initial program 77.7%
Taylor expanded in wj around 0
Applied rewrites97.8%
if 5.00000000000000009e305 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 0.0%
Taylor expanded in x around 0
distribute-rgt1-inN/A
+-commutativeN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
lower-/.f64N/A
lower-+.f6458.5
Applied rewrites58.5%
Final simplification96.7%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* (exp wj) wj)))
(if (<= (- wj (/ (- t_0 x) (+ t_0 (exp wj)))) 5e+305)
(fma (fma (- 1.0 wj) wj (* -2.0 x)) wj x)
(- wj (/ wj (- wj -1.0))))))
/* Herbie alternative: like the 5e305-threshold variant, but the main branch
 * uses a shorter series: fma(fma(1 - wj, wj, -2x), wj, x)
 * = (1 - wj)*wj^2 - 2*x*wj + x (per the log, truncated Taylor in x
 * around 0 applied after the wj expansion). */
double code(double wj, double x) {
double t_0 = exp(wj) * wj;
double tmp;
if ((wj - ((t_0 - x) / (t_0 + exp(wj)))) <= 5e+305) {
tmp = fma(fma((1.0 - wj), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - (wj / (wj - -1.0));
}
return tmp;
}
function code(wj, x) t_0 = Float64(exp(wj) * wj) tmp = 0.0 if (Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj)))) <= 5e+305) tmp = fma(fma(Float64(1.0 - wj), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - Float64(wj / Float64(wj - -1.0))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e+305], N[(N[(N[(1.0 - wj), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{wj} \cdot wj\\
\mathbf{if}\;wj - \frac{t\_0 - x}{t\_0 + e^{wj}} \leq 5 \cdot 10^{+305}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(1 - wj, wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj - -1}\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 5.00000000000000009e305Initial program 77.7%
Taylor expanded in wj around 0
Applied rewrites97.8%
Taylor expanded in x around 0
Applied rewrites97.5%
if 5.00000000000000009e305 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 0.0%
Taylor expanded in x around 0
distribute-rgt1-inN/A
+-commutativeN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
lower-/.f64N/A
lower-+.f6458.5
Applied rewrites58.5%
Final simplification96.5%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* (exp wj) wj)))
(if (<= (- wj (/ (- t_0 x) (+ t_0 (exp wj)))) 5e+305)
(fma (* (- 1.0 wj) wj) wj x)
(- wj (/ wj (- wj -1.0))))))
/* Herbie alternative: shortest series variant of the 5e305-threshold family.
 * Main branch: fma((1 - wj)*wj, wj, x) = (1 - wj)*wj^2 + x (per the log,
 * a second truncation of the Taylor series in x around 0). */
double code(double wj, double x) {
double t_0 = exp(wj) * wj;
double tmp;
if ((wj - ((t_0 - x) / (t_0 + exp(wj)))) <= 5e+305) {
tmp = fma(((1.0 - wj) * wj), wj, x);
} else {
tmp = wj - (wj / (wj - -1.0));
}
return tmp;
}
function code(wj, x) t_0 = Float64(exp(wj) * wj) tmp = 0.0 if (Float64(wj - Float64(Float64(t_0 - x) / Float64(t_0 + exp(wj)))) <= 5e+305) tmp = fma(Float64(Float64(1.0 - wj) * wj), wj, x); else tmp = Float64(wj - Float64(wj / Float64(wj - -1.0))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(t$95$0 + N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e+305], N[(N[(N[(1.0 - wj), $MachinePrecision] * wj), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(wj / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{wj} \cdot wj\\
\mathbf{if}\;wj - \frac{t\_0 - x}{t\_0 + e^{wj}} \leq 5 \cdot 10^{+305}:\\
\;\;\;\;\mathsf{fma}\left(\left(1 - wj\right) \cdot wj, wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj}{wj - -1}\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 5.00000000000000009e305Initial program 77.7%
Taylor expanded in wj around 0
Applied rewrites97.8%
Taylor expanded in x around 0
Applied rewrites97.5%
Taylor expanded in x around 0
Applied rewrites96.9%
if 5.00000000000000009e305 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 0.0%
Taylor expanded in x around 0
distribute-rgt1-inN/A
+-commutativeN/A
times-fracN/A
*-inversesN/A
associate-*l/N/A
*-rgt-identityN/A
lower-/.f64N/A
lower-+.f6458.5
Applied rewrites58.5%
Final simplification95.9%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.0052)
(/ x (* (- wj -1.0) (exp wj)))
(fma
(fma
(fma 2.5 x (- 1.0 (* (fma 0.6666666666666666 x (fma 2.0 x 1.0)) wj)))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: branch directly on wj.
 * For wj <= -0.0052, use x / ((wj + 1)*e^wj) (per the log, from expanding
 * in x around infinity); otherwise the nested-fma Taylor polynomial in wj
 * around 0. (wj - -1.0) is the generated spelling of wj + 1. */
double code(double wj, double x) {
double tmp;
if (wj <= -0.0052) {
tmp = x / ((wj - -1.0) * exp(wj));
} else {
tmp = fma(fma(fma(2.5, x, (1.0 - (fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -0.0052) tmp = Float64(x / Float64(Float64(wj - -1.0) * exp(wj))); else tmp = fma(fma(fma(2.5, x, Float64(1.0 - Float64(fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -0.0052], N[(x / N[(N[(wj - -1.0), $MachinePrecision] * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(2.5 * x + N[(1.0 - N[(N[(0.6666666666666666 * x + N[(2.0 * x + 1.0), $MachinePrecision]), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.0052:\\
\;\;\;\;\frac{x}{\left(wj - -1\right) \cdot e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -0.0051999999999999998Initial program 49.7%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-exp.f64N/A
lower-exp.f6414.8
Applied rewrites14.8%
Applied rewrites64.8%
if -0.0051999999999999998 < wj Initial program 76.4%
Taylor expanded in wj around 0
Applied rewrites97.9%
Final simplification96.9%
(FPCore (wj x)
:precision binary64
(if (<= wj -1.0)
(/ x (* (exp wj) wj))
(fma
(fma
(fma 2.5 x (- 1.0 (* (fma 0.6666666666666666 x (fma 2.0 x 1.0)) wj)))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: branch directly on wj at -1.
 * For wj <= -1, use x / (e^wj * wj) (per the log, expansion in x around
 * infinity followed by one in wj around infinity); otherwise the nested-fma
 * Taylor polynomial in wj around 0. */
double code(double wj, double x) {
double tmp;
if (wj <= -1.0) {
tmp = x / (exp(wj) * wj);
} else {
tmp = fma(fma(fma(2.5, x, (1.0 - (fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -1.0) tmp = Float64(x / Float64(exp(wj) * wj)); else tmp = fma(fma(fma(2.5, x, Float64(1.0 - Float64(fma(0.6666666666666666, x, fma(2.0, x, 1.0)) * wj))), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -1.0], N[(x / N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision], N[(N[(N[(2.5 * x + N[(1.0 - N[(N[(0.6666666666666666 * x + N[(2.0 * x + 1.0), $MachinePrecision]), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1:\\
\;\;\;\;\frac{x}{e^{wj} \cdot wj}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, x, 1 - \mathsf{fma}\left(0.6666666666666666, x, \mathsf{fma}\left(2, x, 1\right)\right) \cdot wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -1Initial program 33.3%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-exp.f64N/A
lower-exp.f641.6
Applied rewrites1.6%
Taylor expanded in wj around inf
Applied rewrites68.3%
if -1 < wj Initial program 76.6%
Taylor expanded in wj around 0
Applied rewrites97.4%
(FPCore (wj x) :precision binary64 (fma (* (- 1.0 wj) wj) wj x))
double code(double wj, double x) {
return fma(((1.0 - wj) * wj), wj, x);
}
function code(wj, x) return fma(Float64(Float64(1.0 - wj) * wj), wj, x) end
code[wj_, x_] := N[(N[(N[(1.0 - wj), $MachinePrecision] * wj), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(1 - wj\right) \cdot wj, wj, x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
Applied rewrites95.2%
Taylor expanded in x around 0
Applied rewrites94.9%
Taylor expanded in x around 0
Applied rewrites94.3%
(FPCore (wj x) :precision binary64 (fma (* x wj) -2.0 x))
double code(double wj, double x) {
return fma((x * wj), -2.0, x);
}
function code(wj, x) return fma(Float64(x * wj), -2.0, x) end
code[wj_, x_] := N[(N[(x * wj), $MachinePrecision] * -2.0 + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot wj, -2, x\right)
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f6480.4
Applied rewrites80.4%
Final simplification80.4%
(FPCore (wj x) :precision binary64 (* 1.0 x))
/* Herbie alternative: constant-order approximation — just x
 * (generated as 1.0 * x; wj is unused by this variant). */
double code(double wj, double x) {
(void) wj; /* unused */
return 1.0 * x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = 1.0d0 * x
end function
public static double code(double wj, double x) {
return 1.0 * x;
}
def code(wj, x): return 1.0 * x
function code(wj, x) return Float64(1.0 * x) end
function tmp = code(wj, x) tmp = 1.0 * x; end
code[wj_, x_] := N[(1.0 * x), $MachinePrecision]
\begin{array}{l}
\\
1 \cdot x
\end{array}
Initial program 75.6%
Taylor expanded in wj around 0
Applied rewrites95.2%
Taylor expanded in x around inf
Applied rewrites95.1%
Taylor expanded in wj around 0
Applied rewrites79.8%
(FPCore (wj x) :precision binary64 (- wj 1.0))
/* Herbie alternative (per the log, expansion in wj around infinity):
 * degenerates to wj - 1; x is unused by this variant. */
double code(double wj, double x) {
(void) x; /* unused */
return wj - 1.0;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - 1.0d0
end function
public static double code(double wj, double x) {
return wj - 1.0;
}
def code(wj, x): return wj - 1.0
function code(wj, x) return Float64(wj - 1.0) end
function tmp = code(wj, x) tmp = wj - 1.0; end
code[wj_, x_] := N[(wj - 1.0), $MachinePrecision]
\begin{array}{l}
\\
wj - 1
\end{array}
Initial program 75.6%
Taylor expanded in wj around inf
Applied rewrites4.5%
(FPCore (wj x) :precision binary64 -1.0)
/* Herbie alternative: fully degenerate constant approximation.
 * Both inputs are unused; the result is always -1. */
double code(double wj, double x) {
(void) wj; /* unused */
(void) x;  /* unused */
return -1.0;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x): return -1.0
function code(wj, x) return -1.0 end
function tmp = code(wj, x) tmp = -1.0; end
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 75.6%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
flip-+N/A
lower-/.f64N/A
Applied rewrites51.7%
Taylor expanded in wj around inf
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
lower-/.f644.5
Applied rewrites4.5%
Taylor expanded in wj around 0
Applied rewrites3.3%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024250
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))