
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for Lambert W:
! wj - (wj*e^wj - x) / (e^wj + wj*e^wj)
real(8) function code(wj, x)
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: ew
    real(8) :: t_0
    ew = exp(wj)
    t_0 = wj * ew
    code = wj - ((t_0 - x) / (ew + t_0))
end function
/** One Newton-iteration step for Lambert W:
 *  wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
 *  @param wj current iterate
 *  @param x  value whose Lambert W is being solved for
 *  @return the updated iterate */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);  // hoisted: the original evaluated Math.exp(wj) twice
    double t_0 = wj * ew;
    return wj - ((t_0 - x) / (ew + t_0));
}
def code(wj, x):
    """One Newton-iteration step for Lambert W.

    Computes wj - (wj*e^wj - x) / (e^wj + wj*e^wj).
    The original rendering had all statements fused onto one line and was
    not valid Python syntax.
    """
    ew = math.exp(wj)  # computed once; the original evaluated exp(wj) twice
    t_0 = wj * ew
    return wj - ((t_0 - x) / (ew + t_0))
# One Newton-iteration step for Lambert W: wj - (wj*e^wj - x)/(e^wj + wj*e^wj).
# The single-line original did not separate its statements and was not
# parseable Julia; this restores the intended multi-line form.
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(if (<= wj -2.8e-6)
(fma (/ -1.0 (exp wj)) (/ (fma wj (exp wj) (- x)) (+ wj 1.0)) wj)
(fma
wj
(fma
wj
(- (fma x 2.5 1.0) (fma wj (fma x 0.6666666666666666 (* x 2.0)) wj))
(* x -2.0))
x)))
/* Lambert-W Newton step split into two regimes (per the derivation log:
 * the wj > -2.8e-6 branch is a Taylor expansion of the step around wj = 0).
 * Fused multiply-adds are deliberate; do not re-associate. */
double code(double wj, double x) {
double tmp;
if (wj <= -2.8e-6) {
/* rearranged exact step: fma(-1/e^wj, (wj*e^wj - x)/(wj + 1), wj) */
tmp = fma((-1.0 / exp(wj)), (fma(wj, exp(wj), -x) / (wj + 1.0)), wj);
} else {
/* cubic-in-wj polynomial approximation of the step near wj = 0 */
tmp = fma(wj, fma(wj, (fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, (x * 2.0)), wj)), (x * -2.0)), x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -2.8e-6) tmp = fma(Float64(-1.0 / exp(wj)), Float64(fma(wj, exp(wj), Float64(-x)) / Float64(wj + 1.0)), wj); else tmp = fma(wj, fma(wj, Float64(fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, Float64(x * 2.0)), wj)), Float64(x * -2.0)), x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -2.8e-6], N[(N[(-1.0 / N[Exp[wj], $MachinePrecision]), $MachinePrecision] * N[(N[(wj * N[Exp[wj], $MachinePrecision] + (-x)), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision], N[(wj * N[(wj * N[(N[(x * 2.5 + 1.0), $MachinePrecision] - N[(wj * N[(x * 0.6666666666666666 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -2.8 \cdot 10^{-6}:\\
\;\;\;\;\mathsf{fma}\left(\frac{-1}{e^{wj}}, \frac{\mathsf{fma}\left(wj, e^{wj}, -x\right)}{wj + 1}, wj\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 2.5, 1\right) - \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 0.6666666666666666, x \cdot 2\right), wj\right), x \cdot -2\right), x\right)\\
\end{array}
\end{array}
if wj < -2.79999999999999987e-6: initial program accuracy 61.6%
lift-exp.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
sub-negN/A
+-commutativeN/A
Applied rewrites94.4%
if -2.79999999999999987e-6 < wj: initial program accuracy 78.3%
Taylor expanded in wj around 0
Applied rewrites99.0%
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj))) (t_1 (+ wj (/ (- x t_0) (+ (exp wj) t_0))))) (if (<= t_1 -5e-260) (+ wj x) (if (<= t_1 0.0) (* wj wj) (+ wj x)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = wj + ((x - t_0) / (exp(wj) + t_0));
double tmp;
if (t_1 <= -5e-260) {
tmp = wj + x;
} else if (t_1 <= 0.0) {
tmp = wj * wj;
} else {
tmp = wj + x;
}
return tmp;
}
! Regime-selected form of the Lambert-W Newton step.  t_1 is the step value
! itself; in the narrow band (-5d-260, 0] it is replaced by wj*wj, otherwise
! by wj + x (the first and last branches are identical).
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
! t_0 = wj * e^wj; t_1 = the full Newton step wj + (x - t_0)/(e^wj + t_0)
t_0 = wj * exp(wj)
t_1 = wj + ((x - t_0) / (exp(wj) + t_0))
if (t_1 <= (-5d-260)) then
tmp = wj + x
else if (t_1 <= 0.0d0) then
tmp = wj * wj
else
tmp = wj + x
end if
code = tmp
end function
/** Regime-selected form of the Lambert-W Newton step.  t_1 is the step value
 *  itself; in the narrow band (-5e-260, 0] it is replaced by wj*wj,
 *  otherwise by wj + x (the original's outer branches were identical).
 *  Math.exp(wj) is hoisted so it is evaluated once instead of twice. */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    double t_0 = wj * ew;
    double t_1 = wj + ((x - t_0) / (ew + t_0));
    // NaN t_1 falls through to wj + x, matching the original comparisons.
    if (t_1 > -5e-260 && t_1 <= 0.0) {
        return wj * wj;
    }
    return wj + x;
}
def code(wj, x):
    """Regime-selected form of the Lambert-W Newton step.

    t_1 is the step value itself; in the narrow band (-5e-260, 0] it is
    replaced by wj*wj, otherwise by wj + x (the first and last branches
    are identical).  The original rendering fused all statements onto one
    line and was not valid Python syntax.
    """
    ew = math.exp(wj)  # computed once; the original evaluated exp(wj) twice
    t_0 = wj * ew
    t_1 = wj + ((x - t_0) / (ew + t_0))
    if t_1 <= -5e-260:
        tmp = wj + x
    elif t_1 <= 0.0:
        tmp = wj * wj
    else:
        tmp = wj + x
    return tmp
function code(wj, x) t_0 = Float64(wj * exp(wj)) t_1 = Float64(wj + Float64(Float64(x - t_0) / Float64(exp(wj) + t_0))) tmp = 0.0 if (t_1 <= -5e-260) tmp = Float64(wj + x); elseif (t_1 <= 0.0) tmp = Float64(wj * wj); else tmp = Float64(wj + x); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj * exp(wj); t_1 = wj + ((x - t_0) / (exp(wj) + t_0)); tmp = 0.0; if (t_1 <= -5e-260) tmp = wj + x; elseif (t_1 <= 0.0) tmp = wj * wj; else tmp = wj + x; end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(wj + N[(N[(x - t$95$0), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -5e-260], N[(wj + x), $MachinePrecision], If[LessEqual[t$95$1, 0.0], N[(wj * wj), $MachinePrecision], N[(wj + x), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := wj + \frac{x - t\_0}{e^{wj} + t\_0}\\
\mathbf{if}\;t\_1 \leq -5 \cdot 10^{-260}:\\
\;\;\;\;wj + x\\
\mathbf{elif}\;t\_1 \leq 0:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;wj + x\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -5.0000000000000003e-260 or 0.0 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 95.3%
Taylor expanded in wj around 0
mul-1-negN/A
lower-neg.f6489.2
Applied rewrites89.2%
if -5.0000000000000003e-260 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.0Initial program 5.4%
Taylor expanded in wj around 0
Applied rewrites100.0%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
lft-mult-inverseN/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
distribute-rgt-inN/A
lft-mult-inverseN/A
neg-mul-1N/A
sub-negN/A
lower--.f6463.4
Applied rewrites63.4%
Taylor expanded in wj around 0
unpow2N/A
lower-*.f6463.4
Applied rewrites63.4%
Final simplification84.1%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.24)
(/ x (* (exp wj) (+ wj 1.0)))
(fma
wj
(fma
wj
(- (fma x 2.5 1.0) (fma wj (fma x 0.6666666666666666 (* x 2.0)) wj))
(* x -2.0))
x)))
/* Lambert-W Newton step, regime-split at wj = -0.24.  Per the derivation
 * log: the low branch is a Taylor expansion in x around infinity,
 * x / (e^wj * (wj + 1)); the high branch a cubic fma polynomial in wj
 * around 0.  Keep the fma nesting exactly as written. */
double code(double wj, double x) {
double tmp;
if (wj <= -0.24) {
tmp = x / (exp(wj) * (wj + 1.0));
} else {
tmp = fma(wj, fma(wj, (fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, (x * 2.0)), wj)), (x * -2.0)), x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -0.24) tmp = Float64(x / Float64(exp(wj) * Float64(wj + 1.0))); else tmp = fma(wj, fma(wj, Float64(fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, Float64(x * 2.0)), wj)), Float64(x * -2.0)), x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -0.24], N[(x / N[(N[Exp[wj], $MachinePrecision] * N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj * N[(wj * N[(N[(x * 2.5 + 1.0), $MachinePrecision] - N[(wj * N[(x * 0.6666666666666666 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.24:\\
\;\;\;\;\frac{x}{e^{wj} \cdot \left(wj + 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 2.5, 1\right) - \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 0.6666666666666666, x \cdot 2\right), wj\right), x \cdot -2\right), x\right)\\
\end{array}
\end{array}
if wj < -0.23999999999999999Initial program 49.7%
Taylor expanded in x around inf
lower-/.f64N/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-*.f64N/A
lower-exp.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
if -0.23999999999999999 < wj Initial program 78.4%
Taylor expanded in wj around 0
Applied rewrites98.5%
(FPCore (wj x)
:precision binary64
(if (<= wj -1.0)
(/ x (* wj (exp wj)))
(fma
wj
(fma
wj
(- (fma x 2.5 1.0) (fma wj (fma x 0.6666666666666666 (* x 2.0)) wj))
(* x -2.0))
x)))
/* Lambert-W Newton step, regime-split at wj = -1.  Per the derivation log:
 * the low branch is a Taylor expansion (in wj around infinity) giving
 * x / (wj * e^wj); the high branch is the cubic fma polynomial in wj
 * around 0.  Keep the fma nesting exactly as written. */
double code(double wj, double x) {
double tmp;
if (wj <= -1.0) {
tmp = x / (wj * exp(wj));
} else {
tmp = fma(wj, fma(wj, (fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, (x * 2.0)), wj)), (x * -2.0)), x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -1.0) tmp = Float64(x / Float64(wj * exp(wj))); else tmp = fma(wj, fma(wj, Float64(fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, Float64(x * 2.0)), wj)), Float64(x * -2.0)), x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -1.0], N[(x / N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj * N[(wj * N[(N[(x * 2.5 + 1.0), $MachinePrecision] - N[(wj * N[(x * 0.6666666666666666 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1:\\
\;\;\;\;\frac{x}{wj \cdot e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 2.5, 1\right) - \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 0.6666666666666666, x \cdot 2\right), wj\right), x \cdot -2\right), x\right)\\
\end{array}
\end{array}
if wj < -1Initial program 49.7%
Taylor expanded in x around inf
lower-/.f64N/A
distribute-rgt1-inN/A
+-commutativeN/A
*-commutativeN/A
lower-*.f64N/A
lower-exp.f64N/A
+-commutativeN/A
lower-+.f64100.0
Applied rewrites100.0%
Taylor expanded in wj around inf
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f6487.0
Applied rewrites87.0%
if -1 < wj Initial program 78.4%
Taylor expanded in wj around 0
Applied rewrites98.5%
(FPCore (wj x) :precision binary64 (fma wj (fma wj (- (fma x 2.5 1.0) (fma wj (fma x 0.6666666666666666 (* x 2.0)) wj)) (* x -2.0)) x))
double code(double wj, double x) {
return fma(wj, fma(wj, (fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, (x * 2.0)), wj)), (x * -2.0)), x);
}
function code(wj, x) return fma(wj, fma(wj, Float64(fma(x, 2.5, 1.0) - fma(wj, fma(x, 0.6666666666666666, Float64(x * 2.0)), wj)), Float64(x * -2.0)), x) end
code[wj_, x_] := N[(wj * N[(wj * N[(N[(x * 2.5 + 1.0), $MachinePrecision] - N[(wj * N[(x * 0.6666666666666666 + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 2.5, 1\right) - \mathsf{fma}\left(wj, \mathsf{fma}\left(x, 0.6666666666666666, x \cdot 2\right), wj\right), x \cdot -2\right), x\right)
\end{array}
Initial program 77.8%
Taylor expanded in wj around 0
Applied rewrites96.2%
(FPCore (wj x) :precision binary64 (fma wj (fma x -2.0 (fma (* wj x) 2.5 wj)) x))
double code(double wj, double x) {
return fma(wj, fma(x, -2.0, fma((wj * x), 2.5, wj)), x);
}
function code(wj, x) return fma(wj, fma(x, -2.0, fma(Float64(wj * x), 2.5, wj)), x) end
code[wj_, x_] := N[(wj * N[(x * -2.0 + N[(N[(wj * x), $MachinePrecision] * 2.5 + wj), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, \mathsf{fma}\left(x, -2, \mathsf{fma}\left(wj \cdot x, 2.5, wj\right)\right), x\right)
\end{array}
Initial program 77.8%
Taylor expanded in wj around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.6%
Final simplification95.6%
(FPCore (wj x) :precision binary64 (fma wj (* wj (- 1.0 wj)) x))
double code(double wj, double x) {
return fma(wj, (wj * (1.0 - wj)), x);
}
function code(wj, x) return fma(wj, Float64(wj * Float64(1.0 - wj)), x) end
code[wj_, x_] := N[(wj * N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, wj \cdot \left(1 - wj\right), x\right)
\end{array}
Initial program 77.8%
Taylor expanded in wj around 0
Applied rewrites96.2%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
lft-mult-inverseN/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
distribute-rgt-inN/A
lft-mult-inverseN/A
neg-mul-1N/A
sub-negN/A
lower--.f6495.5
Applied rewrites95.5%
(FPCore (wj x) :precision binary64 (if (<= x -3e-66) (+ wj -1.0) (* wj wj)))
/* Coarse regime split of the Lambert-W Newton step: wj - 1 when
 * x <= -3e-66, otherwise wj squared.  NaN x takes the wj*wj branch,
 * exactly as the original comparison did. */
double code(double wj, double x) {
    return (x <= -3e-66) ? (wj + -1.0) : (wj * wj);
}
! Coarse regime split of the Lambert-W Newton step:
! wj - 1 when x <= -3e-66, otherwise wj squared.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-3d-66)) then
tmp = wj + (-1.0d0)
else
tmp = wj * wj
end if
code = tmp
end function
/** Coarse regime split of the Lambert-W Newton step: wj - 1 when
 *  x <= -3e-66, otherwise wj squared.  NaN x takes the wj*wj branch,
 *  exactly as the original comparison did. */
public static double code(double wj, double x) {
    return (x <= -3e-66) ? (wj + -1.0) : (wj * wj);
}
def code(wj, x):
    """Coarse regime split of the Lambert-W Newton step.

    Returns wj - 1 when x <= -3e-66, otherwise wj squared.  The original
    rendering fused the if/else onto one line and was not valid Python.
    """
    if x <= -3e-66:
        tmp = wj + -1.0
    else:
        tmp = wj * wj
    return tmp
function code(wj, x) tmp = 0.0 if (x <= -3e-66) tmp = Float64(wj + -1.0); else tmp = Float64(wj * wj); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (x <= -3e-66) tmp = wj + -1.0; else tmp = wj * wj; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[x, -3e-66], N[(wj + -1.0), $MachinePrecision], N[(wj * wj), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3 \cdot 10^{-66}:\\
\;\;\;\;wj + -1\\
\mathbf{else}:\\
\;\;\;\;wj \cdot wj\\
\end{array}
\end{array}
if x < -3.0000000000000002e-66: initial program accuracy 97.3%
Taylor expanded in wj around inf
Applied rewrites8.2%
if -3.0000000000000002e-66 < x Initial program 69.8%
Taylor expanded in wj around 0
Applied rewrites95.7%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
lft-mult-inverseN/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
distribute-rgt-inN/A
lft-mult-inverseN/A
neg-mul-1N/A
sub-negN/A
lower--.f6422.5
Applied rewrites22.5%
Taylor expanded in wj around 0
unpow2N/A
lower-*.f6421.8
Applied rewrites21.8%
Final simplification17.9%
(FPCore (wj x) :precision binary64 (fma wj wj x))
/* Quadratic shortcut for the Lambert-W Newton step: wj*wj + x in a single
 * fused multiply-add. */
double code(double wj, double x) {
return fma(wj, wj, x);
}
function code(wj, x) return fma(wj, wj, x) end
code[wj_, x_] := N[(wj * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(wj, wj, x\right)
\end{array}
Initial program 77.8%
Taylor expanded in wj around 0
Applied rewrites96.2%
Taylor expanded in x around 0
sub-negN/A
neg-mul-1N/A
lft-mult-inverseN/A
distribute-rgt-inN/A
metadata-evalN/A
sub-negN/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
distribute-rgt-inN/A
lft-mult-inverseN/A
neg-mul-1N/A
sub-negN/A
lower--.f6495.5
Applied rewrites95.5%
Taylor expanded in wj around 0
+-commutativeN/A
unpow2N/A
lower-fma.f6495.0
Applied rewrites95.0%
(FPCore (wj x) :precision binary64 (+ wj -1.0))
/* Degenerate approximation of the Newton step: wj - 1; x is unused. */
double code(double wj, double x) {
return wj + -1.0;
}
! Degenerate approximation of the Newton step: wj - 1; x is unused.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj + (-1.0d0)
end function
public static double code(double wj, double x) {
return wj + -1.0;
}
def code(wj, x):
    """Degenerate approximation of the Newton step: wj - 1 (x is unused)."""
    return wj + -1.0
function code(wj, x) return Float64(wj + -1.0) end
function tmp = code(wj, x) tmp = wj + -1.0; end
code[wj_, x_] := N[(wj + -1.0), $MachinePrecision]
\begin{array}{l}
\\
wj + -1
\end{array}
Initial program 77.8%
Taylor expanded in wj around inf
Applied rewrites4.0%
Final simplification4.0%
(FPCore (wj x) :precision binary64 -1.0)
/* Constant fold of the Newton step: always -1; both arguments unused. */
double code(double wj, double x) {
return -1.0;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x):
    """Constant fold of the Newton step: always -1.0 (arguments ignored)."""
    return -1.0
function code(wj, x) return -1.0 end
function tmp = code(wj, x) tmp = -1.0; end
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 77.8%
Taylor expanded in wj around inf
Applied rewrites4.0%
Taylor expanded in wj around 0
Applied rewrites3.3%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! Accuracy-oriented rewrite of the Lambert-W Newton step:
! wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).  exp(wj) is evaluated twice here.
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/** Accuracy-oriented rewrite of the Lambert-W Newton step:
 *  wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
 *  Math.exp(wj) is hoisted so it is evaluated once instead of twice. */
public static double code(double wj, double x) {
    double ew = Math.exp(wj);
    return wj - ((wj / (wj + 1.0)) - (x / (ew + (wj * ew))));
}
def code(wj, x):
    """Accuracy-oriented rewrite of the Lambert-W Newton step.

    Computes wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
    """
    ew = math.exp(wj)
    return wj - ((wj / (wj + 1.0)) - (x / (ew + (wj * ew))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024216
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))