
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
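This expression is one Newton update for w · e^w = x; the input's name at the end of this report ("Jmat.Real.lambertw, newton loop step") identifies it as the loop body of a Lambert W solver. A minimal sketch of how the step might be iterated, assuming the principal branch and x ≥ 0 — the driver loop, starting guess, and stopping rule below are illustrative assumptions, not part of the report:

import math

def newton_step(wj, x):
    # The report's initial program: one Newton update for w * exp(w) = x.
    t_0 = wj * math.exp(wj)
    return wj - (t_0 - x) / (math.exp(wj) + t_0)

def lambert_w(x, max_iter=50):
    # Hypothetical driver (assumption, not from the report): principal
    # branch for x >= 0, starting from log(1 + x) and iterating until
    # the update stops changing.
    w = math.log1p(x)
    for _ in range(max_iter):
        w_next = newton_step(w, x)
        if w_next == w:
            break
        w = w_next
    return w

# Example: lambert_w(1.0) should be close to 0.56714329... (the omega constant).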
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (- wj (/ (- t_0 x) (+ (exp wj) t_0))) 1e-31)
(+
x
(*
wj
(-
(-
(*
wj
(-
(+ 1.0 (+ (* x 0.5) (* wj (- (- -1.0 (+ x (* x 0.5))) x))))
(* x -2.0)))
x)
x)))
(+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= 1e-31) {
tmp = x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x));
} else {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = wj * exp(wj)
if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= 1d-31) then
tmp = x + (wj * (((wj * ((1.0d0 + ((x * 0.5d0) + (wj * (((-1.0d0) - (x + (x * 0.5d0))) - x)))) - (x * (-2.0d0)))) - x) - x))
else
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double tmp;
if ((wj - ((t_0 - x) / (Math.exp(wj) + t_0))) <= 1e-31) {
tmp = x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x));
} else {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    tmp = 0
    if (wj - ((t_0 - x) / (math.exp(wj) + t_0))) <= 1e-31:
        tmp = x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x))
    else:
        tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0))
    return tmp
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    tmp = 0.0
    if (Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) <= 1e-31)
        tmp = Float64(x + Float64(wj * Float64(Float64(Float64(wj * Float64(Float64(1.0 + Float64(Float64(x * 0.5) + Float64(wj * Float64(Float64(-1.0 - Float64(x + Float64(x * 0.5))) - x)))) - Float64(x * -2.0))) - x) - x)));
    else
        tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0)));
    end
    return tmp
end
function tmp_2 = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = 0.0;
    if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= 1e-31)
        tmp = x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x));
    else
        tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
    end
    tmp_2 = tmp;
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1e-31], N[(x + N[(wj * N[(N[(N[(wj * N[(N[(1.0 + N[(N[(x * 0.5), $MachinePrecision] + N[(wj * N[(N[(-1.0 - N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj - \frac{t_0 - x}{e^{wj} + t_0} \leq 10^{-31}:\\
\;\;\;\;x + wj \cdot \left(\left(wj \cdot \left(\left(1 + \left(x \cdot 0.5 + wj \cdot \left(\left(-1 - \left(x + x \cdot 0.5\right)\right) - x\right)\right)\right) - x \cdot -2\right) - x\right) - x\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\end{array}
\end{array}
Derivation for the branch if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 1e-31:

| Step | Accuracy |
|---|---|
| Initial program | 72.3% |
| distribute-rgt1-in | 73.4% |
| associate-/l/ | 73.4% |
| div-sub | 72.2% |
| associate-/l* | 72.2% |
| *-inverses | 73.4% |
| *-rgt-identity | 73.4% |
| Simplified | 73.4% |
| Taylor expanded in wj around 0 | 72.3% |
| associate-*r* | 72.3% |
| neg-mul-1 | 72.3% |
| distribute-rgt-out | 72.3% |
| metadata-eval | 72.3% |
| Simplified | 72.3% |
| Taylor expanded in wj around 0 | 98.9% |

Derivation for the branch if 1e-31 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))):

| Step | Accuracy |
|---|---|
| Initial program | 94.9% |
| distribute-rgt1-in | 97.4% |
| associate-/l/ | 97.4% |
| div-sub | 94.9% |
| associate-/l* | 94.9% |
| *-inverses | 99.9% |
| *-rgt-identity | 99.9% |
| Simplified | 99.9% |
| Final simplification | 99.2% |
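For reference, the else branch of the alternative above is just the original quotient with its numerator and denominator divided by e^{wj}; a quick algebraic check (not taken from the report):

\begin{array}{l}
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = wj - \frac{wj - \frac{x}{e^{wj}}}{1 + wj} = wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}
\end{array}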
(FPCore (wj x)
:precision binary64
(+
x
(*
wj
(-
(-
(*
wj
(-
(+ 1.0 (+ (* x 0.5) (* wj (- (- -1.0 (+ x (* x 0.5))) x))))
(* x -2.0)))
x)
x))))
double code(double wj, double x) {
return x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (((wj * ((1.0d0 + ((x * 0.5d0) + (wj * (((-1.0d0) - (x + (x * 0.5d0))) - x)))) - (x * (-2.0d0)))) - x) - x))
end function
public static double code(double wj, double x) {
return x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x));
}
def code(wj, x):
    return x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(Float64(Float64(wj * Float64(Float64(1.0 + Float64(Float64(x * 0.5) + Float64(wj * Float64(Float64(-1.0 - Float64(x + Float64(x * 0.5))) - x)))) - Float64(x * -2.0))) - x) - x)))
end
function tmp = code(wj, x)
    tmp = x + (wj * (((wj * ((1.0 + ((x * 0.5) + (wj * ((-1.0 - (x + (x * 0.5))) - x)))) - (x * -2.0))) - x) - x));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(N[(wj * N[(N[(1.0 + N[(N[(x * 0.5), $MachinePrecision] + N[(wj * N[(N[(-1.0 - N[(x + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(\left(wj \cdot \left(\left(1 + \left(x \cdot 0.5 + wj \cdot \left(\left(-1 - \left(x + x \cdot 0.5\right)\right) - x\right)\right)\right) - x \cdot -2\right) - x\right) - x\right)
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 79.3% |
| distribute-rgt1-in | 80.9% |
| associate-/l/ | 80.9% |
| div-sub | 79.3% |
| associate-/l* | 79.3% |
| *-inverses | 81.6% |
| *-rgt-identity | 81.6% |
| Simplified | 81.6% |
| Taylor expanded in wj around 0 | 79.6% |
| associate-*r* | 79.6% |
| neg-mul-1 | 79.6% |
| distribute-rgt-out | 79.6% |
| metadata-eval | 79.6% |
| Simplified | 79.6% |
| Taylor expanded in wj around 0 | 96.9% |
| Final simplification | 96.9% |
(FPCore (wj x) :precision binary64 (+ x (* wj (+ (* wj (- 1.0 (* x -2.5))) (* x -2.0)))))
double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * (1.0d0 - (x * (-2.5d0)))) + (x * (-2.0d0))))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)));
}
def code(wj, x):
    return x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - Float64(x * -2.5))) + Float64(x * -2.0))))
end
function tmp = code(wj, x)
    tmp = x + (wj * ((wj * (1.0 - (x * -2.5))) + (x * -2.0)));
end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(1.0 - N[(x * -2.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(1 - x \cdot -2.5\right) + x \cdot -2\right)
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 79.3% |
| distribute-rgt1-in | 80.9% |
| associate-/l/ | 80.9% |
| div-sub | 79.3% |
| associate-/l* | 79.3% |
| *-inverses | 81.6% |
| *-rgt-identity | 81.6% |
| Simplified | 81.6% |
| Taylor expanded in wj around 0 | 96.9% |
| cancel-sign-sub-inv | 96.9% |
| metadata-eval | 96.9% |
| distribute-rgt-out | 96.9% |
| metadata-eval | 96.9% |
| *-commutative | 96.9% |
| Simplified | 96.9% |
| Final simplification | 96.9% |
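The quadratic alternative above is the second-order series of the Newton step in wj about 0 (the "Taylor expanded in wj around 0" steps in the derivation); several of the simpler alternatives that follow drop further terms of this series. A quick check of the expansion (not taken from the report):

\begin{array}{l}
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = \frac{wj^{2}}{1 + wj} + \frac{x \cdot e^{-wj}}{1 + wj} = x - 2 x \cdot wj + \left(1 + 2.5 x\right) \cdot wj^{2} + O\left(wj^{3}\right)
\end{array}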
(FPCore (wj x) :precision binary64 (+ x (* wj (+ wj (* x -2.0)))))
double code(double wj, double x) {
return x + (wj * (wj + (x * -2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (wj + (x * (-2.0d0))))
end function
public static double code(double wj, double x) {
return x + (wj * (wj + (x * -2.0)));
}
def code(wj, x):
    return x + (wj * (wj + (x * -2.0)))
function code(wj, x)
    return Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0))))
end
function tmp = code(wj, x)
    tmp = x + (wj * (wj + (x * -2.0)));
end
code[wj_, x_] := N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj + x \cdot -2\right)
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 79.3% |
| distribute-rgt1-in | 80.9% |
| associate-/l/ | 80.9% |
| div-sub | 79.3% |
| associate-/l* | 79.3% |
| *-inverses | 81.6% |
| *-rgt-identity | 81.6% |
| Simplified | 81.6% |
| Taylor expanded in wj around 0 | 96.9% |
| cancel-sign-sub-inv | 96.9% |
| metadata-eval | 96.9% |
| distribute-rgt-out | 96.9% |
| metadata-eval | 96.9% |
| *-commutative | 96.9% |
| Simplified | 96.9% |
| Taylor expanded in x around 0 | 96.6% |
| Final simplification | 96.6% |
(FPCore (wj x) :precision binary64 (* x (+ 1.0 (* wj -2.0))))
double code(double wj, double x) {
return x * (1.0 + (wj * -2.0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * (1.0d0 + (wj * (-2.0d0)))
end function
public static double code(double wj, double x) {
return x * (1.0 + (wj * -2.0));
}
def code(wj, x):
    return x * (1.0 + (wj * -2.0))
function code(wj, x)
    return Float64(x * Float64(1.0 + Float64(wj * -2.0)))
end
function tmp = code(wj, x)
    tmp = x * (1.0 + (wj * -2.0));
end
code[wj_, x_] := N[(x * N[(1.0 + N[(wj * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + wj \cdot -2\right)
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 79.3% |
| distribute-rgt1-in | 80.9% |
| associate-/l/ | 80.9% |
| div-sub | 79.3% |
| associate-/l* | 79.3% |
| *-inverses | 81.6% |
| *-rgt-identity | 81.6% |
| Simplified | 81.6% |
| Taylor expanded in wj around 0 | 96.9% |
| cancel-sign-sub-inv | 96.9% |
| metadata-eval | 96.9% |
| distribute-rgt-out | 96.9% |
| metadata-eval | 96.9% |
| *-commutative | 96.9% |
| Simplified | 96.9% |
| Taylor expanded in x around 0 | 96.6% |
| Taylor expanded in x around inf | 85.9% |
| Final simplification | 85.9% |
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x):
    return wj
function code(wj, x)
    return wj
end
function tmp = code(wj, x)
    tmp = wj;
end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 79.3% |
| distribute-rgt1-in | 80.9% |
| associate-/l/ | 80.9% |
| div-sub | 79.3% |
| associate-/l* | 79.3% |
| *-inverses | 81.6% |
| *-rgt-identity | 81.6% |
| Simplified | 81.6% |
| Taylor expanded in wj around inf | 4.0% |
| Final simplification | 4.0% |
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x):
    return x
function code(wj, x)
    return x
end
function tmp = code(wj, x)
    tmp = x;
end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 79.3% |
| distribute-rgt1-in | 80.9% |
| associate-/l/ | 80.9% |
| div-sub | 79.3% |
| associate-/l* | 79.3% |
| *-inverses | 81.6% |
| *-rgt-identity | 81.6% |
| Simplified | 81.6% |
| Taylor expanded in wj around 0 | 85.2% |
| Final simplification | 85.2% |
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x):
    return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x)
    return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj))))))
end
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
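This last form matches the :alt expression in the reproduction input below, i.e. the developer's own rewrite. It is algebraically equal to the initial program, with the single quotient split into two terms:

\begin{array}{l}
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right) = wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}}
\end{array}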
herbie shell --seed 2024131
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))