
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! One Newton-iteration step for the Lambert W function (solving w*e^w = x):
! code = wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj))
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: e_wj
! Evaluate exp(wj) once instead of twice.
e_wj = exp(wj)
t_0 = wj * e_wj
code = wj - ((t_0 - x) / (e_wj + t_0))
end function
/** One Newton-iteration step for the Lambert W function (solving w*e^w = x):
 *  returns wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)). */
public static double code(double wj, double x) {
double eWj = Math.exp(wj); // evaluate Math.exp(wj) once instead of twice
double t_0 = wj * eWj;
return wj - ((t_0 - x) / (eWj + t_0));
}
def code(wj, x):
    """One Newton-iteration step for the Lambert W function (solving w*e^w = x).

    Returns wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
    Note: the original line flattened two statements after the colon,
    which is a Python syntax error; this is the runnable form.
    """
    e_wj = math.exp(wj)  # evaluate exp(wj) once instead of twice
    t_0 = wj * e_wj
    return wj - ((t_0 - x) / (e_wj + t_0))
function code(wj, x)
    # One Newton step for the Lambert W function (solving w*e^w = x).
    # The original line ran two statements together with no separator
    # (a parse error); this is the runnable multi-line form.
    e_wj = exp(wj)  # evaluate exp(wj) once instead of twice
    t_0 = Float64(wj * e_wj)
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(e_wj + t_0)))
end
function tmp = code(wj, x)
% CODE  One Newton-iteration step for the Lambert W function.
%   Solving w*exp(w) = x: returns wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
t_0 = wj * exp(wj);
tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* One Newton-iteration step for the Lambert W function (solving w*E^w = x):
   wj - (wj*E^wj - x)/(E^wj + wj*E^wj), with every intermediate rounded via
   N[..., $MachinePrecision] to emulate binary64 evaluation. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton-iteration step for the Lambert W function (solving w*e^w = x).

    Returns wj - (wj*exp(wj) - x) / (exp(wj) + wj*exp(wj)).
    Note: the original line flattened two statements after the colon,
    which is a Python syntax error; this is the runnable form.
    """
    e_wj = math.exp(wj)  # evaluate exp(wj) once instead of twice
    t_0 = wj * e_wj
    return wj - ((t_0 - x) / (e_wj + t_0))
function code(wj, x)
    # One Newton step for the Lambert W function (solving w*e^w = x).
    # The original line ran two statements together with no separator
    # (a parse error); this is the runnable multi-line form.
    e_wj = exp(wj)  # evaluate exp(wj) once instead of twice
    t_0 = Float64(wj * e_wj)
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(e_wj + t_0)))
end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x) :precision binary64 (* x (+ (/ (exp (- wj)) (+ wj 1.0)) (* (pow wj 2.0) (+ (/ 1.0 x) (* wj (+ (/ wj x) (/ -1.0 x))))))))
double code(double wj, double x) {
return x * ((exp(-wj) / (wj + 1.0)) + (pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * ((exp(-wj) / (wj + 1.0d0)) + ((wj ** 2.0d0) * ((1.0d0 / x) + (wj * ((wj / x) + ((-1.0d0) / x))))))
end function
public static double code(double wj, double x) {
return x * ((Math.exp(-wj) / (wj + 1.0)) + (Math.pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))));
}
def code(wj, x): return x * ((math.exp(-wj) / (wj + 1.0)) + (math.pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))))
function code(wj, x) return Float64(x * Float64(Float64(exp(Float64(-wj)) / Float64(wj + 1.0)) + Float64((wj ^ 2.0) * Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(wj / x) + Float64(-1.0 / x))))))) end
function tmp = code(wj, x) tmp = x * ((exp(-wj) / (wj + 1.0)) + ((wj ^ 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x)))))); end
code[wj_, x_] := N[(x * N[(N[(N[Exp[(-wj)], $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[Power[wj, 2.0], $MachinePrecision] * N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(wj / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(\frac{e^{-wj}}{wj + 1} + {wj}^{2} \cdot \left(\frac{1}{x} + wj \cdot \left(\frac{wj}{x} + \frac{-1}{x}\right)\right)\right)
\end{array}
Initial program 79.4%
distribute-rgt1-in 79.8%
associate-/l/ 79.8%
div-sub 79.4%
associate-/l* 79.4%
*-inverses 80.2%
*-rgt-identity 80.2%
Simplified 80.2%
Taylor expanded in x around inf 80.7%
associate--l+ 89.2%
associate-/r* 89.2%
rec-exp 89.2%
+-commutative 89.2%
+-commutative 89.2%
Simplified 89.2%
Taylor expanded in wj around 0 99.0%
Final simplification 99.0%
(FPCore (wj x) :precision binary64 (* x (+ (* (pow wj 2.0) (+ (/ 1.0 x) (* wj (+ (/ wj x) (/ -1.0 x))))) (+ 1.0 (* wj -2.0)))))
double code(double wj, double x) {
return x * ((pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))) + (1.0 + (wj * -2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * (((wj ** 2.0d0) * ((1.0d0 / x) + (wj * ((wj / x) + ((-1.0d0) / x))))) + (1.0d0 + (wj * (-2.0d0))))
end function
public static double code(double wj, double x) {
return x * ((Math.pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))) + (1.0 + (wj * -2.0)));
}
def code(wj, x): return x * ((math.pow(wj, 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))) + (1.0 + (wj * -2.0)))
function code(wj, x) return Float64(x * Float64(Float64((wj ^ 2.0) * Float64(Float64(1.0 / x) + Float64(wj * Float64(Float64(wj / x) + Float64(-1.0 / x))))) + Float64(1.0 + Float64(wj * -2.0)))) end
function tmp = code(wj, x) tmp = x * (((wj ^ 2.0) * ((1.0 / x) + (wj * ((wj / x) + (-1.0 / x))))) + (1.0 + (wj * -2.0))); end
code[wj_, x_] := N[(x * N[(N[(N[Power[wj, 2.0], $MachinePrecision] * N[(N[(1.0 / x), $MachinePrecision] + N[(wj * N[(N[(wj / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(wj * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left({wj}^{2} \cdot \left(\frac{1}{x} + wj \cdot \left(\frac{wj}{x} + \frac{-1}{x}\right)\right) + \left(1 + wj \cdot -2\right)\right)
\end{array}
Initial program 79.4%
distribute-rgt1-in 79.8%
associate-/l/ 79.8%
div-sub 79.4%
associate-/l* 79.4%
*-inverses 80.2%
*-rgt-identity 80.2%
Simplified 80.2%
Taylor expanded in x around inf 80.7%
associate--l+ 89.2%
associate-/r* 89.2%
rec-exp 89.2%
+-commutative 89.2%
+-commutative 89.2%
Simplified 89.2%
Taylor expanded in wj around 0 99.0%
Taylor expanded in wj around 0 98.0%
*-commutative 98.0%
Simplified 98.0%
Final simplification 98.0%
(FPCore (wj x) :precision binary64 (+ x (* wj (- (* wj (- (- 1.0 wj) (+ (* x -4.0) (* x 1.5)))) (* x 2.0)))))
double code(double wj, double x) {
return x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * ((1.0d0 - wj) - ((x * (-4.0d0)) + (x * 1.5d0)))) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)));
}
def code(wj, x): return x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(Float64(1.0 - wj) - Float64(Float64(x * -4.0) + Float64(x * 1.5)))) - Float64(x * 2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * ((wj * ((1.0 - wj) - ((x * -4.0) + (x * 1.5)))) - (x * 2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(N[(1.0 - wj), $MachinePrecision] - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(\left(1 - wj\right) - \left(x \cdot -4 + x \cdot 1.5\right)\right) - x \cdot 2\right)
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around 0 97.9%
Taylor expanded in x around 0 98.0%
mul-1-neg98.0%
Simplified98.0%
Final simplification98.0%
(FPCore (wj x) :precision binary64 (+ x (* wj (- (* wj (* x (- (+ (/ 1.0 x) 2.5) (/ wj x)))) (* x 2.0)))))
double code(double wj, double x) {
return x + (wj * ((wj * (x * (((1.0 / x) + 2.5) - (wj / x)))) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * (x * (((1.0d0 / x) + 2.5d0) - (wj / x)))) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * (x * (((1.0 / x) + 2.5) - (wj / x)))) - (x * 2.0)));
}
def code(wj, x): return x + (wj * ((wj * (x * (((1.0 / x) + 2.5) - (wj / x)))) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(x * Float64(Float64(Float64(1.0 / x) + 2.5) - Float64(wj / x)))) - Float64(x * 2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * ((wj * (x * (((1.0 / x) + 2.5) - (wj / x)))) - (x * 2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(x * N[(N[(N[(1.0 / x), $MachinePrecision] + 2.5), $MachinePrecision] - N[(wj / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(x \cdot \left(\left(\frac{1}{x} + 2.5\right) - \frac{wj}{x}\right)\right) - x \cdot 2\right)
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around 0 97.9%
Taylor expanded in x around 0 98.0%
mul-1-neg98.0%
Simplified98.0%
Taylor expanded in x around inf 97.9%
Final simplification97.9%
(FPCore (wj x) :precision binary64 (+ x (* wj (- (* wj (- 1.0 wj)) (* x 2.0)))))
double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * ((wj * (1.0d0 - wj)) - (x * 2.0d0)))
end function
public static double code(double wj, double x) {
return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)));
}
def code(wj, x): return x + (wj * ((wj * (1.0 - wj)) - (x * 2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(Float64(wj * Float64(1.0 - wj)) - Float64(x * 2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * ((wj * (1.0 - wj)) - (x * 2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(N[(wj * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj \cdot \left(1 - wj\right) - x \cdot 2\right)
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around 0 97.9%
Taylor expanded in x around 0 98.0%
mul-1-neg98.0%
Simplified98.0%
Taylor expanded in x around 0 97.8%
Final simplification97.8%
(FPCore (wj x) :precision binary64 (+ x (* wj (+ wj (* x -2.0)))))
double code(double wj, double x) {
return x + (wj * (wj + (x * -2.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + (wj * (wj + (x * (-2.0d0))))
end function
public static double code(double wj, double x) {
return x + (wj * (wj + (x * -2.0)));
}
def code(wj, x): return x + (wj * (wj + (x * -2.0)))
function code(wj, x) return Float64(x + Float64(wj * Float64(wj + Float64(x * -2.0)))) end
function tmp = code(wj, x) tmp = x + (wj * (wj + (x * -2.0))); end
code[wj_, x_] := N[(x + N[(wj * N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + wj \cdot \left(wj + x \cdot -2\right)
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around 0 97.2%
cancel-sign-sub-inv97.2%
metadata-eval97.2%
distribute-rgt-out97.2%
metadata-eval97.2%
*-commutative97.2%
Simplified97.2%
Taylor expanded in x around 0 97.1%
Final simplification97.1%
(FPCore (wj x) :precision binary64 (/ x (+ 1.0 (* wj 2.0))))
double code(double wj, double x) {
return x / (1.0 + (wj * 2.0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x / (1.0d0 + (wj * 2.0d0))
end function
public static double code(double wj, double x) {
return x / (1.0 + (wj * 2.0));
}
def code(wj, x): return x / (1.0 + (wj * 2.0))
function code(wj, x) return Float64(x / Float64(1.0 + Float64(wj * 2.0))) end
function tmp = code(wj, x) tmp = x / (1.0 + (wj * 2.0)); end
code[wj_, x_] := N[(x / N[(1.0 + N[(wj * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{1 + wj \cdot 2}
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in x around inf 86.7%
+-commutative86.7%
Simplified86.7%
Taylor expanded in wj around 0 85.9%
*-commutative85.9%
Simplified85.9%
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* x wj))))
double code(double wj, double x) {
return x + (-2.0 * (x * wj));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (x * wj))
end function
public static double code(double wj, double x) {
return x + (-2.0 * (x * wj));
}
def code(wj, x): return x + (-2.0 * (x * wj))
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(x * wj))) end
function tmp = code(wj, x) tmp = x + (-2.0 * (x * wj)); end
code[wj_, x_] := N[(x + N[(-2.0 * N[(x * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(x \cdot wj\right)
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around 0 85.8%
*-commutative85.8%
Simplified85.8%
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around 0 85.4%
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around inf 4.0%
(FPCore (wj x) :precision binary64 -1.0)
double code(double wj, double x) {
return -1.0;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x): return -1.0
function code(wj, x) return -1.0 end
function tmp = code(wj, x) tmp = -1.0; end
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 79.4%
distribute-rgt1-in79.8%
associate-/l/79.8%
div-sub79.4%
associate-/l*79.4%
*-inverses80.2%
*-rgt-identity80.2%
Simplified80.2%
Taylor expanded in wj around inf 3.9%
Taylor expanded in wj around 0 3.4%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024132
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))