
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
    ! One Newton step for Lambert W: w - (w*e^w - x) / (e^w + w*e^w).
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: ew, wew
    ew = exp(wj)
    wew = wj * ew
    code = wj - ((wew - x) / (ew + wew))
end function
public static double code(double wj, double x) {
// One Newton step for Lambert W: w - (w*e^w - x) / (e^w + w*e^w).
final double ew = Math.exp(wj);
final double wew = wj * ew;
return wj - (wew - x) / (ew + wew);
}
def code(wj, x):
    """One Newton step for Lambert W: w - (w*e^w - x) / (e^w + w*e^w).

    Fix: the original collapsed the assignment and the return onto a
    single line, which is a SyntaxError in Python; reformatted onto
    separate statements with behavior unchanged.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    # One Newton step for Lambert W: w - (w*e^w - x) / (e^w + w*e^w).
    # Fix: the original put several statements on one line with no
    # separators, which does not parse in Julia; split onto lines.
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
    % One Newton step for Lambert W: w - (w*e^w - x) / (e^w + w*e^w).
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
(* One Newton step for Lambert W: w - (w*e^w - x)/(e^w + w*e^w),
   with t$95$0 = wj*Exp[wj]; every operation rounded via N[..., $MachinePrecision]. *)
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    """One Newton step for Lambert W: w - (w*e^w - x) / (e^w + w*e^w).

    Fix: the original collapsed the assignment and the return onto a
    single line, which is a SyntaxError in Python; reformatted onto
    separate statements with behavior unchanged.
    """
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    # One Newton step for Lambert W: w - (w*e^w - x) / (e^w + w*e^w).
    # Fix: the original put several statements on one line with no
    # separators, which does not parse in Julia; split onto lines.
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(if (<= wj -3.2e-7)
(fma (- (/ x (exp wj)) wj) (/ 1.0 (+ wj 1.0)) wj)
(if (<= wj 2.9e-9)
(- (+ (+ x (* -2.0 (* wj x))) (* wj wj)) (* wj (* wj wj)))
(+ wj (/ (- (* x (exp (- wj))) wj) (+ wj 1.0))))))
double code(double wj, double x) {
// Piecewise rewrite of the Lambert W Newton step (Herbie alternative):
// branches on wj to reduce rounding error near wj = 0.
double tmp;
if (wj <= -3.2e-7) {
// Rearranged quotient fused via fma (single rounding for a*b + c).
tmp = fma(((x / exp(wj)) - wj), (1.0 / (wj + 1.0)), wj);
} else if (wj <= 2.9e-9) {
// Tiny |wj|: Taylor expansion of the step around wj = 0
// (per the derivation log below), avoiding the cancelling quotient.
tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj));
} else {
// Positive wj: multiplies by exp(-wj) instead of dividing by exp(wj).
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
function code(wj, x)
    # Piecewise Lambert W Newton step (Herbie alternative).
    # Fix: the original jammed the whole if/elseif/else onto one line
    # without statement separators, which does not parse in Julia.
    tmp = 0.0
    if (wj <= -3.2e-7)
        tmp = fma(Float64(Float64(x / exp(wj)) - wj), Float64(1.0 / Float64(wj + 1.0)), wj)
    elseif (wj <= 2.9e-9)
        tmp = Float64(Float64(Float64(x + Float64(-2.0 * Float64(wj * x))) + Float64(wj * wj)) - Float64(wj * Float64(wj * wj)))
    else
        tmp = Float64(wj + Float64(Float64(Float64(x * exp(Float64(-wj))) - wj) / Float64(wj + 1.0)))
    end
    return tmp
end
code[wj_, x_] := If[LessEqual[wj, -3.2e-7], N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision], If[LessEqual[wj, 2.9e-9], N[(N[(N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * wj), $MachinePrecision]), $MachinePrecision] - N[(wj * N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x * N[Exp[(-wj)], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.2 \cdot 10^{-7}:\\
\;\;\;\;\mathsf{fma}\left(\frac{x}{e^{wj}} - wj, \frac{1}{wj + 1}, wj\right)\\
\mathbf{elif}\;wj \leq 2.9 \cdot 10^{-9}:\\
\;\;\;\;\left(\left(x + -2 \cdot \left(wj \cdot x\right)\right) + wj \cdot wj\right) - wj \cdot \left(wj \cdot wj\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{x \cdot e^{-wj} - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < -3.2000000000000001e-7: Initial program 60.1%
sub-neg60.1%
div-sub60.1%
sub-neg60.1%
+-commutative60.1%
distribute-neg-in60.1%
remove-double-neg60.1%
sub-neg60.1%
div-sub60.1%
distribute-rgt1-in92.8%
associate-/l/93.4%
Simplified93.4%
+-commutative93.4%
div-inv93.4%
fma-def95.2%
Applied egg-rr95.2%
if -3.2000000000000001e-7 < wj < 2.89999999999999991e-9Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.0%
associate-/l/82.0%
Simplified82.0%
Taylor expanded in wj around 0 100.0%
Taylor expanded in x around 0 100.0%
unpow2100.0%
Simplified100.0%
Taylor expanded in x around 0 100.0%
unpow3100.0%
Applied egg-rr100.0%
if 2.89999999999999991e-9 < wj Initial program 99.2%
sub-neg99.2%
div-sub99.2%
sub-neg99.2%
+-commutative99.2%
distribute-neg-in99.2%
remove-double-neg99.2%
sub-neg99.2%
div-sub99.2%
distribute-rgt1-in99.0%
associate-/l/99.4%
Simplified99.4%
clear-num99.4%
associate-/r/99.4%
rec-exp99.6%
Applied egg-rr99.6%
Final simplification99.8%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (+ (* x -4.0) (* x 1.5))) (t_1 (* wj (exp wj))))
(if (<= (+ wj (/ (- x t_1) (+ (exp wj) t_1))) 2e-14)
(+
(*
(pow wj 3.0)
(- (- (- -1.0 (* -2.0 t_0)) (* x -3.0)) (* x 0.6666666666666666)))
(+ (* (- 1.0 t_0) (pow wj 2.0)) (+ x (* -2.0 (* wj x)))))
(fma (- (/ x (exp wj)) wj) (/ 1.0 (+ wj 1.0)) wj))))
double code(double wj, double x) {
// Herbie alternative: Taylor-style polynomial when the recomputed step
// value is <= 2e-14, otherwise the fma-rearranged quotient form.
double t_0 = (x * -4.0) + (x * 1.5);
double t_1 = wj * exp(wj);
double tmp;
// NOTE(review): the guard re-evaluates the Newton step itself, so exp()
// runs in both the guard and the taken branch — as generated by Herbie.
if ((wj + ((x - t_1) / (exp(wj) + t_1))) <= 2e-14) {
tmp = (pow(wj, 3.0) * (((-1.0 - (-2.0 * t_0)) - (x * -3.0)) - (x * 0.6666666666666666))) + (((1.0 - t_0) * pow(wj, 2.0)) + (x + (-2.0 * (wj * x))));
} else {
// fma gives a single rounding for (x/e^wj - wj) * 1/(wj+1) + wj.
tmp = fma(((x / exp(wj)) - wj), (1.0 / (wj + 1.0)), wj);
}
return tmp;
}
function code(wj, x)
    # Conditional Herbie rewrite of the Lambert W Newton step.
    # Fix: invalid one-line form (no statement separators) reformatted;
    # behavior unchanged.
    t_0 = Float64(Float64(x * -4.0) + Float64(x * 1.5))
    t_1 = Float64(wj * exp(wj))
    tmp = 0.0
    if (Float64(wj + Float64(Float64(x - t_1) / Float64(exp(wj) + t_1))) <= 2e-14)
        tmp = Float64(Float64((wj ^ 3.0) * Float64(Float64(Float64(-1.0 - Float64(-2.0 * t_0)) - Float64(x * -3.0)) - Float64(x * 0.6666666666666666))) + Float64(Float64(Float64(1.0 - t_0) * (wj ^ 2.0)) + Float64(x + Float64(-2.0 * Float64(wj * x)))))
    else
        tmp = fma(Float64(Float64(x / exp(wj)) - wj), Float64(1.0 / Float64(wj + 1.0)), wj)
    end
    return tmp
end
code[wj_, x_] := Block[{t$95$0 = N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj + N[(N[(x - t$95$1), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e-14], N[(N[(N[Power[wj, 3.0], $MachinePrecision] * N[(N[(N[(-1.0 - N[(-2.0 * t$95$0), $MachinePrecision]), $MachinePrecision] - N[(x * -3.0), $MachinePrecision]), $MachinePrecision] - N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(1.0 - t$95$0), $MachinePrecision] * N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] * N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot -4 + x \cdot 1.5\\
t_1 := wj \cdot e^{wj}\\
\mathbf{if}\;wj + \frac{x - t_1}{e^{wj} + t_1} \leq 2 \cdot 10^{-14}:\\
\;\;\;\;{wj}^{3} \cdot \left(\left(\left(-1 - -2 \cdot t_0\right) - x \cdot -3\right) - x \cdot 0.6666666666666666\right) + \left(\left(1 - t_0\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(wj \cdot x\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{x}{e^{wj}} - wj, \frac{1}{wj + 1}, wj\right)\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 2e-14Initial program 75.3%
sub-neg75.3%
div-sub75.3%
sub-neg75.3%
+-commutative75.3%
distribute-neg-in75.3%
remove-double-neg75.3%
sub-neg75.3%
div-sub75.3%
distribute-rgt1-in75.9%
associate-/l/75.9%
Simplified75.9%
Taylor expanded in wj around 0 99.3%
if 2e-14 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 98.1%
sub-neg98.1%
div-sub98.1%
sub-neg98.1%
+-commutative98.1%
distribute-neg-in98.1%
remove-double-neg98.1%
sub-neg98.1%
div-sub98.1%
distribute-rgt1-in99.4%
associate-/l/99.4%
Simplified99.4%
+-commutative99.4%
div-inv99.4%
fma-def99.5%
Applied egg-rr99.5%
Final simplification99.3%
(FPCore (wj x) :precision binary64 (if (<= wj 2.9e-9) (- (+ (+ x (* -2.0 (* wj x))) (* wj wj)) (* wj (* wj wj))) (+ wj (/ (- (* x (exp (- wj))) wj) (+ wj 1.0)))))
double code(double wj, double x) {
// Two-branch Herbie alternative of the Lambert W Newton step:
// Taylor form for tiny wj, exp(-wj) rearrangement otherwise.
double tmp;
if (wj <= 2.9e-9) {
// Taylor expansion of the step around wj = 0 (per derivation log).
tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj));
} else {
// Multiplies by exp(-wj) rather than dividing by exp(wj).
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
! Two-branch Herbie alternative of the Lambert W Newton step:
! Taylor form for tiny wj, exp(-wj) rearrangement otherwise.
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 2.9d-9) then
! Taylor expansion of the step around wj = 0 (per derivation log).
tmp = ((x + ((-2.0d0) * (wj * x))) + (wj * wj)) - (wj * (wj * wj))
else
! Multiplies by exp(-wj) rather than dividing by exp(wj).
tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
// Two-branch Herbie alternative of the Lambert W Newton step:
// Taylor form for tiny wj, exp(-wj) rearrangement otherwise.
double tmp;
if (wj <= 2.9e-9) {
// Taylor expansion of the step around wj = 0 (per derivation log).
tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj));
} else {
// Multiplies by exp(-wj) rather than dividing by exp(wj).
tmp = wj + (((x * Math.exp(-wj)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Two-branch Newton-step approximation for Lambert W.

    Taylor form for wj <= 2.9e-9, exp(-wj) rearrangement otherwise.
    Fix: the original single-line if/else was a Python SyntaxError;
    reformatted with behavior unchanged (both branches assign tmp, so
    the dummy initializer was dropped).
    """
    if wj <= 2.9e-9:
        tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj))
    else:
        tmp = wj + (((x * math.exp(-wj)) - wj) / (wj + 1.0))
    return tmp
function code(wj, x)
    # Two-branch Lambert W Newton step (Taylor form for tiny wj,
    # exp(-wj) rearrangement otherwise).
    # Fix: invalid one-line if/else (no separators) reformatted.
    tmp = 0.0
    if (wj <= 2.9e-9)
        tmp = Float64(Float64(Float64(x + Float64(-2.0 * Float64(wj * x))) + Float64(wj * wj)) - Float64(wj * Float64(wj * wj)))
    else
        tmp = Float64(wj + Float64(Float64(Float64(x * exp(Float64(-wj))) - wj) / Float64(wj + 1.0)))
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 2.9e-9) tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj)); else tmp = wj + (((x * exp(-wj)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 2.9e-9], N[(N[(N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * wj), $MachinePrecision]), $MachinePrecision] - N[(wj * N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x * N[Exp[(-wj)], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 2.9 \cdot 10^{-9}:\\
\;\;\;\;\left(\left(x + -2 \cdot \left(wj \cdot x\right)\right) + wj \cdot wj\right) - wj \cdot \left(wj \cdot wj\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{x \cdot e^{-wj} - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 2.89999999999999991e-9Initial program 81.5%
sub-neg81.5%
div-sub81.5%
sub-neg81.5%
+-commutative81.5%
distribute-neg-in81.5%
remove-double-neg81.5%
sub-neg81.5%
div-sub81.5%
distribute-rgt1-in82.3%
associate-/l/82.3%
Simplified82.3%
Taylor expanded in wj around 0 98.3%
Taylor expanded in x around 0 98.2%
unpow298.2%
Simplified98.2%
Taylor expanded in x around 0 98.2%
unpow398.2%
Applied egg-rr98.2%
if 2.89999999999999991e-9 < wj Initial program 99.2%
sub-neg99.2%
div-sub99.2%
sub-neg99.2%
+-commutative99.2%
distribute-neg-in99.2%
remove-double-neg99.2%
sub-neg99.2%
div-sub99.2%
distribute-rgt1-in99.0%
associate-/l/99.4%
Simplified99.4%
clear-num99.4%
associate-/r/99.4%
rec-exp99.6%
Applied egg-rr99.6%
Final simplification98.2%
(FPCore (wj x) :precision binary64 (if (<= wj 2.9e-9) (- (+ (+ x (* -2.0 (* wj x))) (* wj wj)) (* wj (* wj wj))) (+ wj (/ (- (/ x (exp wj)) wj) (+ wj 1.0)))))
double code(double wj, double x) {
double tmp;
if (wj <= 2.9e-9) {
tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj));
} else {
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 2.9d-9) then
tmp = ((x + ((-2.0d0) * (wj * x))) + (wj * wj)) - (wj * (wj * wj))
else
tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0d0))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (wj <= 2.9e-9) {
tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj));
} else {
tmp = wj + (((x / Math.exp(wj)) - wj) / (wj + 1.0));
}
return tmp;
}
def code(wj, x):
    """Two-branch Newton-step approximation for Lambert W.

    Taylor form for wj <= 2.9e-9, x/exp(wj) rearrangement otherwise.
    Fix: the original single-line if/else was a Python SyntaxError;
    reformatted with behavior unchanged.
    """
    if wj <= 2.9e-9:
        tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj))
    else:
        tmp = wj + (((x / math.exp(wj)) - wj) / (wj + 1.0))
    return tmp
function code(wj, x)
    # Two-branch Lambert W Newton step (Taylor form for tiny wj,
    # x/exp(wj) rearrangement otherwise).
    # Fix: invalid one-line if/else (no separators) reformatted.
    tmp = 0.0
    if (wj <= 2.9e-9)
        tmp = Float64(Float64(Float64(x + Float64(-2.0 * Float64(wj * x))) + Float64(wj * wj)) - Float64(wj * Float64(wj * wj)))
    else
        tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(wj + 1.0)))
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 2.9e-9) tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj)); else tmp = wj + (((x / exp(wj)) - wj) / (wj + 1.0)); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 2.9e-9], N[(N[(N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * wj), $MachinePrecision]), $MachinePrecision] - N[(wj * N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 2.9 \cdot 10^{-9}:\\
\;\;\;\;\left(\left(x + -2 \cdot \left(wj \cdot x\right)\right) + wj \cdot wj\right) - wj \cdot \left(wj \cdot wj\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}\\
\end{array}
\end{array}
if wj < 2.89999999999999991e-9Initial program 81.5%
sub-neg81.5%
div-sub81.5%
sub-neg81.5%
+-commutative81.5%
distribute-neg-in81.5%
remove-double-neg81.5%
sub-neg81.5%
div-sub81.5%
distribute-rgt1-in82.3%
associate-/l/82.3%
Simplified82.3%
Taylor expanded in wj around 0 98.3%
Taylor expanded in x around 0 98.2%
unpow298.2%
Simplified98.2%
Taylor expanded in x around 0 98.2%
unpow398.2%
Applied egg-rr98.2%
if 2.89999999999999991e-9 < wj Initial program 99.2%
sub-neg99.2%
div-sub99.2%
sub-neg99.2%
+-commutative99.2%
distribute-neg-in99.2%
remove-double-neg99.2%
sub-neg99.2%
div-sub99.2%
distribute-rgt1-in99.0%
associate-/l/99.4%
Simplified99.4%
Final simplification98.2%
(FPCore (wj x) :precision binary64 (- (+ (+ x (* -2.0 (* wj x))) (* wj wj)) (* wj (* wj wj))))
double code(double wj, double x) {
return ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = ((x + ((-2.0d0) * (wj * x))) + (wj * wj)) - (wj * (wj * wj))
end function
public static double code(double wj, double x) {
return ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj));
}
def code(wj, x): return ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj))
function code(wj, x) return Float64(Float64(Float64(x + Float64(-2.0 * Float64(wj * x))) + Float64(wj * wj)) - Float64(wj * Float64(wj * wj))) end
function tmp = code(wj, x) tmp = ((x + (-2.0 * (wj * x))) + (wj * wj)) - (wj * (wj * wj)); end
code[wj_, x_] := N[(N[(N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(wj * wj), $MachinePrecision]), $MachinePrecision] - N[(wj * N[(wj * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x + -2 \cdot \left(wj \cdot x\right)\right) + wj \cdot wj\right) - wj \cdot \left(wj \cdot wj\right)
\end{array}
Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.8%
associate-/l/82.8%
Simplified82.8%
Taylor expanded in wj around 0 96.8%
Taylor expanded in x around 0 96.4%
unpow296.4%
Simplified96.4%
Taylor expanded in x around 0 96.4%
unpow396.4%
Applied egg-rr96.4%
Final simplification96.4%
(FPCore (wj x) :precision binary64 (if (<= x -5.2e-152) (+ wj (/ (- (* x (- 1.0 wj)) wj) (+ wj 1.0))) (* x (- (/ 1.0 (+ wj 1.0)) (/ wj (+ wj 1.0))))))
double code(double wj, double x) {
double tmp;
if (x <= -5.2e-152) {
tmp = wj + (((x * (1.0 - wj)) - wj) / (wj + 1.0));
} else {
tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-5.2d-152)) then
tmp = wj + (((x * (1.0d0 - wj)) - wj) / (wj + 1.0d0))
else
tmp = x * ((1.0d0 / (wj + 1.0d0)) - (wj / (wj + 1.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (x <= -5.2e-152) {
tmp = wj + (((x * (1.0 - wj)) - wj) / (wj + 1.0));
} else {
tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)));
}
return tmp;
}
def code(wj, x):
    """Herbie alternative of the Lambert W Newton step, branching on x.

    Fix: the original single-line if/else was a Python SyntaxError;
    reformatted with behavior unchanged.
    """
    if x <= -5.2e-152:
        tmp = wj + (((x * (1.0 - wj)) - wj) / (wj + 1.0))
    else:
        tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)))
    return tmp
function code(wj, x)
    # Lambert W Newton-step alternative branching on x.
    # Fix: invalid one-line if/else (no separators) reformatted.
    tmp = 0.0
    if (x <= -5.2e-152)
        tmp = Float64(wj + Float64(Float64(Float64(x * Float64(1.0 - wj)) - wj) / Float64(wj + 1.0)))
    else
        tmp = Float64(x * Float64(Float64(1.0 / Float64(wj + 1.0)) - Float64(wj / Float64(wj + 1.0))))
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (x <= -5.2e-152) tmp = wj + (((x * (1.0 - wj)) - wj) / (wj + 1.0)); else tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[x, -5.2e-152], N[(wj + N[(N[(N[(x * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -5.2 \cdot 10^{-152}:\\
\;\;\;\;wj + \frac{x \cdot \left(1 - wj\right) - wj}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{1}{wj + 1} - \frac{wj}{wj + 1}\right)\\
\end{array}
\end{array}
if x < -5.20000000000000026e-152Initial program 96.6%
sub-neg96.6%
div-sub96.6%
sub-neg96.6%
+-commutative96.6%
distribute-neg-in96.6%
remove-double-neg96.6%
sub-neg96.6%
div-sub96.6%
distribute-rgt1-in97.5%
associate-/l/97.5%
Simplified97.5%
Taylor expanded in wj around 0 95.2%
associate-*r*95.2%
neg-mul-195.2%
distribute-lft1-in95.2%
+-commutative95.2%
sub-neg95.2%
Simplified95.2%
if -5.20000000000000026e-152 < x Initial program 71.2%
sub-neg71.2%
div-sub71.2%
sub-neg71.2%
+-commutative71.2%
distribute-neg-in71.2%
remove-double-neg71.2%
sub-neg71.2%
div-sub71.2%
distribute-rgt1-in71.9%
associate-/l/71.9%
Simplified71.9%
Taylor expanded in wj around 0 70.0%
associate-*r*70.0%
neg-mul-170.0%
distribute-lft1-in70.0%
+-commutative70.0%
sub-neg70.0%
Simplified70.0%
Taylor expanded in x around inf 81.5%
Final simplification87.3%
(FPCore (wj x) :precision binary64 (if (<= x -1e-151) (+ wj (/ (- (- x (* wj x)) wj) (+ wj 1.0))) (* x (- (/ 1.0 (+ wj 1.0)) (/ wj (+ wj 1.0))))))
double code(double wj, double x) {
double tmp;
if (x <= -1e-151) {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
} else {
tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)));
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (x <= (-1d-151)) then
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0d0))
else
tmp = x * ((1.0d0 / (wj + 1.0d0)) - (wj / (wj + 1.0d0)))
end if
code = tmp
end function
public static double code(double wj, double x) {
double tmp;
if (x <= -1e-151) {
tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0));
} else {
tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)));
}
return tmp;
}
def code(wj, x):
    """Herbie alternative of the Lambert W Newton step, branching on x.

    Fix: the original single-line if/else was a Python SyntaxError;
    reformatted with behavior unchanged.
    """
    if x <= -1e-151:
        tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0))
    else:
        tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)))
    return tmp
function code(wj, x)
    # Lambert W Newton-step alternative branching on x.
    # Fix: invalid one-line if/else (no separators) reformatted.
    tmp = 0.0
    if (x <= -1e-151)
        tmp = Float64(wj + Float64(Float64(Float64(x - Float64(wj * x)) - wj) / Float64(wj + 1.0)))
    else
        tmp = Float64(x * Float64(Float64(1.0 / Float64(wj + 1.0)) - Float64(wj / Float64(wj + 1.0))))
    end
    return tmp
end
function tmp_2 = code(wj, x) tmp = 0.0; if (x <= -1e-151) tmp = wj + (((x - (wj * x)) - wj) / (wj + 1.0)); else tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0))); end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[x, -1e-151], N[(wj + N[(N[(N[(x - N[(wj * x), $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x * N[(N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1 \cdot 10^{-151}:\\
\;\;\;\;wj + \frac{\left(x - wj \cdot x\right) - wj}{wj + 1}\\
\mathbf{else}:\\
\;\;\;\;x \cdot \left(\frac{1}{wj + 1} - \frac{wj}{wj + 1}\right)\\
\end{array}
\end{array}
if x < -9.9999999999999994e-152Initial program 96.6%
sub-neg96.6%
div-sub96.6%
sub-neg96.6%
+-commutative96.6%
distribute-neg-in96.6%
remove-double-neg96.6%
sub-neg96.6%
div-sub96.6%
distribute-rgt1-in97.5%
associate-/l/97.5%
Simplified97.5%
Taylor expanded in wj around 0 95.2%
if -9.9999999999999994e-152 < x Initial program 71.2%
sub-neg71.2%
div-sub71.2%
sub-neg71.2%
+-commutative71.2%
distribute-neg-in71.2%
remove-double-neg71.2%
sub-neg71.2%
div-sub71.2%
distribute-rgt1-in71.9%
associate-/l/71.9%
Simplified71.9%
Taylor expanded in wj around 0 70.0%
associate-*r*70.0%
neg-mul-170.0%
distribute-lft1-in70.0%
+-commutative70.0%
sub-neg70.0%
Simplified70.0%
Taylor expanded in x around inf 81.5%
Final simplification87.3%
(FPCore (wj x) :precision binary64 (* x (- (/ 1.0 (+ wj 1.0)) (/ wj (+ wj 1.0)))))
double code(double wj, double x) {
return x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x * ((1.0d0 / (wj + 1.0d0)) - (wj / (wj + 1.0d0)))
end function
public static double code(double wj, double x) {
return x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)));
}
def code(wj, x): return x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0)))
function code(wj, x) return Float64(x * Float64(Float64(1.0 / Float64(wj + 1.0)) - Float64(wj / Float64(wj + 1.0)))) end
function tmp = code(wj, x) tmp = x * ((1.0 / (wj + 1.0)) - (wj / (wj + 1.0))); end
code[wj_, x_] := N[(x * N[(N[(1.0 / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(\frac{1}{wj + 1} - \frac{wj}{wj + 1}\right)
\end{array}
Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.8%
associate-/l/82.8%
Simplified82.8%
Taylor expanded in wj around 0 80.7%
associate-*r*80.7%
neg-mul-180.7%
distribute-lft1-in80.7%
+-commutative80.7%
sub-neg80.7%
Simplified80.7%
Taylor expanded in x around inf 85.7%
Final simplification85.7%
(FPCore (wj x) :precision binary64 (/ (* x (- 1.0 wj)) (+ wj 1.0)))
double code(double wj, double x) {
return (x * (1.0 - wj)) / (wj + 1.0);
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = (x * (1.0d0 - wj)) / (wj + 1.0d0)
end function
public static double code(double wj, double x) {
return (x * (1.0 - wj)) / (wj + 1.0);
}
def code(wj, x): return (x * (1.0 - wj)) / (wj + 1.0)
function code(wj, x) return Float64(Float64(x * Float64(1.0 - wj)) / Float64(wj + 1.0)) end
function tmp = code(wj, x) tmp = (x * (1.0 - wj)) / (wj + 1.0); end
code[wj_, x_] := N[(N[(x * N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x \cdot \left(1 - wj\right)}{wj + 1}
\end{array}
Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.8%
associate-/l/82.8%
Simplified82.8%
Taylor expanded in wj around 0 80.7%
associate-*r*80.7%
neg-mul-180.7%
distribute-lft1-in80.7%
+-commutative80.7%
sub-neg80.7%
Simplified80.7%
Taylor expanded in x around -inf 85.7%
Final simplification85.7%
(FPCore (wj x) :precision binary64 (/ (- x (* wj x)) (+ wj 1.0)))
double code(double wj, double x) {
return (x - (wj * x)) / (wj + 1.0);
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = (x - (wj * x)) / (wj + 1.0d0)
end function
public static double code(double wj, double x) {
return (x - (wj * x)) / (wj + 1.0);
}
def code(wj, x): return (x - (wj * x)) / (wj + 1.0)
function code(wj, x) return Float64(Float64(x - Float64(wj * x)) / Float64(wj + 1.0)) end
function tmp = code(wj, x) tmp = (x - (wj * x)) / (wj + 1.0); end
code[wj_, x_] := N[(N[(x - N[(wj * x), $MachinePrecision]), $MachinePrecision] / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - wj \cdot x}{wj + 1}
\end{array}
Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.8%
associate-/l/82.8%
Simplified82.8%
Taylor expanded in wj around 0 80.7%
associate-*r*80.7%
neg-mul-180.7%
distribute-lft1-in80.7%
+-commutative80.7%
sub-neg80.7%
Simplified80.7%
Taylor expanded in x around -inf 85.7%
Taylor expanded in wj around 0 85.7%
Final simplification85.7%
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (wj * x))
end function
public static double code(double wj, double x) {
return x + (-2.0 * (wj * x));
}
def code(wj, x): return x + (-2.0 * (wj * x))
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(wj * x))) end
function tmp = code(wj, x) tmp = x + (-2.0 * (wj * x)); end
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + -2 \cdot \left(wj \cdot x\right)
\end{array}
Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.8%
associate-/l/82.8%
Simplified82.8%
Taylor expanded in wj around 0 85.6%
Final simplification85.6%
(FPCore (wj x) :precision binary64 wj)
double code(double wj, double x) {
return wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.8%
associate-/l/82.8%
Simplified82.8%
Taylor expanded in wj around inf 3.8%
Final simplification3.8%
(FPCore (wj x) :precision binary64 x)
double code(double wj, double x) {
return x;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 82.0%
sub-neg82.0%
div-sub82.0%
sub-neg82.0%
+-commutative82.0%
distribute-neg-in82.0%
remove-double-neg82.0%
sub-neg82.0%
div-sub82.0%
distribute-rgt1-in82.8%
associate-/l/82.8%
Simplified82.8%
Taylor expanded in wj around 0 84.9%
Final simplification84.9%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2023174
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:herbie-target
(- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))