Jmat.Real.lambertw, newton loop step


Percentage Accurate: 77.6% → 97.8%
Time: 8.7s
Precision: binary64
Cost: 7812


\[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
\[\begin{array}{l} \mathbf{if}\;wj \leq 1.1 \cdot 10^{-13}:\\ \;\;\;\;\left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(x \cdot wj\right)\right)\\ \mathbf{else}:\\ \;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj}\\ \end{array} \]
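
This expression is a single Newton step for solving w*e^w = x, i.e. for evaluating the Lambert W function: with f(w) = w*e^w - x and f'(w) = e^w + w*e^w, the step computes w - f(w)/f'(w). As a rough illustration of how such a step is used (a sketch only, not the actual Jmat.Real.lambertw source; the starting guess, iteration count, and function name are assumptions), the surrounding loop might look like:

import math

def lambertw_newton(x, w0=0.0, iters=20):
	# Illustrative sketch for nonnegative x: repeatedly apply the Newton
	# step shown above. The starting guess and fixed iteration count are
	# assumptions, not taken from Jmat.Real.lambertw.
	wj = w0
	for _ in range(iters):
		ew = math.exp(wj)
		# Newton update for f(w) = w*exp(w) - x, with f'(w) = exp(w) + w*exp(w)
		wj = wj - (wj * ew - x) / (ew + wj * ew)
	return wj

Herbie's version changes only the body of the step: for wj at or below 1.1e-13 it substitutes a low-order polynomial in wj (see the Derivation section), and otherwise it uses an algebraically rearranged form of the same update. The listings that follow give both versions in several languages.
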
(FPCore (wj x)
 :precision binary64
 (- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))
(FPCore (wj x)
 :precision binary64
 (if (<= wj 1.1e-13)
   (+
    (* (- 1.0 (+ (* x -4.0) (* x 1.5))) (pow wj 2.0))
    (+ x (* -2.0 (* x wj))))
   (+ wj (/ (- (/ x (exp wj)) wj) (+ 1.0 wj)))))
#include <math.h>

/* Original program */
double code(double wj, double x) {
	return wj - (((wj * exp(wj)) - x) / (exp(wj) + (wj * exp(wj))));
}

/* Herbie's suggestion */
double code(double wj, double x) {
	double tmp;
	if (wj <= 1.1e-13) {
		tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * pow(wj, 2.0)) + (x + (-2.0 * (x * wj)));
	} else {
		tmp = wj + (((x / exp(wj)) - wj) / (1.0 + wj));
	}
	return tmp;
}
real(8) function code(wj, x)
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    code = wj - (((wj * exp(wj)) - x) / (exp(wj) + (wj * exp(wj))))
end function
real(8) function code(wj, x)
    real(8), intent (in) :: wj
    real(8), intent (in) :: x
    real(8) :: tmp
    if (wj <= 1.1d-13) then
        tmp = ((1.0d0 - ((x * (-4.0d0)) + (x * 1.5d0))) * (wj ** 2.0d0)) + (x + ((-2.0d0) * (x * wj)))
    else
        tmp = wj + (((x / exp(wj)) - wj) / (1.0d0 + wj))
    end if
    code = tmp
end function
public static double code(double wj, double x) {
	return wj - (((wj * Math.exp(wj)) - x) / (Math.exp(wj) + (wj * Math.exp(wj))));
}
public static double code(double wj, double x) {
	double tmp;
	if (wj <= 1.1e-13) {
		tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * Math.pow(wj, 2.0)) + (x + (-2.0 * (x * wj)));
	} else {
		tmp = wj + (((x / Math.exp(wj)) - wj) / (1.0 + wj));
	}
	return tmp;
}
import math

# Original program
def code(wj, x):
	return wj - (((wj * math.exp(wj)) - x) / (math.exp(wj) + (wj * math.exp(wj))))

# Herbie's suggestion
def code(wj, x):
	tmp = 0
	if wj <= 1.1e-13:
		tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * math.pow(wj, 2.0)) + (x + (-2.0 * (x * wj)))
	else:
		tmp = wj + (((x / math.exp(wj)) - wj) / (1.0 + wj))
	return tmp
function code(wj, x)
	return Float64(wj - Float64(Float64(Float64(wj * exp(wj)) - x) / Float64(exp(wj) + Float64(wj * exp(wj)))))
end
function code(wj, x)
	tmp = 0.0
	if (wj <= 1.1e-13)
		tmp = Float64(Float64(Float64(1.0 - Float64(Float64(x * -4.0) + Float64(x * 1.5))) * (wj ^ 2.0)) + Float64(x + Float64(-2.0 * Float64(x * wj))));
	else
		tmp = Float64(wj + Float64(Float64(Float64(x / exp(wj)) - wj) / Float64(1.0 + wj)));
	end
	return tmp
end
function tmp = code(wj, x)
	tmp = wj - (((wj * exp(wj)) - x) / (exp(wj) + (wj * exp(wj))));
end
function tmp_2 = code(wj, x)
	tmp = 0.0;
	if (wj <= 1.1e-13)
		tmp = ((1.0 - ((x * -4.0) + (x * 1.5))) * (wj ^ 2.0)) + (x + (-2.0 * (x * wj)));
	else
		tmp = wj + (((x / exp(wj)) - wj) / (1.0 + wj));
	end
	tmp_2 = tmp;
end
code[wj_, x_] := N[(wj - N[(N[(N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
code[wj_, x_] := If[LessEqual[wj, 1.1e-13], N[(N[(N[(1.0 - N[(N[(x * -4.0), $MachinePrecision] + N[(x * 1.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[wj, 2.0], $MachinePrecision]), $MachinePrecision] + N[(x + N[(-2.0 * N[(x * wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(wj + N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] - wj), $MachinePrecision] / N[(1.0 + wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}}
\begin{array}{l}
\mathbf{if}\;wj \leq 1.1 \cdot 10^{-13}:\\
\;\;\;\;\left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(x \cdot wj\right)\right)\\
\mathbf{else}:\\
\;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj}\\
\end{array}

Local Percentage Accuracy

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (the variable is chosen in the plot title); the vertical axis is accuracy, where higher is better. Red represents the original program and blue represents Herbie's suggestion. The line is an average, while the dots represent individual samples.

Herbie found 7 alternatives; they are detailed in the Alternatives section below.

Accuracy vs Speed

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Target

Original 77.6%
Target 78.7%
Herbie 97.8%
\[wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right) \]
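
The target is algebraically the same as the else branch of Herbie's output, just arranged differently (a routine rearrangement added here for reference; the two forms need not round identically in binary64):

\[wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right) = wj - \frac{wj}{wj + 1} + \frac{x}{e^{wj} \cdot \left(1 + wj\right)} = wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj} \]

The accuracy gap between the target (78.7%) and Herbie's result (97.8%) therefore comes largely from Herbie's extra series branch for small wj.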

Derivation

  1. Split input into 2 regimes
  2. if wj < 1.09999999999999998e-13

    1. Initial program 80.1%

      \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
    2. Simplified 81.4%

      \[\leadsto \color{blue}{wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}} \]
      Step-by-step derivation

      [Start] 80.1%

      \[ wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]

      sub-neg [=>] 80.1%

      \[ \color{blue}{wj + \left(-\frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}}\right)} \]

      div-sub [=>] 80.1%

      \[ wj + \left(-\color{blue}{\left(\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)}\right) \]

      sub-neg [=>] 80.1%

      \[ wj + \left(-\color{blue}{\left(\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}} + \left(-\frac{x}{e^{wj} + wj \cdot e^{wj}}\right)\right)}\right) \]

      +-commutative [=>] 80.1%

      \[ wj + \left(-\color{blue}{\left(\left(-\frac{x}{e^{wj} + wj \cdot e^{wj}}\right) + \frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)}\right) \]

      distribute-neg-in [=>] 80.1%

      \[ wj + \color{blue}{\left(\left(-\left(-\frac{x}{e^{wj} + wj \cdot e^{wj}}\right)\right) + \left(-\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)\right)} \]

      remove-double-neg [=>] 80.1%

      \[ wj + \left(\color{blue}{\frac{x}{e^{wj} + wj \cdot e^{wj}}} + \left(-\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)\right) \]

      sub-neg [<=] 80.1%

      \[ wj + \color{blue}{\left(\frac{x}{e^{wj} + wj \cdot e^{wj}} - \frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)} \]

      div-sub [<=] 80.1%

      \[ wj + \color{blue}{\frac{x - wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}} \]

      distribute-rgt1-in [=>] 81.4%

      \[ wj + \frac{x - wj \cdot e^{wj}}{\color{blue}{\left(wj + 1\right) \cdot e^{wj}}} \]

      associate-/l/ [<=] 81.4%

      \[ wj + \color{blue}{\frac{\frac{x - wj \cdot e^{wj}}{e^{wj}}}{wj + 1}} \]
    3. Taylor expanded in wj around 0 98.5% (a quick check of this expansion appears after the derivation)

      \[\leadsto \color{blue}{\left(1 - \left(-4 \cdot x + 1.5 \cdot x\right)\right) \cdot {wj}^{2} + \left(-2 \cdot \left(wj \cdot x\right) + x\right)} \]

    if 1.09999999999999998e-13 < wj

    1. Initial program 87.3%

      \[wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]
    2. Simplified 99.0%

      \[\leadsto \color{blue}{wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1}} \]
      Step-by-step derivation

      [Start] 87.3%

      \[ wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} \]

      sub-neg [=>] 87.3%

      \[ \color{blue}{wj + \left(-\frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}}\right)} \]

      div-sub [=>] 87.3%

      \[ wj + \left(-\color{blue}{\left(\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)}\right) \]

      sub-neg [=>] 87.3%

      \[ wj + \left(-\color{blue}{\left(\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}} + \left(-\frac{x}{e^{wj} + wj \cdot e^{wj}}\right)\right)}\right) \]

      +-commutative [=>] 87.3%

      \[ wj + \left(-\color{blue}{\left(\left(-\frac{x}{e^{wj} + wj \cdot e^{wj}}\right) + \frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)}\right) \]

      distribute-neg-in [=>] 87.3%

      \[ wj + \color{blue}{\left(\left(-\left(-\frac{x}{e^{wj} + wj \cdot e^{wj}}\right)\right) + \left(-\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)\right)} \]

      remove-double-neg [=>] 87.3%

      \[ wj + \left(\color{blue}{\frac{x}{e^{wj} + wj \cdot e^{wj}}} + \left(-\frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)\right) \]

      sub-neg [<=] 87.3%

      \[ wj + \color{blue}{\left(\frac{x}{e^{wj} + wj \cdot e^{wj}} - \frac{wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}\right)} \]

      div-sub [<=] 87.3%

      \[ wj + \color{blue}{\frac{x - wj \cdot e^{wj}}{e^{wj} + wj \cdot e^{wj}}} \]

      distribute-rgt1-in [=>] 87.9%

      \[ wj + \frac{x - wj \cdot e^{wj}}{\color{blue}{\left(wj + 1\right) \cdot e^{wj}}} \]

      associate-/l/ [<=] 87.9%

      \[ wj + \color{blue}{\frac{\frac{x - wj \cdot e^{wj}}{e^{wj}}}{wj + 1}} \]
  3. Recombined 2 regimes into one program.
  4. Final simplification 98.5%

    \[\leadsto \begin{array}{l} \mathbf{if}\;wj \leq 1.1 \cdot 10^{-13}:\\ \;\;\;\;\left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(x \cdot wj\right)\right)\\ \mathbf{else}:\\ \;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj}\\ \end{array} \]
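
As a sanity check on step 2.3 of the derivation (a routine expansion, not part of Herbie's output): expanding the simplified expression around wj = 0 gives

\[wj + \frac{\frac{x}{e^{wj}} - wj}{wj + 1} = x - 2 \cdot x \cdot wj + \left(1 + 2.5 \cdot x\right) \cdot {wj}^{2} + O\left({wj}^{3}\right) \]

which agrees with the quadratic form above, since 1 - (-4*x + 1.5*x) = 1 + 2.5*x.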

Alternatives

Alternative 1
Accuracy 97.8%
Cost 7812
\[\begin{array}{l} \mathbf{if}\;wj \leq 1.1 \cdot 10^{-13}:\\ \;\;\;\;\left(1 - \left(x \cdot -4 + x \cdot 1.5\right)\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(x \cdot wj\right)\right)\\ \mathbf{else}:\\ \;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj}\\ \end{array} \]
Alternative 2
Accuracy 96.5%
Cost 15552
\[\begin{array}{l} t_0 := x \cdot -4 + x \cdot 1.5\\ {wj}^{3} \cdot \left(\left(\left(-1 - -2 \cdot t_0\right) - x \cdot -3\right) - 0.6666666666666666 \cdot x\right) + \left(\left(1 - t_0\right) \cdot {wj}^{2} + \left(x + -2 \cdot \left(x \cdot wj\right)\right)\right) \end{array} \]
Alternative 3
Accuracy 97.7%
Cost 7236
\[\begin{array}{l} \mathbf{if}\;wj \leq 1.1 \cdot 10^{-13}:\\ \;\;\;\;\left(x + -2 \cdot \left(x \cdot wj\right)\right) + wj \cdot wj\\ \mathbf{else}:\\ \;\;\;\;wj + \frac{\frac{x}{e^{wj}} - wj}{1 + wj}\\ \end{array} \]
Alternative 4
Accuracy 95.9%
Cost 704
\[\left(x + -2 \cdot \left(x \cdot wj\right)\right) + wj \cdot wj \]
Alternative 5
Accuracy 95.3%
Cost 320
\[x + wj \cdot wj \]
Alternative 6
Accuracy 4.4%
Cost 64
\[wj \]
Alternative 7
Accuracy 83.9%
Cost 64
\[x \]
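
Reading these against the derivation (my interpretation; the report does not relate the alternatives to each other): Alternative 1 is the final program above, while Alternative 2 appears to drop the branch and carry the same series about wj = 0 to third order,

\[x - 2 \cdot x \cdot wj + \left(1 + 2.5 \cdot x\right) \cdot {wj}^{2} - \left(1 + \frac{8}{3} \cdot x\right) \cdot {wj}^{3} \]

Alternative 3 keeps the branch but simplifies the wj^2 coefficient to 1, Alternatives 4 and 5 apply that quadratic (and then just x + wj^2) everywhere, and Alternatives 6 and 7 return wj or x alone, trading nearly all accuracy for minimal cost.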

Reproduce

herbie shell --seed 2023178 
(FPCore (wj x)
  :name "Jmat.Real.lambertw, newton loop step"
  :precision binary64

  :herbie-target
  (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj))))))

  (- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))