
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
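The expression above is the Newton update for solving wj \cdot e^{wj} = x, i.e. one step of the Lambert W iteration named in the FPCore at the end of this page. As a minimal, hypothetical C driver (the starting guess log(1 + x), the fixed iteration count, and the function names are assumptions for illustration, not part of the report), the step can be iterated like this:

#include <math.h>
#include <stdio.h>

/* One Newton step for w*exp(w) = x, exactly as in the program above. */
static double newton_step(double wj, double x) {
    double t_0 = wj * exp(wj);
    return wj - ((t_0 - x) / (exp(wj) + t_0));
}

int main(void) {
    double x = 2.0;
    double w = log(1.0 + x);        /* rough starting guess (assumption) */
    for (int i = 0; i < 20; i++)    /* a handful of steps suffices near the root */
        w = newton_step(w, x);
    printf("W(%g) ~= %.17g, residual w*e^w - x = %.3g\n", x, w, w * exp(w) - x);
    return 0;
}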
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
end
function tmp = code(wj, x)
    t_0 = wj * exp(wj);
    tmp = wj - ((t_0 - x) / (exp(wj) + t_0));
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(if (<= wj -5.5e-6)
(fma (/ -1.0 (+ 1.0 wj)) (/ (- (* (exp wj) wj) x) (exp wj)) wj)
(fma
(fma (fma (fma -2.6666666666666665 wj 2.5) x (- 1.0 wj)) wj (* -2.0 x))
wj
x)))
double code(double wj, double x) {
double tmp;
if (wj <= -5.5e-6) {
tmp = fma((-1.0 / (1.0 + wj)), (((exp(wj) * wj) - x) / exp(wj)), wj);
} else {
tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, (1.0 - wj)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x)
    tmp = 0.0
    if (wj <= -5.5e-6)
        tmp = fma(Float64(-1.0 / Float64(1.0 + wj)), Float64(Float64(Float64(exp(wj) * wj) - x) / exp(wj)), wj);
    else
        tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, Float64(1.0 - wj)), wj, Float64(-2.0 * x)), wj, x);
    end
    return tmp
end
code[wj_, x_] := If[LessEqual[wj, -5.5e-6], N[(N[(-1.0 / N[(1.0 + wj), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision] - x), $MachinePrecision] / N[Exp[wj], $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision], N[(N[(N[(N[(-2.6666666666666665 * wj + 2.5), $MachinePrecision] * x + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -5.5 \cdot 10^{-6}:\\
\;\;\;\;\mathsf{fma}\left(\frac{-1}{1 + wj}, \frac{e^{wj} \cdot wj - x}{e^{wj}}, wj\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right), x, 1 - wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -5.4999999999999999e-6
Initial program 74.4%
lift--.f64 N/A
sub-neg N/A
+-commutative N/A
lift-/.f64 N/A
distribute-neg-frac N/A
neg-mul-1 N/A
lift-+.f64 N/A
lift-*.f64 N/A
distribute-rgt1-in N/A
times-frac N/A
lower-fma.f64 N/A
Applied rewrites 99.6%
if -5.4999999999999999e-6 < wj
Initial program 77.7%
Taylor expanded in wj around 0
Applied rewrites 99.1%
Taylor expanded in x around 0
Applied rewrites 99.1%
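To see how this branchy rewrite behaves relative to the straightforward step, a small side-by-side harness can help; the helper names and sample inputs below are illustrative choices, not values taken from the report:

#include <math.h>
#include <stdio.h>

/* Original step, as in the first program on this page. */
static double step_orig(double wj, double x) {
    double t_0 = wj * exp(wj);
    return wj - ((t_0 - x) / (exp(wj) + t_0));
}

/* Alternative above: branch on wj and use fma on both sides. */
static double step_alt1(double wj, double x) {
    if (wj <= -5.5e-6)
        return fma(-1.0 / (1.0 + wj), (exp(wj) * wj - x) / exp(wj), wj);
    return fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, 1.0 - wj), wj, -2.0 * x), wj, x);
}

int main(void) {
    double pts[][2] = {{-0.5, 0.25}, {-1e-8, 3e-9}, {0.75, 1.5}};  /* illustrative inputs */
    for (int i = 0; i < 3; i++) {
        double wj = pts[i][0], x = pts[i][1];
        printf("wj=%g x=%g  orig=%.17g  alt=%.17g\n", wj, x, step_orig(wj, x), step_alt1(wj, x));
    }
    return 0;
}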
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj))) (t_1 (- wj (/ (- t_0 x) (+ (exp wj) t_0))))) (if (or (<= t_1 -1e-273) (not (<= t_1 1e-306))) (- wj (- x)) (* wj wj))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = wj - ((t_0 - x) / (exp(wj) + t_0));
double tmp;
if ((t_1 <= -1e-273) || !(t_1 <= 1e-306)) {
tmp = wj - -x;
} else {
tmp = wj * wj;
}
return tmp;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = wj * exp(wj)
t_1 = wj - ((t_0 - x) / (exp(wj) + t_0))
if ((t_1 <= (-1d-273)) .or. (.not. (t_1 <= 1d-306))) then
tmp = wj - -x
else
tmp = wj * wj
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double t_1 = wj - ((t_0 - x) / (Math.exp(wj) + t_0));
double tmp;
if ((t_1 <= -1e-273) || !(t_1 <= 1e-306)) {
tmp = wj - -x;
} else {
tmp = wj * wj;
}
return tmp;
}
def code(wj, x):
    t_0 = wj * math.exp(wj)
    t_1 = wj - ((t_0 - x) / (math.exp(wj) + t_0))
    tmp = 0
    if (t_1 <= -1e-273) or not (t_1 <= 1e-306):
        tmp = wj - -x
    else:
        tmp = wj * wj
    return tmp
function code(wj, x)
    t_0 = Float64(wj * exp(wj))
    t_1 = Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0)))
    tmp = 0.0
    if ((t_1 <= -1e-273) || !(t_1 <= 1e-306))
        tmp = Float64(wj - Float64(-x));
    else
        tmp = Float64(wj * wj);
    end
    return tmp
end
function tmp_2 = code(wj, x)
    t_0 = wj * exp(wj);
    t_1 = wj - ((t_0 - x) / (exp(wj) + t_0));
    tmp = 0.0;
    if ((t_1 <= -1e-273) || ~((t_1 <= 1e-306)))
        tmp = wj - -x;
    else
        tmp = wj * wj;
    end
    tmp_2 = tmp;
end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, -1e-273], N[Not[LessEqual[t$95$1, 1e-306]], $MachinePrecision]], N[(wj - (-x)), $MachinePrecision], N[(wj * wj), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := wj - \frac{t_0 - x}{e^{wj} + t_0}\\
\mathbf{if}\;t_1 \leq -1 \cdot 10^{-273} \lor \neg \left(t_1 \leq 10^{-306}\right):\\
\;\;\;\;wj - \left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;wj \cdot wj\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -1e-273 or 1.00000000000000003e-306 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj)))))
Initial program 96.0%
Taylor expanded in wj around 0
mul-1-neg N/A
lower-neg.f64 88.1
Applied rewrites 88.1%
if -1e-273 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 1.00000000000000003e-306
Initial program 7.0%
Taylor expanded in wj around 0
Applied rewrites 100.0%
Taylor expanded in wj around 0
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 100.0%
Taylor expanded in x around 0
Applied rewrites 46.8%
Final simplification 79.5%
(FPCore (wj x)
:precision binary64
(if (<= wj -5.9e-6)
(fma (fma (exp wj) wj (- x)) (* (exp (- wj)) (/ -1.0 (+ 1.0 wj))) wj)
(fma
(fma (fma (fma -2.6666666666666665 wj 2.5) x (- 1.0 wj)) wj (* -2.0 x))
wj
x)))
double code(double wj, double x) {
double tmp;
if (wj <= -5.9e-6) {
tmp = fma(fma(exp(wj), wj, -x), (exp(-wj) * (-1.0 / (1.0 + wj))), wj);
} else {
tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, (1.0 - wj)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x)
    tmp = 0.0
    if (wj <= -5.9e-6)
        tmp = fma(fma(exp(wj), wj, Float64(-x)), Float64(exp(Float64(-wj)) * Float64(-1.0 / Float64(1.0 + wj))), wj);
    else
        tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, Float64(1.0 - wj)), wj, Float64(-2.0 * x)), wj, x);
    end
    return tmp
end
code[wj_, x_] := If[LessEqual[wj, -5.9e-6], N[(N[(N[Exp[wj], $MachinePrecision] * wj + (-x)), $MachinePrecision] * N[(N[Exp[(-wj)], $MachinePrecision] * N[(-1.0 / N[(1.0 + wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + wj), $MachinePrecision], N[(N[(N[(N[(-2.6666666666666665 * wj + 2.5), $MachinePrecision] * x + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -5.9 \cdot 10^{-6}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(e^{wj}, wj, -x\right), e^{-wj} \cdot \frac{-1}{1 + wj}, wj\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right), x, 1 - wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -5.90000000000000026e-6
Initial program 74.4%
lift--.f64 N/A
sub-neg N/A
+-commutative N/A
lift-/.f64 N/A
distribute-neg-frac N/A
neg-mul-1 N/A
lift-+.f64 N/A
lift-*.f64 N/A
distribute-rgt1-in N/A
times-frac N/A
lower-fma.f64 N/A
Applied rewrites 99.6%
lift-fma.f64 N/A
*-commutative N/A
lift-/.f64 N/A
div-inv N/A
associate-*l* N/A
lower-fma.f64 N/A
lift--.f64 N/A
sub-neg N/A
lift-*.f64 N/A
lift-neg.f64 N/A
lower-fma.f64 N/A
lower-*.f64 N/A
lift-exp.f64 N/A
rec-exp N/A
lower-exp.f64 N/A
lower-neg.f64 99.3
Applied rewrites 99.3%
if -5.90000000000000026e-6 < wj
Initial program 77.7%
Taylor expanded in wj around 0
Applied rewrites 99.1%
Taylor expanded in x around 0
Applied rewrites 99.1%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.0068)
(- wj (/ (/ x (- -1.0 wj)) (exp wj)))
(fma
(fma (fma (fma -2.6666666666666665 wj 2.5) x (- 1.0 wj)) wj (* -2.0 x))
wj
x)))
double code(double wj, double x) {
double tmp;
if (wj <= -0.0068) {
tmp = wj - ((x / (-1.0 - wj)) / exp(wj));
} else {
tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, (1.0 - wj)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x)
    tmp = 0.0
    if (wj <= -0.0068)
        tmp = Float64(wj - Float64(Float64(x / Float64(-1.0 - wj)) / exp(wj)));
    else
        tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, Float64(1.0 - wj)), wj, Float64(-2.0 * x)), wj, x);
    end
    return tmp
end
code[wj_, x_] := If[LessEqual[wj, -0.0068], N[(wj - N[(N[(x / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision] / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(-2.6666666666666665 * wj + 2.5), $MachinePrecision] * x + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.0068:\\
\;\;\;\;wj - \frac{\frac{x}{-1 - wj}}{e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right), x, 1 - wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -0.00679999999999999962
Initial program 74.4%
Taylor expanded in x around inf
mul-1-neg N/A
distribute-rgt1-in N/A
+-commutative N/A
associate-/r* N/A
distribute-neg-frac2 N/A
mul-1-neg N/A
lower-/.f64 N/A
lower-/.f64 N/A
lower-+.f64 N/A
mul-1-neg N/A
lower-neg.f64 N/A
lower-exp.f64 89.5
Applied rewrites 89.5%
if -0.00679999999999999962 < wj
Initial program 77.7%
Taylor expanded in wj around 0
Applied rewrites 99.1%
Taylor expanded in x around 0
Applied rewrites 99.1%
Final simplification 98.8%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.0056)
(/ x (* (+ wj 1.0) (exp wj)))
(fma
(fma (fma (fma -2.6666666666666665 wj 2.5) x (- 1.0 wj)) wj (* -2.0 x))
wj
x)))
double code(double wj, double x) {
double tmp;
if (wj <= -0.0056) {
tmp = x / ((wj + 1.0) * exp(wj));
} else {
tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, (1.0 - wj)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x)
    tmp = 0.0
    if (wj <= -0.0056)
        tmp = Float64(x / Float64(Float64(wj + 1.0) * exp(wj)));
    else
        tmp = fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, Float64(1.0 - wj)), wj, Float64(-2.0 * x)), wj, x);
    end
    return tmp
end
code[wj_, x_] := If[LessEqual[wj, -0.0056], N[(x / N[(N[(wj + 1.0), $MachinePrecision] * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(-2.6666666666666665 * wj + 2.5), $MachinePrecision] * x + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.0056:\\
\;\;\;\;\frac{x}{\left(wj + 1\right) \cdot e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right), x, 1 - wj\right), wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -0.00559999999999999994
Initial program 74.4%
Taylor expanded in x around inf
lower-/.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
lower-exp.f64 N/A
lower-exp.f64 62.6
Applied rewrites 62.6%
Applied rewrites 87.6%
if -0.00559999999999999994 < wj
Initial program 77.7%
Taylor expanded in wj around 0
Applied rewrites 99.1%
Taylor expanded in x around 0
Applied rewrites 99.1%
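For context on the wj \leq -0.0056 branch: the original step can be rearranged exactly, and the branch keeps only the second term, which dominates when |x| is large (this is the effect of the "Taylor expanded in x around inf" steps above):

\begin{array}{l}
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = \frac{wj^2}{1 + wj} + \frac{x}{\left(1 + wj\right) \cdot e^{wj}}
\end{array}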
(FPCore (wj x) :precision binary64 (fma (fma (fma (fma -2.6666666666666665 wj 2.5) x (- 1.0 wj)) wj (* -2.0 x)) wj x))
double code(double wj, double x) {
return fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, (1.0 - wj)), wj, (-2.0 * x)), wj, x);
}
function code(wj, x) return fma(fma(fma(fma(-2.6666666666666665, wj, 2.5), x, Float64(1.0 - wj)), wj, Float64(-2.0 * x)), wj, x) end
code[wj_, x_] := N[(N[(N[(N[(-2.6666666666666665 * wj + 2.5), $MachinePrecision] * x + N[(1.0 - wj), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right), x, 1 - wj\right), wj, -2 \cdot x\right), wj, x\right)
\end{array}
Initial program 77.6%
Taylor expanded in wj around 0
Applied rewrites 96.4%
Taylor expanded in x around 0
Applied rewrites 96.4%
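Expanding the nested fma calls shows the polynomial this alternative evaluates; the constant -2.6666666666666665 is the closest double to -8/3, and the result is the degree-3 Taylor polynomial of the step in wj around 0, matching the derivation above:

\begin{array}{l}
x - 2 x \cdot wj + \left(1 + 2.5\,x\right) wj^2 - \left(1 + \tfrac{8}{3}\,x\right) wj^3
\end{array}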
(FPCore (wj x) :precision binary64 (fma (fma (fma 2.5 wj -2.0) x wj) wj x))
double code(double wj, double x) {
return fma(fma(fma(2.5, wj, -2.0), x, wj), wj, x);
}
function code(wj, x) return fma(fma(fma(2.5, wj, -2.0), x, wj), wj, x) end
code[wj_, x_] := N[(N[(N[(2.5 * wj + -2.0), $MachinePrecision] * x + wj), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(2.5, wj, -2\right), x, wj\right), wj, x\right)
\end{array}
Initial program 77.6%
Taylor expanded in wj around 0
Applied rewrites 96.4%
Taylor expanded in wj around 0
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 95.8%
(FPCore (wj x) :precision binary64 (fma (fma -2.0 x wj) wj x))
double code(double wj, double x) {
return fma(fma(-2.0, x, wj), wj, x);
}
function code(wj, x) return fma(fma(-2.0, x, wj), wj, x) end
code[wj_, x_] := N[(N[(-2.0 * x + wj), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(-2, x, wj\right), wj, x\right)
\end{array}
Initial program 77.6%
Taylor expanded in wj around 0
Applied rewrites 96.4%
Taylor expanded in wj around 0
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 95.8%
Taylor expanded in wj around 0
Applied rewrites 95.5%
(FPCore (wj x) :precision binary64 (* (fma -2.0 wj 1.0) x))
double code(double wj, double x) {
return fma(-2.0, wj, 1.0) * x;
}
function code(wj, x) return Float64(fma(-2.0, wj, 1.0) * x) end
code[wj_, x_] := N[(N[(-2.0 * wj + 1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-2, wj, 1\right) \cdot x
\end{array}
Initial program 77.6%
Taylor expanded in wj around 0
Applied rewrites 96.4%
Taylor expanded in wj around 0
associate-*r* N/A
distribute-rgt1-in N/A
lower-*.f64 N/A
lower-fma.f64 84.5
Applied rewrites 84.5%
(FPCore (wj x) :precision binary64 (* wj wj))
double code(double wj, double x) {
return wj * wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj * wj
end function
public static double code(double wj, double x) {
return wj * wj;
}
def code(wj, x): return wj * wj
function code(wj, x) return Float64(wj * wj) end
function tmp = code(wj, x)
    tmp = wj * wj;
end
code[wj_, x_] := N[(wj * wj), $MachinePrecision]
\begin{array}{l}
\\
wj \cdot wj
\end{array}
Initial program 77.6%
Taylor expanded in wj around 0
Applied rewrites 96.4%
Taylor expanded in wj around 0
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 95.8%
Taylor expanded in x around 0
Applied rewrites 13.4%
(FPCore (wj x) :precision binary64 (+ -1.0 wj))
double code(double wj, double x) {
return -1.0 + wj;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = (-1.0d0) + wj
end function
public static double code(double wj, double x) {
return -1.0 + wj;
}
def code(wj, x): return -1.0 + wj
function code(wj, x) return Float64(-1.0 + wj) end
function tmp = code(wj, x)
    tmp = -1.0 + wj;
end
code[wj_, x_] := N[(-1.0 + wj), $MachinePrecision]
\begin{array}{l}
\\
-1 + wj
\end{array}
Initial program 77.6%
Taylor expanded in wj around inf
sub-neg N/A
distribute-rgt-in N/A
*-lft-identity N/A
distribute-lft-neg-out N/A
lft-mult-inverse N/A
metadata-eval N/A
+-commutative N/A
lower-+.f64 3.4
Applied rewrites 3.4%
(FPCore (wj x) :precision binary64 -1.0)
double code(double wj, double x) {
return -1.0;
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = -1.0d0
end function
public static double code(double wj, double x) {
return -1.0;
}
def code(wj, x): return -1.0
function code(wj, x) return -1.0 end
function tmp = code(wj, x)
    tmp = -1.0;
end
code[wj_, x_] := -1.0
\begin{array}{l}
\\
-1
\end{array}
Initial program 77.6%
Taylor expanded in wj around inf
sub-neg N/A
distribute-rgt-in N/A
*-lft-identity N/A
distribute-lft-neg-out N/A
lft-mult-inverse N/A
metadata-eval N/A
+-commutative N/A
lower-+.f64 3.4
Applied rewrites 3.4%
Taylor expanded in wj around 0
Applied rewrites 3.1%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
real(8) function code(wj, x)
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
public static double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (Math.exp(wj) + (wj * Math.exp(wj)))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x)
    tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
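This expression matches the :alt form in the FPCore below. It is an exact algebraic rearrangement of the original step, obtained by splitting the fraction and simplifying wj \cdot e^{wj} / (e^{wj} + wj \cdot e^{wj}) to wj / (wj + 1):

\begin{array}{l}
wj - \frac{wj \cdot e^{wj} - x}{e^{wj} + wj \cdot e^{wj}} = wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}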
herbie shell --seed 2024313
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))