
;; Initial program: exp(-w) * l^exp(w), evaluated in binary64.
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! Computes exp(-w) * l**exp(w) in double precision.
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
/** Evaluates e^{-w} * l^{e^{w}} in double precision. */
public static double code(double w, double l) {
    double decay = Math.exp(-w);   // e^{-w}
    double growth = Math.exp(w);   // e^{w}, exponent applied to l
    return decay * Math.pow(l, growth);
}
def code(w, l):
    """Evaluate exp(-w) * l**exp(w) in double precision."""
    decay = math.exp(-w)
    growth = math.exp(w)
    return decay * math.pow(l, growth)
# Computes exp(-w) * l^exp(w) as a Float64.
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
% Computes exp(-w) * l^exp(w) in double precision.
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
(* Computes Exp[-w] * l^Exp[w] at machine precision. *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Initial program (repeated listing): exp(-w) * l^exp(w) in binary64.
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
/* exp(-w) * pow(l, exp(w)) — initial program (repeated listing). */
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! exp(-w) * l**exp(w) — initial program (repeated listing).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
// exp(-w) * pow(l, exp(w)) — initial program (repeated listing).
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
# exp(-w) * l**exp(w) — initial program (repeated listing).
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
# exp(-w) * l^exp(w) — initial program (repeated listing).
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
% exp(-w) * l^exp(w) — initial program (repeated listing).
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
(* Exp[-w] * l^Exp[w] — initial program (repeated listing). *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
;; Initial program (repeated listing): exp(-w) * l^exp(w) in binary64.
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
/* exp(-w) * pow(l, exp(w)) — initial program (repeated listing). */
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! exp(-w) * l**exp(w) — initial program (repeated listing).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
// exp(-w) * pow(l, exp(w)) — initial program (repeated listing).
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
# exp(-w) * l**exp(w) — initial program (repeated listing).
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
# exp(-w) * l^exp(w) — initial program (repeated listing).
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
% exp(-w) * l^exp(w) — initial program (repeated listing).
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
(* Exp[-w] * l^Exp[w] — initial program (repeated listing). *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Initial program 99.8%
Final simplification 99.8%
;; Alternative 1: algebraically equal rewrite l^exp(w) / exp(w).
(FPCore (w l) :precision binary64 (/ (pow l (exp w)) (exp w)))
double code(double w, double l) {
return pow(l, exp(w)) / exp(w);
}
! Equivalent rewrite: l**exp(w) / exp(w).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = (l ** exp(w)) / exp(w)
end function
/** Equivalent rewrite of the initial program: l^{e^{w}} / e^{w}. */
public static double code(double w, double l) {
    double growth = Math.exp(w);   // shared e^{w}: exponent and divisor
    return Math.pow(l, growth) / growth;
}
def code(w, l):
    """Equivalent rewrite of the initial program: l**exp(w) / exp(w)."""
    growth = math.exp(w)
    return math.pow(l, growth) / growth
# Equivalent rewrite: l^exp(w) / exp(w).
function code(w, l) return Float64((l ^ exp(w)) / exp(w)) end
% Equivalent rewrite: l^exp(w) / exp(w).
function tmp = code(w, l) tmp = (l ^ exp(w)) / exp(w); end
(* Equivalent rewrite: l^Exp[w] / Exp[w]. *)
code[w_, l_] := N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}}
\end{array}
Initial program 99.8%
exp-neg 99.8%
associate-*l/ 99.8%
*-lft-identity 99.8%
Simplified 99.8%
Final simplification 99.8%
;; Alternative 2: piecewise — exp(-w) outside (-0.7, 1.05], first-order Taylor l - w*l inside.
(FPCore (w l) :precision binary64 (if (or (<= w -0.7) (not (<= w 1.05))) (exp (- w)) (- l (* w l))))
double code(double w, double l) {
double tmp;
if ((w <= -0.7) || !(w <= 1.05)) {
tmp = exp(-w);
} else {
tmp = l - (w * l);
}
return tmp;
}
! Piecewise: exp(-w) when w is outside (-0.7, 1.05], else l - w*l.
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if ((w <= (-0.7d0)) .or. (.not. (w <= 1.05d0))) then
tmp = exp(-w)
else
tmp = l - (w * l)
end if
code = tmp
end function
/**
 * Piecewise: exp(-w) when w is outside (-0.7, 1.05], otherwise the
 * first-order Taylor form l - w*l. NaN falls into the exp branch,
 * matching the original predicate.
 */
public static double code(double w, double l) {
    boolean useExp = (w <= -0.7) || !(w <= 1.05);
    return useExp ? Math.exp(-w) : l - (w * l);
}
def code(w, l):
    """Piecewise: exp(-w) when w is outside (-0.7, 1.05], else l - w*l.

    NOTE(review): the report rendering collapsed this function onto a
    single line, which is a SyntaxError in Python; re-expanded here with
    the original statements unchanged.
    """
    tmp = 0
    if (w <= -0.7) or not (w <= 1.05):
        tmp = math.exp(-w)
    else:
        tmp = l - (w * l)
    return tmp
# Piecewise: exp(-w) when w is outside (-0.7, 1.05], else l - w*l.
# NOTE(review): the report rendering flattened this function onto one
# line (`tmp = 0.0 if ...`), which does not parse; re-expanded here with
# the original statements unchanged.
function code(w, l)
    tmp = 0.0
    if (w <= -0.7) || !(w <= 1.05)
        tmp = exp(Float64(-w))
    else
        tmp = Float64(l - Float64(w * l))
    end
    return tmp
end
function tmp_2 = code(w, l)
    % Piecewise: exp(-w) when w is outside (-0.7, 1.05], else l - w*l.
    % NOTE(review): the report rendering placed statements after the
    % function declaration on the same line, which MATLAB rejects;
    % re-expanded here with the original statements unchanged.
    tmp = 0.0;
    if ((w <= -0.7) || ~((w <= 1.05)))
        tmp = exp(-w);
    else
        tmp = l - (w * l);
    end
    tmp_2 = tmp;
end
(* Piecewise: Exp[-w] when w is outside (-0.7, 1.05], else l - w*l. *)
code[w_, l_] := If[Or[LessEqual[w, -0.7], N[Not[LessEqual[w, 1.05]], $MachinePrecision]], N[Exp[(-w)], $MachinePrecision], N[(l - N[(w * l), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -0.7 \lor \neg \left(w \leq 1.05\right):\\
\;\;\;\;e^{-w}\\
\mathbf{else}:\\
\;\;\;\;\ell - w \cdot \ell\\
\end{array}
\end{array}
if w < -0.69999999999999996 or 1.05000000000000004 < w: Initial program 100.0%
exp-neg 100.0%
associate-*l/ 100.0%
*-lft-identity 100.0%
Simplified 100.0%
Taylor expanded in l around 0 100.0%
Taylor expanded in l around inf 100.0%
div-exp 100.0%
mul-1-neg 100.0%
log-rec 100.0%
Simplified 100.0%
Taylor expanded in w around inf 100.0%
neg-mul-1 100.0%
Simplified 100.0%
if -0.69999999999999996 < w < 1.05000000000000004: Initial program 99.6%
exp-neg 99.6%
associate-*l/ 99.6%
*-lft-identity 99.6%
Simplified 99.6%
Taylor expanded in w around 0 98.2%
Taylor expanded in w around 0 98.2%
Final simplification 98.9%
;; Alternative 3: Taylor approximation l / exp(w).
(FPCore (w l) :precision binary64 (/ l (exp w)))
double code(double w, double l) {
return l / exp(w);
}
! Taylor approximation: l / exp(w).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l / exp(w)
end function
/** Taylor approximation of the initial program: l / e^{w}. */
public static double code(double w, double l) {
    double growth = Math.exp(w);
    return l / growth;
}
def code(w, l):
    """Taylor approximation of the initial program: l / exp(w)."""
    growth = math.exp(w)
    return l / growth
# Taylor approximation: l / exp(w).
function code(w, l) return Float64(l / exp(w)) end
% Taylor approximation: l / exp(w).
function tmp = code(w, l) tmp = l / exp(w); end
(* Taylor approximation: l / Exp[w]. *)
code[w_, l_] := N[(l / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{e^{w}}
\end{array}
Initial program 99.8%
exp-neg 99.8%
associate-*l/ 99.8%
*-lft-identity 99.8%
Simplified 99.8%
Taylor expanded in w around 0 98.5%
Final simplification 98.5%
;; Alternative 4: first-order Taylor form l - w*l.
(FPCore (w l) :precision binary64 (- l (* w l)))
/* First-order Taylor form: l - w*l (the subtraction shape is kept,
 * not refactored to l*(1-w), to preserve rounding behavior). */
double code(double w, double l) {
    double scaled = w * l;
    return l - scaled;
}
! First-order Taylor form: l - w*l.
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l - (w * l)
end function
/** First-order Taylor form: l - w*l (subtraction shape preserved). */
public static double code(double w, double l) {
    double scaled = w * l;
    return l - scaled;
}
def code(w, l):
    """First-order Taylor form: l - w*l (subtraction shape preserved)."""
    scaled = w * l
    return l - scaled
# First-order Taylor form: l - w*l.
function code(w, l) return Float64(l - Float64(w * l)) end
% First-order Taylor form: l - w*l.
function tmp = code(w, l) tmp = l - (w * l); end
(* First-order Taylor form: l - w*l. *)
code[w_, l_] := N[(l - N[(w * l), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\ell - w \cdot \ell
\end{array}
Initial program 99.8%
exp-neg 99.8%
associate-*l/ 99.8%
*-lft-identity 99.8%
Simplified 99.8%
Taylor expanded in w around 0 98.5%
Taylor expanded in w around 0 67.3%
Final simplification 67.3%
(FPCore (w l) :precision binary64 l)
/* Zeroth-order approximation: returns l unchanged (w unused). */
double code(double w, double l) {
return l;
}
! Zeroth-order approximation: returns l unchanged (w unused).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l
end function
// Zeroth-order approximation: returns l unchanged (w unused).
public static double code(double w, double l) {
return l;
}
# Zeroth-order approximation: returns l unchanged (w unused).
def code(w, l): return l
# Zeroth-order approximation: returns l unchanged (w unused).
function code(w, l) return l end
% Zeroth-order approximation: returns l unchanged (w unused).
function tmp = code(w, l) tmp = l; end
(* Zeroth-order approximation: returns l unchanged (w unused). *)
code[w_, l_] := l
\begin{array}{l}
\\
\ell
\end{array}
Initial program 99.8%
exp-neg 99.8%
associate-*l/ 99.8%
*-lft-identity 99.8%
Simplified 99.8%
Taylor expanded in w around 0 98.5%
Taylor expanded in w around 0 62.3%
Final simplification 62.3%
herbie shell --seed 2023275
;; Original input program as given to the Herbie shell (seed above).
(FPCore (w l)
:name "exp-w (used to crash)"
:precision binary64
(* (exp (- w)) (pow l (exp w))))