
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
    ! Evaluates e^(-w) * l**(e^w) in double precision.
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    real(8) :: decay
    decay = exp(-w)
    code = decay * (l ** exp(w))
end function
public static double code(double w, double l) {
    // Evaluates e^(-w) * l^(e^w) in binary64.
    final double decay = Math.exp(-w);
    final double grown = Math.pow(l, Math.exp(w));
    return decay * grown;
}
def code(w, l):
    """Evaluate e^(-w) * l^(e^w) in double precision."""
    decay = math.exp(-w)
    return decay * math.pow(l, math.exp(w))
# Evaluates e^(-w) * l^(e^w) in Float64.
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l)
    % Evaluates e^(-w) * l^(e^w) in double precision.
    tmp = exp(-w) * (l ^ exp(w));
end
(* e^(-w) * l^(e^w), with each intermediate rounded to $MachinePrecision. *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
/* Original expression: e^(-w) * l^(e^w), evaluated in binary64. */
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! Original expression: e^(-w) * l**(e^w), double precision.
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
// Original expression: e^(-w) * l^(e^w), evaluated in binary64.
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
# Original expression: e^(-w) * l^(e^w), double precision.
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
# Original expression: e^(-w) * l^(e^w) in Float64.
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
% Original expression: e^(-w) * l^(e^w), double precision.
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
(* e^(-w) * l^(e^w), with each intermediate rounded to $MachinePrecision. *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
/* Original expression: e^(-w) * l^(e^w), evaluated in binary64. */
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! Original expression: e^(-w) * l**(e^w), double precision.
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
// Original expression: e^(-w) * l^(e^w), evaluated in binary64.
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
# Original expression: e^(-w) * l^(e^w), double precision.
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
# Original expression: e^(-w) * l^(e^w) in Float64.
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
% Original expression: e^(-w) * l^(e^w), double precision.
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
(* e^(-w) * l^(e^w), with each intermediate rounded to $MachinePrecision. *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Initial program 99.7%
Final simplification 99.7%
(FPCore (w l) :precision binary64 (/ (pow l (exp w)) (exp w)))
double code(double w, double l) {
return pow(l, exp(w)) / exp(w);
}
real(8) function code(w, l)
    ! Rewritten form: l**(e^w) divided by e^w.
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    real(8) :: g
    g = exp(w)
    code = (l ** g) / g
end function
public static double code(double w, double l) {
    // Rewritten form: l^(e^w) divided by e^w.
    final double g = Math.exp(w);
    return Math.pow(l, g) / g;
}
def code(w, l):
    """Rewritten form: l^(e^w) divided by e^w."""
    g = math.exp(w)
    return math.pow(l, g) / g
function code(w, l)
    # Rewritten form: l^(e^w) divided by e^w.
    g = exp(w)
    return Float64((l ^ g) / g)
end
function tmp = code(w, l)
    % Rewritten form: l^(e^w) divided by e^w.
    g = exp(w);
    tmp = (l ^ g) / g;
end
(* l^(e^w) / e^w, with each intermediate rounded to $MachinePrecision. *)
code[w_, l_] := N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}}
\end{array}
Initial program 99.7%
exp-neg 99.7%
associate-*l/ 99.7%
*-lft-identity 99.7%
Simplified 99.7%
Final simplification 99.7%
(FPCore (w l) :precision binary64 (* (exp (- w)) l))
double code(double w, double l) {
return exp(-w) * l;
}
real(8) function code(w, l)
    ! First-order approximation: l scaled by e^(-w).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    code = l * exp(-w)
end function
public static double code(double w, double l) {
    // First-order approximation: l scaled by e^(-w).
    return l * Math.exp(-w);
}
def code(w, l):
    """First-order approximation: l scaled by e^(-w)."""
    return l * math.exp(-w)
function code(w, l)
    # First-order approximation: l scaled by e^(-w).
    damp = exp(Float64(-w))
    return Float64(damp * l)
end
function tmp = code(w, l)
    % First-order approximation: l scaled by e^(-w).
    tmp = l * exp(-w);
end
(* First-order approximation: e^(-w) * l, rounded to $MachinePrecision. *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * l), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot \ell
\end{array}
Initial program 99.7%
Taylor expanded in w around 0 98.2%
Final simplification 98.2%
(FPCore (w l) :precision binary64 (/ l (exp w)))
double code(double w, double l) {
return l / exp(w);
}
real(8) function code(w, l)
    ! Approximation: l divided by e^w.
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    real(8) :: g
    g = exp(w)
    code = l / g
end function
public static double code(double w, double l) {
    // Approximation: l divided by e^w.
    final double g = Math.exp(w);
    return l / g;
}
def code(w, l):
    """Approximation: l divided by e^w."""
    growth = math.exp(w)
    return l / growth
function code(w, l)
    # Approximation: l divided by e^w.
    g = exp(w)
    return Float64(l / g)
end
function tmp = code(w, l)
    % Approximation: l divided by e^w.
    g = exp(w);
    tmp = l / g;
end
(* Approximation: l / e^w, rounded to $MachinePrecision. *)
code[w_, l_] := N[(l / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{e^{w}}
\end{array}
Initial program 99.7%
exp-neg 99.7%
associate-*l/ 99.7%
*-lft-identity 99.7%
Simplified 99.7%
Taylor expanded in w around 0 98.2%
Final simplification 98.2%
(FPCore (w l) :precision binary64 (if (<= w -0.01) (* (- w) l) l))
double code(double w, double l) {
    /* Piecewise approximation: (-w) * l below the -0.01 threshold, else l. */
    if (w <= -0.01) {
        return -w * l;
    }
    return l;
}
real(8) function code(w, l)
    ! Piecewise approximation: (-w) * l when w <= -0.01, otherwise l.
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= (-0.01d0)) then
        code = -w * l
    else
        code = l
    end if
end function
public static double code(double w, double l) {
    // Piecewise approximation: (-w) * l below the -0.01 threshold, else l.
    return (w <= -0.01) ? -w * l : l;
}
def code(w, l):
    """Piecewise approximation of e^(-w) * l^(e^w).

    Returns (-w) * l when w <= -0.01, otherwise l.
    The original line was an invalid one-line flattening of this
    function (a Python syntax error); restored as proper statements.
    """
    if w <= -0.01:
        tmp = -w * l
    else:
        tmp = l
    return tmp
# Piecewise approximation of e^(-w) * l^(e^w):
# (-w) * l when w <= -0.01, otherwise l.
# The original line was an invalid one-line flattening (statements
# juxtaposed without separators do not parse); restored as proper Julia.
function code(w, l)
    tmp = 0.0
    if (w <= -0.01)
        tmp = Float64(Float64(-w) * l)
    else
        tmp = l
    end
    return tmp
end
function tmp_2 = code(w, l)
    % Piecewise approximation of e^(-w) * l^(e^w):
    % (-w) * l when w <= -0.01, otherwise l.
    % The original line was an invalid one-line flattening (the if-statement
    % needs line breaks or commas to parse); restored as proper MATLAB.
    tmp = 0.0;
    if (w <= -0.01)
        tmp = -w * l;
    else
        tmp = l;
    end
    tmp_2 = tmp;
end
(* Piecewise: (-w) * l when w <= -0.01, otherwise l. *)
code[w_, l_] := If[LessEqual[w, -0.01], N[((-w) * l), $MachinePrecision], l]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -0.01:\\
\;\;\;\;\left(-w\right) \cdot \ell\\
\mathbf{else}:\\
\;\;\;\;\ell\\
\end{array}
\end{array}
if w < -0.0100000000000000002: Initial program 99.9%
Taylor expanded in w around 0 97.3%
Taylor expanded in w around 0 31.7%
mul-1-neg 31.7%
unsub-neg 31.7%
Simplified 31.7%
Taylor expanded in w around inf 31.7%
associate-*r* 31.7%
neg-mul-1 31.7%
Simplified 31.7%
if -0.0100000000000000002 < w: Initial program 99.6%
Taylor expanded in w around 0 98.5%
Taylor expanded in w around 0 81.7%
Final simplification 68.8%
(FPCore (w l) :precision binary64 (* l (- 1.0 w)))
double code(double w, double l) {
    /* Taylor form: l * (1 - w). */
    double factor = 1.0 - w;
    return l * factor;
}
real(8) function code(w, l)
    ! Taylor form: l * (1 - w).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    real(8) :: factor
    factor = 1.0d0 - w
    code = l * factor
end function
public static double code(double w, double l) {
    // Taylor form: l * (1 - w).
    final double factor = 1.0 - w;
    return l * factor;
}
def code(w, l):
    """Taylor form: l * (1 - w)."""
    factor = 1.0 - w
    return l * factor
function code(w, l)
    # Taylor form: l * (1 - w).
    factor = Float64(1.0 - w)
    return Float64(l * factor)
end
function tmp = code(w, l)
    % Taylor form: l * (1 - w).
    factor = 1.0 - w;
    tmp = l * factor;
end
(* Taylor form: l * (1 - w), rounded to $MachinePrecision. *)
code[w_, l_] := N[(l * N[(1.0 - w), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\ell \cdot \left(1 - w\right)
\end{array}
Initial program 99.7%
exp-neg 99.7%
associate-*l/ 99.7%
*-lft-identity 99.7%
Simplified 99.7%
Taylor expanded in w around 0 98.2%
Taylor expanded in w around 0 68.5%
*-rgt-identity 68.5%
mul-1-neg 68.5%
distribute-rgt-neg-out 68.5%
distribute-lft-in 68.5%
sub-neg 68.5%
Simplified 68.5%
Final simplification 68.5%
(FPCore (w l) :precision binary64 l)
/* Zeroth-order approximation: the expression reduces to l (w unused). */
double code(double w, double l) {
return l;
}
! Zeroth-order approximation: the expression reduces to l (w unused).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l
end function
// Zeroth-order approximation: the expression reduces to l (w unused).
public static double code(double w, double l) {
return l;
}
def code(w, l):
    """Zeroth-order approximation: return l unchanged (w unused)."""
    return l
# Zeroth-order approximation: the expression reduces to l (w unused).
function code(w, l) return l end
% Zeroth-order approximation: the expression reduces to l (w unused).
function tmp = code(w, l) tmp = l; end
(* Zeroth-order approximation: the expression reduces to l (w unused). *)
code[w_, l_] := l
\begin{array}{l}
\\
\ell
\end{array}
Initial program 99.7%
Taylor expanded in w around 0 98.2%
Taylor expanded in w around 0 61.6%
Final simplification 61.6%
herbie shell --seed 2023334
(FPCore (w l)
:name "exp-w (used to crash)"
:precision binary64
(* (exp (- w)) (pow l (exp w))))