
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! Evaluates exp(-w) * l**exp(w) in real(8) (binary64) precision.
real(8) function code(w, l)
! Exponent of the decaying factor exp(-w).
real(8), intent (in) :: w
! Base raised to the power exp(w).
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
/** Evaluates e^(-w) * l^(e^w) in binary64. */
public static double code(double w, double l) {
    final double decay = Math.exp(-w);
    final double grown = Math.pow(l, Math.exp(w));
    return decay * grown;
}
def code(w, l):
    """Evaluate exp(-w) * l**exp(w) in binary64."""
    decay = math.exp(-w)
    grown = math.pow(l, math.exp(w))
    return decay * grown
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Initial program 99.8%
(FPCore (w l) :precision binary64 (/ (pow l (exp w)) (exp w)))
double code(double w, double l) {
return pow(l, exp(w)) / exp(w);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = (l ** exp(w)) / exp(w)
end function
public static double code(double w, double l) {
return Math.pow(l, Math.exp(w)) / Math.exp(w);
}
def code(w, l):
    """Evaluate l**exp(w) / exp(w) in binary64 (rewritten form of exp(-w)*l**exp(w))."""
    growth = math.exp(w)
    return math.pow(l, growth) / growth
function code(w, l) return Float64((l ^ exp(w)) / exp(w)) end
function tmp = code(w, l) tmp = (l ^ exp(w)) / exp(w); end
code[w_, l_] := N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}}
\end{array}
Initial program 99.8%
exp-neg 99.8%
remove-double-neg 99.8%
associate-*l/ 99.8%
*-lft-identity 99.8%
remove-double-neg 99.8%
Simplified 99.8%
(FPCore (w l) :precision binary64 (if (or (<= w -0.7) (not (<= w 580.0))) (exp (- w)) (- l (* w (+ l (* w (* l -0.5)))))))
double code(double w, double l) {
double tmp;
if ((w <= -0.7) || !(w <= 580.0)) {
tmp = exp(-w);
} else {
tmp = l - (w * (l + (w * (l * -0.5))));
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if ((w <= (-0.7d0)) .or. (.not. (w <= 580.0d0))) then
tmp = exp(-w)
else
tmp = l - (w * (l + (w * (l * (-0.5d0)))))
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if ((w <= -0.7) || !(w <= 580.0)) {
tmp = Math.exp(-w);
} else {
tmp = l - (w * (l + (w * (l * -0.5))));
}
return tmp;
}
def code(w, l):
    """Piecewise approximation of exp(-w) * l**exp(w).

    Outside (-0.7, 580.0] fall back to exp(-w); inside, use a
    second-order Taylor form in w.  The ``not (w <= 580.0)`` test
    (rather than ``w > 580.0``) also routes NaN to the exp branch.

    Fix: the original was collapsed onto a single line, which is a
    Python syntax error; the if/else body is restored to valid
    multi-line form with the same logic.
    """
    tmp = 0
    if (w <= -0.7) or not (w <= 580.0):
        tmp = math.exp(-w)
    else:
        tmp = l - (w * (l + (w * (l * -0.5))))
    return tmp
function code(w, l) tmp = 0.0 if ((w <= -0.7) || !(w <= 580.0)) tmp = exp(Float64(-w)); else tmp = Float64(l - Float64(w * Float64(l + Float64(w * Float64(l * -0.5))))); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if ((w <= -0.7) || ~((w <= 580.0))) tmp = exp(-w); else tmp = l - (w * (l + (w * (l * -0.5)))); end tmp_2 = tmp; end
code[w_, l_] := If[Or[LessEqual[w, -0.7], N[Not[LessEqual[w, 580.0]], $MachinePrecision]], N[Exp[(-w)], $MachinePrecision], N[(l - N[(w * N[(l + N[(w * N[(l * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -0.7 \lor \neg \left(w \leq 580\right):\\
\;\;\;\;e^{-w}\\
\mathbf{else}:\\
\;\;\;\;\ell - w \cdot \left(\ell + w \cdot \left(\ell \cdot -0.5\right)\right)\\
\end{array}
\end{array}
if w < -0.69999999999999996 or 580 < w Initial program 100.0%
exp-neg100.0%
remove-double-neg100.0%
associate-*l/100.0%
*-lft-identity100.0%
remove-double-neg100.0%
Simplified100.0%
add-exp-log100.0%
pow1100.0%
log-pow100.0%
exp-prod99.9%
exp-1-e99.9%
log-div99.9%
log-pow99.9%
add-log-exp99.9%
fma-neg99.9%
Applied egg-rr99.9%
Taylor expanded in w around inf 99.9%
mul-1-neg99.9%
Simplified99.9%
Taylor expanded in w around inf 100.0%
exp-prod99.9%
log-E99.9%
*-rgt-identity99.9%
exp-prod100.0%
neg-mul-1100.0%
Simplified100.0%
if -0.69999999999999996 < w < 580Initial program 99.7%
exp-neg99.7%
remove-double-neg99.7%
associate-*l/99.7%
*-lft-identity99.7%
remove-double-neg99.7%
Simplified99.7%
Taylor expanded in w around 0 96.4%
Taylor expanded in w around 0 96.4%
associate-*r*96.4%
mul-1-neg96.4%
distribute-rgt-out96.4%
metadata-eval96.4%
Simplified96.4%
Final simplification98.0%
(FPCore (w l) :precision binary64 (/ l (exp w)))
double code(double w, double l) {
return l / exp(w);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l / exp(w)
end function
public static double code(double w, double l) {
return l / Math.exp(w);
}
def code(w, l):
    """Evaluate l / exp(w) in binary64 (simplified form of exp(-w)*l**exp(w))."""
    denominator = math.exp(w)
    return l / denominator
function code(w, l) return Float64(l / exp(w)) end
function tmp = code(w, l) tmp = l / exp(w); end
code[w_, l_] := N[(l / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{e^{w}}
\end{array}
Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
Taylor expanded in w around 0 97.6%
(FPCore (w l)
:precision binary64
(+
l
(*
w
(-
(*
w
(+
(- l (* l 0.5))
(* w (- (- (* l 0.5) l) (+ (* l -0.5) (* l 0.16666666666666666))))))
l))))
double code(double w, double l) {
return l + (w * ((w * ((l - (l * 0.5)) + (w * (((l * 0.5) - l) - ((l * -0.5) + (l * 0.16666666666666666)))))) - l));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l + (w * ((w * ((l - (l * 0.5d0)) + (w * (((l * 0.5d0) - l) - ((l * (-0.5d0)) + (l * 0.16666666666666666d0)))))) - l))
end function
public static double code(double w, double l) {
return l + (w * ((w * ((l - (l * 0.5)) + (w * (((l * 0.5) - l) - ((l * -0.5) + (l * 0.16666666666666666)))))) - l));
}
def code(w, l):
    """Third-order Taylor form in w of exp(-w) * l**exp(w).

    The binary64 operation tree of the original expression is
    preserved exactly; sub-expressions are only given names.
    """
    cubic = ((l * 0.5) - l) - ((l * -0.5) + (l * 0.16666666666666666))
    quad = (l - (l * 0.5)) + (w * cubic)
    return l + (w * ((w * quad) - l))
function code(w, l) return Float64(l + Float64(w * Float64(Float64(w * Float64(Float64(l - Float64(l * 0.5)) + Float64(w * Float64(Float64(Float64(l * 0.5) - l) - Float64(Float64(l * -0.5) + Float64(l * 0.16666666666666666)))))) - l))) end
function tmp = code(w, l) tmp = l + (w * ((w * ((l - (l * 0.5)) + (w * (((l * 0.5) - l) - ((l * -0.5) + (l * 0.16666666666666666)))))) - l)); end
code[w_, l_] := N[(l + N[(w * N[(N[(w * N[(N[(l - N[(l * 0.5), $MachinePrecision]), $MachinePrecision] + N[(w * N[(N[(N[(l * 0.5), $MachinePrecision] - l), $MachinePrecision] - N[(N[(l * -0.5), $MachinePrecision] + N[(l * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\ell + w \cdot \left(w \cdot \left(\left(\ell - \ell \cdot 0.5\right) + w \cdot \left(\left(\ell \cdot 0.5 - \ell\right) - \left(\ell \cdot -0.5 + \ell \cdot 0.16666666666666666\right)\right)\right) - \ell\right)
\end{array}
Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
Taylor expanded in w around 0 97.6%
Taylor expanded in w around 0 76.8%
Final simplification76.8%
(FPCore (w l) :precision binary64 (if (<= w -1.9e+146) (+ 1.0 (* w (+ -1.0 (* w 0.5)))) (+ l (* w (* l (* w 0.5))))))
/* Piecewise quadratic-in-w approximation of exp(-w) * l^(exp(w));
 * for extremely negative w the l factor is dropped. */
double code(double w, double l) {
    if (w <= -1.9e+146) {
        return 1.0 + (w * (-1.0 + (w * 0.5)));
    }
    return l + (w * (l * (w * 0.5)));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-1.9d+146)) then
tmp = 1.0d0 + (w * ((-1.0d0) + (w * 0.5d0)))
else
tmp = l + (w * (l * (w * 0.5d0)))
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -1.9e+146) {
tmp = 1.0 + (w * (-1.0 + (w * 0.5)));
} else {
tmp = l + (w * (l * (w * 0.5)));
}
return tmp;
}
def code(w, l):
    """Piecewise quadratic-in-w approximation of exp(-w) * l**exp(w).

    For extremely negative w the l factor is dropped.

    Fix: the original was collapsed onto a single line, which is a
    Python syntax error; the if/else body is restored to valid
    multi-line form with the same logic.
    """
    tmp = 0
    if w <= -1.9e+146:
        tmp = 1.0 + (w * (-1.0 + (w * 0.5)))
    else:
        tmp = l + (w * (l * (w * 0.5)))
    return tmp
function code(w, l) tmp = 0.0 if (w <= -1.9e+146) tmp = Float64(1.0 + Float64(w * Float64(-1.0 + Float64(w * 0.5)))); else tmp = Float64(l + Float64(w * Float64(l * Float64(w * 0.5)))); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -1.9e+146) tmp = 1.0 + (w * (-1.0 + (w * 0.5))); else tmp = l + (w * (l * (w * 0.5))); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -1.9e+146], N[(1.0 + N[(w * N[(-1.0 + N[(w * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(l + N[(w * N[(l * N[(w * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1.9 \cdot 10^{+146}:\\
\;\;\;\;1 + w \cdot \left(-1 + w \cdot 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\ell + w \cdot \left(\ell \cdot \left(w \cdot 0.5\right)\right)\\
\end{array}
\end{array}
if w < -1.8999999999999999e146Initial program 100.0%
exp-neg100.0%
remove-double-neg100.0%
associate-*l/100.0%
*-lft-identity100.0%
remove-double-neg100.0%
Simplified100.0%
add-exp-log100.0%
pow1100.0%
log-pow100.0%
exp-prod100.0%
exp-1-e100.0%
log-div100.0%
log-pow100.0%
add-log-exp100.0%
fma-neg100.0%
Applied egg-rr100.0%
Taylor expanded in w around inf 100.0%
mul-1-neg100.0%
Simplified100.0%
Taylor expanded in w around 0 95.8%
log-E95.8%
metadata-eval95.8%
associate-*r*95.8%
log-E95.8%
metadata-eval95.8%
associate-*r*95.8%
Simplified95.8%
if -1.8999999999999999e146 < w Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
Taylor expanded in w around 0 97.1%
Taylor expanded in w around 0 70.5%
associate-*r*70.5%
mul-1-neg70.5%
distribute-rgt-out70.5%
metadata-eval70.5%
Simplified70.5%
Taylor expanded in w around inf 70.5%
*-commutative70.5%
associate-*l*70.5%
Simplified70.5%
Final simplification74.7%
(FPCore (w l) :precision binary64 (+ l (* l (* w (+ -1.0 (* w 0.5))))))
double code(double w, double l) {
return l + (l * (w * (-1.0 + (w * 0.5))));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l + (l * (w * ((-1.0d0) + (w * 0.5d0))))
end function
public static double code(double w, double l) {
return l + (l * (w * (-1.0 + (w * 0.5))));
}
def code(w, l):
    """Quadratic-in-w approximation: l + l*(w*(-1 + w/2)).

    The binary64 operation tree of the original is preserved exactly.
    """
    inner = -1.0 + (w * 0.5)
    return l + (l * (w * inner))
function code(w, l) return Float64(l + Float64(l * Float64(w * Float64(-1.0 + Float64(w * 0.5))))) end
function tmp = code(w, l) tmp = l + (l * (w * (-1.0 + (w * 0.5)))); end
code[w_, l_] := N[(l + N[(l * N[(w * N[(-1.0 + N[(w * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\ell + \ell \cdot \left(w \cdot \left(-1 + w \cdot 0.5\right)\right)
\end{array}
Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
Taylor expanded in w around 0 97.6%
Taylor expanded in w around 0 71.3%
associate-*r*71.3%
mul-1-neg71.3%
distribute-rgt-out71.3%
metadata-eval71.3%
Simplified71.3%
Taylor expanded in l around 0 74.6%
Final simplification74.6%
(FPCore (w l) :precision binary64 (if (<= w -0.7) (- 1.0 w) (+ l (* w l))))
/* Linear approximations of exp(-w) * l^(exp(w)), split at w = -0.7. */
double code(double w, double l) {
    if (w <= -0.7) {
        return 1.0 - w;
    }
    return l + (w * l);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-0.7d0)) then
tmp = 1.0d0 - w
else
tmp = l + (w * l)
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -0.7) {
tmp = 1.0 - w;
} else {
tmp = l + (w * l);
}
return tmp;
}
def code(w, l):
    """Linear approximations of exp(-w) * l**exp(w), split at w = -0.7.

    Fix: the original was collapsed onto a single line, which is a
    Python syntax error; the if/else body is restored to valid
    multi-line form with the same logic.
    """
    tmp = 0
    if w <= -0.7:
        tmp = 1.0 - w
    else:
        tmp = l + (w * l)
    return tmp
function code(w, l) tmp = 0.0 if (w <= -0.7) tmp = Float64(1.0 - w); else tmp = Float64(l + Float64(w * l)); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -0.7) tmp = 1.0 - w; else tmp = l + (w * l); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -0.7], N[(1.0 - w), $MachinePrecision], N[(l + N[(w * l), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -0.7:\\
\;\;\;\;1 - w\\
\mathbf{else}:\\
\;\;\;\;\ell + w \cdot \ell\\
\end{array}
\end{array}
if w < -0.69999999999999996Initial program 100.0%
exp-neg100.0%
remove-double-neg100.0%
associate-*l/100.0%
*-lft-identity100.0%
remove-double-neg100.0%
Simplified100.0%
add-exp-log100.0%
pow1100.0%
log-pow100.0%
exp-prod99.9%
exp-1-e99.9%
log-div99.9%
log-pow99.9%
add-log-exp99.9%
fma-neg99.9%
Applied egg-rr99.9%
Taylor expanded in w around inf 99.9%
mul-1-neg99.9%
Simplified99.9%
Taylor expanded in w around 0 5.5%
mul-1-neg5.5%
log-E5.5%
metadata-eval5.5%
log-E5.5%
unsub-neg5.5%
log-E5.5%
metadata-eval5.5%
*-rgt-identity5.5%
Simplified5.5%
if -0.69999999999999996 < w Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
Taylor expanded in w around 0 97.0%
add-log-exp25.1%
div-inv25.1%
rec-exp25.1%
add-sqr-sqrt3.8%
sqrt-unprod8.2%
sqr-neg8.2%
sqrt-unprod4.4%
add-sqr-sqrt8.2%
Applied egg-rr8.2%
Taylor expanded in w around 0 80.5%
*-commutative80.5%
Simplified80.5%
(FPCore (w l) :precision binary64 (+ l (* w (* l (* w 0.5)))))
double code(double w, double l) {
return l + (w * (l * (w * 0.5)));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l + (w * (l * (w * 0.5d0)))
end function
public static double code(double w, double l) {
return l + (w * (l * (w * 0.5)));
}
def code(w, l):
    """Quadratic-in-w approximation: l + w*(l*(w*0.5)).

    The binary64 operation tree of the original is preserved exactly.
    """
    half_w = w * 0.5
    return l + (w * (l * half_w))
function code(w, l) return Float64(l + Float64(w * Float64(l * Float64(w * 0.5)))) end
function tmp = code(w, l) tmp = l + (w * (l * (w * 0.5))); end
code[w_, l_] := N[(l + N[(w * N[(l * N[(w * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\ell + w \cdot \left(\ell \cdot \left(w \cdot 0.5\right)\right)
\end{array}
Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
Taylor expanded in w around 0 97.6%
Taylor expanded in w around 0 71.3%
associate-*r*71.3%
mul-1-neg71.3%
distribute-rgt-out71.3%
metadata-eval71.3%
Simplified71.3%
Taylor expanded in w around inf 71.3%
*-commutative71.3%
associate-*l*71.3%
Simplified71.3%
(FPCore (w l) :precision binary64 (- l (* w l)))
double code(double w, double l) {
return l - (w * l);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l - (w * l)
end function
public static double code(double w, double l) {
return l - (w * l);
}
def code(w, l):
    """First-order approximation l - w*l of exp(-w) * l**exp(w)."""
    correction = w * l
    return l - correction
function code(w, l) return Float64(l - Float64(w * l)) end
function tmp = code(w, l) tmp = l - (w * l); end
code[w_, l_] := N[(l - N[(w * l), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\ell - w \cdot \ell
\end{array}
Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
Taylor expanded in w around 0 97.6%
Taylor expanded in w around 0 63.9%
mul-1-neg63.9%
*-commutative63.9%
unsub-neg63.9%
*-commutative63.9%
Simplified63.9%
Final simplification63.9%
(FPCore (w l) :precision binary64 (- 1.0 w))
double code(double w, double l) {
return 1.0 - w;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = 1.0d0 - w
end function
public static double code(double w, double l) {
return 1.0 - w;
}
def code(w, l):
    """Linear approximation 1 - w; the l argument is unused here."""
    result = 1.0 - w
    return result
function code(w, l) return Float64(1.0 - w) end
function tmp = code(w, l) tmp = 1.0 - w; end
code[w_, l_] := N[(1.0 - w), $MachinePrecision]
\begin{array}{l}
\\
1 - w
\end{array}
Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
add-exp-log95.2%
pow195.2%
log-pow95.2%
exp-prod94.6%
exp-1-e94.6%
log-div94.6%
log-pow94.6%
add-log-exp94.6%
fma-neg94.6%
Applied egg-rr94.6%
Taylor expanded in w around inf 46.5%
mul-1-neg46.5%
Simplified46.5%
Taylor expanded in w around 0 5.1%
mul-1-neg5.1%
log-E5.1%
metadata-eval5.1%
log-E5.1%
unsub-neg5.1%
log-E5.1%
metadata-eval5.1%
*-rgt-identity5.1%
Simplified5.1%
(FPCore (w l) :precision binary64 1.0)
double code(double w, double l) {
return 1.0;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = 1.0d0
end function
public static double code(double w, double l) {
return 1.0;
}
def code(w, l):
    """Constant approximation: always 1.0; both arguments are ignored."""
    return 1.0
function code(w, l) return 1.0 end
function tmp = code(w, l) tmp = 1.0; end
code[w_, l_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 99.8%
exp-neg99.8%
remove-double-neg99.8%
associate-*l/99.8%
*-lft-identity99.8%
remove-double-neg99.8%
Simplified99.8%
add-exp-log95.2%
pow195.2%
log-pow95.2%
exp-prod94.6%
exp-1-e94.6%
log-div94.6%
log-pow94.6%
add-log-exp94.6%
fma-neg94.6%
Applied egg-rr94.6%
Taylor expanded in w around inf 46.5%
mul-1-neg46.5%
Simplified46.5%
Taylor expanded in w around 0 4.5%
herbie shell --seed 2024101
(FPCore (w l)
:name "exp-w (used to crash)"
:precision binary64
(* (exp (- w)) (pow l (exp w))))