
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! Reference implementation: exp(-w) * l**exp(w) in double precision (binary64).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
/** Reference implementation: Math.exp(-w) * l^(Math.exp(w)) in double precision. */
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l):
    """Reference implementation: exp(-w) * l**exp(w) in double precision."""
    damping = math.exp(-w)
    powered = math.pow(l, math.exp(w))
    return damping * powered
# Reference implementation: exp(-w) * l^exp(w) in Float64.
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
% Reference implementation: exp(-w) * l^exp(w) in double precision.
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
(* Reference implementation: exp(-w) * l^exp(w) at machine precision. *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
# exp(-w) * l**exp(w), evaluated directly (reference implementation).
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
(FPCore (w l) :precision binary64 (if (<= w -1.12e-14) (exp (fma (log l) (exp w) (- w))) (* 1.0 (pow l (+ 1.0 w)))))
double code(double w, double l) {
double tmp;
if (w <= -1.12e-14) {
tmp = exp(fma(log(l), exp(w), -w));
} else {
tmp = 1.0 * pow(l, (1.0 + w));
}
return tmp;
}
# Branch on w near zero: fma-folded exponent form for w <= -1.12e-14, else l^(1+w).
function code(w, l) tmp = 0.0 if (w <= -1.12e-14) tmp = exp(fma(log(l), exp(w), Float64(-w))); else tmp = Float64(1.0 * (l ^ Float64(1.0 + w))); end return tmp end
code[w_, l_] := If[LessEqual[w, -1.12e-14], N[Exp[N[(N[Log[l], $MachinePrecision] * N[Exp[w], $MachinePrecision] + (-w)), $MachinePrecision]], $MachinePrecision], N[(1.0 * N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1.12 \cdot 10^{-14}:\\
\;\;\;\;e^{\mathsf{fma}\left(\log \ell, e^{w}, -w\right)}\\
\mathbf{else}:\\
\;\;\;\;1 \cdot {\ell}^{\left(1 + w\right)}\\
\end{array}
\end{array}
if w < -1.12000000000000006e-14

Initial program 99.8%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
pow-to-expN/A
lift-exp.f64N/A
prod-expN/A
lower-exp.f64N/A
lower-fma.f64N/A
lower-log.f6499.7
Applied rewrites99.7%
if -1.12000000000000006e-14 < w

Initial program 98.7%
Taylor expanded in w around 0
lower-+.f6498.4
Applied rewrites98.4%
Taylor expanded in w around 0
Applied rewrites99.5%
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow (pow l -1.0) -1.0)))
/* Rewritten form: exp(-w) * (l^-1)^-1. The double reciprocal power is
   algebraically equal to l but rounds differently. */
double code(double w, double l) {
return exp(-w) * pow(pow(l, -1.0), -1.0);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * ((l ** (-1.0d0)) ** (-1.0d0))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(Math.pow(l, -1.0), -1.0);
}
# exp(-w) * (l**-1)**-1 -- double reciprocal power form.
def code(w, l): return math.exp(-w) * math.pow(math.pow(l, -1.0), -1.0)
function code(w, l) return Float64(exp(Float64(-w)) * ((l ^ -1.0) ^ -1.0)) end
function tmp = code(w, l) tmp = exp(-w) * ((l ^ -1.0) ^ -1.0); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[N[Power[l, -1.0], $MachinePrecision], -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\left({\ell}^{-1}\right)}^{-1}
\end{array}
Initial program 99.0%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6498.9
Applied rewrites98.9%
Taylor expanded in w around 0
lower-/.f6497.3
Applied rewrites97.3%
Final simplification97.3%
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Initial program 99.0%
(FPCore (w l) :precision binary64 (/ (exp (- w)) (pow l -1.0)))
/* Rewritten form: exp(-w) / l^-1 (division by the reciprocal power). */
double code(double w, double l) {
return exp(-w) / pow(l, -1.0);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) / (l ** (-1.0d0))
end function
public static double code(double w, double l) {
return Math.exp(-w) / Math.pow(l, -1.0);
}
# exp(-w) / l**-1 -- division by the reciprocal power.
def code(w, l): return math.exp(-w) / math.pow(l, -1.0)
function code(w, l) return Float64(exp(Float64(-w)) / (l ^ -1.0)) end
function tmp = code(w, l) tmp = exp(-w) / (l ^ -1.0); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] / N[Power[l, -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{-w}}{{\ell}^{-1}}
\end{array}
Initial program 99.0%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6498.9
Applied rewrites98.9%
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
*-rgt-identityN/A
lower-/.f6498.9
lift-/.f64N/A
lift-pow.f64N/A
pow-flipN/A
lower-pow.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
Taylor expanded in w around 0
lower-/.f6497.3
Applied rewrites97.3%
Final simplification97.3%
(FPCore (w l) :precision binary64 (if (<= l 0.41) (* (- 1.0 w) (pow l (+ 1.0 w))) (* (fma (- (* 0.5 w) 1.0) w 1.0) (pow l (fma (fma 0.5 w 1.0) w 1.0)))))
double code(double w, double l) {
double tmp;
if (l <= 0.41) {
tmp = (1.0 - w) * pow(l, (1.0 + w));
} else {
tmp = fma(((0.5 * w) - 1.0), w, 1.0) * pow(l, fma(fma(0.5, w, 1.0), w, 1.0));
}
return tmp;
}
function code(w, l) tmp = 0.0 if (l <= 0.41) tmp = Float64(Float64(1.0 - w) * (l ^ Float64(1.0 + w))); else tmp = Float64(fma(Float64(Float64(0.5 * w) - 1.0), w, 1.0) * (l ^ fma(fma(0.5, w, 1.0), w, 1.0))); end return tmp end
code[w_, l_] := If[LessEqual[l, 0.41], N[(N[(1.0 - w), $MachinePrecision] * N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(0.5 * w), $MachinePrecision] - 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision] * N[Power[l, N[(N[(0.5 * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 0.41:\\
\;\;\;\;\left(1 - w\right) \cdot {\ell}^{\left(1 + w\right)}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.5 \cdot w - 1, w, 1\right) \cdot {\ell}^{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.5, w, 1\right), w, 1\right)\right)}\\
\end{array}
\end{array}
if l < 0.409999999999999976

Initial program 99.8%
Taylor expanded in w around 0
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in w around 0
fp-cancel-sign-sub-invN/A
metadata-evalN/A
*-lft-identityN/A
lower--.f6498.5
Applied rewrites98.5%
if 0.409999999999999976 < l

Initial program 97.9%
Taylor expanded in w around 0
lower-+.f6459.2
Applied rewrites59.2%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f6461.3
Applied rewrites61.3%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f6499.6
Applied rewrites99.6%
(FPCore (w l)
:precision binary64
(if (<= l 0.41)
(* (- 1.0 w) (pow l (+ 1.0 w)))
(/
(fma (- (* (fma -0.16666666666666666 w 0.5) w) 1.0) w 1.0)
(pow l -1.0))))
/* Branch on l: small l uses (1 - w) * l^(1 + w); larger l uses a cubic
   Taylor polynomial of exp(-w) divided by l^-1. */
double code(double w, double l) {
double tmp;
if (l <= 0.41) {
tmp = (1.0 - w) * pow(l, (1.0 + w));
} else {
/* fma chain evaluates 1 - w + w^2/2 - w^3/6 (Taylor series of exp(-w)) */
tmp = fma(((fma(-0.16666666666666666, w, 0.5) * w) - 1.0), w, 1.0) / pow(l, -1.0);
}
return tmp;
}
function code(w, l) tmp = 0.0 if (l <= 0.41) tmp = Float64(Float64(1.0 - w) * (l ^ Float64(1.0 + w))); else tmp = Float64(fma(Float64(Float64(fma(-0.16666666666666666, w, 0.5) * w) - 1.0), w, 1.0) / (l ^ -1.0)); end return tmp end
code[w_, l_] := If[LessEqual[l, 0.41], N[(N[(1.0 - w), $MachinePrecision] * N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(-0.16666666666666666 * w + 0.5), $MachinePrecision] * w), $MachinePrecision] - 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision] / N[Power[l, -1.0], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 0.41:\\
\;\;\;\;\left(1 - w\right) \cdot {\ell}^{\left(1 + w\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, w, 0.5\right) \cdot w - 1, w, 1\right)}{{\ell}^{-1}}\\
\end{array}
\end{array}
if l < 0.409999999999999976Initial program 99.8%
Taylor expanded in w around 0
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in w around 0
fp-cancel-sign-sub-invN/A
metadata-evalN/A
*-lft-identityN/A
lower--.f6498.5
Applied rewrites98.5%
if 0.409999999999999976 < l Initial program 97.9%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6497.8
Applied rewrites97.8%
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
*-rgt-identityN/A
lower-/.f6497.8
lift-/.f64N/A
lift-pow.f64N/A
pow-flipN/A
lower-pow.f64N/A
lower-neg.f6497.8
Applied rewrites97.8%
Taylor expanded in w around 0
lower-/.f6497.0
Applied rewrites97.0%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6491.7
Applied rewrites91.7%
Final simplification95.7%
(FPCore (w l)
:precision binary64
(if (<= l 0.41)
(* 1.0 (pow l (+ 1.0 w)))
(/
(fma (- (* (fma -0.16666666666666666 w 0.5) w) 1.0) w 1.0)
(pow l -1.0))))
/* Branch on l: small l uses l^(1 + w) (the leading 1.0 * is a Herbie
   artifact of dropping the prefactor); larger l uses a cubic Taylor
   polynomial of exp(-w) divided by l^-1. */
double code(double w, double l) {
double tmp;
if (l <= 0.41) {
tmp = 1.0 * pow(l, (1.0 + w));
} else {
/* fma chain evaluates 1 - w + w^2/2 - w^3/6 (Taylor series of exp(-w)) */
tmp = fma(((fma(-0.16666666666666666, w, 0.5) * w) - 1.0), w, 1.0) / pow(l, -1.0);
}
return tmp;
}
function code(w, l) tmp = 0.0 if (l <= 0.41) tmp = Float64(1.0 * (l ^ Float64(1.0 + w))); else tmp = Float64(fma(Float64(Float64(fma(-0.16666666666666666, w, 0.5) * w) - 1.0), w, 1.0) / (l ^ -1.0)); end return tmp end
code[w_, l_] := If[LessEqual[l, 0.41], N[(1.0 * N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(N[(-0.16666666666666666 * w + 0.5), $MachinePrecision] * w), $MachinePrecision] - 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision] / N[Power[l, -1.0], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 0.41:\\
\;\;\;\;1 \cdot {\ell}^{\left(1 + w\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, w, 0.5\right) \cdot w - 1, w, 1\right)}{{\ell}^{-1}}\\
\end{array}
\end{array}
if l < 0.409999999999999976Initial program 99.8%
Taylor expanded in w around 0
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in w around 0
Applied rewrites98.3%
if 0.409999999999999976 < l Initial program 97.9%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6497.8
Applied rewrites97.8%
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
*-rgt-identityN/A
lower-/.f6497.8
lift-/.f64N/A
lift-pow.f64N/A
pow-flipN/A
lower-pow.f64N/A
lower-neg.f6497.8
Applied rewrites97.8%
Taylor expanded in w around 0
lower-/.f6497.0
Applied rewrites97.0%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6491.7
Applied rewrites91.7%
Final simplification95.6%
(FPCore (w l) :precision binary64 (/ (fma (- (* (fma -0.16666666666666666 w 0.5) w) 1.0) w 1.0) (pow l -1.0)))
/* Cubic Taylor polynomial of exp(-w), 1 - w + w^2/2 - w^3/6, divided by l^-1. */
double code(double w, double l) {
return fma(((fma(-0.16666666666666666, w, 0.5) * w) - 1.0), w, 1.0) / pow(l, -1.0);
}
function code(w, l) return Float64(fma(Float64(Float64(fma(-0.16666666666666666, w, 0.5) * w) - 1.0), w, 1.0) / (l ^ -1.0)) end
code[w_, l_] := N[(N[(N[(N[(N[(-0.16666666666666666 * w + 0.5), $MachinePrecision] * w), $MachinePrecision] - 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision] / N[Power[l, -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, w, 0.5\right) \cdot w - 1, w, 1\right)}{{\ell}^{-1}}
\end{array}
Initial program 99.0%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6498.9
Applied rewrites98.9%
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
*-rgt-identityN/A
lower-/.f6498.9
lift-/.f64N/A
lift-pow.f64N/A
pow-flipN/A
lower-pow.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
Taylor expanded in w around 0
lower-/.f6497.3
Applied rewrites97.3%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6477.5
Applied rewrites77.5%
Final simplification77.5%
(FPCore (w l) :precision binary64 (/ (fma (- (* 0.5 w) 1.0) w 1.0) (pow l -1.0)))
/* Quadratic Taylor polynomial of exp(-w), 1 - w + w^2/2, divided by l^-1. */
double code(double w, double l) {
return fma(((0.5 * w) - 1.0), w, 1.0) / pow(l, -1.0);
}
function code(w, l) return Float64(fma(Float64(Float64(0.5 * w) - 1.0), w, 1.0) / (l ^ -1.0)) end
code[w_, l_] := N[(N[(N[(N[(0.5 * w), $MachinePrecision] - 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision] / N[Power[l, -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(0.5 \cdot w - 1, w, 1\right)}{{\ell}^{-1}}
\end{array}
Initial program 99.0%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6498.9
Applied rewrites98.9%
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
*-rgt-identityN/A
lower-/.f6498.9
lift-/.f64N/A
lift-pow.f64N/A
pow-flipN/A
lower-pow.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
Taylor expanded in w around 0
lower-/.f6497.3
Applied rewrites97.3%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f6473.9
Applied rewrites73.9%
Final simplification73.9%
(FPCore (w l) :precision binary64 (if (<= l 8.5e-10) (* (- 1.0 w) (pow l (+ 1.0 w))) (* 1.0 (pow l (fma (fma 0.5 w 1.0) w 1.0)))))
/* Branch on l: tiny l (<= 8.5e-10) uses (1 - w) * l^(1 + w); otherwise
   the whole correction is folded into the exponent 1 + w + w^2/2. */
double code(double w, double l) {
double tmp;
if (l <= 8.5e-10) {
tmp = (1.0 - w) * pow(l, (1.0 + w));
} else {
/* inner fma chain evaluates the exponent 1 + w + w^2/2 */
tmp = 1.0 * pow(l, fma(fma(0.5, w, 1.0), w, 1.0));
}
return tmp;
}
function code(w, l) tmp = 0.0 if (l <= 8.5e-10) tmp = Float64(Float64(1.0 - w) * (l ^ Float64(1.0 + w))); else tmp = Float64(1.0 * (l ^ fma(fma(0.5, w, 1.0), w, 1.0))); end return tmp end
code[w_, l_] := If[LessEqual[l, 8.5e-10], N[(N[(1.0 - w), $MachinePrecision] * N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(1.0 * N[Power[l, N[(N[(0.5 * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 8.5 \cdot 10^{-10}:\\
\;\;\;\;\left(1 - w\right) \cdot {\ell}^{\left(1 + w\right)}\\
\mathbf{else}:\\
\;\;\;\;1 \cdot {\ell}^{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.5, w, 1\right), w, 1\right)\right)}\\
\end{array}
\end{array}
if l < 8.4999999999999996e-10Initial program 99.8%
Taylor expanded in w around 0
lower-+.f6498.5
Applied rewrites98.5%
Taylor expanded in w around 0
fp-cancel-sign-sub-invN/A
metadata-evalN/A
*-lft-identityN/A
lower--.f6498.5
Applied rewrites98.5%
if 8.4999999999999996e-10 < l Initial program 98.0%
Taylor expanded in w around 0
lower-+.f6460.0
Applied rewrites60.0%
Taylor expanded in w around 0
Applied rewrites62.2%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f6499.2
Applied rewrites99.2%
(FPCore (w l) :precision binary64 (/ (- 1.0 w) (pow l -1.0)))
/* First-order form: (1 - w) divided by the reciprocal power l^-1. */
double code(double w, double l) {
    double numerator = 1.0 - w;
    return numerator / pow(l, -1.0);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = (1.0d0 - w) / (l ** (-1.0d0))
end function
public static double code(double w, double l) {
return (1.0 - w) / Math.pow(l, -1.0);
}
# (1 - w) / l**-1 -- first-order Taylor approximation in w.
def code(w, l): return (1.0 - w) / math.pow(l, -1.0)
function code(w, l) return Float64(Float64(1.0 - w) / (l ^ -1.0)) end
function tmp = code(w, l) tmp = (1.0 - w) / (l ^ -1.0); end
code[w_, l_] := N[(N[(1.0 - w), $MachinePrecision] / N[Power[l, -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - w}{{\ell}^{-1}}
\end{array}
Initial program 99.0%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6498.9
Applied rewrites98.9%
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
*-rgt-identityN/A
lower-/.f6498.9
lift-/.f64N/A
lift-pow.f64N/A
pow-flipN/A
lower-pow.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
Taylor expanded in w around 0
lower-/.f6497.3
Applied rewrites97.3%
Taylor expanded in w around 0
fp-cancel-sign-sub-invN/A
metadata-evalN/A
*-lft-identityN/A
lower--.f6462.9
Applied rewrites62.9%
Final simplification62.9%
(FPCore (w l) :precision binary64 (/ 1.0 (pow l -1.0)))
/* Zeroth-order form: 1 / l^-1; w is ignored in this approximation. */
double code(double w, double l) {
    double reciprocal = pow(l, -1.0);
    return 1.0 / reciprocal;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = 1.0d0 / (l ** (-1.0d0))
end function
public static double code(double w, double l) {
return 1.0 / Math.pow(l, -1.0);
}
# 1 / l**-1 -- zeroth-order approximation; w is ignored.
def code(w, l): return 1.0 / math.pow(l, -1.0)
function code(w, l) return Float64(1.0 / (l ^ -1.0)) end
function tmp = code(w, l) tmp = 1.0 / (l ^ -1.0); end
code[w_, l_] := N[(1.0 / N[Power[l, -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{{\ell}^{-1}}
\end{array}
Initial program 99.0%
lift-pow.f64N/A
pow-to-expN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
exp-negN/A
pow-to-expN/A
lift-pow.f64N/A
lower-/.f6498.9
Applied rewrites98.9%
lift-*.f64N/A
lift-/.f64N/A
associate-*r/N/A
*-rgt-identityN/A
lower-/.f6498.9
lift-/.f64N/A
lift-pow.f64N/A
pow-flipN/A
lower-pow.f64N/A
lower-neg.f6498.9
Applied rewrites98.9%
Taylor expanded in w around 0
lower-/.f6497.3
Applied rewrites97.3%
Taylor expanded in w around 0
Applied rewrites55.9%
Final simplification55.9%
herbie shell --seed 2024337
(FPCore (w l)
:name "exp-w (used to crash)"
:precision binary64
(* (exp (- w)) (pow l (exp w))))