
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
! Computes exp(-w) * l**exp(w) in double precision (binary64).
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
    // Computes e^(-w) * l^(e^w) in double precision.
    final double decay = Math.exp(-w);
    final double grown = Math.pow(l, Math.exp(w));
    return decay * grown;
}
def code(w, l):
    """Return exp(-w) * l**exp(w) in binary64 (same operations as original)."""
    attenuation = math.exp(-w)
    growth = math.pow(l, math.exp(w))
    return attenuation * growth
# Computes exp(-w) * l^exp(w), forcing Float64 intermediates.
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
% Computes exp(-w) * (l ^ exp(w)) in double precision.
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
(* Computes exp(-w) * l^exp(w), rounding each step to $MachinePrecision. *)
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
(FPCore (w l)
:precision binary64
(if (<= w -1.42e-5)
(exp (- (* (log l) (exp w)) w))
(/
(pow l (exp w))
(fma (fma (fma 0.16666666666666666 w 0.5) w 1.0) w 1.0))))
/* Herbie-rewritten form of exp(-w) * l^(e^w).
 * For w <= -1.42e-5 the product is folded into a single exponential,
 * exp(log(l)*e^w - w); otherwise the exp(-w) factor is replaced by
 * division by a degree-3 Taylor polynomial of e^w (1 + w + w^2/2 + w^3/6),
 * evaluated in Horner form with fused multiply-adds. */
double code(double w, double l) {
double tmp;
if (w <= -1.42e-5) {
/* Single exponential of log(l)*e^w - w. */
tmp = exp(((log(l) * exp(w)) - w));
} else {
/* Divisor is the cubic Taylor approximation of e^w around 0. */
tmp = pow(l, exp(w)) / fma(fma(fma(0.16666666666666666, w, 0.5), w, 1.0), w, 1.0);
}
return tmp;
}
function code(w, l) tmp = 0.0 if (w <= -1.42e-5) tmp = exp(Float64(Float64(log(l) * exp(w)) - w)); else tmp = Float64((l ^ exp(w)) / fma(fma(fma(0.16666666666666666, w, 0.5), w, 1.0), w, 1.0)); end return tmp end
code[w_, l_] := If[LessEqual[w, -1.42e-5], N[Exp[N[(N[(N[Log[l], $MachinePrecision] * N[Exp[w], $MachinePrecision]), $MachinePrecision] - w), $MachinePrecision]], $MachinePrecision], N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[(N[(N[(0.16666666666666666 * w + 0.5), $MachinePrecision] * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1.42 \cdot 10^{-5}:\\
\;\;\;\;e^{\log \ell \cdot e^{w} - w}\\
\mathbf{else}:\\
\;\;\;\;\frac{{\ell}^{\left(e^{w}\right)}}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, w, 0.5\right), w, 1\right), w, 1\right)}\\
\end{array}
\end{array}
if w < -1.42e-5: Initial program 99.9%
Taylor expanded in w around inf
*-commutativeN/A
exp-to-powN/A
remove-double-negN/A
distribute-lft-neg-outN/A
log-recN/A
*-commutativeN/A
mul-1-negN/A
+-rgt-identityN/A
exp-sumN/A
+-rgt-identityN/A
unsub-negN/A
div-expN/A
lower-/.f64N/A
Applied rewrites99.9%
Applied rewrites99.9%
Applied rewrites99.9%
if -1.42e-5 < w Initial program 99.1%
Taylor expanded in w around inf
*-commutativeN/A
exp-to-powN/A
remove-double-negN/A
distribute-lft-neg-outN/A
log-recN/A
*-commutativeN/A
mul-1-negN/A
+-rgt-identityN/A
exp-sumN/A
+-rgt-identityN/A
unsub-negN/A
div-expN/A
lower-/.f64N/A
Applied rewrites99.1%
Taylor expanded in w around 0
Applied rewrites99.5%
Final simplification99.6%
(FPCore (w l) :precision binary64 (/ (pow l (exp w)) (exp w)))
double code(double w, double l) {
return pow(l, exp(w)) / exp(w);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = (l ** exp(w)) / exp(w)
end function
public static double code(double w, double l) {
    // l^(e^w) / e^w, with Math.exp(w) evaluated a single time.
    final double ew = Math.exp(w);
    return Math.pow(l, ew) / ew;
}
def code(w, l):
    """Return l**exp(w) / exp(w); exp(w) is computed once and shared."""
    ew = math.exp(w)
    return math.pow(l, ew) / ew
function code(w, l) return Float64((l ^ exp(w)) / exp(w)) end
function tmp = code(w, l) tmp = (l ^ exp(w)) / exp(w); end
code[w_, l_] := N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}}
\end{array}
Initial program 99.3%
Taylor expanded in w around inf
*-commutativeN/A
exp-to-powN/A
remove-double-negN/A
distribute-lft-neg-outN/A
log-recN/A
*-commutativeN/A
mul-1-negN/A
+-rgt-identityN/A
exp-sumN/A
+-rgt-identityN/A
unsub-negN/A
div-expN/A
lower-/.f64N/A
Applied rewrites99.3%
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l): return math.exp(-w) * math.pow(l, math.exp(w))
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Initial program 99.3%
(FPCore (w l)
:precision binary64
(if (<= w -1.6)
(exp (- w))
(/
(pow l (exp w))
(fma (fma (fma 0.16666666666666666 w 0.5) w 1.0) w 1.0))))
/* Herbie alternative: for w <= -1.6 the l term is dropped entirely and
 * only exp(-w) is returned; otherwise exp(-w) is replaced by division by
 * the cubic Taylor polynomial of e^w (1 + w + w^2/2 + w^3/6). */
double code(double w, double l) {
double tmp;
if (w <= -1.6) {
/* NOTE(review): l is ignored on this branch. */
tmp = exp(-w);
} else {
/* Horner/fma evaluation of the Taylor cubic of e^w around 0. */
tmp = pow(l, exp(w)) / fma(fma(fma(0.16666666666666666, w, 0.5), w, 1.0), w, 1.0);
}
return tmp;
}
function code(w, l) tmp = 0.0 if (w <= -1.6) tmp = exp(Float64(-w)); else tmp = Float64((l ^ exp(w)) / fma(fma(fma(0.16666666666666666, w, 0.5), w, 1.0), w, 1.0)); end return tmp end
code[w_, l_] := If[LessEqual[w, -1.6], N[Exp[(-w)], $MachinePrecision], N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[(N[(N[(0.16666666666666666 * w + 0.5), $MachinePrecision] * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1.6:\\
\;\;\;\;e^{-w}\\
\mathbf{else}:\\
\;\;\;\;\frac{{\ell}^{\left(e^{w}\right)}}{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, w, 0.5\right), w, 1\right), w, 1\right)}\\
\end{array}
\end{array}
if w < -1.6000000000000001: Initial program 100.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
lift-*.f64N/A
*-rgt-identity100.0
Applied rewrites100.0%
if -1.6000000000000001 < w Initial program 99.1%
Taylor expanded in w around inf
*-commutativeN/A
exp-to-powN/A
remove-double-negN/A
distribute-lft-neg-outN/A
log-recN/A
*-commutativeN/A
mul-1-negN/A
+-rgt-identityN/A
exp-sumN/A
+-rgt-identityN/A
unsub-negN/A
div-expN/A
lower-/.f64N/A
Applied rewrites99.1%
Taylor expanded in w around 0
Applied rewrites99.1%
(FPCore (w l) :precision binary64 (if (<= l 1.0) (* (pow l (+ 1.0 w)) (fma -1.0 w 1.0)) (* (pow l (fma (fma 0.5 w 1.0) w 1.0)) (fma (fma 0.5 w -1.0) w 1.0))))
/* Herbie alternative branching on l:
 * l <= 1: l^(1+w) * (1 - w)            (exp(w) ~ 1+w, exp(-w) ~ 1-w)
 * l >  1: l^(1+w+w^2/2) * (1-w+w^2/2)  (second-order Taylor expansions)
 * Both branches approximate exp(-w) * l^(e^w) for small w. */
double code(double w, double l) {
double tmp;
if (l <= 1.0) {
tmp = pow(l, (1.0 + w)) * fma(-1.0, w, 1.0);
} else {
tmp = pow(l, fma(fma(0.5, w, 1.0), w, 1.0)) * fma(fma(0.5, w, -1.0), w, 1.0);
}
return tmp;
}
function code(w, l) tmp = 0.0 if (l <= 1.0) tmp = Float64((l ^ Float64(1.0 + w)) * fma(-1.0, w, 1.0)); else tmp = Float64((l ^ fma(fma(0.5, w, 1.0), w, 1.0)) * fma(fma(0.5, w, -1.0), w, 1.0)); end return tmp end
code[w_, l_] := If[LessEqual[l, 1.0], N[(N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision] * N[(-1.0 * w + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[Power[l, N[(N[(0.5 * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]], $MachinePrecision] * N[(N[(0.5 * w + -1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 1:\\
\;\;\;\;{\ell}^{\left(1 + w\right)} \cdot \mathsf{fma}\left(-1, w, 1\right)\\
\mathbf{else}:\\
\;\;\;\;{\ell}^{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.5, w, 1\right), w, 1\right)\right)} \cdot \mathsf{fma}\left(\mathsf{fma}\left(0.5, w, -1\right), w, 1\right)\\
\end{array}
\end{array}
if l < 1Initial program 99.7%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f6473.7
Applied rewrites73.7%
Taylor expanded in w around 0
lower-+.f6486.8
Applied rewrites86.8%
Taylor expanded in w around 0
Applied rewrites98.8%
if 1 < l Initial program 98.9%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f6487.9
Applied rewrites87.9%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f6499.0
Applied rewrites99.0%
Final simplification98.9%
(FPCore (w l) :precision binary64 (if (<= l 1.0) (* (pow l (+ 1.0 w)) (fma -1.0 w 1.0)) (* 1.0 (pow l (fma (fma 0.5 w 1.0) w 1.0)))))
double code(double w, double l) {
double tmp;
if (l <= 1.0) {
tmp = pow(l, (1.0 + w)) * fma(-1.0, w, 1.0);
} else {
tmp = 1.0 * pow(l, fma(fma(0.5, w, 1.0), w, 1.0));
}
return tmp;
}
function code(w, l) tmp = 0.0 if (l <= 1.0) tmp = Float64((l ^ Float64(1.0 + w)) * fma(-1.0, w, 1.0)); else tmp = Float64(1.0 * (l ^ fma(fma(0.5, w, 1.0), w, 1.0))); end return tmp end
code[w_, l_] := If[LessEqual[l, 1.0], N[(N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision] * N[(-1.0 * w + 1.0), $MachinePrecision]), $MachinePrecision], N[(1.0 * N[Power[l, N[(N[(0.5 * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq 1:\\
\;\;\;\;{\ell}^{\left(1 + w\right)} \cdot \mathsf{fma}\left(-1, w, 1\right)\\
\mathbf{else}:\\
\;\;\;\;1 \cdot {\ell}^{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.5, w, 1\right), w, 1\right)\right)}\\
\end{array}
\end{array}
if l < 1Initial program 99.7%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f6473.7
Applied rewrites73.7%
Taylor expanded in w around 0
lower-+.f6486.8
Applied rewrites86.8%
Taylor expanded in w around 0
Applied rewrites98.8%
if 1 < l Initial program 98.9%
Taylor expanded in w around 0
Applied rewrites68.5%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f6498.8
Applied rewrites98.8%
Final simplification98.8%
(FPCore (w l) :precision binary64 (let* ((t_0 (exp (- w)))) (if (<= w -0.7) t_0 (if (<= w 4100000.0) (* (pow l 1.0) 1.0) t_0))))
/* Herbie alternative: outside (-0.7, 4.1e6] the result collapses to
 * exp(-w); inside that range it is pow(l, 1.0) * 1.0, i.e. effectively l.
 * NOTE(review): t_0 is computed unconditionally even when the middle
 * branch is taken. */
double code(double w, double l) {
double t_0 = exp(-w);
double tmp;
if (w <= -0.7) {
tmp = t_0;
} else if (w <= 4100000.0) {
/* pow(l, 1.0) * 1.0 — reduces to l. */
tmp = pow(l, 1.0) * 1.0;
} else {
tmp = t_0;
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: t_0
real(8) :: tmp
t_0 = exp(-w)
if (w <= (-0.7d0)) then
tmp = t_0
else if (w <= 4100000.0d0) then
tmp = (l ** 1.0d0) * 1.0d0
else
tmp = t_0
end if
code = tmp
end function
public static double code(double w, double l) {
double t_0 = Math.exp(-w);
double tmp;
if (w <= -0.7) {
tmp = t_0;
} else if (w <= 4100000.0) {
tmp = Math.pow(l, 1.0) * 1.0;
} else {
tmp = t_0;
}
return tmp;
}
def code(w, l): t_0 = math.exp(-w) tmp = 0 if w <= -0.7: tmp = t_0 elif w <= 4100000.0: tmp = math.pow(l, 1.0) * 1.0 else: tmp = t_0 return tmp
function code(w, l) t_0 = exp(Float64(-w)) tmp = 0.0 if (w <= -0.7) tmp = t_0; elseif (w <= 4100000.0) tmp = Float64((l ^ 1.0) * 1.0); else tmp = t_0; end return tmp end
function tmp_2 = code(w, l) t_0 = exp(-w); tmp = 0.0; if (w <= -0.7) tmp = t_0; elseif (w <= 4100000.0) tmp = (l ^ 1.0) * 1.0; else tmp = t_0; end tmp_2 = tmp; end
code[w_, l_] := Block[{t$95$0 = N[Exp[(-w)], $MachinePrecision]}, If[LessEqual[w, -0.7], t$95$0, If[LessEqual[w, 4100000.0], N[(N[Power[l, 1.0], $MachinePrecision] * 1.0), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{-w}\\
\mathbf{if}\;w \leq -0.7:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;w \leq 4100000:\\
\;\;\;\;{\ell}^{1} \cdot 1\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if w < -0.69999999999999996 or 4.1e6 < w Initial program 100.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
lift-*.f64N/A
*-rgt-identity100.0
Applied rewrites100.0%
if -0.69999999999999996 < w < 4.1e6Initial program 98.8%
Taylor expanded in w around 0
Applied rewrites97.7%
Taylor expanded in w around 0
Applied rewrites95.8%
Final simplification97.6%
(FPCore (w l) :precision binary64 (if (<= w -8.5e+56) (exp (- w)) (* 1.0 (pow l (+ 1.0 w)))))
double code(double w, double l) {
double tmp;
if (w <= -8.5e+56) {
tmp = exp(-w);
} else {
tmp = 1.0 * pow(l, (1.0 + w));
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-8.5d+56)) then
tmp = exp(-w)
else
tmp = 1.0d0 * (l ** (1.0d0 + w))
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -8.5e+56) {
tmp = Math.exp(-w);
} else {
tmp = 1.0 * Math.pow(l, (1.0 + w));
}
return tmp;
}
def code(w, l): tmp = 0 if w <= -8.5e+56: tmp = math.exp(-w) else: tmp = 1.0 * math.pow(l, (1.0 + w)) return tmp
function code(w, l) tmp = 0.0 if (w <= -8.5e+56) tmp = exp(Float64(-w)); else tmp = Float64(1.0 * (l ^ Float64(1.0 + w))); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -8.5e+56) tmp = exp(-w); else tmp = 1.0 * (l ^ (1.0 + w)); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -8.5e+56], N[Exp[(-w)], $MachinePrecision], N[(1.0 * N[Power[l, N[(1.0 + w), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -8.5 \cdot 10^{+56}:\\
\;\;\;\;e^{-w}\\
\mathbf{else}:\\
\;\;\;\;1 \cdot {\ell}^{\left(1 + w\right)}\\
\end{array}
\end{array}
if w < -8.4999999999999998e56Initial program 100.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
lift-*.f64N/A
*-rgt-identity100.0
Applied rewrites100.0%
if -8.4999999999999998e56 < w Initial program 99.1%
Taylor expanded in w around 0
Applied rewrites93.9%
Taylor expanded in w around 0
lower-+.f6498.2
Applied rewrites98.2%
(FPCore (w l) :precision binary64 (exp (- w)))
double code(double w, double l) {
return exp(-w);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w)
end function
public static double code(double w, double l) {
return Math.exp(-w);
}
def code(w, l):
    """Approximate the original expression by exp(-w); l is unused."""
    neg_w = -w
    return math.exp(neg_w)
function code(w, l) return exp(Float64(-w)) end
function tmp = code(w, l) tmp = exp(-w); end
code[w_, l_] := N[Exp[(-w)], $MachinePrecision]
\begin{array}{l}
\\
e^{-w}
\end{array}
Initial program 99.3%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval45.1
Applied rewrites45.1%
lift-*.f64N/A
*-rgt-identity45.1
Applied rewrites45.1%
(FPCore (w l)
:precision binary64
(if (<= w -5.6e+102)
(fma (fma (fma -0.16666666666666666 w 0.5) w -1.0) w 1.0)
(if (<= w -1.45e-172)
(/ (* (fma w w 1.0) (fma w w 1.0)) (* (- 1.0 w) (* w w)))
(/ (fma (* w w) 2.0 1.0) (* (- 1.0 w) (fma w w 1.0))))))
/* Herbie alternative using rational/polynomial surrogates; l is not used.
 * w <= -5.6e102 : cubic Taylor of e^-w: 1 - w + w^2/2 - w^3/6 (fma Horner)
 * w <= -1.45e-172: (1+w^2)^2 / ((1-w) * w^2)
 * otherwise      : (1+2w^2) / ((1-w) * (1+w^2)) */
double code(double w, double l) {
double tmp;
if (w <= -5.6e+102) {
tmp = fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0);
} else if (w <= -1.45e-172) {
tmp = (fma(w, w, 1.0) * fma(w, w, 1.0)) / ((1.0 - w) * (w * w));
} else {
tmp = fma((w * w), 2.0, 1.0) / ((1.0 - w) * fma(w, w, 1.0));
}
return tmp;
}
function code(w, l) tmp = 0.0 if (w <= -5.6e+102) tmp = fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0); elseif (w <= -1.45e-172) tmp = Float64(Float64(fma(w, w, 1.0) * fma(w, w, 1.0)) / Float64(Float64(1.0 - w) * Float64(w * w))); else tmp = Float64(fma(Float64(w * w), 2.0, 1.0) / Float64(Float64(1.0 - w) * fma(w, w, 1.0))); end return tmp end
code[w_, l_] := If[LessEqual[w, -5.6e+102], N[(N[(N[(-0.16666666666666666 * w + 0.5), $MachinePrecision] * w + -1.0), $MachinePrecision] * w + 1.0), $MachinePrecision], If[LessEqual[w, -1.45e-172], N[(N[(N[(w * w + 1.0), $MachinePrecision] * N[(w * w + 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 - w), $MachinePrecision] * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(w * w), $MachinePrecision] * 2.0 + 1.0), $MachinePrecision] / N[(N[(1.0 - w), $MachinePrecision] * N[(w * w + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -5.6 \cdot 10^{+102}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, w, 0.5\right), w, -1\right), w, 1\right)\\
\mathbf{elif}\;w \leq -1.45 \cdot 10^{-172}:\\
\;\;\;\;\frac{\mathsf{fma}\left(w, w, 1\right) \cdot \mathsf{fma}\left(w, w, 1\right)}{\left(1 - w\right) \cdot \left(w \cdot w\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(w \cdot w, 2, 1\right)}{\left(1 - w\right) \cdot \mathsf{fma}\left(w, w, 1\right)}\\
\end{array}
\end{array}
if w < -5.60000000000000037e102Initial program 100.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64100.0
Applied rewrites100.0%
if -5.60000000000000037e102 < w < -1.44999999999999999e-172Initial program 99.4%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval42.2
Applied rewrites42.2%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.3
Applied rewrites4.3%
Applied rewrites19.7%
Taylor expanded in w around inf
Applied rewrites20.3%
if -1.44999999999999999e-172 < w Initial program 99.1%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval31.1
Applied rewrites31.1%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.4
Applied rewrites4.4%
Applied rewrites3.9%
Taylor expanded in w around 0
Applied rewrites12.3%
Final simplification28.8%
(FPCore (w l)
:precision binary64
(let* ((t_0 (* (- 1.0 w) (fma w w 1.0))))
(if (<= w -5.6e+102)
(fma (fma (fma -0.16666666666666666 w 0.5) w -1.0) w 1.0)
(if (<= w -1.02e-113)
(/ (* (* w w) (fma w w 1.0)) t_0)
(/ (fma (* w w) 2.0 1.0) t_0)))))
/* Variant of the rational approximation with the shared denominator
 * t_0 = (1-w)*(1+w^2) hoisted; l is not used.
 * w <= -5.6e102 : cubic Taylor of e^-w (fma Horner)
 * w <= -1.02e-113: w^2*(1+w^2) / t_0
 * otherwise      : (1+2w^2) / t_0
 * NOTE(review): t_0 is computed even on the first branch, which ignores it. */
double code(double w, double l) {
double t_0 = (1.0 - w) * fma(w, w, 1.0);
double tmp;
if (w <= -5.6e+102) {
tmp = fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0);
} else if (w <= -1.02e-113) {
tmp = ((w * w) * fma(w, w, 1.0)) / t_0;
} else {
tmp = fma((w * w), 2.0, 1.0) / t_0;
}
return tmp;
}
}
function code(w, l) t_0 = Float64(Float64(1.0 - w) * fma(w, w, 1.0)) tmp = 0.0 if (w <= -5.6e+102) tmp = fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0); elseif (w <= -1.02e-113) tmp = Float64(Float64(Float64(w * w) * fma(w, w, 1.0)) / t_0); else tmp = Float64(fma(Float64(w * w), 2.0, 1.0) / t_0); end return tmp end
code[w_, l_] := Block[{t$95$0 = N[(N[(1.0 - w), $MachinePrecision] * N[(w * w + 1.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[w, -5.6e+102], N[(N[(N[(-0.16666666666666666 * w + 0.5), $MachinePrecision] * w + -1.0), $MachinePrecision] * w + 1.0), $MachinePrecision], If[LessEqual[w, -1.02e-113], N[(N[(N[(w * w), $MachinePrecision] * N[(w * w + 1.0), $MachinePrecision]), $MachinePrecision] / t$95$0), $MachinePrecision], N[(N[(N[(w * w), $MachinePrecision] * 2.0 + 1.0), $MachinePrecision] / t$95$0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(1 - w\right) \cdot \mathsf{fma}\left(w, w, 1\right)\\
\mathbf{if}\;w \leq -5.6 \cdot 10^{+102}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, w, 0.5\right), w, -1\right), w, 1\right)\\
\mathbf{elif}\;w \leq -1.02 \cdot 10^{-113}:\\
\;\;\;\;\frac{\left(w \cdot w\right) \cdot \mathsf{fma}\left(w, w, 1\right)}{t\_0}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(w \cdot w, 2, 1\right)}{t\_0}\\
\end{array}
\end{array}
if w < -5.60000000000000037e102Initial program 100.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64100.0
Applied rewrites100.0%
if -5.60000000000000037e102 < w < -1.02e-113Initial program 99.3%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval49.5
Applied rewrites49.5%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.4
Applied rewrites4.4%
Applied rewrites22.8%
Taylor expanded in w around inf
Applied rewrites23.2%
if -1.02e-113 < w Initial program 99.1%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval29.7
Applied rewrites29.7%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.4
Applied rewrites4.4%
Applied rewrites3.9%
Taylor expanded in w around 0
Applied rewrites11.9%
Final simplification28.8%
(FPCore (w l)
:precision binary64
(if (<= w -1e+154)
(fma (fma 2.0 w 1.0) w 1.0)
(if (<= w 750000000.0)
(/ (* (fma w w 1.0) (fma w w 1.0)) (fma (- w 1.0) w 1.0))
(/ (fma (* w w) 2.0 1.0) (* (- 1.0 w) (fma w w 1.0))))))
/* Herbie alternative; l is not used.
 * w <= -1e154 : 2w^2 + w + 1 (fma Horner)
 * w <= 7.5e8  : (1+w^2)^2 / (w^2 - w + 1)
 * otherwise   : (1+2w^2) / ((1-w)*(1+w^2)) */
double code(double w, double l) {
double tmp;
if (w <= -1e+154) {
tmp = fma(fma(2.0, w, 1.0), w, 1.0);
} else if (w <= 750000000.0) {
tmp = (fma(w, w, 1.0) * fma(w, w, 1.0)) / fma((w - 1.0), w, 1.0);
} else {
tmp = fma((w * w), 2.0, 1.0) / ((1.0 - w) * fma(w, w, 1.0));
}
return tmp;
}
function code(w, l) tmp = 0.0 if (w <= -1e+154) tmp = fma(fma(2.0, w, 1.0), w, 1.0); elseif (w <= 750000000.0) tmp = Float64(Float64(fma(w, w, 1.0) * fma(w, w, 1.0)) / fma(Float64(w - 1.0), w, 1.0)); else tmp = Float64(fma(Float64(w * w), 2.0, 1.0) / Float64(Float64(1.0 - w) * fma(w, w, 1.0))); end return tmp end
code[w_, l_] := If[LessEqual[w, -1e+154], N[(N[(2.0 * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision], If[LessEqual[w, 750000000.0], N[(N[(N[(w * w + 1.0), $MachinePrecision] * N[(w * w + 1.0), $MachinePrecision]), $MachinePrecision] / N[(N[(w - 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(w * w), $MachinePrecision] * 2.0 + 1.0), $MachinePrecision] / N[(N[(1.0 - w), $MachinePrecision] * N[(w * w + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1 \cdot 10^{+154}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(2, w, 1\right), w, 1\right)\\
\mathbf{elif}\;w \leq 750000000:\\
\;\;\;\;\frac{\mathsf{fma}\left(w, w, 1\right) \cdot \mathsf{fma}\left(w, w, 1\right)}{\mathsf{fma}\left(w - 1, w, 1\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(w \cdot w, 2, 1\right)}{\left(1 - w\right) \cdot \mathsf{fma}\left(w, w, 1\right)}\\
\end{array}
\end{array}
if w < -1.00000000000000004e154Initial program 100.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f647.4
Applied rewrites7.4%
Applied rewrites0.0%
Taylor expanded in w around 0
Applied rewrites100.0%
if -1.00000000000000004e154 < w < 7.5e8Initial program 99.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval22.4
Applied rewrites22.4%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.9
Applied rewrites4.9%
Applied rewrites9.4%
Taylor expanded in w around 0
Applied rewrites15.5%
if 7.5e8 < w Initial program 100.0%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval100.0
Applied rewrites100.0%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f642.3
Applied rewrites2.3%
Applied rewrites0.7%
Taylor expanded in w around 0
Applied rewrites31.3%
Final simplification28.7%
(FPCore (w l) :precision binary64 (if (<= w 0.216) (fma (fma (fma -0.16666666666666666 w 0.5) w -1.0) w 1.0) (/ (fma (* w w) 2.0 1.0) (* (- 1.0 w) (fma w w 1.0)))))
/* Herbie alternative; l is not used.
 * w <= 0.216 : cubic Taylor of e^-w: 1 - w + w^2/2 - w^3/6 (fma Horner)
 * otherwise  : (1+2w^2) / ((1-w)*(1+w^2)) */
double code(double w, double l) {
double tmp;
if (w <= 0.216) {
tmp = fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0);
} else {
tmp = fma((w * w), 2.0, 1.0) / ((1.0 - w) * fma(w, w, 1.0));
}
return tmp;
}
function code(w, l) tmp = 0.0 if (w <= 0.216) tmp = fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0); else tmp = Float64(fma(Float64(w * w), 2.0, 1.0) / Float64(Float64(1.0 - w) * fma(w, w, 1.0))); end return tmp end
code[w_, l_] := If[LessEqual[w, 0.216], N[(N[(N[(-0.16666666666666666 * w + 0.5), $MachinePrecision] * w + -1.0), $MachinePrecision] * w + 1.0), $MachinePrecision], N[(N[(N[(w * w), $MachinePrecision] * 2.0 + 1.0), $MachinePrecision] / N[(N[(1.0 - w), $MachinePrecision] * N[(w * w + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq 0.216:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, w, 0.5\right), w, -1\right), w, 1\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(w \cdot w, 2, 1\right)}{\left(1 - w\right) \cdot \mathsf{fma}\left(w, w, 1\right)}\\
\end{array}
\end{array}
if w < 0.215999999999999998Initial program 99.6%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval34.3
Applied rewrites34.3%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f6424.5
Applied rewrites24.5%
if 0.215999999999999998 < w Initial program 97.8%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval95.7
Applied rewrites95.7%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f642.3
Applied rewrites2.3%
Applied rewrites0.7%
Taylor expanded in w around 0
Applied rewrites30.0%
Final simplification25.5%
(FPCore (w l) :precision binary64 (fma (fma (fma -0.16666666666666666 w 0.5) w -1.0) w 1.0))
double code(double w, double l) {
return fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0);
}
function code(w, l) return fma(fma(fma(-0.16666666666666666, w, 0.5), w, -1.0), w, 1.0) end
code[w_, l_] := N[(N[(N[(-0.16666666666666666 * w + 0.5), $MachinePrecision] * w + -1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, w, 0.5\right), w, -1\right), w, 1\right)
\end{array}
Initial program 99.3%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval45.1
Applied rewrites45.1%
Taylor expanded in w around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f6420.5
Applied rewrites20.5%
(FPCore (w l) :precision binary64 (fma (fma 2.0 w 1.0) w 1.0))
double code(double w, double l) {
return fma(fma(2.0, w, 1.0), w, 1.0);
}
function code(w, l) return fma(fma(2.0, w, 1.0), w, 1.0) end
code[w_, l_] := N[(N[(2.0 * w + 1.0), $MachinePrecision] * w + 1.0), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(2, w, 1\right), w, 1\right)
\end{array}
Initial program 99.3%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval45.1
Applied rewrites45.1%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.7
Applied rewrites4.7%
Applied rewrites6.7%
Taylor expanded in w around 0
Applied rewrites16.5%
(FPCore (w l) :precision binary64 (- 1.0 w))
/* First-order approximation 1 - w of the original expression; l is unused. */
double code(double w, double l) {
return 1.0 - w;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = 1.0d0 - w
end function
public static double code(double w, double l) {
return 1.0 - w;
}
def code(w, l): return 1.0 - w
function code(w, l) return Float64(1.0 - w) end
function tmp = code(w, l) tmp = 1.0 - w; end
code[w_, l_] := N[(1.0 - w), $MachinePrecision]
\begin{array}{l}
\\
1 - w
\end{array}
Initial program 99.3%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval45.1
Applied rewrites45.1%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.7
Applied rewrites4.7%
(FPCore (w l) :precision binary64 1.0)
/* Zeroth-order approximation: constant 1.0; both arguments are unused. */
double code(double w, double l) {
return 1.0;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = 1.0d0
end function
public static double code(double w, double l) {
return 1.0;
}
def code(w, l): return 1.0
function code(w, l) return 1.0 end
function tmp = code(w, l) tmp = 1.0; end
code[w_, l_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 99.3%
lift-pow.f64N/A
sqr-powN/A
pow-prod-upN/A
flip-+N/A
+-inversesN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
+-inversesN/A
metadata-evalN/A
flip--N/A
metadata-evalN/A
metadata-eval45.1
Applied rewrites45.1%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
lower--.f644.7
Applied rewrites4.7%
Taylor expanded in w around 0
Applied rewrites4.3%
herbie shell --seed 2024332
(FPCore (w l)
:name "exp-w (used to crash)"
:precision binary64
(* (exp (- w)) (pow l (exp w))))