
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l):
    """Evaluate exp(-w) * l**exp(w) in double precision."""
    decay = math.exp(-w)
    growth = math.exp(w)
    return decay * math.pow(l, growth)
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 20 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (w l) :precision binary64 (* (exp (- w)) (pow l (exp w))))
double code(double w, double l) {
return exp(-w) * pow(l, exp(w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = exp(-w) * (l ** exp(w))
end function
public static double code(double w, double l) {
return Math.exp(-w) * Math.pow(l, Math.exp(w));
}
def code(w, l):
    """Evaluate exp(-w) * l**exp(w) in double precision."""
    # Multiplication of two floats is commutative, so the operand order
    # is free; the factors themselves are computed as in the formula.
    return math.pow(l, math.exp(w)) * math.exp(-w)
function code(w, l) return Float64(exp(Float64(-w)) * (l ^ exp(w))) end
function tmp = code(w, l) tmp = exp(-w) * (l ^ exp(w)); end
code[w_, l_] := N[(N[Exp[(-w)], $MachinePrecision] * N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{-w} \cdot {\ell}^{\left(e^{w}\right)}
\end{array}
(FPCore (w l)
:precision binary64
(if (<= w -1.6)
(exp (- 0.0 w))
(/
(pow l (exp w))
(+ (+ w 1.0) (* (+ 0.5 (* w 0.16666666666666666)) (* w w))))))
double code(double w, double l) {
double tmp;
if (w <= -1.6) {
tmp = exp((0.0 - w));
} else {
tmp = pow(l, exp(w)) / ((w + 1.0) + ((0.5 + (w * 0.16666666666666666)) * (w * w)));
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-1.6d0)) then
tmp = exp((0.0d0 - w))
else
tmp = (l ** exp(w)) / ((w + 1.0d0) + ((0.5d0 + (w * 0.16666666666666666d0)) * (w * w)))
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -1.6) {
tmp = Math.exp((0.0 - w));
} else {
tmp = Math.pow(l, Math.exp(w)) / ((w + 1.0) + ((0.5 + (w * 0.16666666666666666)) * (w * w)));
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten: exp(-w) for w <= -1.6,
    otherwise l**exp(w) divided by a cubic Taylor polynomial of exp(w)
    about w = 0.  (Original line was collapsed onto one line and was a
    SyntaxError; logic is unchanged.)"""
    tmp = 0
    if w <= -1.6:
        tmp = math.exp((0.0 - w))
    else:
        tmp = math.pow(l, math.exp(w)) / ((w + 1.0) + ((0.5 + (w * 0.16666666666666666)) * (w * w)))
    return tmp
function code(w, l) tmp = 0.0 if (w <= -1.6) tmp = exp(Float64(0.0 - w)); else tmp = Float64((l ^ exp(w)) / Float64(Float64(w + 1.0) + Float64(Float64(0.5 + Float64(w * 0.16666666666666666)) * Float64(w * w)))); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -1.6) tmp = exp((0.0 - w)); else tmp = (l ^ exp(w)) / ((w + 1.0) + ((0.5 + (w * 0.16666666666666666)) * (w * w))); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -1.6], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[(N[(w + 1.0), $MachinePrecision] + N[(N[(0.5 + N[(w * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] * N[(w * w), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1.6:\\
\;\;\;\;e^{0 - w}\\
\mathbf{else}:\\
\;\;\;\;\frac{{\ell}^{\left(e^{w}\right)}}{\left(w + 1\right) + \left(0.5 + w \cdot 0.16666666666666666\right) \cdot \left(w \cdot w\right)}\\
\end{array}
\end{array}
if w < -1.6000000000000001Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -1.6000000000000001 < w Initial program 98.2%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.2%
Simplified98.2%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.3%
Simplified99.3%
distribute-lft-inN/A
*-rgt-identityN/A
associate-+r+N/A
+-commutativeN/A
+-lowering-+.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.3%
Applied egg-rr99.3%
(FPCore (w l) :precision binary64 (* (pow l (exp w)) (exp (- 0.0 w))))
double code(double w, double l) {
return pow(l, exp(w)) * exp((0.0 - w));
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = (l ** exp(w)) * exp((0.0d0 - w))
end function
public static double code(double w, double l) {
return Math.pow(l, Math.exp(w)) * Math.exp((0.0 - w));
}
def code(w, l):
    """Herbie alternative: l**exp(w) * exp(0 - w)."""
    exponent = math.exp(w)
    return math.pow(l, exponent) * math.exp((0.0 - w))
function code(w, l) return Float64((l ^ exp(w)) * exp(Float64(0.0 - w))) end
function tmp = code(w, l) tmp = (l ^ exp(w)) * exp((0.0 - w)); end
code[w_, l_] := N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] * N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
{\ell}^{\left(e^{w}\right)} \cdot e^{0 - w}
\end{array}
Initial program 98.6%
Final simplification98.6%
(FPCore (w l) :precision binary64 (/ (pow l (exp w)) (exp w)))
double code(double w, double l) {
return pow(l, exp(w)) / exp(w);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = (l ** exp(w)) / exp(w)
end function
public static double code(double w, double l) {
return Math.pow(l, Math.exp(w)) / Math.exp(w);
}
def code(w, l):
    """Herbie alternative: l**exp(w) / exp(w)."""
    e_w = math.exp(w)
    return math.pow(l, e_w) / e_w
function code(w, l) return Float64((l ^ exp(w)) / exp(w)) end
function tmp = code(w, l) tmp = (l ^ exp(w)) / exp(w); end
code[w_, l_] := N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\ell}^{\left(e^{w}\right)}}{e^{w}}
\end{array}
Initial program 98.6%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.6%
Simplified98.6%
(FPCore (w l)
:precision binary64
(if (<= w -1.6)
(exp (- 0.0 w))
(/
(pow l (exp w))
(+ 1.0 (* w (+ 1.0 (* w (+ 0.5 (* w 0.16666666666666666)))))))))
double code(double w, double l) {
double tmp;
if (w <= -1.6) {
tmp = exp((0.0 - w));
} else {
tmp = pow(l, exp(w)) / (1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666))))));
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-1.6d0)) then
tmp = exp((0.0d0 - w))
else
tmp = (l ** exp(w)) / (1.0d0 + (w * (1.0d0 + (w * (0.5d0 + (w * 0.16666666666666666d0))))))
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -1.6) {
tmp = Math.exp((0.0 - w));
} else {
tmp = Math.pow(l, Math.exp(w)) / (1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666))))));
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten: exp(-w) for w <= -1.6,
    otherwise l**exp(w) divided by the Horner form of
    1 + w + w**2/2 + w**3/6.  (Original line was collapsed onto one
    line and was a SyntaxError; logic is unchanged.)"""
    tmp = 0
    if w <= -1.6:
        tmp = math.exp((0.0 - w))
    else:
        tmp = math.pow(l, math.exp(w)) / (1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666))))))
    return tmp
function code(w, l) tmp = 0.0 if (w <= -1.6) tmp = exp(Float64(0.0 - w)); else tmp = Float64((l ^ exp(w)) / Float64(1.0 + Float64(w * Float64(1.0 + Float64(w * Float64(0.5 + Float64(w * 0.16666666666666666))))))); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -1.6) tmp = exp((0.0 - w)); else tmp = (l ^ exp(w)) / (1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666)))))); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -1.6], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[(1.0 + N[(w * N[(1.0 + N[(w * N[(0.5 + N[(w * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1.6:\\
\;\;\;\;e^{0 - w}\\
\mathbf{else}:\\
\;\;\;\;\frac{{\ell}^{\left(e^{w}\right)}}{1 + w \cdot \left(1 + w \cdot \left(0.5 + w \cdot 0.16666666666666666\right)\right)}\\
\end{array}
\end{array}
if w < -1.6000000000000001Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -1.6000000000000001 < w Initial program 98.2%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.2%
Simplified98.2%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.3%
Simplified99.3%
(FPCore (w l) :precision binary64 (if (<= w -3.8) (exp (- 0.0 w)) (/ (pow l (exp w)) (+ 1.0 (* w (+ 1.0 (* w 0.5)))))))
double code(double w, double l) {
double tmp;
if (w <= -3.8) {
tmp = exp((0.0 - w));
} else {
tmp = pow(l, exp(w)) / (1.0 + (w * (1.0 + (w * 0.5))));
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-3.8d0)) then
tmp = exp((0.0d0 - w))
else
tmp = (l ** exp(w)) / (1.0d0 + (w * (1.0d0 + (w * 0.5d0))))
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -3.8) {
tmp = Math.exp((0.0 - w));
} else {
tmp = Math.pow(l, Math.exp(w)) / (1.0 + (w * (1.0 + (w * 0.5))));
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten: exp(-w) for w <= -3.8,
    otherwise l**exp(w) divided by the quadratic 1 + w + w**2/2.
    (Original line was collapsed onto one line and was a SyntaxError;
    logic is unchanged.)"""
    tmp = 0
    if w <= -3.8:
        tmp = math.exp((0.0 - w))
    else:
        tmp = math.pow(l, math.exp(w)) / (1.0 + (w * (1.0 + (w * 0.5))))
    return tmp
function code(w, l) tmp = 0.0 if (w <= -3.8) tmp = exp(Float64(0.0 - w)); else tmp = Float64((l ^ exp(w)) / Float64(1.0 + Float64(w * Float64(1.0 + Float64(w * 0.5))))); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -3.8) tmp = exp((0.0 - w)); else tmp = (l ^ exp(w)) / (1.0 + (w * (1.0 + (w * 0.5)))); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -3.8], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], N[(N[Power[l, N[Exp[w], $MachinePrecision]], $MachinePrecision] / N[(1.0 + N[(w * N[(1.0 + N[(w * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -3.8:\\
\;\;\;\;e^{0 - w}\\
\mathbf{else}:\\
\;\;\;\;\frac{{\ell}^{\left(e^{w}\right)}}{1 + w \cdot \left(1 + w \cdot 0.5\right)}\\
\end{array}
\end{array}
if w < -3.7999999999999998Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -3.7999999999999998 < w Initial program 98.2%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.2%
Simplified98.2%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.2%
Simplified99.2%
(FPCore (w l) :precision binary64 (let* ((t_0 (+ 1.0 (* w (+ 1.0 (* w (+ 0.5 (* w 0.16666666666666666)))))))) (if (<= w -1.6) (exp (- 0.0 w)) (/ (pow l t_0) t_0))))
double code(double w, double l) {
double t_0 = 1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666)))));
double tmp;
if (w <= -1.6) {
tmp = exp((0.0 - w));
} else {
tmp = pow(l, t_0) / t_0;
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: t_0
real(8) :: tmp
t_0 = 1.0d0 + (w * (1.0d0 + (w * (0.5d0 + (w * 0.16666666666666666d0)))))
if (w <= (-1.6d0)) then
tmp = exp((0.0d0 - w))
else
tmp = (l ** t_0) / t_0
end if
code = tmp
end function
public static double code(double w, double l) {
double t_0 = 1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666)))));
double tmp;
if (w <= -1.6) {
tmp = Math.exp((0.0 - w));
} else {
tmp = Math.pow(l, t_0) / t_0;
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten: the cubic Taylor polynomial
    of exp(w) serves as both exponent and divisor; exp(-w) is used for
    w <= -1.6.  (Original line was collapsed onto one line and was a
    SyntaxError; logic is unchanged.)"""
    t_0 = 1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666)))))
    tmp = 0
    if w <= -1.6:
        tmp = math.exp((0.0 - w))
    else:
        tmp = math.pow(l, t_0) / t_0
    return tmp
function code(w, l) t_0 = Float64(1.0 + Float64(w * Float64(1.0 + Float64(w * Float64(0.5 + Float64(w * 0.16666666666666666)))))) tmp = 0.0 if (w <= -1.6) tmp = exp(Float64(0.0 - w)); else tmp = Float64((l ^ t_0) / t_0); end return tmp end
function tmp_2 = code(w, l) t_0 = 1.0 + (w * (1.0 + (w * (0.5 + (w * 0.16666666666666666))))); tmp = 0.0; if (w <= -1.6) tmp = exp((0.0 - w)); else tmp = (l ^ t_0) / t_0; end tmp_2 = tmp; end
code[w_, l_] := Block[{t$95$0 = N[(1.0 + N[(w * N[(1.0 + N[(w * N[(0.5 + N[(w * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[w, -1.6], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], N[(N[Power[l, t$95$0], $MachinePrecision] / t$95$0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 + w \cdot \left(1 + w \cdot \left(0.5 + w \cdot 0.16666666666666666\right)\right)\\
\mathbf{if}\;w \leq -1.6:\\
\;\;\;\;e^{0 - w}\\
\mathbf{else}:\\
\;\;\;\;\frac{{\ell}^{t\_0}}{t\_0}\\
\end{array}
\end{array}
if w < -1.6000000000000001Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -1.6000000000000001 < w Initial program 98.2%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.2%
Simplified98.2%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.3%
Simplified99.3%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.1%
Simplified99.1%
(FPCore (w l)
:precision binary64
(if (<= w -1.0)
(exp (- 0.0 w))
(*
(+ w -1.0)
(* (/ l (+ w 1.0)) (/ (pow l (* w (+ 1.0 (* w 0.5)))) (+ w -1.0))))))
double code(double w, double l) {
double tmp;
if (w <= -1.0) {
tmp = exp((0.0 - w));
} else {
tmp = (w + -1.0) * ((l / (w + 1.0)) * (pow(l, (w * (1.0 + (w * 0.5)))) / (w + -1.0)));
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-1.0d0)) then
tmp = exp((0.0d0 - w))
else
tmp = (w + (-1.0d0)) * ((l / (w + 1.0d0)) * ((l ** (w * (1.0d0 + (w * 0.5d0)))) / (w + (-1.0d0))))
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -1.0) {
tmp = Math.exp((0.0 - w));
} else {
tmp = (w + -1.0) * ((l / (w + 1.0)) * (Math.pow(l, (w * (1.0 + (w * 0.5)))) / (w + -1.0)));
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten for w > -1 as
    (w-1) * (l/(w+1)) * l**(w*(1 + w/2))/(w-1); exp(-w) otherwise.
    (Original line was collapsed onto one line and was a SyntaxError;
    logic is unchanged.)"""
    tmp = 0
    if w <= -1.0:
        tmp = math.exp((0.0 - w))
    else:
        tmp = (w + -1.0) * ((l / (w + 1.0)) * (math.pow(l, (w * (1.0 + (w * 0.5)))) / (w + -1.0)))
    return tmp
function code(w, l) tmp = 0.0 if (w <= -1.0) tmp = exp(Float64(0.0 - w)); else tmp = Float64(Float64(w + -1.0) * Float64(Float64(l / Float64(w + 1.0)) * Float64((l ^ Float64(w * Float64(1.0 + Float64(w * 0.5)))) / Float64(w + -1.0)))); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -1.0) tmp = exp((0.0 - w)); else tmp = (w + -1.0) * ((l / (w + 1.0)) * ((l ^ (w * (1.0 + (w * 0.5)))) / (w + -1.0))); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -1.0], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], N[(N[(w + -1.0), $MachinePrecision] * N[(N[(l / N[(w + 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[Power[l, N[(w * N[(1.0 + N[(w * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[(w + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1:\\
\;\;\;\;e^{0 - w}\\
\mathbf{else}:\\
\;\;\;\;\left(w + -1\right) \cdot \left(\frac{\ell}{w + 1} \cdot \frac{{\ell}^{\left(w \cdot \left(1 + w \cdot 0.5\right)\right)}}{w + -1}\right)\\
\end{array}
\end{array}
if w < -1Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -1 < w Initial program 98.2%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.2%
Simplified98.2%
Taylor expanded in w around 0
+-commutativeN/A
+-lowering-+.f6499.0%
Simplified99.0%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.0%
Simplified99.0%
flip-+N/A
associate-/r/N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6499.0%
Applied egg-rr99.0%
unpow-prod-upN/A
unpow1N/A
difference-of-sqr--1N/A
times-fracN/A
*-lowering-*.f64N/A
metadata-evalN/A
sub-negN/A
/-lowering-/.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-lowering-+.f6499.1%
Applied egg-rr99.1%
Final simplification99.3%
(FPCore (w l) :precision binary64 (if (<= w -1.0) (exp (- 0.0 w)) (/ (* l (pow l (* w (+ 1.0 (* w 0.5))))) (+ w 1.0))))
double code(double w, double l) {
double tmp;
if (w <= -1.0) {
tmp = exp((0.0 - w));
} else {
tmp = (l * pow(l, (w * (1.0 + (w * 0.5))))) / (w + 1.0);
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-1.0d0)) then
tmp = exp((0.0d0 - w))
else
tmp = (l * (l ** (w * (1.0d0 + (w * 0.5d0))))) / (w + 1.0d0)
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -1.0) {
tmp = Math.exp((0.0 - w));
} else {
tmp = (l * Math.pow(l, (w * (1.0 + (w * 0.5))))) / (w + 1.0);
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten for w > -1 as
    l * l**(w*(1 + w/2)) / (w + 1); exp(-w) otherwise.
    (Original line was collapsed onto one line and was a SyntaxError;
    logic is unchanged.)"""
    tmp = 0
    if w <= -1.0:
        tmp = math.exp((0.0 - w))
    else:
        tmp = (l * math.pow(l, (w * (1.0 + (w * 0.5))))) / (w + 1.0)
    return tmp
function code(w, l) tmp = 0.0 if (w <= -1.0) tmp = exp(Float64(0.0 - w)); else tmp = Float64(Float64(l * (l ^ Float64(w * Float64(1.0 + Float64(w * 0.5))))) / Float64(w + 1.0)); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -1.0) tmp = exp((0.0 - w)); else tmp = (l * (l ^ (w * (1.0 + (w * 0.5))))) / (w + 1.0); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -1.0], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], N[(N[(l * N[Power[l, N[(w * N[(1.0 + N[(w * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(w + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1:\\
\;\;\;\;e^{0 - w}\\
\mathbf{else}:\\
\;\;\;\;\frac{\ell \cdot {\ell}^{\left(w \cdot \left(1 + w \cdot 0.5\right)\right)}}{w + 1}\\
\end{array}
\end{array}
if w < -1Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -1 < w Initial program 98.2%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.2%
Simplified98.2%
Taylor expanded in w around 0
+-commutativeN/A
+-lowering-+.f6499.0%
Simplified99.0%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6499.0%
Simplified99.0%
+-commutativeN/A
pow-plusN/A
*-lowering-*.f64N/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f6499.1%
Applied egg-rr99.1%
Final simplification99.3%
(FPCore (w l) :precision binary64 (if (<= w -1.0) (exp (- 0.0 w)) (/ (pow l (+ w 1.0)) (+ w 1.0))))
double code(double w, double l) {
double tmp;
if (w <= -1.0) {
tmp = exp((0.0 - w));
} else {
tmp = pow(l, (w + 1.0)) / (w + 1.0);
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-1.0d0)) then
tmp = exp((0.0d0 - w))
else
tmp = (l ** (w + 1.0d0)) / (w + 1.0d0)
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -1.0) {
tmp = Math.exp((0.0 - w));
} else {
tmp = Math.pow(l, (w + 1.0)) / (w + 1.0);
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten for w > -1 as
    l**(w+1) / (w+1); exp(-w) otherwise.  (Original line was collapsed
    onto one line and was a SyntaxError; logic is unchanged.)"""
    tmp = 0
    if w <= -1.0:
        tmp = math.exp((0.0 - w))
    else:
        tmp = math.pow(l, (w + 1.0)) / (w + 1.0)
    return tmp
function code(w, l) tmp = 0.0 if (w <= -1.0) tmp = exp(Float64(0.0 - w)); else tmp = Float64((l ^ Float64(w + 1.0)) / Float64(w + 1.0)); end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -1.0) tmp = exp((0.0 - w)); else tmp = (l ^ (w + 1.0)) / (w + 1.0); end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -1.0], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], N[(N[Power[l, N[(w + 1.0), $MachinePrecision]], $MachinePrecision] / N[(w + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -1:\\
\;\;\;\;e^{0 - w}\\
\mathbf{else}:\\
\;\;\;\;\frac{{\ell}^{\left(w + 1\right)}}{w + 1}\\
\end{array}
\end{array}
if w < -1Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -1 < w Initial program 98.2%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.2%
Simplified98.2%
Taylor expanded in w around 0
+-commutativeN/A
+-lowering-+.f6499.0%
Simplified99.0%
Taylor expanded in w around 0
+-commutativeN/A
+-lowering-+.f6498.8%
Simplified98.8%
(FPCore (w l) :precision binary64 (if (<= w -0.7) (exp (- 0.0 w)) (if (<= w 0.18) (/ l (+ w 1.0)) 0.0)))
double code(double w, double l) {
double tmp;
if (w <= -0.7) {
tmp = exp((0.0 - w));
} else if (w <= 0.18) {
tmp = l / (w + 1.0);
} else {
tmp = 0.0;
}
return tmp;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: tmp
if (w <= (-0.7d0)) then
tmp = exp((0.0d0 - w))
else if (w <= 0.18d0) then
tmp = l / (w + 1.0d0)
else
tmp = 0.0d0
end if
code = tmp
end function
public static double code(double w, double l) {
double tmp;
if (w <= -0.7) {
tmp = Math.exp((0.0 - w));
} else if (w <= 0.18) {
tmp = l / (w + 1.0);
} else {
tmp = 0.0;
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten with three regimes:
    exp(-w) for w <= -0.7, l/(w+1) for -0.7 < w <= 0.18, else 0.
    (Original line was collapsed onto one line and was a SyntaxError;
    logic is unchanged.)"""
    tmp = 0
    if w <= -0.7:
        tmp = math.exp((0.0 - w))
    elif w <= 0.18:
        tmp = l / (w + 1.0)
    else:
        tmp = 0.0
    return tmp
function code(w, l) tmp = 0.0 if (w <= -0.7) tmp = exp(Float64(0.0 - w)); elseif (w <= 0.18) tmp = Float64(l / Float64(w + 1.0)); else tmp = 0.0; end return tmp end
function tmp_2 = code(w, l) tmp = 0.0; if (w <= -0.7) tmp = exp((0.0 - w)); elseif (w <= 0.18) tmp = l / (w + 1.0); else tmp = 0.0; end tmp_2 = tmp; end
code[w_, l_] := If[LessEqual[w, -0.7], N[Exp[N[(0.0 - w), $MachinePrecision]], $MachinePrecision], If[LessEqual[w, 0.18], N[(l / N[(w + 1.0), $MachinePrecision]), $MachinePrecision], 0.0]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -0.7:\\
\;\;\;\;e^{0 - w}\\
\mathbf{elif}\;w \leq 0.18:\\
\;\;\;\;\frac{\ell}{w + 1}\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < -0.69999999999999996Initial program 100.0%
Taylor expanded in l around inf
prod-expN/A
+-commutativeN/A
sub-negN/A
exp-lowering-exp.f64N/A
--lowering--.f64N/A
mul-1-negN/A
*-commutativeN/A
log-recN/A
distribute-lft-neg-outN/A
remove-double-negN/A
*-lowering-*.f64N/A
log-lowering-log.f64N/A
exp-lowering-exp.f6499.9%
Simplified99.9%
Taylor expanded in w around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f6499.7%
Simplified99.7%
if -0.69999999999999996 < w < 0.17999999999999999Initial program 99.7%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6499.7%
Simplified99.7%
Taylor expanded in w around 0
Simplified97.1%
Taylor expanded in w around 0
+-commutativeN/A
+-lowering-+.f6497.1%
Simplified97.1%
if 0.17999999999999999 < w Initial program 93.3%
Applied egg-rr93.4%
(FPCore (w l) :precision binary64 (/ l (exp w)))
double code(double w, double l) {
return l / exp(w);
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
code = l / exp(w)
end function
public static double code(double w, double l) {
return l / Math.exp(w);
}
def code(w, l):
    """Herbie alternative: l / exp(w)."""
    damping = math.exp(w)
    return l / damping
function code(w, l) return Float64(l / exp(w)) end
function tmp = code(w, l) tmp = l / exp(w); end
code[w_, l_] := N[(l / N[Exp[w], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\ell}{e^{w}}
\end{array}
Initial program 98.6%
exp-negN/A
associate-*l/N/A
*-lft-identityN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
exp-lowering-exp.f64N/A
exp-lowering-exp.f6498.6%
Simplified98.6%
Taylor expanded in w around 0
Simplified96.1%
(FPCore (w l)
:precision binary64
(let* ((t_0 (* w (+ -1.0 (* w (+ 0.5 (* w -0.16666666666666666)))))))
(if (<= w -4.4e+51)
(*
l
(+
(- 1.0 w)
(/
(* (* w w) (- 0.25 (* (* w w) 0.027777777777777776)))
(- 0.5 (* w -0.16666666666666666)))))
(if (<= w 0.16)
(/ (* l (+ 1.0 (* t_0 (* t_0 t_0)))) (+ 1.0 (* t_0 (+ -1.0 t_0))))
0.0))))
/* Herbie alternative with three regimes around the cubic
 * t = -w + w^2/2 - w^3/6.  Grouping matches the generated code. */
double code(double w, double l) {
    /* Computed unconditionally, matching the generated code. */
    double t = w * (-1.0 + (w * (0.5 + (w * -0.16666666666666666))));
    if (w <= -4.4e+51) {
        double quad = (w * w) * (0.25 - ((w * w) * 0.027777777777777776));
        return l * ((1.0 - w) + (quad / (0.5 - (w * -0.16666666666666666))));
    }
    if (w <= 0.16) {
        return (l * (1.0 + (t * (t * t)))) / (1.0 + (t * (-1.0 + t)));
    }
    return 0.0;
}
real(8) function code(w, l)
real(8), intent (in) :: w
real(8), intent (in) :: l
real(8) :: t_0
real(8) :: tmp
t_0 = w * ((-1.0d0) + (w * (0.5d0 + (w * (-0.16666666666666666d0)))))
if (w <= (-4.4d+51)) then
tmp = l * ((1.0d0 - w) + (((w * w) * (0.25d0 - ((w * w) * 0.027777777777777776d0))) / (0.5d0 - (w * (-0.16666666666666666d0)))))
else if (w <= 0.16d0) then
tmp = (l * (1.0d0 + (t_0 * (t_0 * t_0)))) / (1.0d0 + (t_0 * ((-1.0d0) + t_0)))
else
tmp = 0.0d0
end if
code = tmp
end function
public static double code(double w, double l) {
double t_0 = w * (-1.0 + (w * (0.5 + (w * -0.16666666666666666))));
double tmp;
if (w <= -4.4e+51) {
tmp = l * ((1.0 - w) + (((w * w) * (0.25 - ((w * w) * 0.027777777777777776))) / (0.5 - (w * -0.16666666666666666))));
} else if (w <= 0.16) {
tmp = (l * (1.0 + (t_0 * (t_0 * t_0)))) / (1.0 + (t_0 * (-1.0 + t_0)));
} else {
tmp = 0.0;
}
return tmp;
}
def code(w, l):
    """exp(-w) * l**exp(w), Herbie-rewritten with three regimes around
    the cubic t_0 = -w + w**2/2 - w**3/6.  (Original line was collapsed
    onto one line and was a SyntaxError; logic is unchanged.)"""
    t_0 = w * (-1.0 + (w * (0.5 + (w * -0.16666666666666666))))
    tmp = 0
    if w <= -4.4e+51:
        tmp = l * ((1.0 - w) + (((w * w) * (0.25 - ((w * w) * 0.027777777777777776))) / (0.5 - (w * -0.16666666666666666))))
    elif w <= 0.16:
        tmp = (l * (1.0 + (t_0 * (t_0 * t_0)))) / (1.0 + (t_0 * (-1.0 + t_0)))
    else:
        tmp = 0.0
    return tmp
function code(w, l) t_0 = Float64(w * Float64(-1.0 + Float64(w * Float64(0.5 + Float64(w * -0.16666666666666666))))) tmp = 0.0 if (w <= -4.4e+51) tmp = Float64(l * Float64(Float64(1.0 - w) + Float64(Float64(Float64(w * w) * Float64(0.25 - Float64(Float64(w * w) * 0.027777777777777776))) / Float64(0.5 - Float64(w * -0.16666666666666666))))); elseif (w <= 0.16) tmp = Float64(Float64(l * Float64(1.0 + Float64(t_0 * Float64(t_0 * t_0)))) / Float64(1.0 + Float64(t_0 * Float64(-1.0 + t_0)))); else tmp = 0.0; end return tmp end
function tmp_2 = code(w, l) t_0 = w * (-1.0 + (w * (0.5 + (w * -0.16666666666666666)))); tmp = 0.0; if (w <= -4.4e+51) tmp = l * ((1.0 - w) + (((w * w) * (0.25 - ((w * w) * 0.027777777777777776))) / (0.5 - (w * -0.16666666666666666)))); elseif (w <= 0.16) tmp = (l * (1.0 + (t_0 * (t_0 * t_0)))) / (1.0 + (t_0 * (-1.0 + t_0))); else tmp = 0.0; end tmp_2 = tmp; end
code[w_, l_] := Block[{t$95$0 = N[(w * N[(-1.0 + N[(w * N[(0.5 + N[(w * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[w, -4.4e+51], N[(l * N[(N[(1.0 - w), $MachinePrecision] + N[(N[(N[(w * w), $MachinePrecision] * N[(0.25 - N[(N[(w * w), $MachinePrecision] * 0.027777777777777776), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(0.5 - N[(w * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[w, 0.16], N[(N[(l * N[(1.0 + N[(t$95$0 * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(t$95$0 * N[(-1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := w \cdot \left(-1 + w \cdot \left(0.5 + w \cdot -0.16666666666666666\right)\right)\\
\mathbf{if}\;w \leq -4.4 \cdot 10^{+51}:\\
\;\;\;\;\ell \cdot \left(\left(1 - w\right) + \frac{\left(w \cdot w\right) \cdot \left(0.25 - \left(w \cdot w\right) \cdot 0.027777777777777776\right)}{0.5 - w \cdot -0.16666666666666666}\right)\\
\mathbf{elif}\;w \leq 0.16:\\
\;\;\;\;\frac{\ell \cdot \left(1 + t\_0 \cdot \left(t\_0 \cdot t\_0\right)\right)}{1 + t\_0 \cdot \left(-1 + t\_0\right)}\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < -4.39999999999999984e51Initial program 100.0%
Taylor expanded in w around 0
Simplified100.0%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6490.5%
Simplified90.5%
distribute-lft-inN/A
associate-+r+N/A
*-commutativeN/A
neg-mul-1N/A
*-rgt-identityN/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
+-lowering-+.f64N/A
metadata-evalN/A
*-rgt-identityN/A
--lowering--.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6490.5%
Applied egg-rr90.5%
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
--lowering--.f64N/A
swap-sqrN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
*-lowering-*.f6496.2%
Applied egg-rr96.2%
if -4.39999999999999984e51 < w < 0.160000000000000003Initial program 99.7%
Taylor expanded in w around 0
Simplified95.6%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6489.1%
Simplified89.1%
flip3-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr93.8%
if 0.160000000000000003 < w Initial program 93.3%
Applied egg-rr93.4%
Final simplification94.2%
(FPCore (w l)
:precision binary64
(if (<= w 0.105)
(*
l
(+
(- 1.0 w)
(/
(* (* w w) (- 0.25 (* (* w w) 0.027777777777777776)))
(- 0.5 (* w -0.16666666666666666)))))
0.0))
/* Herbie-generated alternative: for w <= 0.105 a rational approximation
   scaled by l, otherwise 0 (generated code). */
double code(double w, double l) {
    const double w2 = w * w;
    const double poly = (1.0 - w)
        + (w2 * (0.25 - w2 * 0.027777777777777776))
          / (0.5 - w * -0.16666666666666666);
    return (w <= 0.105) ? l * poly : 0.0;
}
real(8) function code(w, l)
    ! Herbie-generated alternative: rational approximation scaled by l
    ! below the cutoff, 0 above it (generated code).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= 0.105d0) then
        code = l * ((1.0d0 - w) + (((w * w) * (0.25d0 - ((w * w) * 0.027777777777777776d0))) / (0.5d0 - (w * (-0.16666666666666666d0)))))
    else
        code = 0.0d0
    end if
end function
// Herbie-generated alternative: rational approximation scaled by l below
// the cutoff, 0 above it (generated code).
public static double code(double w, double l) {
    double w2 = w * w;
    double poly = (1.0 - w)
        + (w2 * (0.25 - w2 * 0.027777777777777776)) / (0.5 - w * -0.16666666666666666);
    return (w <= 0.105) ? l * poly : 0.0;
}
def code(w, l):
    # Restored from a whitespace-collapsed (syntactically invalid) one-liner
    # in the extracted report; logic unchanged.
    tmp = 0
    if w <= 0.105:
        tmp = l * ((1.0 - w) + (((w * w) * (0.25 - ((w * w) * 0.027777777777777776))) / (0.5 - (w * -0.16666666666666666))))
    else:
        tmp = 0.0
    return tmp
# Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function code(w, l)
    tmp = 0.0
    if (w <= 0.105)
        tmp = Float64(l * Float64(Float64(1.0 - w) + Float64(Float64(Float64(w * w) * Float64(0.25 - Float64(Float64(w * w) * 0.027777777777777776))) / Float64(0.5 - Float64(w * -0.16666666666666666)))))
    else
        tmp = 0.0
    end
    return tmp
end
% Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function tmp_2 = code(w, l)
    tmp = 0.0;
    if (w <= 0.105)
        tmp = l * ((1.0 - w) + (((w * w) * (0.25 - ((w * w) * 0.027777777777777776))) / (0.5 - (w * -0.16666666666666666))));
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated alternative; the nested N[..., $MachinePrecision] calls encode the intended evaluation precision and must not be restructured. *)
code[w_, l_] := If[LessEqual[w, 0.105], N[(l * N[(N[(1.0 - w), $MachinePrecision] + N[(N[(N[(w * w), $MachinePrecision] * N[(0.25 - N[(N[(w * w), $MachinePrecision] * 0.027777777777777776), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(0.5 - N[(w * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq 0.105:\\
\;\;\;\;\ell \cdot \left(\left(1 - w\right) + \frac{\left(w \cdot w\right) \cdot \left(0.25 - \left(w \cdot w\right) \cdot 0.027777777777777776\right)}{0.5 - w \cdot -0.16666666666666666}\right)\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < 0.104999999999999996Initial program 99.8%
Taylor expanded in w around 0
Simplified96.7%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6489.4%
Simplified89.4%
distribute-lft-inN/A
associate-+r+N/A
*-commutativeN/A
neg-mul-1N/A
*-rgt-identityN/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
+-lowering-+.f64N/A
metadata-evalN/A
*-rgt-identityN/A
--lowering--.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6489.4%
Applied egg-rr89.4%
flip-+N/A
associate-*l/N/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
--lowering--.f64N/A
swap-sqrN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
*-lowering-*.f6490.8%
Applied egg-rr90.8%
if 0.104999999999999996 < w Initial program 93.3%
Applied egg-rr93.4%
Final simplification91.2%
;; Herbie alternative: cubic Horner polynomial in w below 0.13, else 0.
(FPCore (w l) :precision binary64 (if (<= w 0.13) (* l (+ 1.0 (* w (+ -1.0 (* w (+ 0.5 (* w -0.16666666666666666))))))) 0.0))
/* Herbie-generated alternative: cubic Horner polynomial in w scaled by l
   below the cutoff, 0 above it (generated code). */
double code(double w, double l) {
    double inner = -1.0 + w * (0.5 + w * -0.16666666666666666);
    return (w <= 0.13) ? l * (1.0 + w * inner) : 0.0;
}
real(8) function code(w, l)
    ! Herbie-generated alternative: cubic Horner polynomial in w scaled
    ! by l below the cutoff, 0 above it (generated code).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= 0.13d0) then
        code = l * (1.0d0 + (w * ((-1.0d0) + (w * (0.5d0 + (w * (-0.16666666666666666d0)))))))
    else
        code = 0.0d0
    end if
end function
// Herbie-generated alternative: cubic Horner polynomial in w scaled by l
// below the cutoff, 0 above it (generated code).
public static double code(double w, double l) {
    double inner = -1.0 + w * (0.5 + w * -0.16666666666666666);
    return (w <= 0.13) ? l * (1.0 + w * inner) : 0.0;
}
def code(w, l):
    # Restored from a whitespace-collapsed (syntactically invalid) one-liner
    # in the extracted report; logic unchanged.
    tmp = 0
    if w <= 0.13:
        tmp = l * (1.0 + (w * (-1.0 + (w * (0.5 + (w * -0.16666666666666666))))))
    else:
        tmp = 0.0
    return tmp
# Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function code(w, l)
    tmp = 0.0
    if (w <= 0.13)
        tmp = Float64(l * Float64(1.0 + Float64(w * Float64(-1.0 + Float64(w * Float64(0.5 + Float64(w * -0.16666666666666666)))))))
    else
        tmp = 0.0
    end
    return tmp
end
% Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function tmp_2 = code(w, l)
    tmp = 0.0;
    if (w <= 0.13)
        tmp = l * (1.0 + (w * (-1.0 + (w * (0.5 + (w * -0.16666666666666666))))));
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated alternative; nested N[..., $MachinePrecision] calls encode evaluation precision and must not be restructured. *)
code[w_, l_] := If[LessEqual[w, 0.13], N[(l * N[(1.0 + N[(w * N[(-1.0 + N[(w * N[(0.5 + N[(w * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq 0.13:\\
\;\;\;\;\ell \cdot \left(1 + w \cdot \left(-1 + w \cdot \left(0.5 + w \cdot -0.16666666666666666\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < 0.13Initial program 99.8%
Taylor expanded in w around 0
Simplified96.7%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6489.4%
Simplified89.4%
if 0.13 < w Initial program 93.3%
Applied egg-rr93.4%
Final simplification90.1%
;; Herbie alternative: even-powered quadratic/cubic polynomial below w = 0.145, else 0.
(FPCore (w l) :precision binary64 (if (<= w 0.145) (* l (+ 1.0 (* (* w w) (+ 0.5 (* w -0.16666666666666666))))) 0.0))
/* Herbie-generated alternative: polynomial in w*w scaled by l below the
   cutoff, 0 above it (generated code). */
double code(double w, double l) {
    double w2 = w * w;
    return (w <= 0.145) ? l * (1.0 + w2 * (0.5 + w * -0.16666666666666666)) : 0.0;
}
real(8) function code(w, l)
    ! Herbie-generated alternative: polynomial in w*w scaled by l below
    ! the cutoff, 0 above it (generated code).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= 0.145d0) then
        code = l * (1.0d0 + ((w * w) * (0.5d0 + (w * (-0.16666666666666666d0)))))
    else
        code = 0.0d0
    end if
end function
// Herbie-generated alternative: polynomial in w*w scaled by l below the
// cutoff, 0 above it (generated code).
public static double code(double w, double l) {
    double w2 = w * w;
    return (w <= 0.145) ? l * (1.0 + w2 * (0.5 + w * -0.16666666666666666)) : 0.0;
}
def code(w, l):
    # Restored from a whitespace-collapsed (syntactically invalid) one-liner
    # in the extracted report; logic unchanged.
    tmp = 0
    if w <= 0.145:
        tmp = l * (1.0 + ((w * w) * (0.5 + (w * -0.16666666666666666))))
    else:
        tmp = 0.0
    return tmp
# Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function code(w, l)
    tmp = 0.0
    if (w <= 0.145)
        tmp = Float64(l * Float64(1.0 + Float64(Float64(w * w) * Float64(0.5 + Float64(w * -0.16666666666666666)))))
    else
        tmp = 0.0
    end
    return tmp
end
% Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function tmp_2 = code(w, l)
    tmp = 0.0;
    if (w <= 0.145)
        tmp = l * (1.0 + ((w * w) * (0.5 + (w * -0.16666666666666666))));
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated alternative; nested N[..., $MachinePrecision] calls encode evaluation precision and must not be restructured. *)
code[w_, l_] := If[LessEqual[w, 0.145], N[(l * N[(1.0 + N[(N[(w * w), $MachinePrecision] * N[(0.5 + N[(w * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq 0.145:\\
\;\;\;\;\ell \cdot \left(1 + \left(w \cdot w\right) \cdot \left(0.5 + w \cdot -0.16666666666666666\right)\right)\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < 0.14499999999999999Initial program 99.8%
Taylor expanded in w around 0
Simplified96.7%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6489.4%
Simplified89.4%
distribute-lft-inN/A
associate-+r+N/A
*-commutativeN/A
neg-mul-1N/A
*-rgt-identityN/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
+-lowering-+.f64N/A
metadata-evalN/A
*-rgt-identityN/A
--lowering--.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6489.4%
Applied egg-rr89.4%
Taylor expanded in w around 0
Simplified89.4%
if 0.14499999999999999 < w Initial program 93.3%
Applied egg-rr93.4%
Final simplification90.1%
;; Herbie alternative: 1 plus a cubic term in w, scaled by l, below w = 0.17; else 0.
(FPCore (w l) :precision binary64 (if (<= w 0.17) (* l (+ 1.0 (* w (* (* w w) -0.16666666666666666)))) 0.0))
/* Herbie-generated alternative: 1 plus a cubic term in w, scaled by l,
   below the cutoff; 0 above it (generated code). */
double code(double w, double l) {
    double cubic = w * ((w * w) * -0.16666666666666666);
    return (w <= 0.17) ? l * (1.0 + cubic) : 0.0;
}
real(8) function code(w, l)
    ! Herbie-generated alternative: 1 plus a cubic term in w, scaled by l,
    ! below the cutoff; 0 above it (generated code).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= 0.17d0) then
        code = l * (1.0d0 + (w * ((w * w) * (-0.16666666666666666d0))))
    else
        code = 0.0d0
    end if
end function
// Herbie-generated alternative: 1 plus a cubic term in w, scaled by l,
// below the cutoff; 0 above it (generated code).
public static double code(double w, double l) {
    double cubic = w * ((w * w) * -0.16666666666666666);
    return (w <= 0.17) ? l * (1.0 + cubic) : 0.0;
}
def code(w, l):
    # Restored from a whitespace-collapsed (syntactically invalid) one-liner
    # in the extracted report; logic unchanged.
    tmp = 0
    if w <= 0.17:
        tmp = l * (1.0 + (w * ((w * w) * -0.16666666666666666)))
    else:
        tmp = 0.0
    return tmp
# Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function code(w, l)
    tmp = 0.0
    if (w <= 0.17)
        tmp = Float64(l * Float64(1.0 + Float64(w * Float64(Float64(w * w) * -0.16666666666666666))))
    else
        tmp = 0.0
    end
    return tmp
end
% Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function tmp_2 = code(w, l)
    tmp = 0.0;
    if (w <= 0.17)
        tmp = l * (1.0 + (w * ((w * w) * -0.16666666666666666)));
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated alternative; nested N[..., $MachinePrecision] calls encode evaluation precision and must not be restructured. *)
code[w_, l_] := If[LessEqual[w, 0.17], N[(l * N[(1.0 + N[(w * N[(N[(w * w), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq 0.17:\\
\;\;\;\;\ell \cdot \left(1 + w \cdot \left(\left(w \cdot w\right) \cdot -0.16666666666666666\right)\right)\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < 0.170000000000000012Initial program 99.8%
Taylor expanded in w around 0
Simplified96.7%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6489.4%
Simplified89.4%
Taylor expanded in w around inf
*-commutativeN/A
cube-multN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
associate-*r*N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6489.4%
Simplified89.4%
if 0.170000000000000012 < w Initial program 93.3%
Applied egg-rr93.4%
Final simplification90.1%
;; Herbie alternative: cubic term for w <= -190, linear l*(1-w) up to w = 0.11, else 0.
(FPCore (w l) :precision binary64 (if (<= w -190.0) (* l (* w (* (* w w) -0.16666666666666666))) (if (<= w 0.11) (* l (- 1.0 w)) 0.0)))
/* Herbie-generated alternative: cubic term for very negative w, linear
   l*(1-w) up to the cutoff, 0 above it (generated code). */
double code(double w, double l) {
    if (w <= -190.0) {
        return l * (w * ((w * w) * -0.16666666666666666));
    }
    return (w <= 0.11) ? l * (1.0 - w) : 0.0;
}
real(8) function code(w, l)
    ! Herbie-generated alternative: cubic term for very negative w, linear
    ! l*(1-w) up to the cutoff, 0 above it (generated code).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= (-190.0d0)) then
        code = l * (w * ((w * w) * (-0.16666666666666666d0)))
    else if (w <= 0.11d0) then
        code = l * (1.0d0 - w)
    else
        code = 0.0d0
    end if
end function
// Herbie-generated alternative: cubic term for very negative w, linear
// l*(1-w) up to the cutoff, 0 above it (generated code).
public static double code(double w, double l) {
    if (w <= -190.0) {
        return l * (w * ((w * w) * -0.16666666666666666));
    }
    return (w <= 0.11) ? l * (1.0 - w) : 0.0;
}
def code(w, l):
    # Restored from a whitespace-collapsed (syntactically invalid) one-liner
    # in the extracted report; logic unchanged.
    tmp = 0
    if w <= -190.0:
        tmp = l * (w * ((w * w) * -0.16666666666666666))
    elif w <= 0.11:
        tmp = l * (1.0 - w)
    else:
        tmp = 0.0
    return tmp
# Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function code(w, l)
    tmp = 0.0
    if (w <= -190.0)
        tmp = Float64(l * Float64(w * Float64(Float64(w * w) * -0.16666666666666666)))
    elseif (w <= 0.11)
        tmp = Float64(l * Float64(1.0 - w))
    else
        tmp = 0.0
    end
    return tmp
end
% Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function tmp_2 = code(w, l)
    tmp = 0.0;
    if (w <= -190.0)
        tmp = l * (w * ((w * w) * -0.16666666666666666));
    elseif (w <= 0.11)
        tmp = l * (1.0 - w);
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated alternative; nested N[..., $MachinePrecision] calls encode evaluation precision and must not be restructured. *)
code[w_, l_] := If[LessEqual[w, -190.0], N[(l * N[(w * N[(N[(w * w), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[w, 0.11], N[(l * N[(1.0 - w), $MachinePrecision]), $MachinePrecision], 0.0]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq -190:\\
\;\;\;\;\ell \cdot \left(w \cdot \left(\left(w \cdot w\right) \cdot -0.16666666666666666\right)\right)\\
\mathbf{elif}\;w \leq 0.11:\\
\;\;\;\;\ell \cdot \left(1 - w\right)\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < -190Initial program 100.0%
Taylor expanded in w around 0
Simplified98.5%
Taylor expanded in w around 0
+-lowering-+.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f6474.5%
Simplified74.5%
Taylor expanded in w around inf
*-commutativeN/A
cube-multN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
associate-*r*N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6474.5%
Simplified74.5%
if -190 < w < 0.110000000000000001Initial program 99.7%
Taylor expanded in w around 0
Simplified95.9%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
--lowering--.f6495.9%
Simplified95.9%
if 0.110000000000000001 < w Initial program 93.3%
Applied egg-rr93.4%
Final simplification90.1%
;; Herbie alternative: linear l*(1-w) below w = 0.12, else 0.
(FPCore (w l) :precision binary64 (if (<= w 0.12) (* l (- 1.0 w)) 0.0))
/* Herbie-generated alternative: linear l*(1-w) below the cutoff, 0 above
   it (generated code). */
double code(double w, double l) {
    return (w <= 0.12) ? l * (1.0 - w) : 0.0;
}
real(8) function code(w, l)
    ! Herbie-generated alternative: linear l*(1-w) below the cutoff,
    ! 0 above it (generated code).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= 0.12d0) then
        code = l * (1.0d0 - w)
    else
        code = 0.0d0
    end if
end function
// Herbie-generated alternative: linear l*(1-w) below the cutoff, 0 above
// it (generated code).
public static double code(double w, double l) {
    return (w <= 0.12) ? l * (1.0 - w) : 0.0;
}
def code(w, l):
    # Restored from a whitespace-collapsed (syntactically invalid) one-liner
    # in the extracted report; logic unchanged.
    tmp = 0
    if w <= 0.12:
        tmp = l * (1.0 - w)
    else:
        tmp = 0.0
    return tmp
# Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function code(w, l)
    tmp = 0.0
    if (w <= 0.12)
        tmp = Float64(l * Float64(1.0 - w))
    else
        tmp = 0.0
    end
    return tmp
end
% Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function tmp_2 = code(w, l)
    tmp = 0.0;
    if (w <= 0.12)
        tmp = l * (1.0 - w);
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated alternative; nested N[..., $MachinePrecision] calls encode evaluation precision and must not be restructured. *)
code[w_, l_] := If[LessEqual[w, 0.12], N[(l * N[(1.0 - w), $MachinePrecision]), $MachinePrecision], 0.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq 0.12:\\
\;\;\;\;\ell \cdot \left(1 - w\right)\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < 0.12Initial program 99.8%
Taylor expanded in w around 0
Simplified96.7%
Taylor expanded in w around 0
neg-mul-1N/A
unsub-negN/A
--lowering--.f6474.9%
Simplified74.9%
if 0.12 < w Initial program 93.3%
Applied egg-rr93.4%
Final simplification78.2%
;; Herbie alternative: identity l below w = 0.15, else 0.
(FPCore (w l) :precision binary64 (if (<= w 0.15) l 0.0))
/* Herbie-generated alternative: returns l unchanged below the cutoff,
   0 above it (generated code). */
double code(double w, double l) {
    return (w <= 0.15) ? l : 0.0;
}
real(8) function code(w, l)
    ! Herbie-generated alternative: returns l unchanged below the cutoff,
    ! 0 above it (generated code).
    real(8), intent (in) :: w
    real(8), intent (in) :: l
    if (w <= 0.15d0) then
        code = l
    else
        code = 0.0d0
    end if
end function
// Herbie-generated alternative: returns l unchanged below the cutoff,
// 0 above it (generated code).
public static double code(double w, double l) {
    return (w <= 0.15) ? l : 0.0;
}
def code(w, l):
    # Restored from a whitespace-collapsed (syntactically invalid) one-liner
    # in the extracted report; logic unchanged.
    tmp = 0
    if w <= 0.15:
        tmp = l
    else:
        tmp = 0.0
    return tmp
# Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function code(w, l)
    tmp = 0.0
    if (w <= 0.15)
        tmp = l
    else
        tmp = 0.0
    end
    return tmp
end
% Restored from a whitespace-collapsed (syntactically invalid) one-liner.
function tmp_2 = code(w, l)
    tmp = 0.0;
    if (w <= 0.15)
        tmp = l;
    else
        tmp = 0.0;
    end
    tmp_2 = tmp;
end
(* Herbie-generated alternative: l below the cutoff, 0 above it. *)
code[w_, l_] := If[LessEqual[w, 0.15], l, 0.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;w \leq 0.15:\\
\;\;\;\;\ell\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if w < 0.149999999999999994Initial program 99.8%
Taylor expanded in w around 0
Simplified67.9%
if 0.149999999999999994 < w Initial program 93.3%
Applied egg-rr93.4%
;; Degenerate Herbie alternative: the whole program approximated by 0.
(FPCore (w l) :precision binary64 0.0)
/* Degenerate Herbie alternative: constant 0 (generated code). */
double code(double w, double l) {
return 0.0;
}
real(8) function code(w, l)
! Degenerate Herbie alternative: constant 0 (generated code).
real(8), intent (in) :: w
real(8), intent (in) :: l
code = 0.0d0
end function
// Degenerate Herbie alternative: constant 0 (generated code).
public static double code(double w, double l) {
return 0.0;
}
# Degenerate Herbie alternative: constant 0 (generated code).
def code(w, l): return 0.0
# Degenerate Herbie alternative: constant 0 (generated code).
function code(w, l) return 0.0 end
% Degenerate Herbie alternative: constant 0 (generated code).
function tmp = code(w, l) tmp = 0.0; end
(* Degenerate Herbie alternative: constant 0 (generated code). *)
code[w_, l_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 98.6%
Applied egg-rr19.0%
herbie shell --seed 2024163
;; Original input program: exp(-w) * l^(exp(w)).
(FPCore (w l)
:name "exp-w (used to crash)"
:precision binary64
(* (exp (- w)) (pow l (exp w))))