
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Initial program: j * (exp(l) - exp(-l)) * cos(k / 2) + u in real(8).
real(8) function code(j, l, k, u)
    implicit none
    real(8), intent(in) :: j, l, k, u
    real(8) :: growth, phase

    growth = exp(l) - exp(-l)
    phase = cos(k / 2.0d0)
    code = j * growth * phase + u
end function code
/** Initial program: J * (exp(l) - exp(-l)) * cos(K / 2) + U. */
public static double code(double J, double l, double K, double U) {
    final double growth = Math.exp(l) - Math.exp(-l);
    final double phase = Math.cos(K / 2.0);
    return J * growth * phase + U;
}
def code(J, l, K, U):
    """Initial program: J * (exp(l) - exp(-l)) * cos(K / 2) + U."""
    growth = math.exp(l) - math.exp(-l)
    phase = math.cos(K / 2.0)
    return J * growth * phase + U
# Initial program: J * (exp(l) - exp(-l)) * cos(K / 2) + U, with every step rounded to Float64.
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
% Initial program: J * (exp(l) - exp(-l)) * cos(K / 2) + U.
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
(* Initial program: J*(Exp[l] - Exp[-l])*Cos[K/2] + U, each step rounded to $MachinePrecision. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 22 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
/* Duplicate listing of the initial program: J*(exp(l)-exp(-l))*cos(K/2) + U. */
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Duplicate listing of the initial program: j*(exp(l)-exp(-l))*cos(k/2) + u.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
/** Duplicate listing of the initial program: J*(exp(l)-exp(-l))*cos(K/2) + U. */
public static double code(double J, double l, double K, double U) {
return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
# Duplicate listing of the initial program: J*(exp(l)-exp(-l))*cos(K/2) + U.
def code(J, l, K, U): return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
# Duplicate listing of the initial program with explicit Float64 rounding at each step.
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
% Duplicate listing of the initial program: J*(exp(l)-exp(-l))*cos(K/2) + U.
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
(* Duplicate listing of the initial program, rounded to $MachinePrecision at each step. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
(FPCore (J l K U) :precision binary64 (fma (* (* 2.0 (sinh l)) (cos (* K 0.5))) J U))
double code(double J, double l, double K, double U) {
return fma(((2.0 * sinh(l)) * cos((K * 0.5))), J, U);
}
# Alternative: fma(2*sinh(l)*cos(K*0.5), J, U) with explicit Float64 rounding.
function code(J, l, K, U) return fma(Float64(Float64(2.0 * sinh(l)) * cos(Float64(K * 0.5))), J, U) end
(* Alternative: (2*Sinh[l]*Cos[K*0.5])*J + U, rounded to $MachinePrecision at each step. *)
code[J_, l_, K_, U_] := N[(N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(2 \cdot \sinh \ell\right) \cdot \cos \left(K \cdot 0.5\right), J, U\right)
\end{array}
Initial program 83.4%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (* 2.0 (* l J)))
(t_1 (* (cos (/ K 2.0)) (* J (- (exp l) (exp (- l)))))))
(if (<= t_1 -1e-59) t_0 (if (<= t_1 4e-158) U t_0))))
double code(double J, double l, double K, double U) {
double t_0 = 2.0 * (l * J);
double t_1 = cos((K / 2.0)) * (J * (exp(l) - exp(-l)));
double tmp;
if (t_1 <= -1e-59) {
tmp = t_0;
} else if (t_1 <= 4e-158) {
tmp = U;
} else {
tmp = t_0;
}
return tmp;
}
! Piecewise alternative: u only when t1 lies in (-1d-59, 4d-158]; otherwise 2*l*j.
real(8) function code(j, l, k, u)
    implicit none
    real(8), intent(in) :: j, l, k, u
    real(8) :: t0, t1

    t0 = 2.0d0 * (l * j)
    t1 = cos(k / 2.0d0) * (j * (exp(l) - exp(-l)))
    ! Original selection: t1 <= -1d-59 -> t0; t1 <= 4d-158 -> u; else t0.
    if (t1 > (-1d-59) .and. t1 <= 4d-158) then
        code = u
    else
        code = t0
    end if
end function code
/** Piecewise alternative: U only when t1 lies in (-1e-59, 4e-158]; otherwise 2*l*J. */
public static double code(double J, double l, double K, double U) {
    final double t0 = 2.0 * (l * J);
    final double t1 = Math.cos(K / 2.0) * (J * (Math.exp(l) - Math.exp(-l)));
    // Original: t1 <= -1e-59 -> t0; t1 <= 4e-158 -> U; else t0 (NaN -> t0, unchanged).
    return (t1 > -1e-59 && t1 <= 4e-158) ? U : t0;
}
def code(J, l, K, U):
    """Piecewise alternative: U only in the band (-1e-59, 4e-158]; otherwise 2*l*J."""
    t_0 = 2.0 * (l * J)
    t_1 = math.cos(K / 2.0) * (J * (math.exp(l) - math.exp(-l)))
    if t_1 <= -1e-59:
        return t_0
    if t_1 <= 4e-158:
        return U
    return t_0
# Piecewise alternative: U in the band (-1e-59, 4e-158], otherwise 2*l*J.
function code(J, l, K, U) t_0 = Float64(2.0 * Float64(l * J)) t_1 = Float64(cos(Float64(K / 2.0)) * Float64(J * Float64(exp(l) - exp(Float64(-l))))) tmp = 0.0 if (t_1 <= -1e-59) tmp = t_0; elseif (t_1 <= 4e-158) tmp = U; else tmp = t_0; end return tmp end
% Piecewise alternative: U in the band (-1e-59, 4e-158], otherwise 2*l*J.
function tmp_2 = code(J, l, K, U) t_0 = 2.0 * (l * J); t_1 = cos((K / 2.0)) * (J * (exp(l) - exp(-l))); tmp = 0.0; if (t_1 <= -1e-59) tmp = t_0; elseif (t_1 <= 4e-158) tmp = U; else tmp = t_0; end tmp_2 = tmp; end
(* Piecewise alternative: U in the band (-1e-59, 4e-158], otherwise 2*l*J. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(2.0 * N[(l * J), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision] * N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -1e-59], t$95$0, If[LessEqual[t$95$1, 4e-158], U, t$95$0]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 2 \cdot \left(\ell \cdot J\right)\\
t_1 := \cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right)\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{-59}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;t\_1 \leq 4 \cdot 10^{-158}:\\
\;\;\;\;U\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) < -1e-59 or 4.00000000000000026e-158 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) Initial program 99.2%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6471.9
Simplified71.9%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6453.5
Simplified53.5%
Taylor expanded in l around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6426.5
Simplified26.5%
Taylor expanded in J around inf
*-lowering-*.f64N/A
*-lowering-*.f6426.5
Simplified26.5%
if -1e-59 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) < 4.00000000000000026e-158Initial program 70.4%
Taylor expanded in J around 0
Simplified69.8%
Final simplification50.1%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (* J (- (exp l) (exp (- l))))))
(if (<= t_0 -1e-59)
(* J (* l (fma l (* l 0.3333333333333333) 2.0)))
(if (<= t_0 1e+186)
(fma 2.0 (* l J) U)
(* J (* 0.3333333333333333 (* l (* l l))))))))
double code(double J, double l, double K, double U) {
double t_0 = J * (exp(l) - exp(-l));
double tmp;
if (t_0 <= -1e-59) {
tmp = J * (l * fma(l, (l * 0.3333333333333333), 2.0));
} else if (t_0 <= 1e+186) {
tmp = fma(2.0, (l * J), U);
} else {
tmp = J * (0.3333333333333333 * (l * (l * l)));
}
return tmp;
}
# Three-regime alternative keyed on t_0 = J*(exp(l)-exp(-l)); middle regime is fma(2, l*J, U).
function code(J, l, K, U) t_0 = Float64(J * Float64(exp(l) - exp(Float64(-l)))) tmp = 0.0 if (t_0 <= -1e-59) tmp = Float64(J * Float64(l * fma(l, Float64(l * 0.3333333333333333), 2.0))); elseif (t_0 <= 1e+186) tmp = fma(2.0, Float64(l * J), U); else tmp = Float64(J * Float64(0.3333333333333333 * Float64(l * Float64(l * l)))); end return tmp end
(* Three-regime alternative keyed on t0 = J*(Exp[l]-Exp[-l]); middle regime is 2*l*J + U. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -1e-59], N[(J * N[(l * N[(l * N[(l * 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$0, 1e+186], N[(2.0 * N[(l * J), $MachinePrecision] + U), $MachinePrecision], N[(J * N[(0.3333333333333333 * N[(l * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := J \cdot \left(e^{\ell} - e^{-\ell}\right)\\
\mathbf{if}\;t\_0 \leq -1 \cdot 10^{-59}:\\
\;\;\;\;J \cdot \left(\ell \cdot \mathsf{fma}\left(\ell, \ell \cdot 0.3333333333333333, 2\right)\right)\\
\mathbf{elif}\;t\_0 \leq 10^{+186}:\\
\;\;\;\;\mathsf{fma}\left(2, \ell \cdot J, U\right)\\
\mathbf{else}:\\
\;\;\;\;J \cdot \left(0.3333333333333333 \cdot \left(\ell \cdot \left(\ell \cdot \ell\right)\right)\right)\\
\end{array}
\end{array}
if (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) < -1e-59Initial program 99.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6467.5
Simplified67.5%
Taylor expanded in J around inf
*-lowering-*.f64N/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6466.4
Simplified66.4%
Taylor expanded in K around 0
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6457.4
Simplified57.4%
if -1e-59 < (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) < 9.9999999999999998e185Initial program 70.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6499.3
Simplified99.3%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6484.0
Simplified84.0%
Taylor expanded in l around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6484.0
Simplified84.0%
if 9.9999999999999998e185 < (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) Initial program 100.0%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6475.7
Simplified75.7%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6450.5
Simplified50.5%
Taylor expanded in l around inf
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
unpow3N/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
associate-*r*N/A
unpow2N/A
unpow3N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6457.3
Simplified57.3%
Final simplification72.1%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 0.12)
(+ U (* t_0 (* J (* l (fma l (* l 0.3333333333333333) 2.0)))))
(fma (* 2.0 (sinh l)) J U))))
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= 0.12) {
tmp = U + (t_0 * (J * (l * fma(l, (l * 0.3333333333333333), 2.0))));
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
# Cosine-gated alternative: series-in-l when cos(K/2) <= 0.12, otherwise fma(2*sinh(l), J, U).
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= 0.12) tmp = Float64(U + Float64(t_0 * Float64(J * Float64(l * fma(l, Float64(l * 0.3333333333333333), 2.0))))); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
(* Cosine-gated alternative: series-in-l when Cos[K/2] <= 0.12, otherwise 2*Sinh[l]*J + U. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.12], N[(U + N[(t$95$0 * N[(J * N[(l * N[(l * N[(l * 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq 0.12:\\
\;\;\;\;U + t\_0 \cdot \left(J \cdot \left(\ell \cdot \mathsf{fma}\left(\ell, \ell \cdot 0.3333333333333333, 2\right)\right)\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.12Initial program 80.8%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6487.1
Simplified87.1%
if 0.12 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.6%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified95.6%
Final simplification93.1%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) 0.12) (fma l (* (fma l (* l 0.3333333333333333) 2.0) (* J (cos (* K 0.5)))) U) (fma (* 2.0 (sinh l)) J U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= 0.12) {
tmp = fma(l, (fma(l, (l * 0.3333333333333333), 2.0) * (J * cos((K * 0.5)))), U);
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
# Cosine-gated alternative: fused series form when cos(K/2) <= 0.12, otherwise fma(2*sinh(l), J, U).
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= 0.12) tmp = fma(l, Float64(fma(l, Float64(l * 0.3333333333333333), 2.0) * Float64(J * cos(Float64(K * 0.5)))), U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
(* Cosine-gated alternative: series form when Cos[K/2] <= 0.12, otherwise 2*Sinh[l]*J + U. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], 0.12], N[(l * N[(N[(l * N[(l * 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision] * N[(J * N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq 0.12:\\
\;\;\;\;\mathsf{fma}\left(\ell, \mathsf{fma}\left(\ell, \ell \cdot 0.3333333333333333, 2\right) \cdot \left(J \cdot \cos \left(K \cdot 0.5\right)\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.12Initial program 80.8%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified84.6%
if 0.12 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.6%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified95.6%
Final simplification92.4%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) -0.01) (fma 2.0 (* (cos (* K 0.5)) (* l J)) U) (fma (* 2.0 (sinh l)) J U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = fma(2.0, (cos((K * 0.5)) * (l * J)), U);
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
# Cosine-gated alternative: fma(2, cos(K*0.5)*(l*J), U) when cos(K/2) <= -0.01, else fma(2*sinh(l), J, U).
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = fma(2.0, Float64(cos(Float64(K * 0.5)) * Float64(l * J)), U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
(* Cosine-gated alternative: 2*Cos[K*0.5]*l*J + U when Cos[K/2] <= -0.01, otherwise 2*Sinh[l]*J + U. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(2.0 * N[(N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision] * N[(l * J), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\mathsf{fma}\left(2, \cos \left(K \cdot 0.5\right) \cdot \left(\ell \cdot J\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 81.0%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in l around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6476.5
Simplified76.5%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.3%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified94.9%
Final simplification90.2%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.01)
(fma
(*
l
(fma
(* l l)
(fma
l
(* l (fma l (* l 0.0003968253968253968) 0.016666666666666666))
0.3333333333333333)
2.0))
(* K (* K (fma J -0.125 (/ J (* K K)))))
U)
(fma (* 2.0 (sinh l)) J U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = fma((l * fma((l * l), fma(l, (l * fma(l, (l * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333), 2.0)), (K * (K * fma(J, -0.125, (J / (K * K))))), U);
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
# Cosine-gated alternative with a deeper odd series in l on the negative-cosine side.
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = fma(Float64(l * fma(Float64(l * l), fma(l, Float64(l * fma(l, Float64(l * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333), 2.0)), Float64(K * Float64(K * fma(J, -0.125, Float64(J / Float64(K * K))))), U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
(* Cosine-gated alternative with a deeper odd series in l on the negative-cosine side. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(l * N[(N[(l * l), $MachinePrecision] * N[(l * N[(l * N[(l * N[(l * 0.0003968253968253968), $MachinePrecision] + 0.016666666666666666), $MachinePrecision]), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] * N[(K * N[(K * N[(J * -0.125 + N[(J / N[(K * K), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\mathsf{fma}\left(\ell \cdot \mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell, \ell \cdot \mathsf{fma}\left(\ell, \ell \cdot 0.0003968253968253968, 0.016666666666666666\right), 0.3333333333333333\right), 2\right), K \cdot \left(K \cdot \mathsf{fma}\left(J, -0.125, \frac{J}{K \cdot K}\right)\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 81.0%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6495.6
Simplified95.6%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
distribute-rgt-outN/A
+-commutativeN/A
Simplified47.8%
Taylor expanded in K around inf
*-commutativeN/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6452.6
Simplified52.6%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.3%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified94.9%
Final simplification84.0%
(FPCore (J l K U) :precision binary64 (fma (* 2.0 (cos (* K 0.5))) (* (sinh l) J) U))
double code(double J, double l, double K, double U) {
return fma((2.0 * cos((K * 0.5))), (sinh(l) * J), U);
}
# Alternative: fma(2*cos(K*0.5), sinh(l)*J, U) with explicit Float64 rounding.
function code(J, l, K, U) return fma(Float64(2.0 * cos(Float64(K * 0.5))), Float64(sinh(l) * J), U) end
(* Alternative: (2*Cos[K*0.5])*(Sinh[l]*J) + U, rounded to $MachinePrecision at each step. *)
code[J_, l_, K_, U_] := N[(N[(2.0 * N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[Sinh[l], $MachinePrecision] * J), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2 \cdot \cos \left(K \cdot 0.5\right), \sinh \ell \cdot J, U\right)
\end{array}
Initial program 83.4%
*-commutativeN/A
*-commutativeN/A
sinh-undefN/A
associate-*l*N/A
associate-*r*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-evalN/A
*-commutativeN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f6499.9
Applied egg-rr99.9%
Final simplification99.9%
(FPCore (J l K U)
:precision binary64
(let* ((t_0
(fma
(* l l)
(fma
l
(* l (fma l (* l 0.0003968253968253968) 0.016666666666666666))
0.3333333333333333)
2.0)))
(if (<= (cos (/ K 2.0)) -0.01)
(fma (* l t_0) (* K (* K (fma J -0.125 (/ J (* K K))))) U)
(fma t_0 (* l J) U))))
double code(double J, double l, double K, double U) {
double t_0 = fma((l * l), fma(l, (l * fma(l, (l * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333), 2.0);
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = fma((l * t_0), (K * (K * fma(J, -0.125, (J / (K * K))))), U);
} else {
tmp = fma(t_0, (l * J), U);
}
return tmp;
}
# Shared-series alternative: polynomial t_0 in l feeds both the negative-cosine and default branches.
function code(J, l, K, U) t_0 = fma(Float64(l * l), fma(l, Float64(l * fma(l, Float64(l * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333), 2.0) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = fma(Float64(l * t_0), Float64(K * Float64(K * fma(J, -0.125, Float64(J / Float64(K * K))))), U); else tmp = fma(t_0, Float64(l * J), U); end return tmp end
(* Shared-series alternative: polynomial t0 in l feeds both the negative-cosine and default branches. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(N[(l * l), $MachinePrecision] * N[(l * N[(l * N[(l * N[(l * 0.0003968253968253968), $MachinePrecision] + 0.016666666666666666), $MachinePrecision]), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]}, If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(l * t$95$0), $MachinePrecision] * N[(K * N[(K * N[(J * -0.125 + N[(J / N[(K * K), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(t$95$0 * N[(l * J), $MachinePrecision] + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell, \ell \cdot \mathsf{fma}\left(\ell, \ell \cdot 0.0003968253968253968, 0.016666666666666666\right), 0.3333333333333333\right), 2\right)\\
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\mathsf{fma}\left(\ell \cdot t\_0, K \cdot \left(K \cdot \mathsf{fma}\left(J, -0.125, \frac{J}{K \cdot K}\right)\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t\_0, \ell \cdot J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 81.0%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6495.6
Simplified95.6%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
distribute-rgt-outN/A
+-commutativeN/A
Simplified47.8%
Taylor expanded in K around inf
*-commutativeN/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6452.6
Simplified52.6%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.3%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6496.5
Simplified96.5%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified91.4%
Final simplification81.4%
(FPCore (J l K U)
:precision binary64
(if (<= (/ K 2.0) 1e-69)
(fma (* 2.0 (sinh l)) J U)
(+
U
(*
(*
J
(*
l
(fma
(* l l)
(fma
l
(* l (fma (* l l) 0.0003968253968253968 0.016666666666666666))
0.3333333333333333)
2.0)))
(cos (/ K 2.0))))))
double code(double J, double l, double K, double U) {
double tmp;
if ((K / 2.0) <= 1e-69) {
tmp = fma((2.0 * sinh(l)), J, U);
} else {
tmp = U + ((J * (l * fma((l * l), fma(l, (l * fma((l * l), 0.0003968253968253968, 0.016666666666666666)), 0.3333333333333333), 2.0))) * cos((K / 2.0)));
}
return tmp;
}
# K-gated alternative: fma(2*sinh(l), J, U) for tiny K/2, otherwise U + series-in-l * cos(K/2).
function code(J, l, K, U) tmp = 0.0 if (Float64(K / 2.0) <= 1e-69) tmp = fma(Float64(2.0 * sinh(l)), J, U); else tmp = Float64(U + Float64(Float64(J * Float64(l * fma(Float64(l * l), fma(l, Float64(l * fma(Float64(l * l), 0.0003968253968253968, 0.016666666666666666)), 0.3333333333333333), 2.0))) * cos(Float64(K / 2.0)))); end return tmp end
(* K-gated alternative: 2*Sinh[l]*J + U for tiny K/2, otherwise U + series-in-l * Cos[K/2]. *)
code[J_, l_, K_, U_] := If[LessEqual[N[(K / 2.0), $MachinePrecision], 1e-69], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision], N[(U + N[(N[(J * N[(l * N[(N[(l * l), $MachinePrecision] * N[(l * N[(l * N[(N[(l * l), $MachinePrecision] * 0.0003968253968253968 + 0.016666666666666666), $MachinePrecision]), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{K}{2} \leq 10^{-69}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\mathbf{else}:\\
\;\;\;\;U + \left(J \cdot \left(\ell \cdot \mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell, \ell \cdot \mathsf{fma}\left(\ell \cdot \ell, 0.0003968253968253968, 0.016666666666666666\right), 0.3333333333333333\right), 2\right)\right)\right) \cdot \cos \left(\frac{K}{2}\right)\\
\end{array}
\end{array}
if (/.f64 K #s(literal 2 binary64)) < 9.9999999999999996e-70Initial program 81.9%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified87.4%
if 9.9999999999999996e-70 < (/.f64 K #s(literal 2 binary64)) Initial program 86.7%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6496.4
Simplified96.4%
Final simplification90.3%
(FPCore (J l K U)
:precision binary64
(if (<= (/ K 2.0) 1e-69)
(fma (* 2.0 (sinh l)) J U)
(+
U
(*
(cos (/ K 2.0))
(*
J
(*
l
(fma
(* l l)
(fma (* l l) 0.016666666666666666 0.3333333333333333)
2.0)))))))
double code(double J, double l, double K, double U) {
double tmp;
if ((K / 2.0) <= 1e-69) {
tmp = fma((2.0 * sinh(l)), J, U);
} else {
tmp = U + (cos((K / 2.0)) * (J * (l * fma((l * l), fma((l * l), 0.016666666666666666, 0.3333333333333333), 2.0))));
}
return tmp;
}
# K-gated alternative: fma(2*sinh(l), J, U) for tiny K/2, otherwise U + cos(K/2) * short series-in-l.
function code(J, l, K, U) tmp = 0.0 if (Float64(K / 2.0) <= 1e-69) tmp = fma(Float64(2.0 * sinh(l)), J, U); else tmp = Float64(U + Float64(cos(Float64(K / 2.0)) * Float64(J * Float64(l * fma(Float64(l * l), fma(Float64(l * l), 0.016666666666666666, 0.3333333333333333), 2.0))))); end return tmp end
(* K-gated alternative: 2*Sinh[l]*J + U for tiny K/2, otherwise U + Cos[K/2] * short series-in-l. *)
code[J_, l_, K_, U_] := If[LessEqual[N[(K / 2.0), $MachinePrecision], 1e-69], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision], N[(U + N[(N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision] * N[(J * N[(l * N[(N[(l * l), $MachinePrecision] * N[(N[(l * l), $MachinePrecision] * 0.016666666666666666 + 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\frac{K}{2} \leq 10^{-69}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\mathbf{else}:\\
\;\;\;\;U + \cos \left(\frac{K}{2}\right) \cdot \left(J \cdot \left(\ell \cdot \mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell \cdot \ell, 0.016666666666666666, 0.3333333333333333\right), 2\right)\right)\right)\\
\end{array}
\end{array}
if (/.f64 K #s(literal 2 binary64)) < 9.9999999999999996e-70Initial program 81.9%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified87.4%
if 9.9999999999999996e-70 < (/.f64 K #s(literal 2 binary64)) Initial program 86.7%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6495.2
Simplified95.2%
Final simplification89.9%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (* K 0.5)))
(t_1 (* J (* t_0 (* 0.3333333333333333 (* l (* l l))))))
(t_2 (* 2.0 (sinh l))))
(if (<= l -1e+113)
t_1
(if (<= l -0.003)
(+ U (/ J (/ 1.0 t_2)))
(if (<= l 0.00043)
(fma (* t_0 (* 2.0 l)) J U)
(if (<= l 5.5e+102) (fma t_2 J U) t_1))))))
double code(double J, double l, double K, double U) {
double t_0 = cos((K * 0.5));
double t_1 = J * (t_0 * (0.3333333333333333 * (l * (l * l))));
double t_2 = 2.0 * sinh(l);
double tmp;
if (l <= -1e+113) {
tmp = t_1;
} else if (l <= -0.003) {
tmp = U + (J / (1.0 / t_2));
} else if (l <= 0.00043) {
tmp = fma((t_0 * (2.0 * l)), J, U);
} else if (l <= 5.5e+102) {
tmp = fma(t_2, J, U);
} else {
tmp = t_1;
}
return tmp;
}
# Herbie alternative (Julia): same piecewise sinh/Taylor evaluation as the C version above.
function code(J, l, K, U) t_0 = cos(Float64(K * 0.5)) t_1 = Float64(J * Float64(t_0 * Float64(0.3333333333333333 * Float64(l * Float64(l * l))))) t_2 = Float64(2.0 * sinh(l)) tmp = 0.0 if (l <= -1e+113) tmp = t_1; elseif (l <= -0.003) tmp = Float64(U + Float64(J / Float64(1.0 / t_2))); elseif (l <= 0.00043) tmp = fma(Float64(t_0 * Float64(2.0 * l)), J, U); elseif (l <= 5.5e+102) tmp = fma(t_2, J, U); else tmp = t_1; end return tmp end
(* Herbie alternative (Mathematica): piecewise by l — cubic Taylor term at extreme |l|, 2*sinh(l) elsewhere, linearized 2*l near zero. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(J * N[(t$95$0 * N[(0.3333333333333333 * N[(l * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[l, -1e+113], t$95$1, If[LessEqual[l, -0.003], N[(U + N[(J / N[(1.0 / t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[l, 0.00043], N[(N[(t$95$0 * N[(2.0 * l), $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision], If[LessEqual[l, 5.5e+102], N[(t$95$2 * J + U), $MachinePrecision], t$95$1]]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(K \cdot 0.5\right)\\
t_1 := J \cdot \left(t\_0 \cdot \left(0.3333333333333333 \cdot \left(\ell \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\
t_2 := 2 \cdot \sinh \ell\\
\mathbf{if}\;\ell \leq -1 \cdot 10^{+113}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;\ell \leq -0.003:\\
\;\;\;\;U + \frac{J}{\frac{1}{t\_2}}\\
\mathbf{elif}\;\ell \leq 0.00043:\\
\;\;\;\;\mathsf{fma}\left(t\_0 \cdot \left(2 \cdot \ell\right), J, U\right)\\
\mathbf{elif}\;\ell \leq 5.5 \cdot 10^{+102}:\\
\;\;\;\;\mathsf{fma}\left(t\_2, J, U\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if l < -1e113 or 5.49999999999999981e102 < l Initial program 100.0%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64100.0
Simplified100.0%
Taylor expanded in J around inf
*-lowering-*.f64N/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64100.0
Simplified100.0%
Taylor expanded in l around inf
associate-*r*N/A
unpow3N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
unpow2N/A
unpow3N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64100.0
Simplified100.0%
if -1e113 < l < -0.0030000000000000001
Initial program 99.9%
*-commutativeN/A
associate-*r*N/A
flip--N/A
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-evalN/A
clear-numN/A
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified80.8%
if -0.0030000000000000001 < l < 4.29999999999999989e-4Initial program 70.0%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in l around 0
*-lowering-*.f6499.4
Simplified99.4%
if 4.29999999999999989e-4 < l < 5.49999999999999981e102Initial program 99.5%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in K around 0
Simplified72.2%
Final simplification95.0%
; Herbie alternative (FPCore): same structure as the previous piecewise form
; but with tighter branch thresholds around l = 0 (-0.00037 / 8.8e-5).
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (fma (* 2.0 (sinh l)) J U))
(t_1 (cos (* K 0.5)))
(t_2 (* J (* t_1 (* 0.3333333333333333 (* l (* l l)))))))
(if (<= l -4.8e+112)
t_2
(if (<= l -0.00037)
t_0
(if (<= l 8.8e-5)
(fma (* t_1 (* 2.0 l)) J U)
(if (<= l 5.5e+102) t_0 t_2))))))
double code(double J, double l, double K, double U) {
double t_0 = fma((2.0 * sinh(l)), J, U);
double t_1 = cos((K * 0.5));
double t_2 = J * (t_1 * (0.3333333333333333 * (l * (l * l))));
double tmp;
if (l <= -4.8e+112) {
tmp = t_2;
} else if (l <= -0.00037) {
tmp = t_0;
} else if (l <= 8.8e-5) {
tmp = fma((t_1 * (2.0 * l)), J, U);
} else if (l <= 5.5e+102) {
tmp = t_0;
} else {
tmp = t_2;
}
return tmp;
}
# Herbie alternative (Julia): piecewise by l with thresholds -4.8e112 / -0.00037 / 8.8e-5 / 5.5e102.
function code(J, l, K, U) t_0 = fma(Float64(2.0 * sinh(l)), J, U) t_1 = cos(Float64(K * 0.5)) t_2 = Float64(J * Float64(t_1 * Float64(0.3333333333333333 * Float64(l * Float64(l * l))))) tmp = 0.0 if (l <= -4.8e+112) tmp = t_2; elseif (l <= -0.00037) tmp = t_0; elseif (l <= 8.8e-5) tmp = fma(Float64(t_1 * Float64(2.0 * l)), J, U); elseif (l <= 5.5e+102) tmp = t_0; else tmp = t_2; end return tmp end
(* Herbie alternative (Mathematica): same piecewise-by-l scheme as the C/Julia versions of this alternative. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]}, Block[{t$95$1 = N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$2 = N[(J * N[(t$95$1 * N[(0.3333333333333333 * N[(l * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[l, -4.8e+112], t$95$2, If[LessEqual[l, -0.00037], t$95$0, If[LessEqual[l, 8.8e-5], N[(N[(t$95$1 * N[(2.0 * l), $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision], If[LessEqual[l, 5.5e+102], t$95$0, t$95$2]]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
t_1 := \cos \left(K \cdot 0.5\right)\\
t_2 := J \cdot \left(t\_1 \cdot \left(0.3333333333333333 \cdot \left(\ell \cdot \left(\ell \cdot \ell\right)\right)\right)\right)\\
\mathbf{if}\;\ell \leq -4.8 \cdot 10^{+112}:\\
\;\;\;\;t\_2\\
\mathbf{elif}\;\ell \leq -0.00037:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;\ell \leq 8.8 \cdot 10^{-5}:\\
\;\;\;\;\mathsf{fma}\left(t\_1 \cdot \left(2 \cdot \ell\right), J, U\right)\\
\mathbf{elif}\;\ell \leq 5.5 \cdot 10^{+102}:\\
\;\;\;\;t\_0\\
\mathbf{else}:\\
\;\;\;\;t\_2\\
\end{array}
\end{array}
if l < -4.8e112 or 5.49999999999999981e102 < l Initial program 100.0%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64100.0
Simplified100.0%
Taylor expanded in J around inf
*-lowering-*.f64N/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64100.0
Simplified100.0%
Taylor expanded in l around inf
associate-*r*N/A
unpow3N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
unpow2N/A
unpow3N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64100.0
Simplified100.0%
if -4.8e112 < l < -3.6999999999999999e-4 or 8.7999999999999998e-5 < l < 5.49999999999999981e102Initial program 99.7%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in K around 0
Simplified76.5%
if -3.6999999999999999e-4 < l < 8.7999999999999998e-5Initial program 70.0%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval99.9
Applied egg-rr99.9%
Taylor expanded in l around 0
*-lowering-*.f6499.4
Simplified99.4%
Final simplification95.0%
; Herbie alternative (FPCore): branch on J; for huge J the U term and higher Taylor terms are dropped.
(FPCore (J l K U) :precision binary64 (if (<= J 1.85e+148) (fma (* 2.0 (sinh l)) J U) (* J (* (cos (* K 0.5)) (* 2.0 l)))))
double code(double J, double l, double K, double U) {
double tmp;
if (J <= 1.85e+148) {
tmp = fma((2.0 * sinh(l)), J, U);
} else {
tmp = J * (cos((K * 0.5)) * (2.0 * l));
}
return tmp;
}
# Herbie alternative (Julia): branch on J; huge J drops U and linearizes sinh to 2*l.
function code(J, l, K, U) tmp = 0.0 if (J <= 1.85e+148) tmp = fma(Float64(2.0 * sinh(l)), J, U); else tmp = Float64(J * Float64(cos(Float64(K * 0.5)) * Float64(2.0 * l))); end return tmp end
(* Herbie alternative (Mathematica): branch on J; huge J drops U and linearizes sinh to 2*l. *)
code[J_, l_, K_, U_] := If[LessEqual[J, 1.85e+148], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision], N[(J * N[(N[Cos[N[(K * 0.5), $MachinePrecision]], $MachinePrecision] * N[(2.0 * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;J \leq 1.85 \cdot 10^{+148}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\mathbf{else}:\\
\;\;\;\;J \cdot \left(\cos \left(K \cdot 0.5\right) \cdot \left(2 \cdot \ell\right)\right)\\
\end{array}
\end{array}
if J < 1.8500000000000001e148Initial program 87.1%
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sinh-undefN/A
*-lowering-*.f64N/A
sinh-lowering-sinh.f64N/A
cos-lowering-cos.f64N/A
div-invN/A
*-lowering-*.f64N/A
metadata-eval100.0
Applied egg-rr100.0%
Taylor expanded in K around 0
Simplified84.3%
if 1.8500000000000001e148 < J Initial program 58.9%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6499.8
Simplified99.8%
Taylor expanded in J around inf
*-lowering-*.f64N/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6494.1
Simplified94.1%
Taylor expanded in l around 0
Simplified91.3%
Final simplification85.2%
; Herbie alternative (FPCore): branch-free degree-7 odd Taylor polynomial of
; 2*sinh(l) (coefficients 2, 1/3, 1/60, 1/2520), fused with l*J and U;
; the cos(K/2) factor is dropped (Taylor in K around 0).
(FPCore (J l K U)
:precision binary64
(fma
(fma
(* l l)
(fma
l
(* l (fma l (* l 0.0003968253968253968) 0.016666666666666666))
0.3333333333333333)
2.0)
(* l J)
U))
double code(double J, double l, double K, double U) {
return fma(fma((l * l), fma(l, (l * fma(l, (l * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333), 2.0), (l * J), U);
}
# Herbie alternative (Julia): branch-free degree-7 odd Taylor polynomial of 2*sinh(l), fused with l*J and U.
function code(J, l, K, U) return fma(fma(Float64(l * l), fma(l, Float64(l * fma(l, Float64(l * 0.0003968253968253968), 0.016666666666666666)), 0.3333333333333333), 2.0), Float64(l * J), U) end
(* Herbie alternative (Mathematica): branch-free degree-7 odd Taylor polynomial of 2*sinh(l), fused with l*J and U. *)
code[J_, l_, K_, U_] := N[(N[(N[(l * l), $MachinePrecision] * N[(l * N[(l * N[(l * N[(l * 0.0003968253968253968), $MachinePrecision] + 0.016666666666666666), $MachinePrecision]), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision] * N[(l * J), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell, \ell \cdot \mathsf{fma}\left(\ell, \ell \cdot 0.0003968253968253968, 0.016666666666666666\right), 0.3333333333333333\right), 2\right), \ell \cdot J, U\right)
\end{array}
Initial program 83.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6496.2
Simplified96.2%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified79.3%
Final simplification79.3%
; Herbie alternative (FPCore): same degree-7 Taylor polynomial, restructured
; as a Horner ladder in l^2 (fma chain), outermost fma in l.
(FPCore (J l K U)
:precision binary64
(fma
l
(*
J
(fma
(* l l)
(fma
(* l l)
(fma (* l l) 0.0003968253968253968 0.016666666666666666)
0.3333333333333333)
2.0))
U))
double code(double J, double l, double K, double U) {
return fma(l, (J * fma((l * l), fma((l * l), fma((l * l), 0.0003968253968253968, 0.016666666666666666), 0.3333333333333333), 2.0)), U);
}
# Herbie alternative (Julia): Horner ladder in l*l for the degree-7 Taylor polynomial of 2*sinh(l).
function code(J, l, K, U) return fma(l, Float64(J * fma(Float64(l * l), fma(Float64(l * l), fma(Float64(l * l), 0.0003968253968253968, 0.016666666666666666), 0.3333333333333333), 2.0)), U) end
(* Herbie alternative (Mathematica): Horner ladder in l*l for the degree-7 Taylor polynomial of 2*sinh(l). *)
code[J_, l_, K_, U_] := N[(l * N[(J * N[(N[(l * l), $MachinePrecision] * N[(N[(l * l), $MachinePrecision] * N[(N[(l * l), $MachinePrecision] * 0.0003968253968253968 + 0.016666666666666666), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\ell, J \cdot \mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell \cdot \ell, 0.0003968253968253968, 0.016666666666666666\right), 0.3333333333333333\right), 2\right), U\right)
\end{array}
Initial program 83.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6496.2
Simplified96.2%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
distribute-rgt-outN/A
+-commutativeN/A
Simplified59.7%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
Simplified79.3%
; Herbie alternative (FPCore): keeps only the degree-1 and degree-7 Taylor terms of 2*sinh(l).
(FPCore (J l K U) :precision binary64 (fma (fma (* l l) (* l (* 0.0003968253968253968 (* l (* l l)))) 2.0) (* l J) U))
double code(double J, double l, double K, double U) {
return fma(fma((l * l), (l * (0.0003968253968253968 * (l * (l * l)))), 2.0), (l * J), U);
}
# Herbie alternative (Julia): degree-1 plus degree-7 Taylor terms of 2*sinh(l), fused with l*J and U.
function code(J, l, K, U) return fma(fma(Float64(l * l), Float64(l * Float64(0.0003968253968253968 * Float64(l * Float64(l * l)))), 2.0), Float64(l * J), U) end
(* Herbie alternative (Mathematica): degree-1 plus degree-7 Taylor terms of 2*sinh(l), fused with l*J and U. *)
code[J_, l_, K_, U_] := N[(N[(N[(l * l), $MachinePrecision] * N[(l * N[(0.0003968253968253968 * N[(l * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision] * N[(l * J), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, \ell \cdot \left(0.0003968253968253968 \cdot \left(\ell \cdot \left(\ell \cdot \ell\right)\right)\right), 2\right), \ell \cdot J, U\right)
\end{array}
Initial program 83.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6496.2
Simplified96.2%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified79.3%
Taylor expanded in l around inf
metadata-evalN/A
pow-sqrN/A
associate-*l*N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
unpow2N/A
unpow3N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6479.1
Simplified79.1%
Final simplification79.1%
; Herbie alternative (FPCore): degree-5 Taylor polynomial of 2*sinh(l) (coefficients 2, 1/3, 1/60), fused with l*J and U.
(FPCore (J l K U) :precision binary64 (fma (fma (* l l) (fma l (* l 0.016666666666666666) 0.3333333333333333) 2.0) (* l J) U))
double code(double J, double l, double K, double U) {
return fma(fma((l * l), fma(l, (l * 0.016666666666666666), 0.3333333333333333), 2.0), (l * J), U);
}
# Herbie alternative (Julia): degree-5 Taylor polynomial of 2*sinh(l), fused with l*J and U.
function code(J, l, K, U) return fma(fma(Float64(l * l), fma(l, Float64(l * 0.016666666666666666), 0.3333333333333333), 2.0), Float64(l * J), U) end
(* Herbie alternative (Mathematica): degree-5 Taylor polynomial of 2*sinh(l), fused with l*J and U. *)
code[J_, l_, K_, U_] := N[(N[(N[(l * l), $MachinePrecision] * N[(l * N[(l * 0.016666666666666666), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision] * N[(l * J), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, \mathsf{fma}\left(\ell, \ell \cdot 0.016666666666666666, 0.3333333333333333\right), 2\right), \ell \cdot J, U\right)
\end{array}
Initial program 83.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6496.2
Simplified96.2%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified79.3%
Taylor expanded in l around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6477.0
Simplified77.0%
Final simplification77.0%
; Herbie alternative (FPCore): linear term 2*l*J + U for moderate l, cubic Taylor term at extreme |l|.
(FPCore (J l K U) :precision binary64 (let* ((t_0 (* J (* 0.3333333333333333 (* l (* l l)))))) (if (<= l -2.2e+27) t_0 (if (<= l 3.5e+71) (fma 2.0 (* l J) U) t_0))))
double code(double J, double l, double K, double U) {
double t_0 = J * (0.3333333333333333 * (l * (l * l)));
double tmp;
if (l <= -2.2e+27) {
tmp = t_0;
} else if (l <= 3.5e+71) {
tmp = fma(2.0, (l * J), U);
} else {
tmp = t_0;
}
return tmp;
}
# Herbie alternative (Julia): linear 2*l*J + U for moderate l, cubic Taylor term at extreme |l|.
function code(J, l, K, U) t_0 = Float64(J * Float64(0.3333333333333333 * Float64(l * Float64(l * l)))) tmp = 0.0 if (l <= -2.2e+27) tmp = t_0; elseif (l <= 3.5e+71) tmp = fma(2.0, Float64(l * J), U); else tmp = t_0; end return tmp end
(* Herbie alternative (Mathematica): linear 2*l*J + U for moderate l, cubic Taylor term at extreme |l|. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(J * N[(0.3333333333333333 * N[(l * N[(l * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[l, -2.2e+27], t$95$0, If[LessEqual[l, 3.5e+71], N[(2.0 * N[(l * J), $MachinePrecision] + U), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := J \cdot \left(0.3333333333333333 \cdot \left(\ell \cdot \left(\ell \cdot \ell\right)\right)\right)\\
\mathbf{if}\;\ell \leq -2.2 \cdot 10^{+27}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;\ell \leq 3.5 \cdot 10^{+71}:\\
\;\;\;\;\mathsf{fma}\left(2, \ell \cdot J, U\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if l < -2.1999999999999999e27 or 3.4999999999999999e71 < l Initial program 100.0%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6479.8
Simplified79.8%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6460.9
Simplified60.9%
Taylor expanded in l around inf
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
unpow3N/A
unpow2N/A
associate-*r*N/A
*-lowering-*.f64N/A
associate-*r*N/A
unpow2N/A
unpow3N/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6465.8
Simplified65.8%
if -2.1999999999999999e27 < l < 3.4999999999999999e71Initial program 73.8%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6491.0
Simplified91.0%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6475.8
Simplified75.8%
Taylor expanded in l around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6475.6
Simplified75.6%
Final simplification72.0%
; Herbie alternative (FPCore): degree-3 Taylor polynomial of 2*sinh(l) (coefficients 2, 1/3), fused with l*J and U.
(FPCore (J l K U) :precision binary64 (fma (fma l (* l 0.3333333333333333) 2.0) (* l J) U))
double code(double J, double l, double K, double U) {
return fma(fma(l, (l * 0.3333333333333333), 2.0), (l * J), U);
}
# Herbie alternative (Julia): degree-3 Taylor polynomial of 2*sinh(l), fused with l*J and U.
function code(J, l, K, U) return fma(fma(l, Float64(l * 0.3333333333333333), 2.0), Float64(l * J), U) end
(* Herbie alternative (Mathematica): degree-3 Taylor polynomial of 2*sinh(l), fused with l*J and U. *)
code[J_, l_, K_, U_] := N[(N[(l * N[(l * 0.3333333333333333), $MachinePrecision] + 2.0), $MachinePrecision] * N[(l * J), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\ell, \ell \cdot 0.3333333333333333, 2\right), \ell \cdot J, U\right)
\end{array}
Initial program 83.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6486.9
Simplified86.9%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6470.3
Simplified70.3%
Final simplification70.3%
; Herbie alternative (FPCore): fully linearized form 2*l*J + U in a single fused operation.
(FPCore (J l K U) :precision binary64 (fma 2.0 (* l J) U))
double code(double J, double l, double K, double U) {
return fma(2.0, (l * J), U);
}
# Herbie alternative (Julia): fully linearized form 2*l*J + U.
function code(J, l, K, U) return fma(2.0, Float64(l * J), U) end
(* Herbie alternative (Mathematica): fully linearized form 2*l*J + U. *)
code[J_, l_, K_, U_] := N[(2.0 * N[(l * J), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2, \ell \cdot J, U\right)
\end{array}
Initial program 83.4%
Taylor expanded in l around 0
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6486.9
Simplified86.9%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f6470.3
Simplified70.3%
Taylor expanded in l around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f6458.1
Simplified58.1%
Final simplification58.1%
; Degenerate Herbie alternative (Taylor in J around 0): the J-dependent term is dropped entirely, leaving U.
(FPCore (J l K U) :precision binary64 U)
/* Degenerate Herbie alternative (Taylor in J around 0): the whole
 * J-dependent term is dropped, leaving only the additive offset U. */
double code(double J, double l, double K, double U) {
return U;
}
! Degenerate Herbie alternative: all arguments except u are ignored;
! the expression collapses to the additive offset u.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u
end function
// Degenerate Herbie alternative: J, l, K are ignored; returns the offset U.
public static double code(double J, double l, double K, double U) {
return U;
}
def code(J, l, K, U):
    """Degenerate Herbie alternative: J, l, K are ignored; return the offset U."""
    return U
# Degenerate Herbie alternative (Julia): returns the offset U only.
function code(J, l, K, U) return U end
% Degenerate Herbie alternative (MATLAB): returns the offset U only.
function tmp = code(J, l, K, U) tmp = U; end
(* Degenerate Herbie alternative (Mathematica): returns the offset U only. *)
code[J_, l_, K_, U_] := U
\begin{array}{l}
\\
U
\end{array}
Initial program 83.4%
Taylor expanded in J around 0
Simplified39.2%
herbie shell --seed 2024204
; Original input program for this Herbie report:
; J * (exp(l) - exp(-l)) * cos(K/2) + U
(FPCore (J l K U)
:name "Maksimov and Kolovsky, Equation (4)"
:precision binary64
(+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))