
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
/* Computes J * (exp(l) - exp(-l)) * cos(K / 2) + U in binary64.
 * Note: exp(l) - exp(-l) equals 2*sinh(l); the explicit difference cancels
 * badly for small |l|, which the sinh-based alternatives below avoid. */
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Computes j * (exp(l) - exp(-l)) * cos(k / 2) + u in double precision.
! exp(l) - exp(-l) equals 2*sinh(l); the difference form loses accuracy
! for small |l| (see the sinh-based alternatives later in this report).
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
public static double code(double J, double l, double K, double U) {
    // J * (e^l - e^(-l)) * cos(K/2) + U, evaluated left-to-right exactly as
    // in the FPCore reference expression above.
    double expDiff = Math.exp(l) - Math.exp(-l);
    double halfAngleCos = Math.cos(K / 2.0);
    return J * expDiff * halfAngleCos + U;
}
def code(J, l, K, U):
    """Return J * (e**l - e**(-l)) * cos(K/2) + U, per the FPCore reference."""
    exp_diff = math.exp(l) - math.exp(-l)
    return J * exp_diff * math.cos(K / 2.0) + U
# Julia: same expression with each intermediate explicitly rounded to Float64.
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
% MATLAB: direct transcription of the initial program.
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
(* Mathematica: each intermediate rounded to $MachinePrecision. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
/* Duplicate listing of the initial program:
 * J * (exp(l) - exp(-l)) * cos(K / 2) + U. */
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! Duplicate listing of the initial program:
! j * (exp(l) - exp(-l)) * cos(k / 2) + u.
real(8) function code(j, l, k, u)
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
// Duplicate listing of the initial program: J * (e^l - e^(-l)) * cos(K/2) + U.
public static double code(double J, double l, double K, double U) {
return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
# Python: duplicate listing of the initial program.
def code(J, l, K, U): return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
# Julia: duplicate listing with explicit Float64 rounding at each step.
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
% MATLAB: duplicate listing of the initial program.
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
(* Mathematica: duplicate listing, each intermediate rounded to $MachinePrecision. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
(FPCore (J l K U) :precision binary64 (fma (* 2.0 (* J (sinh l))) (cos (* -0.5 K)) U))
double code(double J, double l, double K, double U) {
return fma((2.0 * (J * sinh(l))), cos((-0.5 * K)), U);
}
# Julia: fma-based alternative, 2*J*sinh(l) * cos(-K/2) + U with one final rounding.
function code(J, l, K, U) return fma(Float64(2.0 * Float64(J * sinh(l))), cos(Float64(-0.5 * K)), U) end
(* Mathematica rendering of the same alternative (no fused multiply-add; plain * and +). *)
code[J_, l_, K_, U_] := N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(-0.5 * K), $MachinePrecision]], $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), \cos \left(-0.5 \cdot K\right), U\right)
\end{array}
Initial program 86.4%
lift-+.f64 N/A
lift-*.f64 N/A
lower-fma.f64 86.4
Applied rewrites 100.0%
Final simplification 100.0%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (* 2.0 (* J (sinh l))))
(t_1 (* (* (- (exp l) (exp (- l))) J) (cos (/ K 2.0)))))
(if (<= t_1 -1e+290)
(fma t_0 1.0 U)
(if (<= t_1 2e+236)
(fma (* (cos (/ K -2.0)) J) (* 2.0 l) U)
(fma t_0 (fma (* K K) -0.125 1.0) U)))))
/* Regime-split evaluation of J*(e^l - e^(-l))*cos(K/2) + U (FPCore above).
 * t_1 re-evaluates the original product purely as a branch selector; the
 * cutoffs -1e290 and 2e236 come from the tool's regime search (see the log
 * below), not from any closed-form analysis — do not "simplify" them. */
double code(double J, double l, double K, double U) {
double t_0 = 2.0 * (J * sinh(l));
/* Branch selector: value of the original expression's product term. */
double t_1 = ((exp(l) - exp(-l)) * J) * cos((K / 2.0));
double tmp;
if (t_1 <= -1e+290) {
/* Huge-negative regime: cos factor dropped; fma(x, 1, U) = x + U, one rounding. */
tmp = fma(t_0, 1.0, U);
} else if (t_1 <= 2e+236) {
/* Middle regime: sinh(l) replaced by its small-l limit l (Taylor around 0). */
tmp = fma((cos((K / -2.0)) * J), (2.0 * l), U);
} else {
/* Huge-positive regime: cos(K/2) replaced by 1 - K^2/8 (Taylor around 0). */
tmp = fma(t_0, fma((K * K), -0.125, 1.0), U);
}
return tmp;
}
# Julia version of the regime-split alternative above (branches on the original product's value).
function code(J, l, K, U) t_0 = Float64(2.0 * Float64(J * sinh(l))) t_1 = Float64(Float64(Float64(exp(l) - exp(Float64(-l))) * J) * cos(Float64(K / 2.0))) tmp = 0.0 if (t_1 <= -1e+290) tmp = fma(t_0, 1.0, U); elseif (t_1 <= 2e+236) tmp = fma(Float64(cos(Float64(K / -2.0)) * J), Float64(2.0 * l), U); else tmp = fma(t_0, fma(Float64(K * K), -0.125, 1.0), U); end return tmp end
(* Mathematica version of the same regime-split alternative. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision] * J), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, -1e+290], N[(t$95$0 * 1.0 + U), $MachinePrecision], If[LessEqual[t$95$1, 2e+236], N[(N[(N[Cos[N[(K / -2.0), $MachinePrecision]], $MachinePrecision] * J), $MachinePrecision] * N[(2.0 * l), $MachinePrecision] + U), $MachinePrecision], N[(t$95$0 * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 2 \cdot \left(J \cdot \sinh \ell\right)\\
t_1 := \left(\left(e^{\ell} - e^{-\ell}\right) \cdot J\right) \cdot \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{+290}:\\
\;\;\;\;\mathsf{fma}\left(t\_0, 1, U\right)\\
\mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+236}:\\
\;\;\;\;\mathsf{fma}\left(\cos \left(\frac{K}{-2}\right) \cdot J, 2 \cdot \ell, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t\_0, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
\end{array}
\end{array}
if (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) < -1.00000000000000006e290
Initial program 100.0%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f64100.0
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites80.8%
if -1.00000000000000006e290 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) < 2.00000000000000011e236Initial program 75.8%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6499.9
Applied rewrites99.9%
Applied rewrites100.0%
if 2.00000000000000011e236 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) Initial program 99.7%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6499.7
Applied rewrites100.0%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6481.9
Applied rewrites81.9%
Final simplification91.8%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 0.775)
(+
(*
(*
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)
J)
t_0)
U)
(fma (* 2.0 (* J (sinh l))) 1.0 U))))
/* Regime split on t_0 = cos(K/2).  At or below the 0.775 cutoff, the sinh
 * factor is replaced by the odd Taylor polynomial of 2*sinh(l) through l^7
 * (coefficients 2, 1/3, 1/60, 1/2520, nested fma); otherwise 2*J*sinh(l)
 * is used with the cos factor dropped.  Cutoff chosen by the regime search. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= 0.775) {
tmp = (((fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l) * J) * t_0) + U;
} else {
tmp = fma((2.0 * (J * sinh(l))), 1.0, U);
}
return tmp;
}
# Julia version of the cos(K/2)-regime alternative above (degree-7 sinh polynomial branch).
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= 0.775) tmp = Float64(Float64(Float64(Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l) * J) * t_0) + U); else tmp = fma(Float64(2.0 * Float64(J * sinh(l))), 1.0, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.775], N[(N[(N[(N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 1.0 + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq 0.775:\\
\;\;\;\;\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right) \cdot J\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.775000000000000022Initial program 82.0%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6494.6
Applied rewrites94.6%
if 0.775000000000000022 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 88.7%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6488.7
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites99.7%
Final simplification98.0%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 0.775)
(+
(*
(*
(*
(fma
(fma 0.016666666666666666 (* l l) 0.3333333333333333)
(* l l)
2.0)
l)
J)
t_0)
U)
(fma (* 2.0 (* J (sinh l))) 1.0 U))))
/* Regime split on t_0 = cos(K/2), cutoff 0.775.  Low-cos branch uses the
 * odd Taylor polynomial of 2*sinh(l) through l^5 (coefficients 2, 1/3,
 * 1/60); otherwise 2*J*sinh(l) with the cos factor dropped. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= 0.775) {
tmp = (((fma(fma(0.016666666666666666, (l * l), 0.3333333333333333), (l * l), 2.0) * l) * J) * t_0) + U;
} else {
tmp = fma((2.0 * (J * sinh(l))), 1.0, U);
}
return tmp;
}
# Julia version of the cos(K/2)-regime alternative above (degree-5 sinh polynomial branch).
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= 0.775) tmp = Float64(Float64(Float64(Float64(fma(fma(0.016666666666666666, Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l) * J) * t_0) + U); else tmp = fma(Float64(2.0 * Float64(J * sinh(l))), 1.0, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.775], N[(N[(N[(N[(N[(N[(0.016666666666666666 * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 1.0 + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq 0.775:\\
\;\;\;\;\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(0.016666666666666666, \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right) \cdot J\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.775000000000000022Initial program 82.0%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6492.3
Applied rewrites92.3%
if 0.775000000000000022 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 88.7%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6488.7
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites99.7%
Final simplification97.2%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 0.66)
(+ (* (* (* (fma (* l l) 0.3333333333333333 2.0) l) J) t_0) U)
(fma (* 2.0 (* J (sinh l))) 1.0 U))))
/* Regime split on t_0 = cos(K/2), cutoff 0.66.  Low-cos branch uses the
 * degree-3 odd polynomial (2 + l^2/3)*l for 2*sinh(l); otherwise
 * 2*J*sinh(l) with the cos factor dropped. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= 0.66) {
tmp = (((fma((l * l), 0.3333333333333333, 2.0) * l) * J) * t_0) + U;
} else {
tmp = fma((2.0 * (J * sinh(l))), 1.0, U);
}
return tmp;
}
# Julia version of the cos(K/2)-regime alternative above (degree-3 sinh polynomial branch).
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= 0.66) tmp = Float64(Float64(Float64(Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l) * J) * t_0) + U); else tmp = fma(Float64(2.0 * Float64(J * sinh(l))), 1.0, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.66], N[(N[(N[(N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 1.0 + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq 0.66:\\
\;\;\;\;\left(\left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell\right) \cdot J\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.660000000000000031Initial program 82.3%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6490.7
Applied rewrites90.7%
if 0.660000000000000031 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 88.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6488.4
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites99.3%
Final simplification96.5%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) 0.66)
(fma
(* (* (fma 0.16666666666666666 (* (* l l) J) J) l) 2.0)
(cos (* -0.5 K))
U)
(fma (* 2.0 (* J (sinh l))) 1.0 U)))
/* Regime split on cos(K/2), cutoff 0.66.  Low-cos branch: fma of
 * 2*l*(J + J*l^2/6) (degree-3 odd approximation of 2*J*sinh(l)) with
 * cos(-K/2) and U; otherwise 2*J*sinh(l) with the cos factor dropped. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= 0.66) {
tmp = fma(((fma(0.16666666666666666, ((l * l) * J), J) * l) * 2.0), cos((-0.5 * K)), U);
} else {
tmp = fma((2.0 * (J * sinh(l))), 1.0, U);
}
return tmp;
}
# Julia version of the alternative above (degree-3 sinh approximation fused with cos(-K/2)).
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= 0.66) tmp = fma(Float64(Float64(fma(0.16666666666666666, Float64(Float64(l * l) * J), J) * l) * 2.0), cos(Float64(-0.5 * K)), U); else tmp = fma(Float64(2.0 * Float64(J * sinh(l))), 1.0, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], 0.66], N[(N[(N[(N[(0.16666666666666666 * N[(N[(l * l), $MachinePrecision] * J), $MachinePrecision] + J), $MachinePrecision] * l), $MachinePrecision] * 2.0), $MachinePrecision] * N[Cos[N[(-0.5 * K), $MachinePrecision]], $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 1.0 + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq 0.66:\\
\;\;\;\;\mathsf{fma}\left(\left(\mathsf{fma}\left(0.16666666666666666, \left(\ell \cdot \ell\right) \cdot J, J\right) \cdot \ell\right) \cdot 2, \cos \left(-0.5 \cdot K\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.660000000000000031Initial program 82.3%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6482.3
Applied rewrites99.9%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6487.2
Applied rewrites87.2%
if 0.660000000000000031 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 88.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6488.4
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites99.3%
Final simplification95.4%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) 0.66) (fma (* (* (fma (* l l) 0.3333333333333333 2.0) J) (cos (* 0.5 K))) l U) (fma (* 2.0 (* J (sinh l))) 1.0 U)))
/* Regime split on cos(K/2), cutoff 0.66.  Low-cos branch: degree-3 form
 * (l^2/3 + 2)*J*cos(K/2), fused with l and U in a single fma; otherwise
 * 2*J*sinh(l) with the cos factor dropped. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= 0.66) {
tmp = fma(((fma((l * l), 0.3333333333333333, 2.0) * J) * cos((0.5 * K))), l, U);
} else {
tmp = fma((2.0 * (J * sinh(l))), 1.0, U);
}
return tmp;
}
# Julia version of the alternative above ((l^2/3 + 2)*J*cos(K/2) fused with l and U).
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= 0.66) tmp = fma(Float64(Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * J) * cos(Float64(0.5 * K))), l, U); else tmp = fma(Float64(2.0 * Float64(J * sinh(l))), 1.0, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], 0.66], N[(N[(N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * J), $MachinePrecision] * N[Cos[N[(0.5 * K), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * l + U), $MachinePrecision], N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 1.0 + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq 0.66:\\
\;\;\;\;\mathsf{fma}\left(\left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot J\right) \cdot \cos \left(0.5 \cdot K\right), \ell, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.660000000000000031Initial program 82.3%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites87.2%
if 0.660000000000000031 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 88.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6488.4
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites99.3%
Final simplification95.4%
(FPCore (J l K U) :precision binary64 (if (<= (* (- (exp l) (exp (- l))) J) -1e+236) (* (* (fma (* K K) -0.25 2.0) l) J) (* (fma (/ (* J l) U) 2.0 1.0) U)))
/* Regime split on (exp(l) - exp(-l))*J at -1e236.  The huge-negative branch
 * drops U and uses (2 - K^2/4)*l*J; the other branch rewrites 2*J*l + U as
 * U * (2*J*l/U + 1).  NOTE(review): that rewrite divides by U, so U == 0
 * yields inf/NaN where the original expression was finite — the report's
 * own accuracy figure for this alternative (64.2%) reflects that. */
double code(double J, double l, double K, double U) {
double tmp;
if (((exp(l) - exp(-l)) * J) <= -1e+236) {
tmp = (fma((K * K), -0.25, 2.0) * l) * J;
} else {
tmp = fma(((J * l) / U), 2.0, 1.0) * U;
}
return tmp;
}
# Julia version of the alternative above (note the division by U in the else branch).
function code(J, l, K, U) tmp = 0.0 if (Float64(Float64(exp(l) - exp(Float64(-l))) * J) <= -1e+236) tmp = Float64(Float64(fma(Float64(K * K), -0.25, 2.0) * l) * J); else tmp = Float64(fma(Float64(Float64(J * l) / U), 2.0, 1.0) * U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := If[LessEqual[N[(N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision] * J), $MachinePrecision], -1e+236], N[(N[(N[(N[(K * K), $MachinePrecision] * -0.25 + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision], N[(N[(N[(N[(J * l), $MachinePrecision] / U), $MachinePrecision] * 2.0 + 1.0), $MachinePrecision] * U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(e^{\ell} - e^{-\ell}\right) \cdot J \leq -1 \cdot 10^{+236}:\\
\;\;\;\;\left(\mathsf{fma}\left(K \cdot K, -0.25, 2\right) \cdot \ell\right) \cdot J\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{J \cdot \ell}{U}, 2, 1\right) \cdot U\\
\end{array}
\end{array}
if (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) < -1.00000000000000005e236Initial program 99.3%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6420.2
Applied rewrites20.2%
Taylor expanded in K around 0
Applied rewrites35.0%
Taylor expanded in K around inf
Applied rewrites15.6%
Taylor expanded in J around inf
Applied rewrites34.9%
if -1.00000000000000005e236 < (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) Initial program 82.9%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6479.8
Applied rewrites79.8%
Taylor expanded in K around 0
Applied rewrites70.6%
Taylor expanded in U around inf
Applied rewrites72.3%
Final simplification64.2%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) 0.1) (fma (* (* 2.0 l) J) (cos (* 0.5 K)) U) (fma (* 2.0 (* J (sinh l))) 1.0 U)))
/* Regime split on cos(K/2), cutoff 0.1.  Low-cos branch: sinh(l)
 * linearized to l, giving fma(2*l*J, cos(K/2), U); otherwise 2*J*sinh(l)
 * with the cos factor dropped. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= 0.1) {
tmp = fma(((2.0 * l) * J), cos((0.5 * K)), U);
} else {
tmp = fma((2.0 * (J * sinh(l))), 1.0, U);
}
return tmp;
}
# Julia version of the alternative above (sinh linearized to l in the low-cos branch).
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= 0.1) tmp = fma(Float64(Float64(2.0 * l) * J), cos(Float64(0.5 * K)), U); else tmp = fma(Float64(2.0 * Float64(J * sinh(l))), 1.0, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], 0.1], N[(N[(N[(2.0 * l), $MachinePrecision] * J), $MachinePrecision] * N[Cos[N[(0.5 * K), $MachinePrecision]], $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 1.0 + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq 0.1:\\
\;\;\;\;\mathsf{fma}\left(\left(2 \cdot \ell\right) \cdot J, \cos \left(0.5 \cdot K\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.10000000000000001Initial program 89.4%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6471.5
Applied rewrites71.5%
if 0.10000000000000001 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 85.5%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6485.5
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites95.7%
Final simplification90.0%
(FPCore (J l K U) :precision binary64 (if (<= (* (- (exp l) (exp (- l))) J) -1e+236) (* (* (fma (* K K) -0.25 2.0) l) J) (fma (* 2.0 l) J U)))
/* Regime split on (exp(l) - exp(-l))*J at -1e236.  Huge-negative branch:
 * (2 - K^2/4)*l*J with U dropped; otherwise the fully linearized
 * fma(2*l, J, U) (sinh(l) -> l, cos(K/2) -> 1). */
double code(double J, double l, double K, double U) {
double tmp;
if (((exp(l) - exp(-l)) * J) <= -1e+236) {
tmp = (fma((K * K), -0.25, 2.0) * l) * J;
} else {
tmp = fma((2.0 * l), J, U);
}
return tmp;
}
# Julia version of the alternative above (linearized fma(2*l, J, U) fallback branch).
function code(J, l, K, U) tmp = 0.0 if (Float64(Float64(exp(l) - exp(Float64(-l))) * J) <= -1e+236) tmp = Float64(Float64(fma(Float64(K * K), -0.25, 2.0) * l) * J); else tmp = fma(Float64(2.0 * l), J, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := If[LessEqual[N[(N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision] * J), $MachinePrecision], -1e+236], N[(N[(N[(N[(K * K), $MachinePrecision] * -0.25 + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision], N[(N[(2.0 * l), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(e^{\ell} - e^{-\ell}\right) \cdot J \leq -1 \cdot 10^{+236}:\\
\;\;\;\;\left(\mathsf{fma}\left(K \cdot K, -0.25, 2\right) \cdot \ell\right) \cdot J\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \ell, J, U\right)\\
\end{array}
\end{array}
if (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) < -1.00000000000000005e236Initial program 99.3%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6420.2
Applied rewrites20.2%
Taylor expanded in K around 0
Applied rewrites35.0%
Taylor expanded in K around inf
Applied rewrites15.6%
Taylor expanded in J around inf
Applied rewrites34.9%
if -1.00000000000000005e236 < (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) Initial program 82.9%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6479.8
Applied rewrites79.8%
Taylor expanded in K around 0
Applied rewrites70.6%
Final simplification62.9%
(FPCore (J l K U) :precision binary64 (if (<= (* (- (exp l) (exp (- l))) J) -1e+236) (* (* (fma (* K K) -0.25 2.0) J) l) (fma (* 2.0 l) J U)))
/* Same regime split as the previous alternative; the huge-negative branch
 * multiplies in the order (fma(K*K, -0.25, 2) * J) * l instead of
 * (... * l) * J — algebraically identical, different rounding order. */
double code(double J, double l, double K, double U) {
double tmp;
if (((exp(l) - exp(-l)) * J) <= -1e+236) {
tmp = (fma((K * K), -0.25, 2.0) * J) * l;
} else {
tmp = fma((2.0 * l), J, U);
}
return tmp;
}
# Julia version of the alternative above (reordered multiplication in the first branch).
function code(J, l, K, U) tmp = 0.0 if (Float64(Float64(exp(l) - exp(Float64(-l))) * J) <= -1e+236) tmp = Float64(Float64(fma(Float64(K * K), -0.25, 2.0) * J) * l); else tmp = fma(Float64(2.0 * l), J, U); end return tmp end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := If[LessEqual[N[(N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision] * J), $MachinePrecision], -1e+236], N[(N[(N[(N[(K * K), $MachinePrecision] * -0.25 + 2.0), $MachinePrecision] * J), $MachinePrecision] * l), $MachinePrecision], N[(N[(2.0 * l), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left(e^{\ell} - e^{-\ell}\right) \cdot J \leq -1 \cdot 10^{+236}:\\
\;\;\;\;\left(\mathsf{fma}\left(K \cdot K, -0.25, 2\right) \cdot J\right) \cdot \ell\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \ell, J, U\right)\\
\end{array}
\end{array}
if (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) < -1.00000000000000005e236Initial program 99.3%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6420.2
Applied rewrites20.2%
Taylor expanded in K around 0
Applied rewrites35.0%
Taylor expanded in J around inf
Applied rewrites29.7%
if -1.00000000000000005e236 < (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) Initial program 82.9%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6479.8
Applied rewrites79.8%
Taylor expanded in K around 0
Applied rewrites70.6%
Final simplification61.8%
(FPCore (J l K U) :precision binary64 (fma (* 2.0 (* J (sinh l))) 1.0 U))
double code(double J, double l, double K, double U) {
return fma((2.0 * (J * sinh(l))), 1.0, U);
}
# Julia version of the cos-free alternative above (2*J*sinh(l) + U via fma).
function code(J, l, K, U) return fma(Float64(2.0 * Float64(J * sinh(l))), 1.0, U) end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := N[(N[(2.0 * N[(J * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * 1.0 + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2 \cdot \left(J \cdot \sinh \ell\right), 1, U\right)
\end{array}
Initial program 86.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6486.4
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites85.3%
Final simplification85.3%
(FPCore (J l K U)
:precision binary64
(fma
(*
(*
(fma
(* (fma 0.008333333333333333 (* l l) 0.16666666666666666) J)
(* l l)
J)
l)
2.0)
1.0
U))
/* Taylor form: 2*J*(l + l^3/6 + l^5/120) + U — the odd series of 2*sinh(l)
 * through l^5 (1/6 = 0.1666..., 1/120 = 0.008333...), folded into U with a
 * final fma.  K is unused in this alternative. */
double code(double J, double l, double K, double U) {
return fma(((fma((fma(0.008333333333333333, (l * l), 0.16666666666666666) * J), (l * l), J) * l) * 2.0), 1.0, U);
}
# Julia version of the degree-5 Taylor alternative above (K unused).
function code(J, l, K, U) return fma(Float64(Float64(fma(Float64(fma(0.008333333333333333, Float64(l * l), 0.16666666666666666) * J), Float64(l * l), J) * l) * 2.0), 1.0, U) end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := N[(N[(N[(N[(N[(N[(0.008333333333333333 * N[(l * l), $MachinePrecision] + 0.16666666666666666), $MachinePrecision] * J), $MachinePrecision] * N[(l * l), $MachinePrecision] + J), $MachinePrecision] * l), $MachinePrecision] * 2.0), $MachinePrecision] * 1.0 + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(0.008333333333333333, \ell \cdot \ell, 0.16666666666666666\right) \cdot J, \ell \cdot \ell, J\right) \cdot \ell\right) \cdot 2, 1, U\right)
\end{array}
Initial program 86.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6486.4
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites85.3%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites79.6%
Final simplification79.6%
(FPCore (J l K U) :precision binary64 (fma (* (* (fma 0.16666666666666666 (* (* l l) J) J) l) 2.0) 1.0 U))
/* Degree-3 form: 2*l*(J + J*l^2/6) + U, i.e. 2*J*(l + l^3/6) + U with a
 * single final rounding via fma.  K is unused in this alternative. */
double code(double J, double l, double K, double U) {
return fma(((fma(0.16666666666666666, ((l * l) * J), J) * l) * 2.0), 1.0, U);
}
# Julia version of the degree-3 Taylor alternative above (K unused).
function code(J, l, K, U) return fma(Float64(Float64(fma(0.16666666666666666, Float64(Float64(l * l) * J), J) * l) * 2.0), 1.0, U) end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := N[(N[(N[(N[(0.16666666666666666 * N[(N[(l * l), $MachinePrecision] * J), $MachinePrecision] + J), $MachinePrecision] * l), $MachinePrecision] * 2.0), $MachinePrecision] * 1.0 + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(0.16666666666666666, \left(\ell \cdot \ell\right) \cdot J, J\right) \cdot \ell\right) \cdot 2, 1, U\right)
\end{array}
Initial program 86.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6486.4
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites85.3%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6474.0
Applied rewrites74.0%
(FPCore (J l K U) :precision binary64 (fma (* (* (fma (* l l) 0.3333333333333333 2.0) J) l) 1.0 U))
/* Degree-3 form with different association: ((l^2/3 + 2) * J) * l folded
 * into U via fma; equals 2*J*(l + l^3/6) + U up to rounding order.
 * K is unused in this alternative. */
double code(double J, double l, double K, double U) {
return fma(((fma((l * l), 0.3333333333333333, 2.0) * J) * l), 1.0, U);
}
# Julia version of the reassociated degree-3 Taylor alternative above (K unused).
function code(J, l, K, U) return fma(Float64(Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * J) * l), 1.0, U) end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := N[(N[(N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * J), $MachinePrecision] * l), $MachinePrecision] * 1.0 + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot J\right) \cdot \ell, 1, U\right)
\end{array}
Initial program 86.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6486.4
Applied rewrites100.0%
Taylor expanded in K around 0
Applied rewrites85.3%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
distribute-lft-outN/A
lower-*.f64N/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6474.0
Applied rewrites74.0%
Final simplification74.0%
(FPCore (J l K U) :precision binary64 (fma (* 2.0 l) J U))
double code(double J, double l, double K, double U) {
return fma((2.0 * l), J, U);
}
# Julia version of the fully linearized alternative above (fma(2*l, J, U)).
function code(J, l, K, U) return fma(Float64(2.0 * l), J, U) end
(* Mathematica version of the same alternative. *)
code[J_, l_, K_, U_] := N[(N[(2.0 * l), $MachinePrecision] * J + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(2 \cdot \ell, J, U\right)
\end{array}
Initial program 86.4%
Taylor expanded in l around 0
+-commutativeN/A
associate-*r*N/A
associate-*r*N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6467.0
Applied rewrites67.0%
Taylor expanded in K around 0
Applied rewrites59.3%
herbie shell --seed 2024332
(FPCore (J l K U)
:name "Maksimov and Kolovsky, Equation (4)"
:precision binary64
(+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))