
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! IEEE-style fmax/fmin generics over mixed real(4)/real(8) arguments.
! Semantics follow C99 fmax/fmin: if exactly one argument is NaN, the
! other argument is returned; only when both are NaN is NaN returned.
! NaN detection uses ieee_is_nan instead of the fragile x /= x idiom,
! which compilers may fold to .false. under fast-math options.
module fmin_fmax_functions
use, intrinsic :: ieee_arithmetic, only: ieee_is_nan
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8) maximum with NaN handling.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (ieee_is_nan(x)) then
res = y
else if (ieee_is_nan(y)) then
res = x
else
res = max(x, y)
end if
end function fmax88
! real(4)/real(4) maximum with NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (ieee_is_nan(x)) then
res = y
else if (ieee_is_nan(y)) then
res = x
else
res = max(x, y)
end if
end function fmax44
! real(8)/real(4) maximum; real(4) argument promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (ieee_is_nan(x)) then
res = dble(y)
else if (ieee_is_nan(y)) then
res = x
else
res = max(x, dble(y))
end if
end function fmax84
! real(4)/real(8) maximum; real(4) argument promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (ieee_is_nan(x)) then
res = y
else if (ieee_is_nan(y)) then
res = dble(x)
else
res = max(dble(x), y)
end if
end function fmax48
! real(8)/real(8) minimum with NaN handling.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (ieee_is_nan(x)) then
res = y
else if (ieee_is_nan(y)) then
res = x
else
res = min(x, y)
end if
end function fmin88
! real(4)/real(4) minimum with NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (ieee_is_nan(x)) then
res = y
else if (ieee_is_nan(y)) then
res = x
else
res = min(x, y)
end if
end function fmin44
! real(8)/real(4) minimum; real(4) argument promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (ieee_is_nan(x)) then
res = dble(y)
else if (ieee_is_nan(y)) then
res = x
else
res = min(x, dble(y))
end if
end function fmin84
! real(4)/real(8) minimum; real(4) argument promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (ieee_is_nan(x)) then
res = y
else if (ieee_is_nan(y)) then
res = dble(x)
else
res = min(dble(x), y)
end if
end function fmin48
end module fmin_fmax_functions
! Evaluate j*(exp(l) - exp(-l))*cos(k/2) + u in double precision.
! Fix: added implicit none (the original relied on implicit typing rules).
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function code
/** Returns J * (e^l - e^-l) * cos(K/2) + U, evaluated in double precision. */
public static double code(double J, double l, double K, double U) {
    final double expDiff = Math.exp(l) - Math.exp(-l); // == 2*sinh(l)
    final double halfAngleCos = Math.cos(K / 2.0);
    return J * expDiff * halfAngleCos + U;
}
def code(J, l, K, U):
    """Return J * (e**l - e**-l) * cos(K/2) + U (binary64 evaluation)."""
    exp_diff = math.exp(l) - math.exp(-l)  # == 2*sinh(l)
    return J * exp_diff * math.cos(K / 2.0) + U
# Float64 evaluation of J*(exp(l) - exp(-l))*cos(K/2) + U; the Float64()
# wrappers force binary64 rounding after each intermediate operation.
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
% Evaluate J*(exp(l) - exp(-l))*cos(K/2) + U in double precision.
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
(* Evaluate J*(Exp[l] - Exp[-l])*Cos[K/2] + U, rounding every intermediate to $MachinePrecision to mimic binary64 arithmetic. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
Herbie found 17 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(j, l, k, u)
use fmin_fmax_functions
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
public static double code(double J, double l, double K, double U) {
return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U): return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
(FPCore (J l K U) :precision binary64 (fma (* (* 2.0 (sinh l)) J) (cos (/ K 2.0)) U))
/* Herbie alternative: exp(l)-exp(-l) rewritten as 2*sinh(l) (avoids the
   cancellation of the two exponentials near l == 0), and the final
   multiply-add fused into one correctly rounded fma. */
double code(double J, double l, double K, double U) {
return fma(((2.0 * sinh(l)) * J), cos((K / 2.0)), U);
}
function code(J, l, K, U) return fma(Float64(Float64(2.0 * sinh(l)) * J), cos(Float64(K / 2.0)), U) end
code[J_, l_, K_, U_] := N[(N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(2 \cdot \sinh \ell\right) \cdot J, \cos \left(\frac{K}{2}\right), U\right)
\end{array}
Initial program 86.3%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f64N/A
lift-cos.f64N/A
lift-/.f6499.9
Applied rewrites99.9%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 0.998)
(+
(*
(*
J
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l))
t_0)
U)
(fma (* 2.0 (sinh l)) J U))))
/* Herbie alternative branching on t_0 = cos(K/2).
   For t_0 <= 0.998 the factor exp(l)-exp(-l) is replaced by the degree-7
   odd Taylor polynomial of 2*sinh(l): 2l + l^3/3 + l^5/60 + l^7/2520
   (0.0003968253968253968 == 1/2520, 0.016666666666666666 == 1/60),
   evaluated Horner-style with fmas; otherwise the library sinh is used. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= 0.998) {
tmp = ((J * (fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l)) * t_0) + U;
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= 0.998) tmp = Float64(Float64(Float64(J * Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l)) * t_0) + U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.998], N[(N[(N[(J * N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq 0.998:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right)\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.998Initial program 86.4%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites94.7%
if 0.998 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6499.6
Applied rewrites99.6%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 -0.01)
(+
(*
(*
J
(*
(fma
(fma 0.016666666666666666 (* l l) 0.3333333333333333)
(* l l)
2.0)
l))
t_0)
U)
(fma (* 2.0 (sinh l)) J U))))
/* Herbie alternative branching on t_0 = cos(K/2).
   For t_0 <= -0.01, 2*sinh(l) is approximated by its degree-5 odd Taylor
   polynomial 2l + l^3/3 + l^5/60 (0.016666666666666666 == 1/60), evaluated
   with fused multiply-adds; otherwise the library sinh is used. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= -0.01) {
tmp = ((J * (fma(fma(0.016666666666666666, (l * l), 0.3333333333333333), (l * l), 2.0) * l)) * t_0) + U;
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= -0.01) tmp = Float64(Float64(Float64(J * Float64(fma(fma(0.016666666666666666, Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l)) * t_0) + U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, -0.01], N[(N[(N[(J * N[(N[(N[(0.016666666666666666 * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq -0.01:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(0.016666666666666666, \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right)\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 86.8%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6492.9
Applied rewrites92.9%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.6
Applied rewrites95.6%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 -0.01)
(+ (* (* J (* (fma (* l l) 0.3333333333333333 2.0) l)) t_0) U)
(fma (* 2.0 (sinh l)) J U))))
/* Herbie alternative branching on t_0 = cos(K/2).
   For t_0 <= -0.01, 2*sinh(l) is approximated by the degree-3 odd Taylor
   polynomial 2l + l^3/3 via a single fma; otherwise library sinh is used. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= -0.01) {
tmp = ((J * (fma((l * l), 0.3333333333333333, 2.0) * l)) * t_0) + U;
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= -0.01) tmp = Float64(Float64(Float64(J * Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l)) * t_0) + U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, -0.01], N[(N[(N[(J * N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq -0.01:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell\right)\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 86.8%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6487.3
Applied rewrites87.3%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.6
Applied rewrites95.6%
(FPCore (J l K U) :precision binary64 (if (<= (* J (- (exp l) (exp (- l)))) 0.0) (fma (+ J J) l U) (* (fma 2.0 l (/ U J)) J)))
/* Herbie alternative: the cos(K/2) factor is dropped entirely (Taylor
   expansion in K around 0, so cos(K/2) ~ 1) and exp(l)-exp(-l) is
   linearized to 2l.  Both branches therefore compute roughly 2*J*l + U,
   just associated differently depending on the sign of the original
   product J*(exp(l)-exp(-l)).  Note this is a low-accuracy alternative
   per the report's own scores. */
double code(double J, double l, double K, double U) {
double tmp;
if ((J * (exp(l) - exp(-l))) <= 0.0) {
tmp = fma((J + J), l, U);
} else {
tmp = fma(2.0, l, (U / J)) * J;
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (Float64(J * Float64(exp(l) - exp(Float64(-l)))) <= 0.0) tmp = fma(Float64(J + J), l, U); else tmp = Float64(fma(2.0, l, Float64(U / J)) * J); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 0.0], N[(N[(J + J), $MachinePrecision] * l + U), $MachinePrecision], N[(N[(2.0 * l + N[(U / J), $MachinePrecision]), $MachinePrecision] * J), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;J \cdot \left(e^{\ell} - e^{-\ell}\right) \leq 0:\\
\;\;\;\;\mathsf{fma}\left(J + J, \ell, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2, \ell, \frac{U}{J}\right) \cdot J\\
\end{array}
\end{array}
if (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) < -0.0Initial program 81.5%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6476.4
Applied rewrites76.4%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6465.3
Applied rewrites65.3%
lift-*.f64N/A
*-commutativeN/A
count-2-revN/A
lower-+.f6465.3
Applied rewrites65.3%
if -0.0 < (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) Initial program 99.1%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6431.7
Applied rewrites31.7%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6424.2
Applied rewrites24.2%
Taylor expanded in J around inf
*-commutativeN/A
lower-*.f64N/A
lower-fma.f64N/A
lower-/.f6429.7
Applied rewrites29.7%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (* 0.5 K))))
(if (or (<= l -9.5) (not (<= l 5.0)))
(* (* t_0 J) (* 2.0 (sinh l)))
(fma (* J l) (* t_0 2.0) U))))
/* Herbie alternative branching on the magnitude of l.
   For l <= -9.5 or l > 5 (the !(l <= 5.0) form also routes NaN l here),
   the exact 2*sinh(l) term dominates and the + U term is dropped.
   Otherwise 2*sinh(l) is linearized to 2l and fused with U via fma. */
double code(double J, double l, double K, double U) {
double t_0 = cos((0.5 * K));
double tmp;
if ((l <= -9.5) || !(l <= 5.0)) {
tmp = (t_0 * J) * (2.0 * sinh(l));
} else {
tmp = fma((J * l), (t_0 * 2.0), U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(0.5 * K)) tmp = 0.0 if ((l <= -9.5) || !(l <= 5.0)) tmp = Float64(Float64(t_0 * J) * Float64(2.0 * sinh(l))); else tmp = fma(Float64(J * l), Float64(t_0 * 2.0), U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(0.5 * K), $MachinePrecision]], $MachinePrecision]}, If[Or[LessEqual[l, -9.5], N[Not[LessEqual[l, 5.0]], $MachinePrecision]], N[(N[(t$95$0 * J), $MachinePrecision] * N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(J * l), $MachinePrecision] * N[(t$95$0 * 2.0), $MachinePrecision] + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(0.5 \cdot K\right)\\
\mathbf{if}\;\ell \leq -9.5 \lor \neg \left(\ell \leq 5\right):\\
\;\;\;\;\left(t\_0 \cdot J\right) \cdot \left(2 \cdot \sinh \ell\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J \cdot \ell, t\_0 \cdot 2, U\right)\\
\end{array}
\end{array}
if l < -9.5 or 5 < l Initial program 100.0%
Taylor expanded in J around inf
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6499.6
Applied rewrites99.6%
if -9.5 < l < 5Initial program 72.7%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6499.4
Applied rewrites99.4%
lift-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-cos.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lift-cos.f64N/A
lift-*.f6499.4
Applied rewrites99.4%
Final simplification99.5%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.01)
(fma
(*
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)
J)
(fma (* K K) -0.125 1.0)
U)
(fma (* 2.0 (sinh l)) J U)))
/* Herbie alternative: when cos(K/2) <= -0.01, both transcendentals are
   replaced by Taylor polynomials -- 2*sinh(l) by its degree-7 odd series
   (1/2520, 1/60, 1/3 coefficients) and cos(K/2) by fma(K*K, -0.125, 1.0)
   == 1 - K^2/8, its degree-2 series.  Otherwise library sinh is used. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = fma(((fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l) * J), fma((K * K), -0.125, 1.0), U);
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = fma(Float64(Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l) * J), fma(Float64(K * K), -0.125, 1.0), U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right) \cdot J, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 86.8%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites94.8%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6462.4
Applied rewrites62.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6462.4
lift-*.f64N/A
*-commutativeN/A
lower-*.f6462.4
Applied rewrites62.4%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.6
Applied rewrites95.6%
(FPCore (J l K U) :precision binary64 (fma J (* (* 2.0 (sinh l)) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
return fma(J, ((2.0 * sinh(l)) * cos((K / 2.0))), U);
}
function code(J, l, K, U) return fma(J, Float64(Float64(2.0 * sinh(l)) * cos(Float64(K / 2.0))), U) end
code[J_, l_, K_, U_] := N[(J * N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(J, \left(2 \cdot \sinh \ell\right) \cdot \cos \left(\frac{K}{2}\right), U\right)
\end{array}
Initial program 86.3%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites99.9%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.01)
(fma
(*
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)
J)
(fma (* K K) -0.125 1.0)
U)
(fma
(*
(*
(fma
(fma
(fma (* l l) 0.0003968253968253968 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)
J)
1.0
U)))
/* Herbie alternative: both branches use the degree-7 odd Taylor series of
   2*sinh(l).  The first branch also multiplies by 1 - K^2/8 (degree-2
   series for cos(K/2)); the second multiplies by 1.0 inside the fma,
   i.e. it drops the cosine factor entirely. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = fma(((fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l) * J), fma((K * K), -0.125, 1.0), U);
} else {
tmp = fma(((fma(fma(fma((l * l), 0.0003968253968253968, 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l) * J), 1.0, U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = fma(Float64(Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l) * J), fma(Float64(K * K), -0.125, 1.0), U); else tmp = fma(Float64(Float64(fma(fma(fma(Float64(l * l), 0.0003968253968253968, 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l) * J), 1.0, U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(N[(N[(N[(N[(N[(l * l), $MachinePrecision] * 0.0003968253968253968 + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision] * 1.0 + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right) \cdot J, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, 0.0003968253968253968, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right) \cdot J, 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 86.8%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites94.8%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6462.4
Applied rewrites62.4%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6462.4
lift-*.f64N/A
*-commutativeN/A
lower-*.f6462.4
Applied rewrites62.4%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites95.0%
Taylor expanded in K around 0
Applied rewrites90.7%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6490.7
Applied rewrites90.7%
Final simplification83.6%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.01)
(+
(*
(* (fma (* (* l l) J) 0.3333333333333333 (* 2.0 J)) l)
(fma (* K K) -0.125 1.0))
U)
(fma
(*
(*
(fma
(fma
(fma (* l l) 0.0003968253968253968 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)
J)
1.0
U)))
/* Herbie alternative: when cos(K/2) <= -0.01, uses a degree-3 series for
   2*sinh(l) folded with J (fma(l^2*J, 1/3, 2J) * l) times 1 - K^2/8 for
   cos(K/2); otherwise a degree-7 series for 2*sinh(l) with the cosine
   factor dropped (the trailing "* 1.0" fma argument). */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = ((fma(((l * l) * J), 0.3333333333333333, (2.0 * J)) * l) * fma((K * K), -0.125, 1.0)) + U;
} else {
tmp = fma(((fma(fma(fma((l * l), 0.0003968253968253968, 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l) * J), 1.0, U);
}
return tmp;
}
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = Float64(Float64(Float64(fma(Float64(Float64(l * l) * J), 0.3333333333333333, Float64(2.0 * J)) * l) * fma(Float64(K * K), -0.125, 1.0)) + U); else tmp = fma(Float64(Float64(fma(fma(fma(Float64(l * l), 0.0003968253968253968, 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l) * J), 1.0, U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(N[(N[(N[(N[(l * l), $MachinePrecision] * J), $MachinePrecision] * 0.3333333333333333 + N[(2.0 * J), $MachinePrecision]), $MachinePrecision] * l), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(N[(N[(N[(N[(N[(l * l), $MachinePrecision] * 0.0003968253968253968 + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J), $MachinePrecision] * 1.0 + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\left(\mathsf{fma}\left(\left(\ell \cdot \ell\right) \cdot J, 0.3333333333333333, 2 \cdot J\right) \cdot \ell\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right) + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, 0.0003968253968253968, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right) \cdot J, 1, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 86.8%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6483.9
Applied rewrites83.9%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6459.6
Applied rewrites59.6%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites95.0%
Taylor expanded in K around 0
Applied rewrites90.7%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6490.7
Applied rewrites90.7%
Final simplification82.8%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.01)
(+
(*
(* (fma (* (* l l) J) 0.3333333333333333 (* 2.0 J)) l)
(fma (* K K) -0.125 1.0))
U)
(fma
(*
(fma (fma (* l l) 0.016666666666666666 0.3333333333333333) (* l l) 2.0)
l)
J
U)))
/* Herbie alternative: when cos(K/2) <= -0.01, a degree-3 series for
   2*sinh(l) (folded with J) times 1 - K^2/8 for cos(K/2); otherwise a
   degree-5 series for 2*sinh(l) with the cosine factor dropped. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = ((fma(((l * l) * J), 0.3333333333333333, (2.0 * J)) * l) * fma((K * K), -0.125, 1.0)) + U;
} else {
tmp = fma((fma(fma((l * l), 0.016666666666666666, 0.3333333333333333), (l * l), 2.0) * l), J, U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = Float64(Float64(Float64(fma(Float64(Float64(l * l) * J), 0.3333333333333333, Float64(2.0 * J)) * l) * fma(Float64(K * K), -0.125, 1.0)) + U); else tmp = fma(Float64(fma(fma(Float64(l * l), 0.016666666666666666, 0.3333333333333333), Float64(l * l), 2.0) * l), J, U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(N[(N[(N[(N[(l * l), $MachinePrecision] * J), $MachinePrecision] * 0.3333333333333333 + N[(2.0 * J), $MachinePrecision]), $MachinePrecision] * l), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(N[(N[(N[(l * l), $MachinePrecision] * 0.016666666666666666 + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\left(\mathsf{fma}\left(\left(\ell \cdot \ell\right) \cdot J, 0.3333333333333333, 2 \cdot J\right) \cdot \ell\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right) + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, 0.016666666666666666, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002Initial program 86.8%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6483.9
Applied rewrites83.9%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6459.6
Applied rewrites59.6%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.6
Applied rewrites95.6%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6488.7
Applied rewrites88.7%
Final simplification81.3%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.01)
(fma (* (* l J) (fma (* K K) -0.125 1.0)) 2.0 U)
(fma
(*
(fma (fma (* l l) 0.016666666666666666 0.3333333333333333) (* l l) 2.0)
l)
J
U)))
/* Herbie alternative: when cos(K/2) <= -0.01, 2*sinh(l) is linearized to
   2l and cos(K/2) to 1 - K^2/8, giving fma(l*J*(1 - K^2/8), 2, U);
   otherwise a degree-5 Taylor series for 2*sinh(l) is used with the
   cosine factor dropped. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = fma(((l * J) * fma((K * K), -0.125, 1.0)), 2.0, U);
} else {
tmp = fma((fma(fma((l * l), 0.016666666666666666, 0.3333333333333333), (l * l), 2.0) * l), J, U);
}
return tmp;
}
# Herbie candidate (Julia rendering): regime split on cos(K/2) <= -0.01;
# both arms are fma-fused Taylor rewrites of J*(exp(l)-exp(-l))*cos(K/2) + U.
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = fma(Float64(Float64(l * J) * fma(Float64(K * K), -0.125, 1.0)), 2.0, U); else tmp = fma(Float64(fma(fma(Float64(l * l), 0.016666666666666666, 0.3333333333333333), Float64(l * l), 2.0) * l), J, U); end return tmp end
(* Herbie candidate (Mathematica rendering): the Cos branch is refactored into (l*J)*(1 - K^2/8)*2 + U; the else branch keeps the quintic odd polynomial in l. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(N[(l * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] * 2.0 + U), $MachinePrecision], N[(N[(N[(N[(N[(l * l), $MachinePrecision] * 0.016666666666666666 + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\mathsf{fma}\left(\left(\ell \cdot J\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right), 2, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, 0.016666666666666666, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002
Initial program 86.8%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6463.8
Applied rewrites63.8%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6451.4
Applied rewrites51.4%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.6
Applied rewrites95.6%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6488.7
Applied rewrites88.7%
Final simplification79.3%
; Herbie candidate: regime split on cos(K/2); the else branch is truncated to a cubic in l.
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) -0.01) (fma (* (* l J) (fma (* K K) -0.125 1.0)) 2.0 U) (fma (* (fma (* l l) 0.3333333333333333 2.0) l) J U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = fma(((l * J) * fma((K * K), -0.125, 1.0)), 2.0, U);
} else {
tmp = fma((fma((l * l), 0.3333333333333333, 2.0) * l), J, U);
}
return tmp;
}
# Herbie candidate (Julia rendering): regime split on cos(K/2) <= -0.01;
# the else arm is truncated to the cubic (2 + l*l/3)*l*J + U.
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = fma(Float64(Float64(l * J) * fma(Float64(K * K), -0.125, 1.0)), 2.0, U); else tmp = fma(Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l), J, U); end return tmp end
(* Herbie candidate (Mathematica rendering): else branch truncated to the cubic (2 + l^2/3)*l*J + U. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(N[(l * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] * 2.0 + U), $MachinePrecision], N[(N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\mathsf{fma}\left(\left(\ell \cdot J\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right), 2, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002
Initial program 86.8%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6463.8
Applied rewrites63.8%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6451.4
Applied rewrites51.4%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.6
Applied rewrites95.6%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6484.1
Applied rewrites84.1%
Final simplification75.8%
; Herbie candidate: the cos(K/2) <= -0.01 branch no longer adds U (as generated).
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) -0.01) (* (* (* J l) (fma (* K K) -0.125 1.0)) 2.0) (fma (* (fma (* l l) 0.3333333333333333 2.0) l) J U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.01) {
tmp = ((J * l) * fma((K * K), -0.125, 1.0)) * 2.0;
} else {
tmp = fma((fma((l * l), 0.3333333333333333, 2.0) * l), J, U);
}
return tmp;
}
# Herbie candidate (Julia rendering): the cos(K/2) <= -0.01 arm no longer
# adds U (as generated); the else arm keeps the cubic fma form with U.
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.01) tmp = Float64(Float64(Float64(J * l) * fma(Float64(K * K), -0.125, 1.0)) * 2.0); else tmp = fma(Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l), J, U); end return tmp end
(* Herbie candidate (Mathematica rendering): the Cos branch no longer adds U (as generated). *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.01], N[(N[(N[(J * l), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision], N[(N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.01:\\
\;\;\;\;\left(\left(J \cdot \ell\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right)\right) \cdot 2\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0100000000000000002
Initial program 86.8%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6463.8
Applied rewrites63.8%
Taylor expanded in J around inf
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lift-cos.f64N/A
lift-*.f64N/A
lift-*.f64N/A
*-commutativeN/A
lower-*.f6428.9
Applied rewrites28.9%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6439.0
Applied rewrites39.0%
if -0.0100000000000000002 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 86.1%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.6
Applied rewrites95.6%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6484.1
Applied rewrites84.1%
Final simplification72.7%
; Herbie candidate: unconditional cubic form (2 + l^2/3)*l*J + U; K is unused.
(FPCore (J l K U) :precision binary64 (fma (* (fma (* l l) 0.3333333333333333 2.0) l) J U))
double code(double J, double l, double K, double U) {
return fma((fma((l * l), 0.3333333333333333, 2.0) * l), J, U);
}
# Herbie candidate (Julia rendering): unconditional cubic fma form; K is unused.
function code(J, l, K, U) return fma(Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l), J, U) end
(* Herbie candidate (Mathematica rendering): unconditional cubic form; K is unused. *)
code[J_, l_, K_, U_] := N[(N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision] * J + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell, J, U\right)
\end{array}
Initial program 86.3%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6480.4
Applied rewrites80.4%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6471.9
Applied rewrites71.9%
Final simplification71.9%
; Herbie candidate: linearized to 2*J*l + U via a single fma; K is unused.
(FPCore (J l K U) :precision binary64 (fma (+ J J) l U))
double code(double J, double l, double K, double U) {
return fma((J + J), l, U);
}
# Herbie candidate (Julia rendering): linearized to 2*J*l + U; K is unused.
function code(J, l, K, U) return fma(Float64(J + J), l, U) end
(* Herbie candidate (Mathematica rendering): linearized to (J+J)*l + U; K is unused. *)
code[J_, l_, K_, U_] := N[(N[(J + J), $MachinePrecision] * l + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(J + J, \ell, U\right)
\end{array}
Initial program 86.3%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6464.4
Applied rewrites64.4%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6454.3
Applied rewrites54.3%
lift-*.f64N/A
*-commutativeN/A
count-2-revN/A
lower-+.f6454.3
Applied rewrites54.3%
; Herbie's final simplification: the whole expression reduces to U.
(FPCore (J l K U) :precision binary64 U)
/* Herbie's most aggressive simplification: the entire expression
 * reduces to the additive term U; J, l and K are ignored. */
double code(double J, double l, double K, double U) {
    (void) J;
    (void) l;
    (void) K;
    return U;
}
! NaN-aware fmax/fmin helpers matching the C99 fmax/fmin contract:
! when exactly one argument is NaN the other argument is returned
! (Fortran's intrinsic max/min make no such guarantee).  The generic
! interfaces cover every kind combination of real(4) and real(8),
! promoting the result to real(8) when the kinds are mixed.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8) maximum.  x /= x is true only when x is NaN, so:
! if x is NaN return y; else if y is NaN return x; else max(x, y).
! merge evaluates all of its arguments, which is harmless here.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4)/real(4) maximum; same NaN handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind maximum: real(4) y promoted with dble; result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind maximum: real(4) x promoted with dble; result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! real(8)/real(8) minimum; NaN handling mirrors fmax88.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(4)/real(4) minimum; NaN handling mirrors fmax88.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind minimum: real(4) y promoted with dble; result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind minimum: real(4) x promoted with dble; result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-simplified form of j*(exp(l) - exp(-l))*cos(k/2) + u.
! The derivation (Taylor expansion in J around 0) reduced the whole
! expression to u, so j, l and k are intentionally unused.
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u
end function
/**
 * Herbie-simplified form of J*(exp(l) - exp(-l))*cos(K/2) + U: the
 * rewrite reduces the whole expression to U, so J, l and K are
 * intentionally ignored.
 */
public static double code(double J, double l, double K, double U) {
return U;
}
def code(J, l, K, U):
    """Herbie-simplified expression: collapses to the additive term U.

    The parameters J, l and K are intentionally unused.
    """
    result = U
    return result
# Herbie-simplified expression: the whole formula collapses to U;
# J, l and K are intentionally unused.
function code(J, l, K, U)
    return U
end
% Herbie-simplified form of J*(exp(l)-exp(-l))*cos(K/2) + U: the whole
% expression reduces to U; J, l and K are intentionally unused.
function tmp = code(J, l, K, U) tmp = U; end
(* Herbie's final simplification: the whole expression reduces to U. *)
code[J_, l_, K_, U_] := U
\begin{array}{l}
\\
U
\end{array}
Initial program 86.3%
Taylor expanded in J around 0
Applied rewrites37.2%
herbie shell --seed 2025086
; Original input program: J*(exp(l) - exp(-l))*cos(K/2) + U.
(FPCore (J l K U)
:name "Maksimov and Kolovsky, Equation (4)"
:precision binary64
(+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))