
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! NaN-aware maximum/minimum wrappers. The Fortran intrinsics max/min have
! no specified NaN behaviour, so each wrapper uses merge() with the
! x /= x test (true only when x is NaN) to return the other operand when
! exactly one argument is NaN, matching C's fmax/fmin semantics.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on argument kinds:
! 88 = both real(8); 44 = both real(4); 84/48 = mixed, result real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax for two real(8): if x is NaN return y; else if y is NaN return x;
! otherwise max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for two real(4), same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8) x with real(4) y promoted via dble(); result real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4) x promoted via dble() with real(8) y; result real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin for two real(8): if x is NaN return y; else if y is NaN return x;
! otherwise min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for two real(4), same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8) x with real(4) y promoted via dble(); result real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4) x promoted via dble() with real(8) y; result real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Double-precision evaluation of J*(exp(l) - exp(-l))*cos(K/2) + U.
! Note: exp(l) - exp(-l) = 2*sinh(l); the explicit difference suffers
! catastrophic cancellation for small |l| (this is the report's
! unimproved initial program, kept as-is by design).
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
/** Evaluates J*(e^l - e^(-l))*cos(K/2) + U in double precision. */
public static double code(double J, double l, double K, double U) {
    double expDiff = Math.exp(l) - Math.exp(-l);   // = 2*sinh(l)
    double halfAngleCos = Math.cos(K / 2.0);
    return J * expDiff * halfAngleCos + U;
}
def code(J, l, K, U):
    """Evaluate J*(e**l - e**(-l))*cos(K/2) + U in float arithmetic."""
    exp_diff = math.exp(l) - math.exp(-l)  # = 2*sinh(l)
    return J * exp_diff * math.cos(K / 2.0) + U
# Evaluates J*(exp(l) - exp(-l))*cos(K/2) + U; the Float64(...) wrappers pin
# every intermediate to binary64, mirroring the FPCore :precision annotation.
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
% Evaluates J*(exp(l) - exp(-l))*cos(K/2) + U in double precision.
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
(* Evaluates J*(E^l - E^(-l))*Cos[K/2] + U, rounding every intermediate to
   $MachinePrecision to mimic binary64 evaluation. *)
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
/* Initial program (repeated in the alternatives table):
 * J*(e^l - e^(-l))*cos(K/2) + U in binary64.
 * exp(l) - exp(-l) = 2*sinh(l) and cancels for small |l|. */
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! NaN-aware maximum/minimum wrappers (duplicate copy emitted with each
! report section). merge() with the x /= x test (true only for NaN)
! returns the other operand when exactly one argument is NaN, matching
! C's fmax/fmin semantics, which the intrinsics max/min do not specify.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on argument kinds:
! 88 = both real(8); 44 = both real(4); 84/48 = mixed, result real(8).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax for two real(8): if x is NaN return y; else if y is NaN return x;
! otherwise max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for two real(4), same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8) x with real(4) y promoted via dble(); result real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4) x promoted via dble() with real(8) y; result real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin for two real(8): if x is NaN return y; else if y is NaN return x;
! otherwise min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for two real(4), same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8) x with real(4) y promoted via dble(); result real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4) x promoted via dble() with real(8) y; result real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Double-precision evaluation of J*(exp(l) - exp(-l))*cos(K/2) + U.
! Note: exp(l) - exp(-l) = 2*sinh(l); the explicit difference cancels
! for small |l|. NOTE(review): no implicit none here; all dummies are
! declared explicitly so nothing relies on implicit typing.
real(8) function code(j, l, k, u)
! fmax/fmin are imported by the generator but unused in this expression.
use fmin_fmax_functions
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
public static double code(double J, double l, double K, double U) {
return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
def code(J, l, K, U): return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
\end{array}
(FPCore (J l K U) :precision binary64 (fma J (* (* 2.0 (sinh l)) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
return fma(J, ((2.0 * sinh(l)) * cos((K / 2.0))), U);
}
function code(J, l, K, U) return fma(J, Float64(Float64(2.0 * sinh(l)) * cos(Float64(K / 2.0))), U) end
code[J_, l_, K_, U_] := N[(J * N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(J, \left(2 \cdot \sinh \ell\right) \cdot \cos \left(\frac{K}{2}\right), U\right)
\end{array}
Initial program 85.9%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))))
(t_1 (cos (* 0.5 K))))
(if (or (<= t_0 (- INFINITY)) (not (<= t_0 5e-224)))
(* (* t_1 J) (* 2.0 (sinh l)))
(fma J (* (* l 2.0) t_1) U))))
/* Regime-split alternative. t_0 recomputes the original expression's
 * value; the branch condition routes t_0 = -inf, t_0 > 5e-224, and NaN
 * (both comparisons false, so the negation fires) to the 2*sinh(l)
 * form, and tiny-magnitude results to an fma using the small-l
 * linearization exp(l) - exp(-l) ~ 2l.  The 5e-224 threshold comes
 * from Herbie's error sampling, not from analysis. */
double code(double J, double l, double K, double U) {
double t_0 = (J * (exp(l) - exp(-l))) * cos((K / 2.0));
double t_1 = cos((0.5 * K));
double tmp;
if ((t_0 <= -((double) INFINITY)) || !(t_0 <= 5e-224)) {
tmp = (t_1 * J) * (2.0 * sinh(l));
} else {
tmp = fma(J, ((l * 2.0) * t_1), U);
}
return tmp;
}
function code(J, l, K, U) t_0 = Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) t_1 = cos(Float64(0.5 * K)) tmp = 0.0 if ((t_0 <= Float64(-Inf)) || !(t_0 <= 5e-224)) tmp = Float64(Float64(t_1 * J) * Float64(2.0 * sinh(l))); else tmp = fma(J, Float64(Float64(l * 2.0) * t_1), U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[Cos[N[(0.5 * K), $MachinePrecision]], $MachinePrecision]}, If[Or[LessEqual[t$95$0, (-Infinity)], N[Not[LessEqual[t$95$0, 5e-224]], $MachinePrecision]], N[(N[(t$95$1 * J), $MachinePrecision] * N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(J * N[(N[(l * 2.0), $MachinePrecision] * t$95$1), $MachinePrecision] + U), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right)\\
t_1 := \cos \left(0.5 \cdot K\right)\\
\mathbf{if}\;t\_0 \leq -\infty \lor \neg \left(t\_0 \leq 5 \cdot 10^{-224}\right):\\
\;\;\;\;\left(t\_1 \cdot J\right) \cdot \left(2 \cdot \sinh \ell\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J, \left(\ell \cdot 2\right) \cdot t\_1, U\right)\\
\end{array}
\end{array}
if (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) < -inf.0 or 4.9999999999999999e-224 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) Initial program 99.9%
Taylor expanded in J around inf
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f64100.0
Applied rewrites100.0%
if -inf.0 < (*.f64 (*.f64 J (-.f64 (exp.f64 l) (exp.f64 (neg.f64 l)))) (cos.f64 (/.f64 K #s(literal 2 binary64)))) < 4.9999999999999999e-224Initial program 69.2%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in l around 0
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-cos.f64N/A
lift-*.f64100.0
Applied rewrites100.0%
Final simplification100.0%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 -0.5)
(+ (* (* J (* (fma (* l l) 0.3333333333333333 2.0) l)) t_0) U)
(if (<= t_0 -0.02)
(+
(*
(*
J
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l))
(fma (* K K) -0.125 1.0))
U)
(fma (* 2.0 (sinh l)) J U)))))
/* Three-regime alternative keyed on t_0 = cos(K/2).  The nested fma
 * polynomials are Taylor expansions of 2*sinh(l) about l = 0:
 * degree 3 (2 + l^2/3) for t_0 <= -0.5; degree 7
 * (2 + l^2/3 + l^4/60 + l^6/2520) combined with 1 - K^2/8 ~ cos(K/2)
 * for t_0 <= -0.02; otherwise sinh is evaluated directly.  Thresholds
 * come from Herbie's error sampling. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= -0.5) {
tmp = ((J * (fma((l * l), 0.3333333333333333, 2.0) * l)) * t_0) + U;
} else if (t_0 <= -0.02) {
tmp = ((J * (fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l)) * fma((K * K), -0.125, 1.0)) + U;
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= -0.5) tmp = Float64(Float64(Float64(J * Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l)) * t_0) + U); elseif (t_0 <= -0.02) tmp = Float64(Float64(Float64(J * Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l)) * fma(Float64(K * K), -0.125, 1.0)) + U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, -0.5], N[(N[(N[(J * N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], If[LessEqual[t$95$0, -0.02], N[(N[(N[(J * N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq -0.5:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell\right)\right) \cdot t\_0 + U\\
\mathbf{elif}\;t\_0 \leq -0.02:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right)\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right) + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.5Initial program 89.7%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6485.0
Applied rewrites85.0%
if -0.5 < (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 94.5%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites80.6%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6487.3
Applied rewrites87.3%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.7
Applied rewrites95.7%
Final simplification93.6%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 0.9705)
(+
(*
(*
J
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l))
t_0)
U)
(fma (* 2.0 (sinh l)) J U))))
/* Two-regime alternative keyed on t_0 = cos(K/2).  For t_0 <= 0.9705
 * a degree-7 Taylor expansion of 2*sinh(l) (2 + l^2/3 + l^4/60 +
 * l^6/2520, times l) is multiplied by the exact cosine; otherwise the
 * cos factor is approximated as 1 and dropped, leaving
 * fma(2*sinh(l), J, U).  Threshold from Herbie's sampling. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= 0.9705) {
tmp = ((J * (fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l)) * t_0) + U;
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= 0.9705) tmp = Float64(Float64(Float64(J * Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l)) * t_0) + U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.9705], N[(N[(N[(J * N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq 0.9705:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right)\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.970500000000000029Initial program 86.6%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites93.3%
if 0.970500000000000029 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 85.3%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f64100.0
Applied rewrites100.0%
Final simplification97.0%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 0.9705)
(+
(*
(*
J
(*
(fma
(fma 0.016666666666666666 (* l l) 0.3333333333333333)
(* l l)
2.0)
l))
t_0)
U)
(fma (* 2.0 (sinh l)) J U))))
/* Two-regime alternative keyed on t_0 = cos(K/2).  For t_0 <= 0.9705
 * a degree-5 Taylor expansion of 2*sinh(l) (2 + l^2/3 + l^4/60,
 * times l) is multiplied by the exact cosine; otherwise the cos factor
 * is approximated as 1 and dropped, leaving fma(2*sinh(l), J, U).
 * Threshold from Herbie's sampling. */
double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= 0.9705) {
tmp = ((J * (fma(fma(0.016666666666666666, (l * l), 0.3333333333333333), (l * l), 2.0) * l)) * t_0) + U;
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= 0.9705) tmp = Float64(Float64(Float64(J * Float64(fma(fma(0.016666666666666666, Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l)) * t_0) + U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, 0.9705], N[(N[(N[(J * N[(N[(N[(0.016666666666666666 * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq 0.9705:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(0.016666666666666666, \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right)\right) \cdot t\_0 + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < 0.970500000000000029Initial program 86.6%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6488.4
Applied rewrites88.4%
if 0.970500000000000029 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 85.3%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f64100.0
Applied rewrites100.0%
Final simplification94.8%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.02)
(+
(*
(*
J
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l))
(fma (* K K) -0.125 1.0))
U)
(fma (* 2.0 (sinh l)) J U)))
/* Two-regime alternative.  When cos(K/2) <= -0.02, a degree-7 Taylor
 * expansion of 2*sinh(l) is multiplied by 1 - K^2/8 (the degree-2
 * expansion of cos(K/2)); otherwise sinh is evaluated directly and the
 * cos factor is approximated as 1.  Threshold from Herbie's sampling. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.02) {
tmp = ((J * (fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l)) * fma((K * K), -0.125, 1.0)) + U;
} else {
tmp = fma((2.0 * sinh(l)), J, U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.02) tmp = Float64(Float64(Float64(J * Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l)) * fma(Float64(K * K), -0.125, 1.0)) + U); else tmp = fma(Float64(2.0 * sinh(l)), J, U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.02], N[(N[(N[(J * N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision]), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(N[(2.0 * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\
\;\;\;\;\left(J \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\right)\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right) + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(2 \cdot \sinh \ell, J, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 91.1%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites90.9%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6468.9
Applied rewrites68.9%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sinh-undefN/A
lower-*.f64N/A
lower-sinh.f6495.7
Applied rewrites95.7%
Final simplification90.1%
(FPCore (J l K U)
:precision binary64
(let* ((t_0
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)))
(if (<= (cos (/ K 2.0)) -0.02)
(+ (* (* J t_0) (fma (* K K) -0.125 1.0)) U)
(fma J t_0 U))))
/* Variant hoisting t_0, a degree-7 Taylor expansion of 2*sinh(l)
 * (2 + l^2/3 + l^4/60 + l^6/2520, times l), shared by both regimes.
 * When cos(K/2) <= -0.02 it is scaled by 1 - K^2/8 ~ cos(K/2);
 * otherwise the cos factor is approximated as 1 and the whole result
 * folds into one fma.  Threshold from Herbie's sampling. */
double code(double J, double l, double K, double U) {
double t_0 = fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l;
double tmp;
if (cos((K / 2.0)) <= -0.02) {
tmp = ((J * t_0) * fma((K * K), -0.125, 1.0)) + U;
} else {
tmp = fma(J, t_0, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.02) tmp = Float64(Float64(Float64(J * t_0) * fma(Float64(K * K), -0.125, 1.0)) + U); else tmp = fma(J, t_0, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision]}, If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.02], N[(N[(N[(J * t$95$0), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(J * t$95$0 + U), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell\\
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\
\;\;\;\;\left(J \cdot t\_0\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right) + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J, t\_0, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 91.1%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites90.9%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6468.9
Applied rewrites68.9%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in K around 0
rec-expN/A
sinh-undef-revN/A
*-commutativeN/A
lower-*.f64N/A
lift-sinh.f6495.7
Applied rewrites95.7%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites90.4%
Final simplification85.9%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.02)
(+
(*
(* (fma (* (* l l) J) 0.3333333333333333 (* 2.0 J)) l)
(fma (* K K) -0.125 1.0))
U)
(fma
J
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)
U)))
/* Mixed-order variant.  When cos(K/2) <= -0.02: degree-3 expansion of
 * 2*sinh(l) (the fma computes l^2*J/3 + 2J) times 1 - K^2/8.
 * Otherwise: degree-7 expansion of 2*sinh(l) inside an fma, with the
 * cos factor approximated as 1.  Threshold from Herbie's sampling. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.02) {
tmp = ((fma(((l * l) * J), 0.3333333333333333, (2.0 * J)) * l) * fma((K * K), -0.125, 1.0)) + U;
} else {
tmp = fma(J, (fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l), U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.02) tmp = Float64(Float64(Float64(fma(Float64(Float64(l * l) * J), 0.3333333333333333, Float64(2.0 * J)) * l) * fma(Float64(K * K), -0.125, 1.0)) + U); else tmp = fma(J, Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l), U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.02], N[(N[(N[(N[(N[(N[(l * l), $MachinePrecision] * J), $MachinePrecision] * 0.3333333333333333 + N[(2.0 * J), $MachinePrecision]), $MachinePrecision] * l), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision], N[(J * N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\
\;\;\;\;\left(\mathsf{fma}\left(\left(\ell \cdot \ell\right) \cdot J, 0.3333333333333333, 2 \cdot J\right) \cdot \ell\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right) + U\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 91.1%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6472.9
Applied rewrites72.9%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6467.1
Applied rewrites67.1%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in K around 0
rec-expN/A
sinh-undef-revN/A
*-commutativeN/A
lower-*.f64N/A
lift-sinh.f6495.7
Applied rewrites95.7%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites90.4%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.02)
(fma (* (* l J) (fma (* K K) -0.125 1.0)) 2.0 U)
(fma
J
(*
(fma
(fma
(fma 0.0003968253968253968 (* l l) 0.016666666666666666)
(* l l)
0.3333333333333333)
(* l l)
2.0)
l)
U)))
/* Mixed-order variant.  When cos(K/2) <= -0.02: linearization
 * 2*sinh(l) ~ 2l times 1 - K^2/8, folded into fma(x, 2.0, U).
 * Otherwise: degree-7 Taylor expansion of 2*sinh(l) inside an fma,
 * with the cos factor approximated as 1.  Threshold from Herbie's
 * sampling. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.02) {
tmp = fma(((l * J) * fma((K * K), -0.125, 1.0)), 2.0, U);
} else {
tmp = fma(J, (fma(fma(fma(0.0003968253968253968, (l * l), 0.016666666666666666), (l * l), 0.3333333333333333), (l * l), 2.0) * l), U);
}
return tmp;
}
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.02) tmp = fma(Float64(Float64(l * J) * fma(Float64(K * K), -0.125, 1.0)), 2.0, U); else tmp = fma(J, Float64(fma(fma(fma(0.0003968253968253968, Float64(l * l), 0.016666666666666666), Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l), U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.02], N[(N[(N[(l * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] * 2.0 + U), $MachinePrecision], N[(J * N[(N[(N[(N[(0.0003968253968253968 * N[(l * l), $MachinePrecision] + 0.016666666666666666), $MachinePrecision] * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\
\;\;\;\;\mathsf{fma}\left(\left(\ell \cdot J\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right), 2, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0003968253968253968, \ell \cdot \ell, 0.016666666666666666\right), \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 91.1%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6458.3
Applied rewrites58.3%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6459.8
Applied rewrites59.8%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in K around 0
rec-expN/A
sinh-undef-revN/A
*-commutativeN/A
lower-*.f64N/A
lift-sinh.f6495.7
Applied rewrites95.7%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites90.4%
(FPCore (J l K U)
:precision binary64
(if (<= (cos (/ K 2.0)) -0.02)
(fma (* (* l J) (fma (* K K) -0.125 1.0)) 2.0 U)
(fma
J
(*
(fma (fma 0.016666666666666666 (* l l) 0.3333333333333333) (* l l) 2.0)
l)
U)))
/* Mixed-order variant.  When cos(K/2) <= -0.02: linearization
 * 2*sinh(l) ~ 2l times 1 - K^2/8.  Otherwise: degree-5 Taylor
 * expansion of 2*sinh(l) (2 + l^2/3 + l^4/60, times l) inside an fma,
 * with the cos factor approximated as 1.  Threshold from Herbie's
 * sampling. */
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.02) {
tmp = fma(((l * J) * fma((K * K), -0.125, 1.0)), 2.0, U);
} else {
tmp = fma(J, (fma(fma(0.016666666666666666, (l * l), 0.3333333333333333), (l * l), 2.0) * l), U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.02) tmp = fma(Float64(Float64(l * J) * fma(Float64(K * K), -0.125, 1.0)), 2.0, U); else tmp = fma(J, Float64(fma(fma(0.016666666666666666, Float64(l * l), 0.3333333333333333), Float64(l * l), 2.0) * l), U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.02], N[(N[(N[(l * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] * 2.0 + U), $MachinePrecision], N[(J * N[(N[(N[(0.016666666666666666 * N[(l * l), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(l * l), $MachinePrecision] + 2.0), $MachinePrecision] * l), $MachinePrecision] + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\
\;\;\;\;\mathsf{fma}\left(\left(\ell \cdot J\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right), 2, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J, \mathsf{fma}\left(\mathsf{fma}\left(0.016666666666666666, \ell \cdot \ell, 0.3333333333333333\right), \ell \cdot \ell, 2\right) \cdot \ell, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 91.1%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6458.3
Applied rewrites58.3%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6459.8
Applied rewrites59.8%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in K around 0
rec-expN/A
sinh-undef-revN/A
*-commutativeN/A
lower-*.f64N/A
lift-sinh.f6495.7
Applied rewrites95.7%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6489.0
Applied rewrites89.0%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) -0.02) (fma (* (* l J) (fma (* K K) -0.125 1.0)) 2.0 U) (fma J (* (fma (* l l) 0.3333333333333333 2.0) l) U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.02) {
tmp = fma(((l * J) * fma((K * K), -0.125, 1.0)), 2.0, U);
} else {
tmp = fma(J, (fma((l * l), 0.3333333333333333, 2.0) * l), U);
}
return tmp;
}
# Approximates J*(exp(l)-exp(-l))*cos(K/2) + U with branch-specific series;
# fma gives a single rounding per polynomial step.
function code(J, l, K, U)
    if cos(Float64(K / 2.0)) <= -0.02
        quad = fma(Float64(K * K), -0.125, 1.0)
        return fma(Float64(Float64(l * J) * quad), 2.0, U)
    else
        cubic = Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l)
        return fma(J, cubic, U)
    end
end
(* Branch form of J*(Exp[l]-Exp[-l])*Cos[K/2] + U: quadratic-in-K factor on one branch, odd cubic series in l on the other. N[..., $MachinePrecision] marks each rounding step. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.02], N[(N[(N[(l * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision]), $MachinePrecision] * 2.0 + U), $MachinePrecision], N[(J * N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision] + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\
\;\;\;\;\mathsf{fma}\left(\left(\ell \cdot J\right) \cdot \mathsf{fma}\left(K \cdot K, -0.125, 1\right), 2, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J, \mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 91.1%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6458.3
Applied rewrites58.3%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6459.8
Applied rewrites59.8%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in K around 0
rec-expN/A
sinh-undef-revN/A
*-commutativeN/A
lower-*.f64N/A
lift-sinh.f6495.7
Applied rewrites95.7%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6484.7
Applied rewrites84.7%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) -0.02) (fma (* (* l J) (* (* K K) -0.125)) 2.0 U) (fma J (* (fma (* l l) 0.3333333333333333 2.0) l) U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.02) {
tmp = fma(((l * J) * ((K * K) * -0.125)), 2.0, U);
} else {
tmp = fma(J, (fma((l * l), 0.3333333333333333, 2.0) * l), U);
}
return tmp;
}
# Variant where the cos(K/2) <= -0.02 branch uses -K^2/8 (no constant term);
# otherwise an odd cubic series in l. fma gives single-rounding steps.
function code(J, l, K, U)
    if cos(Float64(K / 2.0)) <= -0.02
        scaled = Float64(Float64(K * K) * -0.125)
        return fma(Float64(Float64(l * J) * scaled), 2.0, U)
    end
    return fma(J, Float64(fma(Float64(l * l), 0.3333333333333333, 2.0) * l), U)
end
(* Branch form where the Cos[K/2] <= -0.02 branch multiplies by -K^2/8 with no constant term; the other branch is an odd cubic series in l. *)
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.02], N[(N[(N[(l * J), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125), $MachinePrecision]), $MachinePrecision] * 2.0 + U), $MachinePrecision], N[(J * N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision] + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.02:\\
\;\;\;\;\mathsf{fma}\left(\left(\ell \cdot J\right) \cdot \left(\left(K \cdot K\right) \cdot -0.125\right), 2, U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J, \mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell, U\right)\\
\end{array}
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0200000000000000004Initial program 91.1%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6458.3
Applied rewrites58.3%
Taylor expanded in K around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6459.8
Applied rewrites59.8%
Taylor expanded in K around inf
*-commutativeN/A
lower-*.f64N/A
pow2N/A
lift-*.f6459.8
Applied rewrites59.8%
if -0.0200000000000000004 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 84.5%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in K around 0
rec-expN/A
sinh-undef-revN/A
*-commutativeN/A
lower-*.f64N/A
lift-sinh.f6495.7
Applied rewrites95.7%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6484.7
Applied rewrites84.7%
(FPCore (J l K U) :precision binary64 (if (or (<= l -9.5e+102) (not (<= l 220000000000.0))) (* (fma 2.0 l (/ U J)) J) (fma (+ J J) l U)))
double code(double J, double l, double K, double U) {
double tmp;
if ((l <= -9.5e+102) || !(l <= 220000000000.0)) {
tmp = fma(2.0, l, (U / J)) * J;
} else {
tmp = fma((J + J), l, U);
}
return tmp;
}
# 2*J*l + U; extreme l is regrouped through U/J to dodge intermediate
# overflow (the !(l <= ...) test also catches NaN l).
function code(J, l, K, U)
    extreme = (l <= -9.5e+102) || !(l <= 220000000000.0)
    if extreme
        return Float64(fma(2.0, l, Float64(U / J)) * J)
    end
    return fma(Float64(J + J), l, U)
end
(* 2*J*l + U; extreme l is regrouped as (2*l + U/J)*J to avoid intermediate overflow. *)
code[J_, l_, K_, U_] := If[Or[LessEqual[l, -9.5e+102], N[Not[LessEqual[l, 220000000000.0]], $MachinePrecision]], N[(N[(2.0 * l + N[(U / J), $MachinePrecision]), $MachinePrecision] * J), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * l + U), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\ell \leq -9.5 \cdot 10^{+102} \lor \neg \left(\ell \leq 220000000000\right):\\
\;\;\;\;\mathsf{fma}\left(2, \ell, \frac{U}{J}\right) \cdot J\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J + J, \ell, U\right)\\
\end{array}
\end{array}
if l < -9.4999999999999992e102 or 2.2e11 < l Initial program 100.0%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6433.0
Applied rewrites33.0%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
lift-*.f6426.3
Applied rewrites26.3%
Taylor expanded in J around inf
*-commutativeN/A
lower-*.f64N/A
lower-fma.f64N/A
lower-/.f6438.1
Applied rewrites38.1%
if -9.4999999999999992e102 < l < 2.2e11Initial program 75.3%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6485.8
Applied rewrites85.8%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
lift-*.f6476.4
Applied rewrites76.4%
lift-*.f64N/A
count-2-revN/A
lower-+.f6476.4
Applied rewrites76.4%
Final simplification60.0%
(FPCore (J l K U) :precision binary64 (fma J (* (fma (* l l) 0.3333333333333333 2.0) l) U))
double code(double J, double l, double K, double U) {
return fma(J, (fma((l * l), 0.3333333333333333, 2.0) * l), U);
}
# J*(2*l + l^3/3) + U: odd cubic series form of J*(exp(l)-exp(-l)) + U.
function code(J, l, K, U)
    poly = fma(Float64(l * l), 0.3333333333333333, 2.0)
    return fma(J, Float64(poly * l), U)
end
(* J*(2*l + l^3/3) + U: odd cubic series form of J*(Exp[l]-Exp[-l]) + U. *)
code[J_, l_, K_, U_] := N[(J * N[(N[(N[(l * l), $MachinePrecision] * 0.3333333333333333 + 2.0), $MachinePrecision] * l), $MachinePrecision] + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(J, \mathsf{fma}\left(\ell \cdot \ell, 0.3333333333333333, 2\right) \cdot \ell, U\right)
\end{array}
Initial program 85.9%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
lift-exp.f64N/A
lift-/.f64N/A
lift-cos.f64N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in K around 0
rec-expN/A
sinh-undef-revN/A
*-commutativeN/A
lower-*.f64N/A
lift-sinh.f6483.0
Applied rewrites83.0%
Taylor expanded in l around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6474.4
Applied rewrites74.4%
(FPCore (J l K U) :precision binary64 (fma (+ J J) l U))
double code(double J, double l, double K, double U) {
return fma((J + J), l, U);
}
# Leading-order form: (J + J)*l + U fused into a single rounding.
function code(J, l, K, U)
    doubled = Float64(J + J)
    return fma(doubled, l, U)
end
(* Leading-order form: (J + J)*l + U. *)
code[J_, l_, K_, U_] := N[(N[(J + J), $MachinePrecision] * l + U), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(J + J, \ell, U\right)
\end{array}
Initial program 85.9%
Taylor expanded in l around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6463.1
Applied rewrites63.1%
Taylor expanded in K around 0
+-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
lift-*.f6454.9
Applied rewrites54.9%
lift-*.f64N/A
count-2-revN/A
lower-+.f6454.9
Applied rewrites54.9%
(FPCore (J l K U) :precision binary64 U)
double code(double J, double l, double K, double U) {
    /* Degenerate approximation: the expression collapses to U alone.
     * The other parameters are retained for interface compatibility. */
    (void) J;
    (void) l;
    (void) K;
    return U;
}
! IEEE-754-style fmax/fmin wrappers. Unlike Fortran's intrinsic max/min
! (whose result is processor-dependent when an argument is NaN), these
! return the OTHER operand when exactly one argument is NaN, matching C's
! fmax/fmin. NaN is detected with the x /= x self-comparison trick.
! The generic interfaces cover all four real(4)/real(8) combinations;
! mixed-kind variants promote the result to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; if x is NaN return y, if y is NaN return x.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values with the same NaN handling.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind fmax: real(8) x with real(4) y, promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmax: real(4) x with real(8) y, promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values; if x is NaN return y, if y is NaN return x.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values with the same NaN handling.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! Mixed-kind fmin: real(8) x with real(4) y, promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! Mixed-kind fmin: real(4) x with real(8) y, promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Degenerate approximation of J*(exp(l)-exp(-l))*cos(K/2) + U: per the
! report this candidate collapses to U alone. Arguments j, l, k are kept
! for interface compatibility with the other candidate implementations.
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u
end function
// Degenerate approximation: the expression collapses to U alone; the other
// parameters are retained for interface compatibility.
public static double code(double J, double l, double K, double U) {
    return U;
}
def code(J, l, K, U):
    """Degenerate approximation of J*(exp(l)-exp(-l))*cos(K/2) + U.

    Per the report this candidate collapses to U alone; J, l, K are kept
    for interface compatibility with the other candidates.
    """
    return U
# Degenerate approximation: the expression collapses to U alone.
function code(J, l, K, U)
    return U
end
% Degenerate approximation: the expression collapses to U alone.
function tmp = code(J, l, K, U) tmp = U; end
(* Degenerate approximation: the expression collapses to U alone. *)
code[J_, l_, K_, U_] := U
\begin{array}{l}
\\
U
\end{array}
Initial program 85.9%
Taylor expanded in J around 0
Applied rewrites32.7%
herbie shell --seed 2025079
(FPCore (J l K U)
:name "Maksimov and Kolovsky, Equation (4)"
:precision binary64
(+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))