
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! NaN-aware fmax/fmin wrappers matching C99 fmax/fmin semantics:
! if exactly one argument is NaN, the other argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on argument kinds: 88/44 = both real(8)/real(4);
! 84/48 = mixed kinds, promoted to a real(8) result.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! x /= x is true only when x is NaN; the nested merge picks the
! non-NaN operand, falling back to intrinsic max for ordinary numbers.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-selection pattern as the fmax family, with intrinsic min.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Evaluate j*(exp(l) - exp(-l))*cos(k/2) + u entirely in real(8).
! (Maksimov and Kolovsky, Equation (4).)  The fmin/fmax module is pulled
! in for parity with the other generated kernels; this expression does
! not call it.  Fix: added the missing `implicit none`.
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
/** Computes J * (e^l - e^(-l)) * cos(K / 2) + U in double precision. */
public static double code(double J, double l, double K, double U) {
    final double hyperbolicGap = Math.exp(l) - Math.exp(-l);
    final double halfAngleCos = Math.cos(K / 2.0);
    return J * hyperbolicGap * halfAngleCos + U;
}
def code(J, l, K, U):
    """Return J * (e**l - e**-l) * cos(K / 2) + U, evaluated in float64."""
    gap = math.exp(l) - math.exp(-l)
    half_angle_cos = math.cos(K / 2.0)
    return J * gap * half_angle_cos + U
# J * (e^l - e^(-l)) * cos(K/2) + U with explicit Float64 rounding at each step.
function code(J, l, K, U)
    gap = Float64(exp(l) - exp(Float64(-l)))
    scaled = Float64(J * gap)
    phase = cos(Float64(K / 2.0))
    return Float64(Float64(scaled * phase) + U)
end
% Reference kernel: J * (exp(l) - exp(-l)) * cos(K/2) + U.
function tmp = code(J, l, K, U)
  hyp = exp(l) - exp(-l);
  tmp = (J * hyp) * cos(K / 2.0) + U;
end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (J l K U) :precision binary64 (+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))
/* Reference implementation (repeated by the report generator):
   J * (e^l - e^(-l)) * cos(K/2) + U. */
double code(double J, double l, double K, double U) {
return ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U;
}
! NaN-aware fmax/fmin wrappers matching C99 fmax/fmin semantics:
! if exactly one argument is NaN, the other argument is returned.
! (Duplicate emitted by the report generator.)
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on argument kinds: 88/44 = both real(8)/real(4);
! 84/48 = mixed kinds, promoted to a real(8) result.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! x /= x is true only when x is NaN; the nested merge picks the
! non-NaN operand, falling back to intrinsic max for ordinary numbers.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-selection pattern as the fmax family, with intrinsic min.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Evaluate j*(exp(l) - exp(-l))*cos(k/2) + u entirely in real(8).
! (Maksimov and Kolovsky, Equation (4).)  Fix: added the missing
! `implicit none` to this external function.
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = ((j * (exp(l) - exp(-l))) * cos((k / 2.0d0))) + u
end function
/** Reference implementation (duplicate): J * (e^l - e^(-l)) * cos(K/2) + U. */
public static double code(double J, double l, double K, double U) {
return ((J * (Math.exp(l) - Math.exp(-l))) * Math.cos((K / 2.0))) + U;
}
# Reference implementation (duplicate): J * (e**l - e**-l) * cos(K/2) + U.
def code(J, l, K, U): return ((J * (math.exp(l) - math.exp(-l))) * math.cos((K / 2.0))) + U
function code(J, l, K, U) return Float64(Float64(Float64(J * Float64(exp(l) - exp(Float64(-l)))) * cos(Float64(K / 2.0))) + U) end
function tmp = code(J, l, K, U) tmp = ((J * (exp(l) - exp(-l))) * cos((K / 2.0))) + U; end
code[J_, l_, K_, U_] := N[(N[(N[(J * N[(N[Exp[l], $MachinePrecision] - N[Exp[(-l)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\left(J \cdot \left(e^{\ell} - e^{-\ell}\right)\right) \cdot \cos \left(\frac{K}{2}\right) + U
(FPCore (J l K U) :precision binary64 (fma (+ J J) (* (cos (* -0.5 K)) (sinh l)) U))
double code(double J, double l, double K, double U) {
return fma((J + J), (cos((-0.5 * K)) * sinh(l)), U);
}
function code(J, l, K, U) return fma(Float64(J + J), Float64(cos(Float64(-0.5 * K)) * sinh(l)), U) end
code[J_, l_, K_, U_] := N[(N[(J + J), $MachinePrecision] * N[(N[Cos[N[(-0.5 * K), $MachinePrecision]], $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
\mathsf{fma}\left(J + J, \cos \left(-0.5 \cdot K\right) \cdot \sinh \ell, U\right)
Initial program 87.0%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites99.9%
lift-fma.f64N/A
Applied rewrites99.9%
lift-fma.f64N/A
lift-*.f64N/A
associate-*l*N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6499.9%
lift-cos.f64N/A
lift-*.f64N/A
metadata-evalN/A
distribute-lft-neg-inN/A
lift-*.f64N/A
cos-neg-revN/A
lift-cos.f6499.9%
Applied rewrites99.9%
(FPCore (J l K U) :precision binary64 (fma (* (+ J J) (sinh l)) (cos (* 0.5 K)) U))
double code(double J, double l, double K, double U) {
return fma(((J + J) * sinh(l)), cos((0.5 * K)), U);
}
function code(J, l, K, U) return fma(Float64(Float64(J + J) * sinh(l)), cos(Float64(0.5 * K)), U) end
code[J_, l_, K_, U_] := N[(N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[Cos[N[(0.5 * K), $MachinePrecision]], $MachinePrecision] + U), $MachinePrecision]
\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \cos \left(0.5 \cdot K\right), U\right)
Initial program 87.0%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites99.9%
lift-fma.f64N/A
Applied rewrites99.9%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (exp (- l))) (t_1 (cos (* -0.5 K))))
(if (<= l -150.0)
(fma (* t_1 (- 1.0 t_0)) J U)
(if (<= l 0.52)
(fma (+ J J) (* t_1 l) U)
(if (<= l 8.5e+118)
(+ U (* J (- (/ 1.0 t_0) t_0)))
(fma (* (+ J J) (sinh l)) (fma (* K K) -0.125 1.0) U))))))double code(double J, double l, double K, double U) {
double t_0 = exp(-l);
double t_1 = cos((-0.5 * K));
double tmp;
if (l <= -150.0) {
tmp = fma((t_1 * (1.0 - t_0)), J, U);
} else if (l <= 0.52) {
tmp = fma((J + J), (t_1 * l), U);
} else if (l <= 8.5e+118) {
tmp = U + (J * ((1.0 / t_0) - t_0));
} else {
tmp = fma(((J + J) * sinh(l)), fma((K * K), -0.125, 1.0), U);
}
return tmp;
}
function code(J, l, K, U) t_0 = exp(Float64(-l)) t_1 = cos(Float64(-0.5 * K)) tmp = 0.0 if (l <= -150.0) tmp = fma(Float64(t_1 * Float64(1.0 - t_0)), J, U); elseif (l <= 0.52) tmp = fma(Float64(J + J), Float64(t_1 * l), U); elseif (l <= 8.5e+118) tmp = Float64(U + Float64(J * Float64(Float64(1.0 / t_0) - t_0))); else tmp = fma(Float64(Float64(J + J) * sinh(l)), fma(Float64(K * K), -0.125, 1.0), U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Exp[(-l)], $MachinePrecision]}, Block[{t$95$1 = N[Cos[N[(-0.5 * K), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[l, -150.0], N[(N[(t$95$1 * N[(1.0 - t$95$0), $MachinePrecision]), $MachinePrecision] * J + U), $MachinePrecision], If[LessEqual[l, 0.52], N[(N[(J + J), $MachinePrecision] * N[(t$95$1 * l), $MachinePrecision] + U), $MachinePrecision], If[LessEqual[l, 8.5e+118], N[(U + N[(J * N[(N[(1.0 / t$95$0), $MachinePrecision] - t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision]]]]]]
\begin{array}{l}
t_0 := e^{-\ell}\\
t_1 := \cos \left(-0.5 \cdot K\right)\\
\mathbf{if}\;\ell \leq -150:\\
\;\;\;\;\mathsf{fma}\left(t\_1 \cdot \left(1 - t\_0\right), J, U\right)\\
\mathbf{elif}\;\ell \leq 0.52:\\
\;\;\;\;\mathsf{fma}\left(J + J, t\_1 \cdot \ell, U\right)\\
\mathbf{elif}\;\ell \leq 8.5 \cdot 10^{+118}:\\
\;\;\;\;U + J \cdot \left(\frac{1}{t\_0} - t\_0\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
\end{array}
if l < -150Initial program 87.0%
Taylor expanded in l around 0
Applied rewrites62.9%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites62.9%
if -150 < l < 0.52000000000000002Initial program 87.0%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites99.9%
lift-fma.f64N/A
Applied rewrites99.9%
lift-fma.f64N/A
lift-*.f64N/A
associate-*l*N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6499.9%
lift-cos.f64N/A
lift-*.f64N/A
metadata-evalN/A
distribute-lft-neg-inN/A
lift-*.f64N/A
cos-neg-revN/A
lift-cos.f6499.9%
Applied rewrites99.9%
Taylor expanded in l around 0
Applied rewrites64.6%
if 0.52000000000000002 < l < 8.5000000000000003e118Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-exp.f64N/A
lower-exp.f64N/A
lower-neg.f6474.2%
Applied rewrites74.2%
lift-exp.f64N/A
sinh-+-cosh-revN/A
lift-sinh.f64N/A
add-flipN/A
cosh-neg-revN/A
lift-neg.f64N/A
lift-sinh.f64N/A
sinh-negN/A
lift-neg.f64N/A
sinh---cosh-revN/A
exp-negN/A
lift-exp.f64N/A
lower-/.f6474.2%
Applied rewrites74.2%
if 8.5000000000000003e118 < l Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f6463.9%
Applied rewrites63.9%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6463.9%
Applied rewrites68.4%
(FPCore (J l K U)
:precision binary64
(let* ((t_0 (cos (/ K 2.0))))
(if (<= t_0 -0.15)
(fma (+ J J) (* (cos (* -0.5 K)) l) U)
(if (<= t_0 -0.004)
(fma (* (+ J J) (sinh l)) (fma (* K K) -0.125 1.0) U)
(fma (+ J J) (sinh l) U)))))double code(double J, double l, double K, double U) {
double t_0 = cos((K / 2.0));
double tmp;
if (t_0 <= -0.15) {
tmp = fma((J + J), (cos((-0.5 * K)) * l), U);
} else if (t_0 <= -0.004) {
tmp = fma(((J + J) * sinh(l)), fma((K * K), -0.125, 1.0), U);
} else {
tmp = fma((J + J), sinh(l), U);
}
return tmp;
}
function code(J, l, K, U) t_0 = cos(Float64(K / 2.0)) tmp = 0.0 if (t_0 <= -0.15) tmp = fma(Float64(J + J), Float64(cos(Float64(-0.5 * K)) * l), U); elseif (t_0 <= -0.004) tmp = fma(Float64(Float64(J + J) * sinh(l)), fma(Float64(K * K), -0.125, 1.0), U); else tmp = fma(Float64(J + J), sinh(l), U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t$95$0, -0.15], N[(N[(J + J), $MachinePrecision] * N[(N[Cos[N[(-0.5 * K), $MachinePrecision]], $MachinePrecision] * l), $MachinePrecision] + U), $MachinePrecision], If[LessEqual[t$95$0, -0.004], N[(N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]]]
\begin{array}{l}
t_0 := \cos \left(\frac{K}{2}\right)\\
\mathbf{if}\;t\_0 \leq -0.15:\\
\;\;\;\;\mathsf{fma}\left(J + J, \cos \left(-0.5 \cdot K\right) \cdot \ell, U\right)\\
\mathbf{elif}\;t\_0 \leq -0.004:\\
\;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.14999999999999999Initial program 87.0%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites99.9%
lift-fma.f64N/A
Applied rewrites99.9%
lift-fma.f64N/A
lift-*.f64N/A
associate-*l*N/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6499.9%
lift-cos.f64N/A
lift-*.f64N/A
metadata-evalN/A
distribute-lft-neg-inN/A
lift-*.f64N/A
cos-neg-revN/A
lift-cos.f6499.9%
Applied rewrites99.9%
Taylor expanded in l around 0
Applied rewrites64.6%
if -0.14999999999999999 < (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0040000000000000001Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f6463.9%
Applied rewrites63.9%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6463.9%
Applied rewrites68.4%
if -0.0040000000000000001 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-exp.f64N/A
lower-exp.f64N/A
lower-neg.f6474.2%
Applied rewrites74.2%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
lift-sinh.f64N/A
associate-*r*N/A
*-commutativeN/A
count-2N/A
lift-+.f64N/A
lower-fma.f6481.1%
Applied rewrites81.1%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) -0.004) (fma (* (+ J J) (sinh l)) (fma (* K K) -0.125 1.0) U) (fma (+ J J) (sinh l) U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.004) {
tmp = fma(((J + J) * sinh(l)), fma((K * K), -0.125, 1.0), U);
} else {
tmp = fma((J + J), sinh(l), U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.004) tmp = fma(Float64(Float64(J + J) * sinh(l)), fma(Float64(K * K), -0.125, 1.0), U); else tmp = fma(Float64(J + J), sinh(l), U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.004], N[(N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision]), $MachinePrecision] * N[(N[(K * K), $MachinePrecision] * -0.125 + 1.0), $MachinePrecision] + U), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.004:\\
\;\;\;\;\mathsf{fma}\left(\left(J + J\right) \cdot \sinh \ell, \mathsf{fma}\left(K \cdot K, -0.125, 1\right), U\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.0040000000000000001Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower-pow.f6463.9%
Applied rewrites63.9%
lift-+.f64N/A
lift-*.f64N/A
lower-fma.f6463.9%
Applied rewrites68.4%
if -0.0040000000000000001 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-exp.f64N/A
lower-exp.f64N/A
lower-neg.f6474.2%
Applied rewrites74.2%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
lift-sinh.f64N/A
associate-*r*N/A
*-commutativeN/A
count-2N/A
lift-+.f64N/A
lower-fma.f6481.1%
Applied rewrites81.1%
(FPCore (J l K U) :precision binary64 (if (<= (cos (/ K 2.0)) -0.068) (+ U (/ (* J (* l (+ 2.0 (* 2.0 l)))) (exp l))) (fma (+ J J) (sinh l) U)))
double code(double J, double l, double K, double U) {
double tmp;
if (cos((K / 2.0)) <= -0.068) {
tmp = U + ((J * (l * (2.0 + (2.0 * l)))) / exp(l));
} else {
tmp = fma((J + J), sinh(l), U);
}
return tmp;
}
function code(J, l, K, U) tmp = 0.0 if (cos(Float64(K / 2.0)) <= -0.068) tmp = Float64(U + Float64(Float64(J * Float64(l * Float64(2.0 + Float64(2.0 * l)))) / exp(l))); else tmp = fma(Float64(J + J), sinh(l), U); end return tmp end
code[J_, l_, K_, U_] := If[LessEqual[N[Cos[N[(K / 2.0), $MachinePrecision]], $MachinePrecision], -0.068], N[(U + N[(N[(J * N[(l * N[(2.0 + N[(2.0 * l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Exp[l], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\cos \left(\frac{K}{2}\right) \leq -0.068:\\
\;\;\;\;U + \frac{J \cdot \left(\ell \cdot \left(2 + 2 \cdot \ell\right)\right)}{e^{\ell}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(J + J, \sinh \ell, U\right)\\
\end{array}
if (cos.f64 (/.f64 K #s(literal 2 binary64))) < -0.068000000000000005Initial program 87.0%
lift-*.f64N/A
*-commutativeN/A
lift--.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
exp-negN/A
lift-exp.f64N/A
sub-to-fractionN/A
associate-*l/N/A
lower-/.f64N/A
lower-*.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
prod-expN/A
lower-expm1.f64N/A
lower-+.f6475.8%
Applied rewrites75.8%
Taylor expanded in K around 0
lower-+.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-expm1.f64N/A
lower-*.f64N/A
lower-exp.f6462.9%
Applied rewrites62.9%
Taylor expanded in l around 0
lower-*.f64N/A
lower-+.f64N/A
lower-*.f6450.4%
Applied rewrites50.4%
if -0.068000000000000005 < (cos.f64 (/.f64 K #s(literal 2 binary64))) Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-exp.f64N/A
lower-exp.f64N/A
lower-neg.f6474.2%
Applied rewrites74.2%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
lift-sinh.f64N/A
associate-*r*N/A
*-commutativeN/A
count-2N/A
lift-+.f64N/A
lower-fma.f6481.1%
Applied rewrites81.1%
(FPCore (J l K U) :precision binary64 (fma (+ J J) (sinh l) U))
double code(double J, double l, double K, double U) {
return fma((J + J), sinh(l), U);
}
function code(J, l, K, U) return fma(Float64(J + J), sinh(l), U) end
code[J_, l_, K_, U_] := N[(N[(J + J), $MachinePrecision] * N[Sinh[l], $MachinePrecision] + U), $MachinePrecision]
\mathsf{fma}\left(J + J, \sinh \ell, U\right)
Initial program 87.0%
Taylor expanded in K around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-exp.f64N/A
lower-exp.f64N/A
lower-neg.f6474.2%
Applied rewrites74.2%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift--.f64N/A
lift-exp.f64N/A
lift-exp.f64N/A
lift-neg.f64N/A
sinh-undefN/A
lift-sinh.f64N/A
associate-*r*N/A
*-commutativeN/A
count-2N/A
lift-+.f64N/A
lower-fma.f6481.1%
Applied rewrites81.1%
(FPCore (J l K U) :precision binary64 (let* ((t_0 (* (* l J) 2.0))) (if (<= l -6.5e-23) (/ (* (+ t_0 U) U) U) (fma (/ t_0 U) U U))))
/* Herbie alternative built around t_0 = 2*J*l (the small-l, small-K limit
   of the reference expression), with two algebraically-equivalent
   regroupings selected on the sign/magnitude of l. */
double code(double J, double l, double K, double U) {
double t_0 = (l * J) * 2.0;
double tmp;
if (l <= -6.5e-23) {
/* NOTE(review): both branches divide by U — produces NaN/Inf when
   U == 0; presumably Herbie's sampled domain excludes that. Confirm. */
tmp = ((t_0 + U) * U) / U;
} else {
tmp = fma((t_0 / U), U, U);
}
return tmp;
}
function code(J, l, K, U) t_0 = Float64(Float64(l * J) * 2.0) tmp = 0.0 if (l <= -6.5e-23) tmp = Float64(Float64(Float64(t_0 + U) * U) / U); else tmp = fma(Float64(t_0 / U), U, U); end return tmp end
code[J_, l_, K_, U_] := Block[{t$95$0 = N[(N[(l * J), $MachinePrecision] * 2.0), $MachinePrecision]}, If[LessEqual[l, -6.5e-23], N[(N[(N[(t$95$0 + U), $MachinePrecision] * U), $MachinePrecision] / U), $MachinePrecision], N[(N[(t$95$0 / U), $MachinePrecision] * U + U), $MachinePrecision]]]
\begin{array}{l}
t_0 := \left(\ell \cdot J\right) \cdot 2\\
\mathbf{if}\;\ell \leq -6.5 \cdot 10^{-23}:\\
\;\;\;\;\frac{\left(t\_0 + U\right) \cdot U}{U}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{t\_0}{U}, U, U\right)\\
\end{array}
if l < -6.5e-23Initial program 87.0%
Taylor expanded in l around 0
lower-*.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6464.6%
Applied rewrites64.6%
Taylor expanded in K around 0
lower-*.f6454.9%
Applied rewrites54.9%
lift-+.f64N/A
+-commutativeN/A
sum-to-multN/A
lower-unsound-*.f64N/A
Applied rewrites58.2%
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
add-to-fractionN/A
associate-*l/N/A
lower-/.f64N/A
Applied rewrites42.4%
if -6.5e-23 < l Initial program 87.0%
Taylor expanded in l around 0
lower-*.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6464.6%
Applied rewrites64.6%
Taylor expanded in K around 0
lower-*.f6454.9%
Applied rewrites54.9%
lift-+.f64N/A
+-commutativeN/A
sum-to-multN/A
lower-unsound-*.f64N/A
Applied rewrites58.2%
lift-*.f64N/A
*-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f6458.2%
Applied rewrites58.2%
(FPCore (J l K U) :precision binary64 (fma (/ (* (* l J) 2.0) U) U U))
double code(double J, double l, double K, double U) {
return fma((((l * J) * 2.0) / U), U, U);
}
function code(J, l, K, U) return fma(Float64(Float64(Float64(l * J) * 2.0) / U), U, U) end
code[J_, l_, K_, U_] := N[(N[(N[(N[(l * J), $MachinePrecision] * 2.0), $MachinePrecision] / U), $MachinePrecision] * U + U), $MachinePrecision]
\mathsf{fma}\left(\frac{\left(\ell \cdot J\right) \cdot 2}{U}, U, U\right)
Initial program 87.0%
Taylor expanded in l around 0
lower-*.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6464.6%
Applied rewrites64.6%
Taylor expanded in K around 0
lower-*.f6454.9%
Applied rewrites54.9%
lift-+.f64N/A
+-commutativeN/A
sum-to-multN/A
lower-unsound-*.f64N/A
Applied rewrites58.2%
lift-*.f64N/A
*-commutativeN/A
lift-+.f64N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f6458.2%
Applied rewrites58.2%
(FPCore (J l K U) :precision binary64 (+ (* 2.0 (* J l)) U))
/* Herbie alternative, small-l/small-K limit: 2*J*l + U (K unused). */
double code(double J, double l, double K, double U) {
    const double product = J * l;
    return 2.0 * product + U;
}
! NaN-aware fmax/fmin wrappers matching C99 fmax/fmin semantics:
! if exactly one argument is NaN, the other argument is returned.
! (Duplicate emitted by the report generator.)
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on argument kinds: 88/44 = both real(8)/real(4);
! 84/48 = mixed kinds, promoted to a real(8) result.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! x /= x is true only when x is NaN; the nested merge picks the
! non-NaN operand, falling back to intrinsic max for ordinary numbers.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-selection pattern as the fmax family, with intrinsic min.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative in the small-l/small-K limit: 2*j*l + u (k unused).
! Fix: added the missing `implicit none` to this external function.
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = (2.0d0 * (j * l)) + u
end function
/** Herbie alternative, small-l/small-K limit: 2 * J * l + U (K unused). */
public static double code(double J, double l, double K, double U) {
    final double jl = J * l;
    return 2.0 * jl + U;
}
def code(J, l, K, U):
    """Herbie alternative, small-l/small-K limit: 2*J*l + U (K is unused)."""
    return 2.0 * (J * l) + U
function code(J, l, K, U) return Float64(Float64(2.0 * Float64(J * l)) + U) end
function tmp = code(J, l, K, U) tmp = (2.0 * (J * l)) + U; end
code[J_, l_, K_, U_] := N[(N[(2.0 * N[(J * l), $MachinePrecision]), $MachinePrecision] + U), $MachinePrecision]
2 \cdot \left(J \cdot \ell\right) + U
Initial program 87.0%
Taylor expanded in l around 0
lower-*.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-*.f6464.6%
Applied rewrites64.6%
Taylor expanded in K around 0
lower-*.f6454.9%
Applied rewrites54.9%
(FPCore (J l K U) :precision binary64 U)
/* Degenerate Herbie alternative (Taylor in J around 0): every term but U
   is dropped. */
double code(double J, double l, double K, double U) {
    (void)J; (void)l; (void)K; /* intentionally unused */
    return U;
}
! NaN-aware fmax/fmin wrappers matching C99 fmax/fmin semantics:
! if exactly one argument is NaN, the other argument is returned.
! (Duplicate emitted by the report generator.)
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on argument kinds: 88/44 = both real(8)/real(4);
! 84/48 = mixed kinds, promoted to a real(8) result.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! x /= x is true only when x is NaN; the nested merge picks the
! non-NaN operand, falling back to intrinsic max for ordinary numbers.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! Same NaN-selection pattern as the fmax family, with intrinsic min.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Degenerate Herbie alternative (Taylor in j around 0): result is u alone.
! Fix: added the missing `implicit none` to this external function.
real(8) function code(j, l, k, u)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: j
real(8), intent (in) :: l
real(8), intent (in) :: k
real(8), intent (in) :: u
code = u
end function
/** Degenerate Herbie alternative: returns U unchanged; J, l, K are ignored. */
public static double code(double J, double l, double K, double U) {
    final double result = U;
    return result;
}
def code(J, l, K, U):
    """Degenerate Herbie alternative: all terms dropped except U."""
    return U
function code(J, l, K, U) return U end
function tmp = code(J, l, K, U) tmp = U; end
code[J_, l_, K_, U_] := U
U
Initial program 87.0%
Taylor expanded in J around 0
Applied rewrites37.8%
herbie shell --seed 2025187
(FPCore (J l K U)
:name "Maksimov and Kolovsky, Equation (4)"
:precision binary64
(+ (* (* J (- (exp l) (exp (- l)))) (cos (/ K 2.0))) U))