
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! NaN-aware fmax/fmin helpers with C99 semantics: if exactly one argument
! is NaN, the other argument is returned.  The test "x /= x" is true only
! when x is NaN (IEEE NaN compares unequal to itself).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic resolution over kind combinations: 8/8, 4/4, 8/4, 4/8.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; a NaN argument is skipped in favor of the other.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed kinds: the real(4) argument is promoted to real(8) via dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! The fmin variants mirror the fmax ones above, using min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Original program in Fortran: (1 - cos(x)) / x**2.
! The report below measures only ~2.5% accuracy for x < 0.005, where the
! subtraction 1.0d0 - cos(x) cancels catastrophically.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
/** Evaluates (1 - cos(x)) / x^2 in double precision (original naive form). */
public static double code(double x) {
    final double numer = 1.0 - Math.cos(x);
    return numer / (x * x);
}
def code(x):
    """Return (1 - cos(x)) / x**2 evaluated naively in floats."""
    numer = 1.0 - math.cos(x)
    return numer / (x * x)
# Naive Float64 evaluation of (1 - cos(x)) / x^2 (original form).
function code(x)
    numer = Float64(1.0 - cos(x))
    denom = Float64(x * x)
    return Float64(numer / denom)
end
% Naive (1 - cos(x)) / x^2; accuracy degrades for small |x| (see report).
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
(* Machine-precision evaluation of (1 - Cos[x]) / x^2, the original form. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
/* Duplicate rendering of the original program: (1 - cos(x)) / x^2. */
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! NaN-aware fmax/fmin helpers with C99 semantics: if exactly one argument
! is NaN, the other argument is returned.  The test "x /= x" is true only
! when x is NaN (IEEE NaN compares unequal to itself).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic resolution over kind combinations: 8/8, 4/4, 8/4, 4/8.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; a NaN argument is skipped in favor of the other.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed kinds: the real(4) argument is promoted to real(8) via dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! The fmin variants mirror the fmax ones above, using min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Duplicate rendering of the original program: (1 - cos(x)) / x**2.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
/* Duplicate Java rendering of the original (1 - cos(x)) / x^2. */
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
# Duplicate Python rendering of the original (1 - cos(x)) / x**2.
def code(x): return (1.0 - math.cos(x)) / (x * x)
# Duplicate Julia rendering: naive (1 - cos(x)) / x^2 in Float64.
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
% Duplicate MATLAB rendering of the naive (1 - cos(x)) / x^2.
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
(* Duplicate Mathematica rendering of (1 - Cos[x]) / x^2. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.005) (fma (* x_m x_m) -0.041666666666666664 0.5) (/ (fma (cos x_m) (/ -1.0 x_m) (/ 1.0 x_m)) x_m)))
x_m = fabs(x);
/* Herbie alternative 1 (x_m = |x| precomputed by the caller, line above):
 * small inputs use the Taylor polynomial 0.5 - x^2/24 (note that
 * -0.041666666666666664 == -1/24); otherwise the cancellation-prone
 * 1 - cos is folded into an fma of cos with +/- 1/x. */
double code(double x_m) {
double tmp;
if (x_m <= 0.005) {
/* 0.5 - x^2/24: leading Taylor terms of (1 - cos x)/x^2 around 0. */
tmp = fma((x_m * x_m), -0.041666666666666664, 0.5);
} else {
tmp = fma(cos(x_m), (-1.0 / x_m), (1.0 / x_m)) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.005) tmp = fma(Float64(x_m * x_m), -0.041666666666666664, 0.5); else tmp = Float64(fma(cos(x_m), Float64(-1.0 / x_m), Float64(1.0 / x_m)) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.005], N[(N[(x$95$m * x$95$m), $MachinePrecision] * -0.041666666666666664 + 0.5), $MachinePrecision], N[(N[(N[Cos[x$95$m], $MachinePrecision] * N[(-1.0 / x$95$m), $MachinePrecision] + N[(1.0 / x$95$m), $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.005:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, -0.041666666666666664, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\cos x\_m, \frac{-1}{x\_m}, \frac{1}{x\_m}\right)}{x\_m}\\
\end{array}
\end{array}
if x < 0.0050000000000000001Initial program 2.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6499.9
Applied rewrites99.9%
if 0.0050000000000000001 < x Initial program 98.2%
lift-*.f64N/A
lift-/.f64N/A
lift--.f64N/A
lift-cos.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lift-cos.f64N/A
lift--.f6499.2
Applied rewrites99.2%
lift-/.f64N/A
lift--.f64N/A
lift-cos.f64N/A
*-lft-identityN/A
cancel-sign-sub-invN/A
metadata-evalN/A
mul-1-negN/A
sub0-negN/A
+-commutativeN/A
div-add-revN/A
metadata-evalN/A
associate-/l*N/A
sub0-negN/A
mul-1-negN/A
*-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lift-cos.f64N/A
lower-/.f64N/A
associate-/l*N/A
metadata-evalN/A
lower-/.f6499.2
Applied rewrites99.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.005) (fma (* x_m x_m) -0.041666666666666664 0.5) (* (/ (- 1.0 (cos x_m)) x_m) (/ 1.0 x_m))))
x_m = fabs(x);
/* Herbie alternative 2: same small-input Taylor branch as alternative 1;
 * the large branch splits the division by x^2 into two factors,
 * ((1 - cos x)/x) * (1/x). */
double code(double x_m) {
double tmp;
if (x_m <= 0.005) {
tmp = fma((x_m * x_m), -0.041666666666666664, 0.5);
} else {
tmp = ((1.0 - cos(x_m)) / x_m) * (1.0 / x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.005) tmp = fma(Float64(x_m * x_m), -0.041666666666666664, 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) * Float64(1.0 / x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.005], N[(N[(x$95$m * x$95$m), $MachinePrecision] * -0.041666666666666664 + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] * N[(1.0 / x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.005:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, -0.041666666666666664, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m} \cdot \frac{1}{x\_m}\\
\end{array}
\end{array}
if x < 0.0050000000000000001Initial program 2.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6499.9
Applied rewrites99.9%
if 0.0050000000000000001 < x Initial program 98.2%
lift-*.f64N/A
lift-/.f64N/A
lift--.f64N/A
lift-cos.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lift-cos.f64N/A
lift--.f6499.2
Applied rewrites99.2%
lift-/.f64N/A
lift--.f64N/A
lift-cos.f64N/A
*-lft-identityN/A
cancel-sign-sub-invN/A
metadata-evalN/A
mul-1-negN/A
sub0-negN/A
+-commutativeN/A
div-add-revN/A
metadata-evalN/A
associate-/l*N/A
sub0-negN/A
mul-1-negN/A
*-commutativeN/A
associate-/l*N/A
lower-fma.f64N/A
lift-cos.f64N/A
lower-/.f64N/A
associate-/l*N/A
metadata-evalN/A
lower-/.f6499.2
Applied rewrites99.2%
Applied rewrites99.1%
lift-*.f64N/A
lift-/.f64N/A
lift-/.f64N/A
lift--.f64N/A
lift-cos.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-cos.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-/.f6499.1
Applied rewrites99.1%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.005) (fma (* x_m x_m) -0.041666666666666664 0.5) (/ (/ (- 1.0 (cos x_m)) x_m) x_m)))
x_m = fabs(x);
/* Herbie alternative 3: as alternative 2, but the large branch divides
 * by x twice instead of multiplying by a reciprocal. */
double code(double x_m) {
double tmp;
if (x_m <= 0.005) {
tmp = fma((x_m * x_m), -0.041666666666666664, 0.5);
} else {
tmp = ((1.0 - cos(x_m)) / x_m) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.005) tmp = fma(Float64(x_m * x_m), -0.041666666666666664, 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.005], N[(N[(x$95$m * x$95$m), $MachinePrecision] * -0.041666666666666664 + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.005:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, -0.041666666666666664, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 - \cos x\_m}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 0.0050000000000000001Initial program 2.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6499.9
Applied rewrites99.9%
if 0.0050000000000000001 < x Initial program 98.2%
lift-*.f64N/A
lift-/.f64N/A
lift--.f64N/A
lift-cos.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lift-cos.f64N/A
lift--.f6499.2
Applied rewrites99.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.005) (fma (* x_m x_m) -0.041666666666666664 0.5) (/ (- 1.0 (cos x_m)) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative 4: original expression, but guarded by the
 * small-input Taylor branch that removes the 1 - cos cancellation. */
double code(double x_m) {
double tmp;
if (x_m <= 0.005) {
tmp = fma((x_m * x_m), -0.041666666666666664, 0.5);
} else {
tmp = (1.0 - cos(x_m)) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.005) tmp = fma(Float64(x_m * x_m), -0.041666666666666664, 0.5); else tmp = Float64(Float64(1.0 - cos(x_m)) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.005], N[(N[(x$95$m * x$95$m), $MachinePrecision] * -0.041666666666666664 + 0.5), $MachinePrecision], N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.005:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, -0.041666666666666664, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.0050000000000000001Initial program 2.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6499.9
Applied rewrites99.9%
if 0.0050000000000000001 < x Initial program 98.2%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 110000.0)
(fma
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
(* x_m x_m)
0.5)
(fma (/ 1.0 x_m) (/ 1.0 x_m) (* (- (/ 1.0 (* (* x_m x_m) x_m))) x_m))))
x_m = fabs(x);
/* Herbie alternative 5: degree-4 Taylor polynomial
 * 0.5 - x^2/24 + x^4/720 (0.001388888888888889 == 1/720) for the
 * lower range; for huge inputs the cos term has been dropped entirely
 * and the fma combines 1/x^2 with a term that cancels it. */
double code(double x_m) {
double tmp;
if (x_m <= 110000.0) {
tmp = fma(fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = fma((1.0 / x_m), (1.0 / x_m), (-(1.0 / ((x_m * x_m) * x_m)) * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 110000.0) tmp = fma(fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = fma(Float64(1.0 / x_m), Float64(1.0 / x_m), Float64(Float64(-Float64(1.0 / Float64(Float64(x_m * x_m) * x_m))) * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 110000.0], N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 / x$95$m), $MachinePrecision] * N[(1.0 / x$95$m), $MachinePrecision] + N[((-N[(1.0 / N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]) * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 110000:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{1}{x\_m}, \frac{1}{x\_m}, \left(-\frac{1}{\left(x\_m \cdot x\_m\right) \cdot x\_m}\right) \cdot x\_m\right)\\
\end{array}
\end{array}
if x < 1.1e5Initial program 4.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6498.6
Applied rewrites98.6%
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
metadata-evalN/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6498.6
Applied rewrites98.6%
if 1.1e5 < x Initial program 98.3%
Taylor expanded in x around 0
Applied rewrites52.6%
lift-*.f64N/A
lift-/.f64N/A
pow2N/A
lift--.f64N/A
div-subN/A
metadata-evalN/A
pow2N/A
frac-timesN/A
associate-*l/N/A
frac-subN/A
associate-/l*N/A
metadata-evalN/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
inv-powN/A
metadata-evalN/A
associate-/l*N/A
pow2N/A
Applied rewrites2.9%
Applied rewrites55.3%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 3.3e+40)
(fma
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
(* x_m x_m)
0.5)
(+ (/ 1.0 (* x_m x_m)) (* (- (/ 1.0 (* (* x_m x_m) x_m))) x_m))))
x_m = fabs(x);
/* Herbie alternative 6: same degree-4 Taylor branch with a much larger
 * threshold; the upper branch adds 1/x^2 and -(1/x^3)*x, which cancel
 * up to rounding (the cos term was Taylor-eliminated). */
double code(double x_m) {
double tmp;
if (x_m <= 3.3e+40) {
tmp = fma(fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = (1.0 / (x_m * x_m)) + (-(1.0 / ((x_m * x_m) * x_m)) * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.3e+40) tmp = fma(fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 / Float64(x_m * x_m)) + Float64(Float64(-Float64(1.0 / Float64(Float64(x_m * x_m) * x_m))) * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.3e+40], N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] + N[((-N[(1.0 / N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]) * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.3 \cdot 10^{+40}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x\_m \cdot x\_m} + \left(-\frac{1}{\left(x\_m \cdot x\_m\right) \cdot x\_m}\right) \cdot x\_m\\
\end{array}
\end{array}
if x < 3.2999999999999998e40Initial program 14.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6488.8
Applied rewrites88.8%
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
metadata-evalN/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6488.8
Applied rewrites88.8%
if 3.2999999999999998e40 < x Initial program 98.3%
Taylor expanded in x around 0
Applied rewrites59.0%
lift-*.f64N/A
lift-/.f64N/A
pow2N/A
lift--.f64N/A
div-subN/A
metadata-evalN/A
pow2N/A
frac-timesN/A
associate-*l/N/A
frac-subN/A
associate-/l*N/A
metadata-evalN/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
inv-powN/A
metadata-evalN/A
associate-/l*N/A
pow2N/A
Applied rewrites2.7%
lift--.f64N/A
Applied rewrites61.4%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 3.7e+27)
(fma
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
(* x_m x_m)
0.5)
(- (/ (/ 1.0 x_m) x_m) (/ (* x_m 1.0) (* (* x_m x_m) x_m)))))
x_m = fabs(x);
/* Herbie alternative 7: degree-4 Taylor branch; the upper branch
 * subtracts two algebraically-equal 1/x^2 forms ((1/x)/x and x/x^3),
 * leaving only rounding residue. */
double code(double x_m) {
double tmp;
if (x_m <= 3.7e+27) {
tmp = fma(fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = ((1.0 / x_m) / x_m) - ((x_m * 1.0) / ((x_m * x_m) * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.7e+27) tmp = fma(fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(1.0 / x_m) / x_m) - Float64(Float64(x_m * 1.0) / Float64(Float64(x_m * x_m) * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.7e+27], N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision] - N[(N[(x$95$m * 1.0), $MachinePrecision] / N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.7 \cdot 10^{+27}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1}{x\_m}}{x\_m} - \frac{x\_m \cdot 1}{\left(x\_m \cdot x\_m\right) \cdot x\_m}\\
\end{array}
\end{array}
if x < 3.70000000000000002e27Initial program 11.2%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6492.1
Applied rewrites92.1%
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
metadata-evalN/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6492.1
Applied rewrites92.1%
if 3.70000000000000002e27 < x Initial program 98.3%
Taylor expanded in x around 0
Applied rewrites56.6%
lift-*.f64N/A
lift-/.f64N/A
pow2N/A
lift--.f64N/A
div-subN/A
metadata-evalN/A
pow2N/A
frac-timesN/A
associate-*l/N/A
frac-subN/A
associate-/l*N/A
metadata-evalN/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
inv-powN/A
metadata-evalN/A
associate-/l*N/A
pow2N/A
Applied rewrites2.8%
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
pow2N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
associate-/l*N/A
inv-powN/A
lift-*.f64N/A
lift-*.f64N/A
pow3N/A
pow-divN/A
metadata-evalN/A
pow-prod-downN/A
pow2N/A
inv-powN/A
pow2N/A
associate-/r*N/A
lower-/.f64N/A
Applied rewrites59.0%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 3.3e+40)
(fma
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
(* x_m x_m)
0.5)
(- (/ 1.0 (* x_m x_m)) (/ (* x_m 1.0) (* (* x_m x_m) x_m)))))
x_m = fabs(x);
/* Herbie alternative 8: like alternative 7 but computes the first term
 * as 1/(x*x) directly; the subtraction again cancels to rounding noise. */
double code(double x_m) {
double tmp;
if (x_m <= 3.3e+40) {
tmp = fma(fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = (1.0 / (x_m * x_m)) - ((x_m * 1.0) / ((x_m * x_m) * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.3e+40) tmp = fma(fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 / Float64(x_m * x_m)) - Float64(Float64(x_m * 1.0) / Float64(Float64(x_m * x_m) * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.3e+40], N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] - N[(N[(x$95$m * 1.0), $MachinePrecision] / N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.3 \cdot 10^{+40}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x\_m \cdot x\_m} - \frac{x\_m \cdot 1}{\left(x\_m \cdot x\_m\right) \cdot x\_m}\\
\end{array}
\end{array}
if x < 3.2999999999999998e40Initial program 14.4%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6488.8
Applied rewrites88.8%
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
metadata-evalN/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6488.8
Applied rewrites88.8%
if 3.2999999999999998e40 < x Initial program 98.3%
Taylor expanded in x around 0
Applied rewrites59.0%
lift-*.f64N/A
lift-/.f64N/A
pow2N/A
lift--.f64N/A
div-subN/A
metadata-evalN/A
pow2N/A
frac-timesN/A
associate-*l/N/A
frac-subN/A
associate-/l*N/A
metadata-evalN/A
inv-powN/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
inv-powN/A
metadata-evalN/A
associate-/l*N/A
pow2N/A
Applied rewrites2.7%
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
pow2N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
associate-/l*N/A
inv-powN/A
lift-*.f64N/A
lift-*.f64N/A
pow3N/A
pow-divN/A
metadata-evalN/A
pow-prod-downN/A
pow2N/A
inv-powN/A
lower-/.f64N/A
pow2N/A
lift-*.f6461.4
Applied rewrites61.4%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 19000000000000.0)
(fma
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
(* x_m x_m)
0.5)
(fma (/ 1.0 x_m) (/ -1.0 x_m) (/ 1.0 (* x_m x_m)))))
x_m = fabs(x);
/* Herbie alternative 9: degree-4 Taylor up to 1.9e13; above that,
 * fma((1/x), (-1/x), 1/x^2) — the product and addend cancel up to the
 * rounding difference between (1/x)*(1/x) and 1/(x*x). */
double code(double x_m) {
double tmp;
if (x_m <= 19000000000000.0) {
tmp = fma(fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = fma((1.0 / x_m), (-1.0 / x_m), (1.0 / (x_m * x_m)));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 19000000000000.0) tmp = fma(fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = fma(Float64(1.0 / x_m), Float64(-1.0 / x_m), Float64(1.0 / Float64(x_m * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 19000000000000.0], N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 / x$95$m), $MachinePrecision] * N[(-1.0 / x$95$m), $MachinePrecision] + N[(1.0 / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 19000000000000:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{1}{x\_m}, \frac{-1}{x\_m}, \frac{1}{x\_m \cdot x\_m}\right)\\
\end{array}
\end{array}
if x < 1.9e13Initial program 7.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6496.2
Applied rewrites96.2%
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
metadata-evalN/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6496.2
Applied rewrites96.2%
if 1.9e13 < x Initial program 98.3%
lift-*.f64N/A
lift-/.f64N/A
lift--.f64N/A
lift-cos.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lift-cos.f64N/A
lift--.f6499.3
Applied rewrites99.3%
Applied rewrites98.0%
Taylor expanded in x around 0
lift-/.f6454.6
Applied rewrites54.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 1.36e+31)
(fma
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (- 1.0 (* x_m (/ 1.0 x_m))) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative 10: degree-4 Taylor up to 1.36e31; the upper
 * branch's numerator 1 - x*(1/x) is the rounding residue of the
 * reciprocal (usually exactly 0). */
double code(double x_m) {
double tmp;
if (x_m <= 1.36e+31) {
tmp = fma(fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = (1.0 - (x_m * (1.0 / x_m))) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.36e+31) tmp = fma(fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 - Float64(x_m * Float64(1.0 / x_m))) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.36e+31], N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - N[(x$95$m * N[(1.0 / x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.36 \cdot 10^{+31}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - x\_m \cdot \frac{1}{x\_m}}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 1.3600000000000001e31Initial program 12.1%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6491.2
Applied rewrites91.2%
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
metadata-evalN/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
metadata-evalN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6491.2
Applied rewrites91.2%
if 1.3600000000000001e31 < x Initial program 98.3%
Taylor expanded in x around 0
Applied rewrites57.2%
lift-*.f64N/A
lift-/.f64N/A
associate-/r*N/A
lift--.f64N/A
div-subN/A
metadata-evalN/A
associate-/l*N/A
sub-divN/A
frac-subN/A
pow2N/A
lower-/.f64N/A
Applied rewrites57.5%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 2.7e+48) 0.5 (/ (- 1.0 (* x_m (/ 1.0 x_m))) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative 11: the whole lower range collapses to the x->0
 * limit 0.5 (reported 16.5% accuracy below the threshold); the upper
 * branch matches alternative 10. */
double code(double x_m) {
double tmp;
if (x_m <= 2.7e+48) {
tmp = 0.5;
} else {
tmp = (1.0 - (x_m * (1.0 / x_m))) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x)
! NaN-aware fmax/fmin helpers with C99 semantics: if exactly one argument
! is NaN, the other argument is returned.  The test "x /= x" is true only
! when x is NaN (IEEE NaN compares unequal to itself).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic resolution over kind combinations: 8/8, 4/4, 8/4, 4/8.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; a NaN argument is skipped in favor of the other.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed kinds: the real(4) argument is promoted to real(8) via dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! The fmin variants mirror the fmax ones above, using min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Fortran rendering of Herbie alternative 11: constant 0.5 (the x->0
! limit of (1 - cos x)/x**2) below 2.7d48; above, the numerator
! 1 - x_m*(1/x_m) is the rounding residue of the reciprocal.
real(8) function code(x_m)
use fmin_fmax_functions
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 2.7d+48) then
tmp = 0.5d0
else
tmp = (1.0d0 - (x_m * (1.0d0 / x_m))) / (x_m * x_m)
end if
code = tmp
end function
x_m = Math.abs(x);
/* Java rendering of Herbie alternative 11: constant 0.5 below the huge
 * threshold; above it, the numerator is the rounding residue of
 * x_m * (1/x_m) rather than 1 - cos. */
public static double code(double x_m) {
double tmp;
if (x_m <= 2.7e+48) {
tmp = 0.5;
} else {
tmp = (1.0 - (x_m * (1.0 / x_m))) / (x_m * x_m);
}
return tmp;
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 2.7e+48: tmp = 0.5 else: tmp = (1.0 - (x_m * (1.0 / x_m))) / (x_m * x_m) return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 2.7e+48) tmp = 0.5; else tmp = Float64(Float64(1.0 - Float64(x_m * Float64(1.0 / x_m))) / Float64(x_m * x_m)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 2.7e+48) tmp = 0.5; else tmp = (1.0 - (x_m * (1.0 / x_m))) / (x_m * x_m); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 2.7e+48], 0.5, N[(N[(1.0 - N[(x$95$m * N[(1.0 / x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 2.7 \cdot 10^{+48}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - x\_m \cdot \frac{1}{x\_m}}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 2.70000000000000004e48Initial program 16.5%
Taylor expanded in x around 0
Applied rewrites86.3%
if 2.70000000000000004e48 < x Initial program 98.3%
Taylor expanded in x around 0
Applied rewrites60.8%
lift-*.f64N/A
lift-/.f64N/A
associate-/r*N/A
lift--.f64N/A
div-subN/A
metadata-evalN/A
associate-/l*N/A
sub-divN/A
frac-subN/A
pow2N/A
lower-/.f64N/A
Applied rewrites61.1%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1e+77) 0.5 (/ (- 1.0 1.0) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative 13 (last): constant 0.5 below 1e77; the upper
 * branch numerator (1.0 - 1.0) is exactly zero, so it returns 0.0. */
double code(double x_m) {
double tmp;
if (x_m <= 1e+77) {
tmp = 0.5;
} else {
tmp = (1.0 - 1.0) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x)
! NaN-aware fmax/fmin helpers with C99 semantics: if exactly one argument
! is NaN, the other argument is returned.  The test "x /= x" is true only
! when x is NaN (IEEE NaN compares unequal to itself).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic resolution over kind combinations: 8/8, 4/4, 8/4, 4/8.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; a NaN argument is skipped in favor of the other.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed kinds: the real(4) argument is promoted to real(8) via dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! The fmin variants mirror the fmax ones above, using min instead of max.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-rewritten (1 - cos(x))/x**2 evaluated on x_m = |x|.
! Below the 1d+77 split the Taylor limit at 0 (0.5d0) is returned;
! above it the rewritten quotient is evaluated as generated.
real(8) function code(x_m)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 1d+77) then
    tmp = 0.5d0
else
    ! Herbie's rewrite: the numerator cancels to exactly zero, so this
    ! branch yields 0 even when x_m * x_m overflows to infinity.
    tmp = (1.0d0 - 1.0d0) / (x_m * x_m)
end if
code = tmp
end function
x_m = Math.abs(x);
public static double code(double x_m) {
    // Below the 1e+77 split: Taylor limit of (1 - cos x)/x^2 at 0.
    if (x_m <= 1e+77) {
        return 0.5;
    }
    // Herbie rewrite: numerator cancels exactly, so this is 0.0.
    return (1.0 - 1.0) / (x_m * x_m);
}
x_m = math.fabs(x)
def code(x_m):
    tmp = 0
    if x_m <= 1e+77:
        tmp = 0.5
    else:
        tmp = (1.0 - 1.0) / (x_m * x_m)
    return tmp

x_m = abs(x)
function code(x_m)
    tmp = 0.0
    if (x_m <= 1e+77)
        tmp = 0.5;
    else
        tmp = Float64(Float64(1.0 - 1.0) / Float64(x_m * x_m));
    end
    return tmp
end

x_m = abs(x);
function tmp_2 = code(x_m)
    tmp = 0.0;
    if (x_m <= 1e+77)
        tmp = 0.5;
    else
        tmp = (1.0 - 1.0) / (x_m * x_m);
    end
    tmp_2 = tmp;
end

x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := If[LessEqual[x$95$m, 1e+77], 0.5, N[(N[(1.0 - 1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 10^{+77}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - 1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 9.99999999999999983e76 — Initial program: 22.4%
Taylor expanded in x around 0
Applied rewrites: 80.5%
if 9.99999999999999983e76 < x — Initial program: 98.2%
Taylor expanded in x around 0
Applied rewrites: 67.4%
x_m = (fabs.f64 x)
(FPCore (x_m) :precision binary64 0.5)
x_m = fabs(x);
/* Herbie's final simplification: (1 - cos x)/x^2 is replaced by its
   limit at x -> 0, the constant 0.5, independent of x_m = fabs(x). */
double code(double x_m) {
return 0.5;
}
x_m = abs(x)
! IEEE 754-style fmax/fmin helpers with C-library NaN semantics: if one
! argument is NaN the other argument is returned; NaN comes back only
! when both arguments are NaN.  (The test x /= x is true exactly when x
! is NaN.)  Mixed real(4)/real(8) variants promote the result to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax for real(8)/real(8): x NaN -> y, else y NaN -> x, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for real(4)/real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for real(8)/real(4); y is promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax for real(4)/real(8); x is promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin for real(8)/real(8): x NaN -> y, else y NaN -> x, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for real(4)/real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for real(8)/real(4); y is promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin for real(4)/real(8); x is promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie's final simplification of (1 - cos(x))/x**2: the whole
! expression is replaced by the constant 0.5d0, its limit as x -> 0,
! independent of the input x_m = |x|.
real(8) function code(x_m)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: x_m
code = 0.5d0
end function
x_m = Math.abs(x);
public static double code(double x_m) {
// Herbie's final simplification: (1 - cos x)/x^2 reduced to its limit
// at x -> 0, the constant 0.5, independent of x_m = Math.abs(x).
return 0.5;
}
x_m = math.fabs(x)
def code(x_m):
    return 0.5

x_m = abs(x)
function code(x_m)
    return 0.5
end

x_m = abs(x);
function tmp = code(x_m)
    tmp = 0.5;
end

x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := 0.5
\begin{array}{l}
x_m = \left|x\right|
\\
0.5
\end{array}
Initial program: 51.5%
Taylor expanded in x around 0
Applied rewrites: 50.8%
herbie shell --seed 2025130
(FPCore (x)
:name "cos2 (problem 3.4.1)"
:precision binary64
(/ (- 1.0 (cos x)) (* x x)))