
(FPCore (N) :precision binary64 (- (atan (+ N 1.0)) (atan N)))
double code(double N) {
return atan((N + 1.0)) - atan(N);
}
! NaN-aware fmax/fmin, mirroring the C library fmax/fmin semantics:
! when exactly one argument is NaN the other argument is returned,
! whereas the behavior of the Fortran max/min intrinsics on NaN is
! processor-dependent.  Mixed-kind overloads promote the real(4)
! argument to real(8) before comparing.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is true only for NaN: return y when x is NaN,
! x when y is NaN, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! atan(n + 1) - atan(n), double precision.
! NOTE(review): both arctangents approach pi/2 as n grows, so the
! subtraction cancels; the atan2-based rewrites later in this report
! avoid that loss of accuracy.
real(8) function code(n)
use fmin_fmax_functions
real(8), intent (in) :: n
code = atan((n + 1.0d0)) - atan(n)
end function
/** atan(N + 1) - atan(N), evaluated directly in double precision. */
public static double code(double N) {
    double upper = Math.atan(N + 1.0);
    double lower = Math.atan(N);
    return upper - lower;
}
def code(N):
    """Return atan(N + 1) - atan(N), evaluated directly."""
    upper = math.atan(N + 1.0)
    lower = math.atan(N)
    return upper - lower
# atan(N + 1) - atan(N), evaluated directly in Float64.
function code(N)
    upper = atan(Float64(N + 1.0))
    lower = atan(N)
    return Float64(upper - lower)
end
% atan(N + 1) - atan(N), evaluated directly.
function tmp = code(N)
    upper = atan(N + 1.0);
    lower = atan(N);
    tmp = upper - lower;
end
(* atan(N + 1) - atan(N), with each step rounded to $MachinePrecision.  NOTE(review): the pattern variable N collides with the built-in N[] used for rounding -- confirm this evaluates as intended. *)
code[N_] := N[(N[ArcTan[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[ArcTan[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(N + 1\right) - \tan^{-1} N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (atan (+ N 1.0)) (atan N)))
double code(double N) {
return atan((N + 1.0)) - atan(N);
}
! NaN-aware fmax/fmin, mirroring the C library fmax/fmin semantics:
! when exactly one argument is NaN the other argument is returned,
! whereas the behavior of the Fortran max/min intrinsics on NaN is
! processor-dependent.  Mixed-kind overloads promote the real(4)
! argument to real(8) before comparing.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is true only for NaN: return y when x is NaN,
! x when y is NaN, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! atan(n + 1) - atan(n), double precision.
! NOTE(review): both arctangents approach pi/2 as n grows, so the
! subtraction cancels; the atan2-based rewrites later in this report
! avoid that loss of accuracy.
real(8) function code(n)
use fmin_fmax_functions
real(8), intent (in) :: n
code = atan((n + 1.0d0)) - atan(n)
end function
/** atan(N + 1) - atan(N), evaluated directly in double precision. */
public static double code(double N) {
    double upper = Math.atan(N + 1.0);
    double lower = Math.atan(N);
    return upper - lower;
}
def code(N):
    """Return atan(N + 1) - atan(N), evaluated directly."""
    upper = math.atan(N + 1.0)
    lower = math.atan(N)
    return upper - lower
# atan(N + 1) - atan(N), evaluated directly in Float64.
function code(N)
    upper = atan(Float64(N + 1.0))
    lower = atan(N)
    return Float64(upper - lower)
end
% atan(N + 1) - atan(N), evaluated directly.
function tmp = code(N)
    upper = atan(N + 1.0);
    lower = atan(N);
    tmp = upper - lower;
end
(* atan(N + 1) - atan(N), with each step rounded to $MachinePrecision.  NOTE(review): the pattern variable N collides with the built-in N[] used for rounding -- confirm this evaluates as intended. *)
code[N_] := N[(N[ArcTan[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[ArcTan[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(N + 1\right) - \tan^{-1} N
\end{array}
(FPCore (N) :precision binary64 (atan2 1.0 (fma (+ 1.0 N) N 1.0)))
double code(double N) {
return atan2(1.0, fma((1.0 + N), N, 1.0));
}
# Two-argument atan of 1 over fma(1 + N, N, 1) -- rewrite of atan(N+1) - atan(N).
function code(N)
    denom = fma(Float64(1.0 + N), N, 1.0)
    return atan(1.0, denom)
end
(* ArcTan of 1/((1 + N)*N + 1): rewrite of atan(N + 1) - atan(N).  NOTE(review): pattern variable N shadows the built-in N[] -- confirm evaluation. *)
code[N_] := N[ArcTan[1.0 / N[(N[(1.0 + N), $MachinePrecision] * N + 1.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\mathsf{fma}\left(1 + N, N, 1\right)}
\end{array}
Initial program 7.7%
lift--.f64N/A
lift-atan.f64N/A
lift-atan.f64N/A
diff-atanN/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
+-inversesN/A
metadata-evalN/A
+-commutativeN/A
lift-+.f64N/A
distribute-lft1-inN/A
associate-+r+N/A
+-commutativeN/A
metadata-evalN/A
*-rgt-identityN/A
lower-atan2.f64N/A
metadata-evalN/A
*-rgt-identityN/A
+-commutativeN/A
associate-+r+N/A
distribute-lft1-inN/A
lift-+.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
(FPCore (N) :precision binary64 (atan2 1.0 (+ (fma N N 1.0) N)))
double code(double N) {
return atan2(1.0, (fma(N, N, 1.0) + N));
}
# Two-argument atan of 1 over fma(N, N, 1) + N.
function code(N)
    denom = Float64(fma(N, N, 1.0) + N)
    return atan(1.0, denom)
end
(* ArcTan of 1/((N*N + 1) + N).  NOTE(review): pattern variable N shadows the built-in N[] -- confirm evaluation. *)
code[N_] := N[ArcTan[1.0 / N[(N[(N * N + 1.0), $MachinePrecision] + N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, N, 1\right) + N}
\end{array}
Initial program 7.7%
lift--.f64N/A
lift-atan.f64N/A
lift-atan.f64N/A
diff-atanN/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
+-inversesN/A
metadata-evalN/A
+-commutativeN/A
lift-+.f64N/A
distribute-lft1-inN/A
associate-+r+N/A
+-commutativeN/A
metadata-evalN/A
*-rgt-identityN/A
lower-atan2.f64N/A
metadata-evalN/A
*-rgt-identityN/A
+-commutativeN/A
associate-+r+N/A
distribute-lft1-inN/A
lift-+.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
lift-fma.f64N/A
+-commutativeN/A
metadata-evalN/A
*-rgt-identityN/A
metadata-evalN/A
*-rgt-identityN/A
lift-+.f64N/A
+-commutativeN/A
distribute-lft1-inN/A
associate-+r+N/A
lower-+.f64N/A
+-commutativeN/A
lower-fma.f6499.6
Applied rewrites99.6%
(FPCore (N) :precision binary64 (atan2 1.0 (fma N N N)))
double code(double N) {
return atan2(1.0, fma(N, N, N));
}
# Two-argument atan of 1 over fma(N, N, N).
function code(N)
    denom = fma(N, N, N)
    return atan(1.0, denom)
end
(* ArcTan of 1/(N*N + N).  NOTE(review): pattern variable N shadows the built-in N[] -- confirm evaluation. *)
code[N_] := N[ArcTan[1.0 / N[(N * N + N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, N, N\right)}
\end{array}
Initial program 7.7%
lift--.f64N/A
lift-atan.f64N/A
lift-atan.f64N/A
diff-atanN/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
+-inversesN/A
metadata-evalN/A
+-commutativeN/A
lift-+.f64N/A
distribute-lft1-inN/A
associate-+r+N/A
+-commutativeN/A
metadata-evalN/A
*-rgt-identityN/A
lower-atan2.f64N/A
metadata-evalN/A
*-rgt-identityN/A
+-commutativeN/A
associate-+r+N/A
distribute-lft1-inN/A
lift-+.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
Taylor expanded in N around inf
+-commutativeN/A
distribute-lft-inN/A
unpow2N/A
associate-*l*N/A
rgt-mult-inverseN/A
*-rgt-identityN/A
*-rgt-identityN/A
+-commutativeN/A
unpow2N/A
lower-fma.f6497.2
Applied rewrites97.2%
(FPCore (N) :precision binary64 (atan2 1.0 (* N N)))
double code(double N) {
return atan2(1.0, (N * N));
}
! NaN-aware fmax/fmin, mirroring the C library fmax/fmin semantics:
! when exactly one argument is NaN the other argument is returned,
! whereas the behavior of the Fortran max/min intrinsics on NaN is
! processor-dependent.  Mixed-kind overloads promote the real(4)
! argument to real(8) before comparing.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is true only for NaN: return y when x is NaN,
! x when y is NaN, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Approximates atan(n + 1) - atan(n) as atan2(1, n*n)
! (Taylor expansion of the exact rewrite around n = inf).
real(8) function code(n)
use fmin_fmax_functions
real(8), intent (in) :: n
code = atan2(1.0d0, (n * n))
end function
/** atan2(1, N*N): large-N approximation of atan(N + 1) - atan(N). */
public static double code(double N) {
    double denom = N * N;
    return Math.atan2(1.0, denom);
}
def code(N):
    """Return atan2(1, N*N): large-N approximation of atan(N+1) - atan(N)."""
    denom = N * N
    return math.atan2(1.0, denom)
# Two-argument atan of 1 over N*N (large-N approximation).
function code(N)
    denom = Float64(N * N)
    return atan(1.0, denom)
end
% atan2(1, N*N): large-N approximation of atan(N + 1) - atan(N).
function tmp = code(N)
    denom = N * N;
    tmp = atan2(1.0, denom);
end
(* ArcTan of 1/(N*N).  NOTE(review): pattern variable N shadows the built-in N[] -- confirm evaluation. *)
code[N_] := N[ArcTan[1.0 / N[(N * N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{N \cdot N}
\end{array}
Initial program 7.7%
lift--.f64N/A
lift-atan.f64N/A
lift-atan.f64N/A
diff-atanN/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
+-inversesN/A
metadata-evalN/A
+-commutativeN/A
lift-+.f64N/A
distribute-lft1-inN/A
associate-+r+N/A
+-commutativeN/A
metadata-evalN/A
*-rgt-identityN/A
lower-atan2.f64N/A
metadata-evalN/A
*-rgt-identityN/A
+-commutativeN/A
associate-+r+N/A
distribute-lft1-inN/A
lift-+.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
Taylor expanded in N around inf
unpow2N/A
lower-*.f6493.8
Applied rewrites93.8%
(FPCore (N) :precision binary64 (atan2 1.0 (- N -1.0)))
double code(double N) {
return atan2(1.0, (N - -1.0));
}
! NaN-aware fmax/fmin, mirroring the C library fmax/fmin semantics:
! when exactly one argument is NaN the other argument is returned,
! whereas the behavior of the Fortran max/min intrinsics on NaN is
! processor-dependent.  Mixed-kind overloads promote the real(4)
! argument to real(8) before comparing.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is true only for NaN: return y when x is NaN,
! x when y is NaN, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Approximates atan(n + 1) - atan(n) as atan2(1, n + 1)
! (Taylor expansion around n = 0); n - (-1) is kept as generated.
real(8) function code(n)
use fmin_fmax_functions
real(8), intent (in) :: n
code = atan2(1.0d0, (n - (-1.0d0)))
end function
/** atan2(1, N - -1): small-N approximation of atan(N + 1) - atan(N). */
public static double code(double N) {
    double denom = N - -1.0;
    return Math.atan2(1.0, denom);
}
def code(N):
    """Return atan2(1, N - -1): small-N approximation of atan(N+1) - atan(N)."""
    denom = N - -1.0
    return math.atan2(1.0, denom)
# Two-argument atan of 1 over N - -1 (small-N approximation).
function code(N)
    denom = Float64(N - -1.0)
    return atan(1.0, denom)
end
% atan2(1, N - -1): small-N approximation of atan(N + 1) - atan(N).
function tmp = code(N)
    denom = N - -1.0;
    tmp = atan2(1.0, denom);
end
(* ArcTan of 1/(N - -1).  NOTE(review): pattern variable N shadows the built-in N[] -- confirm evaluation. *)
code[N_] := N[ArcTan[1.0 / N[(N - -1.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{N - -1}
\end{array}
Initial program 7.7%
lift--.f64N/A
lift-atan.f64N/A
lift-atan.f64N/A
diff-atanN/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
+-inversesN/A
metadata-evalN/A
+-commutativeN/A
lift-+.f64N/A
distribute-lft1-inN/A
associate-+r+N/A
+-commutativeN/A
metadata-evalN/A
*-rgt-identityN/A
lower-atan2.f64N/A
metadata-evalN/A
*-rgt-identityN/A
+-commutativeN/A
associate-+r+N/A
distribute-lft1-inN/A
lift-+.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
Taylor expanded in N around 0
*-rgt-identityN/A
+-commutativeN/A
distribute-lft1-inN/A
rgt-mult-inverseN/A
*-rgt-identityN/A
*-commutativeN/A
cancel-sign-subN/A
distribute-lft-neg-outN/A
lft-mult-inverseN/A
metadata-evalN/A
lower--.f647.9
Applied rewrites7.9%
(FPCore (N) :precision binary64 (atan2 1.0 1.0))
/* Constant approximation atan2(1, 1) == pi/4; N is ignored. */
double code(double N) {
    (void)N; /* unused by design in this degenerate alternative */
    return atan2(1.0, 1.0);
}
! NaN-aware fmax/fmin, mirroring the C library fmax/fmin semantics:
! when exactly one argument is NaN the other argument is returned,
! whereas the behavior of the Fortran max/min intrinsics on NaN is
! processor-dependent.  Mixed-kind overloads promote the real(4)
! argument to real(8) before comparing.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic fmax over all real(4)/real(8) argument combinations.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic fmin over all real(4)/real(8) argument combinations.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! x /= x is true only for NaN: return y when x is NaN,
! x when y is NaN, otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Constant approximation atan2(1, 1) = pi/4 (Taylor expansion around
! n = 0); the argument n is unused.
real(8) function code(n)
use fmin_fmax_functions
real(8), intent (in) :: n
code = atan2(1.0d0, 1.0d0)
end function
/** Constant approximation atan2(1, 1) == pi/4; N is ignored. */
public static double code(double N) {
    double y = 1.0;
    double x = 1.0;
    return Math.atan2(y, x);
}
def code(N):
    """Return the constant atan2(1, 1) == pi/4; N is ignored."""
    return math.atan2(1.0, 1.0)
# Constant atan(1, 1) == pi/4; N is ignored.
function code(N)
    return atan(1.0, 1.0)
end
% Constant atan2(1, 1) == pi/4; N is ignored.
function tmp = code(N)
    tmp = atan2(1.0, 1.0);
end
(* Constant ArcTan[1/1] == pi/4; the argument N is ignored. *)
code[N_] := N[ArcTan[1.0 / 1.0], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{1}
\end{array}
Initial program 7.7%
lift--.f64N/A
lift-atan.f64N/A
lift-atan.f64N/A
diff-atanN/A
lift-+.f64N/A
+-commutativeN/A
associate--l+N/A
+-inversesN/A
metadata-evalN/A
+-commutativeN/A
lift-+.f64N/A
distribute-lft1-inN/A
associate-+r+N/A
+-commutativeN/A
metadata-evalN/A
*-rgt-identityN/A
lower-atan2.f64N/A
metadata-evalN/A
*-rgt-identityN/A
+-commutativeN/A
associate-+r+N/A
distribute-lft1-inN/A
lift-+.f64N/A
lower-fma.f6499.6
Applied rewrites99.6%
Taylor expanded in N around 0
Applied rewrites6.3%
(FPCore (N) :precision binary64 (atan2 1.0 (fma N (+ 1.0 N) 1.0)))
double code(double N) {
return atan2(1.0, fma(N, (1.0 + N), 1.0));
}
# Two-argument atan of 1 over fma(N, 1 + N, 1).
function code(N)
    denom = fma(N, Float64(1.0 + N), 1.0)
    return atan(1.0, denom)
end
(* ArcTan of 1/(N*(1 + N) + 1).  NOTE(review): pattern variable N shadows the built-in N[] -- confirm evaluation. *)
code[N_] := N[ArcTan[1.0 / N[(N * N[(1.0 + N), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, 1 + N, 1\right)}
\end{array}
herbie shell --seed 2024359
(FPCore (N)
:name "2atan (example 3.5)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+100))
:alt
(! :herbie-platform default (atan2 1 (fma N (+ 1 N) 1)))
(- (atan (+ N 1.0)) (atan N)))