
(FPCore (N) :precision binary64 (- (atan (+ N 1.0)) (atan N)))
double code(double N) {
return atan((N + 1.0)) - atan(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = atan((n + 1.0d0)) - atan(n)
end function
public static double code(double N) {
return Math.atan((N + 1.0)) - Math.atan(N);
}
def code(N): return math.atan((N + 1.0)) - math.atan(N)
function code(N) return Float64(atan(Float64(N + 1.0)) - atan(N)) end
function tmp = code(N) tmp = atan((N + 1.0)) - atan(N); end
code[N_] := N[(N[ArcTan[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[ArcTan[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(N + 1\right) - \tan^{-1} N
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (N) :precision binary64 (- (atan (+ N 1.0)) (atan N)))
double code(double N) {
return atan((N + 1.0)) - atan(N);
}
real(8) function code(n)
real(8), intent (in) :: n
code = atan((n + 1.0d0)) - atan(n)
end function
public static double code(double N) {
return Math.atan((N + 1.0)) - Math.atan(N);
}
def code(N): return math.atan((N + 1.0)) - math.atan(N)
function code(N) return Float64(atan(Float64(N + 1.0)) - atan(N)) end
function tmp = code(N) tmp = atan((N + 1.0)) - atan(N); end
code[N_] := N[(N[ArcTan[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[ArcTan[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(N + 1\right) - \tan^{-1} N
\end{array}
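A sketch of the underlying problem and fix, implied by the rewrites below rather than stated explicitly in the report: for N well above 1, both arctangents approach π/2, so the subtraction cancels most of their significant bits; this is why the derivations below rate the initial program at only 9.5% accuracy. Every alternative avoids the subtraction through the arctangent difference identity, which is exact here because N(N + 1) > 0 under the precondition N > 1:

\tan^{-1} \left(N + 1\right) - \tan^{-1} N = \tan^{-1} \frac{\left(N + 1\right) - N}{1 + N \cdot \left(N + 1\right)} = \tan^{-1} \frac{1}{N^2 + N + 1}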
(FPCore (N)
:precision binary64
(let* ((t_0
(fma
(atan N)
(atan2 (+ N (+ N 1.0)) (- 1.0 (fma N N N)))
(pow (atan (+ N 1.0)) 2.0))))
(* t_0 (/ (atan2 1.0 (fma N N (+ N 1.0))) t_0))))
double code(double N) {
double t_0 = fma(atan(N), atan2((N + (N + 1.0)), (1.0 - fma(N, N, N))), pow(atan((N + 1.0)), 2.0));
return t_0 * (atan2(1.0, fma(N, N, (N + 1.0))) / t_0);
}
function code(N)
    t_0 = fma(atan(N), atan(Float64(N + Float64(N + 1.0)), Float64(1.0 - fma(N, N, N))), (atan(Float64(N + 1.0)) ^ 2.0))
    return Float64(t_0 * Float64(atan(1.0, fma(N, N, Float64(N + 1.0))) / t_0))
end
code[N_] := Block[{t$95$0 = N[(N[ArcTan[N], $MachinePrecision] * N[ArcTan[N[(N + N[(N + 1.0), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N * N + N), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + N[Power[N[ArcTan[N[(N + 1.0), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]}, N[(t$95$0 * N[(N[ArcTan[1.0 / N[(N * N + N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\tan^{-1} N, \tan^{-1}_* \frac{N + \left(N + 1\right)}{1 - \mathsf{fma}\left(N, N, N\right)}, {\tan^{-1} \left(N + 1\right)}^{2}\right)\\
t_0 \cdot \frac{\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, N, N + 1\right)}}{t_0}
\end{array}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 9.5% |
| Applied egg-rr | 99.5% |
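In this first alternative the let-bound factor t_0 appears both as a multiplier and as a divisor, so in exact arithmetic it cancels and the value reduces to the atan2 form used directly by the next alternative (a sketch, ignoring rounding and the cost of computing t_0):

t_0 \cdot \frac{\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, N, N + 1\right)}}{t_0} = \tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, N, N + 1\right)}, \qquad t_0 \neq 0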
(FPCore (N) :precision binary64 (atan2 1.0 (fma N N (+ N 1.0))))
double code(double N) {
return atan2(1.0, fma(N, N, (N + 1.0)));
}
function code(N) return atan(1.0, fma(N, N, Float64(N + 1.0))) end
code[N_] := N[ArcTan[1.0 / N[(N * N + N[(N + 1.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, N, N + 1\right)}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 9.5% |
| lift-+.f64 | N/A |
| diff-atan | N/A |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| associate--l+ | N/A |
| +-inverses | N/A |
| metadata-eval | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| distribute-lft1-in | N/A |
| associate-+r+ | N/A |
| +-commutative | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| lower-atan2.f64 | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| lower-fma.f64 | 99.5 |
| Applied egg-rr | 99.5% |
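To compare the original subtraction against this rewrite at a large input, a minimal C harness along the following lines can be used; the sample point N = 1e8 and the helper names original/rewritten are illustrative choices, not part of the report:

#include <math.h>
#include <stdio.h>

/* Original formulation: subtracts two nearly equal arctangents for large N. */
static double original(double N) {
    return atan(N + 1.0) - atan(N);
}

/* Rewritten formulation from the alternative above: no subtraction of close values. */
static double rewritten(double N) {
    return atan2(1.0, fma(N, N, N + 1.0));
}

int main(void) {
    double N = 1.0e8;  /* illustrative point inside the precondition 1 < N < 1e100 */
    printf("original : %.17g\n", original(N));
    printf("rewritten: %.17g\n", rewritten(N));
    return 0;
}

Compile against a C99 math library (for example `cc harness.c -lm`) and compare both values with a higher-precision reference of atan(N + 1) - atan(N).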
(FPCore (N) :precision binary64 (atan2 1.0 (fma N N N)))
double code(double N) {
return atan2(1.0, fma(N, N, N));
}
function code(N) return atan(1.0, fma(N, N, N)) end
code[N_] := N[ArcTan[1.0 / N[(N * N + N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, N, N\right)}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 9.5% |
| lift-+.f64 | N/A |
| diff-atan | N/A |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| associate--l+ | N/A |
| +-inverses | N/A |
| metadata-eval | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| distribute-lft1-in | N/A |
| associate-+r+ | N/A |
| +-commutative | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| lower-atan2.f64 | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| lower-fma.f64 | 99.5 |
| Applied egg-rr | 99.5% |
| Taylor expanded in N around inf | |
| distribute-lft-in | N/A |
| unpow2 | N/A |
| associate-*l* | N/A |
| rgt-mult-inverse | N/A |
| *-rgt-identity | N/A |
| *-rgt-identity | N/A |
| unpow2 | N/A |
| lower-fma.f64 | 95.6 |
| Simplified | 95.6% |
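Reading the derivation: the Taylor expansion around infinity discards the constant term of the denominator, so this alternative keeps only N² + N = fma(N, N, N); the table above shows accuracy dropping from 99.5% to 95.6% in exchange for the simpler expression. A sketch of the truncation (not a statement from the report):

N^2 + N + 1 \;\approx\; N^2 + N = \mathsf{fma}\left(N, N, N\right) \qquad \text{as } N \to \infty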
(FPCore (N) :precision binary64 (atan2 1.0 (* N N)))
double code(double N) {
return atan2(1.0, (N * N));
}
real(8) function code(n)
real(8), intent (in) :: n
code = atan2(1.0d0, (n * n))
end function
public static double code(double N) {
return Math.atan2(1.0, (N * N));
}
def code(N): return math.atan2(1.0, (N * N))
function code(N) return atan(1.0, Float64(N * N)) end
function tmp = code(N) tmp = atan2(1.0, (N * N)); end
code[N_] := N[ArcTan[1.0 / N[(N * N), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{N \cdot N}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 9.5% |
| lift-+.f64 | N/A |
| diff-atan | N/A |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| associate--l+ | N/A |
| +-inverses | N/A |
| metadata-eval | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| distribute-lft1-in | N/A |
| associate-+r+ | N/A |
| +-commutative | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| lower-atan2.f64 | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| lower-fma.f64 | 99.5 |
| Applied egg-rr | 99.5% |
| Taylor expanded in N around inf | |
| unpow2 | N/A |
| lower-*.f64 | 92.2 |
| Simplified | 92.2% |
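Here the expansion around infinity keeps only the leading term, so the denominator collapses to N · N; per the table above this costs a little more accuracy than the fma form (92.2% versus 95.6%). Sketch of the truncation:

N^2 + N + 1 \;\approx\; N^2 = N \cdot N \qquad \text{as } N \to \infty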
(FPCore (N) :precision binary64 (atan2 1.0 (+ N 1.0)))
double code(double N) {
return atan2(1.0, (N + 1.0));
}
real(8) function code(n)
real(8), intent (in) :: n
code = atan2(1.0d0, (n + 1.0d0))
end function
public static double code(double N) {
return Math.atan2(1.0, (N + 1.0));
}
def code(N): return math.atan2(1.0, (N + 1.0))
function code(N) return atan(1.0, Float64(N + 1.0)) end
function tmp = code(N) tmp = atan2(1.0, (N + 1.0)); end
code[N_] := N[ArcTan[1.0 / N[(N + 1.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{N + 1}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 9.5% |
| lift-+.f64 | N/A |
| diff-atan | N/A |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| associate--l+ | N/A |
| +-inverses | N/A |
| metadata-eval | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| distribute-lft1-in | N/A |
| associate-+r+ | N/A |
| +-commutative | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| lower-atan2.f64 | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| lower-fma.f64 | 99.5 |
| Applied egg-rr | 99.5% |
| Taylor expanded in N around 0 | |
| +-commutative | N/A |
| lower-+.f64 | 8.0 |
| Simplified | 8.0% |
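This alternative instead expands around 0, keeping terms only up to first order, so the denominator becomes 1 + N. Since the precondition restricts inputs to N > 1, the truncation fits the sampled range poorly, consistent with the 8.0% figure in the table above. Sketch:

N^2 + N + 1 = 1 + N + N^2 \;\approx\; 1 + N \qquad \text{as } N \to 0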
(FPCore (N) :precision binary64 (atan2 1.0 1.0))
double code(double N) {
return atan2(1.0, 1.0);
}
real(8) function code(n)
real(8), intent (in) :: n
code = atan2(1.0d0, 1.0d0)
end function
public static double code(double N) {
return Math.atan2(1.0, 1.0);
}
def code(N): return math.atan2(1.0, 1.0)
function code(N) return atan(1.0, 1.0) end
function tmp = code(N) tmp = atan2(1.0, 1.0); end
code[N_] := N[ArcTan[1.0 / 1.0], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{1}
\end{array}
| Step | Accuracy |
|---|---|
| Initial program | 9.5% |
| lift-+.f64 | N/A |
| diff-atan | N/A |
| lift-+.f64 | N/A |
| +-commutative | N/A |
| associate--l+ | N/A |
| +-inverses | N/A |
| metadata-eval | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| distribute-lft1-in | N/A |
| associate-+r+ | N/A |
| +-commutative | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| lower-atan2.f64 | N/A |
| metadata-eval | N/A |
| *-rgt-identity | N/A |
| +-commutative | N/A |
| lift-+.f64 | N/A |
| lower-fma.f64 | 99.5 |
| Applied egg-rr | 99.5% |
| Taylor expanded in N around 0 | |
| Simplified | 6.5% |
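The final alternative takes the expansion around 0 to its extreme, keeping only the constant term, so the expression degenerates to a constant independent of N; with the precondition N > 1 this is a very coarse approximation (6.5% in the table above). Sketch:

\tan^{-1}_* \frac{1}{1} = \tan^{-1} 1 = \frac{\pi}{4}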
(FPCore (N) :precision binary64 (atan (/ 1.0 (+ 1.0 (* N (+ N 1.0))))))
double code(double N) {
return atan((1.0 / (1.0 + (N * (N + 1.0)))));
}
real(8) function code(n)
real(8), intent (in) :: n
code = atan((1.0d0 / (1.0d0 + (n * (n + 1.0d0)))))
end function
public static double code(double N) {
return Math.atan((1.0 / (1.0 + (N * (N + 1.0)))));
}
def code(N): return math.atan((1.0 / (1.0 + (N * (N + 1.0)))))
function code(N) return atan(Float64(1.0 / Float64(1.0 + Float64(N * Float64(N + 1.0))))) end
function tmp = code(N) tmp = atan((1.0 / (1.0 + (N * (N + 1.0))))); end
code[N_] := N[ArcTan[N[(1.0 / N[(1.0 + N[(N * N[(N + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(\frac{1}{1 + N \cdot \left(N + 1\right)}\right)
\end{array}
(FPCore (N) :precision binary64 (atan2 1.0 (fma N (+ 1.0 N) 1.0)))
double code(double N) {
return atan2(1.0, fma(N, (1.0 + N), 1.0));
}
function code(N) return atan(1.0, fma(N, Float64(1.0 + N), 1.0)) end
code[N_] := N[ArcTan[1.0 / N[(N * N[(1.0 + N), $MachinePrecision] + 1.0), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\mathsf{fma}\left(N, 1 + N, 1\right)}
\end{array}
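Both of these target programs are restatements of the difference identity sketched earlier: in real arithmetic their denominators agree with each other and with the fma-based alternatives above, differing only in how the polynomial N² + N + 1 is grouped for floating-point evaluation (so the roundings, not the values, differ):

1 + N \cdot \left(N + 1\right) = \mathsf{fma}\left(N, 1 + N, 1\right) = \mathsf{fma}\left(N, N, N + 1\right) = N^2 + N + 1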
herbie shell --seed 2024215
(FPCore (N)
:name "2atan (example 3.5)"
:precision binary64
:pre (and (> N 1.0) (< N 1e+100))
:alt
(! :herbie-platform default (atan (/ 1 (+ 1 (* N (+ N 1))))))
:alt
(! :herbie-platform default (atan2 1 (fma N (+ 1 N) 1)))
(- (atan (+ N 1.0)) (atan N)))
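To reproduce these results, run the herbie shell command above (the --seed value pins the random input sampling) and supply the FPCore expression that follows it as input; the :pre clause restricts sampling to 1 < N < 1e100, and the two :alt clauses record the developer-supplied target rewrites shown above.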