
(FPCore (N) :precision binary64 (- (atan (+ N 1.0)) (atan N)))
double code(double N) {
    return atan((N + 1.0)) - atan(N);
}
real(8) function code(n)
    real(8), intent (in) :: n
    code = atan((n + 1.0d0)) - atan(n)
end function
public static double code(double N) {
    return Math.atan((N + 1.0)) - Math.atan(N);
}
def code(N): return math.atan((N + 1.0)) - math.atan(N)
function code(N) return Float64(atan(Float64(N + 1.0)) - atan(N)) end
function tmp = code(N)
    tmp = atan((N + 1.0)) - atan(N);
end
code[N_] := N[(N[ArcTan[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[ArcTan[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(N + 1\right) - \tan^{-1} N
\end{array}
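For large N, atan(N + 1.0) and atan(N) both approach π/2, so the subtraction cancels almost every significant bit; that cancellation is what Herbie's alternatives below repair. A minimal Python sketch (not part of the Herbie report) makes the loss visible. The reference form relies on the arctangent difference identity atan(x) − atan(y) = atan((x − y)/(1 + x·y)), valid when x·y > −1; with x = N + 1 and y = N we have x·y = N·(N + 1) ≥ −1/4, so it applies for every real N.

```python
import math

# Sketch (not Herbie output): compare the original expression with a
# well-conditioned rewrite based on the arctangent difference identity.

def naive(N):
    return math.atan(N + 1.0) - math.atan(N)      # cancels for large N

def rewritten(N):
    return math.atan2(1.0, 1.0 + N * (N + 1.0))   # no cancellation

for N in (1.0, 1e4, 1e8):
    print(f"N={N:g}\tnaive={naive(N):.17g}\trewritten={rewritten(N):.17g}")
```

At N = 1e8 the naive form retains essentially no correct bits (the true result, about 1e−16, is smaller than the spacing between doubles near π/2), while the rewritten form stays accurate to roughly an ulp.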
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| 1 | 99.1% | — |
| 2 | 97.7% | — |
| 3 | 98.4% | — |
| 4 | 99.1% | — |
| 5 | 51.3% | — |
Initial program:
(FPCore (N) :precision binary64 (- (atan (+ N 1.0)) (atan N)))
double code(double N) {
    return atan((N + 1.0)) - atan(N);
}
real(8) function code(n)
    real(8), intent (in) :: n
    code = atan((n + 1.0d0)) - atan(n)
end function
public static double code(double N) {
    return Math.atan((N + 1.0)) - Math.atan(N);
}
def code(N): return math.atan((N + 1.0)) - math.atan(N)
function code(N) return Float64(atan(Float64(N + 1.0)) - atan(N)) end
function tmp = code(N)
    tmp = atan((N + 1.0)) - atan(N);
end
code[N_] := N[(N[ArcTan[N[(N + 1.0), $MachinePrecision]], $MachinePrecision] - N[ArcTan[N], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(N + 1\right) - \tan^{-1} N
\end{array}
Alternative 1:
(FPCore (N) :precision binary64 (atan2 1.0 (+ (+ 1.0 N) (* N N))))
double code(double N) {
    return atan2(1.0, ((1.0 + N) + (N * N)));
}
real(8) function code(n)
    real(8), intent (in) :: n
    code = atan2(1.0d0, ((1.0d0 + n) + (n * n)))
end function
public static double code(double N) {
    return Math.atan2(1.0, ((1.0 + N) + (N * N)));
}
def code(N): return math.atan2(1.0, ((1.0 + N) + (N * N)))
function code(N) return atan(1.0, Float64(Float64(1.0 + N) + Float64(N * N))) end
function tmp = code(N)
    tmp = atan2(1.0, ((1.0 + N) + (N * N)));
end
code[N_] := N[ArcTan[1.0 / N[(N[(1.0 + N), $MachinePrecision] + N[(N * N), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{\left(1 + N\right) + N \cdot N}
\end{array}
Derivation:
Initial program 76.7%
diff-atan 77.2%
associate--l+ 77.2%
+-commutative 77.2%
*-commutative 77.2%
fma-def 77.2%
Applied egg-rr 77.2%
+-commutative 77.2%
associate-+l- 99.1%
+-inverses 99.1%
metadata-eval 99.1%
+-commutative 99.1%
Simplified 99.1%
Taylor expanded in N around 0 99.1%
associate-+r+ 99.1%
unpow2 99.1%
Simplified 99.1%
Final simplification 99.1%
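The diff-atan step that drives this derivation is the arctangent difference identity; spelled out in the notation used above (the side condition holds for every real N because N · (N + 1) ≥ −1/4):

\begin{array}{l}
\tan^{-1} x - \tan^{-1} y = \tan^{-1} \left(\frac{x - y}{1 + x \cdot y}\right), \quad x \cdot y > -1\\
\tan^{-1} \left(N + 1\right) - \tan^{-1} N = \tan^{-1} \left(\frac{1}{1 + N + N \cdot N}\right)
\end{array}

The associate/commutative/inverses steps that follow only rearrange the denominator, and atan2(1.0, d) stands in for atan(1.0 / d) so the explicit division is avoided.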
Alternative 2:
(FPCore (N) :precision binary64 (if (or (<= N -1.0) (not (<= N 1.0))) (atan2 1.0 (* N N)) (atan2 1.0 1.0)))
double code(double N) {
    double tmp;
    if ((N <= -1.0) || !(N <= 1.0)) {
        tmp = atan2(1.0, (N * N));
    } else {
        tmp = atan2(1.0, 1.0);
    }
    return tmp;
}
real(8) function code(n)
    real(8), intent (in) :: n
    real(8) :: tmp
    if ((n <= (-1.0d0)) .or. (.not. (n <= 1.0d0))) then
        tmp = atan2(1.0d0, (n * n))
    else
        tmp = atan2(1.0d0, 1.0d0)
    end if
    code = tmp
end function
public static double code(double N) {
    double tmp;
    if ((N <= -1.0) || !(N <= 1.0)) {
        tmp = Math.atan2(1.0, (N * N));
    } else {
        tmp = Math.atan2(1.0, 1.0);
    }
    return tmp;
}
def code(N):
    tmp = 0
    if (N <= -1.0) or not (N <= 1.0):
        tmp = math.atan2(1.0, (N * N))
    else:
        tmp = math.atan2(1.0, 1.0)
    return tmp
function code(N)
    tmp = 0.0
    if ((N <= -1.0) || !(N <= 1.0))
        tmp = atan(1.0, Float64(N * N));
    else
        tmp = atan(1.0, 1.0);
    end
    return tmp
end
function tmp_2 = code(N)
    tmp = 0.0;
    if ((N <= -1.0) || ~((N <= 1.0)))
        tmp = atan2(1.0, (N * N));
    else
        tmp = atan2(1.0, 1.0);
    end
    tmp_2 = tmp;
end
code[N_] := If[Or[LessEqual[N, -1.0], N[Not[LessEqual[N, 1.0]], $MachinePrecision]], N[ArcTan[1.0 / N[(N * N), $MachinePrecision]], $MachinePrecision], N[ArcTan[1.0 / 1.0], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq -1 \lor \neg \left(N \leq 1\right):\\
\;\;\;\;\tan^{-1}_* \frac{1}{N \cdot N}\\
\mathbf{else}:\\
\;\;\;\;\tan^{-1}_* \frac{1}{1}\\
\end{array}
\end{array}
Derivation:
if N < -1 or 1 < N:
Initial program 53.1%
diff-atan 54.0%
associate--l+ 54.0%
+-commutative 54.0%
*-commutative 54.0%
fma-def 54.0%
Applied egg-rr 54.0%
+-commutative 54.0%
associate-+l- 98.3%
+-inverses 98.3%
metadata-eval 98.3%
+-commutative 98.3%
Simplified 98.3%
Taylor expanded in N around inf 97.6%
unpow2 97.6%
Simplified 97.6%
if -1 < N < 1:
Initial program 100.0%
diff-atan 100.0%
associate--l+ 99.9%
+-commutative 99.9%
*-commutative 99.9%
fma-def 99.9%
Applied egg-rr 99.9%
+-commutative 99.9%
associate-+l- 100.0%
+-inverses 100.0%
metadata-eval 100.0%
+-commutative 100.0%
Simplified 100.0%
Taylor expanded in N around 0 97.7%
Final simplification 97.7%
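Alternative 2 keeps only the leading Taylor term in each regime: around infinity, 1/(1 + N + N²) ≈ 1/N², and around 0 the expression collapses to the constant atan(1) = π/4. A rough Python sketch of the trade-off, using alternative 1's well-conditioned form as the reference (my choice of reference, not something the report computes this way):

```python
import math

def alt1(N):
    # alternative 1: accurate across the whole range, used as reference
    return math.atan2(1.0, (1.0 + N) + N * N)

def alt2(N):
    # alternative 2: leading Taylor term in each regime
    if N <= -1.0 or not (N <= 1.0):
        return math.atan2(1.0, N * N)   # expansion around infinity
    return math.atan2(1.0, 1.0)         # expansion around 0: pi/4

for N in (0.0, 0.5, 2.0, 1e3, 1e8):
    print(f"N={N:g}\talt2={alt2(N):.9g}\treference={alt1(N):.9g}")
```

The approximation is close once |N| is large but noticeably off near the branch point (N = 2 above), which is what alternative 3's shifted thresholds and first-order middle branch address.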
Alternative 3:
(FPCore (N) :precision binary64 (if (or (<= N -0.62) (not (<= N 1.6))) (atan2 1.0 (* N N)) (atan2 1.0 (+ 1.0 N))))
double code(double N) {
    double tmp;
    if ((N <= -0.62) || !(N <= 1.6)) {
        tmp = atan2(1.0, (N * N));
    } else {
        tmp = atan2(1.0, (1.0 + N));
    }
    return tmp;
}
real(8) function code(n)
    real(8), intent (in) :: n
    real(8) :: tmp
    if ((n <= (-0.62d0)) .or. (.not. (n <= 1.6d0))) then
        tmp = atan2(1.0d0, (n * n))
    else
        tmp = atan2(1.0d0, (1.0d0 + n))
    end if
    code = tmp
end function
public static double code(double N) {
    double tmp;
    if ((N <= -0.62) || !(N <= 1.6)) {
        tmp = Math.atan2(1.0, (N * N));
    } else {
        tmp = Math.atan2(1.0, (1.0 + N));
    }
    return tmp;
}
def code(N):
    tmp = 0
    if (N <= -0.62) or not (N <= 1.6):
        tmp = math.atan2(1.0, (N * N))
    else:
        tmp = math.atan2(1.0, (1.0 + N))
    return tmp
function code(N)
    tmp = 0.0
    if ((N <= -0.62) || !(N <= 1.6))
        tmp = atan(1.0, Float64(N * N));
    else
        tmp = atan(1.0, Float64(1.0 + N));
    end
    return tmp
end
function tmp_2 = code(N)
    tmp = 0.0;
    if ((N <= -0.62) || ~((N <= 1.6)))
        tmp = atan2(1.0, (N * N));
    else
        tmp = atan2(1.0, (1.0 + N));
    end
    tmp_2 = tmp;
end
code[N_] := If[Or[LessEqual[N, -0.62], N[Not[LessEqual[N, 1.6]], $MachinePrecision]], N[ArcTan[1.0 / N[(N * N), $MachinePrecision]], $MachinePrecision], N[ArcTan[1.0 / N[(1.0 + N), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;N \leq -0.62 \lor \neg \left(N \leq 1.6\right):\\
\;\;\;\;\tan^{-1}_* \frac{1}{N \cdot N}\\
\mathbf{else}:\\
\;\;\;\;\tan^{-1}_* \frac{1}{1 + N}\\
\end{array}
\end{array}
Derivation:
if N < -0.619999999999999996 or 1.6000000000000001 < N:
Initial program 52.8%
diff-atan 53.6%
associate--l+ 53.6%
+-commutative 53.6%
*-commutative 53.6%
fma-def 53.6%
Applied egg-rr 53.6%
+-commutative 53.6%
associate-+l- 98.3%
+-inverses 98.3%
metadata-eval 98.3%
+-commutative 98.3%
Simplified 98.3%
Taylor expanded in N around inf 98.3%
unpow2 98.3%
Simplified 98.3%
if -0.619999999999999996 < N < 1.6000000000000001:
Initial program 100.0%
diff-atan 100.0%
associate--l+ 99.9%
+-commutative 99.9%
*-commutative 99.9%
fma-def 99.9%
Applied egg-rr 99.9%
+-commutative 99.9%
associate-+l- 100.0%
+-inverses 100.0%
metadata-eval 100.0%
+-commutative 100.0%
Simplified 100.0%
Taylor expanded in N around 0 98.6%
Final simplification 98.4%
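The widened middle branch comes from the first-order Taylor expansion around 0; the two denominators agree to first order, differing only at O(N²):

\begin{array}{l}
\frac{1}{1 + N + N \cdot N} = 1 - N + O\left(N^{2}\right) = \frac{1}{1 + N} + O\left(N^{2}\right)
\end{array}

Shifting the split points to −0.62 and 1.6 rebalances where each approximation is used, nudging overall accuracy from alternative 2's 97.7% to 98.4%.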
Alternative 4:
(FPCore (N) :precision binary64 (atan2 1.0 (+ N (+ 1.0 (* N N)))))
double code(double N) {
    return atan2(1.0, (N + (1.0 + (N * N))));
}
real(8) function code(n)
    real(8), intent (in) :: n
    code = atan2(1.0d0, (n + (1.0d0 + (n * n))))
end function
public static double code(double N) {
    return Math.atan2(1.0, (N + (1.0 + (N * N))));
}
def code(N): return math.atan2(1.0, (N + (1.0 + (N * N))))
function code(N) return atan(1.0, Float64(N + Float64(1.0 + Float64(N * N)))) end
function tmp = code(N)
    tmp = atan2(1.0, (N + (1.0 + (N * N))));
end
code[N_] := N[ArcTan[1.0 / N[(N + N[(1.0 + N[(N * N), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{N + \left(1 + N \cdot N\right)}
\end{array}
Derivation:
Initial program 76.7%
diff-atan 77.2%
associate--l+ 77.2%
+-commutative 77.2%
*-commutative 77.2%
fma-def 77.2%
Applied egg-rr 77.2%
+-commutative 77.2%
associate-+l- 99.1%
+-inverses 99.1%
metadata-eval 99.1%
+-commutative 99.1%
Simplified 99.1%
Taylor expanded in N around 0 99.1%
associate-+r+ 99.1%
unpow2 99.1%
Simplified 99.1%
Taylor expanded in N around 0 99.1%
unpow2 99.1%
Simplified 99.1%
Final simplification 99.1%
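Alternative 4 is the same rewrite as alternative 1 up to re-association of the denominator: (1 + N) + N·N versus N + (1 + N·N). A quick check (my own sketch, not from the report); the two orderings are mathematically identical, though rounding can make the binary64 denominators differ in the last bits for some inputs:

```python
import math

# Compare the two associations of the denominator from alternatives 1 and 4.
for N in (0.5, 3.0, 12345.678, 1e8):
    d1 = (1.0 + N) + N * N     # alternative 1
    d4 = N + (1.0 + N * N)     # alternative 4
    print(f"N={N:g}\t{math.atan2(1.0, d1):.17g}\t{math.atan2(1.0, d4):.17g}\tequal={d1 == d4}")
```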
Alternative 5:
(FPCore (N) :precision binary64 (atan2 1.0 1.0))
double code(double N) {
    return atan2(1.0, 1.0);
}
real(8) function code(n)
    real(8), intent (in) :: n
    code = atan2(1.0d0, 1.0d0)
end function
public static double code(double N) {
    return Math.atan2(1.0, 1.0);
}
def code(N): return math.atan2(1.0, 1.0)
function code(N) return atan(1.0, 1.0) end
function tmp = code(N)
    tmp = atan2(1.0, 1.0);
end
code[N_] := N[ArcTan[1.0 / 1.0], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1}_* \frac{1}{1}
\end{array}
Derivation:
Initial program 76.7%
diff-atan 77.2%
associate--l+ 77.2%
+-commutative 77.2%
*-commutative 77.2%
fma-def 77.2%
Applied egg-rr 77.2%
+-commutative 77.2%
associate-+l- 99.1%
+-inverses 99.1%
metadata-eval 99.1%
+-commutative 99.1%
Simplified 99.1%
Taylor expanded in N around 0 51.3%
Final simplification 51.3%
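Alternative 5 truncates the Taylor series at the zeroth order, keeping only the value of the expression at N = 0, which is why the derivation's accuracy plunges to 51.3% the moment all N-dependence is discarded:

\begin{array}{l}
\left. \tan^{-1} \left(\frac{1}{1 + N + N \cdot N}\right) \right|_{N = 0} = \tan^{-1} 1 = \frac{\pi}{4}
\end{array}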
Developer target (from the :herbie-target annotation below):
(FPCore (N) :precision binary64 (atan (/ 1.0 (+ 1.0 (* N (+ N 1.0))))))
double code(double N) {
    return atan((1.0 / (1.0 + (N * (N + 1.0)))));
}
real(8) function code(n)
    real(8), intent (in) :: n
    code = atan((1.0d0 / (1.0d0 + (n * (n + 1.0d0)))))
end function
public static double code(double N) {
    return Math.atan((1.0 / (1.0 + (N * (N + 1.0)))));
}
def code(N): return math.atan((1.0 / (1.0 + (N * (N + 1.0)))))
function code(N) return atan(Float64(1.0 / Float64(1.0 + Float64(N * Float64(N + 1.0))))) end
function tmp = code(N)
    tmp = atan((1.0 / (1.0 + (N * (N + 1.0)))));
end
code[N_] := N[ArcTan[N[(1.0 / N[(1.0 + N[(N * N[(N + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\tan^{-1} \left(\frac{1}{1 + N \cdot \left(N + 1\right)}\right)
\end{array}
To reproduce this result, pass the FPCore below to the Herbie shell:
herbie shell --seed 2023279
(FPCore (N)
:name "2atan (example 3.5)"
:precision binary64
:herbie-target
(atan (/ 1.0 (+ 1.0 (* N (+ N 1.0)))))
(- (atan (+ N 1.0)) (atan N)))