
(FPCore (x) :precision binary64 (let* ((t_0 (* (tan x) (tan x)))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) {
    double t_0 = tan(x) * tan(x);
    return (1.0 - t_0) / (1.0 + t_0);
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = tan(x) * tan(x)
    code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) {
    double t_0 = Math.tan(x) * Math.tan(x);
    return (1.0 - t_0) / (1.0 + t_0);
}
def code(x):
    t_0 = math.tan(x) * math.tan(x)
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = Float64(tan(x) * tan(x))
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = tan(x) * tan(x);
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \tan x \cdot \tan x\\
\frac{1 - t_0}{1 + t_0}
\end{array}
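For context, in exact arithmetic this program is the double-angle identity for cosine, which explains the shape of several alternatives below:

\begin{array}{l}
\frac{1 - {\tan x}^{2}}{1 + {\tan x}^{2}} = \frac{\cos^{2} x - \sin^{2} x}{\cos^{2} x + \sin^{2} x} = \cos 2x
\end{array}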
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
Initial program:
(FPCore (x) :precision binary64 (let* ((t_0 (* (tan x) (tan x)))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) {
    double t_0 = tan(x) * tan(x);
    return (1.0 - t_0) / (1.0 + t_0);
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = tan(x) * tan(x)
    code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) {
    double t_0 = Math.tan(x) * Math.tan(x);
    return (1.0 - t_0) / (1.0 + t_0);
}
def code(x):
    t_0 = math.tan(x) * math.tan(x)
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = Float64(tan(x) * tan(x))
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = tan(x) * tan(x);
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \tan x \cdot \tan x\\
\frac{1 - t_0}{1 + t_0}
\end{array}
Alternative 1:
(FPCore (x) :precision binary64 (/ (fma (tan x) (- (tan x)) 1.0) (+ 1.0 (pow (tan x) 2.0))))
double code(double x) {
    return fma(tan(x), -tan(x), 1.0) / (1.0 + pow(tan(x), 2.0));
}
function code(x)
    return Float64(fma(tan(x), Float64(-tan(x)), 1.0) / Float64(1.0 + (tan(x) ^ 2.0)))
end
code[x_] := N[(N[(N[Tan[x], $MachinePrecision] * (-N[Tan[x], $MachinePrecision]) + 1.0), $MachinePrecision] / N[(1.0 + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{\mathsf{fma}\left(\tan x, -\tan x, 1\right)}{1 + {\tan x}^{2}}
\end{array}
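Here fma(a, b, c) is a fused multiply-add, evaluated with a single rounding; writing the numerator as fma(tan x, -tan x, 1) avoids rounding tan²x separately before the cancellation-prone subtraction from 1:

\begin{array}{l}
\mathsf{fma}(a, b, c) = \mathrm{round}(a \cdot b + c)
\end{array}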
Derivation:
Initial program 99.4%
sub-neg 99.4%
+-commutative 99.4%
distribute-rgt-neg-in 99.4%
fma-def 99.5%
Applied egg-rr 99.5%
add-log-exp 98.0%
*-un-lft-identity 98.0%
log-prod 98.0%
metadata-eval 98.0%
add-log-exp 99.5%
pow2 99.5%
Applied egg-rr 99.5%
+-lft-identity 99.5%
Simplified 99.5%
Final simplification 99.5%
Alternative 2:
(FPCore (x) :precision binary64 (/ (- 1.0 (pow (tan x) 2.0)) (fma (tan x) (tan x) 1.0)))
double code(double x) {
    return (1.0 - pow(tan(x), 2.0)) / fma(tan(x), tan(x), 1.0);
}
function code(x)
    return Float64(Float64(1.0 - (tan(x) ^ 2.0)) / fma(tan(x), tan(x), 1.0))
end
code[x_] := N[(N[(1.0 - N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{1 - {\tan x}^{2}}{\mathsf{fma}\left(\tan x, \tan x, 1\right)}
\end{array}
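This variant fuses the denominator instead; in exact arithmetic fma(tan x, tan x, 1) is the secant identity, and since both terms are nonnegative that sum involves no cancellation:

\begin{array}{l}
1 + {\tan x}^{2} = \sec^{2} x
\end{array}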
Derivation:
Initial program 99.4%
+-commutative 99.4%
fma-def 99.5%
Simplified 99.5%
add-log-exp 98.0%
*-un-lft-identity 98.0%
log-prod 98.0%
metadata-eval 98.0%
add-log-exp 99.5%
pow2 99.5%
Applied egg-rr 99.5%
+-lft-identity 99.5%
Simplified 99.5%
Final simplification 99.5%
Alternative 3:
(FPCore (x) :precision binary64 (if (<= (* (tan x) (tan x)) 1.0) (/ 1.0 (+ 1.0 (pow (tan x) 2.0))) -1.0))
double code(double x) {
    double tmp;
    if ((tan(x) * tan(x)) <= 1.0) {
        tmp = 1.0 / (1.0 + pow(tan(x), 2.0));
    } else {
        tmp = -1.0;
    }
    return tmp;
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: tmp
    if ((tan(x) * tan(x)) <= 1.0d0) then
        tmp = 1.0d0 / (1.0d0 + (tan(x) ** 2.0d0))
    else
        tmp = -1.0d0
    end if
    code = tmp
end function
public static double code(double x) {
    double tmp;
    if ((Math.tan(x) * Math.tan(x)) <= 1.0) {
        tmp = 1.0 / (1.0 + Math.pow(Math.tan(x), 2.0));
    } else {
        tmp = -1.0;
    }
    return tmp;
}
def code(x):
    tmp = 0
    if (math.tan(x) * math.tan(x)) <= 1.0:
        tmp = 1.0 / (1.0 + math.pow(math.tan(x), 2.0))
    else:
        tmp = -1.0
    return tmp
function code(x)
    tmp = 0.0
    if (Float64(tan(x) * tan(x)) <= 1.0)
        tmp = Float64(1.0 / Float64(1.0 + (tan(x) ^ 2.0)));
    else
        tmp = -1.0;
    end
    return tmp
end
function tmp_2 = code(x)
    tmp = 0.0;
    if ((tan(x) * tan(x)) <= 1.0)
        tmp = 1.0 / (1.0 + (tan(x) ^ 2.0));
    else
        tmp = -1.0;
    end
    tmp_2 = tmp;
end
code[x_] := If[LessEqual[N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision], 1.0], N[(1.0 / N[(1.0 + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0]
\begin{array}{l}
\mathbf{if}\;\tan x \cdot \tan x \leq 1:\\
\;\;\;\;\frac{1}{1 + {\tan x}^{2}}\\
\mathbf{else}:\\
\;\;\;\;-1\\
\end{array}
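The else branch returns the limiting value of the expression: as tan²x grows the true result approaches -1, so a constant is cheap there, at the cost of the accuracy loss recorded in the derivation below:

\begin{array}{l}
\lim_{t \to \infty} \frac{1 - t}{1 + t} = -1
\end{array}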
Derivation:
if (*.f64 (tan.f64 x) (tan.f64 x)) < 1:
Initial program 99.6%
sub-neg 99.6%
+-commutative 99.6%
distribute-rgt-neg-in 99.6%
fma-def 99.6%
Applied egg-rr 99.6%
add-log-exp 99.6%
*-un-lft-identity 99.6%
log-prod 99.6%
metadata-eval 99.6%
add-log-exp 99.6%
pow2 99.6%
Applied egg-rr 99.6%
+-lft-identity 99.6%
Simplified 99.6%
Taylor expanded in x around 0 73.0%
if 1 < (*.f64 (tan.f64 x) (tan.f64 x)):
Initial program 99.0%
sub-neg 99.0%
+-commutative 99.0%
distribute-rgt-neg-in 99.0%
fma-def 99.0%
Applied egg-rr 99.0%
add-log-exp 92.3%
*-un-lft-identity 92.3%
log-prod 92.3%
metadata-eval 92.3%
add-log-exp 99.0%
pow2 99.0%
Applied egg-rr 99.0%
+-lft-identity 99.0%
Simplified 99.0%
Applied egg-rr 20.3%
*-inverses 20.3%
metadata-eval 20.3%
Simplified 20.3%
Final simplification 61.1%
Alternative 4:
(FPCore (x) :precision binary64 (let* ((t_0 (pow (tan x) 2.0))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) {
    double t_0 = pow(tan(x), 2.0);
    return (1.0 - t_0) / (1.0 + t_0);
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = tan(x) ** 2.0d0
    code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) {
    double t_0 = Math.pow(Math.tan(x), 2.0);
    return (1.0 - t_0) / (1.0 + t_0);
}
def code(x):
    t_0 = math.pow(math.tan(x), 2.0)
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = tan(x) ^ 2.0
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = tan(x) ^ 2.0;
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\frac{1 - t_0}{1 + t_0}
\end{array}
Derivation:
Initial program 99.4%
expm1-log1p-u 99.3%
pow2 99.3%
Applied egg-rr 99.3%
add-log-exp 98.0%
*-un-lft-identity 98.0%
log-prod 98.0%
metadata-eval 98.0%
add-log-exp 99.5%
pow2 99.5%
Applied egg-rr 99.3%
+-lft-identity 99.5%
Simplified 99.3%
expm1-udef 99.3%
log1p-udef 99.3%
add-exp-log 99.4%
+-commutative 99.4%
associate--l+ 99.4%
metadata-eval 99.4%
Applied egg-rr 99.4%
+-rgt-identity 99.4%
Simplified 99.4%
Final simplification 99.4%
Alternative 5:
(FPCore (x) :precision binary64 (+ (/ 2.0 (+ 1.0 (pow (tan x) 2.0))) -1.0))
double code(double x) {
    return (2.0 / (1.0 + pow(tan(x), 2.0))) + -1.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = (2.0d0 / (1.0d0 + (tan(x) ** 2.0d0))) + (-1.0d0)
end function
public static double code(double x) {
    return (2.0 / (1.0 + Math.pow(Math.tan(x), 2.0))) + -1.0;
}
def code(x):
    return (2.0 / (1.0 + math.pow(math.tan(x), 2.0))) + -1.0
function code(x)
    return Float64(Float64(2.0 / Float64(1.0 + (tan(x) ^ 2.0))) + -1.0)
end
function tmp = code(x)
    tmp = (2.0 / (1.0 + (tan(x) ^ 2.0))) + -1.0;
end
code[x_] := N[(N[(2.0 / N[(1.0 + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]
\begin{array}{l}
\frac{2}{1 + {\tan x}^{2}} + -1
\end{array}
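This form is algebraically equivalent to the original (and to cos 2x), but replaces the cancellation-prone numerator with a single division:

\begin{array}{l}
\frac{2}{1 + {\tan x}^{2}} - 1 = 2\cos^{2} x - 1 = \cos 2x
\end{array}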
Derivation:
Initial program 99.4%
+-commutative 99.4%
fma-def 99.5%
Simplified 99.5%
add-log-exp 98.0%
*-un-lft-identity 98.0%
log-prod 98.0%
metadata-eval 98.0%
add-log-exp 99.5%
pow2 99.5%
Applied egg-rr 99.5%
+-lft-identity 99.5%
Simplified 99.5%
fma-udef 99.4%
unpow2 99.4%
+-commutative 99.4%
metadata-eval 99.4%
associate--r+ 99.3%
sub-div 99.3%
*-inverses 99.3%
Applied egg-rr 99.3%
Final simplification 99.3%
Alternative 6:
(FPCore (x) :precision binary64 -1.0)
double code(double x) {
    return -1.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = -1.0d0
end function
public static double code(double x) {
    return -1.0;
}
def code(x):
    return -1.0
function code(x)
    return -1.0
end
function tmp = code(x)
    tmp = -1.0;
end
code[x_] := -1.0
\begin{array}{l}
-1
\end{array}
Derivation:
Initial program 99.4%
sub-neg 99.4%
+-commutative 99.4%
distribute-rgt-neg-in 99.4%
fma-def 99.5%
Applied egg-rr 99.5%
add-log-exp 98.0%
*-un-lft-identity 98.0%
log-prod 98.0%
metadata-eval 98.0%
add-log-exp 99.5%
pow2 99.5%
Applied egg-rr 99.5%
+-lft-identity 99.5%
Simplified 99.5%
Applied egg-rr 5.8%
*-inverses 5.8%
metadata-eval 5.8%
Simplified 5.8%
Final simplification 5.8%
Alternative 7:
(FPCore (x) :precision binary64 1.0)
double code(double x) {
    return 1.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0
end function
public static double code(double x) {
    return 1.0;
}
def code(x):
    return 1.0
function code(x)
    return 1.0
end
function tmp = code(x)
    tmp = 1.0;
end
code[x_] := 1.0
\begin{array}{l}
1
\end{array}
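The constant 1 is the zeroth-order Taylor approximation of the program around x = 0:

\begin{array}{l}
\frac{1 - {\tan x}^{2}}{1 + {\tan x}^{2}} = \cos 2x = 1 - 2x^{2} + \frac{2x^{4}}{3} - \cdots
\end{array}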
Derivation:
Initial program 99.4%
Taylor expanded in x around 0 56.5%
Final simplification 56.5%
Reproduce:
herbie shell --seed 2024026
(FPCore (x)
:name "Trigonometry B"
:precision binary64
(/ (- 1.0 (* (tan x) (tan x))) (+ 1.0 (* (tan x) (tan x)))))
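To rerun this search non-interactively, one option (a sketch, assuming the FPCore above is saved to trig-b.fpcore and that this Herbie build provides the improve subcommand) is:

herbie improve --seed 2024026 trig-b.fpcore improved.fpcore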