
Initial program (99.5% accurate):
(FPCore (x) :precision binary64 (let* ((t_0 (* (tan x) (tan x)))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) {
    double t_0 = tan(x) * tan(x);
    return (1.0 - t_0) / (1.0 + t_0);
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = tan(x) * tan(x)
    code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) {
    double t_0 = Math.tan(x) * Math.tan(x);
    return (1.0 - t_0) / (1.0 + t_0);
}
def code(x):
    t_0 = math.tan(x) * math.tan(x)
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = Float64(tan(x) * tan(x))
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = tan(x) * tan(x);
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \tan x \cdot \tan x\\
\frac{1 - t_0}{1 + t_0}
\end{array}
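For reference, multiplying the numerator and denominator by cos^2 x shows the input expression is the double-angle cosine; the low-accuracy alternatives below are all small-x approximations of it:
\begin{array}{l}
\frac{1 - \tan^{2} x}{1 + \tan^{2} x} = \frac{\cos^{2} x - \sin^{2} x}{\cos^{2} x + \sin^{2} x} = \cos 2x
\end{array}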
Sampling outcomes in binary64 precision: [table omitted]
Herbie found 6 alternatives:

| Alternative | Accuracy | Speedup |
|---|---|---|
| 1 | 99.5% | |
| 2 | 99.5% | |
| 3 | 55.1% | |
| 4 | 55.1% | |
| 5 | 51.3% | |
| 6 | 50.1% | |
Alternative 1 (99.5% accurate):
(FPCore (x) :precision binary64 (/ (- 1.0 (pow (tan x) 2.0)) (fma (tan x) (tan x) 1.0)))
double code(double x) {
    return (1.0 - pow(tan(x), 2.0)) / fma(tan(x), tan(x), 1.0);
}
function code(x)
    return Float64(Float64(1.0 - (tan(x) ^ 2.0)) / fma(tan(x), tan(x), 1.0))
end
code[x_] := N[(N[(1.0 - N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\frac{1 - {\tan x}^{2}}{\mathsf{fma}\left(\tan x, \tan x, 1\right)}
\end{array}
Derivation:
Initial program 99.5%
+-commutative 99.5%
fma-define 99.5%
Simplified 99.5%
add-log-exp 98.0%
*-un-lft-identity 98.0%
log-prod 98.0%
metadata-eval 98.0%
add-log-exp 99.5%
pow2 99.5%
Applied egg-rr 99.5%
+-lft-identity 99.5%
Simplified 99.5%
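The fma in alternative 1 (introduced by the fma-define step above) is the fused multiply-add from C99's <math.h>: it computes x*y + z with a single rounding instead of two. A minimal C sketch of the difference; the sample point is arbitrary and chosen only for illustration:

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 0.7853981633974483; /* arbitrary sample point, roughly pi/4 */
    double t = tan(x);
    /* t * t + 1.0 rounds the product and then the sum;
       fma(t, t, 1.0) performs both operations with one rounding. */
    printf("two roundings: %.17g\n", t * t + 1.0);
    printf("one rounding:  %.17g\n", fma(t, t, 1.0));
    return 0;
}

Compile with cc -std=c99 example.c -lm on most platforms.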
Alternative 2 (99.5% accurate):
(FPCore (x) :precision binary64 (let* ((t_0 (pow (tan x) 2.0))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) {
    double t_0 = pow(tan(x), 2.0);
    return (1.0 - t_0) / (1.0 + t_0);
}
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = tan(x) ** 2.0d0
    code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) {
    double t_0 = Math.pow(Math.tan(x), 2.0);
    return (1.0 - t_0) / (1.0 + t_0);
}
def code(x):
    t_0 = math.pow(math.tan(x), 2.0)
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = tan(x) ^ 2.0
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = tan(x) ^ 2.0;
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\frac{1 - t_0}{1 + t_0}
\end{array}
Derivation:
Initial program 99.5%
+-commutative 99.5%
fma-define 99.5%
Simplified 99.5%
fma-undefine 99.5%
+-commutative 99.5%
add-log-exp 99.3%
*-un-lft-identity 99.3%
log-prod 99.3%
metadata-eval 99.3%
add-log-exp 99.5%
pow2 99.5%
+-commutative 99.5%
pow2 99.5%
Applied egg-rr 99.5%
+-lft-identity 99.5%
+-commutative 99.5%
Simplified 99.5%
Alternative 3 (55.1% accurate):
(FPCore (x) :precision binary64 (+ 1.0 (+ 1.0 (- -1.0 (pow (tan x) 2.0)))))
double code(double x) {
    return 1.0 + (1.0 + (-1.0 - pow(tan(x), 2.0)));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0 + (1.0d0 + ((-1.0d0) - (tan(x) ** 2.0d0)))
end function
public static double code(double x) {
    return 1.0 + (1.0 + (-1.0 - Math.pow(Math.tan(x), 2.0)));
}
def code(x):
    return 1.0 + (1.0 + (-1.0 - math.pow(math.tan(x), 2.0)))
function code(x)
    return Float64(1.0 + Float64(1.0 + Float64(-1.0 - (tan(x) ^ 2.0))))
end
function tmp = code(x)
    tmp = 1.0 + (1.0 + (-1.0 - (tan(x) ^ 2.0)));
end
code[x_] := N[(1.0 + N[(1.0 + N[(-1.0 - N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
1 + \left(1 + \left(-1 - {\tan x}^{2}\right)\right)
\end{array}
Derivation:
Initial program 99.5%
expm1-log1p-u 99.2%
log1p-define 99.0%
expm1-undefine 99.0%
add-exp-log 99.3%
+-commutative 99.3%
pow2 99.3%
Applied egg-rr 99.3%
Taylor expanded in x around 0 55.1%
Final simplification 55.1%
Alternative 4 (55.1% accurate):
(FPCore (x) :precision binary64 (- 1.0 (* (tan x) (tan x))))
double code(double x) {
    return 1.0 - (tan(x) * tan(x));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0 - (tan(x) * tan(x))
end function
public static double code(double x) {
    return 1.0 - (Math.tan(x) * Math.tan(x));
}
def code(x):
    return 1.0 - (math.tan(x) * math.tan(x))
function code(x)
    return Float64(1.0 - Float64(tan(x) * tan(x)))
end
function tmp = code(x)
    tmp = 1.0 - (tan(x) * tan(x));
end
code[x_] := N[(1.0 - N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
1 - \tan x \cdot \tan x
\end{array}
Derivation:
Initial program 99.5%
Taylor expanded in x around 0 55.1%
Final simplification 55.1%
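Alternatives 3 and 4 both come from the "Taylor expanded in x around 0" step, so they track the target only for small inputs; the drop to 55.1% reflects sampling across the full binary64 range. Since the target equals cos 2x, the series being approximated is:
\begin{array}{l}
\cos 2x = 1 - 2x^{2} + \frac{2}{3}x^{4} - \cdots
\end{array}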
Alternative 5 (51.3% accurate):
(FPCore (x) :precision binary64 (expm1 (- (log 2.0) (* x x))))
double code(double x) {
    return expm1((log(2.0) - (x * x)));
}
public static double code(double x) {
    return Math.expm1((Math.log(2.0) - (x * x)));
}
def code(x):
    return math.expm1((math.log(2.0) - (x * x)))
function code(x)
    return expm1(Float64(log(2.0) - Float64(x * x)))
end
code[x_] := N[(Exp[N[(N[Log[2.0], $MachinePrecision] - N[(x * x), $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]
\begin{array}{l}
\mathsf{expm1}\left(\log 2 - x \cdot x\right)
\end{array}
Derivation:
Initial program 99.5%
+-commutative 99.5%
fma-define 99.5%
Simplified 99.5%
expm1-log1p-u 99.4%
pow2 99.4%
fma-undefine 99.4%
pow2 99.4%
Applied egg-rr 99.4%
Taylor expanded in x around 0 51.3%
mul-1-neg 51.3%
unsub-neg 51.3%
Simplified 51.3%
unpow2 51.3%
Applied egg-rr 51.3%
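To see what alternative 5 computes, expand the exponential:
\begin{array}{l}
\mathsf{expm1}\left(\log 2 - x^{2}\right) = 2 e^{-x^{2}} - 1 = 1 - 2x^{2} + x^{4} - \cdots
\end{array}
This agrees with cos 2x = 1 - 2x^2 + (2/3)x^4 - ... through the quadratic term, so it is another small-x approximation.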
Alternative 6 (50.1% accurate):
(FPCore (x) :precision binary64 1.0)
double code(double x) {
    return 1.0;
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0
end function
public static double code(double x) {
    return 1.0;
}
def code(x):
    return 1.0
function code(x)
    return 1.0
end
function tmp = code(x)
    tmp = 1.0;
end
code[x_] := 1.0
\begin{array}{l}
1
\end{array}
Derivation:
Initial program 99.5%
+-commutative 99.5%
fma-define 99.5%
Simplified 99.5%
add-log-exp 98.0%
*-un-lft-identity 98.0%
log-prod 98.0%
metadata-eval 98.0%
add-log-exp 99.5%
pow2 99.5%
Applied egg-rr 99.5%
+-lft-identity 99.5%
Simplified 99.5%
Taylor expanded in x around 0 50.1%
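Alternative 6 is the zeroth-order term of the same expansion: the constant is exact at x = 0 and degrades away from it, matching the 50.1% accuracy over the full input range:
\begin{array}{l}
\cos 2x = 1 - 2x^{2} + O\left(x^{4}\right)
\end{array}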
Reproduce:
herbie shell --seed 2024191
(FPCore (x)
  :name "Trigonometry B"
  :precision binary64
  (/ (- 1.0 (* (tan x) (tan x))) (+ 1.0 (* (tan x) (tan x)))))
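Assuming the herbie binary is on PATH, the shell reads FPCore expressions like the one above from standard input and prints improved variants; the --seed argument pins Herbie's random sampling so the run is reproducible.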