
(FPCore (x) :precision binary64 (let* ((t_0 (* (tan x) (tan x)))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) {
double t_0 = tan(x) * tan(x);
return (1.0 - t_0) / (1.0 + t_0);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = tan(x) * tan(x)
code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) {
double t_0 = Math.tan(x) * Math.tan(x);
return (1.0 - t_0) / (1.0 + t_0);
}
import math

def code(x):
    t_0 = math.tan(x) * math.tan(x)
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = Float64(tan(x) * tan(x))
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = tan(x) * tan(x);
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \tan x \cdot \tan x\\
\frac{1 - t_0}{1 + t_0}
\end{array}
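Algebraically, this program is the tangent form of the cosine double-angle identity, which explains both the cos (+ x x) term and the cos-2/cos-sum rewrite steps appearing in the derivations below:

\begin{array}{l}
\frac{1 - {\tan x}^{2}}{1 + {\tan x}^{2}} = \frac{\cos^{2} x - \sin^{2} x}{\cos^{2} x + \sin^{2} x} = \cos 2x
\end{array}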
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
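As a rough illustration of what accuracy figures like these measure (a sketch only, not Herbie's actual sampler or bits-of-error metric), one can sample binary64 inputs and compare the double-precision program against a high-precision reference. The `reference` helper, the mpmath precision, and the sampling range below are all arbitrary choices for the sketch:

```python
import math
import random

from mpmath import mp, mpf, tan

mp.prec = 200  # 200-bit reference arithmetic

def code(x):
    # the original double-precision program
    t_0 = math.tan(x) * math.tan(x)
    return (1.0 - t_0) / (1.0 + t_0)

def reference(x):
    # the same formula, evaluated in high precision
    t = tan(mpf(x)) ** 2
    return (1 - t) / (1 + t)

worst = 0.0
for _ in range(10_000):
    x = random.uniform(-10.0, 10.0)  # arbitrary sampling range
    exact = reference(x)
    if exact != 0:
        worst = max(worst, abs((code(x) - float(exact)) / float(exact)))
print(f"worst sampled relative error: {worst:.3e}")
```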
(FPCore (x) :precision binary64 (let* ((t_0 (* (tan x) (tan x)))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) {
double t_0 = tan(x) * tan(x);
return (1.0 - t_0) / (1.0 + t_0);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = tan(x) * tan(x)
code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) {
double t_0 = Math.tan(x) * Math.tan(x);
return (1.0 - t_0) / (1.0 + t_0);
}
import math

def code(x):
    t_0 = math.tan(x) * math.tan(x)
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = Float64(tan(x) * tan(x))
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = tan(x) * tan(x);
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \tan x \cdot \tan x\\
\frac{1 - t_0}{1 + t_0}
\end{array}
(FPCore (x) :precision binary64 (/ (fma (tan x) (- (tan x)) 1.0) (/ 1.0 (/ 1.0 (+ 1.0 (pow (tan x) 2.0))))))
double code(double x) {
return fma(tan(x), -tan(x), 1.0) / (1.0 / (1.0 / (1.0 + pow(tan(x), 2.0))));
}
function code(x)
    return Float64(fma(tan(x), Float64(-tan(x)), 1.0) / Float64(1.0 / Float64(1.0 / Float64(1.0 + (tan(x) ^ 2.0)))))
end
code[x_] := N[(N[(N[Tan[x], $MachinePrecision] * (-N[Tan[x], $MachinePrecision]) + 1.0), $MachinePrecision] / N[(1.0 / N[(1.0 / N[(1.0 + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{\mathsf{fma}\left(\tan x, -\tan x, 1\right)}{\frac{1}{\frac{1}{1 + {\tan x}^{2}}}}
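The fma numerator evaluates 1 − tan²x with a single rounding: a fused multiply-add computes a·b + c exactly and rounds once, so near tan x ≈ 1 the remaining error comes from tan itself rather than from rounding tan(x)*tan(x) just before a cancelling subtraction. A minimal comparison (assumes Python 3.13+, where math.fma is available):

```python
import math

x = math.pi / 4 + 1e-8   # tan(x) is extremely close to 1 here
t = math.tan(x)

naive = 1.0 - t * t            # two roundings: one in t*t, one in the subtraction
fused = math.fma(t, -t, 1.0)   # a single rounding for the whole 1 - t*t
print(naive, fused)
```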
Initial program 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
sub-neg (N/A)
+-commutative (N/A)
lift-*.f64 (N/A)
distribute-rgt-neg-in (N/A)
lower-fma.f64 (N/A)
lower-neg.f64 (99.5%)
Applied rewrites 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
lift-+.f64 (99.5%)
remove-double-div (N/A)
lower-/.f64 (N/A)
lower-/.f64 (99.5%)
lift-*.f64 (N/A)
pow2 (N/A)
lift-pow.f64 (99.5%)
Applied rewrites 99.5%
(FPCore (x)
:precision binary64
(let* ((t_0 (cos (+ x x))) (t_1 (* (tan x) (tan x))) (t_2 (+ 1.0 t_1)))
(if (<= (/ (- 1.0 t_1) t_2) 0.25)
(+ 1.0 (/ (/ -1.0 (fma t_0 -0.5 -0.5)) (/ -1.0 (fma t_0 -0.5 0.5))))
(/ (+ 1.0 (- (* t_0 0.5) 0.5)) t_2))))
double code(double x) {
double t_0 = cos((x + x));
double t_1 = tan(x) * tan(x);
double t_2 = 1.0 + t_1;
double tmp;
if (((1.0 - t_1) / t_2) <= 0.25) {
tmp = 1.0 + ((-1.0 / fma(t_0, -0.5, -0.5)) / (-1.0 / fma(t_0, -0.5, 0.5)));
} else {
tmp = (1.0 + ((t_0 * 0.5) - 0.5)) / t_2;
}
return tmp;
}
function code(x)
    t_0 = cos(Float64(x + x))
    t_1 = Float64(tan(x) * tan(x))
    t_2 = Float64(1.0 + t_1)
    tmp = 0.0
    if (Float64(Float64(1.0 - t_1) / t_2) <= 0.25)
        tmp = Float64(1.0 + Float64(Float64(-1.0 / fma(t_0, -0.5, -0.5)) / Float64(-1.0 / fma(t_0, -0.5, 0.5))))
    else
        tmp = Float64(Float64(1.0 + Float64(Float64(t_0 * 0.5) - 0.5)) / t_2)
    end
    return tmp
end
code[x_] := Block[{t$95$0 = N[Cos[N[(x + x), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 + t$95$1), $MachinePrecision]}, If[LessEqual[N[(N[(1.0 - t$95$1), $MachinePrecision] / t$95$2), $MachinePrecision], 0.25], N[(1.0 + N[(N[(-1.0 / N[(t$95$0 * -0.5 + -0.5), $MachinePrecision]), $MachinePrecision] / N[(-1.0 / N[(t$95$0 * -0.5 + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(1.0 + N[(N[(t$95$0 * 0.5), $MachinePrecision] - 0.5), $MachinePrecision]), $MachinePrecision] / t$95$2), $MachinePrecision]]]]]
\begin{array}{l}
t_0 := \cos \left(x + x\right)\\
t_1 := \tan x \cdot \tan x\\
t_2 := 1 + t_1\\
\mathbf{if}\;\frac{1 - t_1}{t_2} \leq 0.25:\\
\;\;\;\;1 + \frac{\frac{-1}{\mathsf{fma}\left(t_0, -0.5, -0.5\right)}}{\frac{-1}{\mathsf{fma}\left(t_0, -0.5, 0.5\right)}}\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + \left(t_0 \cdot 0.5 - 0.5\right)}{t_2}
\end{array}
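The branch matters near tan x ≈ 1 (x near π/4), where the numerator 1 − tan²x cancels catastrophically while the rearrangement built from cos(x + x) does not. A quick check of the underlying behavior (a sketch, not Herbie's evaluation harness):

```python
import math

x = math.pi / 4 + 1e-8   # the true result is about -2e-8 here

t = math.tan(x) ** 2
naive = (1.0 - t) / (1.0 + t)   # catastrophic cancellation in 1 - t
direct = math.cos(2.0 * x)      # the same value via the double-angle identity
print(naive, direct)
```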
if (/.f64 (-.f64 #s(literal 1 binary64) (*.f64 (tan.f64 x) (tan.f64 x))) (+.f64 #s(literal 1 binary64) (*.f64 (tan.f64 x) (tan.f64 x)))) < 0.25
Initial program 98.7%
tan-quot (N/A)
lift-tan.f64 (N/A)
associate-*l/ (N/A)
lower-/.f64 (N/A)
*-commutative (N/A)
lower-*.f64 (N/A)
lower-sin.f64 (N/A)
lower-cos.f64 (98.5%)
Applied rewrites 98.5%
Taylor expanded in x around 0
Applied rewrites 16.8%
Applied rewrites 16.8%
if 0.25 < (/.f64 (-.f64 #s(literal 1 binary64) (*.f64 (tan.f64 x) (tan.f64 x))) (+.f64 #s(literal 1 binary64) (*.f64 (tan.f64 x) (tan.f64 x))))
Initial program 99.7%
tan-quot (N/A)
div-inv (N/A)
tan-quot (N/A)
div-inv (N/A)
swap-sqr (N/A)
lower-*.f64 (N/A)
sqr-sin-a (N/A)
lower--.f64 (N/A)
cos-2 (N/A)
cos-sum (N/A)
lower-*.f64 (N/A)
lower-cos.f64 (N/A)
lower-+.f64 (N/A)
inv-pow (N/A)
inv-pow (N/A)
pow-prod-down (N/A)
inv-pow (N/A)
lower-/.f64 (N/A)
sqr-cos-a (N/A)
lower-+.f64 (N/A)
cos-2 (N/A)
cos-sum (N/A)
lower-*.f64 (N/A)
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 77.8%
Final simplification 61.3%
(FPCore (x) :precision binary64 (/ (fma (tan x) (- (tan x)) 1.0) (- (pow (tan x) 2.0) -1.0)))
double code(double x) {
return fma(tan(x), -tan(x), 1.0) / (pow(tan(x), 2.0) - -1.0);
}
function code(x)
    return Float64(fma(tan(x), Float64(-tan(x)), 1.0) / Float64((tan(x) ^ 2.0) - -1.0))
end
code[x_] := N[(N[(N[Tan[x], $MachinePrecision] * (-N[Tan[x], $MachinePrecision]) + 1.0), $MachinePrecision] / N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] - -1.0), $MachinePrecision]), $MachinePrecision]
\frac{\mathsf{fma}\left(\tan x, -\tan x, 1\right)}{{\tan x}^{2} - -1}
Initial program 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
sub-neg (N/A)
+-commutative (N/A)
lift-*.f64 (N/A)
distribute-rgt-neg-in (N/A)
lower-fma.f64 (N/A)
lower-neg.f64 (99.5%)
Applied rewrites 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
+-commutative (N/A)
metadata-eval (N/A)
sub-neg (N/A)
lower--.f64 (99.5%)
lift-*.f64 (N/A)
pow2 (N/A)
lift-pow.f64 (99.5%)
Applied rewrites 99.5%
(FPCore (x) :precision binary64 (/ (- 1.0 (pow (tan x) 2.0)) (fma (tan x) (tan x) 1.0)))
double code(double x) {
return (1.0 - pow(tan(x), 2.0)) / fma(tan(x), tan(x), 1.0);
}
function code(x)
    return Float64(Float64(1.0 - (tan(x) ^ 2.0)) / fma(tan(x), tan(x), 1.0))
end
code[x_] := N[(N[(1.0 - N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\frac{1 - {\tan x}^{2}}{\mathsf{fma}\left(\tan x, \tan x, 1\right)}
Initial program 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
sub-neg (N/A)
+-commutative (N/A)
lift-*.f64 (N/A)
distribute-rgt-neg-in (N/A)
lower-fma.f64 (N/A)
lower-neg.f64 (99.5%)
Applied rewrites 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
lift-+.f64 (99.5%)
remove-double-div (N/A)
lower-/.f64 (N/A)
lower-/.f64 (99.5%)
lift-*.f64 (N/A)
pow2 (N/A)
lift-pow.f64 (99.5%)
Applied rewrites 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-neg.f64 (N/A)
+-commutative (N/A)
lift-neg.f64 (N/A)
distribute-rgt-neg-out (N/A)
lift-*.f64 (N/A)
sub-neg (N/A)
lower--.f64 (99.5%)
lift-*.f64 (N/A)
pow2 (N/A)
lift-pow.f64 (99.5%)
Applied rewrites 99.5%
Applied rewrites 99.5%
(FPCore (x) :precision binary64 (let* ((t_0 (pow (tan x) 2.0))) (/ (- 1.0 t_0) (- t_0 -1.0))))
double code(double x) {
double t_0 = pow(tan(x), 2.0);
return (1.0 - t_0) / (t_0 - -1.0);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = tan(x) ** 2.0d0
code = (1.0d0 - t_0) / (t_0 - (-1.0d0))
end function
public static double code(double x) {
double t_0 = Math.pow(Math.tan(x), 2.0);
return (1.0 - t_0) / (t_0 - -1.0);
}
import math

def code(x):
    t_0 = math.pow(math.tan(x), 2.0)
    return (1.0 - t_0) / (t_0 - -1.0)
function code(x)
    t_0 = tan(x) ^ 2.0
    return Float64(Float64(1.0 - t_0) / Float64(t_0 - -1.0))
end
function tmp = code(x)
    t_0 = tan(x) ^ 2.0;
    tmp = (1.0 - t_0) / (t_0 - -1.0);
end
code[x_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(t$95$0 - -1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\frac{1 - t_0}{t_0 - -1}
\end{array}
Initial program 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
sub-neg (N/A)
+-commutative (N/A)
lift-*.f64 (N/A)
distribute-rgt-neg-in (N/A)
lower-fma.f64 (N/A)
lower-neg.f64 (99.5%)
Applied rewrites 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
lift-*.f64 (N/A)
+-commutative (N/A)
metadata-eval (N/A)
sub-neg (N/A)
lower--.f64 (99.5%)
lift-*.f64 (N/A)
pow2 (N/A)
lift-pow.f64 (99.5%)
Applied rewrites 99.5%
lift-tan.f64 (N/A)
lift-tan.f64 (N/A)
neg-mul-1 (N/A)
*-commutative (N/A)
associate-*l* (N/A)
unpow2 (N/A)
lift-pow.f64 (N/A)
lift-pow.f64 (N/A)
unpow2 (N/A)
associate-*l* (N/A)
*-commutative (N/A)
neg-mul-1 (N/A)
lift-neg.f64 (N/A)
+-commutative (N/A)
lift-neg.f64 (N/A)
distribute-rgt-neg-out (N/A)
unpow2 (N/A)
lift-pow.f64 (N/A)
sub-neg (N/A)
lift--.f64 (99.5%)
Applied rewrites 99.5%
(FPCore (x) :precision binary64 (/ 1.0 (/ -1.0 (+ (pow (tan x) 2.0) -1.0))))
double code(double x) {
return 1.0 / (-1.0 / (pow(tan(x), 2.0) + -1.0));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / ((-1.0d0) / ((tan(x) ** 2.0d0) + (-1.0d0)))
end function
public static double code(double x) {
return 1.0 / (-1.0 / (Math.pow(Math.tan(x), 2.0) + -1.0));
}
import math

def code(x):
    return 1.0 / (-1.0 / (math.pow(math.tan(x), 2.0) + -1.0))
function code(x)
    return Float64(1.0 / Float64(-1.0 / Float64((tan(x) ^ 2.0) + -1.0)))
end
function tmp = code(x)
    tmp = 1.0 / (-1.0 / ((tan(x) ^ 2.0) + -1.0));
end
code[x_] := N[(1.0 / N[(-1.0 / N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\frac{1}{\frac{-1}{{\tan x}^{2} + -1}}
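Despite the nested divisions, this alternative is algebraically the same as the 1 − tan²x program that follows it; the two reciprocals cancel the sign and undo the reordered subtraction:

\begin{array}{l}
\frac{1}{\frac{-1}{{\tan x}^{2} - 1}} = -\left({\tan x}^{2} - 1\right) = 1 - {\tan x}^{2}
\end{array}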
Initial program 99.5%
tan-quot (N/A)
lift-tan.f64 (N/A)
associate-*l/ (N/A)
lower-/.f64 (N/A)
*-commutative (N/A)
lower-*.f64 (N/A)
lower-sin.f64 (N/A)
lower-cos.f64 (99.4%)
Applied rewrites 99.4%
Taylor expanded in x around 0
Applied rewrites 59.5%
Applied rewrites 59.5%
(FPCore (x) :precision binary64 (- 1.0 (pow (tan x) 2.0)))
double code(double x) {
return 1.0 - pow(tan(x), 2.0);
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 - (tan(x) ** 2.0d0)
end function
public static double code(double x) {
return 1.0 - Math.pow(Math.tan(x), 2.0);
}
import math

def code(x):
    return 1.0 - math.pow(math.tan(x), 2.0)
function code(x)
    return Float64(1.0 - (tan(x) ^ 2.0))
end
function tmp = code(x)
    tmp = 1.0 - (tan(x) ^ 2.0);
end
code[x_] := N[(1.0 - N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
1 - {\tan x}^{2}
Initial program 99.5%
tan-quot (N/A)
lift-tan.f64 (N/A)
associate-*l/ (N/A)
lower-/.f64 (N/A)
*-commutative (N/A)
lower-*.f64 (N/A)
lower-sin.f64 (N/A)
lower-cos.f64 (99.4%)
Applied rewrites 99.4%
Taylor expanded in x around 0
Applied rewrites 59.5%
lift-tan.f64 (N/A)
lift-sin.f64 (N/A)
lift-*.f64 (N/A)
lift-cos.f64 (N/A)
lift-/.f64 (N/A)
lift--.f64 (N/A)
/-rgt-identity (59.5%)
lift-/.f64 (N/A)
lift-*.f64 (N/A)
associate-/l* (N/A)
lift-sin.f64 (N/A)
lift-cos.f64 (N/A)
tan-quot (N/A)
lift-tan.f64 (N/A)
unpow2 (N/A)
lift-pow.f64 (59.5%)
Applied rewrites 59.5%
(FPCore (x) :precision binary64 1.0)
double code(double x) {
return 1.0;
}
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x): return 1.0
function code(x)
    return 1.0
end
function tmp = code(x)
    tmp = 1.0;
end
code[x_] := 1.0
1
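The constant 1 is the zeroth-order term of the Taylor expansion mentioned in the derivation: around x = 0,

\begin{array}{l}
\frac{1 - {\tan x}^{2}}{1 + {\tan x}^{2}} = \cos 2x = 1 - 2x^{2} + \frac{2}{3}x^{4} - \cdots
\end{array}

so this alternative is accurate only for very small |x|, consistent with the 55.4% overall figure below.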
Initial program 99.5%
Applied rewrites 55.4%
herbie shell --seed 2024219
(FPCore (x)
:name "Trigonometry B"
:precision binary64
(/ (- 1.0 (* (tan x) (tan x))) (+ 1.0 (* (tan x) (tan x)))))
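To reproduce this result, start the shell with the seed above and supply the FPCore on standard input; in recent Herbie versions the shell reads FPCore expressions and prints improved ones, so a session would look roughly like:

```
$ herbie shell --seed 2024219
(FPCore (x)
  :name "Trigonometry B"
  :precision binary64
  (/ (- 1.0 (* (tan x) (tan x))) (+ 1.0 (* (tan x) (tan x)))))
```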