
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Initial program: tan(x + eps) - tan(x), evaluated directly in binary64.
real(8) function code(x, eps)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function code
/** Initial program: difference of tangents, tan(x + eps) - tan(x). */
public static double code(double x, double eps) {
    final double tanShifted = Math.tan(x + eps);
    final double tanBase = Math.tan(x);
    return tanShifted - tanBase;
}
def code(x, eps):
    """Initial program: tan(x + eps) - tan(x) in double precision."""
    return math.tan(x + eps) - math.tan(x)
# Initial program: tan(x + eps) - tan(x), with explicit Float64 rounding.
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
% Initial program: tan(x + eps) - tan(x).
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Initial program: tan(x + eps) - tan(x), rounded at machine precision after each step. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* First listing under the alternatives table: repeats the initial program,
 * tan(x + eps) - tan(x). */
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Repeats the initial program: tan(x + eps) - tan(x) in binary64.
real(8) function code(x, eps)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function code
/** Repeats the initial program: tan(x + eps) - tan(x). */
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
# Repeats the initial program: tan(x + eps) - tan(x).
def code(x, eps): return math.tan((x + eps)) - math.tan(x)
# Repeats the initial program: tan(x + eps) - tan(x).
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
% Repeats the initial program: tan(x + eps) - tan(x).
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Repeats the initial program: tan(x + eps) - tan(x). *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
(FPCore (x eps) :precision binary64 (/ (* (fma -0.16666666666666666 (* eps eps) 1.0) eps) (* (cos (+ eps x)) (cos x))))
/* Herbie alternative: eps * (1 - eps^2/6) / (cos(eps + x) * cos(x)).
 * Rewrites the tan difference through sin(eps)/(cos(eps+x)cos(x)) with a
 * short Taylor expansion of sin in eps, avoiding the subtraction. */
double code(double x, double eps) {
return (fma(-0.16666666666666666, (eps * eps), 1.0) * eps) / (cos((eps + x)) * cos(x));
}
# Herbie alternative: eps * (1 - eps^2/6) / (cos(eps + x) * cos(x)).
function code(x, eps) return Float64(Float64(fma(-0.16666666666666666, Float64(eps * eps), 1.0) * eps) / Float64(cos(Float64(eps + x)) * cos(x))) end
(* Herbie alternative: eps * (1 - eps^2/6) / (cos(eps + x) * cos(x)). *)
code[x_, eps_] := N[(N[(N[(-0.16666666666666666 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision] * eps), $MachinePrecision] / N[(N[Cos[N[(eps + x), $MachinePrecision]], $MachinePrecision] * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-0.16666666666666666, \varepsilon \cdot \varepsilon, 1\right) \cdot \varepsilon}{\cos \left(\varepsilon + x\right) \cdot \cos x}
\end{array}
Initial program 62.2%
lift--.f64 N/A
lift-tan.f64 N/A
tan-quot N/A
lift-tan.f64 N/A
tan-quot N/A
frac-sub N/A
lower-/.f64 N/A
sin-diff-rev N/A
lower-sin.f64 N/A
lower--.f64 N/A
lift-+.f64 N/A
+-commutative N/A
lower-+.f64 N/A
lower-*.f64 N/A
lower-cos.f64 N/A
lift-+.f64 N/A
+-commutative N/A
lower-+.f64 N/A
lower-cos.f64 62.2
Applied rewrites 62.2%
Taylor expanded in eps around 0
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.7
Applied rewrites 99.7%
(FPCore (x eps) :precision binary64 (* (+ (pow (tan x) 2.0) 1.0) eps))
/* Herbie alternative: (tan(x)^2 + 1) * eps — the eps->0 Taylor expansion
 * of the original difference (derivative of tan times eps). */
double code(double x, double eps) {
return (pow(tan(x), 2.0) + 1.0) * eps;
}
! Herbie alternative: (tan(x)**2 + 1) * eps (eps->0 Taylor expansion).
real(8) function code(x, eps)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    ! Integer exponent: a real exponent (2.0d0) with a negative base is
    ! prohibited by the Fortran standard, and tan(x) can be negative.
    code = ((tan(x) ** 2) + 1.0d0) * eps
end function code
/** Herbie alternative: (tan(x)^2 + 1) * eps. */
public static double code(double x, double eps) {
return (Math.pow(Math.tan(x), 2.0) + 1.0) * eps;
}
# Herbie alternative: (tan(x)**2 + 1) * eps.
def code(x, eps): return (math.pow(math.tan(x), 2.0) + 1.0) * eps
# Herbie alternative: (tan(x)^2 + 1) * eps.
function code(x, eps) return Float64(Float64((tan(x) ^ 2.0) + 1.0) * eps) end
% Herbie alternative: (tan(x)^2 + 1) * eps.
function tmp = code(x, eps) tmp = ((tan(x) ^ 2.0) + 1.0) * eps; end
(* Herbie alternative: (tan(x)^2 + 1) * eps. *)
code[x_, eps_] := N[(N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left({\tan x}^{2} + 1\right) \cdot \varepsilon
\end{array}
Initial program 62.2%
Taylor expanded in eps around 0
Applied rewrites 99.6%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Applied rewrites 99.2%
(FPCore (x eps)
:precision binary64
(*
(+
(*
(fma
(fma
(fma 0.19682539682539682 (* x x) 0.37777777777777777)
(* x x)
0.6666666666666666)
(* x x)
1.0)
(* x x))
1.0)
eps))
/* Herbie alternative: tan(x)^2 replaced by an even polynomial in x
 * (degree-8 Taylor expansion of tan^2 around x = 0), times eps, plus eps. */
double code(double x, double eps) {
return ((fma(fma(fma(0.19682539682539682, (x * x), 0.37777777777777777), (x * x), 0.6666666666666666), (x * x), 1.0) * (x * x)) + 1.0) * eps;
}
# Herbie alternative: polynomial-in-x^2 approximation of (tan(x)^2 + 1) * eps.
function code(x, eps) return Float64(Float64(Float64(fma(fma(fma(0.19682539682539682, Float64(x * x), 0.37777777777777777), Float64(x * x), 0.6666666666666666), Float64(x * x), 1.0) * Float64(x * x)) + 1.0) * eps) end
(* Herbie alternative: polynomial-in-x^2 approximation of (tan(x)^2 + 1) * eps. *)
code[x_, eps_] := N[(N[(N[(N[(N[(N[(0.19682539682539682 * N[(x * x), $MachinePrecision] + 0.37777777777777777), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right) + 1\right) \cdot \varepsilon
\end{array}
Initial program 62.2%
Taylor expanded in eps around 0
Applied rewrites 99.6%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 98.9%
(FPCore (x eps) :precision binary64 (* (fma (fma (fma 0.37777777777777777 (* x x) 0.6666666666666666) (* x x) 1.0) (* x x) 1.0) eps))
/* Herbie alternative: shorter (degree-6) polynomial-in-x^2 approximation,
 * folded entirely into nested fma calls, times eps. */
double code(double x, double eps) {
return fma(fma(fma(0.37777777777777777, (x * x), 0.6666666666666666), (x * x), 1.0), (x * x), 1.0) * eps;
}
# Herbie alternative: degree-6 polynomial-in-x^2 approximation, times eps.
function code(x, eps) return Float64(fma(fma(fma(0.37777777777777777, Float64(x * x), 0.6666666666666666), Float64(x * x), 1.0), Float64(x * x), 1.0) * eps) end
(* Herbie alternative: degree-6 polynomial-in-x^2 approximation, times eps. *)
code[x_, eps_] := N[(N[(N[(N[(0.37777777777777777 * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right), x \cdot x, 1\right) \cdot \varepsilon
\end{array}
Initial program 62.2%
Taylor expanded in eps around 0
Applied rewrites 99.6%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 98.5%
Taylor expanded in x around 0
Applied rewrites 98.9%
(FPCore (x eps) :precision binary64 (* (fma (fma 0.6666666666666666 (* x x) 1.0) (* x x) 1.0) eps))
/* Herbie alternative: degree-4 truncation, ((2/3)x^2 + 1)x^2 + 1, times eps. */
double code(double x, double eps) {
return fma(fma(0.6666666666666666, (x * x), 1.0), (x * x), 1.0) * eps;
}
# Herbie alternative: degree-4 truncation, ((2/3)x^2 + 1)x^2 + 1, times eps.
function code(x, eps) return Float64(fma(fma(0.6666666666666666, Float64(x * x), 1.0), Float64(x * x), 1.0) * eps) end
(* Herbie alternative: degree-4 truncation, ((2/3)x^2 + 1)x^2 + 1, times eps. *)
code[x_, eps_] := N[(N[(N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.6666666666666666, x \cdot x, 1\right), x \cdot x, 1\right) \cdot \varepsilon
\end{array}
Initial program 62.2%
Taylor expanded in eps around 0
Applied rewrites 99.6%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 98.5%
Taylor expanded in x around 0
Applied rewrites 98.8%
(FPCore (x eps) :precision binary64 (* (fma x x 1.0) eps))
/* Herbie alternative: degree-2 truncation, (x^2 + 1) * eps, as one fma. */
double code(double x, double eps) {
return fma(x, x, 1.0) * eps;
}
# Herbie alternative: degree-2 truncation, (x^2 + 1) * eps.
function code(x, eps) return Float64(fma(x, x, 1.0) * eps) end
(* Herbie alternative: degree-2 truncation, (x^2 + 1) * eps. *)
code[x_, eps_] := N[(N[(x * x + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, 1\right) \cdot \varepsilon
\end{array}
Initial program 62.2%
Taylor expanded in eps around 0
Applied rewrites 99.6%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 98.5%
Taylor expanded in x around 0
Applied rewrites 98.5%
(FPCore (x eps) :precision binary64 (* 1.0 eps))
/* Herbie alternative: constant truncation, 1 * eps (the 1.0 factor is kept
 * to mirror the generated FPCore expression). */
double code(double x, double eps) {
return 1.0 * eps;
}
! Herbie alternative: constant truncation, 1 * eps (factor kept verbatim
! to mirror the generated FPCore expression).
real(8) function code(x, eps)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = 1.0d0 * eps
end function code
/** Herbie alternative: constant truncation, 1 * eps. */
public static double code(double x, double eps) {
return 1.0 * eps;
}
# Herbie alternative: constant truncation, 1 * eps.
def code(x, eps): return 1.0 * eps
# Herbie alternative: constant truncation, 1 * eps.
function code(x, eps) return Float64(1.0 * eps) end
% Herbie alternative: constant truncation, 1 * eps.
function tmp = code(x, eps) tmp = 1.0 * eps; end
(* Herbie alternative: constant truncation, 1 * eps. *)
code[x_, eps_] := N[(1.0 * eps), $MachinePrecision]
\begin{array}{l}
\\
1 \cdot \varepsilon
\end{array}
Initial program 62.2%
Taylor expanded in eps around 0
Applied rewrites 99.6%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 98.5%
Taylor expanded in x around 0
Applied rewrites 97.9%
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
/* Herbie alternative (the :alt target of the source FPCore):
 * eps + eps * tan(x) * tan(x), i.e. (1 + tan(x)^2) * eps distributed. */
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
! Herbie alternative: eps + eps * tan(x) * tan(x), i.e. (1 + tan(x)**2) * eps
! distributed so no explicit power is needed.
real(8) function code(x, eps)
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps + ((eps * tan(x)) * tan(x))
end function code
/** Herbie alternative: eps + eps * tan(x) * tan(x). */
public static double code(double x, double eps) {
return eps + ((eps * Math.tan(x)) * Math.tan(x));
}
# Herbie alternative: eps + eps * tan(x) * tan(x).
def code(x, eps): return eps + ((eps * math.tan(x)) * math.tan(x))
# Herbie alternative: eps + eps * tan(x) * tan(x).
function code(x, eps) return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x))) end
% Herbie alternative: eps + eps * tan(x) * tan(x).
function tmp = code(x, eps) tmp = eps + ((eps * tan(x)) * tan(x)); end
(* Herbie alternative: eps + eps * tan(x) * tan(x). *)
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2024313
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))