
; Original (naive) program: tan(x + eps) - tan(x); cancellation-prone when eps is small.
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Naive forward difference of tan: tan(x + eps) - tan(x).
! Cancellation-prone when eps is small relative to x.
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
/** Naive forward difference of tan: tan(x + eps) - tan(x); cancellation-prone for small eps. */
public static double code(double x, double eps) {
    final double shifted = Math.tan(x + eps);
    final double base = Math.tan(x);
    return shifted - base;
}
def code(x, eps):
    """Naive forward difference of tan: tan(x + eps) - tan(x); cancellation-prone for small eps."""
    return math.tan(x + eps) - math.tan(x)
# Naive forward difference of tan; cancellation-prone for small eps.
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
% Naive forward difference of tan; cancellation-prone for small eps.
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Naive forward difference of tan at machine precision; cancellation-prone for small eps. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (identical to the input program): tan(x + eps) - tan(x)
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* Naive difference of tangents; loses accuracy to cancellation when eps is small. */
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Naive difference of tangents; loses accuracy to cancellation when eps is small.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
/** Naive difference of tangents; loses accuracy to cancellation when eps is small. */
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
# Naive difference of tangents; loses accuracy to cancellation when eps is small.
def code(x, eps): return math.tan((x + eps)) - math.tan(x)
# Naive difference of tangents; loses accuracy to cancellation when eps is small.
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
% Naive difference of tangents; loses accuracy to cancellation when eps is small.
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Naive difference of tangents at machine precision. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
; Series rewrite in eps using fma; t_0 = cos(x)^2 is shared between the two quotients.
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (cos x) 2.0)))
(fma
(fma
(/ eps (cos x))
(+ (/ (pow (sin x) 3.0) t_0) (sin x))
(/ (pow (sin x) 2.0) t_0))
eps
eps)))
/* fma-based series rewrite in eps; t_0 = cos(x)^2 is shared between the two quotients. */
double code(double x, double eps) {
double t_0 = pow(cos(x), 2.0);
return fma(fma((eps / cos(x)), ((pow(sin(x), 3.0) / t_0) + sin(x)), (pow(sin(x), 2.0) / t_0)), eps, eps);
}
# fma-based series rewrite in eps; t_0 = cos(x)^2 is shared between the two quotients.
function code(x, eps) t_0 = cos(x) ^ 2.0 return fma(fma(Float64(eps / cos(x)), Float64(Float64((sin(x) ^ 3.0) / t_0) + sin(x)), Float64((sin(x) ^ 2.0) / t_0)), eps, eps) end
(* Series rewrite in eps; t$95$0 caches Cos[x]^2 for both quotients. *)
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(N[(eps / N[Cos[x], $MachinePrecision]), $MachinePrecision] * N[(N[(N[Power[N[Sin[x], $MachinePrecision], 3.0], $MachinePrecision] / t$95$0), $MachinePrecision] + N[Sin[x], $MachinePrecision]), $MachinePrecision] + N[(N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\cos x}^{2}\\
\mathsf{fma}\left(\mathsf{fma}\left(\frac{\varepsilon}{\cos x}, \frac{{\sin x}^{3}}{t\_0} + \sin x, \frac{{\sin x}^{2}}{t\_0}\right), \varepsilon, \varepsilon\right)
\end{array}
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
associate--l+ N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
Applied rewrites 100.0%
; Exact trig identity: tan(x+eps) - tan(x) = sin(eps) / (cos(eps+x) * cos(x)); no subtraction.
(FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos (+ eps x)) (cos x))))
double code(double x, double eps) {
return sin(eps) / (cos((eps + x)) * cos(x));
}
! Exact identity tan(x+eps) - tan(x) == sin(eps) / (cos(eps+x) * cos(x)):
! avoids the subtractive cancellation of the naive form.
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = sin(eps) / (cos((eps + x)) * cos(x))
end function
/** Exact identity sin(eps) / (cos(eps+x) * cos(x)); avoids subtractive cancellation. */
public static double code(double x, double eps) {
return Math.sin(eps) / (Math.cos((eps + x)) * Math.cos(x));
}
# Exact identity sin(eps) / (cos(eps+x) * cos(x)); avoids subtractive cancellation.
def code(x, eps): return math.sin(eps) / (math.cos((eps + x)) * math.cos(x))
# Exact identity sin(eps) / (cos(eps+x) * cos(x)); avoids subtractive cancellation.
function code(x, eps) return Float64(sin(eps) / Float64(cos(Float64(eps + x)) * cos(x))) end
% Exact identity sin(eps) / (cos(eps+x) * cos(x)); avoids subtractive cancellation.
function tmp = code(x, eps) tmp = sin(eps) / (cos((eps + x)) * cos(x)); end
(* Exact identity Sin[eps] / (Cos[eps+x] Cos[x]) at machine precision. *)
code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[N[(eps + x), $MachinePrecision]], $MachinePrecision] * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin \varepsilon}{\cos \left(\varepsilon + x\right) \cdot \cos x}
\end{array}
Initial program 63.4%
lift--.f64 N/A
lift-tan.f64 N/A
tan-quot N/A
lift-tan.f64 N/A
tan-quot N/A
frac-sub N/A
lower-/.f64 N/A
sin-diff N/A
lower-sin.f64 N/A
lower--.f64 N/A
lift-+.f64 N/A
+-commutative N/A
lower-+.f64 N/A
lower-*.f64 N/A
lower-cos.f64 N/A
lift-+.f64 N/A
+-commutative N/A
lower-+.f64 N/A
lower-cos.f64 63.4
Applied rewrites 63.4%
Taylor expanded in eps around inf
lower-sin.f64 100.0
Applied rewrites 100.0%
; First-order form: eps * (1 + tan(x)^2), computed as fma(tan(x)^2, eps, eps).
(FPCore (x eps) :precision binary64 (fma (pow (tan x) 2.0) eps eps))
/* First-order form: eps * (1 + tan(x)^2) via a single fma. */
double code(double x, double eps) {
return fma(pow(tan(x), 2.0), eps, eps);
}
# First-order form: eps * (1 + tan(x)^2) via a single fma.
function code(x, eps) return fma((tan(x) ^ 2.0), eps, eps) end
(* First-order form: eps * (1 + Tan[x]^2) at machine precision. *)
code[x_, eps_] := N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left({\tan x}^{2}, \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
sub-neg N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
mul-1-neg N/A
remove-double-neg N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 99.7
Applied rewrites 99.7%
Applied rewrites 99.7%
; Polynomial-in-x^2 approximation of tan(x)^2, folded into fma(..., eps, eps).
(FPCore (x eps)
:precision binary64
(fma
(fma
(*
(fma
(fma 0.19682539682539682 (* x x) 0.37777777777777777)
(* x x)
0.6666666666666666)
(* x x))
(* x x)
(* x x))
eps
eps))
/* Polynomial-in-(x*x) approximation folded into fma(..., eps, eps). */
double code(double x, double eps) {
return fma(fma((fma(fma(0.19682539682539682, (x * x), 0.37777777777777777), (x * x), 0.6666666666666666) * (x * x)), (x * x), (x * x)), eps, eps);
}
# Polynomial-in-(x*x) approximation folded into fma(..., eps, eps).
function code(x, eps) return fma(fma(Float64(fma(fma(0.19682539682539682, Float64(x * x), 0.37777777777777777), Float64(x * x), 0.6666666666666666) * Float64(x * x)), Float64(x * x), Float64(x * x)), eps, eps) end
(* Polynomial-in-(x*x) approximation folded into a final multiply-add with eps. *)
code[x_, eps_] := N[(N[(N[(N[(N[(0.19682539682539682 * N[(x * x), $MachinePrecision] + 0.37777777777777777), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * x), $MachinePrecision] + N[(x * x), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right) \cdot \left(x \cdot x\right), x \cdot x, x \cdot x\right), \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
sub-neg N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
mul-1-neg N/A
remove-double-neg N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 99.7
Applied rewrites 99.7%
Applied rewrites 99.7%
Taylor expanded in x around 0
Applied rewrites 99.3%
Applied rewrites 99.3%
; Shorter polynomial-in-x^2 form, each level a multiply-add ending in + eps.
(FPCore (x eps) :precision binary64 (fma (fma (* (fma 0.37777777777777777 (* x x) 0.6666666666666666) eps) (* x x) eps) (* x x) eps))
/* Shorter polynomial-in-(x*x) form; each fma level adds eps back in. */
double code(double x, double eps) {
return fma(fma((fma(0.37777777777777777, (x * x), 0.6666666666666666) * eps), (x * x), eps), (x * x), eps);
}
# Shorter polynomial-in-(x*x) form; each fma level adds eps back in.
function code(x, eps) return fma(fma(Float64(fma(0.37777777777777777, Float64(x * x), 0.6666666666666666) * eps), Float64(x * x), eps), Float64(x * x), eps) end
(* Shorter polynomial-in-(x*x) form; each level is a multiply-add ending in + eps. *)
code[x_, eps_] := N[(N[(N[(N[(0.37777777777777777 * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * eps), $MachinePrecision] * N[(x * x), $MachinePrecision] + eps), $MachinePrecision] * N[(x * x), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right) \cdot \varepsilon, x \cdot x, \varepsilon\right), x \cdot x, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
sub-neg N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
mul-1-neg N/A
remove-double-neg N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 99.7
Applied rewrites 99.7%
Applied rewrites 99.7%
Taylor expanded in x around 0
Applied rewrites 99.2%
; Mixed polynomial in x and eps, nested as fma chains with a final fma(..., eps, eps).
(FPCore (x eps)
:precision binary64
(fma
(*
(fma
(fma (fma 1.3333333333333333 eps (* 0.6666666666666666 x)) x 1.0)
x
eps)
x)
eps
eps))
/* Mixed polynomial in x and eps, nested as fma chains with a final fma(..., eps, eps). */
double code(double x, double eps) {
return fma((fma(fma(fma(1.3333333333333333, eps, (0.6666666666666666 * x)), x, 1.0), x, eps) * x), eps, eps);
}
# Mixed polynomial in x and eps, nested as fma chains with a final fma(..., eps, eps).
function code(x, eps) return fma(Float64(fma(fma(fma(1.3333333333333333, eps, Float64(0.6666666666666666 * x)), x, 1.0), x, eps) * x), eps, eps) end
(* Mixed polynomial in x and eps as nested multiply-adds. *)
code[x_, eps_] := N[(N[(N[(N[(N[(1.3333333333333333 * eps + N[(0.6666666666666666 * x), $MachinePrecision]), $MachinePrecision] * x + 1.0), $MachinePrecision] * x + eps), $MachinePrecision] * x), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, \varepsilon, 0.6666666666666666 \cdot x\right), x, 1\right), x, \varepsilon\right) \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
associate--l+ N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
Applied rewrites 100.0%
Taylor expanded in x around 0
Applied rewrites 99.1%
; Cubic-in-x form: fma(fma(fma(2/3*x, x, 1), x, eps) * x, eps, eps).
(FPCore (x eps) :precision binary64 (fma (* (fma (fma (* 0.6666666666666666 x) x 1.0) x eps) x) eps eps))
double code(double x, double eps) {
return fma((fma(fma((0.6666666666666666 * x), x, 1.0), x, eps) * x), eps, eps);
}
# Cubic-in-x approximation folded into fma(..., eps, eps).
function code(x, eps) return fma(fma(Float64(0.6666666666666666 * x), x, 1.0), x, eps) |> (t -> t) === nothing ? nothing : fma(Float64(fma(fma(Float64(0.6666666666666666 * x), x, 1.0), x, eps) * x), eps, eps) end
(* Cubic-in-x approximation folded into a final multiply-add with eps. *)
code[x_, eps_] := N[(N[(N[(N[(N[(0.6666666666666666 * x), $MachinePrecision] * x + 1.0), $MachinePrecision] * x + eps), $MachinePrecision] * x), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.6666666666666666 \cdot x, x, 1\right), x, \varepsilon\right) \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
associate--l+ N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
Applied rewrites 100.0%
Taylor expanded in x around 0
Applied rewrites 99.1%
Taylor expanded in eps around 0
Applied rewrites 99.1%
; Quartic-in-x form: fma(fma(2/3*eps, x*x, eps), x*x, eps).
(FPCore (x eps) :precision binary64 (fma (fma (* 0.6666666666666666 eps) (* x x) eps) (* x x) eps))
/* Quartic-in-x form: two fma levels over (x*x), each adding eps back in. */
double code(double x, double eps) {
return fma(fma((0.6666666666666666 * eps), (x * x), eps), (x * x), eps);
}
# Quartic-in-x form: two fma levels over (x*x), each adding eps back in.
function code(x, eps) return fma(fma(Float64(0.6666666666666666 * eps), Float64(x * x), eps), Float64(x * x), eps) end
(* Quartic-in-x form: two multiply-add levels over (x*x), each adding eps back in. *)
code[x_, eps_] := N[(N[(N[(0.6666666666666666 * eps), $MachinePrecision] * N[(x * x), $MachinePrecision] + eps), $MachinePrecision] * N[(x * x), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.6666666666666666 \cdot \varepsilon, x \cdot x, \varepsilon\right), x \cdot x, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
sub-neg N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
mul-1-neg N/A
remove-double-neg N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 99.7
Applied rewrites 99.7%
Taylor expanded in x around 0
Applied rewrites 99.1%
; Low-order form: fma((eps + x) * x, eps, eps).
(FPCore (x eps) :precision binary64 (fma (* (+ eps x) x) eps eps))
double code(double x, double eps) {
return fma(((eps + x) * x), eps, eps);
}
# Low-order form: (eps + x) * x scaled by eps, with eps added via the fma.
function code(x, eps) return fma(Float64(Float64(eps + x) * x), eps, eps) end
(* Low-order form: (eps + x) * x * eps + eps at machine precision. *)
code[x_, eps_] := N[(N[(N[(eps + x), $MachinePrecision] * x), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\varepsilon + x\right) \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
associate--l+ N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
Applied rewrites 100.0%
Taylor expanded in x around 0
Applied rewrites 98.8%
; Simplest quadratic form: fma(eps * x, x, eps) = eps*x^2 + eps.
(FPCore (x eps) :precision binary64 (fma (* eps x) x eps))
double code(double x, double eps) {
return fma((eps * x), x, eps);
}
# Simplest quadratic form: eps*x*x + eps, computed with one fma.
function code(x, eps) return fma(Float64(eps * x), x, eps) end
(* Simplest quadratic form: eps*x*x + eps at machine precision. *)
code[x_, eps_] := N[(N[(eps * x), $MachinePrecision] * x + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon \cdot x, x, \varepsilon\right)
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
sub-neg N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
mul-1-neg N/A
remove-double-neg N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 99.7
Applied rewrites 99.7%
Applied rewrites 99.7%
Taylor expanded in x around 0
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 98.8%
Final simplification 98.8%
; Zeroth-order form: the difference reduces to eps; x is unused.
(FPCore (x eps) :precision binary64 (* 1.0 eps))
/* Zeroth-order form: the difference reduces to eps; x no longer participates. */
double code(double x, double eps) {
    (void) x; /* kept in the signature for interface compatibility */
    return 1.0 * eps;
}
! Zeroth-order form: the difference reduces to eps; x is unused.
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = 1.0d0 * eps
end function
/** Zeroth-order form: the difference reduces to eps; x is unused. */
public static double code(double x, double eps) {
return 1.0 * eps;
}
def code(x, eps):
    """Zeroth-order form: the difference reduces to eps; x is unused."""
    return 1.0 * eps
# Zeroth-order form: the difference reduces to eps; x is unused.
function code(x, eps) return Float64(1.0 * eps) end
% Zeroth-order form: the difference reduces to eps; x is unused.
function tmp = code(x, eps) tmp = 1.0 * eps; end
(* Zeroth-order form: the difference reduces to eps; x is unused. *)
code[x_, eps_] := N[(1.0 * eps), $MachinePrecision]
\begin{array}{l}
\\
1 \cdot \varepsilon
\end{array}
Initial program 63.4%
Taylor expanded in eps around 0
sub-neg N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
mul-1-neg N/A
remove-double-neg N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 99.7
Applied rewrites 99.7%
Applied rewrites 99.6%
Taylor expanded in x around 0
Applied rewrites 98.3%
; Recommended alternative: eps + eps*tan(x)^2, written without fma.
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
! First-order form eps + eps*tan(x)^2, grouped as eps + (eps*tan(x))*tan(x).
real(8) function code(x, eps)
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps + ((eps * tan(x)) * tan(x))
end function
/** First-order form eps + eps*tan(x)^2, grouped as eps + (eps*tan(x))*tan(x). */
public static double code(double x, double eps) {
return eps + ((eps * Math.tan(x)) * Math.tan(x));
}
def code(x, eps):
    """First-order form eps + eps*tan(x)**2, grouped as eps + (eps*tan(x))*tan(x)."""
    t = math.tan(x)
    return eps + (eps * t) * t
# First-order form eps + eps*tan(x)^2, grouped as eps + (eps*tan(x))*tan(x).
function code(x, eps) return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x))) end
% First-order form eps + eps*tan(x)^2, grouped as eps + (eps*tan(x))*tan(x).
function tmp = code(x, eps) tmp = eps + ((eps * tan(x)) * tan(x)); end
(* First-order form eps + eps*Tan[x]^2 at machine precision. *)
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2024242
; Problem specification as given to Herbie (seed 2024242):
; precondition bounds x to [-1e4, 1e4] and eps to (1e-16*|x|, |x|);
; :alt records the suggested rewrite eps + eps*tan(x)*tan(x).
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))