
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* Original expression: tan(x + eps) - tan(x), evaluated naively in binary64.
   Subject to catastrophic cancellation when eps is small relative to x. */
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Original expression: tan(x + eps) - tan(x), evaluated naively in binary64.
! Subject to catastrophic cancellation when eps is small relative to x.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
// Original expression: Math.tan(x + eps) - Math.tan(x), evaluated naively.
// Subject to catastrophic cancellation when eps is small relative to x.
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
    """Return tan(x + eps) - tan(x), evaluated naively in binary64.

    Cancellation-prone when eps is small relative to x.
    """
    shifted = math.tan(x + eps)
    base = math.tan(x)
    return shifted - base
# Naive difference of tangents, tan(x + eps) - tan(x), forced to binary64.
# Cancellation-prone when eps is small relative to x.
function code(x, eps)
    shifted = tan(Float64(x + eps))
    return Float64(shifted - tan(x))
end
% Original expression: tan(x + eps) - tan(x), evaluated naively (cancellation-prone for small eps).
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Original expression: Tan[x + eps] - Tan[x], with each operation rounded to $MachinePrecision; cancellation-prone for small eps. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps): return math.tan((x + eps)) - math.tan(x)
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
(FPCore (x eps) :precision binary64 (let* ((t_0 (pow (tan x) 2.0))) (* eps (+ (fma (fma (sin x) t_0 (sin x)) (/ eps (cos x)) t_0) 1.0))))
/* Herbie alternative: second-order Taylor expansion in eps.
   With t_0 = tan(x)^2, computes
     eps * ( sin(x)*(1 + t_0) * (eps / cos(x)) + t_0 + 1 )
   i.e. eps*sec^2(x) + eps^2*tan(x)*sec^2(x), using fma to avoid the
   cancellation of the naive tan(x+eps) - tan(x). */
double code(double x, double eps) {
double t_0 = pow(tan(x), 2.0);
return eps * (fma(fma(sin(x), t_0, sin(x)), (eps / cos(x)), t_0) + 1.0);
}
function code(x, eps) t_0 = tan(x) ^ 2.0 return Float64(eps * Float64(fma(fma(sin(x), t_0, sin(x)), Float64(eps / cos(x)), t_0) + 1.0)) end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(eps * N[(N[(N[(N[Sin[x], $MachinePrecision] * t$95$0 + N[Sin[x], $MachinePrecision]), $MachinePrecision] * N[(eps / N[Cos[x], $MachinePrecision]), $MachinePrecision] + t$95$0), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\varepsilon \cdot \left(\mathsf{fma}\left(\mathsf{fma}\left(\sin x, t\_0, \sin x\right), \frac{\varepsilon}{\cos x}, t\_0\right) + 1\right)
\end{array}
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Applied rewrites98.9%
Final simplification98.9%
(FPCore (x eps) :precision binary64 (let* ((t_0 (pow (tan x) 2.0))) (fma (fma (sin x) (/ (fma eps t_0 eps) (cos x)) t_0) eps eps)))
/* Herbie alternative: same second-order Taylor series in eps as above,
   reassociated so the whole result is one nested fma chain:
     fma( sin(x) * (eps*(1 + t_0)) / cos(x) + t_0, eps, eps )
   with t_0 = tan(x)^2, algebraically eps*sec^2(x) + eps^2*tan(x)*sec^2(x). */
double code(double x, double eps) {
double t_0 = pow(tan(x), 2.0);
return fma(fma(sin(x), (fma(eps, t_0, eps) / cos(x)), t_0), eps, eps);
}
function code(x, eps) t_0 = tan(x) ^ 2.0 return fma(fma(sin(x), Float64(fma(eps, t_0, eps) / cos(x)), t_0), eps, eps) end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(N[Sin[x], $MachinePrecision] * N[(N[(eps * t$95$0 + eps), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision] + t$95$0), $MachinePrecision] * eps + eps), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\mathsf{fma}\left(\mathsf{fma}\left(\sin x, \frac{\mathsf{fma}\left(\varepsilon, t\_0, \varepsilon\right)}{\cos x}, t\_0\right), \varepsilon, \varepsilon\right)
\end{array}
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Applied rewrites98.9%
lift-sin.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
lift-sin.f64N/A
lift-fma.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
associate-+l+N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
lift-+.f64N/A
lift-sin.f64N/A
lift-*.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
lift-+.f64N/A
lift-fma.f64N/A
*-commutativeN/A
Applied rewrites98.9%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (+ (pow (tan x) 2.0) 1.0)))
(*
eps
(fma
(/
eps
(fma
(* x x)
(fma
(* x x)
(fma (* x x) -0.001388888888888889 0.041666666666666664)
-0.5)
1.0))
(* (sin x) t_0)
t_0))))
/* Herbie alternative: as the fma-series form, but 1/cos(x) in the eps^2 term
   is replaced by division by a degree-6 Taylor polynomial of cos about 0:
     1 - x^2/2 + x^4/24 - x^6/720
   (0.041666... = 1/24, -0.0013888... = -1/720), evaluated Horner-style with fma.
   t_0 = tan(x)^2 + 1 = sec^2(x). */
double code(double x, double eps) {
double t_0 = pow(tan(x), 2.0) + 1.0;
return eps * fma((eps / fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0)), (sin(x) * t_0), t_0);
}
function code(x, eps) t_0 = Float64((tan(x) ^ 2.0) + 1.0) return Float64(eps * fma(Float64(eps / fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0)), Float64(sin(x) * t_0), t_0)) end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(eps * N[(N[(eps / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[Sin[x], $MachinePrecision] * t$95$0), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2} + 1\\
\varepsilon \cdot \mathsf{fma}\left(\frac{\varepsilon}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)}, \sin x \cdot t\_0, t\_0\right)
\end{array}
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Applied rewrites98.9%
lift-sin.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
lift-sin.f64N/A
lift-fma.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
associate-+l+N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64 98.8
Applied rewrites98.8%
Final simplification98.8%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (+ (pow (tan x) 2.0) 1.0)))
(*
eps
(fma
(/ eps (fma (* x x) (fma (* x x) 0.041666666666666664 -0.5) 1.0))
(* (sin x) t_0)
t_0))))
/* Herbie alternative: like the degree-6 variant above, but cos(x) in the
   eps^2 correction is approximated by the degree-4 Taylor polynomial
     1 - x^2/2 + x^4/24
   (0.041666... = 1/24). t_0 = tan(x)^2 + 1 = sec^2(x). */
double code(double x, double eps) {
double t_0 = pow(tan(x), 2.0) + 1.0;
return eps * fma((eps / fma((x * x), fma((x * x), 0.041666666666666664, -0.5), 1.0)), (sin(x) * t_0), t_0);
}
function code(x, eps) t_0 = Float64((tan(x) ^ 2.0) + 1.0) return Float64(eps * fma(Float64(eps / fma(Float64(x * x), fma(Float64(x * x), 0.041666666666666664, -0.5), 1.0)), Float64(sin(x) * t_0), t_0)) end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(eps * N[(N[(eps / N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[Sin[x], $MachinePrecision] * t$95$0), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2} + 1\\
\varepsilon \cdot \mathsf{fma}\left(\frac{\varepsilon}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), 1\right)}, \sin x \cdot t\_0, t\_0\right)
\end{array}
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Applied rewrites98.9%
lift-sin.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
lift-sin.f64N/A
lift-fma.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
associate-+l+N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64 98.7
Applied rewrites98.7%
Final simplification98.7%
(FPCore (x eps) :precision binary64 (let* ((t_0 (+ (pow (tan x) 2.0) 1.0))) (* eps (fma eps (* (sin x) t_0) t_0))))
/* Herbie alternative: drops the 1/cos(x) factor from the eps^2 term entirely
   (cos(x) ~ 1 near 0). With t_0 = tan(x)^2 + 1 = sec^2(x), computes
     eps * ( eps*sin(x)*t_0 + t_0 ). */
double code(double x, double eps) {
double t_0 = pow(tan(x), 2.0) + 1.0;
return eps * fma(eps, (sin(x) * t_0), t_0);
}
function code(x, eps) t_0 = Float64((tan(x) ^ 2.0) + 1.0) return Float64(eps * fma(eps, Float64(sin(x) * t_0), t_0)) end
code[x_, eps_] := Block[{t$95$0 = N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] + 1.0), $MachinePrecision]}, N[(eps * N[(eps * N[(N[Sin[x], $MachinePrecision] * t$95$0), $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2} + 1\\
\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \sin x \cdot t\_0, t\_0\right)
\end{array}
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Applied rewrites98.9%
lift-sin.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
lift-sin.f64N/A
lift-fma.f64N/A
lift-cos.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
lift-pow.f64N/A
associate-+l+N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
Applied rewrites98.6%
Final simplification98.6%
(FPCore (x eps) :precision binary64 (fma (pow (tan x) 2.0) eps eps))
/* Herbie alternative: first-order Taylor term only,
     eps * (tan(x)^2 + 1) = eps * sec^2(x),
   computed as a single fma. */
double code(double x, double eps) {
return fma(pow(tan(x), 2.0), eps, eps);
}
function code(x, eps) return fma((tan(x) ^ 2.0), eps, eps) end
code[x_, eps_] := N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left({\tan x}^{2}, \varepsilon, \varepsilon\right)
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
sub-negN/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
mul-1-negN/A
remove-double-negN/A
lower-/.f64N/A
lower-pow.f64N/A
lower-sin.f64N/A
lower-pow.f64N/A
lower-cos.f64 98.6
Applied rewrites98.6%
lift-sin.f64N/A
lift-pow.f64N/A
lift-cos.f64N/A
lift-pow.f64N/A
lift-/.f64N/A
lift-fma.f64 98.6
Applied rewrites98.6%
(FPCore (x eps)
:precision binary64
(fma
eps
(*
x
(fma
x
(fma x (fma x 0.6666666666666666 (* eps 1.3333333333333333)) 1.0)
eps))
eps))
/* Herbie alternative: bivariate polynomial from Taylor-expanding in both eps
   and x around 0; the Horner/fma chain uses tan-series coefficients
   (0.666... = 2/3, 1.333... = 4/3). Valid only near x = 0. */
double code(double x, double eps) {
return fma(eps, (x * fma(x, fma(x, fma(x, 0.6666666666666666, (eps * 1.3333333333333333)), 1.0), eps)), eps);
}
function code(x, eps) return fma(eps, Float64(x * fma(x, fma(x, fma(x, 0.6666666666666666, Float64(eps * 1.3333333333333333)), 1.0), eps)), eps) end
code[x_, eps_] := N[(eps * N[(x * N[(x * N[(x * N[(x * 0.6666666666666666 + N[(eps * 1.3333333333333333), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.6666666666666666, \varepsilon \cdot 1.3333333333333333\right), 1\right), \varepsilon\right), \varepsilon\right)
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
associate--l+N/A
*-commutativeN/A
lower-fma.f64N/A
distribute-rgt-out--N/A
lower-*.f64N/A
metadata-eval 97.6
Applied rewrites97.6%
(FPCore (x eps) :precision binary64 (fma x (* eps (+ x eps)) eps))
/* Herbie alternative: low-order expansion near x = 0,
     eps + x * eps * (x + eps). */
double code(double x, double eps) {
return fma(x, (eps * (x + eps)), eps);
}
function code(x, eps) return fma(x, Float64(eps * Float64(x + eps)), eps) end
code[x_, eps_] := N[(x * N[(eps * N[(x + eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \varepsilon \cdot \left(x + \varepsilon\right), \varepsilon\right)
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
distribute-lft-outN/A
+-commutativeN/A
lower-*.f64N/A
lower-+.f64 97.6
Applied rewrites97.6%
Final simplification97.6%
(FPCore (x eps) :precision binary64 (fma x (* x eps) eps))
/* Herbie alternative: eps * (1 + x^2), i.e. the first-order term eps*sec^2(x)
   with tan(x) approximated by x. Valid only near x = 0. */
double code(double x, double eps) {
return fma(x, (x * eps), eps);
}
function code(x, eps) return fma(x, Float64(x * eps), eps) end
code[x_, eps_] := N[(x * N[(x * eps), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x \cdot \varepsilon, \varepsilon\right)
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
sub-negN/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
mul-1-negN/A
remove-double-negN/A
lower-/.f64N/A
lower-pow.f64N/A
lower-sin.f64N/A
lower-pow.f64N/A
lower-cos.f64 98.6
Applied rewrites98.6%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64 97.6
Applied rewrites97.6%
(FPCore (x eps) :precision binary64 eps)
/* Herbie alternative: zeroth-order approximation — returns eps itself
   (tan(x+eps) - tan(x) ~ eps when x is near 0). x is intentionally unused. */
double code(double x, double eps) {
return eps;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps
end function
public static double code(double x, double eps) {
return eps;
}
def code(x, eps): return eps
function code(x, eps) return eps end
function tmp = code(x, eps) tmp = eps; end
code[x_, eps_] := eps
\begin{array}{l}
\\
\varepsilon
\end{array}
Initial program 62.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
Applied rewrites98.9%
Applied rewrites98.9%
Taylor expanded in x around 0
Applied rewrites97.5%
*-lft-identity 97.5
Applied rewrites97.5%
(FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos x) (cos (+ x eps)))))
/* Exact trig identity (no truncation):
     tan(a) - tan(b) = sin(a - b) / (cos(a) * cos(b))
   with a = x + eps, b = x. Avoids the subtraction of nearly-equal tangents. */
double code(double x, double eps) {
return sin(eps) / (cos(x) * cos((x + eps)));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = sin(eps) / (cos(x) * cos((x + eps)))
end function
public static double code(double x, double eps) {
return Math.sin(eps) / (Math.cos(x) * Math.cos((x + eps)));
}
def code(x, eps): return math.sin(eps) / (math.cos(x) * math.cos((x + eps)))
function code(x, eps) return Float64(sin(eps) / Float64(cos(x) * cos(Float64(x + eps)))) end
function tmp = code(x, eps) tmp = sin(eps) / (cos(x) * cos((x + eps))); end
code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)}
\end{array}
(FPCore (x eps) :precision binary64 (- (/ (+ (tan x) (tan eps)) (- 1.0 (* (tan x) (tan eps)))) (tan x)))
/* Tangent addition formula:
     tan(x + eps) = (tan x + tan eps) / (1 - tan x * tan eps),
   then subtracts tan(x). Avoids evaluating tan at the shifted point x + eps. */
double code(double x, double eps) {
return ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((tan(x) + tan(eps)) / (1.0d0 - (tan(x) * tan(eps)))) - tan(x)
end function
public static double code(double x, double eps) {
return ((Math.tan(x) + Math.tan(eps)) / (1.0 - (Math.tan(x) * Math.tan(eps)))) - Math.tan(x);
}
def code(x, eps): return ((math.tan(x) + math.tan(eps)) / (1.0 - (math.tan(x) * math.tan(eps)))) - math.tan(x)
function code(x, eps) return Float64(Float64(Float64(tan(x) + tan(eps)) / Float64(1.0 - Float64(tan(x) * tan(eps)))) - tan(x)) end
function tmp = code(x, eps) tmp = ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x); end
code[x_, eps_] := N[(N[(N[(N[Tan[x], $MachinePrecision] + N[Tan[eps], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[x], $MachinePrecision] * N[Tan[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\tan x + \tan \varepsilon}{1 - \tan x \cdot \tan \varepsilon} - \tan x
\end{array}
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
/* First-order form without fma:
     eps + eps * tan(x)^2  =  eps * sec^2(x),
   the derivative approximation eps * d/dx tan(x). */
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps + ((eps * tan(x)) * tan(x))
end function
public static double code(double x, double eps) {
return eps + ((eps * Math.tan(x)) * Math.tan(x));
}
def code(x, eps): return eps + ((eps * math.tan(x)) * math.tan(x))
function code(x, eps) return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x))) end
function tmp = code(x, eps) tmp = eps + ((eps * tan(x)) * tan(x)); end
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2024219
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (/ (sin eps) (* (cos x) (cos (+ x eps)))))
:alt
(! :herbie-platform default (- (/ (+ (tan x) (tan eps)) (- 1 (* (tan x) (tan eps)))) (tan x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))