
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Computes tan(x + eps) - tan(x) in double precision.
! NOTE(review): cancellation-prone when eps is small relative to x.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
/** Computes tan(x + eps) - tan(x); cancels badly for small eps. */
public static double code(double x, double eps) {
    final double shifted = Math.tan(x + eps);
    final double base = Math.tan(x);
    return shifted - base;
}
def code(x, eps):
    """Return tan(x + eps) - tan(x); cancellation-prone for small eps."""
    shifted = math.tan(x + eps)
    base = math.tan(x)
    return shifted - base
# tan(x + eps) - tan(x) in Float64; cancellation-prone for small eps.
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
% tan(x + eps) - tan(x); cancellation-prone for small eps.
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* tan(x + eps) - tan(x), each intermediate rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* Repeated listing of the original expression: tan(x + eps) - tan(x). */
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
/** Repeated listing of the original expression: tan(x + eps) - tan(x). */
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
# Repeated listing of the original expression: tan(x + eps) - tan(x).
def code(x, eps): return math.tan((x + eps)) - math.tan(x)
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
(FPCore (x eps)
:precision binary64
(fma
eps
(fma
x
eps
(*
x
(*
x
(fma (* x x) (fma (* x x) 0.37777777777777777 0.6666666666666666) 1.0))))
eps))
/* Polynomial (Taylor-series) approximation of tan(x + eps) - tan(x),
 * expanded first in eps then in x, evaluated with fused multiply-adds:
 * roughly eps * (1 + x*eps + x^2 * (1 + (2/3)x^2 + (17/45)x^4)).
 * 0.6666... = 2/3 and 0.37777... = 17/45 are the series coefficients. */
double code(double x, double eps) {
return fma(eps, fma(x, eps, (x * (x * fma((x * x), fma((x * x), 0.37777777777777777, 0.6666666666666666), 1.0)))), eps);
}
function code(x, eps) return fma(eps, fma(x, eps, Float64(x * Float64(x * fma(Float64(x * x), fma(Float64(x * x), 0.37777777777777777, 0.6666666666666666), 1.0)))), eps) end
code[x_, eps_] := N[(eps * N[(x * eps + N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.37777777777777777 + 0.6666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \varepsilon, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.37777777777777777, 0.6666666666666666\right), 1\right)\right)\right), \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
Simplified100.0%
Taylor expanded in x around 0
Simplified100.0%
Taylor expanded in x around 0
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64100.0
Simplified100.0%
Taylor expanded in x around 0
Simplified100.0%
(FPCore (x eps) :precision binary64 (fma eps (* x (fma x (fma x (* x 0.6666666666666666) 1.0) eps)) eps))
/* Lower-order polynomial alternative using fused multiply-adds:
 * eps * (1 + x * (x * (1 + (2/3)x^2) + eps)); 0.6666... = 2/3. */
double code(double x, double eps) {
return fma(eps, (x * fma(x, fma(x, (x * 0.6666666666666666), 1.0), eps)), eps);
}
function code(x, eps) return fma(eps, Float64(x * fma(x, fma(x, Float64(x * 0.6666666666666666), 1.0), eps)), eps) end
code[x_, eps_] := N[(eps * N[(x * N[(x * N[(x * N[(x * 0.6666666666666666), $MachinePrecision] + 1.0), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right), \varepsilon\right), \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
Simplified100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
associate--l+N/A
+-commutativeN/A
distribute-rgt-out--N/A
accelerator-lowering-fma.f64N/A
metadata-evalN/A
*-commutativeN/A
*-lowering-*.f6499.9
Simplified99.9%
Taylor expanded in eps around 0
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6499.9
Simplified99.9%
(FPCore (x eps) :precision binary64 (fma (* x x) (* eps (fma x (* x 0.6666666666666666) 1.0)) eps))
/* Polynomial alternative with x*x factored out:
 * eps + x^2 * eps * (1 + (2/3)x^2); 0.6666... = 2/3. */
double code(double x, double eps) {
return fma((x * x), (eps * fma(x, (x * 0.6666666666666666), 1.0)), eps);
}
function code(x, eps) return fma(Float64(x * x), Float64(eps * fma(x, Float64(x * 0.6666666666666666), 1.0)), eps) end
code[x_, eps_] := N[(N[(x * x), $MachinePrecision] * N[(eps * N[(x * N[(x * 0.6666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right), \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
Simplified100.0%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
associate--l+N/A
+-commutativeN/A
distribute-rgt-out--N/A
accelerator-lowering-fma.f64N/A
metadata-evalN/A
*-commutativeN/A
*-lowering-*.f6499.9
Simplified99.9%
Taylor expanded in eps around 0
+-commutativeN/A
distribute-rgt-inN/A
associate-*l*N/A
*-lft-identityN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6499.9
Simplified99.9%
Final simplification99.9%
(FPCore (x eps) :precision binary64 (fma x (* eps (+ eps x)) eps))
double code(double x, double eps) {
return fma(x, (eps * (eps + x)), eps);
}
function code(x, eps) return fma(x, Float64(eps * Float64(eps + x)), eps) end
code[x_, eps_] := N[(x * N[(eps * N[(eps + x), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \varepsilon \cdot \left(\varepsilon + x\right), \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
Simplified100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
distribute-lft-outN/A
+-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f6499.6
Simplified99.6%
(FPCore (x eps) :precision binary64 (fma x (* eps x) eps))
/* Quadratic approximation: eps * (1 + x^2), via one fma. */
double code(double x, double eps) {
return fma(x, (eps * x), eps);
}
function code(x, eps) return fma(x, Float64(eps * x), eps) end
code[x_, eps_] := N[(x * N[(eps * x), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \varepsilon \cdot x, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
Simplified100.0%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
distribute-lft-outN/A
+-commutativeN/A
*-lowering-*.f64N/A
+-lowering-+.f6499.6
Simplified99.6%
Taylor expanded in eps around 0
*-lowering-*.f6499.6
Simplified99.6%
(FPCore (x eps) :precision binary64 (fma eps (* eps x) eps))
/* Approximation: eps + eps^2 * x, via one fma. */
double code(double x, double eps) {
return fma(eps, (eps * x), eps);
}
function code(x, eps) return fma(eps, Float64(eps * x), eps) end
code[x_, eps_] := N[(eps * N[(eps * x), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon, \varepsilon \cdot x, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
Simplified100.0%
Taylor expanded in x around 0
*-lowering-*.f6498.8
Simplified98.8%
(FPCore (x eps) :precision binary64 eps)
/* Leading-order approximation: tan(x + eps) - tan(x) ~ eps. */
double code(double x, double eps) {
return eps;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps
end function
/** Leading-order approximation: tan(x + eps) - tan(x) ~ eps. */
public static double code(double x, double eps) {
return eps;
}
# Leading-order approximation: tan(x + eps) - tan(x) ~ eps.
def code(x, eps): return eps
function code(x, eps) return eps end
function tmp = code(x, eps) tmp = eps; end
code[x_, eps_] := eps
\begin{array}{l}
\\
\varepsilon
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
accelerator-lowering-fma.f64N/A
Simplified100.0%
Taylor expanded in x around 0
Simplified98.8%
(FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos x) (cos (+ x eps)))))
double code(double x, double eps) {
return sin(eps) / (cos(x) * cos((x + eps)));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = sin(eps) / (cos(x) * cos((x + eps)))
end function
public static double code(double x, double eps) {
return Math.sin(eps) / (Math.cos(x) * Math.cos((x + eps)));
}
def code(x, eps):
    """tan(x+eps) - tan(x) rewritten as sin(eps) / (cos(x) * cos(x+eps))."""
    numer = math.sin(eps)
    denom = math.cos(x) * math.cos(x + eps)
    return numer / denom
function code(x, eps) return Float64(sin(eps) / Float64(cos(x) * cos(Float64(x + eps)))) end
function tmp = code(x, eps) tmp = sin(eps) / (cos(x) * cos((x + eps))); end
code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)}
\end{array}
(FPCore (x eps) :precision binary64 (- (/ (+ (tan x) (tan eps)) (- 1.0 (* (tan x) (tan eps)))) (tan x)))
/* Uses the tangent addition identity:
 * tan(x + eps) = (tan x + tan eps) / (1 - tan x * tan eps).
 * NOTE(review): the denominator vanishes when tan(x)*tan(eps) == 1
 * (i.e. x + eps near pi/2 + k*pi), producing inf/nan there. */
double code(double x, double eps) {
return ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((tan(x) + tan(eps)) / (1.0d0 - (tan(x) * tan(eps)))) - tan(x)
end function
public static double code(double x, double eps) {
return ((Math.tan(x) + Math.tan(eps)) / (1.0 - (Math.tan(x) * Math.tan(eps)))) - Math.tan(x);
}
# Tangent addition identity: tan(x+eps) = (tan x + tan eps)/(1 - tan x * tan eps).
# NOTE(review): denominator vanishes when tan(x)*tan(eps) == 1.
def code(x, eps): return ((math.tan(x) + math.tan(eps)) / (1.0 - (math.tan(x) * math.tan(eps)))) - math.tan(x)
function code(x, eps) return Float64(Float64(Float64(tan(x) + tan(eps)) / Float64(1.0 - Float64(tan(x) * tan(eps)))) - tan(x)) end
function tmp = code(x, eps) tmp = ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x); end
code[x_, eps_] := N[(N[(N[(N[Tan[x], $MachinePrecision] + N[Tan[eps], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[x], $MachinePrecision] * N[Tan[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\tan x + \tan \varepsilon}{1 - \tan x \cdot \tan \varepsilon} - \tan x
\end{array}
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps + ((eps * tan(x)) * tan(x))
end function
public static double code(double x, double eps) {
return eps + ((eps * Math.tan(x)) * Math.tan(x));
}
def code(x, eps): return eps + ((eps * math.tan(x)) * math.tan(x))
function code(x, eps) return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x))) end
function tmp = code(x, eps) tmp = eps + ((eps * tan(x)) * tan(x)); end
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2024205
; Herbie input for "2tan (problem 3.3.2)": tan(x + eps) - tan(x) in binary64,
; with precondition -10000 <= x <= 10000 and 1e-16*|x| < eps < |x|,
; plus three manually supplied alternative rewrites (:alt clauses).
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (/ (sin eps) (* (cos x) (cos (+ x eps)))))
:alt
(! :herbie-platform default (- (/ (+ (tan x) (tan eps)) (- 1 (* (tan x) (tan eps)))) (tan x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))