
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Initial program: tan(x + eps) - tan(x), evaluated directly.
! NOTE(review): generated code; real(8) and the missing implicit none are
! nonstandard style, but the body is kept byte-identical to the generator output.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
/** Initial program: tan(x + eps) - tan(x), evaluated directly in binary64. */
public static double code(double x, double eps) {
    final double tShifted = Math.tan(x + eps);
    final double tBase = Math.tan(x);
    return tShifted - tBase;
}
def code(x, eps):
    """Initial program: tan(x + eps) - tan(x), evaluated directly."""
    t_shifted = math.tan(x + eps)
    t_base = math.tan(x)
    return t_shifted - t_base
# Initial program: tan(x + eps) - tan(x), evaluated directly in Float64.
function code(x, eps)
    t_shifted = tan(Float64(x + eps))
    t_base = tan(x)
    return Float64(t_shifted - t_base)
end
% Initial program: tan(x + eps) - tan(x), evaluated directly.
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Initial program: Tan[x + eps] - Tan[x], each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* Alternative 1: textually identical to the initial program above. */
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Alternative 1: textually identical to the initial program above.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
/** Alternative 1: textually identical to the initial program above. */
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
# Alternative 1: textually identical to the initial program above.
def code(x, eps): return math.tan((x + eps)) - math.tan(x)
# Alternative 1: textually identical to the initial program above.
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
% Alternative 1: textually identical to the initial program above.
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Alternative 1: textually identical to the initial program above. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
(FPCore (x eps) :precision binary64 (fma (* (fma (fma (* 0.6666666666666666 x) x 1.0) x eps) x) eps eps))
double code(double x, double eps) {
return fma((fma(fma((0.6666666666666666 * x), x, 1.0), x, eps) * x), eps, eps);
}
# Alternative 2: FMA-chain form of the rewritten expression.
function code(x, eps) return fma(Float64(fma(fma(Float64(0.6666666666666666 * x), x, 1.0), x, eps) * x), eps, eps) end
(* Alternative 2: multiply-add chain, each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[(N[(N[(N[(0.6666666666666666 * x), $MachinePrecision] * x + 1.0), $MachinePrecision] * x + eps), $MachinePrecision] * x), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.6666666666666666 \cdot x, x, 1\right), x, \varepsilon\right) \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 59.2%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites100.0%
(FPCore (x eps) :precision binary64 (fma (* eps (+ eps x)) x eps))
double code(double x, double eps) {
return fma((eps * (eps + x)), x, eps);
}
# Alternative 3: fma(eps * (eps + x), x, eps).
function code(x, eps) return fma(Float64(eps * Float64(eps + x)), x, eps) end
(* Alternative 3: eps*(eps + x)*x + eps, each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[(eps * N[(eps + x), $MachinePrecision]), $MachinePrecision] * x + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon \cdot \left(\varepsilon + x\right), x, \varepsilon\right)
\end{array}
Initial program 59.2%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites99.9%
(FPCore (x eps) :precision binary64 (* (fma x x 1.0) eps))
double code(double x, double eps) {
return fma(x, x, 1.0) * eps;
}
# Alternative 4: fma(x, x, 1.0) * eps.
function code(x, eps) return Float64(fma(x, x, 1.0) * eps) end
(* Alternative 4: (x*x + 1)*eps, each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[(x * x + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, x, 1\right) \cdot \varepsilon
\end{array}
Initial program 59.2%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites99.9%
Taylor expanded in eps around 0
Applied rewrites99.9%
(FPCore (x eps) :precision binary64 (* (* eps x) x))
/* Alternative 5: (eps * x) * x — from the Taylor expansion in x around inf
   (per the rewrite log below); overall accuracy is low (6.3%). */
double code(double x, double eps) {
    double prod = eps * x;
    return prod * x;
}
! Alternative 5: (eps * x) * x.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (eps * x) * x
end function
/** Alternative 5: (eps * x) * x. */
public static double code(double x, double eps) {
    final double prod = eps * x;
    return prod * x;
}
def code(x, eps):
    """Alternative 5: (eps * x) * x."""
    prod = eps * x
    return prod * x
# Alternative 5: (eps * x) * x.
function code(x, eps) return Float64(Float64(eps * x) * x) end
% Alternative 5: (eps * x) * x.
function tmp = code(x, eps) tmp = (eps * x) * x; end
(* Alternative 5: (eps*x)*x, each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[(eps * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(\varepsilon \cdot x\right) \cdot x
\end{array}
Initial program 59.2%
Taylor expanded in eps around 0
associate--l+N/A
+-commutativeN/A
distribute-rgt-inN/A
*-lft-identityN/A
lower-fma.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites99.9%
Taylor expanded in x around inf
Applied rewrites6.3%
Applied rewrites6.3%
(FPCore (x eps) :precision binary64 0.0)
/* Alternative 6: the constant 0.0, ignoring both inputs
   (overall accuracy 5.4% per the rewrite log below). */
double code(double x, double eps) {
    (void)x;
    (void)eps;
    return 0.0;
}
! Alternative 6: the constant zero, ignoring both inputs.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = 0.0d0
end function
/** Alternative 6: the constant zero, ignoring both inputs. */
public static double code(double x, double eps) {
return 0.0;
}
# Alternative 6: the constant zero, ignoring both inputs.
def code(x, eps): return 0.0
# Alternative 6: the constant zero, ignoring both inputs.
function code(x, eps) return 0.0 end
% Alternative 6: the constant zero, ignoring both inputs.
function tmp = code(x, eps) tmp = 0.0; end
(* Alternative 6: the constant zero, ignoring both inputs. *)
code[x_, eps_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 59.2%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-tan.f64N/A
tan-quotN/A
distribute-neg-frac2N/A
cos-+PI-revN/A
div-invN/A
lower-fma.f64N/A
lower-sin.f64N/A
lower-/.f64N/A
cos-+PI-revN/A
lower-neg.f64N/A
lower-cos.f64 59.1
lift-+.f64N/A
+-commutativeN/A
lower-+.f64 59.1
Applied rewrites59.1%
Taylor expanded in eps around 0
distribute-lft1-inN/A
metadata-evalN/A
mul0-lft 5.4
Applied rewrites5.4%
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
! Alternative 7: eps + (eps * tan(x)) * tan(x).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps + ((eps * tan(x)) * tan(x))
end function
/** Alternative 7: eps + (eps * tan(x)) * tan(x), with tan(x) computed once. */
public static double code(double x, double eps) {
    final double t = Math.tan(x);
    return eps + (eps * t) * t;
}
def code(x, eps):
    """Alternative 7: eps + (eps * tan(x)) * tan(x), with tan(x) computed once."""
    t = math.tan(x)
    return eps + ((eps * t) * t)
# Alternative 7: eps + (eps * tan(x)) * tan(x).
function code(x, eps) return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x))) end
% Alternative 7: eps + (eps * tan(x)) * tan(x).
function tmp = code(x, eps) tmp = eps + ((eps * tan(x)) * tan(x)); end
(* Alternative 7: eps + (eps*Tan[x])*Tan[x], each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2024319
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))