
; Original program: tan(x + eps) - tan(x) in double precision.
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Compute tan(x + eps) - tan(x) directly (reference implementation;
! the subtraction cancels when eps is small relative to x).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
// Compute Math.tan(x + eps) - Math.tan(x) directly (naive form; cancels for small eps).
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
    """Return tan(x + eps) - tan(x), evaluated literally.

    Direct subtraction of two nearby tangents loses precision when eps
    is small relative to x.
    """
    shifted = math.tan(x + eps)
    base = math.tan(x)
    return shifted - base
# Evaluate tan(x + eps) - tan(x) literally; the subtraction cancels for small eps.
function code(x, eps)
    shifted = tan(Float64(x + eps))
    return Float64(shifted - tan(x))
end
% Compute tan(x + eps) - tan(x) directly (naive form; cancels for small eps).
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Compute Tan[x + eps] - Tan[x] at machine precision (naive form). *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the original expression.
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; The original program, repeated in each target language.
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
// C: naive difference of tangents.
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Fortran: naive difference of tangents.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
// Java: naive difference of tangents.
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
# Python: naive difference of tangents.
def code(x, eps): return math.tan((x + eps)) - math.tan(x)
# Julia: naive difference of tangents.
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
% MATLAB: naive difference of tangents.
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Mathematica: naive difference of tangents. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
; Alternative: Taylor expansion in eps written with fused multiply-adds
; over sin/cos powers (t_0 = sin(x)^2, t_1 = cos(x)^2).
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (sin x) 2.0)) (t_1 (pow (cos x) 2.0)))
(fma
(fma
(*
(fma
(fma
(fma (/ (sin x) t_1) (sin x) 1.0)
-0.5
(/
(fma (fma t_0 (/ -1.0 t_1) -1.0) t_0 (* t_0 0.16666666666666666))
t_1))
-1.0
-0.16666666666666666)
eps)
eps
(/ (fma (fma (/ t_0 t_1) eps eps) (sin x) (/ t_0 (cos x))) (cos x)))
eps
eps)))
// C translation of this alternative.
double code(double x, double eps) {
double t_0 = pow(sin(x), 2.0);
double t_1 = pow(cos(x), 2.0);
return fma(fma((fma(fma(fma((sin(x) / t_1), sin(x), 1.0), -0.5, (fma(fma(t_0, (-1.0 / t_1), -1.0), t_0, (t_0 * 0.16666666666666666)) / t_1)), -1.0, -0.16666666666666666) * eps), eps, (fma(fma((t_0 / t_1), eps, eps), sin(x), (t_0 / cos(x))) / cos(x))), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) t_0 = sin(x) ^ 2.0 t_1 = cos(x) ^ 2.0 return fma(fma(Float64(fma(fma(fma(Float64(sin(x) / t_1), sin(x), 1.0), -0.5, Float64(fma(fma(t_0, Float64(-1.0 / t_1), -1.0), t_0, Float64(t_0 * 0.16666666666666666)) / t_1)), -1.0, -0.16666666666666666) * eps), eps, Float64(fma(fma(Float64(t_0 / t_1), eps, eps), sin(x), Float64(t_0 / cos(x))) / cos(x))), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$1 = N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(N[Sin[x], $MachinePrecision] / t$95$1), $MachinePrecision] * N[Sin[x], $MachinePrecision] + 1.0), $MachinePrecision] * -0.5 + N[(N[(N[(t$95$0 * N[(-1.0 / t$95$1), $MachinePrecision] + -1.0), $MachinePrecision] * t$95$0 + N[(t$95$0 * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision] * -1.0 + -0.16666666666666666), $MachinePrecision] * eps), $MachinePrecision] * eps + N[(N[(N[(N[(t$95$0 / t$95$1), $MachinePrecision] * eps + eps), $MachinePrecision] * N[Sin[x], $MachinePrecision] + N[(t$95$0 / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]]]
% LaTeX rendering.
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\sin x}^{2}\\
t_1 := {\cos x}^{2}\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{\sin x}{t\_1}, \sin x, 1\right), -0.5, \frac{\mathsf{fma}\left(\mathsf{fma}\left(t\_0, \frac{-1}{t\_1}, -1\right), t\_0, t\_0 \cdot 0.16666666666666666\right)}{t\_1}\right), -1, -0.16666666666666666\right) \cdot \varepsilon, \varepsilon, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\frac{t\_0}{t\_1}, \varepsilon, \varepsilon\right), \sin x, \frac{t\_0}{\cos x}\right)}{\cos x}\right), \varepsilon, \varepsilon\right)
\end{array}
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
Applied rewrites 99.2%
; Alternative: second-order-in-eps correction expressed with tan(x) only
; (t_0 = tan(x)^2).
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (tan x) 2.0)))
(fma
(fma
(tan x)
(+ (tan x) (fma eps t_0 eps))
(*
(* eps eps)
(+
(- (fma t_0 (+ (fma -1.0 t_0 -1.0) -0.3333333333333333) -0.5))
-0.16666666666666666)))
eps
eps)))
// C translation of this alternative.
double code(double x, double eps) {
double t_0 = pow(tan(x), 2.0);
return fma(fma(tan(x), (tan(x) + fma(eps, t_0, eps)), ((eps * eps) * (-fma(t_0, (fma(-1.0, t_0, -1.0) + -0.3333333333333333), -0.5) + -0.16666666666666666))), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) t_0 = tan(x) ^ 2.0 return fma(fma(tan(x), Float64(tan(x) + fma(eps, t_0, eps)), Float64(Float64(eps * eps) * Float64(Float64(-fma(t_0, Float64(fma(-1.0, t_0, -1.0) + -0.3333333333333333), -0.5)) + -0.16666666666666666))), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(N[Tan[x], $MachinePrecision] * N[(N[Tan[x], $MachinePrecision] + N[(eps * t$95$0 + eps), $MachinePrecision]), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * N[((-N[(t$95$0 * N[(N[(-1.0 * t$95$0 + -1.0), $MachinePrecision] + -0.3333333333333333), $MachinePrecision] + -0.5), $MachinePrecision]) + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]]
% LaTeX rendering.
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\mathsf{fma}\left(\mathsf{fma}\left(\tan x, \tan x + \mathsf{fma}\left(\varepsilon, t\_0, \varepsilon\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \left(\left(-\mathsf{fma}\left(t\_0, \mathsf{fma}\left(-1, t\_0, -1\right) + -0.3333333333333333, -0.5\right)\right) + -0.16666666666666666\right)\right), \varepsilon, \varepsilon\right)
\end{array}
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Applied rewrites 99.2%
Applied rewrites 99.2%
Applied rewrites 99.2%
; Alternative: quadratic-in-eps correction expressed with tan(x) and fma.
(FPCore (x eps) :precision binary64 (fma (fma (tan x) (+ (tan x) (fma eps (pow (tan x) 2.0) eps)) (* (* eps eps) 0.3333333333333333)) eps eps))
// C translation of this alternative.
double code(double x, double eps) {
return fma(fma(tan(x), (tan(x) + fma(eps, pow(tan(x), 2.0), eps)), ((eps * eps) * 0.3333333333333333)), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) return fma(fma(tan(x), Float64(tan(x) + fma(eps, (tan(x) ^ 2.0), eps)), Float64(Float64(eps * eps) * 0.3333333333333333)), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := N[(N[(N[Tan[x], $MachinePrecision] * N[(N[Tan[x], $MachinePrecision] + N[(eps * N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\tan x, \tan x + \mathsf{fma}\left(\varepsilon, {\tan x}^{2}, \varepsilon\right), \left(\varepsilon \cdot \varepsilon\right) \cdot 0.3333333333333333\right), \varepsilon, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Applied rewrites 99.2%
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 99.0%
; Alternative: eps * sin(x)^2 / cos(x)^2 + eps, i.e. eps * (1 + tan(x)^2).
(FPCore (x eps) :precision binary64 (fma (/ (pow (sin x) 2.0) (pow (cos x) 2.0)) eps eps))
// C translation of this alternative.
double code(double x, double eps) {
return fma((pow(sin(x), 2.0) / pow(cos(x), 2.0)), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) return fma(Float64((sin(x) ^ 2.0) / (cos(x) ^ 2.0)), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := N[(N[(N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision] / N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
fp-cancel-sub-sign-inv N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
metadata-eval N/A
*-lft-identity N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.6
Applied rewrites 98.6%
; Alternative: fma(tan(x)^2, eps, eps) = eps * (1 + tan(x)^2).
(FPCore (x eps) :precision binary64 (fma (pow (tan x) 2.0) eps eps))
// C translation of this alternative.
double code(double x, double eps) {
return fma(pow(tan(x), 2.0), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) return fma((tan(x) ^ 2.0), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] * eps + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left({\tan x}^{2}, \varepsilon, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
fp-cancel-sub-sign-inv N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
metadata-eval N/A
*-lft-identity N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.6
Applied rewrites 98.6%
Applied rewrites 98.6%
; Alternative: eps times a ratio of polynomials in x, plus eps (Taylor in x).
(FPCore (x eps)
:precision binary64
(fma
(/
(*
(*
(fma (- (* 0.044444444444444446 (* x x)) 0.3333333333333333) (* x x) 1.0)
x)
x)
(fma (- (* (* 0.3333333333333333 x) x) 1.0) (* x x) 1.0))
eps
eps))
// C translation of this alternative.
double code(double x, double eps) {
return fma((((fma(((0.044444444444444446 * (x * x)) - 0.3333333333333333), (x * x), 1.0) * x) * x) / fma((((0.3333333333333333 * x) * x) - 1.0), (x * x), 1.0)), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) return fma(Float64(Float64(Float64(fma(Float64(Float64(0.044444444444444446 * Float64(x * x)) - 0.3333333333333333), Float64(x * x), 1.0) * x) * x) / fma(Float64(Float64(Float64(0.3333333333333333 * x) * x) - 1.0), Float64(x * x), 1.0)), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := N[(N[(N[(N[(N[(N[(N[(0.044444444444444446 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision] / N[(N[(N[(N[(0.3333333333333333 * x), $MachinePrecision] * x), $MachinePrecision] - 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{\left(\mathsf{fma}\left(0.044444444444444446 \cdot \left(x \cdot x\right) - 0.3333333333333333, x \cdot x, 1\right) \cdot x\right) \cdot x}{\mathsf{fma}\left(\left(0.3333333333333333 \cdot x\right) \cdot x - 1, x \cdot x, 1\right)}, \varepsilon, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
fp-cancel-sub-sign-inv N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
metadata-eval N/A
*-lft-identity N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.6
Applied rewrites 98.6%
Taylor expanded in x around 0
Applied rewrites 98.1%
Taylor expanded in x around 0
Applied rewrites 98.2%
; Alternative: eps times an even polynomial in x, plus eps.
(FPCore (x eps) :precision binary64 (fma (* (* (fma (fma 0.37777777777777777 (* x x) 0.6666666666666666) (* x x) 1.0) x) x) eps eps))
// C translation of this alternative.
double code(double x, double eps) {
return fma(((fma(fma(0.37777777777777777, (x * x), 0.6666666666666666), (x * x), 1.0) * x) * x), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) return fma(Float64(Float64(fma(fma(0.37777777777777777, Float64(x * x), 0.6666666666666666), Float64(x * x), 1.0) * x) * x), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := N[(N[(N[(N[(N[(0.37777777777777777 * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision] * eps + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot x\right) \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
fp-cancel-sub-sign-inv N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
metadata-eval N/A
*-lft-identity N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.6
Applied rewrites 98.6%
Taylor expanded in x around 0
Applied rewrites 98.1%
; Alternative: lower-order polynomial in x: eps * x^2 * (1 + 2/3 x^2) + eps.
(FPCore (x eps) :precision binary64 (fma (* (* (fma 0.6666666666666666 (* x x) 1.0) x) x) eps eps))
// C translation of this alternative.
double code(double x, double eps) {
return fma(((fma(0.6666666666666666, (x * x), 1.0) * x) * x), eps, eps);
}
# Julia translation of this alternative.
function code(x, eps) return fma(Float64(Float64(fma(0.6666666666666666, Float64(x * x), 1.0) * x) * x), eps, eps) end
(* Mathematica translation of this alternative. *)
code[x_, eps_] := N[(N[(N[(N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision] * eps + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\mathsf{fma}\left(0.6666666666666666, x \cdot x, 1\right) \cdot x\right) \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
fp-cancel-sub-sign-inv N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
metadata-eval N/A
*-lft-identity N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.6
Applied rewrites 98.6%
Taylor expanded in x around 0
Applied rewrites 98.1%
; Alternative: fma((eps + x) * x, eps, eps).
(FPCore (x eps) :precision binary64 (fma (* (+ eps x) x) eps eps))
double code(double x, double eps) {
return fma(((eps + x) * x), eps, eps);
}
# Julia translation: fma((eps + x) * x, eps, eps).
function code(x, eps) return fma(Float64(Float64(eps + x) * x), eps, eps) end
(* Mathematica translation: (eps + x) * x * eps + eps at machine precision. *)
code[x_, eps_] := N[(N[(N[(eps + x), $MachinePrecision] * x), $MachinePrecision] * eps + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\varepsilon + x\right) \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
Applied rewrites 99.2%
Applied rewrites 99.2%
Taylor expanded in x around 0
Applied rewrites 98.1%
Taylor expanded in eps around 0
Applied rewrites 98.1%
; Alternative: fma(eps * x, x, eps) = eps * x^2 + eps.
(FPCore (x eps) :precision binary64 (fma (* eps x) x eps))
double code(double x, double eps) {
return fma((eps * x), x, eps);
}
# Julia translation: fma(eps * x, x, eps).
function code(x, eps) return fma(Float64(eps * x), x, eps) end
(* Mathematica translation: eps * x * x + eps at machine precision. *)
code[x_, eps_] := N[(N[(eps * x), $MachinePrecision] * x + eps), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon \cdot x, x, \varepsilon\right)
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
fp-cancel-sub-sign-inv N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
metadata-eval N/A
*-lft-identity N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.6
Applied rewrites 98.6%
Taylor expanded in x around 0
Applied rewrites 98.1%
Taylor expanded in x around 0
Applied rewrites 98.1%
; Alternative: keep only the term eps * x^2 (note the low reported accuracy below).
(FPCore (x eps) :precision binary64 (* (* eps x) x))
// C translation.
double code(double x, double eps) {
return (eps * x) * x;
}
! Fortran translation.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (eps * x) * x
end function
// Java translation.
public static double code(double x, double eps) {
return (eps * x) * x;
}
# Python translation.
def code(x, eps): return (eps * x) * x
# Julia translation.
function code(x, eps) return Float64(Float64(eps * x) * x) end
% MATLAB translation.
function tmp = code(x, eps) tmp = (eps * x) * x; end
(* Mathematica translation. *)
code[x_, eps_] := N[(N[(eps * x), $MachinePrecision] * x), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\left(\varepsilon \cdot x\right) \cdot x
\end{array}
Initial program 60.4%
Taylor expanded in eps around 0
fp-cancel-sub-sign-inv N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
metadata-eval N/A
*-lft-identity N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.6
Applied rewrites 98.6%
Taylor expanded in x around 0
Applied rewrites 98.1%
Taylor expanded in x around 0
Applied rewrites 98.1%
Taylor expanded in x around inf
Applied rewrites 6.6%
; Alternative: eps + eps * tan(x)^2 = eps * (1 + tan(x)^2).
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
// C translation.
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
! Fortran translation.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps + ((eps * tan(x)) * tan(x))
end function
// Java translation.
public static double code(double x, double eps) {
return eps + ((eps * Math.tan(x)) * Math.tan(x));
}
# Python translation.
def code(x, eps): return eps + ((eps * math.tan(x)) * math.tan(x))
# Julia translation.
function code(x, eps) return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x))) end
% MATLAB translation.
function tmp = code(x, eps) tmp = eps + ((eps * tan(x)) * tan(x)); end
(* Mathematica translation. *)
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering.
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2024331
; Full problem specification: the precondition bounds x and eps, and the
; :alt clause records the suggested rewrite eps + eps * tan(x)^2,
; i.e. eps * (1 + tan(x)^2).
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))