
; Input program: naive forward difference of tan in binary64 (cancels for small eps).
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Naive forward difference of tan: tan(x + eps) - tan(x).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: shifted
shifted = tan(x + eps)
code = shifted - tan(x)
end function
// Naive forward difference of tan; cancellation-prone for small eps.
public static double code(double x, double eps) {
    double shifted = Math.tan(x + eps);
    return shifted - Math.tan(x);
}
def code(x, eps):
    """Naive forward difference: tan(x + eps) - tan(x) (cancellation-prone)."""
    shifted = math.tan(x + eps)
    return shifted - math.tan(x)
# Naive forward difference of tan, rounded to binary64.
function code(x, eps)
    shifted = tan(Float64(x + eps))
    return Float64(shifted - tan(x))
end
% Naive forward difference of tan: tan(x + eps) - tan(x).
function tmp = code(x, eps)
  shifted = tan(x + eps);
  tmp = shifted - tan(x);
end
(* Naive forward difference; each N[..., $MachinePrecision] mimics one binary64 rounding step. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 baseline: the original naive difference, repeated for the report.
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! Naive forward difference of tan: tan(x + eps) - tan(x).
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: lead
lead = tan(x + eps)
code = lead - tan(x)
end function
// Naive forward difference of tan: tan(x + eps) - tan(x).
public static double code(double x, double eps) {
    double lead = Math.tan(x + eps);
    return lead - Math.tan(x);
}
def code(x, eps):
    """Naive forward difference of tan: tan(x + eps) - tan(x)."""
    lead = math.tan(x + eps)
    return lead - math.tan(x)
# Naive forward difference of tan, rounded to binary64.
function code(x, eps)
    lead = tan(Float64(x + eps))
    return Float64(lead - tan(x))
end
% Naive forward difference of tan: tan(x + eps) - tan(x).
function tmp = code(x, eps)
  lead = tan(x + eps);
  tmp = lead - tan(x);
end
(* Naive forward difference; N[..., $MachinePrecision] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
; Alternative: degree-3 eps-series of tan(x+eps) - tan(x) in fused-multiply-add form.
; t_0 = tan(x)^2; operand order is bit-significant and must not be rearranged.
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (tan x) 2.0)))
(fma
(fma
(fma
eps
(fma
(tan x)
(tan x)
(+ (pow (tan x) 4.0) (fma t_0 0.3333333333333333 0.3333333333333333)))
(* (fma (tan x) (tan x) 1.0) (tan x)))
eps
t_0)
eps
eps)))
double code(double x, double eps) {
double t_0 = pow(tan(x), 2.0);
return fma(fma(fma(eps, fma(tan(x), tan(x), (pow(tan(x), 4.0) + fma(t_0, 0.3333333333333333, 0.3333333333333333))), (fma(tan(x), tan(x), 1.0) * tan(x))), eps, t_0), eps, eps);
}
# Degree-3 eps-series of tan(x+eps) - tan(x), Horner/fma form (tan hoisted; ops unchanged).
function code(x, eps)
    t = tan(x)
    t_0 = t ^ 2.0
    quart = Float64((t ^ 4.0) + fma(t_0, 0.3333333333333333, 0.3333333333333333))
    slope = fma(eps, fma(t, t, quart), Float64(fma(t, t, 1.0) * t))
    return fma(fma(slope, eps, t_0), eps, eps)
end
(* Degree-3 eps-series; t$95$0 is Herbie's t_0 = tan(x)^2; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(N[(eps * N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + N[(N[Power[N[Tan[x], $MachinePrecision], 4.0], $MachinePrecision] + N[(t$95$0 * 0.3333333333333333 + 0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + t$95$0), $MachinePrecision] * eps + eps), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\tan x, \tan x, {\tan x}^{4} + \mathsf{fma}\left(t\_0, 0.3333333333333333, 0.3333333333333333\right)\right), \mathsf{fma}\left(\tan x, \tan x, 1\right) \cdot \tan x\right), \varepsilon, t\_0\right), \varepsilon, \varepsilon\right)
\end{array}
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Applied rewrites 99.8%
Applied rewrites 99.8%
Applied rewrites 99.8%
Final simplification 99.8%
; Alternative: eps-series with exact first-derivative term sin(x)*(tan(x)^2 + 1)/cos(x).
(FPCore (x eps)
:precision binary64
(fma
(fma
(fma
(fma (tan x) (tan x) (+ 0.3333333333333333 (pow (tan x) 4.0)))
eps
(/ (* (sin x) (fma (tan x) (tan x) 1.0)) (cos x)))
eps
(pow (tan x) 2.0))
eps
eps))
double code(double x, double eps) {
return fma(fma(fma(fma(tan(x), tan(x), (0.3333333333333333 + pow(tan(x), 4.0))), eps, ((sin(x) * fma(tan(x), tan(x), 1.0)) / cos(x))), eps, pow(tan(x), 2.0)), eps, eps);
}
# eps-series with exact first-derivative term sin(x)*(tan(x)^2 + 1)/cos(x).
function code(x, eps)
    t = tan(x)
    third = fma(t, t, Float64(0.3333333333333333 + (t ^ 4.0)))
    first = Float64(Float64(sin(x) * fma(t, t, 1.0)) / cos(x))
    return fma(fma(fma(third, eps, first), eps, (t ^ 2.0)), eps, eps)
end
(* eps-series with exact first derivative; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + N[(0.3333333333333333 + N[Power[N[Tan[x], $MachinePrecision], 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + N[(N[(N[Sin[x], $MachinePrecision] * N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\tan x, \tan x, 0.3333333333333333 + {\tan x}^{4}\right), \varepsilon, \frac{\sin x \cdot \mathsf{fma}\left(\tan x, \tan x, 1\right)}{\cos x}\right), \varepsilon, {\tan x}^{2}\right), \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Applied rewrites 99.8%
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 99.7%
Final simplification 99.7%
; Alternative: like the previous series, with the eps^2 coefficient Taylor-expanded in x.
(FPCore (x eps)
:precision binary64
(fma
(fma
(fma
(fma 1.3333333333333333 (* x x) 0.3333333333333333)
eps
(/ (* (sin x) (fma (tan x) (tan x) 1.0)) (cos x)))
eps
(pow (tan x) 2.0))
eps
eps))
double code(double x, double eps) {
return fma(fma(fma(fma(1.3333333333333333, (x * x), 0.3333333333333333), eps, ((sin(x) * fma(tan(x), tan(x), 1.0)) / cos(x))), eps, pow(tan(x), 2.0)), eps, eps);
}
# eps-series; the eps^2 coefficient is Taylor-expanded in x (4/3*x^2 + 1/3).
function code(x, eps)
    t = tan(x)
    third = fma(1.3333333333333333, Float64(x * x), 0.3333333333333333)
    first = Float64(Float64(sin(x) * fma(t, t, 1.0)) / cos(x))
    return fma(fma(fma(third, eps, first), eps, (t ^ 2.0)), eps, eps)
end
(* eps-series with x-expanded eps^2 coefficient; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(N[(1.3333333333333333 * N[(x * x), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * eps + N[(N[(N[Sin[x], $MachinePrecision] * N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, x \cdot x, 0.3333333333333333\right), \varepsilon, \frac{\sin x \cdot \mathsf{fma}\left(\tan x, \tan x, 1\right)}{\cos x}\right), \varepsilon, {\tan x}^{2}\right), \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Applied rewrites 99.8%
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 99.6%
Final simplification 99.6%
; Alternative: eps-series with the eps^2 coefficient truncated to the constant 1/3.
(FPCore (x eps)
:precision binary64
(fma
(fma
(fma
0.3333333333333333
eps
(/ (* (sin x) (fma (tan x) (tan x) 1.0)) (cos x)))
eps
(pow (tan x) 2.0))
eps
eps))
double code(double x, double eps) {
return fma(fma(fma(0.3333333333333333, eps, ((sin(x) * fma(tan(x), tan(x), 1.0)) / cos(x))), eps, pow(tan(x), 2.0)), eps, eps);
}
# eps-series with the eps^2 coefficient truncated to the constant 1/3.
function code(x, eps)
    t = tan(x)
    first = Float64(Float64(sin(x) * fma(t, t, 1.0)) / cos(x))
    return fma(fma(fma(0.3333333333333333, eps, first), eps, (t ^ 2.0)), eps, eps)
end
(* eps-series, eps^2 coefficient truncated to 1/3; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(0.3333333333333333 * eps + N[(N[(N[Sin[x], $MachinePrecision] * N[(N[Tan[x], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, \frac{\sin x \cdot \mathsf{fma}\left(\tan x, \tan x, 1\right)}{\cos x}\right), \varepsilon, {\tan x}^{2}\right), \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Applied rewrites 99.8%
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 99.6%
Final simplification 99.6%
; Alternative: first-order term only, eps * sin(x)^2/cos(x)^2 + eps = eps * sec(x)^2.
(FPCore (x eps) :precision binary64 (fma (/ (pow (sin x) 2.0) (pow (cos x) 2.0)) eps eps))
double code(double x, double eps) {
return fma((pow(sin(x), 2.0) / pow(cos(x), 2.0)), eps, eps);
}
# First-order term: eps * (sin^2/cos^2) + eps == eps * sec(x)^2.
function code(x, eps)
    ratio = Float64((sin(x) ^ 2.0) / (cos(x) ^ 2.0))
    return fma(ratio, eps, eps)
end
(* First-order term eps*sec(x)^2; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision] / N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
sub-neg N/A
+-commutative N/A
distribute-rgt-in N/A
*-lft-identity N/A
lower-fma.f64 N/A
mul-1-neg N/A
remove-double-neg N/A
lower-/.f64 N/A
lower-pow.f64 N/A
lower-sin.f64 N/A
lower-pow.f64 N/A
lower-cos.f64 98.5
Applied rewrites 98.5%
; Alternative: tan-sum identity form, numerator Taylor-expanded in x,
; denominator (1 - tan(eps)*tan(x)) * cos(x) kept exact.
(FPCore (x eps)
:precision binary64
(/
(*
(fma
(fma
(fma
(* (* x x) 0.3333333333333333)
0.20833333333333334
0.16666666666666666)
(* x x)
0.3333333333333333)
(* eps eps)
(fma (fma (* x x) 0.20833333333333334 0.5) (* x x) 1.0))
eps)
(* (- 1.0 (* (tan eps) (tan x))) (cos x))))
double code(double x, double eps) {
return (fma(fma(fma(((x * x) * 0.3333333333333333), 0.20833333333333334, 0.16666666666666666), (x * x), 0.3333333333333333), (eps * eps), fma(fma((x * x), 0.20833333333333334, 0.5), (x * x), 1.0)) * eps) / ((1.0 - (tan(eps) * tan(x))) * cos(x));
}
# tan-sum identity form: polynomial numerator over (1 - tan(eps)*tan(x)) * cos(x).
function code(x, eps)
    xx = Float64(x * x)
    cubic = fma(fma(Float64(xx * 0.3333333333333333), 0.20833333333333334, 0.16666666666666666), xx, 0.3333333333333333)
    base = fma(fma(xx, 0.20833333333333334, 0.5), xx, 1.0)
    numer = Float64(fma(cubic, Float64(eps * eps), base) * eps)
    denom = Float64(Float64(1.0 - Float64(tan(eps) * tan(x))) * cos(x))
    return Float64(numer / denom)
end
(* tan-sum identity form; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(N[(N[(N[(N[(x * x), $MachinePrecision] * 0.3333333333333333), $MachinePrecision] * 0.20833333333333334 + 0.16666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + N[(N[(N[(x * x), $MachinePrecision] * 0.20833333333333334 + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision] / N[(N[(1.0 - N[(N[Tan[eps], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\left(x \cdot x\right) \cdot 0.3333333333333333, 0.20833333333333334, 0.16666666666666666\right), x \cdot x, 0.3333333333333333\right), \varepsilon \cdot \varepsilon, \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.20833333333333334, 0.5\right), x \cdot x, 1\right)\right) \cdot \varepsilon}{\left(1 - \tan \varepsilon \cdot \tan x\right) \cdot \cos x}
\end{array}
Initial program 63.2%
lift--.f64 N/A
flip-- N/A
clear-num N/A
lower-/.f64 N/A
clear-num N/A
flip-- N/A
lift--.f64 N/A
inv-pow N/A
lower-pow.f64 63.2
lift-+.f64 N/A
+-commutative N/A
lower-+.f64 63.2
Applied rewrites 63.2%
lift-/.f64 N/A
lift-pow.f64 N/A
unpow-1 N/A
remove-double-div 63.2
lift--.f64 N/A
lift-tan.f64 N/A
lift-+.f64 N/A
tan-sum N/A
lift-tan.f64 N/A
tan-quot N/A
lift-sin.f64 N/A
lift-cos.f64 N/A
frac-sub N/A
lower-/.f64 N/A
Applied rewrites 63.2%
Taylor expanded in x around 0
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 98.0%
Taylor expanded in eps around 0
Applied rewrites 97.9%
Final simplification 97.9%
; Alternative: tan-sum identity form with the eps^2 numerator term dropped.
(FPCore (x eps) :precision binary64 (/ (* (fma (fma (* x x) 0.20833333333333334 0.5) (* x x) 1.0) eps) (* (- 1.0 (* (tan eps) (tan x))) (cos x))))
double code(double x, double eps) {
return (fma(fma((x * x), 0.20833333333333334, 0.5), (x * x), 1.0) * eps) / ((1.0 - (tan(eps) * tan(x))) * cos(x));
}
# tan-sum identity form, eps^2 numerator term dropped.
function code(x, eps)
    xx = Float64(x * x)
    numer = Float64(fma(fma(xx, 0.20833333333333334, 0.5), xx, 1.0) * eps)
    denom = Float64(Float64(1.0 - Float64(tan(eps) * tan(x))) * cos(x))
    return Float64(numer / denom)
end
(* tan-sum identity form without the eps^2 term; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(N[(N[(x * x), $MachinePrecision] * 0.20833333333333334 + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * eps), $MachinePrecision] / N[(N[(1.0 - N[(N[Tan[eps], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.20833333333333334, 0.5\right), x \cdot x, 1\right) \cdot \varepsilon}{\left(1 - \tan \varepsilon \cdot \tan x\right) \cdot \cos x}
\end{array}
Initial program 63.2%
lift--.f64 N/A
flip-- N/A
clear-num N/A
lower-/.f64 N/A
clear-num N/A
flip-- N/A
lift--.f64 N/A
inv-pow N/A
lower-pow.f64 63.2
lift-+.f64 N/A
+-commutative N/A
lower-+.f64 63.2
Applied rewrites 63.2%
lift-/.f64 N/A
lift-pow.f64 N/A
unpow-1 N/A
remove-double-div 63.2
lift--.f64 N/A
lift-tan.f64 N/A
lift-+.f64 N/A
tan-sum N/A
lift-tan.f64 N/A
tan-quot N/A
lift-sin.f64 N/A
lift-cos.f64 N/A
frac-sub N/A
lower-/.f64 N/A
Applied rewrites 63.2%
Taylor expanded in x around 0
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 98.0%
Taylor expanded in eps around 0
Applied rewrites 97.7%
Final simplification 97.7%
; Alternative: bivariate polynomial in x and eps (both Taylor-expanded around 0).
(FPCore (x eps)
:precision binary64
(fma
(fma
(fma
(fma 1.3333333333333333 (* eps eps) 1.0)
eps
(* (* (* eps eps) 1.3333333333333333) x))
x
(* eps eps))
x
(fma (pow eps 3.0) 0.3333333333333333 eps)))
double code(double x, double eps) {
return fma(fma(fma(fma(1.3333333333333333, (eps * eps), 1.0), eps, (((eps * eps) * 1.3333333333333333) * x)), x, (eps * eps)), x, fma(pow(eps, 3.0), 0.3333333333333333, eps));
}
# Bivariate polynomial approximation in x and eps, Horner in x via fma.
function code(x, eps)
    ee = Float64(eps * eps)
    lin = fma(fma(1.3333333333333333, ee, 1.0), eps, Float64(Float64(ee * 1.3333333333333333) * x))
    tail = fma((eps ^ 3.0), 0.3333333333333333, eps)
    return fma(fma(lin, x, ee), x, tail)
end
(* Bivariate polynomial approximation; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(N[(1.3333333333333333 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision] * eps + N[(N[(N[(eps * eps), $MachinePrecision] * 1.3333333333333333), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * x + N[(eps * eps), $MachinePrecision]), $MachinePrecision] * x + N[(N[Power[eps, 3.0], $MachinePrecision] * 0.3333333333333333 + eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, \varepsilon \cdot \varepsilon, 1\right), \varepsilon, \left(\left(\varepsilon \cdot \varepsilon\right) \cdot 1.3333333333333333\right) \cdot x\right), x, \varepsilon \cdot \varepsilon\right), x, \mathsf{fma}\left({\varepsilon}^{3}, 0.3333333333333333, \varepsilon\right)\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 96.9%
Taylor expanded in x around 0
Applied rewrites 97.6%
Final simplification 97.6%
; Alternative: compact bivariate polynomial, factored so eps multiplies last.
(FPCore (x eps) :precision binary64 (fma (fma (* 0.3333333333333333 eps) eps (* (fma (fma 1.3333333333333333 (* (+ x eps) eps) 1.0) x eps) x)) eps eps))
double code(double x, double eps) {
return fma(fma((0.3333333333333333 * eps), eps, (fma(fma(1.3333333333333333, ((x + eps) * eps), 1.0), x, eps) * x)), eps, eps);
}
# Compact bivariate polynomial, factored so eps multiplies last.
function code(x, eps)
    cross = Float64(fma(fma(1.3333333333333333, Float64(Float64(x + eps) * eps), 1.0), x, eps) * x)
    return fma(fma(Float64(0.3333333333333333 * eps), eps, cross), eps, eps)
end
(* Compact bivariate polynomial; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(0.3333333333333333 * eps), $MachinePrecision] * eps + N[(N[(N[(1.3333333333333333 * N[(N[(x + eps), $MachinePrecision] * eps), $MachinePrecision] + 1.0), $MachinePrecision] * x + eps), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333 \cdot \varepsilon, \varepsilon, \mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, \left(x + \varepsilon\right) \cdot \varepsilon, 1\right), x, \varepsilon\right) \cdot x\right), \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 97.6%
Final simplification 97.6%
; Alternative: Horner-in-x polynomial with eps^2/3 constant term.
(FPCore (x eps) :precision binary64 (fma (fma (fma (fma 1.3333333333333333 (* eps eps) 1.0) x eps) x (* (* eps eps) 0.3333333333333333)) eps eps))
double code(double x, double eps) {
return fma(fma(fma(fma(1.3333333333333333, (eps * eps), 1.0), x, eps), x, ((eps * eps) * 0.3333333333333333)), eps, eps);
}
# Horner-in-x polynomial with eps^2/3 constant term.
function code(x, eps)
    ee = Float64(eps * eps)
    inner = fma(fma(1.3333333333333333, ee, 1.0), x, eps)
    return fma(fma(inner, x, Float64(ee * 0.3333333333333333)), eps, eps)
end
(* Horner-in-x polynomial; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(N[(1.3333333333333333 * N[(eps * eps), $MachinePrecision] + 1.0), $MachinePrecision] * x + eps), $MachinePrecision] * x + N[(N[(eps * eps), $MachinePrecision] * 0.3333333333333333), $MachinePrecision]), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, \varepsilon \cdot \varepsilon, 1\right), x, \varepsilon\right), x, \left(\varepsilon \cdot \varepsilon\right) \cdot 0.3333333333333333\right), \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 97.6%
; Alternative: x-independent expansion eps + eps^3/3 (tan series at x = 0).
(FPCore (x eps) :precision binary64 (fma (* (* eps eps) 0.3333333333333333) eps eps))
double code(double x, double eps) {
return fma(((eps * eps) * 0.3333333333333333), eps, eps);
}
# x-independent expansion eps + eps^3/3 (tan series at x = 0); x is intentionally unused.
function code(x, eps)
    cubic = Float64(Float64(eps * eps) * 0.3333333333333333)
    return fma(cubic, eps, eps)
end
(* x-independent expansion eps + eps^3/3; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(N[(eps * eps), $MachinePrecision] * 0.3333333333333333), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot 0.3333333333333333, \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 96.9%
; Alternative: cheapest form eps + x*eps^2 (first cross term only).
(FPCore (x eps) :precision binary64 (fma (* x eps) eps eps))
double code(double x, double eps) {
return fma((x * eps), eps, eps);
}
# Cheapest form: eps + x*eps^2, keeping only the first cross term.
function code(x, eps)
    cross = Float64(x * eps)
    return fma(cross, eps, eps)
end
(* Cheapest form eps + x*eps^2; N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(N[(x * eps), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot \varepsilon, \varepsilon, \varepsilon\right)
\end{array}
Initial program 63.2%
Taylor expanded in eps around 0
Applied rewrites 99.8%
Taylor expanded in x around 0
Applied rewrites 96.9%
Taylor expanded in eps around 0
Applied rewrites 96.9%
; Alternative: first-order term eps * (1 + tan(x)^2) == eps * sec(x)^2.
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
! First-order term: eps * (1 + tan(x)^2) == eps * sec(x)^2.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: t
t = tan(x)
code = eps + ((eps * t) * t)
end function
// First-order term: eps * (1 + tan(x)^2) == eps * sec(x)^2.
public static double code(double x, double eps) {
    double t = Math.tan(x);
    return eps + ((eps * t) * t);
}
def code(x, eps):
    """First-order term: eps * (1 + tan(x)**2) == eps * sec(x)**2."""
    t = math.tan(x)
    return eps + ((eps * t) * t)
# First-order term: eps * (1 + tan(x)^2) == eps * sec(x)^2.
function code(x, eps)
    t = tan(x)
    return Float64(eps + Float64(Float64(eps * t) * t))
end
% First-order term: eps * (1 + tan(x)^2) == eps * sec(x)^2.
function tmp = code(x, eps)
  t = tan(x);
  tmp = eps + ((eps * t) * t);
end
(* First-order term eps*(1 + tan(x)^2); N[...] wrappers emulate binary64 rounding. *)
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2024273
; Original Herbie job specification (Hamming problem 3.3.2): the naive difference,
; the precondition 1e-16*|x| < eps < |x| with |x| <= 10000, and the known-good
; alternative eps*(1 + tan(x)^2).
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))