2tan (problem 3.3.2)

Percentage Accurate: 62.4% → 99.9%
Time: 15.4s
Alternatives: 8
Speedup: 17.3×

Specification

?
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* Initial program: tan(x + eps) - tan(x) evaluated directly.
 * Loses accuracy when eps is much smaller than x (report measures 62.4%). */
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
! Initial program: tan(x + eps) - tan(x) evaluated directly.
! Loses accuracy when eps is much smaller than x (report measures 62.4%).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
// Initial program: tan(x + eps) - tan(x) evaluated directly.
// Loses accuracy when eps is much smaller than x (report measures 62.4%).
public static double code(double x, double eps) {
	return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
	"""Initial program: tan(x + eps) - tan(x) evaluated directly.

	Loses accuracy when eps is much smaller than x (report measures 62.4%).
	"""
	return math.tan((x + eps)) - math.tan(x)
# Initial program: tan(x + eps) - tan(x) evaluated directly.
# Loses accuracy when eps is much smaller than x (report measures 62.4%).
function code(x, eps)
	return Float64(tan(Float64(x + eps)) - tan(x))
end
% Initial program: tan(x + eps) - tan(x) evaluated directly.
% Loses accuracy when eps is much smaller than x (report measures 62.4%).
function tmp = code(x, eps)
	tmp = tan((x + eps)) - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 62.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* Initial program (repeated listing): tan(x + eps) - tan(x), direct evaluation.
 * Report measures 62.4% accuracy, 1.0x speedup for this form. */
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
! Initial program (repeated listing): tan(x + eps) - tan(x), direct evaluation.
! Report measures 62.4% accuracy, 1.0x speedup for this form.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
// Initial program (repeated listing): tan(x + eps) - tan(x), direct evaluation.
// Report measures 62.4% accuracy, 1.0x speedup for this form.
public static double code(double x, double eps) {
	return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
	"""Initial program (repeated listing): tan(x + eps) - tan(x), direct evaluation.

	Report measures 62.4% accuracy, 1.0x speedup for this form.
	"""
	return math.tan((x + eps)) - math.tan(x)
# Initial program (repeated listing): tan(x + eps) - tan(x), direct evaluation.
# Report measures 62.4% accuracy, 1.0x speedup for this form.
function code(x, eps)
	return Float64(tan(Float64(x + eps)) - tan(x))
end
% Initial program (repeated listing): tan(x + eps) - tan(x), direct evaluation.
% Report measures 62.4% accuracy, 1.0x speedup for this form.
function tmp = code(x, eps)
	tmp = tan((x + eps)) - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Alternative 1: 99.9% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)} \end{array} \]
(FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos x) (cos (+ x eps)))))
/* Alternative 1: rewrite via tan(a) - tan(b) = sin(a - b) / (cos(a) * cos(b)),
 * so the subtraction becomes sin(eps) and no cancellation occurs (99.9% accurate). */
double code(double x, double eps) {
	return sin(eps) / (cos(x) * cos((x + eps)));
}
! Alternative 1: rewrite via tan(a) - tan(b) = sin(a - b) / (cos(a) * cos(b)),
! so the subtraction becomes sin(eps) and no cancellation occurs (99.9% accurate).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = sin(eps) / (cos(x) * cos((x + eps)))
end function
// Alternative 1: rewrite via tan(a) - tan(b) = sin(a - b) / (cos(a) * cos(b)),
// so the subtraction becomes sin(eps) and no cancellation occurs (99.9% accurate).
public static double code(double x, double eps) {
	return Math.sin(eps) / (Math.cos(x) * Math.cos((x + eps)));
}
def code(x, eps):
	"""Alternative 1: sin(eps) / (cos(x) * cos(x + eps)).

	Uses tan(a) - tan(b) = sin(a - b) / (cos(a) * cos(b)), so the subtraction
	becomes sin(eps) and no cancellation occurs (99.9% accurate).
	"""
	return math.sin(eps) / (math.cos(x) * math.cos((x + eps)))
# Alternative 1: rewrite via tan(a) - tan(b) = sin(a - b) / (cos(a) * cos(b)),
# so the subtraction becomes sin(eps) and no cancellation occurs (99.9% accurate).
function code(x, eps)
	return Float64(sin(eps) / Float64(cos(x) * cos(Float64(x + eps))))
end
% Alternative 1: rewrite via tan(a) - tan(b) = sin(a - b) / (cos(a) * cos(b)),
% so the subtraction becomes sin(eps) and no cancellation occurs (99.9% accurate).
function tmp = code(x, eps)
	tmp = sin(eps) / (cos(x) * cos((x + eps)));
end
code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)}
\end{array}
Derivation
  1. Initial program 64.0%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right) - \tan x} \]
    2. lift-tan.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right)} - \tan x \]
    3. tan-quotN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)}} - \tan x \]
    4. lift-tan.f64N/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\tan x} \]
    5. tan-quotN/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\frac{\sin x}{\cos x}} \]
    6. frac-subN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    7. div-invN/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    8. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    9. sin-diffN/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    10. lower-sin.f64N/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    11. lower--.f64N/A

      \[\leadsto \sin \color{blue}{\left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    12. lower-/.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \color{blue}{\frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    13. *-commutativeN/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    14. lower-*.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    15. lower-cos.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x} \cdot \cos \left(x + \varepsilon\right)} \]
    16. lower-cos.f6464.0

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \color{blue}{\cos \left(x + \varepsilon\right)}} \]
  4. Applied rewrites64.0%

    \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
  5. Taylor expanded in x around inf

    \[\leadsto \color{blue}{\frac{\sin \varepsilon}{\cos x \cdot \cos \left(\varepsilon + x\right)}} \]
  6. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \frac{\sin \varepsilon}{\color{blue}{\cos \left(\varepsilon + x\right) \cdot \cos x}} \]
    2. +-commutativeN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos \color{blue}{\left(x + \varepsilon\right)} \cdot \cos x} \]
    3. remove-double-negN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos \left(x + \color{blue}{\left(\mathsf{neg}\left(\left(\mathsf{neg}\left(\varepsilon\right)\right)\right)\right)}\right) \cdot \cos x} \]
    4. mul-1-negN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos \left(x + \left(\mathsf{neg}\left(\color{blue}{-1 \cdot \varepsilon}\right)\right)\right) \cdot \cos x} \]
    5. sub-negN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos \color{blue}{\left(x - -1 \cdot \varepsilon\right)} \cdot \cos x} \]
    6. *-commutativeN/A

      \[\leadsto \frac{\sin \varepsilon}{\color{blue}{\cos x \cdot \cos \left(x - -1 \cdot \varepsilon\right)}} \]
    7. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{\sin \varepsilon}{\cos x \cdot \cos \left(x - -1 \cdot \varepsilon\right)}} \]
    8. lower-sin.f64N/A

      \[\leadsto \frac{\color{blue}{\sin \varepsilon}}{\cos x \cdot \cos \left(x - -1 \cdot \varepsilon\right)} \]
    9. sub-negN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos x \cdot \cos \color{blue}{\left(x + \left(\mathsf{neg}\left(-1 \cdot \varepsilon\right)\right)\right)}} \]
    10. mul-1-negN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \left(\mathsf{neg}\left(\color{blue}{\left(\mathsf{neg}\left(\varepsilon\right)\right)}\right)\right)\right)} \]
    11. remove-double-negN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \color{blue}{\varepsilon}\right)} \]
    12. +-commutativeN/A

      \[\leadsto \frac{\sin \varepsilon}{\cos x \cdot \cos \color{blue}{\left(\varepsilon + x\right)}} \]
    13. lower-*.f64N/A

      \[\leadsto \frac{\sin \varepsilon}{\color{blue}{\cos x \cdot \cos \left(\varepsilon + x\right)}} \]
    14. lower-cos.f64N/A

      \[\leadsto \frac{\sin \varepsilon}{\color{blue}{\cos x} \cdot \cos \left(\varepsilon + x\right)} \]
    15. lower-cos.f64N/A

      \[\leadsto \frac{\sin \varepsilon}{\cos x \cdot \color{blue}{\cos \left(\varepsilon + x\right)}} \]
    16. lower-+.f64100.0

      \[\leadsto \frac{\sin \varepsilon}{\cos x \cdot \cos \color{blue}{\left(\varepsilon + x\right)}} \]
  7. Applied rewrites100.0%

    \[\leadsto \color{blue}{\frac{\sin \varepsilon}{\cos x \cdot \cos \left(\varepsilon + x\right)}} \]
  8. Final simplification100.0%

    \[\leadsto \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  9. Add Preprocessing

Alternative 2: 99.7% accurate, 0.8× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.008333333333333333, -0.16666666666666666\right), \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  (fma
   (* eps eps)
   (* eps (fma eps (* eps 0.008333333333333333) -0.16666666666666666))
   eps)
  (/ 1.0 (* (cos x) (cos (+ x eps))))))
/* Alternative 2: replaces sin(eps) with its degree-5 Taylor polynomial
 * eps - eps^3/6 + eps^5/120, evaluated with fma; 0.008333... = 1/120, -0.1666... = -1/6. */
double code(double x, double eps) {
	return fma((eps * eps), (eps * fma(eps, (eps * 0.008333333333333333), -0.16666666666666666)), eps) * (1.0 / (cos(x) * cos((x + eps))));
}
# Alternative 2: replaces sin(eps) with its degree-5 Taylor polynomial
# eps - eps^3/6 + eps^5/120, evaluated with fma; 0.008333... = 1/120, -0.1666... = -1/6.
function code(x, eps)
	return Float64(fma(Float64(eps * eps), Float64(eps * fma(eps, Float64(eps * 0.008333333333333333), -0.16666666666666666)), eps) * Float64(1.0 / Float64(cos(x) * cos(Float64(x + eps)))))
end
code[x_, eps_] := N[(N[(N[(eps * eps), $MachinePrecision] * N[(eps * N[(eps * N[(eps * 0.008333333333333333), $MachinePrecision] + -0.16666666666666666), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision] * N[(1.0 / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(\varepsilon \cdot \varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.008333333333333333, -0.16666666666666666\right), \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)}
\end{array}
Derivation
  1. Initial program 64.0%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right) - \tan x} \]
    2. lift-tan.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right)} - \tan x \]
    3. tan-quotN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)}} - \tan x \]
    4. lift-tan.f64N/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\tan x} \]
    5. tan-quotN/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\frac{\sin x}{\cos x}} \]
    6. frac-subN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    7. div-invN/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    8. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    9. sin-diffN/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    10. lower-sin.f64N/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    11. lower--.f64N/A

      \[\leadsto \sin \color{blue}{\left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    12. lower-/.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \color{blue}{\frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    13. *-commutativeN/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    14. lower-*.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    15. lower-cos.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x} \cdot \cos \left(x + \varepsilon\right)} \]
    16. lower-cos.f6464.0

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \color{blue}{\cos \left(x + \varepsilon\right)}} \]
  4. Applied rewrites64.0%

    \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
  5. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\left(\varepsilon \cdot \left(1 + {\varepsilon}^{2} \cdot \left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right)\right)\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  6. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(\varepsilon \cdot \color{blue}{\left({\varepsilon}^{2} \cdot \left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right) + 1\right)}\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    2. distribute-lft-inN/A

      \[\leadsto \color{blue}{\left(\varepsilon \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right)\right) + \varepsilon \cdot 1\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    3. *-commutativeN/A

      \[\leadsto \left(\color{blue}{\left({\varepsilon}^{2} \cdot \left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right)\right) \cdot \varepsilon} + \varepsilon \cdot 1\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    4. associate-*l*N/A

      \[\leadsto \left(\color{blue}{{\varepsilon}^{2} \cdot \left(\left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right) \cdot \varepsilon\right)} + \varepsilon \cdot 1\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    5. *-rgt-identityN/A

      \[\leadsto \left({\varepsilon}^{2} \cdot \left(\left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right) \cdot \varepsilon\right) + \color{blue}{\varepsilon}\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    6. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left({\varepsilon}^{2}, \left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right) \cdot \varepsilon, \varepsilon\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    7. unpow2N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\varepsilon \cdot \varepsilon}, \left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right) \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    8. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{\varepsilon \cdot \varepsilon}, \left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right) \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    9. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \color{blue}{\left(\frac{1}{120} \cdot {\varepsilon}^{2} - \frac{1}{6}\right) \cdot \varepsilon}, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    10. sub-negN/A

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \color{blue}{\left(\frac{1}{120} \cdot {\varepsilon}^{2} + \left(\mathsf{neg}\left(\frac{1}{6}\right)\right)\right)} \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    11. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \left(\color{blue}{{\varepsilon}^{2} \cdot \frac{1}{120}} + \left(\mathsf{neg}\left(\frac{1}{6}\right)\right)\right) \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    12. unpow2N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \left(\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \frac{1}{120} + \left(\mathsf{neg}\left(\frac{1}{6}\right)\right)\right) \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    13. associate-*l*N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \left(\color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \frac{1}{120}\right)} + \left(\mathsf{neg}\left(\frac{1}{6}\right)\right)\right) \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    14. metadata-evalN/A

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \left(\varepsilon \cdot \left(\varepsilon \cdot \frac{1}{120}\right) + \color{blue}{\frac{-1}{6}}\right) \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    15. lower-fma.f64N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \color{blue}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{1}{120}, \frac{-1}{6}\right)} \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    16. lower-*.f6499.6

      \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot 0.008333333333333333}, -0.16666666666666666\right) \cdot \varepsilon, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  7. Applied rewrites99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon \cdot \varepsilon, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.008333333333333333, -0.16666666666666666\right) \cdot \varepsilon, \varepsilon\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  8. Final simplification99.6%

    \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.008333333333333333, -0.16666666666666666\right), \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  9. Add Preprocessing

Alternative 3: 99.7% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \cdot \mathsf{fma}\left(\varepsilon, \left(\varepsilon \cdot \varepsilon\right) \cdot -0.16666666666666666, \varepsilon\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  (/ 1.0 (* (cos x) (cos (+ x eps))))
  (fma eps (* (* eps eps) -0.16666666666666666) eps)))
/* Alternative 3: replaces sin(eps) with its degree-3 Taylor polynomial
 * eps - eps^3/6 (the -0.1666... constant is -1/6), evaluated with fma. */
double code(double x, double eps) {
	return (1.0 / (cos(x) * cos((x + eps)))) * fma(eps, ((eps * eps) * -0.16666666666666666), eps);
}
# Alternative 3: replaces sin(eps) with its degree-3 Taylor polynomial
# eps - eps^3/6 (the -0.1666... constant is -1/6), evaluated with fma.
function code(x, eps)
	return Float64(Float64(1.0 / Float64(cos(x) * cos(Float64(x + eps)))) * fma(eps, Float64(Float64(eps * eps) * -0.16666666666666666), eps))
end
code[x_, eps_] := N[(N[(1.0 / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(eps * N[(N[(eps * eps), $MachinePrecision] * -0.16666666666666666), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \cdot \mathsf{fma}\left(\varepsilon, \left(\varepsilon \cdot \varepsilon\right) \cdot -0.16666666666666666, \varepsilon\right)
\end{array}
Derivation
  1. Initial program 64.0%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right) - \tan x} \]
    2. lift-tan.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right)} - \tan x \]
    3. tan-quotN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)}} - \tan x \]
    4. lift-tan.f64N/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\tan x} \]
    5. tan-quotN/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\frac{\sin x}{\cos x}} \]
    6. frac-subN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    7. div-invN/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    8. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    9. sin-diffN/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    10. lower-sin.f64N/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    11. lower--.f64N/A

      \[\leadsto \sin \color{blue}{\left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    12. lower-/.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \color{blue}{\frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    13. *-commutativeN/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    14. lower-*.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    15. lower-cos.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x} \cdot \cos \left(x + \varepsilon\right)} \]
    16. lower-cos.f6464.0

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \color{blue}{\cos \left(x + \varepsilon\right)}} \]
  4. Applied rewrites64.0%

    \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
  5. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\left(\varepsilon \cdot \left(1 + \frac{-1}{6} \cdot {\varepsilon}^{2}\right)\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  6. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{6} \cdot {\varepsilon}^{2} + 1\right)}\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    2. distribute-lft-inN/A

      \[\leadsto \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{6} \cdot {\varepsilon}^{2}\right) + \varepsilon \cdot 1\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    3. *-rgt-identityN/A

      \[\leadsto \left(\varepsilon \cdot \left(\frac{-1}{6} \cdot {\varepsilon}^{2}\right) + \color{blue}{\varepsilon}\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    4. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{6} \cdot {\varepsilon}^{2}, \varepsilon\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    5. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{{\varepsilon}^{2} \cdot \frac{-1}{6}}, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    6. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{{\varepsilon}^{2} \cdot \frac{-1}{6}}, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    7. unpow2N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \frac{-1}{6}, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
    8. lower-*.f6499.6

      \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot -0.16666666666666666, \varepsilon\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  7. Applied rewrites99.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \left(\varepsilon \cdot \varepsilon\right) \cdot -0.16666666666666666, \varepsilon\right)} \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \]
  8. Final simplification99.6%

    \[\leadsto \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)} \cdot \mathsf{fma}\left(\varepsilon, \left(\varepsilon \cdot \varepsilon\right) \cdot -0.16666666666666666, \varepsilon\right) \]
  9. Add Preprocessing

Alternative 4: 98.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \frac{\varepsilon}{{\cos x}^{2}} \end{array} \]
(FPCore (x eps) :precision binary64 (/ eps (pow (cos x) 2.0)))
/* Alternative 4: first-order approximation eps / cos(x)^2, i.e. eps times the
 * derivative of tan at x; drops the eps-dependence inside the cosines (98.9% accurate). */
double code(double x, double eps) {
	return eps / pow(cos(x), 2.0);
}
! Alternative 4: first-order approximation eps / cos(x)**2, i.e. eps times the
! derivative of tan at x; drops the eps-dependence inside the cosines (98.9% accurate).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps / (cos(x) ** 2.0d0)
end function
// Alternative 4: first-order approximation eps / cos(x)^2, i.e. eps times the
// derivative of tan at x; drops the eps-dependence inside the cosines (98.9% accurate).
public static double code(double x, double eps) {
	return eps / Math.pow(Math.cos(x), 2.0);
}
def code(x, eps):
	"""Alternative 4: eps / cos(x)**2.

	First-order approximation: eps times the derivative of tan at x;
	drops the eps-dependence inside the cosines (98.9% accurate).
	"""
	return eps / math.pow(math.cos(x), 2.0)
# Alternative 4: first-order approximation eps / cos(x)^2, i.e. eps times the
# derivative of tan at x; drops the eps-dependence inside the cosines (98.9% accurate).
function code(x, eps)
	return Float64(eps / (cos(x) ^ 2.0))
end
% Alternative 4: first-order approximation eps / cos(x)^2, i.e. eps times the
% derivative of tan at x; drops the eps-dependence inside the cosines (98.9% accurate).
function tmp = code(x, eps)
	tmp = eps / (cos(x) ^ 2.0);
end
code[x_, eps_] := N[(eps / N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\varepsilon}{{\cos x}^{2}}
\end{array}
Derivation
  1. Initial program 64.0%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right) - \tan x} \]
    2. lift-tan.f64N/A

      \[\leadsto \color{blue}{\tan \left(x + \varepsilon\right)} - \tan x \]
    3. tan-quotN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)}} - \tan x \]
    4. lift-tan.f64N/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\tan x} \]
    5. tan-quotN/A

      \[\leadsto \frac{\sin \left(x + \varepsilon\right)}{\cos \left(x + \varepsilon\right)} - \color{blue}{\frac{\sin x}{\cos x}} \]
    6. frac-subN/A

      \[\leadsto \color{blue}{\frac{\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    7. div-invN/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    8. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(x + \varepsilon\right) \cdot \cos x - \cos \left(x + \varepsilon\right) \cdot \sin x\right) \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    9. sin-diffN/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    10. lower-sin.f64N/A

      \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    11. lower--.f64N/A

      \[\leadsto \sin \color{blue}{\left(\left(x + \varepsilon\right) - x\right)} \cdot \frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x} \]
    12. lower-/.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \color{blue}{\frac{1}{\cos \left(x + \varepsilon\right) \cdot \cos x}} \]
    13. *-commutativeN/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    14. lower-*.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
    15. lower-cos.f64N/A

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\color{blue}{\cos x} \cdot \cos \left(x + \varepsilon\right)} \]
    16. lower-cos.f6464.0

      \[\leadsto \sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \color{blue}{\cos \left(x + \varepsilon\right)}} \]
  4. Applied rewrites64.0%

    \[\leadsto \color{blue}{\sin \left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{\cos x \cdot \cos \left(x + \varepsilon\right)}} \]
  5. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\frac{\varepsilon}{{\cos x}^{2}}} \]
  6. Step-by-step derivation
    1. lower-/.f64N/A

      \[\leadsto \color{blue}{\frac{\varepsilon}{{\cos x}^{2}}} \]
    2. lower-pow.f64N/A

      \[\leadsto \frac{\varepsilon}{\color{blue}{{\cos x}^{2}}} \]
    3. lower-cos.f6498.9

      \[\leadsto \frac{\varepsilon}{{\color{blue}{\cos x}}^{2}} \]
  7. Applied rewrites98.9%

    \[\leadsto \color{blue}{\frac{\varepsilon}{{\cos x}^{2}}} \]
  8. Add Preprocessing

Alternative 5: 98.4% accurate, 4.4× speedup?

\[\begin{array}{l} \\ \varepsilon + x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 1.3333333333333333, \varepsilon \cdot \left(x \cdot 0.6666666666666666\right)\right), \varepsilon\right), \varepsilon \cdot \varepsilon\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (+
  eps
  (*
   x
   (fma
    x
    (fma
     x
     (fma eps (* eps 1.3333333333333333) (* eps (* x 0.6666666666666666)))
     eps)
    (* eps eps)))))
/* Alternative 5: polynomial approximation in x and eps (Taylor expansion around 0),
 * evaluated in nested fma form; constants 1.3333... = 4/3 and 0.6666... = 2/3.
 * Trades accuracy (98.4%) for speed (4.4x) by avoiding all trig calls. */
double code(double x, double eps) {
	return eps + (x * fma(x, fma(x, fma(eps, (eps * 1.3333333333333333), (eps * (x * 0.6666666666666666))), eps), (eps * eps)));
}
# Cubic Taylor approximation (about x = 0) of tan(x + eps) - tan(x),
# computed as eps + x*fma(x, fma(x, fma(eps, 4/3*eps, 2/3*x*eps), eps), eps^2).
function code(x, eps)
	return Float64(eps + Float64(x * fma(x, fma(x, fma(eps, Float64(eps * 1.3333333333333333), Float64(eps * Float64(x * 0.6666666666666666))), eps), Float64(eps * eps))))
end
(* Machine-precision Taylor-polynomial form of tan(x + eps) - tan(x), expanded about x = 0. *)
code[x_, eps_] := N[(eps + N[(x * N[(x * N[(x * N[(eps * N[(eps * 1.3333333333333333), $MachinePrecision] + N[(eps * N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision] + N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon + x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 1.3333333333333333, \varepsilon \cdot \left(x \cdot 0.6666666666666666\right)\right), \varepsilon\right), \varepsilon \cdot \varepsilon\right)
\end{array}
Derivation
  1. Initial program 64.0%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Step-by-step derivation
    1. associate--l+ — accuracy: N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
    2. +-commutative — accuracy: N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
    3. distribute-lft-in — accuracy: N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
    4. *-rgt-identity — accuracy: N/A

      \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
    5. lower-fma.f64 — accuracy: N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
  5. Applied rewrites — accuracy: 99.3%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \color{blue}{x}, \varepsilon\right) \]
  7. Step-by-step derivation
    1. Applied rewrites — accuracy: 98.0%

      \[\leadsto \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \color{blue}{x}, \varepsilon\right) \]
    2. Step-by-step derivation
      1. Applied rewrites — accuracy: 98.0%

        \[\leadsto \varepsilon \cdot \left(x \cdot \varepsilon\right) + \color{blue}{\varepsilon} \]
      2. Taylor expanded in x around 0

        \[\leadsto x \cdot \left(x \cdot \left(\varepsilon + x \cdot \left(\frac{2}{3} \cdot \left(\varepsilon \cdot x\right) + \varepsilon \cdot \left(\frac{5}{6} \cdot \varepsilon - \frac{-1}{2} \cdot \varepsilon\right)\right)\right) + {\varepsilon}^{2}\right) + \varepsilon \]
      3. Step-by-step derivation
        1. Applied rewrites — accuracy: 98.6%

          \[\leadsto x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 1.3333333333333333, \varepsilon \cdot \left(x \cdot 0.6666666666666666\right)\right), \varepsilon\right), \varepsilon \cdot \varepsilon\right) + \varepsilon \]
        2. Final simplification — accuracy: 98.6%

          \[\leadsto \varepsilon + x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 1.3333333333333333, \varepsilon \cdot \left(x \cdot 0.6666666666666666\right)\right), \varepsilon\right), \varepsilon \cdot \varepsilon\right) \]
        3. Add Preprocessing

        Alternative 6: 98.4% accurate, 5.9× speedup?

        \[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.6666666666666666, \varepsilon \cdot 1.3333333333333333\right), 1\right), \varepsilon\right), \varepsilon\right) \end{array} \]
        ; Alternative 6: Horner-form Taylor polynomial for tan(x + eps) - tan(x)
        ; (expanded about x = 0), evaluated with fused multiply-adds.
        (FPCore (x eps)
         :precision binary64
         (fma
          eps
          (*
           x
           (fma
            x
            (fma x (fma x 0.6666666666666666 (* eps 1.3333333333333333)) 1.0)
            eps))
          eps))
        double code(double x, double eps) {
        	return fma(eps, (x * fma(x, fma(x, fma(x, 0.6666666666666666, (eps * 1.3333333333333333)), 1.0), eps)), eps);
        }
        
        # fma(eps, x * P(x, eps), eps): Horner-form Taylor polynomial for
        # tan(x + eps) - tan(x), expanded about x = 0.
        function code(x, eps)
        	return fma(eps, Float64(x * fma(x, fma(x, fma(x, 0.6666666666666666, Float64(eps * 1.3333333333333333)), 1.0), eps)), eps)
        end
        
        (* Machine-precision Horner polynomial for tan(x + eps) - tan(x), expanded about x = 0. *)
        code[x_, eps_] := N[(eps * N[(x * N[(x * N[(x * N[(x * 0.6666666666666666 + N[(eps * 1.3333333333333333), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.6666666666666666, \varepsilon \cdot 1.3333333333333333\right), 1\right), \varepsilon\right), \varepsilon\right)
        \end{array}
        
        Derivation
        1. Initial program 64.0%

          \[\tan \left(x + \varepsilon\right) - \tan x \]
        2. Add Preprocessing
        3. Taylor expanded in eps around 0

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
        4. Step-by-step derivation
          1. associate--l+N/A

            \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
          2. +-commutativeN/A

            \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
          3. distribute-lft-inN/A

            \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
          4. *-rgt-identityN/A

            \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
          5. lower-fma.f64N/A

            \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
        5. Applied rewrites99.3%

          \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
        6. Taylor expanded in x around 0

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(\varepsilon + x \cdot \left(1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right)\right)\right)}, \varepsilon\right) \]
        7. Step-by-step derivation
          1. Applied rewrites98.6%

            \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.6666666666666666, \varepsilon \cdot 1.3333333333333333\right), 1\right), \varepsilon\right)}, \varepsilon\right) \]
          2. Add Preprocessing

          Alternative 7: 98.2% accurate, 13.8× speedup?

          \[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon, x \cdot \left(x + \varepsilon\right), \varepsilon\right) \end{array} \]
          ; Alternative 7: low-order approximation eps*(x*(x + eps)) + eps of tan(x+eps) - tan(x).
          (FPCore (x eps) :precision binary64 (fma eps (* x (+ x eps)) eps))
          double code(double x, double eps) {
          	return fma(eps, (x * (x + eps)), eps);
          }
          
          # Low-order approximation eps*(x*(x + eps)) + eps of tan(x + eps) - tan(x).
          function code(x, eps)
          	return fma(eps, Float64(x * Float64(x + eps)), eps)
          end
          
          (* Machine-precision form of eps*(x*(x + eps)) + eps. *)
          code[x_, eps_] := N[(eps * N[(x * N[(x + eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
          
          \begin{array}{l}
          
          \\
          \mathsf{fma}\left(\varepsilon, x \cdot \left(x + \varepsilon\right), \varepsilon\right)
          \end{array}
          
          Derivation
          1. Initial program 64.0%

            \[\tan \left(x + \varepsilon\right) - \tan x \]
          2. Add Preprocessing
          3. Taylor expanded in eps around 0

            \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
          4. Step-by-step derivation
            1. associate--l+N/A

              \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
            2. +-commutativeN/A

              \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
            3. distribute-lft-inN/A

              \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
            4. *-rgt-identityN/A

              \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
            5. lower-fma.f64N/A

              \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
          5. Applied rewrites99.3%

            \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
          6. Taylor expanded in x around 0

            \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(\varepsilon + x\right)}, \varepsilon\right) \]
          7. Step-by-step derivation
            1. Applied rewrites98.4%

              \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(\varepsilon + x\right)}, \varepsilon\right) \]
            2. Final simplification98.4%

              \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \left(x + \varepsilon\right), \varepsilon\right) \]
            3. Add Preprocessing

            Alternative 8: 97.7% accurate, 17.3× speedup?

            \[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon, x \cdot \varepsilon, \varepsilon\right) \end{array} \]
            ; Alternative 8: leading-order approximation eps*(x*eps) + eps of tan(x+eps) - tan(x).
            (FPCore (x eps) :precision binary64 (fma eps (* x eps) eps))
            double code(double x, double eps) {
            	return fma(eps, (x * eps), eps);
            }
            
            # Leading-order approximation eps*(x*eps) + eps of tan(x + eps) - tan(x).
            function code(x, eps)
            	return fma(eps, Float64(x * eps), eps)
            end
            
            (* Machine-precision form of eps*(x*eps) + eps. *)
            code[x_, eps_] := N[(eps * N[(x * eps), $MachinePrecision] + eps), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            \mathsf{fma}\left(\varepsilon, x \cdot \varepsilon, \varepsilon\right)
            \end{array}
            
            Derivation
            1. Initial program 64.0%

              \[\tan \left(x + \varepsilon\right) - \tan x \]
            2. Add Preprocessing
            3. Taylor expanded in eps around 0

              \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
            4. Step-by-step derivation
              1. associate--l+N/A

                \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
              2. +-commutativeN/A

                \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
              3. distribute-lft-inN/A

                \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
              4. *-rgt-identityN/A

                \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
              5. lower-fma.f64N/A

                \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
            5. Applied rewrites99.3%

              \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
            6. Taylor expanded in x around 0

              \[\leadsto \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \color{blue}{x}, \varepsilon\right) \]
            7. Step-by-step derivation
              1. Applied rewrites98.0%

                \[\leadsto \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \color{blue}{x}, \varepsilon\right) \]
              2. Final simplification98.0%

                \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \varepsilon, \varepsilon\right) \]
              3. Add Preprocessing

              Developer Target 1: 99.9% accurate, 0.6× speedup?

              \[\begin{array}{l} \\ \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)} \end{array} \]
              ; Target 1: exact identity tan(x+eps) - tan(x) = sin(eps) / (cos(x)*cos(x+eps)).
              (FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos x) (cos (+ x eps)))))
              double code(double x, double eps) {
              	return sin(eps) / (cos(x) * cos((x + eps)));
              }
              
              ! tan(x+eps) - tan(x) rewritten via the identity
              ! sin(eps) / (cos(x)*cos(x+eps)) to avoid cancellation.
              real(8) function code(x, eps)
                  real(8), intent (in) :: x
                  real(8), intent (in) :: eps
                  code = sin(eps) / (cos(x) * cos((x + eps)))
              end function
              
              // tan(x+eps) - tan(x) via the identity sin(eps) / (cos(x)*cos(x+eps)),
              // avoiding the subtraction of two nearly-equal tangents.
              public static double code(double x, double eps) {
              	return Math.sin(eps) / (Math.cos(x) * Math.cos((x + eps)));
              }
              
              # tan(x+eps) - tan(x) via the identity sin(eps) / (cos(x)*cos(x+eps)).
              def code(x, eps):
              	return math.sin(eps) / (math.cos(x) * math.cos((x + eps)))
              
              # tan(x+eps) - tan(x) via the identity sin(eps) / (cos(x)*cos(x+eps)).
              function code(x, eps)
              	return Float64(sin(eps) / Float64(cos(x) * cos(Float64(x + eps))))
              end
              
              % tan(x+eps) - tan(x) via the identity sin(eps) / (cos(x)*cos(x+eps)).
              function tmp = code(x, eps)
              	tmp = sin(eps) / (cos(x) * cos((x + eps)));
              end
              
              (* Machine-precision form of sin(eps) / (cos(x)*cos(x+eps)). *)
              code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
              
              \begin{array}{l}
              
              \\
              \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)}
              \end{array}
              

              Developer Target 2: 62.6% accurate, 0.4× speedup?

              \[\begin{array}{l} \\ \frac{\tan x + \tan \varepsilon}{1 - \tan x \cdot \tan \varepsilon} - \tan x \end{array} \]
              ; Target 2: tangent addition formula
              ; tan(x+eps) = (tan x + tan eps)/(1 - tan x * tan eps), then subtract tan x.
              (FPCore (x eps)
               :precision binary64
               (- (/ (+ (tan x) (tan eps)) (- 1.0 (* (tan x) (tan eps)))) (tan x)))
              double code(double x, double eps) {
              	return ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x);
              }
              
              ! Tangent addition formula: tan(x+eps) = (tan x + tan eps)/(1 - tan x*tan eps),
              ! then subtract tan x.
              real(8) function code(x, eps)
                  real(8), intent (in) :: x
                  real(8), intent (in) :: eps
                  code = ((tan(x) + tan(eps)) / (1.0d0 - (tan(x) * tan(eps)))) - tan(x)
              end function
              
              // Tangent addition formula: tan(x+eps) = (tan x + tan eps)/(1 - tan x*tan eps),
              // then subtract tan x.
              public static double code(double x, double eps) {
              	return ((Math.tan(x) + Math.tan(eps)) / (1.0 - (Math.tan(x) * Math.tan(eps)))) - Math.tan(x);
              }
              
              # Tangent addition formula: tan(x+eps) = (tan x + tan eps)/(1 - tan x*tan eps),
              # then subtract tan x.
              def code(x, eps):
              	return ((math.tan(x) + math.tan(eps)) / (1.0 - (math.tan(x) * math.tan(eps)))) - math.tan(x)
              
              # Tangent addition formula: tan(x+eps) = (tan x + tan eps)/(1 - tan x*tan eps),
              # then subtract tan x.
              function code(x, eps)
              	return Float64(Float64(Float64(tan(x) + tan(eps)) / Float64(1.0 - Float64(tan(x) * tan(eps)))) - tan(x))
              end
              
              % Tangent addition formula: tan(x+eps) = (tan x + tan eps)/(1 - tan x*tan eps),
              % then subtract tan x.
              function tmp = code(x, eps)
              	tmp = ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x);
              end
              
              (* Machine-precision tangent addition formula, minus Tan[x]. *)
              code[x_, eps_] := N[(N[(N[(N[Tan[x], $MachinePrecision] + N[Tan[eps], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[x], $MachinePrecision] * N[Tan[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
              
              \begin{array}{l}
              
              \\
              \frac{\tan x + \tan \varepsilon}{1 - \tan x \cdot \tan \varepsilon} - \tan x
              \end{array}
              

              Developer Target 3: 98.9% accurate, 1.0× speedup?

              \[\begin{array}{l} \\ \varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x \end{array} \]
              ; Target 3: first-order form eps + eps*tan(x)^2 of tan(x+eps) - tan(x).
              (FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
              double code(double x, double eps) {
              	return eps + ((eps * tan(x)) * tan(x));
              }
              
              ! First-order form eps + eps*tan(x)**2 of tan(x+eps) - tan(x).
              real(8) function code(x, eps)
                  real(8), intent (in) :: x
                  real(8), intent (in) :: eps
                  code = eps + ((eps * tan(x)) * tan(x))
              end function
              
              // First-order form eps + eps*tan(x)^2 of tan(x+eps) - tan(x).
              public static double code(double x, double eps) {
              	return eps + ((eps * Math.tan(x)) * Math.tan(x));
              }
              
              # First-order form eps + eps*tan(x)**2 of tan(x+eps) - tan(x).
              def code(x, eps):
              	return eps + ((eps * math.tan(x)) * math.tan(x))
              
              # First-order form eps + eps*tan(x)^2 of tan(x+eps) - tan(x).
              function code(x, eps)
              	return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x)))
              end
              
              % First-order form eps + eps*tan(x)^2 of tan(x+eps) - tan(x).
              function tmp = code(x, eps)
              	tmp = eps + ((eps * tan(x)) * tan(x));
              end
              
              (* Machine-precision form of eps + eps*Tan[x]^2. *)
              code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
              
              \begin{array}{l}
              
              \\
              \varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
              \end{array}
              

              Reproduce

              ?
              herbie shell --seed 2024214 
              ; Reproduction input: the original expression with its precondition,
              ; plus the three developer-supplied target rewrites as :alt clauses.
              (FPCore (x eps)
                :name "2tan (problem 3.3.2)"
                :precision binary64
                :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
              
                :alt
                (! :herbie-platform default (/ (sin eps) (* (cos x) (cos (+ x eps)))))
              
                :alt
                (! :herbie-platform default (- (/ (+ (tan x) (tan eps)) (- 1 (* (tan x) (tan eps)))) (tan x)))
              
                :alt
                (! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
              
                (- (tan (+ x eps)) (tan x)))