2tan (problem 3.3.2)

Percentage Accurate: 62.4% → 99.1%
Time: 9.1s
Alternatives: 22
Speedup: 207.0×

Specification

The specification below gives the input domain and the target expression.
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    !> NaN-aware maximum (C fmax semantics): when one argument is a NaN
    !> the other argument is returned. Mixed-kind variants return real(8).
    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    !> NaN-aware minimum (C fmin semantics); same kind combinations as fmax.
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! In every body below, (v /= v) is true exactly when v is a NaN.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function fmax88
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = max(x, y)
        end if
    end function fmax44
    real(8) function fmax84(x, y) result (res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = max(x, dble(y))
        end if
    end function fmax84
    real(8) function fmax48(x, y) result (res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = max(dble(x), y)
        end if
    end function fmax48
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function fmin88
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = x
        else
            res = min(x, y)
        end if
    end function fmin44
    real(8) function fmin84(x, y) result (res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        if (x /= x) then
            res = dble(y)
        else if (y /= y) then
            res = x
        else
            res = min(x, dble(y))
        end if
    end function fmin84
    real(8) function fmin48(x, y) result (res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        if (x /= x) then
            res = y
        else if (y /= y) then
            res = dble(x)
        else
            res = min(dble(x), y)
        end if
    end function fmin48
end module fmin_fmax_functions

!> Reference implementation of the benchmark expression
!> tan(x + eps) - tan(x) in double precision.
!> NOTE: the direct difference cancels catastrophically when
!> |eps| << |x|; the Herbie alternatives in this report avoid that.
real(8) function code(x, eps)
use fmin_fmax_functions
    implicit none
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
/** Naive reference: Math.tan(x + eps) - Math.tan(x); cancellation-prone for small eps. */
public static double code(double x, double eps) {
	final double lifted = Math.tan(x + eps);
	return lifted - Math.tan(x);
}
def code(x, eps):
	"""Return tan(x + eps) - tan(x) (naive difference; cancels for tiny eps)."""
	shifted = math.tan(x + eps)
	return shifted - math.tan(x)
# Naive reference: tan(x + eps) - tan(x), forced to Float64 as in the original.
function code(x, eps)
	shifted = Float64(x + eps)
	return Float64(tan(shifted) - tan(x))
end
function tmp = code(x, eps)
	% Naive reference: tan(x + eps) - tan(x); cancellation-prone for small eps.
	shifted = tan(x + eps);
	tmp = shifted - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 22 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 62.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
/* Naive reference implementation: tan(x + eps) - tan(x).
 * Cancels catastrophically when |eps| << |x|; see the alternatives below. */
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
!> NaN-aware max/min helpers with C fmax/fmin semantics, generic over the
!> four real(4)/real(8) argument combinations (mixed kinds return real(8)).
module fmin_fmax_functions
    implicit none
    private
    public fmax
    public fmin

    interface fmax
        module procedure fmax88
        module procedure fmax44
        module procedure fmax84
        module procedure fmax48
    end interface
    interface fmin
        module procedure fmin88
        module procedure fmin44
        module procedure fmin84
        module procedure fmin48
    end interface
contains
    ! In every body below, (v /= v) is true exactly when v is a NaN, so the
    ! nested merge returns the other operand when one argument is NaN and
    ! max/min of the pair otherwise.
    real(8) function fmax88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(4) function fmax44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, max(x, y), y /= y), x /= x)
    end function
    real(8) function fmax84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmax48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
    end function
    real(8) function fmin88(x, y) result (res)
        real(8), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(4) function fmin44(x, y) result (res)
        real(4), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(y, merge(x, min(x, y), y /= y), x /= x)
    end function
    real(8) function fmin84(x, y) result(res)
        real(8), intent (in) :: x
        real(4), intent (in) :: y
        res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
    end function
    real(8) function fmin48(x, y) result(res)
        real(4), intent (in) :: x
        real(8), intent (in) :: y
        res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
    end function
end module

!> Naive reference implementation: tan(x + eps) - tan(x) in double precision.
!> NOTE(review): no implicit none here; generated code relies on the explicit
!> declarations below only.
real(8) function code(x, eps)
use fmin_fmax_functions
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
/** Naive reference: Math.tan(x + eps) - Math.tan(x); cancellation-prone for small eps. */
public static double code(double x, double eps) {
	return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
	"""Return tan(x + eps) - tan(x) (naive difference; assumes ``math`` is imported)."""
	return math.tan((x + eps)) - math.tan(x)
# Naive reference: tan(x + eps) - tan(x), forced to Float64.
function code(x, eps)
	return Float64(tan(Float64(x + eps)) - tan(x))
end
% Naive reference: tan(x + eps) - tan(x).
function tmp = code(x, eps)
	tmp = tan((x + eps)) - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Alternative 1: 99.1% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := {\tan x}^{2}\\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\left(1 + t\_0\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + t\_0\right) \cdot \varepsilon \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (pow (tan x) 2.0)))
   (*
    (+
     (fma
      (fma
       (- eps)
       (-
        (* (- (* -1.8888888888888888 (* x x)) 1.3333333333333333) (* x x))
        0.3333333333333333)
       (/ (* (+ 1.0 t_0) (sin x)) (cos x)))
      eps
      1.0)
     t_0)
    eps)))
/* Herbie alternative 1: Taylor-based rewrite of tan(x+eps) - tan(x).
 * t_0 = tan(x)^2; the fma chain evaluates eps times the derivative term
 * plus higher-order corrections, avoiding the cancellation of the naive
 * subtraction. Exact statement form is Herbie-generated; do not refactor. */
double code(double x, double eps) {
	double t_0 = pow(tan(x), 2.0);
	return (fma(fma(-eps, ((((-1.8888888888888888 * (x * x)) - 1.3333333333333333) * (x * x)) - 0.3333333333333333), (((1.0 + t_0) * sin(x)) / cos(x))), eps, 1.0) + t_0) * eps;
}
# Herbie alternative 1: Taylor-based rewrite; avoids the cancellation of the
# naive difference. Generated code — the explicit Float64/fma structure is intentional.
function code(x, eps)
	t_0 = tan(x) ^ 2.0
	return Float64(Float64(fma(fma(Float64(-eps), Float64(Float64(Float64(Float64(-1.8888888888888888 * Float64(x * x)) - 1.3333333333333333) * Float64(x * x)) - 0.3333333333333333), Float64(Float64(Float64(1.0 + t_0) * sin(x)) / cos(x))), eps, 1.0) + t_0) * eps)
end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(N[(N[((-eps) * N[(N[(N[(N[(-1.8888888888888888 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] + N[(N[(N[(1.0 + t$95$0), $MachinePrecision] * N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + t$95$0), $MachinePrecision] * eps), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\left(1 + t\_0\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + t\_0\right) \cdot \varepsilon
\end{array}
\end{array}
Derivation
  1. Initial program 65.9%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Applied rewrites100.0%

    \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
  5. Taylor expanded in x around 0

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
  6. Step-by-step derivation
    1. lower--.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    2. *-commutativeN/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    3. lower-*.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    4. lower--.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    5. lower-*.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    6. pow2N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    7. lift-*.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    8. pow2N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    9. lift-*.f64100.0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
  7. Applied rewrites100.0%

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
  8. Final simplification100.0%

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\left(1 + {\tan x}^{2}\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
  9. Add Preprocessing

Alternative 2: 99.3% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := {\tan x}^{2}\\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(x \cdot x\right) \cdot -1.3333333333333333 - 0.3333333333333333, \frac{\left(1 + t\_0\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + t\_0\right) \cdot \varepsilon \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (pow (tan x) 2.0)))
   (*
    (+
     (fma
      (fma
       (- eps)
       (- (* (* x x) -1.3333333333333333) 0.3333333333333333)
       (/ (* (+ 1.0 t_0) (sin x)) (cos x)))
      eps
      1.0)
     t_0)
    eps)))
/* Herbie alternative 2: like alternative 1 but with a lower-order
 * polynomial in x for the correction term. t_0 = tan(x)^2. */
double code(double x, double eps) {
	double t_0 = pow(tan(x), 2.0);
	return (fma(fma(-eps, (((x * x) * -1.3333333333333333) - 0.3333333333333333), (((1.0 + t_0) * sin(x)) / cos(x))), eps, 1.0) + t_0) * eps;
}
# Herbie alternative 2: lower-order polynomial correction; Herbie-generated form.
function code(x, eps)
	t_0 = tan(x) ^ 2.0
	return Float64(Float64(fma(fma(Float64(-eps), Float64(Float64(Float64(x * x) * -1.3333333333333333) - 0.3333333333333333), Float64(Float64(Float64(1.0 + t_0) * sin(x)) / cos(x))), eps, 1.0) + t_0) * eps)
end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[(N[(N[((-eps) * N[(N[(N[(x * x), $MachinePrecision] * -1.3333333333333333), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] + N[(N[(N[(1.0 + t$95$0), $MachinePrecision] * N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + t$95$0), $MachinePrecision] * eps), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(x \cdot x\right) \cdot -1.3333333333333333 - 0.3333333333333333, \frac{\left(1 + t\_0\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + t\_0\right) \cdot \varepsilon
\end{array}
\end{array}
Derivation
  1. Initial program 65.9%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Applied rewrites100.0%

    \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
  5. Taylor expanded in x around 0

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-4}{3} \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
  6. Step-by-step derivation
    1. lower--.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-4}{3} \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    2. *-commutativeN/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \frac{-4}{3} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    3. lower-*.f64N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \frac{-4}{3} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    4. pow2N/A

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(x \cdot x\right) \cdot \frac{-4}{3} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    5. lift-*.f64100.0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(x \cdot x\right) \cdot -1.3333333333333333 - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
  7. Applied rewrites100.0%

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(x \cdot x\right) \cdot -1.3333333333333333 - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
  8. Final simplification100.0%

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(x \cdot x\right) \cdot -1.3333333333333333 - 0.3333333333333333, \frac{\left(1 + {\tan x}^{2}\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
  9. Add Preprocessing

Alternative 3: 99.4% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := 1 + {\tan x}^{2}\\ \mathsf{fma}\left(\mathsf{fma}\left(t\_0, \tan x, 0.3333333333333333 \cdot \varepsilon\right), \varepsilon, t\_0\right) \cdot \varepsilon \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (+ 1.0 (pow (tan x) 2.0))))
   (* (fma (fma t_0 (tan x) (* 0.3333333333333333 eps)) eps t_0) eps)))
/* Herbie alternative 3: t_0 = 1 + tan(x)^2 is the derivative of tan at x,
 * so the result is eps * (t_0 + eps*(t_0*tan(x) + eps/3)) via fma. */
double code(double x, double eps) {
	double t_0 = 1.0 + pow(tan(x), 2.0);
	return fma(fma(t_0, tan(x), (0.3333333333333333 * eps)), eps, t_0) * eps;
}
# Herbie alternative 3: t_0 = 1 + tan(x)^2 (derivative of tan at x); fma-based series.
function code(x, eps)
	t_0 = Float64(1.0 + (tan(x) ^ 2.0))
	return Float64(fma(fma(t_0, tan(x), Float64(0.3333333333333333 * eps)), eps, t_0) * eps)
end
code[x_, eps_] := Block[{t$95$0 = N[(1.0 + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[Tan[x], $MachinePrecision] + N[(0.3333333333333333 * eps), $MachinePrecision]), $MachinePrecision] * eps + t$95$0), $MachinePrecision] * eps), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := 1 + {\tan x}^{2}\\
\mathsf{fma}\left(\mathsf{fma}\left(t\_0, \tan x, 0.3333333333333333 \cdot \varepsilon\right), \varepsilon, t\_0\right) \cdot \varepsilon
\end{array}
\end{array}
Derivation
  1. Initial program 65.9%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Applied rewrites100.0%

    \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
  5. Taylor expanded in x around 0

    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
  6. Step-by-step derivation
    1. Applied rewrites99.9%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    2. Applied rewrites99.9%

      \[\leadsto \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), \tan x, -0.3333333333333333 \cdot \left(-\varepsilon\right)\right), \varepsilon, 1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
    3. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), \tan x, \frac{1}{3} \cdot \varepsilon\right), \varepsilon, 1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    4. Step-by-step derivation
      1. lower-*.f6499.9

        \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), \tan x, 0.3333333333333333 \cdot \varepsilon\right), \varepsilon, 1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    5. Applied rewrites99.9%

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), \tan x, 0.3333333333333333 \cdot \varepsilon\right), \varepsilon, 1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    6. Final simplification99.9%

      \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(1 + {\tan x}^{2}, \tan x, 0.3333333333333333 \cdot \varepsilon\right), \varepsilon, 1 + {\tan x}^{2}\right) \cdot \varepsilon \]
    7. Add Preprocessing

    Alternative 4: 98.6% accurate, 0.4× speedup?

    \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\left(1 + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (*
      (+
       (fma
        (fma
         (- eps)
         (-
          (* (- (* -1.8888888888888888 (* x x)) 1.3333333333333333) (* x x))
          0.3333333333333333)
         (/
          (*
           (+
            1.0
            (*
             (fma
              (fma
               (fma 0.19682539682539682 (* x x) 0.37777777777777777)
               (* x x)
               0.6666666666666666)
              (* x x)
              1.0)
             (* x x)))
           (sin x))
          (cos x)))
        eps
        1.0)
       (pow (tan x) 2.0))
      eps))
    /* Herbie alternative 4: like alternative 1 but with a polynomial
     * expansion substituted for one tan(x)^2 factor in the sin/cos term. */
    double code(double x, double eps) {
    	return (fma(fma(-eps, ((((-1.8888888888888888 * (x * x)) - 1.3333333333333333) * (x * x)) - 0.3333333333333333), (((1.0 + (fma(fma(fma(0.19682539682539682, (x * x), 0.37777777777777777), (x * x), 0.6666666666666666), (x * x), 1.0) * (x * x))) * sin(x)) / cos(x))), eps, 1.0) + pow(tan(x), 2.0)) * eps;
    }
    
    # Herbie alternative 4: polynomial-expanded variant of alternative 1; generated form.
    function code(x, eps)
    	return Float64(Float64(fma(fma(Float64(-eps), Float64(Float64(Float64(Float64(-1.8888888888888888 * Float64(x * x)) - 1.3333333333333333) * Float64(x * x)) - 0.3333333333333333), Float64(Float64(Float64(1.0 + Float64(fma(fma(fma(0.19682539682539682, Float64(x * x), 0.37777777777777777), Float64(x * x), 0.6666666666666666), Float64(x * x), 1.0) * Float64(x * x))) * sin(x)) / cos(x))), eps, 1.0) + (tan(x) ^ 2.0)) * eps)
    end
    
    code[x_, eps_] := N[(N[(N[(N[((-eps) * N[(N[(N[(N[(-1.8888888888888888 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] + N[(N[(N[(1.0 + N[(N[(N[(N[(0.19682539682539682 * N[(x * x), $MachinePrecision] + 0.37777777777777777), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\left(1 + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon
    \end{array}
    
    Derivation
    1. Initial program 65.9%

      \[\tan \left(x + \varepsilon\right) - \tan x \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
    4. Applied rewrites100.0%

      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
    5. Taylor expanded in x around 0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    6. Step-by-step derivation
      1. lower--.f64N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      2. *-commutativeN/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      3. lower-*.f64N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      4. lower--.f64N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      5. lower-*.f64N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      6. pow2N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      7. lift-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      8. pow2 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      9. lift-*.f64 100.0

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    7. Applied rewrites 100.0%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    8. Taylor expanded in x around 0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right)\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    9. Step-by-step derivation
      1. *-commutative N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      2. lower-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    10. Applied rewrites 99.8%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    11. Final simplification 99.8%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\left(1 + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
    12. Add Preprocessing

    Alternative 5: 98.6% accurate, 0.5× speedup?

    \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.2748015873015873, x \cdot x, 0.5083333333333333\right), x \cdot x, 0.8333333333333334\right), x \cdot x, 1\right) \cdot x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (*
      (+
       (fma
        (fma
         (- eps)
         (-
          (* (- (* -1.8888888888888888 (* x x)) 1.3333333333333333) (* x x))
          0.3333333333333333)
         (/
          (*
           (fma
            (fma
             (fma 0.2748015873015873 (* x x) 0.5083333333333333)
             (* x x)
             0.8333333333333334)
            (* x x)
            1.0)
           x)
          (cos x)))
        eps
        1.0)
       (pow (tan x) 2.0))
      eps))
    double code(double x, double eps) {
    	return (fma(fma(-eps, ((((-1.8888888888888888 * (x * x)) - 1.3333333333333333) * (x * x)) - 0.3333333333333333), ((fma(fma(fma(0.2748015873015873, (x * x), 0.5083333333333333), (x * x), 0.8333333333333334), (x * x), 1.0) * x) / cos(x))), eps, 1.0) + pow(tan(x), 2.0)) * eps;
    }
    
    function code(x, eps)
    	return Float64(Float64(fma(fma(Float64(-eps), Float64(Float64(Float64(Float64(-1.8888888888888888 * Float64(x * x)) - 1.3333333333333333) * Float64(x * x)) - 0.3333333333333333), Float64(Float64(fma(fma(fma(0.2748015873015873, Float64(x * x), 0.5083333333333333), Float64(x * x), 0.8333333333333334), Float64(x * x), 1.0) * x) / cos(x))), eps, 1.0) + (tan(x) ^ 2.0)) * eps)
    end
    
    code[x_, eps_] := N[(N[(N[(N[((-eps) * N[(N[(N[(N[(-1.8888888888888888 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] + N[(N[(N[(N[(N[(0.2748015873015873 * N[(x * x), $MachinePrecision] + 0.5083333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.8333333333333334), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.2748015873015873, x \cdot x, 0.5083333333333333\right), x \cdot x, 0.8333333333333334\right), x \cdot x, 1\right) \cdot x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon
    \end{array}
    
    Derivation
    1. Initial program 65.9%

      \[\tan \left(x + \varepsilon\right) - \tan x \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
    4. Applied rewrites 100.0%

      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
    5. Taylor expanded in x around 0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    6. Step-by-step derivation
      1. lower--.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      2. *-commutative N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      3. lower-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      4. lower--.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      5. lower-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      6. pow2 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      7. lift-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      8. pow2 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      9. lift-*.f64 100.0

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    7. Applied rewrites 100.0%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    8. Taylor expanded in x around 0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{x \cdot \left(1 + {x}^{2} \cdot \left(\frac{5}{6} + {x}^{2} \cdot \left(\frac{61}{120} + \frac{277}{1008} \cdot {x}^{2}\right)\right)\right)}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    9. Step-by-step derivation
      1. *-commutative N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 + {x}^{2} \cdot \left(\frac{5}{6} + {x}^{2} \cdot \left(\frac{61}{120} + \frac{277}{1008} \cdot {x}^{2}\right)\right)\right) \cdot x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      2. lower-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 + {x}^{2} \cdot \left(\frac{5}{6} + {x}^{2} \cdot \left(\frac{61}{120} + \frac{277}{1008} \cdot {x}^{2}\right)\right)\right) \cdot x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    10. Applied rewrites 99.8%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.2748015873015873, x \cdot x, 0.5083333333333333\right), x \cdot x, 0.8333333333333334\right), x \cdot x, 1\right) \cdot x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    11. Final simplification 99.8%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.2748015873015873, x \cdot x, 0.5083333333333333\right), x \cdot x, 0.8333333333333334\right), x \cdot x, 1\right) \cdot x}{\cos x}\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
    12. Add Preprocessing

    Alternative 6: 98.6% accurate, 0.7× speedup?

    \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (*
      (+
       (fma
        (fma
         (- eps)
         (-
          (* (- (* -1.8888888888888888 (* x x)) 1.3333333333333333) (* x x))
          0.3333333333333333)
         (*
          (fma
           (fma
            (fma 0.7873015873015873 (* x x) 1.1333333333333333)
            (* x x)
            1.3333333333333333)
           (* x x)
           1.0)
          x))
        eps
        1.0)
       (pow (tan x) 2.0))
      eps))
    double code(double x, double eps) {
    	return (fma(fma(-eps, ((((-1.8888888888888888 * (x * x)) - 1.3333333333333333) * (x * x)) - 0.3333333333333333), (fma(fma(fma(0.7873015873015873, (x * x), 1.1333333333333333), (x * x), 1.3333333333333333), (x * x), 1.0) * x)), eps, 1.0) + pow(tan(x), 2.0)) * eps;
    }
    
    function code(x, eps)
    	return Float64(Float64(fma(fma(Float64(-eps), Float64(Float64(Float64(Float64(-1.8888888888888888 * Float64(x * x)) - 1.3333333333333333) * Float64(x * x)) - 0.3333333333333333), Float64(fma(fma(fma(0.7873015873015873, Float64(x * x), 1.1333333333333333), Float64(x * x), 1.3333333333333333), Float64(x * x), 1.0) * x)), eps, 1.0) + (tan(x) ^ 2.0)) * eps)
    end
    
    code[x_, eps_] := N[(N[(N[(N[((-eps) * N[(N[(N[(N[(-1.8888888888888888 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.3333333333333333), $MachinePrecision] + N[(N[(N[(N[(0.7873015873015873 * N[(x * x), $MachinePrecision] + 1.1333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon
    \end{array}
    
    Derivation
    1. Initial program 65.9%

      \[\tan \left(x + \varepsilon\right) - \tan x \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
    4. Applied rewrites 100.0%

      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
    5. Taylor expanded in x around 0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    6. Step-by-step derivation
      1. lower--.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, {x}^{2} \cdot \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      2. *-commutative N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      3. lower-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      4. lower--.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      5. lower-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot {x}^{2} - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      6. pow2 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      7. lift-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot {x}^{2} - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      8. pow2 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      9. lift-*.f64 100.0

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    7. Applied rewrites 100.0%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    8. Taylor expanded in x around 0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, x \cdot \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + {x}^{2} \cdot \left(\frac{17}{15} + \frac{248}{315} \cdot {x}^{2}\right)\right)\right)\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    9. Step-by-step derivation
      1. *-commutative N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + {x}^{2} \cdot \left(\frac{17}{15} + \frac{248}{315} \cdot {x}^{2}\right)\right)\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      2. lower-*.f64 N/A

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(\frac{-17}{9} \cdot \left(x \cdot x\right) - \frac{4}{3}\right) \cdot \left(x \cdot x\right) - \frac{1}{3}, \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + {x}^{2} \cdot \left(\frac{17}{15} + \frac{248}{315} \cdot {x}^{2}\right)\right)\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    10. Applied rewrites 99.8%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    11. Final simplification 99.8%

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \left(-1.8888888888888888 \cdot \left(x \cdot x\right) - 1.3333333333333333\right) \cdot \left(x \cdot x\right) - 0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
    12. Add Preprocessing

    Alternative 7: 98.6% accurate, 0.8× speedup?

    \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (*
      (+
       (fma
        (fma
         (- eps)
         -0.3333333333333333
         (*
          (fma
           (fma
            (fma 0.7873015873015873 (* x x) 1.1333333333333333)
            (* x x)
            1.3333333333333333)
           (* x x)
           1.0)
          x))
        eps
        1.0)
       (pow (tan x) 2.0))
      eps))
    double code(double x, double eps) {
    	return (fma(fma(-eps, -0.3333333333333333, (fma(fma(fma(0.7873015873015873, (x * x), 1.1333333333333333), (x * x), 1.3333333333333333), (x * x), 1.0) * x)), eps, 1.0) + pow(tan(x), 2.0)) * eps;
    }
    
    function code(x, eps)
    	return Float64(Float64(fma(fma(Float64(-eps), -0.3333333333333333, Float64(fma(fma(fma(0.7873015873015873, Float64(x * x), 1.1333333333333333), Float64(x * x), 1.3333333333333333), Float64(x * x), 1.0) * x)), eps, 1.0) + (tan(x) ^ 2.0)) * eps)
    end
    
    code[x_, eps_] := N[(N[(N[(N[((-eps) * -0.3333333333333333 + N[(N[(N[(N[(0.7873015873015873 * N[(x * x), $MachinePrecision] + 1.1333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon
    \end{array}
    
    Derivation
    1. Initial program 65.9%

      \[\tan \left(x + \varepsilon\right) - \tan x \]
    2. Add Preprocessing
    3. Taylor expanded in eps around 0

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
    4. Applied rewrites 100.0%

      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
    5. Taylor expanded in x around 0

      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
    6. Step-by-step derivation
      1. Applied rewrites 99.9%

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      2. Taylor expanded in x around 0

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, x \cdot \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + {x}^{2} \cdot \left(\frac{17}{15} + \frac{248}{315} \cdot {x}^{2}\right)\right)\right)\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      3. Step-by-step derivation
        1. *-commutative N/A

          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + {x}^{2} \cdot \left(\frac{17}{15} + \frac{248}{315} \cdot {x}^{2}\right)\right)\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
        2. lower-*.f64 N/A

          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + {x}^{2} \cdot \left(\frac{17}{15} + \frac{248}{315} \cdot {x}^{2}\right)\right)\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      4. Applied rewrites 99.8%

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      5. Final simplification 99.8%

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.7873015873015873, x \cdot x, 1.1333333333333333\right), x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
      6. Add Preprocessing

      Alternative 8: 98.7% accurate, 0.8× speedup?

      \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(1.1333333333333333, x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
      ;; Alternative 8: eps-scaled fma polynomial in x plus tan(x)^2 (binary64).
      (FPCore (x eps)
       :precision binary64
       (*
        (+
         (fma
          (fma
           (- eps)
           -0.3333333333333333
           (*
            (fma (fma 1.1333333333333333 (* x x) 1.3333333333333333) (* x x) 1.0)
            x))
          eps
          1.0)
         (pow (tan x) 2.0))
        eps))
      /* Alternative 8: fma-based polynomial form of tan(x+eps) - tan(x), scaled by eps (binary64). */
      double code(double x, double eps) {
      	return (fma(fma(-eps, -0.3333333333333333, (fma(fma(1.1333333333333333, (x * x), 1.3333333333333333), (x * x), 1.0) * x)), eps, 1.0) + pow(tan(x), 2.0)) * eps;
      }
      
      # Alternative 8: fma-based polynomial form of tan(x+eps) - tan(x), scaled by eps (Float64).
      function code(x, eps)
      	return Float64(Float64(fma(fma(Float64(-eps), -0.3333333333333333, Float64(fma(fma(1.1333333333333333, Float64(x * x), 1.3333333333333333), Float64(x * x), 1.0) * x)), eps, 1.0) + (tan(x) ^ 2.0)) * eps)
      end
      
      (* Alternative 8: fma-style polynomial form of tan(x+eps) - tan(x), scaled by eps. *)
      code[x_, eps_] := N[(N[(N[(N[((-eps) * -0.3333333333333333 + N[(N[(N[(1.1333333333333333 * N[(x * x), $MachinePrecision] + 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(1.1333333333333333, x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon
      \end{array}
      
      Derivation
      1. Initial program 65.9%

        \[\tan \left(x + \varepsilon\right) - \tan x \]
      2. Add Preprocessing
      3. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
      4. Applied rewrites100.0%

        \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
      5. Taylor expanded in x around 0

        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
      6. Step-by-step derivation
        1. Applied rewrites99.9%

          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
        2. Taylor expanded in x around 0

          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, x \cdot \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + \frac{17}{15} \cdot {x}^{2}\right)\right)\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
        3. Step-by-step derivation
          1. *-commutativeN/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + \frac{17}{15} \cdot {x}^{2}\right)\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          2. lower-*.f64N/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(1 + {x}^{2} \cdot \left(\frac{4}{3} + \frac{17}{15} \cdot {x}^{2}\right)\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          3. +-commutativeN/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left({x}^{2} \cdot \left(\frac{4}{3} + \frac{17}{15} \cdot {x}^{2}\right) + 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          4. *-commutativeN/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(\left(\frac{4}{3} + \frac{17}{15} \cdot {x}^{2}\right) \cdot {x}^{2} + 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          5. lower-fma.f64N/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left(\frac{4}{3} + \frac{17}{15} \cdot {x}^{2}, {x}^{2}, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          6. +-commutativeN/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left(\frac{17}{15} \cdot {x}^{2} + \frac{4}{3}, {x}^{2}, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          7. lower-fma.f64N/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{15}, {x}^{2}, \frac{4}{3}\right), {x}^{2}, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          8. pow2N/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{15}, x \cdot x, \frac{4}{3}\right), {x}^{2}, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          9. lift-*.f64N/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{15}, x \cdot x, \frac{4}{3}\right), {x}^{2}, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          10. pow2N/A

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{15}, x \cdot x, \frac{4}{3}\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          11. lift-*.f64 (99.7%)

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(1.1333333333333333, x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
        4. Applied rewrites99.7%

          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(1.1333333333333333, x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
        5. Final simplification99.7%

          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(\mathsf{fma}\left(1.1333333333333333, x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
        6. Add Preprocessing

        Alternative 9: 98.7% accurate, 0.9× speedup?

        \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(x \cdot x, 1.3333333333333333, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
        ;; Alternative 9: like alternative 8 but with a shorter (cubic) polynomial in x (binary64).
        (FPCore (x eps)
         :precision binary64
         (*
          (+
           (fma
            (fma
             (- eps)
             -0.3333333333333333
             (* (fma (* x x) 1.3333333333333333 1.0) x))
            eps
            1.0)
           (pow (tan x) 2.0))
          eps))
        /* Alternative 9: shorter cubic polynomial form of tan(x+eps) - tan(x), scaled by eps (binary64). */
        double code(double x, double eps) {
        	return (fma(fma(-eps, -0.3333333333333333, (fma((x * x), 1.3333333333333333, 1.0) * x)), eps, 1.0) + pow(tan(x), 2.0)) * eps;
        }
        
        # Alternative 9: shorter cubic polynomial form of tan(x+eps) - tan(x), scaled by eps (Float64).
        function code(x, eps)
        	return Float64(Float64(fma(fma(Float64(-eps), -0.3333333333333333, Float64(fma(Float64(x * x), 1.3333333333333333, 1.0) * x)), eps, 1.0) + (tan(x) ^ 2.0)) * eps)
        end
        
        (* Alternative 9: shorter cubic polynomial form of tan(x+eps) - tan(x), scaled by eps. *)
        code[x_, eps_] := N[(N[(N[(N[((-eps) * -0.3333333333333333 + N[(N[(N[(x * x), $MachinePrecision] * 1.3333333333333333 + 1.0), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(x \cdot x, 1.3333333333333333, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon
        \end{array}
        
        Derivation
        1. Initial program 65.9%

          \[\tan \left(x + \varepsilon\right) - \tan x \]
        2. Add Preprocessing
        3. Taylor expanded in eps around 0

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
        4. Applied rewrites100.0%

          \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
        5. Taylor expanded in x around 0

          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
        6. Step-by-step derivation
          1. Applied rewrites99.9%

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          2. Taylor expanded in x around 0

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, x \cdot \left(1 + \frac{4}{3} \cdot {x}^{2}\right)\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          3. Step-by-step derivation
            1. *-commutativeN/A

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(1 + \frac{4}{3} \cdot {x}^{2}\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            2. lower-*.f64N/A

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(1 + \frac{4}{3} \cdot {x}^{2}\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            3. +-commutativeN/A

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left(\frac{4}{3} \cdot {x}^{2} + 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            4. *-commutativeN/A

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \left({x}^{2} \cdot \frac{4}{3} + 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            5. lower-fma.f64N/A

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left({x}^{2}, \frac{4}{3}, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            6. pow2N/A

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, \mathsf{fma}\left(x \cdot x, \frac{4}{3}, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            7. lift-*.f64 (99.6%)

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(x \cdot x, 1.3333333333333333, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          4. Applied rewrites99.6%

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(x \cdot x, 1.3333333333333333, 1\right) \cdot x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          5. Final simplification99.6%

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \mathsf{fma}\left(x \cdot x, 1.3333333333333333, 1\right) \cdot x\right), \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
          6. Add Preprocessing

          Alternative 10: 98.9% accurate, 1.0× speedup?

          \[\begin{array}{l} \\ \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
          ;; Alternative 10: (fma(x, eps, 1) + tan(x)^2) * eps (binary64).
          (FPCore (x eps)
           :precision binary64
           (* (+ (fma x eps 1.0) (pow (tan x) 2.0)) eps))
          /* Alternative 10: (fma(x, eps, 1) + tan(x)^2) * eps approximation of tan(x+eps) - tan(x). */
          double code(double x, double eps) {
          	return (fma(x, eps, 1.0) + pow(tan(x), 2.0)) * eps;
          }
          
          # Alternative 10: (fma(x, eps, 1) + tan(x)^2) * eps approximation of tan(x+eps) - tan(x).
          function code(x, eps)
          	return Float64(Float64(fma(x, eps, 1.0) + (tan(x) ^ 2.0)) * eps)
          end
          
          (* Alternative 10: (x*eps + 1 + tan(x)^2) * eps approximation of tan(x+eps) - tan(x). *)
          code[x_, eps_] := N[(N[(N[(x * eps + 1.0), $MachinePrecision] + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
          
          \begin{array}{l}
          
          \\
          \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon
          \end{array}
          
          Derivation
          1. Initial program 65.9%

            \[\tan \left(x + \varepsilon\right) - \tan x \]
          2. Add Preprocessing
          3. Taylor expanded in eps around 0

            \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
          4. Applied rewrites100.0%

            \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
          5. Taylor expanded in x around 0

            \[\leadsto \left(\mathsf{fma}\left(x + \frac{1}{3} \cdot \varepsilon, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          6. Step-by-step derivation
            1. +-commutativeN/A

              \[\leadsto \left(\mathsf{fma}\left(\frac{1}{3} \cdot \varepsilon + x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            2. lower-fma.f64 (99.6%)

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          7. Applied rewrites99.6%

            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          8. Taylor expanded in x around inf

            \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
          9. Step-by-step derivation
            1. Applied rewrites99.6%

              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            2. Final simplification99.6%

              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + {\tan x}^{2}\right) \cdot \varepsilon \]
            3. Add Preprocessing

            Alternative 11: 98.9% accurate, 1.0× speedup?

            \[\begin{array}{l} \\ \left(1 + {\tan x}^{2}\right) \cdot \varepsilon \end{array} \]
            ;; Alternative 11: first-order form (1 + tan(x)^2) * eps, i.e. tan'(x) * eps (binary64).
            (FPCore (x eps) :precision binary64 (* (+ 1.0 (pow (tan x) 2.0)) eps))
            /* Alternative 11: tan(x+eps) - tan(x) ~= (1 + tan(x)^2) * eps (binary64). */
            double code(double x, double eps) {
            	return (1.0 + pow(tan(x), 2.0)) * eps;
            }
            
            !> Generated support module: NaN-aware fmax/fmin wrappers.
            !> `x /= x` is true exactly when x is NaN, and merge(t, f, mask)
            !> selects t when mask is true — so when one argument is NaN the
            !> OTHER argument is returned, mirroring C's fmax/fmin semantics
            !> (plain Fortran max/min make no such guarantee for NaN inputs).
            module fmin_fmax_functions
                implicit none
                private
                public fmax
                public fmin

                !> Generic fmax over all real(8)/real(4) argument combinations.
                interface fmax
                    module procedure fmax88
                    module procedure fmax44
                    module procedure fmax84
                    module procedure fmax48
                end interface
                !> Generic fmin over all real(8)/real(4) argument combinations.
                interface fmin
                    module procedure fmin88
                    module procedure fmin44
                    module procedure fmin84
                    module procedure fmin48
                end interface
            contains
                !> Max of two real(8); if one argument is NaN, returns the other.
                real(8) function fmax88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                !> Max of two real(4); if one argument is NaN, returns the other.
                real(4) function fmax44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                end function
                !> Mixed-kind max (real(8), real(4)); real(4) operand promoted via dble.
                real(8) function fmax84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                end function
                !> Mixed-kind max (real(4), real(8)); real(4) operand promoted via dble.
                real(8) function fmax48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                end function
                !> Min of two real(8); if one argument is NaN, returns the other.
                real(8) function fmin88(x, y) result (res)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                !> Min of two real(4); if one argument is NaN, returns the other.
                real(4) function fmin44(x, y) result (res)
                    real(4), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                end function
                !> Mixed-kind min (real(8), real(4)); real(4) operand promoted via dble.
                real(8) function fmin84(x, y) result(res)
                    real(8), intent (in) :: x
                    real(4), intent (in) :: y
                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                end function
                !> Mixed-kind min (real(4), real(8)); real(4) operand promoted via dble.
                real(8) function fmin48(x, y) result(res)
                    real(4), intent (in) :: x
                    real(8), intent (in) :: y
                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                end function
            end module
            
            !> Alternative 11: approximates tan(x + eps) - tan(x) as
            !> (1 + tan(x)**2) * eps — the first-order Taylor term in eps
            !> (since d/dx tan(x) = 1 + tan(x)**2), avoiding the subtraction
            !> of two nearly equal tangents in the original expression.
            real(8) function code(x, eps)
            use fmin_fmax_functions
                real(8), intent (in) :: x
                real(8), intent (in) :: eps
                code = (1.0d0 + (tan(x) ** 2.0d0)) * eps
            end function
            
            // Alternative 11: tan(x+eps) - tan(x) ~= (1 + tan(x)^2) * eps (binary64).
            public static double code(double x, double eps) {
            	return (1.0 + Math.pow(Math.tan(x), 2.0)) * eps;
            }
            
            # Alternative 11: tan(x+eps) - tan(x) ~= (1 + tan(x)**2) * eps.
            def code(x, eps):
            	return (1.0 + math.pow(math.tan(x), 2.0)) * eps
            
            # Alternative 11: tan(x+eps) - tan(x) ~= (1 + tan(x)^2) * eps (Float64).
            function code(x, eps)
            	return Float64(Float64(1.0 + (tan(x) ^ 2.0)) * eps)
            end
            
            % Alternative 11: tan(x+eps) - tan(x) ~= (1 + tan(x)^2) * eps.
            function tmp = code(x, eps)
            	tmp = (1.0 + (tan(x) ^ 2.0)) * eps;
            end
            
            (* Alternative 11: tan(x+eps) - tan(x) ~= (1 + tan(x)^2) * eps. *)
            code[x_, eps_] := N[(N[(1.0 + N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            \left(1 + {\tan x}^{2}\right) \cdot \varepsilon
            \end{array}
            
            Derivation
            1. Initial program 65.9%

              \[\tan \left(x + \varepsilon\right) - \tan x \]
            2. Add Preprocessing
            3. Taylor expanded in eps around 0

              \[\leadsto \color{blue}{\varepsilon \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
            4. Step-by-step derivation
              1. *-commutativeN/A

                \[\leadsto \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) \cdot \color{blue}{\varepsilon} \]
              2. lower-*.f64N/A

                \[\leadsto \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) \cdot \color{blue}{\varepsilon} \]
              3. lower--.f64N/A

                \[\leadsto \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) \cdot \varepsilon \]
              4. mul-1-negN/A

                \[\leadsto \left(1 - \left(\mathsf{neg}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) \cdot \varepsilon \]
              5. unpow2N/A

                \[\leadsto \left(1 - \left(\mathsf{neg}\left(\frac{\sin x \cdot \sin x}{{\cos x}^{2}}\right)\right)\right) \cdot \varepsilon \]
              6. unpow2N/A

                \[\leadsto \left(1 - \left(\mathsf{neg}\left(\frac{\sin x \cdot \sin x}{\cos x \cdot \cos x}\right)\right)\right) \cdot \varepsilon \]
              7. frac-timesN/A

                \[\leadsto \left(1 - \left(\mathsf{neg}\left(\frac{\sin x}{\cos x} \cdot \frac{\sin x}{\cos x}\right)\right)\right) \cdot \varepsilon \]
              8. tan-quotN/A

                \[\leadsto \left(1 - \left(\mathsf{neg}\left(\tan x \cdot \frac{\sin x}{\cos x}\right)\right)\right) \cdot \varepsilon \]
              9. tan-quotN/A

                \[\leadsto \left(1 - \left(\mathsf{neg}\left(\tan x \cdot \tan x\right)\right)\right) \cdot \varepsilon \]
              10. lower-neg.f64N/A

                \[\leadsto \left(1 - \left(-\tan x \cdot \tan x\right)\right) \cdot \varepsilon \]
              11. pow2N/A

                \[\leadsto \left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
              12. lower-pow.f64N/A

                \[\leadsto \left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
              13. lift-tan.f64 (99.4%)

                \[\leadsto \left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            5. Applied rewrites99.4%

              \[\leadsto \color{blue}{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
            6. Final simplification99.4%

              \[\leadsto \left(1 + {\tan x}^{2}\right) \cdot \varepsilon \]
            7. Add Preprocessing

            Alternative 12: 98.4% accurate, 1.2× speedup?

            \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \frac{x}{\cos x}\right), \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \end{array} \]
            ;; Alternative 12: x/cos(x) term plus a degree-6 even polynomial replacing tan(x)^2 (binary64).
            (FPCore (x eps)
             :precision binary64
             (*
              (+
               (fma (fma (- eps) -0.3333333333333333 (/ x (cos x))) eps 1.0)
               (*
                (fma
                 (fma
                  (fma 0.19682539682539682 (* x x) 0.37777777777777777)
                  (* x x)
                  0.6666666666666666)
                 (* x x)
                 1.0)
                (* x x)))
              eps))
            /* Alternative 12: x/cos(x) term plus an even polynomial replacing tan(x)^2, scaled by eps. */
            double code(double x, double eps) {
            	return (fma(fma(-eps, -0.3333333333333333, (x / cos(x))), eps, 1.0) + (fma(fma(fma(0.19682539682539682, (x * x), 0.37777777777777777), (x * x), 0.6666666666666666), (x * x), 1.0) * (x * x))) * eps;
            }
            
            # Alternative 12: x/cos(x) term plus an even polynomial replacing tan(x)^2, scaled by eps.
            function code(x, eps)
            	return Float64(Float64(fma(fma(Float64(-eps), -0.3333333333333333, Float64(x / cos(x))), eps, 1.0) + Float64(fma(fma(fma(0.19682539682539682, Float64(x * x), 0.37777777777777777), Float64(x * x), 0.6666666666666666), Float64(x * x), 1.0) * Float64(x * x))) * eps)
            end
            
            (* Alternative 12: x/Cos[x] term plus an even polynomial replacing Tan[x]^2, scaled by eps. *)
            code[x_, eps_] := N[(N[(N[(N[((-eps) * -0.3333333333333333 + N[(x / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[(N[(N[(N[(0.19682539682539682 * N[(x * x), $MachinePrecision] + 0.37777777777777777), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \frac{x}{\cos x}\right), \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon
            \end{array}
            
            Derivation
            1. Initial program 65.9%

              \[\tan \left(x + \varepsilon\right) - \tan x \]
            2. Add Preprocessing
            3. Taylor expanded in eps around 0

              \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
            4. Applied rewrites100.0%

              \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
            5. Taylor expanded in x around 0

              \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
            6. Step-by-step derivation
              1. Applied rewrites99.9%

                \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
              2. Taylor expanded in x around 0

                \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
              3. Step-by-step derivation
                1. Applied rewrites99.6%

                  \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, 1 \cdot \frac{x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                2. Taylor expanded in x around 0

                  \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{x}{\cos x}\right), \varepsilon, 1\right) - \left(-{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right)\right)\right) \cdot \varepsilon \]
                3. Step-by-step derivation
                  1. *-commutativeN/A

                    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{x}{\cos x}\right), \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                  2. lower-*.f64N/A

                    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \frac{-1}{3}, 1 \cdot \frac{x}{\cos x}\right), \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                4. Applied rewrites99.4%

                  \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, 1 \cdot \frac{x}{\cos x}\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                5. Final simplification99.4%

                  \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, -0.3333333333333333, \frac{x}{\cos x}\right), \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                6. Add Preprocessing

                Alternative 13: 98.3% accurate, 1.3× speedup?

                \[\begin{array}{l} \\ \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + {\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.05396825396825397, x \cdot x, 0.13333333333333333\right), x \cdot x, 0.3333333333333333\right), x \cdot x, 1\right) \cdot x\right)}^{2}\right) \cdot \varepsilon \end{array} \]
                ;; Alternative 13: eps * (fma(x, eps, 1) + (p(x*x) * x)^2),
                ;; where p is a degree-3 polynomial in x*x evaluated with fused multiply-adds.
                (FPCore (x eps)
                 :precision binary64
                 (*
                  (+
                   (fma x eps 1.0)
                   (pow
                    (*
                     (fma
                      (fma
                       (fma 0.05396825396825397 (* x x) 0.13333333333333333)
                       (* x x)
                       0.3333333333333333)
                      (* x x)
                      1.0)
                     x)
                    2.0))
                  eps))
                double code(double x, double eps) {
                	return (fma(x, eps, 1.0) + pow((fma(fma(fma(0.05396825396825397, (x * x), 0.13333333333333333), (x * x), 0.3333333333333333), (x * x), 1.0) * x), 2.0)) * eps;
                }
                
                function code(x, eps)
                	# eps * (fma(x, eps, 1) + (p(x2) * x)^2); the Float64(...) wrappers
                	# mark the binary64 rounding points and are kept exactly as before.
                	x2 = Float64(x * x)
                	p = fma(fma(fma(0.05396825396825397, x2, 0.13333333333333333), x2, 0.3333333333333333), x2, 1.0)
                	t = Float64(p * x)
                	return Float64(Float64(fma(x, eps, 1.0) + t ^ 2.0) * eps)
                end
                
                code[x_, eps_] := N[(N[(N[(x * eps + 1.0), $MachinePrecision] + N[Power[N[(N[(N[(N[(0.05396825396825397 * N[(x * x), $MachinePrecision] + 0.13333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
                
                \begin{array}{l}
                
                \\
                \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + {\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.05396825396825397, x \cdot x, 0.13333333333333333\right), x \cdot x, 0.3333333333333333\right), x \cdot x, 1\right) \cdot x\right)}^{2}\right) \cdot \varepsilon
                \end{array}
                
                Derivation
                1. Initial program 65.9%

                  \[\tan \left(x + \varepsilon\right) - \tan x \]
                2. Add Preprocessing
                3. Taylor expanded in eps around 0

                  \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                4. Applied rewrites100.0%

                  \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                5. Taylor expanded in x around 0

                  \[\leadsto \left(\mathsf{fma}\left(x + \frac{1}{3} \cdot \varepsilon, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                6. Step-by-step derivation
                  1. +-commutativeN/A

                    \[\leadsto \left(\mathsf{fma}\left(\frac{1}{3} \cdot \varepsilon + x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                  2. lower-fma.f64 — 99.6%

                    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                7. Applied rewrites99.6%

                  \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                8. Taylor expanded in x around inf

                  \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                9. Step-by-step derivation
                  1. Applied rewrites99.6%

                    \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                  2. Taylor expanded in x around 0

                    \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{1}{3} + {x}^{2} \cdot \left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right)\right)\right)\right)}^{2}\right)\right) \cdot \varepsilon \]
                  3. Step-by-step derivation
                    1. *-commutativeN/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{3} + {x}^{2} \cdot \left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right)\right)\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    2. lower-*.f64N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\left(1 + {x}^{2} \cdot \left(\frac{1}{3} + {x}^{2} \cdot \left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right)\right)\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    3. +-commutativeN/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\left({x}^{2} \cdot \left(\frac{1}{3} + {x}^{2} \cdot \left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right)\right) + 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    4. *-commutativeN/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\left(\left(\frac{1}{3} + {x}^{2} \cdot \left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right)\right) \cdot {x}^{2} + 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    5. lower-fma.f64N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\frac{1}{3} + {x}^{2} \cdot \left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    6. +-commutativeN/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left({x}^{2} \cdot \left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right) + \frac{1}{3}, {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    7. *-commutativeN/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}\right) \cdot {x}^{2} + \frac{1}{3}, {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    8. lower-fma.f64N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{2}{15} + \frac{17}{315} \cdot {x}^{2}, {x}^{2}, \frac{1}{3}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    9. +-commutativeN/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{315} \cdot {x}^{2} + \frac{2}{15}, {x}^{2}, \frac{1}{3}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    10. lower-fma.f64N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{315}, {x}^{2}, \frac{2}{15}\right), {x}^{2}, \frac{1}{3}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    11. pow2N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{315}, x \cdot x, \frac{2}{15}\right), {x}^{2}, \frac{1}{3}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    12. lift-*.f64N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{315}, x \cdot x, \frac{2}{15}\right), {x}^{2}, \frac{1}{3}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    13. pow2N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{315}, x \cdot x, \frac{2}{15}\right), x \cdot x, \frac{1}{3}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    14. lift-*.f64N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{315}, x \cdot x, \frac{2}{15}\right), x \cdot x, \frac{1}{3}\right), {x}^{2}, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    15. pow2N/A

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{315}, x \cdot x, \frac{2}{15}\right), x \cdot x, \frac{1}{3}\right), x \cdot x, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                    16. lift-*.f64 — 99.3%

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.05396825396825397, x \cdot x, 0.13333333333333333\right), x \cdot x, 0.3333333333333333\right), x \cdot x, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                  4. Applied rewrites99.3%

                    \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.05396825396825397, x \cdot x, 0.13333333333333333\right), x \cdot x, 0.3333333333333333\right), x \cdot x, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon \]
                  5. Final simplification99.3%

                    \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + {\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.05396825396825397, x \cdot x, 0.13333333333333333\right), x \cdot x, 0.3333333333333333\right), x \cdot x, 1\right) \cdot x\right)}^{2}\right) \cdot \varepsilon \]
                  6. Add Preprocessing

                  Alternative 14: 98.3% accurate, 3.6× speedup?

                  \[\begin{array}{l} \\ \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \end{array} \]
                  ;; Alternative 14: eps * (fma(x, eps, 1) + q(x*x) * (x*x)),
                  ;; where q is a degree-3 polynomial in x*x evaluated with fused multiply-adds.
                  (FPCore (x eps)
                   :precision binary64
                   (*
                    (+
                     (fma x eps 1.0)
                     (*
                      (fma
                       (fma
                        (fma 0.19682539682539682 (* x x) 0.37777777777777777)
                        (* x x)
                        0.6666666666666666)
                       (* x x)
                       1.0)
                      (* x x)))
                    eps))
                  double code(double x, double eps) {
                  	return (fma(x, eps, 1.0) + (fma(fma(fma(0.19682539682539682, (x * x), 0.37777777777777777), (x * x), 0.6666666666666666), (x * x), 1.0) * (x * x))) * eps;
                  }
                  
                  function code(x, eps)
                  	# eps * (fma(x, eps, 1) + q(x2) * x2); Float64(...) wrappers mark the
                  	# binary64 rounding points and are preserved exactly.
                  	x2 = Float64(x * x)
                  	q = fma(fma(fma(0.19682539682539682, x2, 0.37777777777777777), x2, 0.6666666666666666), x2, 1.0)
                  	return Float64(Float64(fma(x, eps, 1.0) + Float64(q * x2)) * eps)
                  end
                  
                  code[x_, eps_] := N[(N[(N[(x * eps + 1.0), $MachinePrecision] + N[(N[(N[(N[(0.19682539682539682 * N[(x * x), $MachinePrecision] + 0.37777777777777777), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
                  
                  \begin{array}{l}
                  
                  \\
                  \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon
                  \end{array}
                  
                  Derivation
                  1. Initial program 65.9%

                    \[\tan \left(x + \varepsilon\right) - \tan x \]
                  2. Add Preprocessing
                  3. Taylor expanded in eps around 0

                    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                  4. Applied rewrites100.0%

                    \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                  5. Taylor expanded in x around 0

                    \[\leadsto \left(\mathsf{fma}\left(x + \frac{1}{3} \cdot \varepsilon, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                  6. Step-by-step derivation
                    1. +-commutativeN/A

                      \[\leadsto \left(\mathsf{fma}\left(\frac{1}{3} \cdot \varepsilon + x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    2. lower-fma.f64 — 99.6%

                      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                  7. Applied rewrites99.6%

                    \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                  8. Taylor expanded in x around inf

                    \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                  9. Step-by-step derivation
                    1. Applied rewrites99.6%

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    2. Taylor expanded in x around 0

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right)\right)\right) \cdot \varepsilon \]
                    3. Step-by-step derivation
                      1. *-commutativeN/A

                        \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      2. lower-*.f64N/A

                        \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + {x}^{2} \cdot \left(\frac{17}{45} + \frac{62}{315} \cdot {x}^{2}\right)\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                    4. Applied rewrites99.3%

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                    5. Final simplification99.3%

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                    6. Add Preprocessing

                    Alternative 15: 98.3% accurate, 3.9× speedup?

                    \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \end{array} \]
                    ;; Alternative 15: like Alternative 14 but the leading fma folds an
                    ;; eps/3 correction into x, and the tail polynomial is degree 2 in x*x.
                    (FPCore (x eps)
                     :precision binary64
                     (*
                      (+
                       (fma (fma 0.3333333333333333 eps x) eps 1.0)
                       (*
                        (fma (fma 0.37777777777777777 (* x x) 0.6666666666666666) (* x x) 1.0)
                        (* x x)))
                      eps))
                    double code(double x, double eps) {
                    	return (fma(fma(0.3333333333333333, eps, x), eps, 1.0) + (fma(fma(0.37777777777777777, (x * x), 0.6666666666666666), (x * x), 1.0) * (x * x))) * eps;
                    }
                    
                    function code(x, eps)
                    	# lead folds the eps/3 correction into x via nested fma; tail is the
                    	# degree-2 polynomial in x2 times x2. Float64(...) rounding points kept.
                    	x2 = Float64(x * x)
                    	tail = Float64(fma(fma(0.37777777777777777, x2, 0.6666666666666666), x2, 1.0) * x2)
                    	lead = fma(fma(0.3333333333333333, eps, x), eps, 1.0)
                    	return Float64(Float64(lead + tail) * eps)
                    end
                    
                    code[x_, eps_] := N[(N[(N[(N[(0.3333333333333333 * eps + x), $MachinePrecision] * eps + 1.0), $MachinePrecision] + N[(N[(N[(0.37777777777777777 * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
                    
                    \begin{array}{l}
                    
                    \\
                    \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon
                    \end{array}
                    
                    Derivation
                    1. Initial program 65.9%

                      \[\tan \left(x + \varepsilon\right) - \tan x \]
                    2. Add Preprocessing
                    3. Taylor expanded in eps around 0

                      \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                    4. Applied rewrites100.0%

                      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                    5. Taylor expanded in x around 0

                      \[\leadsto \left(\mathsf{fma}\left(x + \frac{1}{3} \cdot \varepsilon, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    6. Step-by-step derivation
                      1. +-commutativeN/A

                        \[\leadsto \left(\mathsf{fma}\left(\frac{1}{3} \cdot \varepsilon + x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                      2. lower-fma.f64 — 99.6%

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    7. Applied rewrites99.6%

                      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    8. Taylor expanded in x around 0

                      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right)\right)\right) \cdot \varepsilon \]
                    9. Step-by-step derivation
                      1. *-commutativeN/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      2. lower-*.f64N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      3. +-commutativeN/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left({x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right) + 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      4. *-commutativeN/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left(\left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right) \cdot {x}^{2} + 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      5. lower-fma.f64N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}, {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      6. +-commutativeN/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\frac{17}{45} \cdot {x}^{2} + \frac{2}{3}, {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      7. lower-fma.f64N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, {x}^{2}, \frac{2}{3}\right), {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      8. pow2N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      9. lift-*.f64N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      10. pow2N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), x \cdot x, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      11. lift-*.f64N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), x \cdot x, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                      12. pow2N/A

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                      13. lift-*.f64 — 99.3%

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                    10. Applied rewrites99.3%

                      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                    11. Final simplification99.3%

                      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) + \mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                    12. Add Preprocessing

                    Alternative 16: 98.3% accurate, 4.1× speedup?

                    \[\begin{array}{l} \\ \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(-0.37777777777777777 \cdot \left(x \cdot x\right) - 0.6666666666666666\right) \cdot \left(x \cdot x\right) - 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \end{array} \]
                    ;; Alternative 16: same value as Alternative 14's tail written with
                    ;; negated coefficients and plain subtractions instead of fmas.
                    (FPCore (x eps)
                     :precision binary64
                     (*
                      (-
                       (fma x eps 1.0)
                       (*
                        (- (* (- (* -0.37777777777777777 (* x x)) 0.6666666666666666) (* x x)) 1.0)
                        (* x x)))
                      eps))
                    double code(double x, double eps) {
                    	return (fma(x, eps, 1.0) - (((((-0.37777777777777777 * (x * x)) - 0.6666666666666666) * (x * x)) - 1.0) * (x * x))) * eps;
                    }
                    
                    function code(x, eps)
                    	# negated-coefficient variant: inner is built with the same
                    	# multiply/subtract sequence, each Float64(...) rounding point kept.
                    	x2 = Float64(x * x)
                    	a = Float64(Float64(-0.37777777777777777 * x2) - 0.6666666666666666)
                    	inner = Float64(Float64(a * x2) - 1.0)
                    	return Float64(Float64(fma(x, eps, 1.0) - Float64(inner * x2)) * eps)
                    end
                    
                    code[x_, eps_] := N[(N[(N[(x * eps + 1.0), $MachinePrecision] - N[(N[(N[(N[(N[(-0.37777777777777777 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
                    
                    \begin{array}{l}
                    
                    \\
                    \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(-0.37777777777777777 \cdot \left(x \cdot x\right) - 0.6666666666666666\right) \cdot \left(x \cdot x\right) - 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon
                    \end{array}
                    
                    Derivation
                    1. Initial program 65.9%

                      \[\tan \left(x + \varepsilon\right) - \tan x \]
                    2. Add Preprocessing
                    3. Taylor expanded in eps around 0

                      \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                    4. Applied rewrites100.0%

                      \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                    5. Taylor expanded in x around 0

                      \[\leadsto \left(\mathsf{fma}\left(x + \frac{1}{3} \cdot \varepsilon, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    6. Step-by-step derivation
                      1. +-commutativeN/A

                        \[\leadsto \left(\mathsf{fma}\left(\frac{1}{3} \cdot \varepsilon + x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                      2. lower-fma.f64 99.6%

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    7. Applied rewrites 99.6%

                      \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    8. Taylor expanded in x around inf

                      \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                    9. Step-by-step derivation
                      1. Applied rewrites99.6%

                        \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                      2. Taylor expanded in x around 0

                        \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) - 1\right)\right) \cdot \varepsilon \]
                      3. Step-by-step derivation
                        1. *-commutativeN/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left({x}^{2} \cdot \left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        2. lower-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left({x}^{2} \cdot \left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        3. lower--.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left({x}^{2} \cdot \left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        4. *-commutativeN/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) \cdot {x}^{2} - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        5. lower-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) \cdot {x}^{2} - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        6. lower--.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) \cdot {x}^{2} - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        7. lower-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot {x}^{2} - \frac{2}{3}\right) \cdot {x}^{2} - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        8. pow2N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot \left(x \cdot x\right) - \frac{2}{3}\right) \cdot {x}^{2} - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        9. lift-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot \left(x \cdot x\right) - \frac{2}{3}\right) \cdot {x}^{2} - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        10. pow2N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot \left(x \cdot x\right) - \frac{2}{3}\right) \cdot \left(x \cdot x\right) - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        11. lift-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot \left(x \cdot x\right) - \frac{2}{3}\right) \cdot \left(x \cdot x\right) - 1\right) \cdot {x}^{2}\right) \cdot \varepsilon \]
                        12. pow2N/A

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(\frac{-17}{45} \cdot \left(x \cdot x\right) - \frac{2}{3}\right) \cdot \left(x \cdot x\right) - 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                        13. lift-*.f64 99.3%

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(-0.37777777777777777 \cdot \left(x \cdot x\right) - 0.6666666666666666\right) \cdot \left(x \cdot x\right) - 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                      4. Applied rewrites 99.3%

                        \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(\left(-0.37777777777777777 \cdot \left(x \cdot x\right) - 0.6666666666666666\right) \cdot \left(x \cdot x\right) - 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                      5. Add Preprocessing

                      Alternative 17: 98.2% accurate, 5.0× speedup?

                      \[\begin{array}{l} \\ \left(1 + \mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \end{array} \]
                      ; tan(x+eps) - tan(x), replaced by a Taylor polynomial in x times eps:
                      ; constants 0.37777... = 17/45 and 0.66666... = 2/3 (see derivation below).
                      (FPCore (x eps)
                       :precision binary64
                       (*
                        (+
                         1.0
                         (*
                          (fma (fma 0.37777777777777777 (* x x) 0.6666666666666666) (* x x) 1.0)
                          (* x x)))
                        eps))
                      double code(double x, double eps) {
                      	return (1.0 + (fma(fma(0.37777777777777777, (x * x), 0.6666666666666666), (x * x), 1.0) * (x * x))) * eps;
                      }
                      
                      # Alternative 17 in Julia: identical arithmetic to the C version,
                      # with explicit Float64 rounding after each compound operation.
                      function code(x, eps)
                      	sq = Float64(x * x)
                      	poly = fma(fma(0.37777777777777777, sq, 0.6666666666666666), sq, 1.0)
                      	return Float64(Float64(1.0 + Float64(poly * sq)) * eps)
                      end
                      
                      code[x_, eps_] := N[(N[(1.0 + N[(N[(N[(0.37777777777777777 * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
                      
                      \begin{array}{l}
                      
                      \\
                      \left(1 + \mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon
                      \end{array}
                      
                      Derivation
                      1. Initial program 65.9%

                        \[\tan \left(x + \varepsilon\right) - \tan x \]
                      2. Add Preprocessing
                      3. Taylor expanded in eps around 0

                        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                      4. Applied rewrites 100.0%

                        \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                      5. Taylor expanded in x around 0

                        \[\leadsto \left(\mathsf{fma}\left(x + \frac{1}{3} \cdot \varepsilon, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                      6. Step-by-step derivation
                        1. +-commutativeN/A

                          \[\leadsto \left(\mathsf{fma}\left(\frac{1}{3} \cdot \varepsilon + x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                        2. lower-fma.f64 99.6%

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                      7. Applied rewrites99.6%

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                      8. Taylor expanded in x around 0

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right)\right)\right) \cdot \varepsilon \]
                      9. Step-by-step derivation
                        1. *-commutativeN/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        2. lower-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        3. +-commutativeN/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left({x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right) + 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        4. *-commutativeN/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\left(\left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right) \cdot {x}^{2} + 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        5. lower-fma.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}, {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        6. +-commutativeN/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\frac{17}{45} \cdot {x}^{2} + \frac{2}{3}, {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        7. lower-fma.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, {x}^{2}, \frac{2}{3}\right), {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        8. pow2N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        9. lift-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), {x}^{2}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        10. pow2N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), x \cdot x, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        11. lift-*.f64N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), x \cdot x, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                        12. pow2N/A

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(\frac{1}{3}, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                          13. lift-*.f64 99.3%

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                      10. Applied rewrites99.3%

                        \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                      11. Taylor expanded in eps around 0

                        \[\leadsto \left(1 - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\frac{17}{45}, x \cdot x, \frac{2}{3}\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                      12. Step-by-step derivation
                        1. Applied rewrites99.1%

                          \[\leadsto \left(1 - \left(-\mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                        2. Final simplification 99.1%

                          \[\leadsto \left(1 + \mathsf{fma}\left(\mathsf{fma}\left(0.37777777777777777, x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                        3. Add Preprocessing

                        Alternative 18: 98.2% accurate, 5.8× speedup?

                        \[\begin{array}{l} \\ \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + \mathsf{fma}\left(x \cdot x, 0.6666666666666666, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \end{array} \]
                        ; Shorter variant: (fma(x,eps,1) + (2/3*x^2 + 1)*x^2) * eps,
                        ; dropping the 17/45 term of Alternative 17.
                        (FPCore (x eps)
                         :precision binary64
                         (* (+ (fma x eps 1.0) (* (fma (* x x) 0.6666666666666666 1.0) (* x x))) eps))
                        double code(double x, double eps) {
                        	return (fma(x, eps, 1.0) + (fma((x * x), 0.6666666666666666, 1.0) * (x * x))) * eps;
                        }
                        
                        # Alternative 18 in Julia: same operations as the C version,
                        # with explicit Float64 rounding after each compound operation.
                        function code(x, eps)
                        	sq = Float64(x * x)
                        	tail = Float64(fma(sq, 0.6666666666666666, 1.0) * sq)
                        	return Float64(Float64(fma(x, eps, 1.0) + tail) * eps)
                        end
                        
                        code[x_, eps_] := N[(N[(N[(x * eps + 1.0), $MachinePrecision] + N[(N[(N[(x * x), $MachinePrecision] * 0.6666666666666666 + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
                        
                        \begin{array}{l}
                        
                        \\
                        \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + \mathsf{fma}\left(x \cdot x, 0.6666666666666666, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon
                        \end{array}
                        
                        Derivation
                        1. Initial program 65.9%

                          \[\tan \left(x + \varepsilon\right) - \tan x \]
                        2. Add Preprocessing
                        3. Taylor expanded in eps around 0

                          \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                        4. Applied rewrites100.0%

                          \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                        5. Taylor expanded in x around 0

                          \[\leadsto \left(\mathsf{fma}\left(x + \frac{1}{3} \cdot \varepsilon, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                        6. Step-by-step derivation
                          1. +-commutativeN/A

                            \[\leadsto \left(\mathsf{fma}\left(\frac{1}{3} \cdot \varepsilon + x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                          2. lower-fma.f64 99.6%

                            \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                        7. Applied rewrites99.6%

                          \[\leadsto \left(\mathsf{fma}\left(\mathsf{fma}\left(0.3333333333333333, \varepsilon, x\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                        8. Taylor expanded in x around inf

                          \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                        9. Step-by-step derivation
                          1. Applied rewrites99.6%

                            \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon \]
                          2. Taylor expanded in x around 0

                            \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-{x}^{2} \cdot \left(1 + \frac{2}{3} \cdot {x}^{2}\right)\right)\right) \cdot \varepsilon \]
                          3. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                            2. lower-*.f64N/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                            3. +-commutativeN/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\left(\frac{2}{3} \cdot {x}^{2} + 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                            4. *-commutativeN/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\left({x}^{2} \cdot \frac{2}{3} + 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                            5. lower-fma.f64N/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\mathsf{fma}\left({x}^{2}, \frac{2}{3}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                            6. pow2N/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\mathsf{fma}\left(x \cdot x, \frac{2}{3}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                            7. lift-*.f64N/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\mathsf{fma}\left(x \cdot x, \frac{2}{3}, 1\right) \cdot {x}^{2}\right)\right) \cdot \varepsilon \]
                            8. pow2N/A

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\mathsf{fma}\left(x \cdot x, \frac{2}{3}, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                            9. lift-*.f6499.1

                              \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\mathsf{fma}\left(x \cdot x, 0.6666666666666666, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                          4. Applied rewrites 99.1%

                            \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) - \left(-\mathsf{fma}\left(x \cdot x, 0.6666666666666666, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon \]
                          5. Final simplification99.1%

                            \[\leadsto \left(\mathsf{fma}\left(x, \varepsilon, 1\right) + \mathsf{fma}\left(x \cdot x, 0.6666666666666666, 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon \]
                          6. Add Preprocessing

                          Alternative 19: 98.1% accurate, 6.7× speedup?

                          \[\begin{array}{l} \\ \mathsf{fma}\left(\left(\varepsilon + x\right) \cdot \varepsilon, x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.3333333333333333, 1\right) \cdot \varepsilon\right) \end{array} \]
                          ; Eps-centered variant: fma((eps+x)*eps, x, (eps^2/3 + 1)*eps)
                          ; = x*(eps+x)*eps + eps + eps^3/3 up to rounding.
                          (FPCore (x eps)
                           :precision binary64
                           (fma (* (+ eps x) eps) x (* (fma (* eps eps) 0.3333333333333333 1.0) eps)))
                          double code(double x, double eps) {
                          	return fma(((eps + x) * eps), x, (fma((eps * eps), 0.3333333333333333, 1.0) * eps));
                          }
                          
                          # Alternative 19 in Julia: same operations as the C version,
                          # with explicit Float64 rounding after each compound operation.
                          function code(x, eps)
                          	lead = Float64(Float64(eps + x) * eps)
                          	tail = Float64(fma(Float64(eps * eps), 0.3333333333333333, 1.0) * eps)
                          	return fma(lead, x, tail)
                          end
                          
                          code[x_, eps_] := N[(N[(N[(eps + x), $MachinePrecision] * eps), $MachinePrecision] * x + N[(N[(N[(eps * eps), $MachinePrecision] * 0.3333333333333333 + 1.0), $MachinePrecision] * eps), $MachinePrecision]), $MachinePrecision]
                          
                          \begin{array}{l}
                          
                          \\
                          \mathsf{fma}\left(\left(\varepsilon + x\right) \cdot \varepsilon, x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.3333333333333333, 1\right) \cdot \varepsilon\right)
                          \end{array}
                          
                          Derivation
                          1. Initial program 65.9%

                            \[\tan \left(x + \varepsilon\right) - \tan x \]
                          2. Add Preprocessing
                          3. Taylor expanded in eps around 0

                            \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                          4. Applied rewrites100.0%

                            \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                          5. Taylor expanded in x around 0

                            \[\leadsto \varepsilon \cdot \left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right) + \color{blue}{x \cdot \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right)} \]
                          6. Step-by-step derivation
                            1. +-commutativeN/A

                              \[\leadsto x \cdot \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right) + \varepsilon \cdot \color{blue}{\left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right)} \]
                            2. *-commutativeN/A

                              \[\leadsto \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right) \cdot x + \varepsilon \cdot \left(\color{blue}{1} + \frac{1}{3} \cdot {\varepsilon}^{2}\right) \]
                            3. lower-fma.f64N/A

                              \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}, x, \varepsilon \cdot \left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right)\right) \]
                          7. Applied rewrites98.9%

                            \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, \varepsilon \cdot \varepsilon, 1\right) \cdot x, \varepsilon, \varepsilon \cdot \varepsilon\right), \color{blue}{x}, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.3333333333333333, 1\right) \cdot \varepsilon\right) \]
                          8. Taylor expanded in eps around 0

                            \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \left(\varepsilon + x\right), x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \frac{1}{3}, 1\right) \cdot \varepsilon\right) \]
                          9. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \mathsf{fma}\left(\left(\varepsilon + x\right) \cdot \varepsilon, x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \frac{1}{3}, 1\right) \cdot \varepsilon\right) \]
                            2. lower-*.f64N/A

                              \[\leadsto \mathsf{fma}\left(\left(\varepsilon + x\right) \cdot \varepsilon, x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \frac{1}{3}, 1\right) \cdot \varepsilon\right) \]
                            3. lower-+.f64 98.9%

                              \[\leadsto \mathsf{fma}\left(\left(\varepsilon + x\right) \cdot \varepsilon, x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.3333333333333333, 1\right) \cdot \varepsilon\right) \]
                          10. Applied rewrites98.9%

                            \[\leadsto \mathsf{fma}\left(\left(\varepsilon + x\right) \cdot \varepsilon, x, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.3333333333333333, 1\right) \cdot \varepsilon\right) \]
                          11. Add Preprocessing

                          Alternative 20: 98.1% accurate, 10.4× speedup?

                          \[\begin{array}{l} \\ \left(\mathsf{fma}\left(\varepsilon, x, x \cdot x\right) - -1\right) \cdot \varepsilon \end{array} \]
                          (FPCore (x eps) :precision binary64 (* (- (fma eps x (* x x)) -1.0) eps))
                          double code(double x, double eps) {
                          	return (fma(eps, x, (x * x)) - -1.0) * eps;
                          }
                          
                          # Alternative 20 in Julia: same operations as the C version,
                          # with explicit Float64 rounding after each compound operation.
                          function code(x, eps)
                          	quad = fma(eps, x, Float64(x * x))
                          	return Float64(Float64(quad - -1.0) * eps)
                          end
                          
                          code[x_, eps_] := N[(N[(N[(eps * x + N[(x * x), $MachinePrecision]), $MachinePrecision] - -1.0), $MachinePrecision] * eps), $MachinePrecision]
                          
                          \begin{array}{l}
                          
                          \\
                          \left(\mathsf{fma}\left(\varepsilon, x, x \cdot x\right) - -1\right) \cdot \varepsilon
                          \end{array}
                          
                          Derivation
                          1. Initial program 65.9%

                            \[\tan \left(x + \varepsilon\right) - \tan x \]
                          2. Add Preprocessing
                          3. Taylor expanded in eps around 0

                            \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                          4. Applied rewrites100.0%

                            \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                          5. Taylor expanded in x around 0

                            \[\leadsto \varepsilon \cdot \left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right) + \color{blue}{x \cdot \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right)} \]
                          6. Step-by-step derivation
                            1. +-commutativeN/A

                              \[\leadsto x \cdot \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right) + \varepsilon \cdot \color{blue}{\left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right)} \]
                            2. *-commutativeN/A

                              \[\leadsto \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right) \cdot x + \varepsilon \cdot \left(\color{blue}{1} + \frac{1}{3} \cdot {\varepsilon}^{2}\right) \]
                            3. lower-fma.f64N/A

                              \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}, x, \varepsilon \cdot \left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right)\right) \]
                          7. Applied rewrites 98.9%

                            \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, \varepsilon \cdot \varepsilon, 1\right) \cdot x, \varepsilon, \varepsilon \cdot \varepsilon\right), \color{blue}{x}, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.3333333333333333, 1\right) \cdot \varepsilon\right) \]
                          8. Taylor expanded in eps around 0

                            \[\leadsto \varepsilon \cdot \left(1 + \color{blue}{\left(\varepsilon \cdot x + {x}^{2}\right)}\right) \]
                          9. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \left(1 + \left(\varepsilon \cdot x + {x}^{2}\right)\right) \cdot \varepsilon \]
                            2. lower-*.f64N/A

                              \[\leadsto \left(1 + \left(\varepsilon \cdot x + {x}^{2}\right)\right) \cdot \varepsilon \]
                            3. +-commutativeN/A

                              \[\leadsto \left(\left(\varepsilon \cdot x + {x}^{2}\right) + 1\right) \cdot \varepsilon \]
                            4. lower-+.f64N/A

                              \[\leadsto \left(\left(\varepsilon \cdot x + {x}^{2}\right) + 1\right) \cdot \varepsilon \]
                            5. lower-fma.f64N/A

                              \[\leadsto \left(\mathsf{fma}\left(\varepsilon, x, {x}^{2}\right) + 1\right) \cdot \varepsilon \]
                            6. pow2N/A

                              \[\leadsto \left(\mathsf{fma}\left(\varepsilon, x, x \cdot x\right) + 1\right) \cdot \varepsilon \]
                            7. lift-*.f64 98.9

                              \[\leadsto \left(\mathsf{fma}\left(\varepsilon, x, x \cdot x\right) + 1\right) \cdot \varepsilon \]
                          10. Applied rewrites 98.9%

                            \[\leadsto \left(\mathsf{fma}\left(\varepsilon, x, x \cdot x\right) + 1\right) \cdot \varepsilon \]
                          11. Final simplification 98.9%

                            \[\leadsto \left(\mathsf{fma}\left(\varepsilon, x, x \cdot x\right) - -1\right) \cdot \varepsilon \]
                          12. Add Preprocessing

                          Alternative 21: 98.0% accurate, 17.3× speedup?

                          \[\begin{array}{l} \\ \mathsf{fma}\left(x \cdot x, \varepsilon, \varepsilon\right) \end{array} \]
                          (FPCore (x eps) :precision binary64 (fma (* x x) eps eps))
                          double code(double x, double eps) {
                          	return fma((x * x), eps, eps);
                          }
                          
                          # Alternative 21: eps * (1 + x^2) via a single fused multiply-add.
                          function code(x, eps)
                          	return fma(Float64(x * x), eps, eps)
                          end
                          
                          (* fma-shaped x*x*eps + eps, with intermediates rounded to $MachinePrecision. *)
                          code[x_, eps_] := N[(N[(x * x), $MachinePrecision] * eps + eps), $MachinePrecision]
                          
                          \begin{array}{l}
                          
                          \\
                          \mathsf{fma}\left(x \cdot x, \varepsilon, \varepsilon\right)
                          \end{array}
                          
                          Derivation
                          1. Initial program 65.9%

                            \[\tan \left(x + \varepsilon\right) - \tan x \]
                          2. Add Preprocessing
                          3. Taylor expanded in eps around 0

                            \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(\frac{1}{6} + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(\frac{-1}{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{1}{6} \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
                          4. Applied rewrites 100.0%

                            \[\leadsto \color{blue}{\left(\mathsf{fma}\left(\mathsf{fma}\left(-\varepsilon, \mathsf{fma}\left(\frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(1 - \left(-{\tan x}^{2}\right), -0.5, {\tan x}^{2} \cdot 0.16666666666666666\right)\right) + 0.16666666666666666, 1 \cdot \frac{\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \sin x}{\cos x}\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon} \]
                          5. Taylor expanded in x around 0

                            \[\leadsto \varepsilon \cdot \left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right) + \color{blue}{x \cdot \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right)} \]
                          6. Step-by-step derivation
                            1. +-commutativeN/A

                              \[\leadsto x \cdot \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right) + \varepsilon \cdot \color{blue}{\left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right)} \]
                            2. *-commutativeN/A

                              \[\leadsto \left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right) \cdot x + \varepsilon \cdot \left(\color{blue}{1} + \frac{1}{3} \cdot {\varepsilon}^{2}\right) \]
                            3. lower-fma.f64N/A

                              \[\leadsto \mathsf{fma}\left(\varepsilon \cdot \left(x \cdot \left(1 + \frac{4}{3} \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}, x, \varepsilon \cdot \left(1 + \frac{1}{3} \cdot {\varepsilon}^{2}\right)\right) \]
                          7. Applied rewrites 98.9%

                            \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, \varepsilon \cdot \varepsilon, 1\right) \cdot x, \varepsilon, \varepsilon \cdot \varepsilon\right), \color{blue}{x}, \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.3333333333333333, 1\right) \cdot \varepsilon\right) \]
                          8. Taylor expanded in eps around 0

                            \[\leadsto \varepsilon \cdot \left(1 + \color{blue}{{x}^{2}}\right) \]
                          9. Step-by-step derivation
                            1. *-commutativeN/A

                              \[\leadsto \left(1 + {x}^{2}\right) \cdot \varepsilon \]
                            2. lower-*.f64N/A

                              \[\leadsto \left(1 + {x}^{2}\right) \cdot \varepsilon \]
                            3. +-commutativeN/A

                              \[\leadsto \left({x}^{2} + 1\right) \cdot \varepsilon \]
                            4. pow2N/A

                              \[\leadsto \left(x \cdot x + 1\right) \cdot \varepsilon \]
                            5. lower-fma.f64 98.8

                              \[\leadsto \mathsf{fma}\left(x, x, 1\right) \cdot \varepsilon \]
                          10. Applied rewrites 98.8%

                            \[\leadsto \mathsf{fma}\left(x, x, 1\right) \cdot \varepsilon \]
                          11. Taylor expanded in x around 0

                            \[\leadsto \varepsilon + \varepsilon \cdot {x}^{\color{blue}{2}} \]
                          12. Step-by-step derivation
                            1. +-commutativeN/A

                              \[\leadsto \varepsilon \cdot {x}^{2} + \varepsilon \]
                            2. *-commutativeN/A

                              \[\leadsto {x}^{2} \cdot \varepsilon + \varepsilon \]
                            3. lower-fma.f64N/A

                              \[\leadsto \mathsf{fma}\left({x}^{2}, \varepsilon, \varepsilon\right) \]
                            4. pow2N/A

                              \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon, \varepsilon\right) \]
                            5. lift-*.f64 98.8

                              \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon, \varepsilon\right) \]
                          13. Applied rewrites 98.8%

                            \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon, \varepsilon\right) \]
                          14. Add Preprocessing

                          Alternative 22: 97.7% accurate, 207.0× speedup?

                          \[\begin{array}{l} \\ \varepsilon \end{array} \]
                          (FPCore (x eps) :precision binary64 eps)
                          /* Alternative 22: tan(x+eps) - tan(x) approximated by its leading term eps. */
                          double code(double x, double eps) {
                          	(void) x; /* x does not enter this approximation */
                          	return eps;
                          }
                          
                          !> IEEE-style fmax/fmin helpers: unlike the bare max/min intrinsics these
                          !> return the non-NaN operand when exactly one argument is NaN (NaN detected
                          !> via x /= x), matching C's fmax/fmin. Generic interfaces cover every
                          !> combination of real(4) and real(8) arguments.
                          module fmin_fmax_functions
                              implicit none
                              private
                              public fmax
                              public fmin
                          
                              interface fmax
                                  module procedure fmax88
                                  module procedure fmax44
                                  module procedure fmax84
                                  module procedure fmax48
                              end interface
                              interface fmin
                                  module procedure fmin88
                                  module procedure fmin44
                                  module procedure fmin84
                                  module procedure fmin48
                              end interface
                          contains
                              !> fmax of two real(8) values with NaN-avoiding semantics.
                              real(8) function fmax88(x, y) result (res)
                                  real(8), intent (in) :: x
                                  real(8), intent (in) :: y
                                  res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                              end function
                              !> fmax of two real(4) values with NaN-avoiding semantics.
                              real(4) function fmax44(x, y) result (res)
                                  real(4), intent (in) :: x
                                  real(4), intent (in) :: y
                                  res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                              end function
                              !> Mixed-kind fmax; the real(4) argument is promoted to real(8).
                              real(8) function fmax84(x, y) result(res)
                                  real(8), intent (in) :: x
                                  real(4), intent (in) :: y
                                  res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                              end function
                              !> Mixed-kind fmax; the real(4) argument is promoted to real(8).
                              real(8) function fmax48(x, y) result(res)
                                  real(4), intent (in) :: x
                                  real(8), intent (in) :: y
                                  res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                              end function
                              !> fmin of two real(8) values with NaN-avoiding semantics.
                              real(8) function fmin88(x, y) result (res)
                                  real(8), intent (in) :: x
                                  real(8), intent (in) :: y
                                  res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                              end function
                              !> fmin of two real(4) values with NaN-avoiding semantics.
                              real(4) function fmin44(x, y) result (res)
                                  real(4), intent (in) :: x
                                  real(4), intent (in) :: y
                                  res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                              end function
                              !> Mixed-kind fmin; the real(4) argument is promoted to real(8).
                              real(8) function fmin84(x, y) result(res)
                                  real(8), intent (in) :: x
                                  real(4), intent (in) :: y
                                  res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                              end function
                              !> Mixed-kind fmin; the real(4) argument is promoted to real(8).
                              real(8) function fmin48(x, y) result(res)
                                  real(4), intent (in) :: x
                                  real(8), intent (in) :: y
                                  res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                              end function
                          end module
                          
                          !> Herbie alternative 22: approximates tan(x + eps) - tan(x) by eps alone;
                          !> x is unused in this variant.
                          real(8) function code(x, eps)
                          use fmin_fmax_functions
                              real(8), intent (in) :: x
                              real(8), intent (in) :: eps
                              code = eps
                          end function
                          
                          // Herbie alternative 22: approximates tan(x + eps) - tan(x) by eps alone; x is unused.
                          public static double code(double x, double eps) {
                          	return eps;
                          }
                          
                          def code(x, eps):
                          	"""Herbie alternative 22: approximate tan(x + eps) - tan(x) by eps (x unused)."""
                          	return eps
                          
                          # Herbie alternative 22: approximates tan(x + eps) - tan(x) by eps alone; x is unused.
                          function code(x, eps)
                          	return eps
                          end
                          
                          % Herbie alternative 22: approximates tan(x + eps) - tan(x) by eps alone; x is unused.
                          function tmp = code(x, eps)
                          	tmp = eps;
                          end
                          
                          (* Herbie alternative 22: approximates tan(x + eps) - tan(x) by eps alone. *)
                          code[x_, eps_] := eps
                          
                          \begin{array}{l}
                          
                          \\
                          \varepsilon
                          \end{array}
                          
                          Derivation
                          1. Initial program 65.9%

                            \[\tan \left(x + \varepsilon\right) - \tan x \]
                          2. Add Preprocessing
                          3. Taylor expanded in x around 0

                            \[\leadsto \color{blue}{\frac{\sin \varepsilon}{\cos \varepsilon}} \]
                          4. Step-by-step derivation
                            1. quot-tanN/A

                              \[\leadsto \tan \varepsilon \]
                            2. lower-tan.f64 98.2

                              \[\leadsto \tan \varepsilon \]
                          5. Applied rewrites 98.2%

                            \[\leadsto \color{blue}{\tan \varepsilon} \]
                          6. Taylor expanded in eps around 0

                            \[\leadsto \varepsilon \]
                          7. Step-by-step derivation
                            1. Applied rewrites 98.2%

                              \[\leadsto \varepsilon \]
                            2. Add Preprocessing

                            Developer Target 1: 98.9% accurate, 1.0× speedup?

                            \[\begin{array}{l} \\ \varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x \end{array} \]
                            (FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
                            double code(double x, double eps) {
                            	return eps + ((eps * tan(x)) * tan(x));
                            }
                            
                            !> IEEE-style fmax/fmin helpers: unlike the bare max/min intrinsics these
                            !> return the non-NaN operand when exactly one argument is NaN (NaN detected
                            !> via x /= x), matching C's fmax/fmin. Generic interfaces cover every
                            !> combination of real(4) and real(8) arguments.
                            module fmin_fmax_functions
                                implicit none
                                private
                                public fmax
                                public fmin
                            
                                interface fmax
                                    module procedure fmax88
                                    module procedure fmax44
                                    module procedure fmax84
                                    module procedure fmax48
                                end interface
                                interface fmin
                                    module procedure fmin88
                                    module procedure fmin44
                                    module procedure fmin84
                                    module procedure fmin48
                                end interface
                            contains
                                !> fmax of two real(8) values with NaN-avoiding semantics.
                                real(8) function fmax88(x, y) result (res)
                                    real(8), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                end function
                                !> fmax of two real(4) values with NaN-avoiding semantics.
                                real(4) function fmax44(x, y) result (res)
                                    real(4), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(y, merge(x, max(x, y), y /= y), x /= x)
                                end function
                                !> Mixed-kind fmax; the real(4) argument is promoted to real(8).
                                real(8) function fmax84(x, y) result(res)
                                    real(8), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
                                end function
                                !> Mixed-kind fmax; the real(4) argument is promoted to real(8).
                                real(8) function fmax48(x, y) result(res)
                                    real(4), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
                                end function
                                !> fmin of two real(8) values with NaN-avoiding semantics.
                                real(8) function fmin88(x, y) result (res)
                                    real(8), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                end function
                                !> fmin of two real(4) values with NaN-avoiding semantics.
                                real(4) function fmin44(x, y) result (res)
                                    real(4), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(y, merge(x, min(x, y), y /= y), x /= x)
                                end function
                                !> Mixed-kind fmin; the real(4) argument is promoted to real(8).
                                real(8) function fmin84(x, y) result(res)
                                    real(8), intent (in) :: x
                                    real(4), intent (in) :: y
                                    res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
                                end function
                                !> Mixed-kind fmin; the real(4) argument is promoted to real(8).
                                real(8) function fmin48(x, y) result(res)
                                    real(4), intent (in) :: x
                                    real(8), intent (in) :: y
                                    res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
                                end function
                            end module
                            
                            !> Developer target: eps + eps*tan(x)**2 (= eps * (1/cos(x))**2), the
                            !> first-order expansion of tan(x + eps) - tan(x) in eps.
                            real(8) function code(x, eps)
                            use fmin_fmax_functions
                                real(8), intent (in) :: x
                                real(8), intent (in) :: eps
                                code = eps + ((eps * tan(x)) * tan(x))
                            end function
                            
                            // Developer target: eps + eps*tan(x)^2, the first-order expansion of tan(x+eps) - tan(x).
                            public static double code(double x, double eps) {
                            	return eps + ((eps * Math.tan(x)) * Math.tan(x));
                            }
                            
                            def code(x, eps):
                            	"""Developer target: eps * (1 + tan(x)**2), the first-order change of tan at x.

                            	Keeps the original operation order eps + ((eps * t) * t), so results
                            	are bit-identical to the unfactored form.
                            	"""
                            	t = math.tan(x)
                            	return eps + ((eps * t) * t)
                            
                            # Developer target: eps + eps*tan(x)^2, the first-order expansion of tan(x+eps) - tan(x).
                            function code(x, eps)
                            	return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x)))
                            end
                            
                            % Developer target: eps + eps*tan(x)^2, the first-order expansion of tan(x+eps) - tan(x).
                            function tmp = code(x, eps)
                            	tmp = eps + ((eps * tan(x)) * tan(x));
                            end
                            
                            (* Developer target: eps + eps*Tan[x]^2, with intermediates rounded to $MachinePrecision. *)
                            code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
                            
                            \begin{array}{l}
                            
                            \\
                            \varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
                            \end{array}
                            

                            Reproduce

                            ?
                            herbie shell --seed 2025084 
                            (FPCore (x eps)
                              :name "2tan (problem 3.3.2)"
                              :precision binary64
                              :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
                            
                              :alt
                              (! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
                            
                              (- (tan (+ x eps)) (tan x)))