2tan (problem 3.3.2)

Percentage Accurate: 62.3% → 98.6%
Time: 14.1s
Alternatives: 7
Speedup: 207.0×

Specification

?
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
public static double code(double x, double eps) {
	return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
	return math.tan((x + eps)) - math.tan(x)
function code(x, eps)
	return Float64(tan(Float64(x + eps)) - tan(x))
end
function tmp = code(x, eps)
	tmp = tan((x + eps)) - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 7 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 62.3% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
public static double code(double x, double eps) {
	return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
	return math.tan((x + eps)) - math.tan(x)
function code(x, eps)
	return Float64(tan(Float64(x + eps)) - tan(x))
end
function tmp = code(x, eps)
	tmp = tan((x + eps)) - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Alternative 1: 98.6% accurate, 4.6× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \varepsilon, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.37777777777777777, 0.6666666666666666\right), 1\right)\right)\right), \varepsilon\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (fma
  eps
  (fma
   x
   eps
   (*
    x
    (*
     x
     (fma (* x x) (fma (* x x) 0.37777777777777777 0.6666666666666666) 1.0))))
  eps))
double code(double x, double eps) {
	return fma(eps, fma(x, eps, (x * (x * fma((x * x), fma((x * x), 0.37777777777777777, 0.6666666666666666), 1.0)))), eps);
}
function code(x, eps)
	return fma(eps, fma(x, eps, Float64(x * Float64(x * fma(Float64(x * x), fma(Float64(x * x), 0.37777777777777777, 0.6666666666666666), 1.0)))), eps)
end
code[x_, eps_] := N[(eps * N[(x * eps + N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.37777777777777777 + 0.6666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \varepsilon, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.37777777777777777, 0.6666666666666666\right), 1\right)\right)\right), \varepsilon\right)
\end{array}
Derivation
  1. Initial program 60.4%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Step-by-step derivation
    1. associate--l+N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
    3. distribute-lft-inN/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
    4. *-rgt-identityN/A

      \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
    5. accelerator-lowering-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
  5. Simplified100.0%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\color{blue}{x}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right) \]
  7. Step-by-step derivation
    1. Simplified100.0%

      \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\color{blue}{x}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right) \]
    2. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, \color{blue}{{x}^{2} \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right)}\right), \varepsilon\right) \]
    3. Step-by-step derivation
      1. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, \color{blue}{\left(x \cdot x\right)} \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right)\right), \varepsilon\right) \]
      2. associate-*l*N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right)\right)}\right), \varepsilon\right) \]
      3. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, \color{blue}{x \cdot \left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right)\right)}\right), \varepsilon\right) \]
      4. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \color{blue}{\left(x \cdot \left(1 + {x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right)\right)\right)}\right), \varepsilon\right) \]
      5. +-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \color{blue}{\left({x}^{2} \cdot \left(\frac{2}{3} + \frac{17}{45} \cdot {x}^{2}\right) + 1\right)}\right)\right), \varepsilon\right) \]
      6. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{2}{3} + \frac{17}{45} \cdot {x}^{2}, 1\right)}\right)\right), \varepsilon\right) \]
      7. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{3} + \frac{17}{45} \cdot {x}^{2}, 1\right)\right)\right), \varepsilon\right) \]
      8. *-lowering-*.f64N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{2}{3} + \frac{17}{45} \cdot {x}^{2}, 1\right)\right)\right), \varepsilon\right) \]
      9. +-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{17}{45} \cdot {x}^{2} + \frac{2}{3}}, 1\right)\right)\right), \varepsilon\right) \]
      10. *-commutativeN/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \frac{17}{45}} + \frac{2}{3}, 1\right)\right)\right), \varepsilon\right) \]
      11. accelerator-lowering-fma.f64N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{17}{45}, \frac{2}{3}\right)}, 1\right)\right)\right), \varepsilon\right) \]
      12. unpow2N/A

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{17}{45}, \frac{2}{3}\right), 1\right)\right)\right), \varepsilon\right) \]
      13. *-lowering-*.f64100.0

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, 0.37777777777777777, 0.6666666666666666\right), 1\right)\right)\right), \varepsilon\right) \]
    4. Simplified100.0%

      \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \frac{\varepsilon}{\cos x}, \color{blue}{x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.37777777777777777, 0.6666666666666666\right), 1\right)\right)}\right), \varepsilon\right) \]
    5. Taylor expanded in x around 0

      \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \color{blue}{\varepsilon}, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \frac{17}{45}, \frac{2}{3}\right), 1\right)\right)\right), \varepsilon\right) \]
    6. Step-by-step derivation
      1. Simplified100.0%

        \[\leadsto \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(x, \color{blue}{\varepsilon}, x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.37777777777777777, 0.6666666666666666\right), 1\right)\right)\right), \varepsilon\right) \]
      2. Add Preprocessing

      Alternative 2: 98.5% accurate, 7.1× speedup?

      \[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right), \varepsilon\right), \varepsilon\right) \end{array} \]
      (FPCore (x eps)
       :precision binary64
       (fma eps (* x (fma x (fma x (* x 0.6666666666666666) 1.0) eps)) eps))
      double code(double x, double eps) {
      	return fma(eps, (x * fma(x, fma(x, (x * 0.6666666666666666), 1.0), eps)), eps);
      }
      
      function code(x, eps)
      	return fma(eps, Float64(x * fma(x, fma(x, Float64(x * 0.6666666666666666), 1.0), eps)), eps)
      end
      
      code[x_, eps_] := N[(eps * N[(x * N[(x * N[(x * N[(x * 0.6666666666666666), $MachinePrecision] + 1.0), $MachinePrecision] + eps), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right), \varepsilon\right), \varepsilon\right)
      \end{array}
      
      Derivation
      1. Initial program 60.4%

        \[\tan \left(x + \varepsilon\right) - \tan x \]
      2. Add Preprocessing
      3. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
      4. Step-by-step derivation
        1. associate--l+N/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
        2. +-commutativeN/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
        3. distribute-lft-inN/A

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
        4. *-rgt-identityN/A

          \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
      5. Simplified100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
      6. Taylor expanded in x around 0

        \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\varepsilon + x \cdot \left(1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right)\right)\right)}, \varepsilon\right) \]
      7. Step-by-step derivation
        1. *-lowering-*.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\varepsilon + x \cdot \left(1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right)\right)\right)}, \varepsilon\right) \]
        2. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(x \cdot \left(1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right)\right) + \varepsilon\right)}, \varepsilon\right) \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\mathsf{fma}\left(x, 1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right), \varepsilon\right)}, \varepsilon\right) \]
        4. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right) + 1}, \varepsilon\right), \varepsilon\right) \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, \left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon, 1\right)}, \varepsilon\right), \varepsilon\right) \]
        6. associate--l+N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\frac{2}{3} \cdot x + \left(\frac{5}{6} \cdot \varepsilon - \frac{-1}{2} \cdot \varepsilon\right)}, 1\right), \varepsilon\right), \varepsilon\right) \]
        7. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\left(\frac{5}{6} \cdot \varepsilon - \frac{-1}{2} \cdot \varepsilon\right) + \frac{2}{3} \cdot x}, 1\right), \varepsilon\right), \varepsilon\right) \]
        8. distribute-rgt-out--N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\frac{5}{6} - \frac{-1}{2}\right)} + \frac{2}{3} \cdot x, 1\right), \varepsilon\right), \varepsilon\right) \]
        9. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{5}{6} - \frac{-1}{2}, \frac{2}{3} \cdot x\right)}, 1\right), \varepsilon\right), \varepsilon\right) \]
        10. metadata-evalN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{4}{3}}, \frac{2}{3} \cdot x\right), 1\right), \varepsilon\right), \varepsilon\right) \]
        11. *-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \frac{4}{3}, \color{blue}{x \cdot \frac{2}{3}}\right), 1\right), \varepsilon\right), \varepsilon\right) \]
        12. *-lowering-*.f6499.9

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, 1.3333333333333333, \color{blue}{x \cdot 0.6666666666666666}\right), 1\right), \varepsilon\right), \varepsilon\right) \]
      8. Simplified99.9%

        \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, 1.3333333333333333, x \cdot 0.6666666666666666\right), 1\right), \varepsilon\right)}, \varepsilon\right) \]
      9. Taylor expanded in eps around 0

        \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{1 + \frac{2}{3} \cdot {x}^{2}}, \varepsilon\right), \varepsilon\right) \]
      10. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{\frac{2}{3} \cdot {x}^{2} + 1}, \varepsilon\right), \varepsilon\right) \]
        2. unpow2N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \frac{2}{3} \cdot \color{blue}{\left(x \cdot x\right)} + 1, \varepsilon\right), \varepsilon\right) \]
        3. associate-*r*N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{\left(\frac{2}{3} \cdot x\right) \cdot x} + 1, \varepsilon\right), \varepsilon\right) \]
        4. *-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot \left(\frac{2}{3} \cdot x\right)} + 1, \varepsilon\right), \varepsilon\right) \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, \frac{2}{3} \cdot x, 1\right)}, \varepsilon\right), \varepsilon\right) \]
        6. *-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{x \cdot \frac{2}{3}}, 1\right), \varepsilon\right), \varepsilon\right) \]
        7. *-lowering-*.f6499.9

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{x \cdot 0.6666666666666666}, 1\right), \varepsilon\right), \varepsilon\right) \]
      11. Simplified99.9%

        \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right)}, \varepsilon\right), \varepsilon\right) \]
      12. Add Preprocessing

      Alternative 3: 98.4% accurate, 7.4× speedup?

      \[\begin{array}{l} \\ \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right), \varepsilon\right) \end{array} \]
      (FPCore (x eps)
       :precision binary64
       (fma (* x x) (* eps (fma x (* x 0.6666666666666666) 1.0)) eps))
      double code(double x, double eps) {
      	return fma((x * x), (eps * fma(x, (x * 0.6666666666666666), 1.0)), eps);
      }
      
      function code(x, eps)
      	return fma(Float64(x * x), Float64(eps * fma(x, Float64(x * 0.6666666666666666), 1.0)), eps)
      end
      
      code[x_, eps_] := N[(N[(x * x), $MachinePrecision] * N[(eps * N[(x * N[(x * 0.6666666666666666), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right), \varepsilon\right)
      \end{array}
      
      Derivation
      1. Initial program 60.4%

        \[\tan \left(x + \varepsilon\right) - \tan x \]
      2. Add Preprocessing
      3. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
      4. Step-by-step derivation
        1. associate--l+N/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
        2. +-commutativeN/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
        3. distribute-lft-inN/A

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
        4. *-rgt-identityN/A

          \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
      5. Simplified100.0%

        \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
      6. Taylor expanded in x around 0

        \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\varepsilon + x \cdot \left(1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right)\right)\right)}, \varepsilon\right) \]
      7. Step-by-step derivation
        1. *-lowering-*.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\varepsilon + x \cdot \left(1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right)\right)\right)}, \varepsilon\right) \]
        2. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(x \cdot \left(1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right)\right) + \varepsilon\right)}, \varepsilon\right) \]
        3. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\mathsf{fma}\left(x, 1 + x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right), \varepsilon\right)}, \varepsilon\right) \]
        4. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot \left(\left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon\right) + 1}, \varepsilon\right), \varepsilon\right) \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(x, \left(\frac{2}{3} \cdot x + \frac{5}{6} \cdot \varepsilon\right) - \frac{-1}{2} \cdot \varepsilon, 1\right)}, \varepsilon\right), \varepsilon\right) \]
        6. associate--l+N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\frac{2}{3} \cdot x + \left(\frac{5}{6} \cdot \varepsilon - \frac{-1}{2} \cdot \varepsilon\right)}, 1\right), \varepsilon\right), \varepsilon\right) \]
        7. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\left(\frac{5}{6} \cdot \varepsilon - \frac{-1}{2} \cdot \varepsilon\right) + \frac{2}{3} \cdot x}, 1\right), \varepsilon\right), \varepsilon\right) \]
        8. distribute-rgt-out--N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\frac{5}{6} - \frac{-1}{2}\right)} + \frac{2}{3} \cdot x, 1\right), \varepsilon\right), \varepsilon\right) \]
        9. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{5}{6} - \frac{-1}{2}, \frac{2}{3} \cdot x\right)}, 1\right), \varepsilon\right), \varepsilon\right) \]
        10. metadata-evalN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{4}{3}}, \frac{2}{3} \cdot x\right), 1\right), \varepsilon\right), \varepsilon\right) \]
        11. *-commutativeN/A

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \frac{4}{3}, \color{blue}{x \cdot \frac{2}{3}}\right), 1\right), \varepsilon\right), \varepsilon\right) \]
        12. *-lowering-*.f6499.9

          \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, 1.3333333333333333, \color{blue}{x \cdot 0.6666666666666666}\right), 1\right), \varepsilon\right), \varepsilon\right) \]
      8. Simplified99.9%

        \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, 1.3333333333333333, x \cdot 0.6666666666666666\right), 1\right), \varepsilon\right)}, \varepsilon\right) \]
      9. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(1 + {x}^{2} \cdot \left(1 + \frac{2}{3} \cdot {x}^{2}\right)\right)} \]
      10. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left({x}^{2} \cdot \left(1 + \frac{2}{3} \cdot {x}^{2}\right) + 1\right)} \]
        2. distribute-rgt-inN/A

          \[\leadsto \color{blue}{\left({x}^{2} \cdot \left(1 + \frac{2}{3} \cdot {x}^{2}\right)\right) \cdot \varepsilon + 1 \cdot \varepsilon} \]
        3. associate-*l*N/A

          \[\leadsto \color{blue}{{x}^{2} \cdot \left(\left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot \varepsilon\right)} + 1 \cdot \varepsilon \]
        4. *-lft-identityN/A

          \[\leadsto {x}^{2} \cdot \left(\left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot \varepsilon\right) + \color{blue}{\varepsilon} \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2}, \left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot \varepsilon, \varepsilon\right)} \]
        6. unpow2N/A

          \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot \varepsilon, \varepsilon\right) \]
        7. *-lowering-*.f64N/A

          \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot \varepsilon, \varepsilon\right) \]
        8. *-lowering-*.f64N/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\left(1 + \frac{2}{3} \cdot {x}^{2}\right) \cdot \varepsilon}, \varepsilon\right) \]
        9. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\left(\frac{2}{3} \cdot {x}^{2} + 1\right)} \cdot \varepsilon, \varepsilon\right) \]
        10. unpow2N/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \left(\frac{2}{3} \cdot \color{blue}{\left(x \cdot x\right)} + 1\right) \cdot \varepsilon, \varepsilon\right) \]
        11. associate-*r*N/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \left(\color{blue}{\left(\frac{2}{3} \cdot x\right) \cdot x} + 1\right) \cdot \varepsilon, \varepsilon\right) \]
        12. *-commutativeN/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \left(\color{blue}{x \cdot \left(\frac{2}{3} \cdot x\right)} + 1\right) \cdot \varepsilon, \varepsilon\right) \]
        13. accelerator-lowering-fma.f64N/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left(x, \frac{2}{3} \cdot x, 1\right)} \cdot \varepsilon, \varepsilon\right) \]
        14. *-commutativeN/A

          \[\leadsto \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, \color{blue}{x \cdot \frac{2}{3}}, 1\right) \cdot \varepsilon, \varepsilon\right) \]
        15. *-lowering-*.f6499.9

          \[\leadsto \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, \color{blue}{x \cdot 0.6666666666666666}, 1\right) \cdot \varepsilon, \varepsilon\right) \]
      11. Simplified99.9%

        \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right) \cdot \varepsilon, \varepsilon\right)} \]
      12. Final simplification99.9%

        \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 1\right), \varepsilon\right) \]
      13. Add Preprocessing

      Alternative 4: 98.4% accurate, 13.8× speedup?

      \[\begin{array}{l} \\ \mathsf{fma}\left(x, \varepsilon \cdot \left(\varepsilon + x\right), \varepsilon\right) \end{array} \]
      (FPCore (x eps) :precision binary64 (fma x (* eps (+ eps x)) eps))
      /* Herbie alternative 4 (98.4% accurate, 13.8x speedup): series
         approximation of tan(x + eps) - tan(x) as eps + x*eps*(eps + x),
         evaluated with a fused multiply-add. */
      double code(double x, double eps) {
      	return fma(x, (eps * (eps + x)), eps);
      }
      
      # Herbie alternative 4: tan(x + eps) - tan(x) ~= fma(x, eps*(eps + x), eps).
      function code(x, eps)
      	return fma(x, Float64(eps * Float64(eps + x)), eps)
      end
      
      (* Herbie alternative 4: x*eps*(eps + x) + eps, rounded at machine precision. *)
      code[x_, eps_] := N[(x * N[(eps * N[(eps + x), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \mathsf{fma}\left(x, \varepsilon \cdot \left(\varepsilon + x\right), \varepsilon\right)
      \end{array}
      
      Derivation
      1. Initial program 60.4%

        \[\tan \left(x + \varepsilon\right) - \tan x \]
      2. Add Preprocessing
      3. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
      4. Step-by-step derivation
        1. associate--l+N/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
        2. +-commutativeN/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
        3. distribute-lft-inN/A

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
        4. *-rgt-identityN/A

          \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
      5. Simplified (100.0% accurate)

        \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
      6. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\varepsilon + x \cdot \left(\varepsilon \cdot x + {\varepsilon}^{2}\right)} \]
      7. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \color{blue}{x \cdot \left(\varepsilon \cdot x + {\varepsilon}^{2}\right) + \varepsilon} \]
        2. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot x + {\varepsilon}^{2}, \varepsilon\right)} \]
        3. unpow2N/A

          \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot x + \color{blue}{\varepsilon \cdot \varepsilon}, \varepsilon\right) \]
        4. distribute-lft-outN/A

          \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(x + \varepsilon\right)}, \varepsilon\right) \]
        5. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\varepsilon + x\right)}, \varepsilon\right) \]
        6. *-lowering-*.f64N/A

          \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\varepsilon + x\right)}, \varepsilon\right) \]
        7. +-lowering-+.f64 (99.6%)

          \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\varepsilon + x\right)}, \varepsilon\right) \]
      8. Simplified (99.6% accurate)

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot \left(\varepsilon + x\right), \varepsilon\right)} \]
      9. Add Preprocessing

      Alternative 5: 98.3% accurate, 17.3× speedup?

      \[\begin{array}{l} \\ \mathsf{fma}\left(x, \varepsilon \cdot x, \varepsilon\right) \end{array} \]
      (FPCore (x eps) :precision binary64 (fma x (* eps x) eps))
      /* Herbie alternative 5 (98.3% accurate, 17.3x speedup): drops the eps^2
         term of alternative 4, leaving tan(x + eps) - tan(x) ~= eps + eps*x^2. */
      double code(double x, double eps) {
      	return fma(x, (eps * x), eps);
      }
      
      # Herbie alternative 5: tan(x + eps) - tan(x) ~= fma(x, eps*x, eps).
      function code(x, eps)
      	return fma(x, Float64(eps * x), eps)
      end
      
      (* Herbie alternative 5: x*(eps*x) + eps, rounded at machine precision. *)
      code[x_, eps_] := N[(x * N[(eps * x), $MachinePrecision] + eps), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \mathsf{fma}\left(x, \varepsilon \cdot x, \varepsilon\right)
      \end{array}
      
      Derivation
      1. Initial program 60.4%

        \[\tan \left(x + \varepsilon\right) - \tan x \]
      2. Add Preprocessing
      3. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
      4. Step-by-step derivation
        1. associate--l+N/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
        2. +-commutativeN/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
        3. distribute-lft-inN/A

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
        4. *-rgt-identityN/A

          \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
      5. Simplified (100.0% accurate)

        \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
      6. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\varepsilon + x \cdot \left(\varepsilon \cdot x + {\varepsilon}^{2}\right)} \]
      7. Step-by-step derivation
        1. +-commutativeN/A

          \[\leadsto \color{blue}{x \cdot \left(\varepsilon \cdot x + {\varepsilon}^{2}\right) + \varepsilon} \]
        2. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot x + {\varepsilon}^{2}, \varepsilon\right)} \]
        3. unpow2N/A

          \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot x + \color{blue}{\varepsilon \cdot \varepsilon}, \varepsilon\right) \]
        4. distribute-lft-outN/A

          \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(x + \varepsilon\right)}, \varepsilon\right) \]
        5. +-commutativeN/A

          \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\varepsilon + x\right)}, \varepsilon\right) \]
        6. *-lowering-*.f64N/A

          \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\varepsilon + x\right)}, \varepsilon\right) \]
        7. +-lowering-+.f64 (99.6%)

          \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\varepsilon + x\right)}, \varepsilon\right) \]
      8. Simplified (99.6% accurate)

        \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot \left(\varepsilon + x\right), \varepsilon\right)} \]
      9. Taylor expanded in eps around 0

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot x}, \varepsilon\right) \]
      10. Step-by-step derivation
        1. *-lowering-*.f64 (99.6%)

          \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot x}, \varepsilon\right) \]
      11. Simplified (99.6% accurate)

        \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot x}, \varepsilon\right) \]
      12. Add Preprocessing

      Alternative 6: 97.9% accurate, 17.3× speedup?

      \[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon, \varepsilon \cdot x, \varepsilon\right) \end{array} \]
      (FPCore (x eps) :precision binary64 (fma eps (* eps x) eps))
      /* Herbie alternative 6 (97.9% accurate, 17.3x speedup):
         tan(x + eps) - tan(x) ~= eps*(1 + eps*x), via Taylor expansion in x
         around 0 (see derivation below). */
      double code(double x, double eps) {
      	return fma(eps, (eps * x), eps);
      }
      
      # Herbie alternative 6: tan(x + eps) - tan(x) ~= fma(eps, eps*x, eps).
      function code(x, eps)
      	return fma(eps, Float64(eps * x), eps)
      end
      
      (* Herbie alternative 6: eps*(eps*x) + eps, rounded at machine precision. *)
      code[x_, eps_] := N[(eps * N[(eps * x), $MachinePrecision] + eps), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      \mathsf{fma}\left(\varepsilon, \varepsilon \cdot x, \varepsilon\right)
      \end{array}
      
      Derivation
      1. Initial program 60.4%

        \[\tan \left(x + \varepsilon\right) - \tan x \]
      2. Add Preprocessing
      3. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
      4. Step-by-step derivation
        1. associate--l+N/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
        2. +-commutativeN/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
        3. distribute-lft-inN/A

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
        4. *-rgt-identityN/A

          \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
      5. Simplified (100.0% accurate)

        \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
      6. Taylor expanded in x around 0

        \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot x}, \varepsilon\right) \]
      7. Step-by-step derivation
        1. *-lowering-*.f64 (98.8%)

          \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot x}, \varepsilon\right) \]
      8. Simplified (98.8% accurate)

        \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot x}, \varepsilon\right) \]
      9. Add Preprocessing

      Alternative 7: 97.9% accurate, 207.0× speedup?

      \[\begin{array}{l} \\ \varepsilon \end{array} \]
      (FPCore (x eps) :precision binary64 eps)
      /* Herbie alternative 7 (97.9% accurate, 207.0x speedup): zeroth-order
         approximation tan(x + eps) - tan(x) ~= eps; x is ignored entirely. */
      double code(double x, double eps) {
      	return eps;
      }
      
      ! Herbie alternative 7: tan(x + eps) - tan(x) ~= eps (x is unused).
      real(8) function code(x, eps)
          real(8), intent (in) :: x
          real(8), intent (in) :: eps
          code = eps
      end function
      
      // Herbie alternative 7: tan(x + eps) - tan(x) ~= eps (x is unused).
      public static double code(double x, double eps) {
      	return eps;
      }
      
      def code(x, eps):
      	"""Herbie alternative 7: tan(x + eps) - tan(x) ~= eps (x is unused)."""
      	return eps
      
      # Herbie alternative 7: tan(x + eps) - tan(x) ~= eps (x is unused).
      function code(x, eps)
      	return eps
      end
      
      % Herbie alternative 7: tan(x + eps) - tan(x) ~= eps (x is unused).
      function tmp = code(x, eps)
      	tmp = eps;
      end
      
      (* Herbie alternative 7: tan(x + eps) - tan(x) ~= eps (x is unused). *)
      code[x_, eps_] := eps
      
      \begin{array}{l}
      
      \\
      \varepsilon
      \end{array}
      
      Derivation
      1. Initial program 60.4%

        \[\tan \left(x + \varepsilon\right) - \tan x \]
      2. Add Preprocessing
      3. Taylor expanded in eps around 0

        \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
      4. Step-by-step derivation
        1. associate--l+N/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} \]
        2. +-commutativeN/A

          \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 1\right)} \]
        3. distribute-lft-inN/A

          \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot 1} \]
        4. *-rgt-identityN/A

          \[\leadsto \varepsilon \cdot \left(\frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon} \]
        5. accelerator-lowering-fma.f64N/A

          \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{\varepsilon \cdot \left(\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x} - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}, \varepsilon\right)} \]
      5. Simplified (100.0% accurate)

        \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\sin x + \frac{{\sin x}^{3}}{{\cos x}^{2}}, \frac{\varepsilon}{\cos x}, \frac{{\sin x}^{2}}{{\cos x}^{2}}\right), \varepsilon\right)} \]
      6. Taylor expanded in x around 0

        \[\leadsto \color{blue}{\varepsilon} \]
      7. Step-by-step derivation
        1. Simplified (98.8% accurate)

          \[\leadsto \color{blue}{\varepsilon} \]
        2. Add Preprocessing

        Developer Target 1: 99.9% accurate, 0.6× speedup?

        \[\begin{array}{l} \\ \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)} \end{array} \]
        (FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos x) (cos (+ x eps)))))
        /* Developer target 1 (99.9% accurate, 0.6x speedup): exact rewrite
           tan(a) - tan(b) = sin(a - b) / (cos(a)*cos(b)), which avoids the
           catastrophic cancellation of subtracting two nearby tangents. */
        double code(double x, double eps) {
        	return sin(eps) / (cos(x) * cos((x + eps)));
        }
        
        ! Developer target 1: tan(x+eps) - tan(x) = sin(eps) / (cos(x)*cos(x+eps)).
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            code = sin(eps) / (cos(x) * cos((x + eps)))
        end function
        
        // Developer target 1: tan(x+eps) - tan(x) = sin(eps) / (cos(x)*cos(x+eps)).
        public static double code(double x, double eps) {
        	return Math.sin(eps) / (Math.cos(x) * Math.cos((x + eps)));
        }
        
        def code(x, eps):
        	"""Developer target 1: tan(x+eps) - tan(x) = sin(eps) / (cos(x)*cos(x+eps))."""
        	return math.sin(eps) / (math.cos(x) * math.cos((x + eps)))
        
        # Developer target 1: tan(x+eps) - tan(x) = sin(eps) / (cos(x)*cos(x+eps)).
        function code(x, eps)
        	return Float64(sin(eps) / Float64(cos(x) * cos(Float64(x + eps))))
        end
        
        % Developer target 1: tan(x+eps) - tan(x) = sin(eps) / (cos(x)*cos(x+eps)).
        function tmp = code(x, eps)
        	tmp = sin(eps) / (cos(x) * cos((x + eps)));
        end
        
        (* Developer target 1: Sin[eps] / (Cos[x]*Cos[x + eps]) at machine precision. *)
        code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)}
        \end{array}
        

        Developer Target 2: 62.4% accurate, 0.4× speedup?

        \[\begin{array}{l} \\ \frac{\tan x + \tan \varepsilon}{1 - \tan x \cdot \tan \varepsilon} - \tan x \end{array} \]
        (FPCore (x eps)
         :precision binary64
         (- (/ (+ (tan x) (tan eps)) (- 1.0 (* (tan x) (tan eps)))) (tan x)))
        /* Developer target 2 (62.4% accurate, 0.4x speedup): expands
           tan(x + eps) via the tangent addition formula, then subtracts
           tan(x). Mathematically exact, but in binary64 it is barely more
           accurate than the original program. */
        double code(double x, double eps) {
        	return ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x);
        }
        
        ! Developer target 2: tangent addition formula for tan(x+eps), minus tan(x).
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            code = ((tan(x) + tan(eps)) / (1.0d0 - (tan(x) * tan(eps)))) - tan(x)
        end function
        
        // Developer target 2: tangent addition formula for tan(x+eps), minus tan(x).
        public static double code(double x, double eps) {
        	return ((Math.tan(x) + Math.tan(eps)) / (1.0 - (Math.tan(x) * Math.tan(eps)))) - Math.tan(x);
        }
        
        def code(x, eps):
        	"""Developer target 2: tangent addition formula for tan(x+eps), minus tan(x)."""
        	return ((math.tan(x) + math.tan(eps)) / (1.0 - (math.tan(x) * math.tan(eps)))) - math.tan(x)
        
        # Developer target 2: tangent addition formula for tan(x+eps), minus tan(x).
        function code(x, eps)
        	return Float64(Float64(Float64(tan(x) + tan(eps)) / Float64(1.0 - Float64(tan(x) * tan(eps)))) - tan(x))
        end
        
        % Developer target 2: tangent addition formula for tan(x+eps), minus tan(x).
        function tmp = code(x, eps)
        	tmp = ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x);
        end
        
        (* Developer target 2: tangent addition formula for Tan[x + eps], minus Tan[x]. *)
        code[x_, eps_] := N[(N[(N[(N[Tan[x], $MachinePrecision] + N[Tan[eps], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[x], $MachinePrecision] * N[Tan[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \frac{\tan x + \tan \varepsilon}{1 - \tan x \cdot \tan \varepsilon} - \tan x
        \end{array}
        

        Developer Target 3: 98.9% accurate, 1.0× speedup?

        \[\begin{array}{l} \\ \varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x \end{array} \]
        (FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
        /* Developer target 3 (98.9% accurate, 1.0x speedup): first-order
           expansion in eps, tan(x + eps) - tan(x) ~= eps*(1 + tan(x)^2),
           since d/dx tan(x) = 1 + tan(x)^2. */
        double code(double x, double eps) {
        	return eps + ((eps * tan(x)) * tan(x));
        }
        
        ! Developer target 3: tan(x+eps) - tan(x) ~= eps*(1 + tan(x)**2).
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            code = eps + ((eps * tan(x)) * tan(x))
        end function
        
        // Developer target 3: tan(x+eps) - tan(x) ~= eps*(1 + tan(x)^2).
        public static double code(double x, double eps) {
        	return eps + ((eps * Math.tan(x)) * Math.tan(x));
        }
        
        def code(x, eps):
        	"""Developer target 3: tan(x+eps) - tan(x) ~= eps*(1 + tan(x)**2)."""
        	return eps + ((eps * math.tan(x)) * math.tan(x))
        
        # Developer target 3: tan(x+eps) - tan(x) ~= eps*(1 + tan(x)^2).
        function code(x, eps)
        	return Float64(eps + Float64(Float64(eps * tan(x)) * tan(x)))
        end
        
        % Developer target 3: tan(x+eps) - tan(x) ~= eps*(1 + tan(x)^2).
        function tmp = code(x, eps)
        	tmp = eps + ((eps * tan(x)) * tan(x));
        end
        
        (* Developer target 3: eps + eps*Tan[x]^2 at machine precision. *)
        code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
        \end{array}
        

        Reproduce

        ?
        herbie shell --seed 2024205 
        (FPCore (x eps)
          :name "2tan (problem 3.3.2)"
          :precision binary64
          :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
        
          :alt
          (! :herbie-platform default (/ (sin eps) (* (cos x) (cos (+ x eps)))))
        
          :alt
          (! :herbie-platform default (- (/ (+ (tan x) (tan eps)) (- 1 (* (tan x) (tan eps)))) (tan x)))
        
          :alt
          (! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
        
          (- (tan (+ x eps)) (tan x)))