2tan (problem 3.3.2)

Percentage Accurate: 62.4% → 99.6%
Time: 19.8s
Alternatives: 13
Speedup: 205.0×

Specification

Precondition (sampled input domain):
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
! Naive reference implementation of tan(x + eps) - tan(x) in double precision.
! Loses accuracy to catastrophic cancellation when |eps| << |x|.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
/**
 * Naive reference implementation of tan(x + eps) - tan(x).
 * Suffers catastrophic cancellation when |eps| is tiny relative to |x|.
 */
public static double code(double x, double eps) {
	final double shifted = Math.tan(x + eps);
	final double base = Math.tan(x);
	return shifted - base;
}
def code(x, eps):
	"""Naive reference implementation of tan(x + eps) - tan(x).

	Suffers catastrophic cancellation when abs(eps) is tiny relative
	to abs(x); the report's alternatives rewrite it for accuracy.
	"""
	shifted = math.tan(x + eps)
	return shifted - math.tan(x)
# Naive reference implementation of tan(x + eps) - tan(x).
# Loses accuracy to catastrophic cancellation when |eps| << |x|.
function code(x, eps)
	return Float64(tan(Float64(x + eps)) - tan(x))
end
% Naive reference implementation of tan(x + eps) - tan(x).
% Loses accuracy to catastrophic cancellation when |eps| << |x|.
function tmp = code(x, eps)
	tmp = tan((x + eps)) - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 13 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 62.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \tan \left(x + \varepsilon\right) - \tan x \end{array} \]
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
	return tan((x + eps)) - tan(x);
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = tan((x + eps)) - tan(x)
end function
public static double code(double x, double eps) {
	return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps):
	return math.tan((x + eps)) - math.tan(x)
function code(x, eps)
	return Float64(tan(Float64(x + eps)) - tan(x))
end
function tmp = code(x, eps)
	tmp = tan((x + eps)) - tan(x);
end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}

Alternative 1: 99.6% accurate, 0.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := {\sin x}^{2}\\ t_1 := {\cos x}^{2}\\ t_2 := \frac{t\_0}{t\_1}\\ t_3 := t\_2 + 1\\ \varepsilon \cdot \left(\left(t\_2 + \varepsilon \cdot \left(\frac{\sin x \cdot t\_3}{\cos x} - \varepsilon \cdot \left(0.16666666666666666 + \mathsf{fma}\left(-1, t\_0 \cdot \frac{t\_3}{t\_1}, \mathsf{fma}\left(-0.5, t\_3, 0.16666666666666666 \cdot t\_2\right)\right)\right)\right)\right) + 1\right) \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (pow (sin x) 2.0))
        (t_1 (pow (cos x) 2.0))
        (t_2 (/ t_0 t_1))
        (t_3 (+ t_2 1.0)))
   (*
    eps
    (+
     (+
      t_2
      (*
       eps
       (-
        (/ (* (sin x) t_3) (cos x))
        (*
         eps
         (+
          0.16666666666666666
          (fma
           -1.0
           (* t_0 (/ t_3 t_1))
           (fma -0.5 t_3 (* 0.16666666666666666 t_2))))))))
     1.0))))
/* Herbie Alternative 1: Taylor expansion of tan(x + eps) - tan(x) in eps
 * around 0, avoiding the cancellation of the naive difference.
 * Note 0.16666666666666666 is 1/6 (third-order series coefficient). */
double code(double x, double eps) {
	double t_0 = pow(sin(x), 2.0);  /* sin(x)^2 */
	double t_1 = pow(cos(x), 2.0);  /* cos(x)^2 */
	double t_2 = t_0 / t_1;         /* tan(x)^2 */
	double t_3 = t_2 + 1.0;         /* tan(x)^2 + 1 == sec(x)^2 */
	/* Horner-style nesting in eps: eps * (sec^2-ish leading term plus
	 * higher-order corrections); fma keeps the inner combination accurate. */
	return eps * ((t_2 + (eps * (((sin(x) * t_3) / cos(x)) - (eps * (0.16666666666666666 + fma(-1.0, (t_0 * (t_3 / t_1)), fma(-0.5, t_3, (0.16666666666666666 * t_2)))))))) + 1.0);
}
function code(x, eps)
	t_0 = sin(x) ^ 2.0
	t_1 = cos(x) ^ 2.0
	t_2 = Float64(t_0 / t_1)
	t_3 = Float64(t_2 + 1.0)
	return Float64(eps * Float64(Float64(t_2 + Float64(eps * Float64(Float64(Float64(sin(x) * t_3) / cos(x)) - Float64(eps * Float64(0.16666666666666666 + fma(-1.0, Float64(t_0 * Float64(t_3 / t_1)), fma(-0.5, t_3, Float64(0.16666666666666666 * t_2)))))))) + 1.0))
end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$1 = N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$2 = N[(t$95$0 / t$95$1), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 + 1.0), $MachinePrecision]}, N[(eps * N[(N[(t$95$2 + N[(eps * N[(N[(N[(N[Sin[x], $MachinePrecision] * t$95$3), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision] - N[(eps * N[(0.16666666666666666 + N[(-1.0 * N[(t$95$0 * N[(t$95$3 / t$95$1), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * t$95$3 + N[(0.16666666666666666 * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := {\sin x}^{2}\\
t_1 := {\cos x}^{2}\\
t_2 := \frac{t\_0}{t\_1}\\
t_3 := t\_2 + 1\\
\varepsilon \cdot \left(\left(t\_2 + \varepsilon \cdot \left(\frac{\sin x \cdot t\_3}{\cos x} - \varepsilon \cdot \left(0.16666666666666666 + \mathsf{fma}\left(-1, t\_0 \cdot \frac{t\_3}{t\_1}, \mathsf{fma}\left(-0.5, t\_3, 0.16666666666666666 \cdot t\_2\right)\right)\right)\right)\right) + 1\right)
\end{array}
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.2%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(0.16666666666666666 + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(-0.5 \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Simplified 99.2%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(0.16666666666666666 + \mathsf{fma}\left(-1, {\sin x}^{2} \cdot \frac{1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}}, \mathsf{fma}\left(-0.5, 1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right), 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) - \frac{\sin x \cdot \left(1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right)\right) - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)} \]
  5. Final simplification 99.2%

    \[\leadsto \varepsilon \cdot \left(\left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + \varepsilon \cdot \left(\frac{\sin x \cdot \left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + 1\right)}{\cos x} - \varepsilon \cdot \left(0.16666666666666666 + \mathsf{fma}\left(-1, {\sin x}^{2} \cdot \frac{\frac{{\sin x}^{2}}{{\cos x}^{2}} + 1}{{\cos x}^{2}}, \mathsf{fma}\left(-0.5, \frac{{\sin x}^{2}}{{\cos x}^{2}} + 1, 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right)\right) + 1\right) \]
  6. Add Preprocessing

Alternative 2: 99.6% accurate, 0.1× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := {\sin x}^{2}\\ t_1 := {\cos x}^{2}\\ t_2 := \frac{t\_0}{t\_1}\\ t_3 := t\_2 + 1\\ \varepsilon \cdot \left(t\_2 + \left(\varepsilon \cdot \left(\varepsilon \cdot \left(\left(\frac{t\_0 \cdot t\_3}{t\_1} + \left(-0.5 \cdot \left(-1 - t\_2\right) - 0.16666666666666666 \cdot t\_2\right)\right) - 0.16666666666666666\right) + \frac{\sin x \cdot t\_3}{\cos x}\right) + 1\right)\right) \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (pow (sin x) 2.0))
        (t_1 (pow (cos x) 2.0))
        (t_2 (/ t_0 t_1))
        (t_3 (+ t_2 1.0)))
   (*
    eps
    (+
     t_2
     (+
      (*
       eps
       (+
        (*
         eps
         (-
          (+
           (/ (* t_0 t_3) t_1)
           (- (* -0.5 (- -1.0 t_2)) (* 0.16666666666666666 t_2)))
          0.16666666666666666))
        (/ (* (sin x) t_3) (cos x))))
      1.0)))))
/* Herbie Alternative 2: Taylor expansion of tan(x + eps) - tan(x) in eps
 * around 0; same series as Alternative 1 with a different association of
 * the correction terms. 0.16666666666666666 is 1/6. */
double code(double x, double eps) {
	double t_0 = pow(sin(x), 2.0);  /* sin(x)^2 */
	double t_1 = pow(cos(x), 2.0);  /* cos(x)^2 */
	double t_2 = t_0 / t_1;         /* tan(x)^2 */
	double t_3 = t_2 + 1.0;         /* tan(x)^2 + 1 == sec(x)^2 */
	return eps * (t_2 + ((eps * ((eps * ((((t_0 * t_3) / t_1) + ((-0.5 * (-1.0 - t_2)) - (0.16666666666666666 * t_2))) - 0.16666666666666666)) + ((sin(x) * t_3) / cos(x)))) + 1.0));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: t_2
    real(8) :: t_3
    t_0 = sin(x) ** 2.0d0
    t_1 = cos(x) ** 2.0d0
    t_2 = t_0 / t_1
    t_3 = t_2 + 1.0d0
    code = eps * (t_2 + ((eps * ((eps * ((((t_0 * t_3) / t_1) + (((-0.5d0) * ((-1.0d0) - t_2)) - (0.16666666666666666d0 * t_2))) - 0.16666666666666666d0)) + ((sin(x) * t_3) / cos(x)))) + 1.0d0))
end function
public static double code(double x, double eps) {
	double t_0 = Math.pow(Math.sin(x), 2.0);
	double t_1 = Math.pow(Math.cos(x), 2.0);
	double t_2 = t_0 / t_1;
	double t_3 = t_2 + 1.0;
	return eps * (t_2 + ((eps * ((eps * ((((t_0 * t_3) / t_1) + ((-0.5 * (-1.0 - t_2)) - (0.16666666666666666 * t_2))) - 0.16666666666666666)) + ((Math.sin(x) * t_3) / Math.cos(x)))) + 1.0));
}
def code(x, eps):
	"""Herbie Alternative 2: Taylor expansion of tan(x + eps) - tan(x)
	in eps around 0, avoiding the cancellation of the naive difference.
	0.16666666666666666 is 1/6 (third-order series coefficient)."""
	t_0 = math.pow(math.sin(x), 2.0)  # sin(x)^2
	t_1 = math.pow(math.cos(x), 2.0)  # cos(x)^2
	t_2 = t_0 / t_1                   # tan(x)^2
	t_3 = t_2 + 1.0                   # tan(x)^2 + 1 == sec(x)^2
	return eps * (t_2 + ((eps * ((eps * ((((t_0 * t_3) / t_1) + ((-0.5 * (-1.0 - t_2)) - (0.16666666666666666 * t_2))) - 0.16666666666666666)) + ((math.sin(x) * t_3) / math.cos(x)))) + 1.0))
function code(x, eps)
	t_0 = sin(x) ^ 2.0
	t_1 = cos(x) ^ 2.0
	t_2 = Float64(t_0 / t_1)
	t_3 = Float64(t_2 + 1.0)
	return Float64(eps * Float64(t_2 + Float64(Float64(eps * Float64(Float64(eps * Float64(Float64(Float64(Float64(t_0 * t_3) / t_1) + Float64(Float64(-0.5 * Float64(-1.0 - t_2)) - Float64(0.16666666666666666 * t_2))) - 0.16666666666666666)) + Float64(Float64(sin(x) * t_3) / cos(x)))) + 1.0)))
end
function tmp = code(x, eps)
	t_0 = sin(x) ^ 2.0;
	t_1 = cos(x) ^ 2.0;
	t_2 = t_0 / t_1;
	t_3 = t_2 + 1.0;
	tmp = eps * (t_2 + ((eps * ((eps * ((((t_0 * t_3) / t_1) + ((-0.5 * (-1.0 - t_2)) - (0.16666666666666666 * t_2))) - 0.16666666666666666)) + ((sin(x) * t_3) / cos(x)))) + 1.0));
end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$1 = N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$2 = N[(t$95$0 / t$95$1), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$2 + 1.0), $MachinePrecision]}, N[(eps * N[(t$95$2 + N[(N[(eps * N[(N[(eps * N[(N[(N[(N[(t$95$0 * t$95$3), $MachinePrecision] / t$95$1), $MachinePrecision] + N[(N[(-0.5 * N[(-1.0 - t$95$2), $MachinePrecision]), $MachinePrecision] - N[(0.16666666666666666 * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + N[(N[(N[Sin[x], $MachinePrecision] * t$95$3), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := {\sin x}^{2}\\
t_1 := {\cos x}^{2}\\
t_2 := \frac{t\_0}{t\_1}\\
t_3 := t\_2 + 1\\
\varepsilon \cdot \left(t\_2 + \left(\varepsilon \cdot \left(\varepsilon \cdot \left(\left(\frac{t\_0 \cdot t\_3}{t\_1} + \left(-0.5 \cdot \left(-1 - t\_2\right) - 0.16666666666666666 \cdot t\_2\right)\right) - 0.16666666666666666\right) + \frac{\sin x \cdot t\_3}{\cos x}\right) + 1\right)\right)
\end{array}
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.2%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(0.16666666666666666 + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(-0.5 \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Final simplification 99.2%

    \[\leadsto \varepsilon \cdot \left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + \left(\varepsilon \cdot \left(\varepsilon \cdot \left(\left(\frac{{\sin x}^{2} \cdot \left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + 1\right)}{{\cos x}^{2}} + \left(-0.5 \cdot \left(-1 - \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) - 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) - 0.16666666666666666\right) + \frac{\sin x \cdot \left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + 1\right)}{\cos x}\right) + 1\right)\right) \]
  5. Add Preprocessing

Alternative 3: 99.4% accurate, 0.3× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (* (fma (pow (sin x) 2.0) (pow (cos x) -2.0) 1.0) (pow (exp eps) (tan x)))))
double code(double x, double eps) {
	return eps * (fma(pow(sin(x), 2.0), pow(cos(x), -2.0), 1.0) * pow(exp(eps), tan(x)));
}
function code(x, eps)
	return Float64(eps * Float64(fma((sin(x) ^ 2.0), (cos(x) ^ -2.0), 1.0) * (exp(eps) ^ tan(x))))
end
code[x_, eps_] := N[(eps * N[(N[(N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision] * N[Power[N[Cos[x], $MachinePrecision], -2.0], $MachinePrecision] + 1.0), $MachinePrecision] * N[Power[N[Exp[eps], $MachinePrecision], N[Tan[x], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. add-exp-log60.9%

      \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  4. Applied egg-rr60.9%

    \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  5. Taylor expanded in eps around 0 90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\log \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)}} \]
  6. Step-by-step derivation
    1. sub-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\log \color{blue}{\left(1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    2. log1p-define90.0%

      \[\leadsto e^{\log \varepsilon + \left(\color{blue}{\mathsf{log1p}\left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    3. mul-1-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    4. remove-double-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    5. associate-/l*90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon \cdot \frac{\sin x}{\cos x}}\right)} \]
  7. Simplified90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot \frac{\sin x}{\cos x}\right)}} \]
  8. Step-by-step derivation
    1. associate-+r+90.0%

      \[\leadsto e^{\color{blue}{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \frac{\sin x}{\cos x}}} \]
    2. tan-quot90.0%

      \[\leadsto e^{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \color{blue}{\tan x}} \]
    3. exp-sum90.0%

      \[\leadsto \color{blue}{e^{\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \cdot e^{\varepsilon \cdot \tan x}} \]
  9. Applied egg-rr99.0%

    \[\leadsto \color{blue}{\left(\varepsilon \cdot \mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right)\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}} \]
  10. Step-by-step derivation
    1. associate-*l*99.0%

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  11. Simplified99.0%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  12. Add Preprocessing

Alternative 4: 99.4% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left({\left(e^{\varepsilon}\right)}^{\tan x} \cdot \left({\tan x}^{2} + 1\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* eps (* (pow (exp eps) (tan x)) (+ (pow (tan x) 2.0) 1.0))))
double code(double x, double eps) {
	return eps * (pow(exp(eps), tan(x)) * (pow(tan(x), 2.0) + 1.0));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((exp(eps) ** tan(x)) * ((tan(x) ** 2.0d0) + 1.0d0))
end function
public static double code(double x, double eps) {
	return eps * (Math.pow(Math.exp(eps), Math.tan(x)) * (Math.pow(Math.tan(x), 2.0) + 1.0));
}
def code(x, eps):
	"""Herbie Alternative 4: eps * exp(eps)**tan(x) * (tan(x)**2 + 1).

	The factor tan(x)**2 + 1 (== sec(x)**2) is the derivative of tan at x;
	the exp(eps)**tan(x) factor carries the higher-order eps dependence.
	"""
	t = math.tan(x)
	growth = math.pow(math.exp(eps), t)
	return eps * (growth * (math.pow(t, 2.0) + 1.0))
function code(x, eps)
	return Float64(eps * Float64((exp(eps) ^ tan(x)) * Float64((tan(x) ^ 2.0) + 1.0)))
end
function tmp = code(x, eps)
	tmp = eps * ((exp(eps) ^ tan(x)) * ((tan(x) ^ 2.0) + 1.0));
end
code[x_, eps_] := N[(eps * N[(N[Power[N[Exp[eps], $MachinePrecision], N[Tan[x], $MachinePrecision]], $MachinePrecision] * N[(N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left({\left(e^{\varepsilon}\right)}^{\tan x} \cdot \left({\tan x}^{2} + 1\right)\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. add-exp-log60.9%

      \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  4. Applied egg-rr60.9%

    \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  5. Taylor expanded in eps around 0 90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\log \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)}} \]
  6. Step-by-step derivation
    1. sub-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\log \color{blue}{\left(1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    2. log1p-define90.0%

      \[\leadsto e^{\log \varepsilon + \left(\color{blue}{\mathsf{log1p}\left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    3. mul-1-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    4. remove-double-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    5. associate-/l*90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon \cdot \frac{\sin x}{\cos x}}\right)} \]
  7. Simplified90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot \frac{\sin x}{\cos x}\right)}} \]
  8. Step-by-step derivation
    1. associate-+r+90.0%

      \[\leadsto e^{\color{blue}{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \frac{\sin x}{\cos x}}} \]
    2. tan-quot90.0%

      \[\leadsto e^{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \color{blue}{\tan x}} \]
    3. exp-sum90.0%

      \[\leadsto \color{blue}{e^{\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \cdot e^{\varepsilon \cdot \tan x}} \]
  9. Applied egg-rr99.0%

    \[\leadsto \color{blue}{\left(\varepsilon \cdot \mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right)\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}} \]
  10. Step-by-step derivation
    1. associate-*l*99.0%

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  11. Simplified99.0%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  12. Step-by-step derivation
    1. fma-undefine99.0%

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left({\sin x}^{2} \cdot {\cos x}^{-2} + 1\right)} \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    2. unpow299.0%

      \[\leadsto \varepsilon \cdot \left(\left(\color{blue}{\left(\sin x \cdot \sin x\right)} \cdot {\cos x}^{-2} + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    3. sqr-pow99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\left(\sin x \cdot \sin x\right) \cdot \color{blue}{\left({\cos x}^{\left(\frac{-2}{2}\right)} \cdot {\cos x}^{\left(\frac{-2}{2}\right)}\right)} + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    4. unswap-sqr99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\color{blue}{\left(\sin x \cdot {\cos x}^{\left(\frac{-2}{2}\right)}\right) \cdot \left(\sin x \cdot {\cos x}^{\left(\frac{-2}{2}\right)}\right)} + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    5. metadata-eval99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\left(\sin x \cdot {\cos x}^{\color{blue}{-1}}\right) \cdot \left(\sin x \cdot {\cos x}^{\left(\frac{-2}{2}\right)}\right) + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    6. unpow-199.0%

      \[\leadsto \varepsilon \cdot \left(\left(\left(\sin x \cdot \color{blue}{\frac{1}{\cos x}}\right) \cdot \left(\sin x \cdot {\cos x}^{\left(\frac{-2}{2}\right)}\right) + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    7. div-inv99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\color{blue}{\frac{\sin x}{\cos x}} \cdot \left(\sin x \cdot {\cos x}^{\left(\frac{-2}{2}\right)}\right) + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    8. tan-quot99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\color{blue}{\tan x} \cdot \left(\sin x \cdot {\cos x}^{\left(\frac{-2}{2}\right)}\right) + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    9. metadata-eval99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\tan x \cdot \left(\sin x \cdot {\cos x}^{\color{blue}{-1}}\right) + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    10. unpow-199.0%

      \[\leadsto \varepsilon \cdot \left(\left(\tan x \cdot \left(\sin x \cdot \color{blue}{\frac{1}{\cos x}}\right) + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    11. div-inv99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\tan x \cdot \color{blue}{\frac{\sin x}{\cos x}} + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    12. tan-quot99.0%

      \[\leadsto \varepsilon \cdot \left(\left(\tan x \cdot \color{blue}{\tan x} + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
    13. pow299.0%

      \[\leadsto \varepsilon \cdot \left(\left(\color{blue}{{\tan x}^{2}} + 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
  13. Applied egg-rr99.0%

    \[\leadsto \varepsilon \cdot \left(\color{blue}{\left({\tan x}^{2} + 1\right)} \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right) \]
  14. Final simplification99.0%

    \[\leadsto \varepsilon \cdot \left({\left(e^{\varepsilon}\right)}^{\tan x} \cdot \left({\tan x}^{2} + 1\right)\right) \]
  15. Add Preprocessing

Alternative 5: 99.0% accurate, 0.5× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + \varepsilon \cdot \left(x + \varepsilon \cdot 0.3333333333333333\right)\right) + 1\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (+
   (+
    (/ (pow (sin x) 2.0) (pow (cos x) 2.0))
    (* eps (+ x (* eps 0.3333333333333333))))
   1.0)))
double code(double x, double eps) {
	return eps * (((pow(sin(x), 2.0) / pow(cos(x), 2.0)) + (eps * (x + (eps * 0.3333333333333333)))) + 1.0);
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((((sin(x) ** 2.0d0) / (cos(x) ** 2.0d0)) + (eps * (x + (eps * 0.3333333333333333d0)))) + 1.0d0)
end function
public static double code(double x, double eps) {
	return eps * (((Math.pow(Math.sin(x), 2.0) / Math.pow(Math.cos(x), 2.0)) + (eps * (x + (eps * 0.3333333333333333)))) + 1.0);
}
def code(x, eps):
	"""Herbie Alternative 5: eps * (tan(x)^2 + eps*(x + eps/3) + 1).

	The sin^2/cos^2 quotient is tan(x)^2; with the +1 it gives sec(x)^2,
	the derivative of tan at x. 0.3333333333333333 is 1/3.
	"""
	return eps * (((math.pow(math.sin(x), 2.0) / math.pow(math.cos(x), 2.0)) + (eps * (x + (eps * 0.3333333333333333)))) + 1.0)
function code(x, eps)
	return Float64(eps * Float64(Float64(Float64((sin(x) ^ 2.0) / (cos(x) ^ 2.0)) + Float64(eps * Float64(x + Float64(eps * 0.3333333333333333)))) + 1.0))
end
function tmp = code(x, eps)
	tmp = eps * ((((sin(x) ^ 2.0) / (cos(x) ^ 2.0)) + (eps * (x + (eps * 0.3333333333333333)))) + 1.0);
end
code[x_, eps_] := N[(eps * N[(N[(N[(N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision] / N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] + N[(eps * N[(x + N[(eps * 0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + \varepsilon \cdot \left(x + \varepsilon \cdot 0.3333333333333333\right)\right) + 1\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.2%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(0.16666666666666666 + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(-0.5 \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Simplified99.2%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(0.16666666666666666 + \mathsf{fma}\left(-1, {\sin x}^{2} \cdot \frac{1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}}, \mathsf{fma}\left(-0.5, 1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right), 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) - \frac{\sin x \cdot \left(1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right)\right) - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)} \]
  5. Taylor expanded in x around 0 98.8%

    \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \color{blue}{\left(x + 0.3333333333333333 \cdot \varepsilon\right)} - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) \]
  6. Step-by-step derivation
    1. *-commutative98.8%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(x + \color{blue}{\varepsilon \cdot 0.3333333333333333}\right) - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) \]
  7. Simplified98.8%

    \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \color{blue}{\left(x + \varepsilon \cdot 0.3333333333333333\right)} - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) \]
  8. Final simplification98.8%

    \[\leadsto \varepsilon \cdot \left(\left(\frac{{\sin x}^{2}}{{\cos x}^{2}} + \varepsilon \cdot \left(x + \varepsilon \cdot 0.3333333333333333\right)\right) + 1\right) \]
  9. Add Preprocessing

Alternative 6: 98.9% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\left(\varepsilon \cdot \left(\varepsilon \cdot 0.3333333333333333 + x \cdot \left(x \cdot \left(\varepsilon \cdot 1.3333333333333333 + x \cdot 1.3333333333333333\right) + 1\right)\right) + \frac{0.5 - \frac{\cos \left(x \cdot 2\right)}{2}}{{\cos x}^{2}}\right) + 1\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (+
   (+
    (*
     eps
     (+
      (* eps 0.3333333333333333)
      (*
       x
       (+ (* x (+ (* eps 1.3333333333333333) (* x 1.3333333333333333))) 1.0))))
    (/ (- 0.5 (/ (cos (* x 2.0)) 2.0)) (pow (cos x) 2.0)))
   1.0)))
double code(double x, double eps) {
	return eps * (((eps * ((eps * 0.3333333333333333) + (x * ((x * ((eps * 1.3333333333333333) + (x * 1.3333333333333333))) + 1.0)))) + ((0.5 - (cos((x * 2.0)) / 2.0)) / pow(cos(x), 2.0))) + 1.0);
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * (((eps * ((eps * 0.3333333333333333d0) + (x * ((x * ((eps * 1.3333333333333333d0) + (x * 1.3333333333333333d0))) + 1.0d0)))) + ((0.5d0 - (cos((x * 2.0d0)) / 2.0d0)) / (cos(x) ** 2.0d0))) + 1.0d0)
end function
public static double code(double x, double eps) {
	return eps * (((eps * ((eps * 0.3333333333333333) + (x * ((x * ((eps * 1.3333333333333333) + (x * 1.3333333333333333))) + 1.0)))) + ((0.5 - (Math.cos((x * 2.0)) / 2.0)) / Math.pow(Math.cos(x), 2.0))) + 1.0);
}
def code(x, eps):
	return eps * (((eps * ((eps * 0.3333333333333333) + (x * ((x * ((eps * 1.3333333333333333) + (x * 1.3333333333333333))) + 1.0)))) + ((0.5 - (math.cos((x * 2.0)) / 2.0)) / math.pow(math.cos(x), 2.0))) + 1.0)
function code(x, eps)
	return Float64(eps * Float64(Float64(Float64(eps * Float64(Float64(eps * 0.3333333333333333) + Float64(x * Float64(Float64(x * Float64(Float64(eps * 1.3333333333333333) + Float64(x * 1.3333333333333333))) + 1.0)))) + Float64(Float64(0.5 - Float64(cos(Float64(x * 2.0)) / 2.0)) / (cos(x) ^ 2.0))) + 1.0))
end
% Herbie alternative 6 (~98.9% accurate) for tan(x + eps) - tan(x).
% Generated code — operation order affects double rounding; keep as-is.
function tmp = code(x, eps)
	tmp = eps * (((eps * ((eps * 0.3333333333333333) + (x * ((x * ((eps * 1.3333333333333333) + (x * 1.3333333333333333))) + 1.0)))) + ((0.5 - (cos((x * 2.0)) / 2.0)) / (cos(x) ^ 2.0))) + 1.0);
end
code[x_, eps_] := N[(eps * N[(N[(N[(eps * N[(N[(eps * 0.3333333333333333), $MachinePrecision] + N[(x * N[(N[(x * N[(N[(eps * 1.3333333333333333), $MachinePrecision] + N[(x * 1.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(0.5 - N[(N[Cos[N[(x * 2.0), $MachinePrecision]], $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision] / N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\left(\varepsilon \cdot \left(\varepsilon \cdot 0.3333333333333333 + x \cdot \left(x \cdot \left(\varepsilon \cdot 1.3333333333333333 + x \cdot 1.3333333333333333\right) + 1\right)\right) + \frac{0.5 - \frac{\cos \left(x \cdot 2\right)}{2}}{{\cos x}^{2}}\right) + 1\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.2%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(1 + \varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(0.16666666666666666 + \left(-1 \cdot \frac{{\sin x}^{2} \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}} + \left(-0.5 \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)\right) - -1 \cdot \frac{\sin x \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{\cos x}\right)\right) - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \]
  4. Simplified 99.2%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(-1 \cdot \left(\varepsilon \cdot \left(0.16666666666666666 + \mathsf{fma}\left(-1, {\sin x}^{2} \cdot \frac{1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}{{\cos x}^{2}}, \mathsf{fma}\left(-0.5, 1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right), 0.16666666666666666 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) - \frac{\sin x \cdot \left(1 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)}{\cos x}\right)\right) - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right)} \]
  5. Taylor expanded in x around 0 98.7%

    \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \color{blue}{\left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right)} - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)\right) \]
  6. Step-by-step derivation
    1. unpow2 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\color{blue}{\sin x \cdot \sin x}}{{\cos x}^{2}}\right)\right)\right) \]
    2. sin-mult 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\color{blue}{\frac{\cos \left(x - x\right) - \cos \left(x + x\right)}{2}}}{{\cos x}^{2}}\right)\right)\right) \]
  7. Applied egg-rr 98.7%

    \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\color{blue}{\frac{\cos \left(x - x\right) - \cos \left(x + x\right)}{2}}}{{\cos x}^{2}}\right)\right)\right) \]
  8. Step-by-step derivation
    1. div-sub 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\color{blue}{\frac{\cos \left(x - x\right)}{2} - \frac{\cos \left(x + x\right)}{2}}}{{\cos x}^{2}}\right)\right)\right) \]
    2. +-inverses 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\frac{\cos \color{blue}{0}}{2} - \frac{\cos \left(x + x\right)}{2}}{{\cos x}^{2}}\right)\right)\right) \]
    3. cos-0 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\frac{\color{blue}{1}}{2} - \frac{\cos \left(x + x\right)}{2}}{{\cos x}^{2}}\right)\right)\right) \]
    4. metadata-eval 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\color{blue}{0.5} - \frac{\cos \left(x + x\right)}{2}}{{\cos x}^{2}}\right)\right)\right) \]
    5. count-2 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{0.5 - \frac{\cos \color{blue}{\left(2 \cdot x\right)}}{2}}{{\cos x}^{2}}\right)\right)\right) \]
    6. *-commutative 98.7%

      \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{0.5 - \frac{\cos \color{blue}{\left(x \cdot 2\right)}}{2}}{{\cos x}^{2}}\right)\right)\right) \]
  9. Simplified 98.7%

    \[\leadsto \varepsilon \cdot \left(1 + \left(\varepsilon \cdot \left(0.3333333333333333 \cdot \varepsilon + x \cdot \left(1 + x \cdot \left(1.3333333333333333 \cdot \varepsilon + 1.3333333333333333 \cdot x\right)\right)\right) - \left(-\frac{\color{blue}{0.5 - \frac{\cos \left(x \cdot 2\right)}{2}}}{{\cos x}^{2}}\right)\right)\right) \]
  10. Final simplification 98.7%

    \[\leadsto \varepsilon \cdot \left(\left(\varepsilon \cdot \left(\varepsilon \cdot 0.3333333333333333 + x \cdot \left(x \cdot \left(\varepsilon \cdot 1.3333333333333333 + x \cdot 1.3333333333333333\right) + 1\right)\right) + \frac{0.5 - \frac{\cos \left(x \cdot 2\right)}{2}}{{\cos x}^{2}}\right) + 1\right) \]
  11. Add Preprocessing

Alternative 7: 98.6% accurate, 0.6× speedup?

\[\begin{array}{l} \\ \frac{1}{\frac{\frac{1}{\varepsilon}}{\frac{{\sin x}^{2}}{\frac{\cos \left(x \cdot 2\right) + 1}{2}} + 1}} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (/
  1.0
  (/
   (/ 1.0 eps)
   (+ (/ (pow (sin x) 2.0) (/ (+ (cos (* x 2.0)) 1.0) 2.0)) 1.0))))
// Herbie alternative 7 (~98.6% accurate): tan(x+eps) - tan(x) as eps * (1 + tan(x)^2)
// via nested reciprocals, with tan(x)^2 = sin(x)^2 / ((cos(2x) + 1)/2).
// Generated code — operation order affects double rounding; keep as-is.
double code(double x, double eps) {
	return 1.0 / ((1.0 / eps) / ((pow(sin(x), 2.0) / ((cos((x * 2.0)) + 1.0) / 2.0)) + 1.0));
}
! Herbie alternative 7 (~98.6% accurate) for tan(x + eps) - tan(x).
! Generated code — operation order affects double rounding; keep as-is.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = 1.0d0 / ((1.0d0 / eps) / (((sin(x) ** 2.0d0) / ((cos((x * 2.0d0)) + 1.0d0) / 2.0d0)) + 1.0d0))
end function
// Herbie alternative 7 (~98.6% accurate) for tan(x + eps) - tan(x).
// Generated code — operation order affects double rounding; keep as-is.
public static double code(double x, double eps) {
	return 1.0 / ((1.0 / eps) / ((Math.pow(Math.sin(x), 2.0) / ((Math.cos((x * 2.0)) + 1.0) / 2.0)) + 1.0));
}
def code(x, eps):
	"""Herbie alternative 7 (~98.6% accurate) of tan(x + eps) - tan(x).

	Computes eps * (1 + tan(x)^2) through nested reciprocals, exactly the
	floating-point operations of the generated one-liner.
	"""
	# tan(x)^2 expressed as sin(x)^2 / ((cos(2x) + 1)/2) for stability.
	tan_sq = math.pow(math.sin(x), 2.0) / ((math.cos((x * 2.0)) + 1.0) / 2.0)
	inv_scaled = (1.0 / eps) / (tan_sq + 1.0)
	return 1.0 / inv_scaled
# Herbie alternative 7 (~98.6% accurate) for tan(x + eps) - tan(x).
# Generated code — Float64 wrapping and operation order affect rounding; keep as-is.
function code(x, eps)
	return Float64(1.0 / Float64(Float64(1.0 / eps) / Float64(Float64((sin(x) ^ 2.0) / Float64(Float64(cos(Float64(x * 2.0)) + 1.0) / 2.0)) + 1.0)))
end
% Herbie alternative 7 (~98.6% accurate) for tan(x + eps) - tan(x).
% Generated code — operation order affects double rounding; keep as-is.
function tmp = code(x, eps)
	tmp = 1.0 / ((1.0 / eps) / (((sin(x) ^ 2.0) / ((cos((x * 2.0)) + 1.0) / 2.0)) + 1.0));
end
code[x_, eps_] := N[(1.0 / N[(N[(1.0 / eps), $MachinePrecision] / N[(N[(N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision] / N[(N[(N[Cos[N[(x * 2.0), $MachinePrecision]], $MachinePrecision] + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{1}{\frac{\frac{1}{\varepsilon}}{\frac{{\sin x}^{2}}{\frac{\cos \left(x \cdot 2\right) + 1}{2}} + 1}}
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. flip-- 35.1%

      \[\leadsto \color{blue}{\frac{\tan \left(x + \varepsilon\right) \cdot \tan \left(x + \varepsilon\right) - \tan x \cdot \tan x}{\tan \left(x + \varepsilon\right) + \tan x}} \]
    2. clear-num35.1%

      \[\leadsto \color{blue}{\frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{\tan \left(x + \varepsilon\right) \cdot \tan \left(x + \varepsilon\right) - \tan x \cdot \tan x}}} \]
    3. pow2 35.1%

      \[\leadsto \frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{\color{blue}{{\tan \left(x + \varepsilon\right)}^{2}} - \tan x \cdot \tan x}} \]
    4. pow2 35.1%

      \[\leadsto \frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{{\tan \left(x + \varepsilon\right)}^{2} - \color{blue}{{\tan x}^{2}}}} \]
  4. Applied egg-rr35.1%

    \[\leadsto \color{blue}{\frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{{\tan \left(x + \varepsilon\right)}^{2} - {\tan x}^{2}}}} \]
  5. Taylor expanded in eps around 0 98.3%

    \[\leadsto \frac{1}{\color{blue}{\frac{1}{\varepsilon \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}}} \]
  6. Step-by-step derivation
    1. associate-/r*98.2%

      \[\leadsto \frac{1}{\color{blue}{\frac{\frac{1}{\varepsilon}}{1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
    2. sub-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{\color{blue}{1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}}} \]
    3. mul-1-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right)}} \]
    4. remove-double-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
  7. Simplified98.2%

    \[\leadsto \frac{1}{\color{blue}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
  8. Step-by-step derivation
    1. unpow2 98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\color{blue}{\cos x \cdot \cos x}}}} \]
    2. cos-mult98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\color{blue}{\frac{\cos \left(x + x\right) + \cos \left(x - x\right)}{2}}}}} \]
  9. Applied egg-rr98.2%

    \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\color{blue}{\frac{\cos \left(x + x\right) + \cos \left(x - x\right)}{2}}}}} \]
  10. Step-by-step derivation
    1. +-commutative98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\frac{\color{blue}{\cos \left(x - x\right) + \cos \left(x + x\right)}}{2}}}} \]
    2. +-inverses98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\frac{\cos \color{blue}{0} + \cos \left(x + x\right)}{2}}}} \]
    3. cos-098.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\frac{\color{blue}{1} + \cos \left(x + x\right)}{2}}}} \]
    4. count-298.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\frac{1 + \cos \color{blue}{\left(2 \cdot x\right)}}{2}}}} \]
    5. *-commutative98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\frac{1 + \cos \color{blue}{\left(x \cdot 2\right)}}{2}}}} \]
  11. Simplified98.2%

    \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{\color{blue}{\frac{1 + \cos \left(x \cdot 2\right)}{2}}}}} \]
  12. Final simplification98.2%

    \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{\frac{{\sin x}^{2}}{\frac{\cos \left(x \cdot 2\right) + 1}{2}} + 1}} \]
  13. Add Preprocessing

Alternative 8: 98.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \varepsilon + {x}^{2} \cdot \left(\varepsilon + {x}^{2} \cdot \left(\varepsilon \cdot 0.6666666666666666\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (+ eps (* (pow x 2.0) (+ eps (* (pow x 2.0) (* eps 0.6666666666666666))))))
// Herbie alternative 8 (~98.2% accurate): even Taylor form eps * (1 + x^2 + (2/3)x^4)
// of tan(x+eps) - tan(x). Generated code — keep operation order (affects rounding).
double code(double x, double eps) {
	return eps + (pow(x, 2.0) * (eps + (pow(x, 2.0) * (eps * 0.6666666666666666))));
}
! Herbie alternative 8 (~98.2% accurate): eps * (1 + x^2 + (2/3)x^4).
! Generated code — operation order affects double rounding; keep as-is.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps + ((x ** 2.0d0) * (eps + ((x ** 2.0d0) * (eps * 0.6666666666666666d0))))
end function
// Herbie alternative 8 (~98.2% accurate): eps * (1 + x^2 + (2/3)x^4).
// Generated code — operation order affects double rounding; keep as-is.
public static double code(double x, double eps) {
	return eps + (Math.pow(x, 2.0) * (eps + (Math.pow(x, 2.0) * (eps * 0.6666666666666666))));
}
def code(x, eps):
	"""Herbie alternative 8 (~98.2% accurate): eps * (1 + x^2 + (2/3)x^4).

	Reuses x^2 once; math.pow(x, 2.0) is deterministic, so this matches the
	generated one-liner bit-for-bit.
	"""
	x_sq = math.pow(x, 2.0)
	inner = eps + (x_sq * (eps * 0.6666666666666666))
	return eps + (x_sq * inner)
# Herbie alternative 8 (~98.2% accurate): eps * (1 + x^2 + (2/3)x^4).
# Generated code — Float64 wrapping and operation order affect rounding; keep as-is.
function code(x, eps)
	return Float64(eps + Float64((x ^ 2.0) * Float64(eps + Float64((x ^ 2.0) * Float64(eps * 0.6666666666666666)))))
end
% Herbie alternative 8 (~98.2% accurate): eps * (1 + x^2 + (2/3)x^4).
% Generated code — operation order affects double rounding; keep as-is.
function tmp = code(x, eps)
	tmp = eps + ((x ^ 2.0) * (eps + ((x ^ 2.0) * (eps * 0.6666666666666666))));
end
code[x_, eps_] := N[(eps + N[(N[Power[x, 2.0], $MachinePrecision] * N[(eps + N[(N[Power[x, 2.0], $MachinePrecision] * N[(eps * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon + {x}^{2} \cdot \left(\varepsilon + {x}^{2} \cdot \left(\varepsilon \cdot 0.6666666666666666\right)\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. flip--35.1%

      \[\leadsto \color{blue}{\frac{\tan \left(x + \varepsilon\right) \cdot \tan \left(x + \varepsilon\right) - \tan x \cdot \tan x}{\tan \left(x + \varepsilon\right) + \tan x}} \]
    2. clear-num35.1%

      \[\leadsto \color{blue}{\frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{\tan \left(x + \varepsilon\right) \cdot \tan \left(x + \varepsilon\right) - \tan x \cdot \tan x}}} \]
    3. pow235.1%

      \[\leadsto \frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{\color{blue}{{\tan \left(x + \varepsilon\right)}^{2}} - \tan x \cdot \tan x}} \]
    4. pow235.1%

      \[\leadsto \frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{{\tan \left(x + \varepsilon\right)}^{2} - \color{blue}{{\tan x}^{2}}}} \]
  4. Applied egg-rr35.1%

    \[\leadsto \color{blue}{\frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{{\tan \left(x + \varepsilon\right)}^{2} - {\tan x}^{2}}}} \]
  5. Taylor expanded in eps around 0 98.3%

    \[\leadsto \frac{1}{\color{blue}{\frac{1}{\varepsilon \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}}} \]
  6. Step-by-step derivation
    1. associate-/r*98.2%

      \[\leadsto \frac{1}{\color{blue}{\frac{\frac{1}{\varepsilon}}{1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
    2. sub-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{\color{blue}{1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}}} \]
    3. mul-1-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right)}} \]
    4. remove-double-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
  7. Simplified98.2%

    \[\leadsto \frac{1}{\color{blue}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
  8. Taylor expanded in x around 0 97.9%

    \[\leadsto \color{blue}{\varepsilon + {x}^{2} \cdot \left(\varepsilon + 0.6666666666666666 \cdot \left(\varepsilon \cdot {x}^{2}\right)\right)} \]
  9. Step-by-step derivation
    1. associate-*r*97.9%

      \[\leadsto \varepsilon + {x}^{2} \cdot \left(\varepsilon + \color{blue}{\left(0.6666666666666666 \cdot \varepsilon\right) \cdot {x}^{2}}\right) \]
    2. *-commutative97.9%

      \[\leadsto \varepsilon + {x}^{2} \cdot \left(\varepsilon + \color{blue}{\left(\varepsilon \cdot 0.6666666666666666\right)} \cdot {x}^{2}\right) \]
  10. Simplified97.9%

    \[\leadsto \color{blue}{\varepsilon + {x}^{2} \cdot \left(\varepsilon + \left(\varepsilon \cdot 0.6666666666666666\right) \cdot {x}^{2}\right)} \]
  11. Final simplification97.9%

    \[\leadsto \varepsilon + {x}^{2} \cdot \left(\varepsilon + {x}^{2} \cdot \left(\varepsilon \cdot 0.6666666666666666\right)\right) \]
  12. Add Preprocessing

Alternative 9: 98.2% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon + x \cdot \left(0.5 \cdot {\varepsilon}^{2} + 1\right)\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (+ eps (* x (* eps (+ eps (* x (+ (* 0.5 (pow eps 2.0)) 1.0)))))))
// Herbie alternative 9 (~98.2% accurate): eps + x*eps*(eps + x*(eps^2/2 + 1)).
// Generated code — operation order affects double rounding; keep as-is.
double code(double x, double eps) {
	return eps + (x * (eps * (eps + (x * ((0.5 * pow(eps, 2.0)) + 1.0)))));
}
! Herbie alternative 9 (~98.2% accurate): eps + x*eps*(eps + x*(eps^2/2 + 1)).
! Generated code — operation order affects double rounding; keep as-is.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps + (x * (eps * (eps + (x * ((0.5d0 * (eps ** 2.0d0)) + 1.0d0)))))
end function
// Herbie alternative 9 (~98.2% accurate): eps + x*eps*(eps + x*(eps^2/2 + 1)).
// Generated code — operation order affects double rounding; keep as-is.
public static double code(double x, double eps) {
	return eps + (x * (eps * (eps + (x * ((0.5 * Math.pow(eps, 2.0)) + 1.0)))));
}
def code(x, eps):
	"""Herbie alternative 9 (~98.2% accurate): eps + x*eps*(eps + x*(eps^2/2 + 1)).

	Same floating-point operations as the generated one-liner, with named
	intermediates; evaluation order unchanged, results bit-identical.
	"""
	half_eps_sq = 0.5 * math.pow(eps, 2.0)
	inner = eps + (x * (half_eps_sq + 1.0))
	return eps + (x * (eps * inner))
# Herbie alternative 9 (~98.2% accurate): eps + x*eps*(eps + x*(eps^2/2 + 1)).
# Generated code — Float64 wrapping and operation order affect rounding; keep as-is.
function code(x, eps)
	return Float64(eps + Float64(x * Float64(eps * Float64(eps + Float64(x * Float64(Float64(0.5 * (eps ^ 2.0)) + 1.0))))))
end
% Herbie alternative 9 (~98.2% accurate): eps + x*eps*(eps + x*(eps^2/2 + 1)).
% Generated code — operation order affects double rounding; keep as-is.
function tmp = code(x, eps)
	tmp = eps + (x * (eps * (eps + (x * ((0.5 * (eps ^ 2.0)) + 1.0)))));
end
code[x_, eps_] := N[(eps + N[(x * N[(eps * N[(eps + N[(x * N[(N[(0.5 * N[Power[eps, 2.0], $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon + x \cdot \left(0.5 \cdot {\varepsilon}^{2} + 1\right)\right)\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. add-exp-log60.9%

      \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  4. Applied egg-rr60.9%

    \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  5. Taylor expanded in eps around 0 90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\log \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)}} \]
  6. Step-by-step derivation
    1. sub-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\log \color{blue}{\left(1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    2. log1p-define90.0%

      \[\leadsto e^{\log \varepsilon + \left(\color{blue}{\mathsf{log1p}\left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    3. mul-1-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    4. remove-double-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    5. associate-/l*90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon \cdot \frac{\sin x}{\cos x}}\right)} \]
  7. Simplified90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot \frac{\sin x}{\cos x}\right)}} \]
  8. Taylor expanded in x around 0 97.9%

    \[\leadsto \color{blue}{\varepsilon + x \cdot \left(\varepsilon \cdot \left(x \cdot \left(1 + 0.5 \cdot {\varepsilon}^{2}\right)\right) + {\varepsilon}^{2}\right)} \]
  9. Step-by-step derivation
    1. +-commutative97.9%

      \[\leadsto \varepsilon + x \cdot \color{blue}{\left({\varepsilon}^{2} + \varepsilon \cdot \left(x \cdot \left(1 + 0.5 \cdot {\varepsilon}^{2}\right)\right)\right)} \]
    2. unpow297.9%

      \[\leadsto \varepsilon + x \cdot \left(\color{blue}{\varepsilon \cdot \varepsilon} + \varepsilon \cdot \left(x \cdot \left(1 + 0.5 \cdot {\varepsilon}^{2}\right)\right)\right) \]
    3. distribute-lft-out97.9%

      \[\leadsto \varepsilon + x \cdot \color{blue}{\left(\varepsilon \cdot \left(\varepsilon + x \cdot \left(1 + 0.5 \cdot {\varepsilon}^{2}\right)\right)\right)} \]
  10. Simplified97.9%

    \[\leadsto \color{blue}{\varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon + x \cdot \left(1 + 0.5 \cdot {\varepsilon}^{2}\right)\right)\right)} \]
  11. Final simplification97.9%

    \[\leadsto \varepsilon + x \cdot \left(\varepsilon \cdot \left(\varepsilon + x \cdot \left(0.5 \cdot {\varepsilon}^{2} + 1\right)\right)\right) \]
  12. Add Preprocessing

Alternative 10: 98.2% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(x \cdot \left(\varepsilon + x \cdot \left(0.5 \cdot {\varepsilon}^{2} + 1\right)\right) + 1\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* eps (+ (* x (+ eps (* x (+ (* 0.5 (pow eps 2.0)) 1.0)))) 1.0)))
// Herbie alternative 10 (~98.2% accurate): eps * (1 + x*(eps + x*(eps^2/2 + 1))).
// Generated code — operation order affects double rounding; keep as-is.
double code(double x, double eps) {
	return eps * ((x * (eps + (x * ((0.5 * pow(eps, 2.0)) + 1.0)))) + 1.0);
}
! Herbie alternative 10 (~98.2% accurate): eps * (1 + x*(eps + x*(eps^2/2 + 1))).
! Generated code — operation order affects double rounding; keep as-is.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((x * (eps + (x * ((0.5d0 * (eps ** 2.0d0)) + 1.0d0)))) + 1.0d0)
end function
// Herbie alternative 10 (~98.2% accurate): eps * (1 + x*(eps + x*(eps^2/2 + 1))).
// Generated code — operation order affects double rounding; keep as-is.
public static double code(double x, double eps) {
	return eps * ((x * (eps + (x * ((0.5 * Math.pow(eps, 2.0)) + 1.0)))) + 1.0);
}
def code(x, eps):
	"""Herbie alternative 10 (~98.2% accurate): eps * (1 + x*(eps + x*(eps^2/2 + 1))).

	Same floating-point operations as the generated one-liner, with named
	intermediates; evaluation order unchanged, results bit-identical.
	"""
	half_eps_sq = 0.5 * math.pow(eps, 2.0)
	factor = (x * (eps + (x * (half_eps_sq + 1.0)))) + 1.0
	return eps * factor
# Herbie alternative 10 (~98.2% accurate): eps * (1 + x*(eps + x*(eps^2/2 + 1))).
# Generated code — Float64 wrapping and operation order affect rounding; keep as-is.
function code(x, eps)
	return Float64(eps * Float64(Float64(x * Float64(eps + Float64(x * Float64(Float64(0.5 * (eps ^ 2.0)) + 1.0)))) + 1.0))
end
% Herbie alternative 10 (~98.2% accurate): eps * (1 + x*(eps + x*(eps^2/2 + 1))).
% Generated code — operation order affects double rounding; keep as-is.
function tmp = code(x, eps)
	tmp = eps * ((x * (eps + (x * ((0.5 * (eps ^ 2.0)) + 1.0)))) + 1.0);
end
code[x_, eps_] := N[(eps * N[(N[(x * N[(eps + N[(x * N[(N[(0.5 * N[Power[eps, 2.0], $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(x \cdot \left(\varepsilon + x \cdot \left(0.5 \cdot {\varepsilon}^{2} + 1\right)\right) + 1\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. add-exp-log60.9%

      \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  4. Applied egg-rr60.9%

    \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  5. Taylor expanded in eps around 0 90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\log \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)}} \]
  6. Step-by-step derivation
    1. sub-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\log \color{blue}{\left(1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    2. log1p-define90.0%

      \[\leadsto e^{\log \varepsilon + \left(\color{blue}{\mathsf{log1p}\left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    3. mul-1-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    4. remove-double-neg90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    5. associate-/l*90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon \cdot \frac{\sin x}{\cos x}}\right)} \]
  7. Simplified90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot \frac{\sin x}{\cos x}\right)}} \]
  8. Step-by-step derivation
    1. associate-+r+90.0%

      \[\leadsto e^{\color{blue}{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \frac{\sin x}{\cos x}}} \]
    2. tan-quot90.0%

      \[\leadsto e^{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \color{blue}{\tan x}} \]
    3. exp-sum90.0%

      \[\leadsto \color{blue}{e^{\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \cdot e^{\varepsilon \cdot \tan x}} \]
  9. Applied egg-rr99.0%

    \[\leadsto \color{blue}{\left(\varepsilon \cdot \mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right)\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}} \]
  10. Step-by-step derivation
    1. associate-*l*99.0%

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  11. Simplified99.0%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  12. Taylor expanded in x around 0 97.9%

    \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + x \cdot \left(\varepsilon + x \cdot \left(1 + 0.5 \cdot {\varepsilon}^{2}\right)\right)\right)} \]
  13. Final simplification97.9%

    \[\leadsto \varepsilon \cdot \left(x \cdot \left(\varepsilon + x \cdot \left(0.5 \cdot {\varepsilon}^{2} + 1\right)\right) + 1\right) \]
  14. Add Preprocessing

Alternative 11: 98.1% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \varepsilon + \varepsilon \cdot {x}^{2} \end{array} \]
(FPCore (x eps) :precision binary64 (+ eps (* eps (pow x 2.0))))
// Herbie alternative 11 (~98.1% accurate): first-order form eps * (1 + x^2).
// Generated code — operation order affects double rounding; keep as-is.
double code(double x, double eps) {
	return eps + (eps * pow(x, 2.0));
}
! Herbie alternative 11 (~98.1% accurate): eps * (1 + x^2).
! Generated code — operation order affects double rounding; keep as-is.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps + (eps * (x ** 2.0d0))
end function
// Herbie alternative 11 (~98.1% accurate): eps * (1 + x^2).
// Generated code — operation order affects double rounding; keep as-is.
public static double code(double x, double eps) {
	return eps + (eps * Math.pow(x, 2.0));
}
def code(x, eps):
	"""Herbie alternative 11 (~98.1% accurate): eps * (1 + x^2), as eps + eps*x^2.

	Identical floating-point operations to the generated one-liner.
	"""
	x_sq = math.pow(x, 2.0)
	return eps + (eps * x_sq)
# Herbie alternative 11 (~98.1% accurate): eps * (1 + x^2).
# Generated code — Float64 wrapping and operation order affect rounding; keep as-is.
function code(x, eps)
	return Float64(eps + Float64(eps * (x ^ 2.0)))
end
% Herbie alternative 11 (~98.1% accurate): eps * (1 + x^2).
% Generated code — operation order affects double rounding; keep as-is.
function tmp = code(x, eps)
	tmp = eps + (eps * (x ^ 2.0));
end
code[x_, eps_] := N[(eps + N[(eps * N[Power[x, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon + \varepsilon \cdot {x}^{2}
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. flip--35.1%

      \[\leadsto \color{blue}{\frac{\tan \left(x + \varepsilon\right) \cdot \tan \left(x + \varepsilon\right) - \tan x \cdot \tan x}{\tan \left(x + \varepsilon\right) + \tan x}} \]
    2. clear-num35.1%

      \[\leadsto \color{blue}{\frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{\tan \left(x + \varepsilon\right) \cdot \tan \left(x + \varepsilon\right) - \tan x \cdot \tan x}}} \]
    3. pow235.1%

      \[\leadsto \frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{\color{blue}{{\tan \left(x + \varepsilon\right)}^{2}} - \tan x \cdot \tan x}} \]
    4. pow235.1%

      \[\leadsto \frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{{\tan \left(x + \varepsilon\right)}^{2} - \color{blue}{{\tan x}^{2}}}} \]
  4. Applied egg-rr35.1%

    \[\leadsto \color{blue}{\frac{1}{\frac{\tan \left(x + \varepsilon\right) + \tan x}{{\tan \left(x + \varepsilon\right)}^{2} - {\tan x}^{2}}}} \]
  5. Taylor expanded in eps around 0 98.3%

    \[\leadsto \frac{1}{\color{blue}{\frac{1}{\varepsilon \cdot \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}}} \]
  6. Step-by-step derivation
    1. associate-/r*98.2%

      \[\leadsto \frac{1}{\color{blue}{\frac{\frac{1}{\varepsilon}}{1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
    2. sub-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{\color{blue}{1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}}} \]
    3. mul-1-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right)}} \]
    4. remove-double-neg98.2%

      \[\leadsto \frac{1}{\frac{\frac{1}{\varepsilon}}{1 + \color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
  7. Simplified98.2%

    \[\leadsto \frac{1}{\color{blue}{\frac{\frac{1}{\varepsilon}}{1 + \frac{{\sin x}^{2}}{{\cos x}^{2}}}}} \]
  8. Taylor expanded in x around 0 97.8%

    \[\leadsto \color{blue}{\varepsilon + \varepsilon \cdot {x}^{2}} \]
  9. Add Preprocessing

Alternative 12: 97.7% accurate, 29.3× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot x + 1\right) \end{array} \]
(FPCore (x eps) :precision binary64 (* eps (+ (* eps x) 1.0)))
// Herbie alternative 12 (~97.7% accurate, fastest): eps * (1 + eps*x).
// Generated code — operation order affects double rounding; keep as-is.
double code(double x, double eps) {
	return eps * ((eps * x) + 1.0);
}
! Herbie alternative 12 (~97.7% accurate, fastest): eps * (1 + eps*x).
! Generated code — operation order affects double rounding; keep as-is.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((eps * x) + 1.0d0)
end function
// Herbie alternative 12 (~97.7% accurate, fastest): eps * (1 + eps*x).
// Generated code — operation order affects double rounding; keep as-is.
public static double code(double x, double eps) {
	return eps * ((eps * x) + 1.0);
}
def code(x, eps):
	"""Herbie alternative 12 (~97.7% accurate, fastest): eps * (1 + eps*x).

	Identical floating-point operations to the generated one-liner.
	"""
	scaled = (eps * x) + 1.0
	return eps * scaled
# Herbie alternative 12 (~97.7% accurate, fastest): eps * (1 + eps*x).
# Generated code — Float64 wrapping and operation order affect rounding; keep as-is.
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * x) + 1.0))
end
function tmp = code(x, eps)
	tmp = eps * ((eps * x) + 1.0);
end
code[x_, eps_] := N[(eps * N[(N[(eps * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot x + 1\right)
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. add-exp-log 60.9%

      \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  4. Applied egg-rr 60.9%

    \[\leadsto \color{blue}{e^{\log \left(\tan \left(x + \varepsilon\right) - \tan x\right)}} \]
  5. Taylor expanded in eps around 0 90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\log \left(1 - -1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)}} \]
  6. Step-by-step derivation
    1. sub-neg 90.0%

      \[\leadsto e^{\log \varepsilon + \left(\log \color{blue}{\left(1 + \left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    2. log1p-define 90.0%

      \[\leadsto e^{\log \varepsilon + \left(\color{blue}{\mathsf{log1p}\left(--1 \cdot \frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    3. mul-1-neg 90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(-\color{blue}{\left(-\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    4. remove-double-neg 90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\color{blue}{\frac{{\sin x}^{2}}{{\cos x}^{2}}}\right) + \frac{\varepsilon \cdot \sin x}{\cos x}\right)} \]
    5. associate-/l* 90.0%

      \[\leadsto e^{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \color{blue}{\varepsilon \cdot \frac{\sin x}{\cos x}}\right)} \]
  7. Simplified 90.0%

    \[\leadsto e^{\color{blue}{\log \varepsilon + \left(\mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right) + \varepsilon \cdot \frac{\sin x}{\cos x}\right)}} \]
  8. Step-by-step derivation
    1. associate-+r+ 90.0%

      \[\leadsto e^{\color{blue}{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \frac{\sin x}{\cos x}}} \]
    2. tan-quot 90.0%

      \[\leadsto e^{\left(\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)\right) + \varepsilon \cdot \color{blue}{\tan x}} \]
    3. exp-sum 90.0%

      \[\leadsto \color{blue}{e^{\log \varepsilon + \mathsf{log1p}\left(\frac{{\sin x}^{2}}{{\cos x}^{2}}\right)} \cdot e^{\varepsilon \cdot \tan x}} \]
  9. Applied egg-rr 99.0%

    \[\leadsto \color{blue}{\left(\varepsilon \cdot \mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right)\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}} \]
  10. Step-by-step derivation
    1. associate-*l* 99.0%

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  11. Simplified 99.0%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\mathsf{fma}\left({\sin x}^{2}, {\cos x}^{-2}, 1\right) \cdot {\left(e^{\varepsilon}\right)}^{\tan x}\right)} \]
  12. Taylor expanded in x around 0 97.6%

    \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + \varepsilon \cdot x\right)} \]
  13. Final simplification 97.6%

    \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot x + 1\right) \]
  14. Add Preprocessing

Alternative 13: 97.7% accurate, 205.0× speedup?

\[\begin{array}{l} \\ \varepsilon \end{array} \]
(FPCore (x eps) :precision binary64 eps)
double code(double x, double eps) {
	return eps;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps
end function
public static double code(double x, double eps) {
	return eps;
}
def code(x, eps):
	return eps
function code(x, eps)
	return eps
end
function tmp = code(x, eps)
	tmp = eps;
end
code[x_, eps_] := eps
\begin{array}{l}

\\
\varepsilon
\end{array}
Derivation
  1. Initial program 62.2%

    \[\tan \left(x + \varepsilon\right) - \tan x \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0 8.2%

    \[\leadsto \tan \color{blue}{\varepsilon} - \tan x \]
  4. Taylor expanded in eps around 0 8.2%

    \[\leadsto \color{blue}{\varepsilon} - \tan x \]
  5. Taylor expanded in eps around inf 97.6%

    \[\leadsto \color{blue}{\varepsilon} \]
  6. Add Preprocessing

Developer Target 1: 99.9% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)} \end{array} \]
(FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos x) (cos (+ x eps)))))
double code(double x, double eps) {
	return sin(eps) / (cos(x) * cos((x + eps)));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = sin(eps) / (cos(x) * cos((x + eps)))
end function
public static double code(double x, double eps) {
	return Math.sin(eps) / (Math.cos(x) * Math.cos((x + eps)));
}
def code(x, eps):
	return math.sin(eps) / (math.cos(x) * math.cos((x + eps)))
function code(x, eps)
	return Float64(sin(eps) / Float64(cos(x) * cos(Float64(x + eps))))
end
function tmp = code(x, eps)
	tmp = sin(eps) / (cos(x) * cos((x + eps)));
end
code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)}
\end{array}

Reproduce

?
herbie shell --seed 2024163 
(FPCore (x eps)
  :name "2tan (problem 3.3.2)"
  :precision binary64
  :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))

  :alt
  (! :herbie-platform default (/ (sin eps) (* (cos x) (cos (+ x eps)))))

  (- (tan (+ x eps)) (tan x)))