2cos (problem 3.3.5)

Percentage Accurate: 53.1% → 99.8%
Time: 15.1s
Alternatives: 12
Speedup: 51.3×

Specification

?
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \cos \left(x + \varepsilon\right) - \cos x \end{array} \]
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
	return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
    ! Naive reference: cos(x + eps) - cos(x). The subtraction cancels
    ! when |eps| is much smaller than |x| (see report accuracy figures).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = cos((x + eps)) - cos(x)
end function
public static double code(double x, double eps) {
	// Naive reference: cos(x + eps) - cos(x); cancels when |eps| << |x|.
	final double shifted = Math.cos(x + eps);
	final double base = Math.cos(x);
	return shifted - base;
}
def code(x, eps):
	# Naive reference: cos(x + eps) - cos(x); cancels when |eps| << |x|.
	shifted = math.cos(x + eps)
	return shifted - math.cos(x)
function code(x, eps)
	# Naive reference: cos(x + eps) - cos(x), evaluated in Float64.
	return Float64(cos(Float64(x + eps)) - cos(x))
end
function tmp = code(x, eps)
	% Naive reference: cos(x + eps) - cos(x).
	tmp = cos((x + eps)) - cos(x);
end
(* Naive reference at machine precision: cos(x + eps) - cos(x). *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows the value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 12 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.1% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \cos \left(x + \varepsilon\right) - \cos x \end{array} \]
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
	return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
    ! Initial program: cos(x + eps) - cos(x), evaluated directly.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = cos((x + eps)) - cos(x)
end function
public static double code(double x, double eps) {
	// Initial program: difference of cosines, evaluated directly.
	double sum = x + eps;
	return Math.cos(sum) - Math.cos(x);
}
def code(x, eps):
	# Initial program: difference of cosines, evaluated directly.
	base = math.cos(x)
	return math.cos(x + eps) - base
function code(x, eps)
	# Initial program: cos(x + eps) - cos(x), evaluated in Float64.
	return Float64(cos(Float64(x + eps)) - cos(x))
end
function tmp = code(x, eps)
	% Initial program: cos(x + eps) - cos(x), evaluated directly.
	tmp = cos((x + eps)) - cos(x);
end
(* Initial program at machine precision: cos(x + eps) - cos(x). *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}

Alternative 1: 99.8% accurate, 0.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := x \cdot \left(0.5 \cdot \frac{\varepsilon}{x}\right)\\ \left(\sin x \cdot \cos t_0 + \cos x \cdot \sin t_0\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \end{array} \end{array} \]
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (* x (* 0.5 (/ eps x)))))
   (*
    (+ (* (sin x) (cos t_0)) (* (cos x) (sin t_0)))
    (* -2.0 (sin (* 0.5 eps))))))
double code(double x, double eps) {
	double t_0 = x * (0.5 * (eps / x));
	return ((sin(x) * cos(t_0)) + (cos(x) * sin(t_0))) * (-2.0 * sin((0.5 * eps)));
}
real(8) function code(x, eps)
    ! Herbie alternative 1: t_0 = x * (0.5 * eps / x), algebraically eps/2;
    ! sine-addition expansion times -2*sin(eps/2) avoids cancellation.
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    real(8) :: t_0
    t_0 = x * (0.5d0 * (eps / x))
    code = ((sin(x) * cos(t_0)) + (cos(x) * sin(t_0))) * ((-2.0d0) * sin((0.5d0 * eps)))
end function
public static double code(double x, double eps) {
	// Herbie alternative 1: t0 = x * (0.5 * eps / x), algebraically eps/2;
	// sine-addition expansion times -2*sin(eps/2) avoids cancellation.
	final double t0 = x * (0.5 * (eps / x));
	final double expanded = Math.sin(x) * Math.cos(t0) + Math.cos(x) * Math.sin(t0);
	return expanded * (-2.0 * Math.sin(0.5 * eps));
}
def code(x, eps):
	# Herbie alternative 1: t_0 = x * (0.5 * eps / x), algebraically eps/2;
	# sine-addition expansion times -2*sin(eps/2) avoids cancellation.
	t_0 = x * (0.5 * (eps / x))
	expanded = math.sin(x) * math.cos(t_0) + math.cos(x) * math.sin(t_0)
	return expanded * (-2.0 * math.sin(0.5 * eps))
function code(x, eps)
	# Herbie alternative 1: t_0 = x * (0.5 * eps / x), algebraically eps/2;
	# sine-addition expansion times -2*sin(eps/2) avoids cancellation.
	t_0 = Float64(x * Float64(0.5 * Float64(eps / x)))
	return Float64(Float64(Float64(sin(x) * cos(t_0)) + Float64(cos(x) * sin(t_0))) * Float64(-2.0 * sin(Float64(0.5 * eps))))
end
function tmp = code(x, eps)
	% Herbie alternative 1: t_0 = x * (0.5 * eps / x), algebraically eps/2;
	% sine-addition expansion times -2*sin(eps/2) avoids cancellation.
	t_0 = x * (0.5 * (eps / x));
	tmp = ((sin(x) * cos(t_0)) + (cos(x) * sin(t_0))) * (-2.0 * sin((0.5 * eps)));
end
(* Herbie alternative 1: t_0 = x * (0.5 * eps / x), algebraically eps/2; sine-addition expansion times -2*Sin[eps/2]. *)
code[x_, eps_] := Block[{t$95$0 = N[(x * N[(0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[Sin[x], $MachinePrecision] * N[Cos[t$95$0], $MachinePrecision]), $MachinePrecision] + N[(N[Cos[x], $MachinePrecision] * N[Sin[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(-2.0 * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := x \cdot \left(0.5 \cdot \frac{\varepsilon}{x}\right)\\
\left(\sin x \cdot \cos t_0 + \cos x \cdot \sin t_0\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right)
\end{array}
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. diff-cos82.2%

      \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    2. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \color{blue}{\left(\left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{2}\right)} \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    3. associate--l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\color{blue}{\left(x + \left(\varepsilon - x\right)\right)} \cdot \frac{1}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    4. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot \color{blue}{0.5}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    5. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \color{blue}{\left(\left(\left(x + \varepsilon\right) + x\right) \cdot \frac{1}{2}\right)}\right) \]
    6. +-commutative82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\color{blue}{\left(\varepsilon + x\right)} + x\right) \cdot \frac{1}{2}\right)\right) \]
    7. associate-+l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\color{blue}{\left(\varepsilon + \left(x + x\right)\right)} \cdot \frac{1}{2}\right)\right) \]
    8. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot \color{blue}{0.5}\right)\right) \]
  4. Applied egg-rr82.2%

    \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right)} \]
  5. Step-by-step derivation
    1. associate-*r*82.2%

      \[\leadsto \color{blue}{\left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)} \]
    2. *-commutative82.2%

      \[\leadsto \color{blue}{\sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right)} \]
    3. *-commutative82.2%

      \[\leadsto \sin \color{blue}{\left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)} \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    4. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\left(\left(x + x\right) + \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    5. count-282.2%

      \[\leadsto \sin \left(0.5 \cdot \left(\color{blue}{2 \cdot x} + \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    6. fma-define82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\mathsf{fma}\left(2, x, \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    7. *-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \left(x + \left(\varepsilon - x\right)\right)\right)}\right) \]
    8. associate-+r-82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\left(x + \varepsilon\right) - x\right)}\right)\right) \]
    9. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\color{blue}{\left(\varepsilon + x\right)} - x\right)\right)\right) \]
    10. associate--l+99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\varepsilon + \left(x - x\right)\right)}\right)\right) \]
    11. +-inverses99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\varepsilon + \color{blue}{0}\right)\right)\right) \]
    12. distribute-lft-in99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \varepsilon + 0.5 \cdot 0\right)}\right) \]
    13. metadata-eval99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + \color{blue}{0}\right)\right) \]
  6. Simplified99.8%

    \[\leadsto \color{blue}{\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right)} \]
  7. Taylor expanded in x around inf 99.6%

    \[\leadsto \sin \color{blue}{\left(x \cdot \left(1 + 0.5 \cdot \frac{\varepsilon}{x}\right)\right)} \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  8. Step-by-step derivation
    1. associate-*r/99.6%

      \[\leadsto \sin \left(x \cdot \left(1 + \color{blue}{\frac{0.5 \cdot \varepsilon}{x}}\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  9. Simplified99.6%

    \[\leadsto \sin \color{blue}{\left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right)} \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  10. Step-by-step derivation
    1. distribute-rgt-in99.8%

      \[\leadsto \sin \color{blue}{\left(1 \cdot x + \frac{0.5 \cdot \varepsilon}{x} \cdot x\right)} \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
    2. *-un-lft-identity99.8%

      \[\leadsto \sin \left(\color{blue}{x} + \frac{0.5 \cdot \varepsilon}{x} \cdot x\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
    3. sin-sum99.8%

      \[\leadsto \color{blue}{\left(\sin x \cdot \cos \left(\frac{0.5 \cdot \varepsilon}{x} \cdot x\right) + \cos x \cdot \sin \left(\frac{0.5 \cdot \varepsilon}{x} \cdot x\right)\right)} \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
    4. associate-/l*99.8%

      \[\leadsto \left(\sin x \cdot \cos \left(\color{blue}{\left(0.5 \cdot \frac{\varepsilon}{x}\right)} \cdot x\right) + \cos x \cdot \sin \left(\frac{0.5 \cdot \varepsilon}{x} \cdot x\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
    5. associate-/l*99.8%

      \[\leadsto \left(\sin x \cdot \cos \left(\left(0.5 \cdot \frac{\varepsilon}{x}\right) \cdot x\right) + \cos x \cdot \sin \left(\color{blue}{\left(0.5 \cdot \frac{\varepsilon}{x}\right)} \cdot x\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  11. Applied egg-rr99.8%

    \[\leadsto \color{blue}{\left(\sin x \cdot \cos \left(\left(0.5 \cdot \frac{\varepsilon}{x}\right) \cdot x\right) + \cos x \cdot \sin \left(\left(0.5 \cdot \frac{\varepsilon}{x}\right) \cdot x\right)\right)} \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  12. Final simplification99.8%

    \[\leadsto \left(\sin x \cdot \cos \left(x \cdot \left(0.5 \cdot \frac{\varepsilon}{x}\right)\right) + \cos x \cdot \sin \left(x \cdot \left(0.5 \cdot \frac{\varepsilon}{x}\right)\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \]
  13. Add Preprocessing

Alternative 2: 99.7% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* (* -2.0 (sin (* 0.5 eps))) (sin (* 0.5 (fma 2.0 x eps)))))
double code(double x, double eps) {
	return (-2.0 * sin((0.5 * eps))) * sin((0.5 * fma(2.0, x, eps)));
}
function code(x, eps)
	# Herbie alternative 2: -2*sin(eps/2) * sin((2x + eps)/2), with fma
	# computing 2x + eps in a single rounding.
	return Float64(Float64(-2.0 * sin(Float64(0.5 * eps))) * sin(Float64(0.5 * fma(2.0, x, eps))))
end
(* Herbie alternative 2: -2*Sin[eps/2] * Sin[(2x + eps)/2]. *)
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. diff-cos82.2%

      \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    2. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \color{blue}{\left(\left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{2}\right)} \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    3. associate--l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\color{blue}{\left(x + \left(\varepsilon - x\right)\right)} \cdot \frac{1}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    4. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot \color{blue}{0.5}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    5. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \color{blue}{\left(\left(\left(x + \varepsilon\right) + x\right) \cdot \frac{1}{2}\right)}\right) \]
    6. +-commutative82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\color{blue}{\left(\varepsilon + x\right)} + x\right) \cdot \frac{1}{2}\right)\right) \]
    7. associate-+l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\color{blue}{\left(\varepsilon + \left(x + x\right)\right)} \cdot \frac{1}{2}\right)\right) \]
    8. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot \color{blue}{0.5}\right)\right) \]
  4. Applied egg-rr82.2%

    \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right)} \]
  5. Step-by-step derivation
    1. associate-*r*82.2%

      \[\leadsto \color{blue}{\left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)} \]
    2. *-commutative82.2%

      \[\leadsto \color{blue}{\sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right)} \]
    3. *-commutative82.2%

      \[\leadsto \sin \color{blue}{\left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)} \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    4. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\left(\left(x + x\right) + \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    5. count-282.2%

      \[\leadsto \sin \left(0.5 \cdot \left(\color{blue}{2 \cdot x} + \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    6. fma-define82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\mathsf{fma}\left(2, x, \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    7. *-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \left(x + \left(\varepsilon - x\right)\right)\right)}\right) \]
    8. associate-+r-82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\left(x + \varepsilon\right) - x\right)}\right)\right) \]
    9. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\color{blue}{\left(\varepsilon + x\right)} - x\right)\right)\right) \]
    10. associate--l+99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\varepsilon + \left(x - x\right)\right)}\right)\right) \]
    11. +-inverses99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\varepsilon + \color{blue}{0}\right)\right)\right) \]
    12. distribute-lft-in99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \varepsilon + 0.5 \cdot 0\right)}\right) \]
    13. metadata-eval99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + \color{blue}{0}\right)\right) \]
  6. Simplified99.8%

    \[\leadsto \color{blue}{\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right)} \]
  7. Final simplification99.8%

    \[\leadsto \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \]
  8. Add Preprocessing

Alternative 3: 99.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ -2 \cdot \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* -2.0 (* (sin (* 0.5 eps)) (sin (* 0.5 (+ eps (+ x x)))))))
double code(double x, double eps) {
	return -2.0 * (sin((0.5 * eps)) * sin((0.5 * (eps + (x + x)))));
}
real(8) function code(x, eps)
    ! Herbie alternative 3: -2 * sin(eps/2) * sin((eps + 2x)/2).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (-2.0d0) * (sin((0.5d0 * eps)) * sin((0.5d0 * (eps + (x + x)))))
end function
public static double code(double x, double eps) {
	// Herbie alternative 3: -2 * sin(eps/2) * sin((eps + 2x)/2).
	double s1 = Math.sin(0.5 * eps);
	double s2 = Math.sin(0.5 * (eps + (x + x)));
	return -2.0 * (s1 * s2);
}
def code(x, eps):
	# Herbie alternative 3: -2 * sin(eps/2) * sin((eps + 2x)/2).
	s1 = math.sin(0.5 * eps)
	s2 = math.sin(0.5 * (eps + (x + x)))
	return -2.0 * (s1 * s2)
function code(x, eps)
	# Herbie alternative 3: -2 * sin(eps/2) * sin((eps + 2x)/2).
	return Float64(-2.0 * Float64(sin(Float64(0.5 * eps)) * sin(Float64(0.5 * Float64(eps + Float64(x + x))))))
end
function tmp = code(x, eps)
	% Herbie alternative 3: -2 * sin(eps/2) * sin((eps + 2x)/2).
	tmp = -2.0 * (sin((0.5 * eps)) * sin((0.5 * (eps + (x + x)))));
end
(* Herbie alternative 3: -2 * Sin[eps/2] * Sin[(eps + 2x)/2]. *)
code[x_, eps_] := N[(-2.0 * N[(N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision] * N[Sin[N[(0.5 * N[(eps + N[(x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
-2 \cdot \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. diff-cos82.2%

      \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    2. *-commutative82.2%

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2} \]
    3. div-inv82.2%

      \[\leadsto \left(\sin \color{blue}{\left(\left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{2}\right)} \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2 \]
    4. associate--l+82.2%

      \[\leadsto \left(\sin \left(\color{blue}{\left(x + \left(\varepsilon - x\right)\right)} \cdot \frac{1}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2 \]
    5. metadata-eval82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot \color{blue}{0.5}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2 \]
    6. div-inv82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \color{blue}{\left(\left(\left(x + \varepsilon\right) + x\right) \cdot \frac{1}{2}\right)}\right) \cdot -2 \]
    7. +-commutative82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\color{blue}{\left(\varepsilon + x\right)} + x\right) \cdot \frac{1}{2}\right)\right) \cdot -2 \]
    8. associate-+l+82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\color{blue}{\left(\varepsilon + \left(x + x\right)\right)} \cdot \frac{1}{2}\right)\right) \cdot -2 \]
    9. metadata-eval82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot \color{blue}{0.5}\right)\right) \cdot -2 \]
  4. Applied egg-rr82.2%

    \[\leadsto \color{blue}{\left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right) \cdot -2} \]
  5. Taylor expanded in x around 0 99.7%

    \[\leadsto \left(\sin \left(\color{blue}{\varepsilon} \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right) \cdot -2 \]
  6. Final simplification99.7%

    \[\leadsto -2 \cdot \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right) \]
  7. Add Preprocessing

Alternative 4: 99.2% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(-\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right) \end{array} \]
(FPCore (x eps) :precision binary64 (* eps (- (sin (* 0.5 (fma 2.0 x eps))))))
double code(double x, double eps) {
	return eps * -sin((0.5 * fma(2.0, x, eps)));
}
function code(x, eps)
	# Herbie alternative 4: eps * -sin((2x + eps)/2) — first order in eps.
	return Float64(eps * Float64(-sin(Float64(0.5 * fma(2.0, x, eps)))))
end
(* Herbie alternative 4: eps * -Sin[(2x + eps)/2] — first order in eps. *)
code[x_, eps_] := N[(eps * (-N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(-\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. diff-cos82.2%

      \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    2. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \color{blue}{\left(\left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{2}\right)} \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    3. associate--l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\color{blue}{\left(x + \left(\varepsilon - x\right)\right)} \cdot \frac{1}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    4. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot \color{blue}{0.5}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    5. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \color{blue}{\left(\left(\left(x + \varepsilon\right) + x\right) \cdot \frac{1}{2}\right)}\right) \]
    6. +-commutative82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\color{blue}{\left(\varepsilon + x\right)} + x\right) \cdot \frac{1}{2}\right)\right) \]
    7. associate-+l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\color{blue}{\left(\varepsilon + \left(x + x\right)\right)} \cdot \frac{1}{2}\right)\right) \]
    8. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot \color{blue}{0.5}\right)\right) \]
  4. Applied egg-rr82.2%

    \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right)} \]
  5. Step-by-step derivation
    1. associate-*r*82.2%

      \[\leadsto \color{blue}{\left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)} \]
    2. *-commutative82.2%

      \[\leadsto \color{blue}{\sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right)} \]
    3. *-commutative82.2%

      \[\leadsto \sin \color{blue}{\left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)} \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    4. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\left(\left(x + x\right) + \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    5. count-282.2%

      \[\leadsto \sin \left(0.5 \cdot \left(\color{blue}{2 \cdot x} + \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    6. fma-define82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\mathsf{fma}\left(2, x, \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    7. *-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \left(x + \left(\varepsilon - x\right)\right)\right)}\right) \]
    8. associate-+r-82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\left(x + \varepsilon\right) - x\right)}\right)\right) \]
    9. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\color{blue}{\left(\varepsilon + x\right)} - x\right)\right)\right) \]
    10. associate--l+99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\varepsilon + \left(x - x\right)\right)}\right)\right) \]
    11. +-inverses99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\varepsilon + \color{blue}{0}\right)\right)\right) \]
    12. distribute-lft-in99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \varepsilon + 0.5 \cdot 0\right)}\right) \]
    13. metadata-eval99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + \color{blue}{0}\right)\right) \]
  6. Simplified99.8%

    \[\leadsto \color{blue}{\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right)} \]
  7. Taylor expanded in eps around 0 99.4%

    \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)} \]
  8. Step-by-step derivation
    1. mul-1-neg99.4%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \color{blue}{\left(-\varepsilon\right)} \]
  9. Simplified99.4%

    \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \color{blue}{\left(-\varepsilon\right)} \]
  10. Final simplification99.4%

    \[\leadsto \varepsilon \cdot \left(-\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right) \]
  11. Add Preprocessing

Alternative 5: 99.1% accurate, 1.8× speedup?

\[\begin{array}{l} \\ -2 \cdot \left(\left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* -2.0 (* (* 0.5 eps) (sin (* 0.5 (+ eps (+ x x)))))))
double code(double x, double eps) {
	return -2.0 * ((0.5 * eps) * sin((0.5 * (eps + (x + x)))));
}
real(8) function code(x, eps)
    ! Herbie alternative 5: -2 * (eps/2) * sin((eps + 2x)/2);
    ! the (eps/2) factor stands in for sin(eps/2).
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (-2.0d0) * ((0.5d0 * eps) * sin((0.5d0 * (eps + (x + x)))))
end function
public static double code(double x, double eps) {
	// Herbie alternative 5: -2 * (eps/2) * sin((eps + 2x)/2);
	// the (eps/2) factor stands in for sin(eps/2).
	double half = 0.5 * eps;
	double mid = Math.sin(0.5 * (eps + (x + x)));
	return -2.0 * (half * mid);
}
def code(x, eps):
	# Herbie alternative 5: -2 * (eps/2) * sin((eps + 2x)/2);
	# the (eps/2) factor stands in for sin(eps/2).
	half = 0.5 * eps
	mid = math.sin(0.5 * (eps + (x + x)))
	return -2.0 * (half * mid)
function code(x, eps)
	# Herbie alternative 5: -2 * (eps/2) * sin((eps + 2x)/2);
	# the (eps/2) factor stands in for sin(eps/2).
	return Float64(-2.0 * Float64(Float64(0.5 * eps) * sin(Float64(0.5 * Float64(eps + Float64(x + x))))))
end
function tmp = code(x, eps)
	tmp = -2.0 * ((0.5 * eps) * sin((0.5 * (eps + (x + x)))));
end
code[x_, eps_] := N[(-2.0 * N[(N[(0.5 * eps), $MachinePrecision] * N[Sin[N[(0.5 * N[(eps + N[(x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
-2 \cdot \left(\left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. diff-cos82.2%

      \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    2. *-commutative82.2%

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2} \]
    3. div-inv82.2%

      \[\leadsto \left(\sin \color{blue}{\left(\left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{2}\right)} \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2 \]
    4. associate--l+82.2%

      \[\leadsto \left(\sin \left(\color{blue}{\left(x + \left(\varepsilon - x\right)\right)} \cdot \frac{1}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2 \]
    5. metadata-eval82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot \color{blue}{0.5}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot -2 \]
    6. div-inv82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \color{blue}{\left(\left(\left(x + \varepsilon\right) + x\right) \cdot \frac{1}{2}\right)}\right) \cdot -2 \]
    7. +-commutative82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\color{blue}{\left(\varepsilon + x\right)} + x\right) \cdot \frac{1}{2}\right)\right) \cdot -2 \]
    8. associate-+l+82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\color{blue}{\left(\varepsilon + \left(x + x\right)\right)} \cdot \frac{1}{2}\right)\right) \cdot -2 \]
    9. metadata-eval82.2%

      \[\leadsto \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot \color{blue}{0.5}\right)\right) \cdot -2 \]
  4. Applied egg-rr82.2%

    \[\leadsto \color{blue}{\left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right) \cdot -2} \]
  5. Taylor expanded in x around 0 99.7%

    \[\leadsto \left(\sin \left(\color{blue}{\varepsilon} \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right) \cdot -2 \]
  6. Taylor expanded in eps around 0 99.4%

    \[\leadsto \left(\color{blue}{\left(0.5 \cdot \varepsilon\right)} \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right) \cdot -2 \]
  7. Final simplification99.4%

    \[\leadsto -2 \cdot \left(\left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right) \]
  8. Add Preprocessing

Alternative 6: 99.1% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \left(-\varepsilon\right) \cdot \sin \left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* (- eps) (sin (* x (+ 1.0 (/ (* 0.5 eps) x))))))
/* cos(x+eps) - cos(x) approximated as -eps * sin(x + eps/2), where the shifted
   angle is written as x*(1 + 0.5*eps/x) (Taylor expansion in x around inf,
   derivation step 7); sin(eps/2) has been linearized to eps/2. */
double code(double x, double eps) {
	return -eps * sin((x * (1.0 + ((0.5 * eps) / x))));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = -eps * sin((x * (1.0d0 + ((0.5d0 * eps) / x))))
end function
public static double code(double x, double eps) {
	return -eps * Math.sin((x * (1.0 + ((0.5 * eps) / x))));
}
def code(x, eps):
	# -eps * sin(x*(1 + 0.5*eps/x)): first-order-in-eps form of
	# cos(x+eps) - cos(x). Operation order matches the FPCore exactly.
	relative_shift = (0.5 * eps) / x
	shifted_angle = x * (1.0 + relative_shift)
	return -eps * math.sin(shifted_angle)
function code(x, eps)
	return Float64(Float64(-eps) * sin(Float64(x * Float64(1.0 + Float64(Float64(0.5 * eps) / x)))))
end
function tmp = code(x, eps)
	tmp = -eps * sin((x * (1.0 + ((0.5 * eps) / x))));
end
code[x_, eps_] := N[((-eps) * N[Sin[N[(x * N[(1.0 + N[(N[(0.5 * eps), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-\varepsilon\right) \cdot \sin \left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. diff-cos82.2%

      \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    2. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \color{blue}{\left(\left(\left(x + \varepsilon\right) - x\right) \cdot \frac{1}{2}\right)} \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    3. associate--l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\color{blue}{\left(x + \left(\varepsilon - x\right)\right)} \cdot \frac{1}{2}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    4. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot \color{blue}{0.5}\right) \cdot \sin \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \]
    5. div-inv82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \color{blue}{\left(\left(\left(x + \varepsilon\right) + x\right) \cdot \frac{1}{2}\right)}\right) \]
    6. +-commutative82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\color{blue}{\left(\varepsilon + x\right)} + x\right) \cdot \frac{1}{2}\right)\right) \]
    7. associate-+l+82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\color{blue}{\left(\varepsilon + \left(x + x\right)\right)} \cdot \frac{1}{2}\right)\right) \]
    8. metadata-eval82.2%

      \[\leadsto -2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot \color{blue}{0.5}\right)\right) \]
  4. Applied egg-rr82.2%

    \[\leadsto \color{blue}{-2 \cdot \left(\sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)\right)} \]
  5. Step-by-step derivation
    1. associate-*r*82.2%

      \[\leadsto \color{blue}{\left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \cdot \sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right)} \]
    2. *-commutative82.2%

      \[\leadsto \color{blue}{\sin \left(\left(\varepsilon + \left(x + x\right)\right) \cdot 0.5\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right)} \]
    3. *-commutative82.2%

      \[\leadsto \sin \color{blue}{\left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)} \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    4. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\left(\left(x + x\right) + \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    5. count-282.2%

      \[\leadsto \sin \left(0.5 \cdot \left(\color{blue}{2 \cdot x} + \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    6. fma-define82.2%

      \[\leadsto \sin \left(0.5 \cdot \color{blue}{\mathsf{fma}\left(2, x, \varepsilon\right)}\right) \cdot \left(-2 \cdot \sin \left(\left(x + \left(\varepsilon - x\right)\right) \cdot 0.5\right)\right) \]
    7. *-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \left(x + \left(\varepsilon - x\right)\right)\right)}\right) \]
    8. associate-+r-82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\left(x + \varepsilon\right) - x\right)}\right)\right) \]
    9. +-commutative82.2%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\color{blue}{\left(\varepsilon + x\right)} - x\right)\right)\right) \]
    10. associate--l+99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \color{blue}{\left(\varepsilon + \left(x - x\right)\right)}\right)\right) \]
    11. +-inverses99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \left(\varepsilon + \color{blue}{0}\right)\right)\right) \]
    12. distribute-lft-in99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \color{blue}{\left(0.5 \cdot \varepsilon + 0.5 \cdot 0\right)}\right) \]
    13. metadata-eval99.8%

      \[\leadsto \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + \color{blue}{0}\right)\right) \]
  6. Simplified99.8%

    \[\leadsto \color{blue}{\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right)} \]
  7. Taylor expanded in x around inf 99.6%

    \[\leadsto \sin \color{blue}{\left(x \cdot \left(1 + 0.5 \cdot \frac{\varepsilon}{x}\right)\right)} \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  8. Step-by-step derivation
    1. associate-*r/99.6%

      \[\leadsto \sin \left(x \cdot \left(1 + \color{blue}{\frac{0.5 \cdot \varepsilon}{x}}\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  9. Simplified99.6%

    \[\leadsto \sin \color{blue}{\left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right)} \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon + 0\right)\right) \]
  10. Taylor expanded in eps around 0 99.3%

    \[\leadsto \sin \left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right) \cdot \color{blue}{\left(-1 \cdot \varepsilon\right)} \]
  11. Step-by-step derivation
    1. mul-1-neg99.4%

      \[\leadsto \sin \left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right) \cdot \color{blue}{\left(-\varepsilon\right)} \]
  12. Simplified99.3%

    \[\leadsto \sin \left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right) \cdot \color{blue}{\left(-\varepsilon\right)} \]
  13. Final simplification99.3%

    \[\leadsto \left(-\varepsilon\right) \cdot \sin \left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right) \]
  14. Add Preprocessing

Alternative 7: 98.7% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot -0.5 - \sin x\right) \end{array} \]
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) (sin x))))
/* Second-order Taylor expansion in eps of cos(x+eps) - cos(x):
   eps * (-0.5*eps - sin(x)), with the cos(x) factor of the quadratic term
   replaced by 1 (Taylor expansion in x around 0, derivation step 6). */
double code(double x, double eps) {
	return eps * ((eps * -0.5) - sin(x));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((eps * (-0.5d0)) - sin(x))
end function
public static double code(double x, double eps) {
	return eps * ((eps * -0.5) - Math.sin(x));
}
def code(x, eps):
	# eps * (-0.5*eps - sin(x)): second-order Taylor expansion in eps of
	# cos(x+eps) - cos(x). Operation order matches the FPCore exactly.
	quadratic_term = eps * -0.5
	return eps * (quadratic_term - math.sin(x))
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * -0.5) - sin(x)))
end
function tmp = code(x, eps)
	tmp = eps * ((eps * -0.5) - sin(x));
end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - \sin x\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. associate-*r*99.4%

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(-0.5 \cdot \varepsilon\right) \cdot \cos x} - \sin x\right) \]
  5. Simplified99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(-0.5 \cdot \varepsilon\right) \cdot \cos x - \sin x\right)} \]
  6. Taylor expanded in x around 0 98.9%

    \[\leadsto \varepsilon \cdot \left(\left(-0.5 \cdot \varepsilon\right) \cdot \color{blue}{1} - \sin x\right) \]
  7. Final simplification98.9%

    \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot -0.5 - \sin x\right) \]
  8. Add Preprocessing

Alternative 8: 98.0% accurate, 10.8× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666 + \varepsilon \cdot 0.25\right) + -1\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (+
   (* eps -0.5)
   (* x (+ (* x (+ (* x 0.16666666666666666) (* eps 0.25))) -1.0)))))
/* Polynomial approximation of cos(x+eps) - cos(x): Taylor expansion in eps,
   then in x around 0 (derivation step 6), evaluated in Horner form.
   0.16666666666666666 is 1/6 (the x^3/6 term), 0.25 the eps*x^2/4 cross term. */
double code(double x, double eps) {
	return eps * ((eps * -0.5) + (x * ((x * ((x * 0.16666666666666666) + (eps * 0.25))) + -1.0)));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((eps * (-0.5d0)) + (x * ((x * ((x * 0.16666666666666666d0) + (eps * 0.25d0))) + (-1.0d0))))
end function
public static double code(double x, double eps) {
	return eps * ((eps * -0.5) + (x * ((x * ((x * 0.16666666666666666) + (eps * 0.25))) + -1.0)));
}
def code(x, eps):
	# Horner-form Taylor polynomial for cos(x+eps) - cos(x); 0.1666... is 1/6.
	# Operation order matches the FPCore exactly.
	innermost = (x * 0.16666666666666666) + (eps * 0.25)
	bracket = (x * innermost) + -1.0
	return eps * ((eps * -0.5) + (x * bracket))
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(Float64(x * Float64(Float64(x * 0.16666666666666666) + Float64(eps * 0.25))) + -1.0))))
end
function tmp = code(x, eps)
	tmp = eps * ((eps * -0.5) + (x * ((x * ((x * 0.16666666666666666) + (eps * 0.25))) + -1.0)));
end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(N[(x * N[(N[(x * 0.16666666666666666), $MachinePrecision] + N[(eps * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666 + \varepsilon \cdot 0.25\right) + -1\right)\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. associate-*r*99.4%

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(-0.5 \cdot \varepsilon\right) \cdot \cos x} - \sin x\right) \]
  5. Simplified99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(-0.5 \cdot \varepsilon\right) \cdot \cos x - \sin x\right)} \]
  6. Taylor expanded in x around 0 98.5%

    \[\leadsto \varepsilon \cdot \color{blue}{\left(-0.5 \cdot \varepsilon + x \cdot \left(x \cdot \left(0.16666666666666666 \cdot x + 0.25 \cdot \varepsilon\right) - 1\right)\right)} \]
  7. Final simplification98.5%

    \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666 + \varepsilon \cdot 0.25\right) + -1\right)\right) \]
  8. Add Preprocessing

Alternative 9: 97.9% accurate, 13.7× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666\right) + -1\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* eps (+ (* eps -0.5) (* x (+ (* x (* x 0.16666666666666666)) -1.0)))))
/* Like Alternative 8 but with the eps*x^2/4 cross term dropped
   (Taylor expansion in x around inf, derivation step 7):
   eps * (-0.5*eps + x*(x^2/6 - 1)). 0.16666666666666666 is 1/6. */
double code(double x, double eps) {
	return eps * ((eps * -0.5) + (x * ((x * (x * 0.16666666666666666)) + -1.0)));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((eps * (-0.5d0)) + (x * ((x * (x * 0.16666666666666666d0)) + (-1.0d0))))
end function
public static double code(double x, double eps) {
	return eps * ((eps * -0.5) + (x * ((x * (x * 0.16666666666666666)) + -1.0)));
}
def code(x, eps):
	# eps * (-0.5*eps + x*(x^2/6 - 1)): cubic Taylor form without the eps*x^2
	# cross term. Operation order matches the FPCore exactly.
	cubic = x * (x * 0.16666666666666666)
	bracket = cubic + -1.0
	return eps * ((eps * -0.5) + (x * bracket))
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(Float64(x * Float64(x * 0.16666666666666666)) + -1.0))))
end
function tmp = code(x, eps)
	tmp = eps * ((eps * -0.5) + (x * ((x * (x * 0.16666666666666666)) + -1.0)));
end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(N[(x * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666\right) + -1\right)\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. associate-*r*99.4%

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(-0.5 \cdot \varepsilon\right) \cdot \cos x} - \sin x\right) \]
  5. Simplified99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(-0.5 \cdot \varepsilon\right) \cdot \cos x - \sin x\right)} \]
  6. Taylor expanded in x around 0 98.5%

    \[\leadsto \varepsilon \cdot \color{blue}{\left(-0.5 \cdot \varepsilon + x \cdot \left(x \cdot \left(0.16666666666666666 \cdot x + 0.25 \cdot \varepsilon\right) - 1\right)\right)} \]
  7. Taylor expanded in x around inf 98.5%

    \[\leadsto \varepsilon \cdot \left(-0.5 \cdot \varepsilon + x \cdot \left(x \cdot \color{blue}{\left(0.16666666666666666 \cdot x\right)} - 1\right)\right) \]
  8. Step-by-step derivation
    1. *-commutative98.5%

      \[\leadsto \varepsilon \cdot \left(-0.5 \cdot \varepsilon + x \cdot \left(x \cdot \color{blue}{\left(x \cdot 0.16666666666666666\right)} - 1\right)\right) \]
  9. Simplified98.5%

    \[\leadsto \varepsilon \cdot \left(-0.5 \cdot \varepsilon + x \cdot \left(x \cdot \color{blue}{\left(x \cdot 0.16666666666666666\right)} - 1\right)\right) \]
  10. Final simplification98.5%

    \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666\right) + -1\right)\right) \]
  11. Add Preprocessing

Alternative 10: 97.5% accurate, 29.3× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right) \end{array} \]
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) x)))
/* Fully linearized form: eps * (-0.5*eps - x), obtained by Taylor expansion
   in eps and then in x around 0 (derivation step 6), i.e. sin(x) ~ x. */
double code(double x, double eps) {
	return eps * ((eps * -0.5) - x);
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((eps * (-0.5d0)) - x)
end function
public static double code(double x, double eps) {
	return eps * ((eps * -0.5) - x);
}
def code(x, eps):
	# eps * (-0.5*eps - x): linearized cos(x+eps) - cos(x) with sin(x) ~ x.
	# Operation order matches the FPCore exactly.
	quadratic_term = eps * -0.5
	return eps * (quadratic_term - x)
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * -0.5) - x))
end
function tmp = code(x, eps)
	tmp = eps * ((eps * -0.5) - x);
end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. associate-*r*99.4%

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(-0.5 \cdot \varepsilon\right) \cdot \cos x} - \sin x\right) \]
  5. Simplified99.4%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\left(-0.5 \cdot \varepsilon\right) \cdot \cos x - \sin x\right)} \]
  6. Taylor expanded in x around 0 97.8%

    \[\leadsto \varepsilon \cdot \color{blue}{\left(-1 \cdot x + -0.5 \cdot \varepsilon\right)} \]
  7. Step-by-step derivation
    1. neg-mul-197.8%

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(-x\right)} + -0.5 \cdot \varepsilon\right) \]
    2. +-commutative97.8%

      \[\leadsto \varepsilon \cdot \color{blue}{\left(-0.5 \cdot \varepsilon + \left(-x\right)\right)} \]
    3. unsub-neg97.8%

      \[\leadsto \varepsilon \cdot \color{blue}{\left(-0.5 \cdot \varepsilon - x\right)} \]
    4. *-commutative97.8%

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot -0.5} - x\right) \]
  8. Simplified97.8%

    \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot -0.5 - x\right)} \]
  9. Add Preprocessing

Alternative 11: 78.7% accurate, 51.3× speedup?

\[\begin{array}{l} \\ x \cdot \left(-\varepsilon\right) \end{array} \]
(FPCore (x eps) :precision binary64 (* x (- eps)))
/* First-order in both variables: cos(x+eps) - cos(x) ~ -eps*sin(x) ~ -eps*x
   (Taylor expansions in eps and x around 0, derivation steps 3 and 6). */
double code(double x, double eps) {
	return x * -eps;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = x * -eps
end function
public static double code(double x, double eps) {
	return x * -eps;
}
def code(x, eps):
	# x * (-eps): first-order approximation of cos(x+eps) - cos(x) near x = 0.
	negated_eps = -eps
	return x * negated_eps
function code(x, eps)
	return Float64(x * Float64(-eps))
end
function tmp = code(x, eps)
	tmp = x * -eps;
end
code[x_, eps_] := N[(x * (-eps)), $MachinePrecision]
\begin{array}{l}

\\
x \cdot \left(-\varepsilon\right)
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 80.5%

    \[\leadsto \color{blue}{-1 \cdot \left(\varepsilon \cdot \sin x\right)} \]
  4. Step-by-step derivation
    1. associate-*r*80.5%

      \[\leadsto \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \sin x} \]
    2. mul-1-neg80.5%

      \[\leadsto \color{blue}{\left(-\varepsilon\right)} \cdot \sin x \]
  5. Simplified80.5%

    \[\leadsto \color{blue}{\left(-\varepsilon\right) \cdot \sin x} \]
  6. Taylor expanded in x around 0 79.8%

    \[\leadsto \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)} \]
  7. Step-by-step derivation
    1. associate-*r*79.8%

      \[\leadsto \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x} \]
    2. mul-1-neg79.8%

      \[\leadsto \color{blue}{\left(-\varepsilon\right)} \cdot x \]
  8. Simplified79.8%

    \[\leadsto \color{blue}{\left(-\varepsilon\right) \cdot x} \]
  9. Final simplification79.8%

    \[\leadsto x \cdot \left(-\varepsilon\right) \]
  10. Add Preprocessing

Alternative 12: 51.5% accurate, 205.0× speedup?

\[\begin{array}{l} \\ 0 \end{array} \]
(FPCore (x eps) :precision binary64 0.0)
/* Degenerate zeroth-order approximation: cos(x+eps) - cos(x) ~ 0
   (egg-rr rewrite in derivation step 6). Fastest but least accurate (51.5%). */
double code(double x, double eps) {
	return 0.0;
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = 0.0d0
end function
public static double code(double x, double eps) {
	return 0.0;
}
def code(x, eps):
	# Degenerate approximation: the difference of cosines is treated as zero.
	result = 0.0
	return result
function code(x, eps)
	return 0.0
end
function tmp = code(x, eps)
	tmp = 0.0;
end
code[x_, eps_] := 0.0
\begin{array}{l}

\\
0
\end{array}
Derivation
  1. Initial program 55.2%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0 80.5%

    \[\leadsto \color{blue}{-1 \cdot \left(\varepsilon \cdot \sin x\right)} \]
  4. Step-by-step derivation
    1. associate-*r*80.5%

      \[\leadsto \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \sin x} \]
    2. mul-1-neg80.5%

      \[\leadsto \color{blue}{\left(-\varepsilon\right)} \cdot \sin x \]
  5. Simplified80.5%

    \[\leadsto \color{blue}{\left(-\varepsilon\right) \cdot \sin x} \]
  6. Applied egg-rr53.9%

    \[\leadsto \color{blue}{0} \]
  7. Add Preprocessing

Developer Target 1: 99.7% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
/* Developer target: the exact product identity
   cos(x+eps) - cos(x) = -2 * sin(x + eps/2) * sin(eps/2),
   which avoids subtracting nearly-equal cosines. */
double code(double x, double eps) {
	return (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
}
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = ((-2.0d0) * sin((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
end function
public static double code(double x, double eps) {
	return (-2.0 * Math.sin((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
}
def code(x, eps):
	# Exact identity: cos(x+eps) - cos(x) = -2*sin(x + eps/2)*sin(eps/2).
	# Operation order matches the FPCore exactly.
	half = eps / 2.0
	left_factor = -2.0 * math.sin(x + half)
	return left_factor * math.sin(half)
function code(x, eps)
	return Float64(Float64(-2.0 * sin(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0)))
end
function tmp = code(x, eps)
	tmp = (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
end
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
\end{array}

Reproduce

?
herbie shell --seed 2024123 
(FPCore (x eps)
  :name "2cos (problem 3.3.5)"
  :precision binary64
  :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))

  :alt
  (! :herbie-platform default (* -2 (sin (+ x (/ eps 2))) (sin (/ eps 2))))

  (- (cos (+ x eps)) (cos x)))