2sin (example 3.3)

Percentage Accurate: 61.9% → 99.9%
Time: 13.0s
Alternatives: 11
Speedup: 34.5×

Specification

?
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \sin \left(x + \varepsilon\right) - \sin x \end{array} \]
(FPCore (x eps) :precision binary64 (- (sin (+ x eps)) (sin x)))
double code(double x, double eps) {
	return sin((x + eps)) - sin(x);
}
! Computes sin(x + eps) - sin(x) in double precision (naive form;
! per this report it loses accuracy when eps is small relative to x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = sin((x + eps)) - sin(x)
end function
public static double code(double x, double eps) {
	// Naive difference of sines: sin(x + eps) - sin(x).
	final double shifted = Math.sin(x + eps);
	return shifted - Math.sin(x);
}
def code(x, eps):
	"""Return sin(x + eps) - sin(x), computed naively."""
	shifted = math.sin(x + eps)
	return shifted - math.sin(x)
function code(x, eps)
	# Naive difference of sines; the sum x + eps is rounded to Float64 first.
	shifted = Float64(x + eps)
	return Float64(sin(shifted) - sin(x))
end
function tmp = code(x, eps)
	% Naive difference of sines: sin(x + eps) - sin(x).
	tmp = sin((x + eps)) - sin(x);
end
(* Naive difference of sines at $MachinePrecision; per this report it loses accuracy when eps is small relative to x. *)
code[x_, eps_] := N[(N[Sin[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\sin \left(x + \varepsilon\right) - \sin x
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed?

Herbie found 11 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 61.9% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \sin \left(x + \varepsilon\right) - \sin x \end{array} \]
(FPCore (x eps) :precision binary64 (- (sin (+ x eps)) (sin x)))
double code(double x, double eps) {
	/* Naive form: sin(x + eps) - sin(x); the report measures 61.9% accuracy for it. */
	return sin((x + eps)) - sin(x);
}
! Naive form: sin(x + eps) - sin(x) in double precision.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = sin((x + eps)) - sin(x)
end function
public static double code(double x, double eps) {
	// Naive form: sin(x + eps) - sin(x); 61.9% accurate per this report.
	return Math.sin((x + eps)) - Math.sin(x);
}
def code(x, eps):
	"""Return sin(x + eps) - sin(x) (naive form; 61.9% accurate per this report)."""
	return math.sin((x + eps)) - math.sin(x)
function code(x, eps)
	# Naive form: sin(x + eps) - sin(x); the sum is rounded to Float64 first.
	return Float64(sin(Float64(x + eps)) - sin(x))
end
function tmp = code(x, eps)
	% Naive form: sin(x + eps) - sin(x).
	tmp = sin((x + eps)) - sin(x);
end
(* Naive form: Sin[x + eps] - Sin[x] at $MachinePrecision. *)
code[x_, eps_] := N[(N[Sin[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\sin \left(x + \varepsilon\right) - \sin x
\end{array}

Alternative 1: 99.9% accurate, 0.9× speedup?

\[\begin{array}{l} \\ 2 \cdot \left(\sin \left(\varepsilon \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* 2.0 (* (sin (* eps 0.5)) (cos (fma 0.5 eps x)))))
double code(double x, double eps) {
	return 2.0 * (sin((eps * 0.5)) * cos(fma(0.5, eps, x)));
}
function code(x, eps)
	# Product form 2*sin(eps/2)*cos(x + eps/2), with fma for the shifted angle.
	half_sine = sin(Float64(eps * 0.5))
	shifted_cos = cos(fma(0.5, eps, x))
	return Float64(2.0 * Float64(half_sine * shifted_cos))
end
(* Product form 2 Sin[eps/2] Cos[x + eps/2]; avoids the cancellation of the naive difference. *)
code[x_, eps_] := N[(2.0 * N[(N[Sin[N[(eps * 0.5), $MachinePrecision]], $MachinePrecision] * N[Cos[N[(0.5 * eps + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
2 \cdot \left(\sin \left(\varepsilon \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right)
\end{array}
Derivation
  1. Initial program 62.5%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right) - \sin x} \]
    2. lift-sin.f64N/A

      \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right)} - \sin x \]
    3. lift-sin.f64N/A

      \[\leadsto \sin \left(x + \varepsilon\right) - \color{blue}{\sin x} \]
    4. diff-sinN/A

      \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    5. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
    6. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
  4. Applied rewrites99.9%

    \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
  5. Taylor expanded in eps around inf

    \[\leadsto \color{blue}{\left(\cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right) \cdot \sin \left(\frac{1}{2} \cdot \varepsilon\right)\right)} \cdot 2 \]
  6. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right)\right)} \cdot 2 \]
    2. metadata-evalN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{\left(\mathsf{neg}\left(-2\right)\right)} \cdot x\right)\right)\right) \cdot 2 \]
    3. cancel-sign-sub-invN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon - -2 \cdot x\right)}\right)\right) \cdot 2 \]
    4. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right)} \cdot 2 \]
    5. lower-sin.f64N/A

      \[\leadsto \left(\color{blue}{\sin \left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
    6. lower-*.f64N/A

      \[\leadsto \left(\sin \color{blue}{\left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
    7. lower-cos.f64N/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \color{blue}{\cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)}\right) \cdot 2 \]
    8. cancel-sign-sub-invN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon + \left(\mathsf{neg}\left(-2\right)\right) \cdot x\right)}\right)\right) \cdot 2 \]
    9. metadata-evalN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{2} \cdot x\right)\right)\right) \cdot 2 \]
    10. distribute-lft-inN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\frac{1}{2} \cdot \varepsilon + \frac{1}{2} \cdot \left(2 \cdot x\right)\right)}\right) \cdot 2 \]
    11. associate-*r*N/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{\left(\frac{1}{2} \cdot 2\right) \cdot x}\right)\right) \cdot 2 \]
    12. metadata-evalN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{1} \cdot x\right)\right) \cdot 2 \]
    13. *-lft-identityN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{x}\right)\right) \cdot 2 \]
    14. lower-fma.f6499.9

      \[\leadsto \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)}\right) \cdot 2 \]
  7. Applied rewrites99.9%

    \[\leadsto \color{blue}{\left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right)} \cdot 2 \]
  8. Final simplification99.9%

    \[\leadsto 2 \cdot \left(\sin \left(\varepsilon \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right) \]
  9. Add Preprocessing

Alternative 2: 99.8% accurate, 1.3× speedup?

\[\begin{array}{l} \\ 2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -1.5500992063492063 \cdot 10^{-6}, 0.00026041666666666666\right), -0.020833333333333332\right), 0.5\right)\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  2.0
  (*
   (cos (fma 0.5 eps x))
   (*
    eps
    (fma
     eps
     (*
      eps
      (fma
       (* eps eps)
       (fma eps (* eps -1.5500992063492063e-6) 0.00026041666666666666)
       -0.020833333333333332))
     0.5)))))
double code(double x, double eps) {
	return 2.0 * (cos(fma(0.5, eps, x)) * (eps * fma(eps, (eps * fma((eps * eps), fma(eps, (eps * -1.5500992063492063e-6), 0.00026041666666666666), -0.020833333333333332)), 0.5)));
}
function code(x, eps)
	# Taylor-polynomial form of 2*sin(eps/2) (see the derivation below), times cos(x + eps/2).
	return Float64(2.0 * Float64(cos(fma(0.5, eps, x)) * Float64(eps * fma(eps, Float64(eps * fma(Float64(eps * eps), fma(eps, Float64(eps * -1.5500992063492063e-6), 0.00026041666666666666), -0.020833333333333332)), 0.5))))
end
(* Taylor-polynomial form of 2 Sin[eps/2], times Cos[x + eps/2]. *)
code[x_, eps_] := N[(2.0 * N[(N[Cos[N[(0.5 * eps + x), $MachinePrecision]], $MachinePrecision] * N[(eps * N[(eps * N[(eps * N[(N[(eps * eps), $MachinePrecision] * N[(eps * N[(eps * -1.5500992063492063e-6), $MachinePrecision] + 0.00026041666666666666), $MachinePrecision] + -0.020833333333333332), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -1.5500992063492063 \cdot 10^{-6}, 0.00026041666666666666\right), -0.020833333333333332\right), 0.5\right)\right)\right)
\end{array}
Derivation
  1. Initial program 62.5%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right) - \sin x} \]
    2. lift-sin.f64N/A

      \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right)} - \sin x \]
    3. lift-sin.f64N/A

      \[\leadsto \sin \left(x + \varepsilon\right) - \color{blue}{\sin x} \]
    4. diff-sinN/A

      \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    5. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
    6. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
  4. Applied rewrites99.9%

    \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
  5. Taylor expanded in eps around inf

    \[\leadsto \color{blue}{\left(\cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right) \cdot \sin \left(\frac{1}{2} \cdot \varepsilon\right)\right)} \cdot 2 \]
  6. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right)\right)} \cdot 2 \]
    2. metadata-evalN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{\left(\mathsf{neg}\left(-2\right)\right)} \cdot x\right)\right)\right) \cdot 2 \]
    3. cancel-sign-sub-invN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon - -2 \cdot x\right)}\right)\right) \cdot 2 \]
    4. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right)} \cdot 2 \]
    5. lower-sin.f64N/A

      \[\leadsto \left(\color{blue}{\sin \left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
    6. lower-*.f64N/A

      \[\leadsto \left(\sin \color{blue}{\left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
    7. lower-cos.f64N/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \color{blue}{\cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)}\right) \cdot 2 \]
    8. cancel-sign-sub-invN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon + \left(\mathsf{neg}\left(-2\right)\right) \cdot x\right)}\right)\right) \cdot 2 \]
    9. metadata-evalN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{2} \cdot x\right)\right)\right) \cdot 2 \]
    10. distribute-lft-inN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\frac{1}{2} \cdot \varepsilon + \frac{1}{2} \cdot \left(2 \cdot x\right)\right)}\right) \cdot 2 \]
    11. associate-*r*N/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{\left(\frac{1}{2} \cdot 2\right) \cdot x}\right)\right) \cdot 2 \]
    12. metadata-evalN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{1} \cdot x\right)\right) \cdot 2 \]
    13. *-lft-identityN/A

      \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{x}\right)\right) \cdot 2 \]
    14. lower-fma.f6499.9

      \[\leadsto \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)}\right) \cdot 2 \]
  7. Applied rewrites99.9%

    \[\leadsto \color{blue}{\left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right)} \cdot 2 \]
  8. Taylor expanded in eps around 0

    \[\leadsto \left(\left(\varepsilon \cdot \left(\frac{1}{2} + {\varepsilon}^{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{3840} + \frac{-1}{645120} \cdot {\varepsilon}^{2}\right) - \frac{1}{48}\right)\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(\frac{1}{2}, \varepsilon, x\right)\right)}\right) \cdot 2 \]
  9. Step-by-step derivation
    1. Applied rewrites99.9%

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -1.5500992063492063 \cdot 10^{-6}, 0.00026041666666666666\right), -0.020833333333333332\right), 0.5\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)}\right) \cdot 2 \]
    2. Final simplification99.9%

      \[\leadsto 2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -1.5500992063492063 \cdot 10^{-6}, 0.00026041666666666666\right), -0.020833333333333332\right), 0.5\right)\right)\right) \]
    3. Add Preprocessing

    Alternative 3: 99.8% accurate, 1.4× speedup?

    \[\begin{array}{l} \\ 2 \cdot \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.00026041666666666666, -0.020833333333333332\right), 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)\right) \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (*
      2.0
      (*
       (*
        eps
        (fma
         eps
         (* eps (fma (* eps eps) 0.00026041666666666666 -0.020833333333333332))
         0.5))
       (cos (fma eps 0.5 x)))))
    double code(double x, double eps) {
    	return 2.0 * ((eps * fma(eps, (eps * fma((eps * eps), 0.00026041666666666666, -0.020833333333333332)), 0.5)) * cos(fma(eps, 0.5, x)));
    }
    
    function code(x, eps)
    	# Shorter Taylor-polynomial form of 2*sin(eps/2), times cos(x + eps/2).
    	return Float64(2.0 * Float64(Float64(eps * fma(eps, Float64(eps * fma(Float64(eps * eps), 0.00026041666666666666, -0.020833333333333332)), 0.5)) * cos(fma(eps, 0.5, x))))
    end
    
    (* Shorter Taylor-polynomial form of 2 Sin[eps/2], times Cos[x + eps/2]. *)
    code[x_, eps_] := N[(2.0 * N[(N[(eps * N[(eps * N[(eps * N[(N[(eps * eps), $MachinePrecision] * 0.00026041666666666666 + -0.020833333333333332), $MachinePrecision]), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(eps * 0.5 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    2 \cdot \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.00026041666666666666, -0.020833333333333332\right), 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)\right)
    \end{array}
    
    Derivation
    1. Initial program 62.5%

      \[\sin \left(x + \varepsilon\right) - \sin x \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right) - \sin x} \]
      2. lift-sin.f64N/A

        \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right)} - \sin x \]
      3. lift-sin.f64N/A

        \[\leadsto \sin \left(x + \varepsilon\right) - \color{blue}{\sin x} \]
      4. diff-sinN/A

        \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
      5. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
      6. lower-*.f64N/A

        \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
    4. Applied rewrites99.9%

      \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
    5. Taylor expanded in eps around 0

      \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \left(\frac{1}{2} + {\varepsilon}^{2} \cdot \left(\frac{1}{3840} \cdot {\varepsilon}^{2} - \frac{1}{48}\right)\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    6. Step-by-step derivation
      1. lower-*.f64N/A

        \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \left(\frac{1}{2} + {\varepsilon}^{2} \cdot \left(\frac{1}{3840} \cdot {\varepsilon}^{2} - \frac{1}{48}\right)\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      2. +-commutativeN/A

        \[\leadsto \left(\left(\varepsilon \cdot \color{blue}{\left({\varepsilon}^{2} \cdot \left(\frac{1}{3840} \cdot {\varepsilon}^{2} - \frac{1}{48}\right) + \frac{1}{2}\right)}\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      3. unpow2N/A

        \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \left(\frac{1}{3840} \cdot {\varepsilon}^{2} - \frac{1}{48}\right) + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      4. associate-*l*N/A

        \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{1}{3840} \cdot {\varepsilon}^{2} - \frac{1}{48}\right)\right)} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      5. lower-fma.f64N/A

        \[\leadsto \left(\left(\varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot \left(\frac{1}{3840} \cdot {\varepsilon}^{2} - \frac{1}{48}\right), \frac{1}{2}\right)}\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      6. lower-*.f64N/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot \left(\frac{1}{3840} \cdot {\varepsilon}^{2} - \frac{1}{48}\right)}, \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      7. sub-negN/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \color{blue}{\left(\frac{1}{3840} \cdot {\varepsilon}^{2} + \left(\mathsf{neg}\left(\frac{1}{48}\right)\right)\right)}, \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      8. *-commutativeN/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \left(\color{blue}{{\varepsilon}^{2} \cdot \frac{1}{3840}} + \left(\mathsf{neg}\left(\frac{1}{48}\right)\right)\right), \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      9. metadata-evalN/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \left({\varepsilon}^{2} \cdot \frac{1}{3840} + \color{blue}{\frac{-1}{48}}\right), \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      10. lower-fma.f64N/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \color{blue}{\mathsf{fma}\left({\varepsilon}^{2}, \frac{1}{3840}, \frac{-1}{48}\right)}, \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      11. unpow2N/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\color{blue}{\varepsilon \cdot \varepsilon}, \frac{1}{3840}, \frac{-1}{48}\right), \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
      12. lower-*.f6499.8

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\color{blue}{\varepsilon \cdot \varepsilon}, 0.00026041666666666666, -0.020833333333333332\right), 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
    7. Applied rewrites99.8%

      \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.00026041666666666666, -0.020833333333333332\right), 0.5\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
    8. Taylor expanded in eps around 0

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \frac{1}{3840}, \frac{-1}{48}\right), \frac{1}{2}\right)\right) \cdot \cos \color{blue}{\left(x + \frac{1}{2} \cdot \varepsilon\right)}\right) \cdot 2 \]
    9. Step-by-step derivation
      1. +-commutativeN/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \frac{1}{3840}, \frac{-1}{48}\right), \frac{1}{2}\right)\right) \cdot \cos \color{blue}{\left(\frac{1}{2} \cdot \varepsilon + x\right)}\right) \cdot 2 \]
      2. *-commutativeN/A

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, \frac{1}{3840}, \frac{-1}{48}\right), \frac{1}{2}\right)\right) \cdot \cos \left(\color{blue}{\varepsilon \cdot \frac{1}{2}} + x\right)\right) \cdot 2 \]
      3. lower-fma.f6499.8

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.00026041666666666666, -0.020833333333333332\right), 0.5\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)}\right) \cdot 2 \]
    10. Applied rewrites99.8%

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.00026041666666666666, -0.020833333333333332\right), 0.5\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)}\right) \cdot 2 \]
    11. Final simplification99.8%

      \[\leadsto 2 \cdot \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.00026041666666666666, -0.020833333333333332\right), 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)\right) \]
    12. Add Preprocessing

    Alternative 4: 99.7% accurate, 1.6× speedup?

    \[\begin{array}{l} \\ 2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right)\right) \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (*
      2.0
      (*
       (cos (fma 0.5 eps x))
       (* eps (fma eps (* eps -0.020833333333333332) 0.5)))))
    double code(double x, double eps) {
    	return 2.0 * (cos(fma(0.5, eps, x)) * (eps * fma(eps, (eps * -0.020833333333333332), 0.5)));
    }
    
    function code(x, eps)
    	# Third-order form: 2 * cos(x + eps/2) * eps * (0.5 - eps^2/48).
    	return Float64(2.0 * Float64(cos(fma(0.5, eps, x)) * Float64(eps * fma(eps, Float64(eps * -0.020833333333333332), 0.5))))
    end
    
    (* Third-order form: 2 Cos[x + eps/2] * eps * (0.5 - eps^2/48). *)
    code[x_, eps_] := N[(2.0 * N[(N[Cos[N[(0.5 * eps + x), $MachinePrecision]], $MachinePrecision] * N[(eps * N[(eps * N[(eps * -0.020833333333333332), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right)\right)
    \end{array}
    
    Derivation
    1. Initial program 62.5%

      \[\sin \left(x + \varepsilon\right) - \sin x \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64N/A

        \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right) - \sin x} \]
      2. lift-sin.f64N/A

        \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right)} - \sin x \]
      3. lift-sin.f64N/A

        \[\leadsto \sin \left(x + \varepsilon\right) - \color{blue}{\sin x} \]
      4. diff-sinN/A

        \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
      5. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
      6. lower-*.f64N/A

        \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
    4. Applied rewrites99.9%

      \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
    5. Taylor expanded in eps around inf

      \[\leadsto \color{blue}{\left(\cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right) \cdot \sin \left(\frac{1}{2} \cdot \varepsilon\right)\right)} \cdot 2 \]
    6. Step-by-step derivation
      1. *-commutativeN/A

        \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right)\right)} \cdot 2 \]
      2. metadata-evalN/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{\left(\mathsf{neg}\left(-2\right)\right)} \cdot x\right)\right)\right) \cdot 2 \]
      3. cancel-sign-sub-invN/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon - -2 \cdot x\right)}\right)\right) \cdot 2 \]
      4. lower-*.f64N/A

        \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right)} \cdot 2 \]
      5. lower-sin.f64N/A

        \[\leadsto \left(\color{blue}{\sin \left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
      6. lower-*.f64N/A

        \[\leadsto \left(\sin \color{blue}{\left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
      7. lower-cos.f64N/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \color{blue}{\cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)}\right) \cdot 2 \]
      8. cancel-sign-sub-invN/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon + \left(\mathsf{neg}\left(-2\right)\right) \cdot x\right)}\right)\right) \cdot 2 \]
      9. metadata-evalN/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{2} \cdot x\right)\right)\right) \cdot 2 \]
      10. distribute-lft-inN/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\frac{1}{2} \cdot \varepsilon + \frac{1}{2} \cdot \left(2 \cdot x\right)\right)}\right) \cdot 2 \]
      11. associate-*r*N/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{\left(\frac{1}{2} \cdot 2\right) \cdot x}\right)\right) \cdot 2 \]
      12. metadata-evalN/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{1} \cdot x\right)\right) \cdot 2 \]
      13. *-lft-identityN/A

        \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{x}\right)\right) \cdot 2 \]
      14. lower-fma.f64 — 99.9%

        \[\leadsto \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)}\right) \cdot 2 \]
    7. Applied rewrites — 99.9%

      \[\leadsto \color{blue}{\left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right)} \cdot 2 \]
    8. Taylor expanded in eps around 0

      \[\leadsto \left(\left(\varepsilon \cdot \left(\frac{1}{2} + \frac{-1}{48} \cdot {\varepsilon}^{2}\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(\frac{1}{2}, \varepsilon, x\right)\right)}\right) \cdot 2 \]
    9. Step-by-step derivation
      1. Applied rewrites — 99.7%

        \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)}\right) \cdot 2 \]
      2. Final simplification — 99.7%

        \[\leadsto 2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right)\right) \]
      3. Add Preprocessing

      Alternative 5: 99.5% accurate, 1.7× speedup?

      \[\begin{array}{l} \\ 2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot 0.5\right)\right) \end{array} \]
      (FPCore (x eps)
       :precision binary64
       (* 2.0 (* (cos (fma 0.5 eps x)) (* eps 0.5))))
      double code(double x, double eps) {
      	return 2.0 * (cos(fma(0.5, eps, x)) * (eps * 0.5));
      }
      
      function code(x, eps)
      	return Float64(2.0 * Float64(cos(fma(0.5, eps, x)) * Float64(eps * 0.5)))
      end
      
      code[x_, eps_] := N[(2.0 * N[(N[Cos[N[(0.5 * eps + x), $MachinePrecision]], $MachinePrecision] * N[(eps * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
      
      \begin{array}{l}
      
      \\
      2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot 0.5\right)\right)
      \end{array}
      
      Derivation
      1. Initial program 62.5%

        \[\sin \left(x + \varepsilon\right) - \sin x \]
      2. Add Preprocessing
      3. Step-by-step derivation
        1. lift--.f64N/A

          \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right) - \sin x} \]
        2. lift-sin.f64N/A

          \[\leadsto \color{blue}{\sin \left(x + \varepsilon\right)} - \sin x \]
        3. lift-sin.f64N/A

          \[\leadsto \sin \left(x + \varepsilon\right) - \color{blue}{\sin x} \]
        4. diff-sinN/A

          \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
        5. *-commutativeN/A

          \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
        6. lower-*.f64N/A

          \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
      4. Applied rewrites — 99.9%

        \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
      5. Taylor expanded in eps around inf

        \[\leadsto \color{blue}{\left(\cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right) \cdot \sin \left(\frac{1}{2} \cdot \varepsilon\right)\right)} \cdot 2 \]
      6. Step-by-step derivation
        1. *-commutativeN/A

          \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + 2 \cdot x\right)\right)\right)} \cdot 2 \]
        2. metadata-evalN/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{\left(\mathsf{neg}\left(-2\right)\right)} \cdot x\right)\right)\right) \cdot 2 \]
        3. cancel-sign-sub-invN/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon - -2 \cdot x\right)}\right)\right) \cdot 2 \]
        4. lower-*.f64N/A

          \[\leadsto \color{blue}{\left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right)} \cdot 2 \]
        5. lower-sin.f64N/A

          \[\leadsto \left(\color{blue}{\sin \left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
        6. lower-*.f64N/A

          \[\leadsto \left(\sin \color{blue}{\left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)\right) \cdot 2 \]
        7. lower-cos.f64N/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \color{blue}{\cos \left(\frac{1}{2} \cdot \left(\varepsilon - -2 \cdot x\right)\right)}\right) \cdot 2 \]
        8. cancel-sign-sub-invN/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \color{blue}{\left(\varepsilon + \left(\mathsf{neg}\left(-2\right)\right) \cdot x\right)}\right)\right) \cdot 2 \]
        9. metadata-evalN/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \left(\varepsilon + \color{blue}{2} \cdot x\right)\right)\right) \cdot 2 \]
        10. distribute-lft-inN/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\frac{1}{2} \cdot \varepsilon + \frac{1}{2} \cdot \left(2 \cdot x\right)\right)}\right) \cdot 2 \]
        11. associate-*r*N/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{\left(\frac{1}{2} \cdot 2\right) \cdot x}\right)\right) \cdot 2 \]
        12. metadata-evalN/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{1} \cdot x\right)\right) \cdot 2 \]
        13. *-lft-identityN/A

          \[\leadsto \left(\sin \left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \left(\frac{1}{2} \cdot \varepsilon + \color{blue}{x}\right)\right) \cdot 2 \]
        14. lower-fma.f64 — 99.9%

          \[\leadsto \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)}\right) \cdot 2 \]
      7. Applied rewrites — 99.9%

        \[\leadsto \color{blue}{\left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right)} \cdot 2 \]
      8. Taylor expanded in eps around 0

        \[\leadsto \left(\left(\frac{1}{2} \cdot \varepsilon\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(\frac{1}{2}, \varepsilon, x\right)\right)}\right) \cdot 2 \]
      9. Step-by-step derivation
        1. Applied rewrites — 99.6%

          \[\leadsto \left(\left(\varepsilon \cdot 0.5\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)}\right) \cdot 2 \]
        2. Final simplification — 99.6%

          \[\leadsto 2 \cdot \left(\cos \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right) \cdot \left(\varepsilon \cdot 0.5\right)\right) \]
        3. Add Preprocessing

        Alternative 6: 99.1% accurate, 2.0× speedup?

        \[\begin{array}{l} \\ \varepsilon \cdot \cos x \end{array} \]
        (FPCore (x eps) :precision binary64 (* eps (cos x)))
        double code(double x, double eps) {
        	return eps * cos(x);
        }
        
        real(8) function code(x, eps)
            real(8), intent (in) :: x
            real(8), intent (in) :: eps
            code = eps * cos(x)
        end function
        
        public static double code(double x, double eps) {
        	return eps * Math.cos(x);
        }
        
        def code(x, eps):
        	return eps * math.cos(x)
        
        function code(x, eps)
        	return Float64(eps * cos(x))
        end
        
        function tmp = code(x, eps)
        	tmp = eps * cos(x);
        end
        
        code[x_, eps_] := N[(eps * N[Cos[x], $MachinePrecision]), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \varepsilon \cdot \cos x
        \end{array}
        
        Derivation
        1. Initial program 62.5%

          \[\sin \left(x + \varepsilon\right) - \sin x \]
        2. Add Preprocessing
        3. Taylor expanded in eps around 0

          \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
        4. Step-by-step derivation
          1. lower-*.f64N/A

            \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
          2. lower-cos.f64 — 99.3%

            \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
        5. Applied rewrites — 99.3%

          \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
        6. Add Preprocessing

        Alternative 7: 98.6% accurate, 5.3× speedup?

        \[\begin{array}{l} \\ \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), \varepsilon \cdot \left(x \cdot x\right), \varepsilon\right) \end{array} \]
        (FPCore (x eps)
         :precision binary64
         (fma
          (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5)
          (* eps (* x x))
          eps))
        double code(double x, double eps) {
        	return fma(fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), (eps * (x * x)), eps);
        }
        
        function code(x, eps)
        	return fma(fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), Float64(eps * Float64(x * x)), eps)
        end
        
        code[x_, eps_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] * N[(eps * N[(x * x), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
        
        \begin{array}{l}
        
        \\
        \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), \varepsilon \cdot \left(x \cdot x\right), \varepsilon\right)
        \end{array}
        
        Derivation
        1. Initial program 62.5%

          \[\sin \left(x + \varepsilon\right) - \sin x \]
        2. Add Preprocessing
        3. Taylor expanded in eps around 0

          \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
        4. Step-by-step derivation
          1. lower-*.f64N/A

            \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
          2. lower-cos.f64 — 99.3%

            \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
        5. Applied rewrites — 99.3%

          \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
        6. Taylor expanded in x around 0

          \[\leadsto \varepsilon + \color{blue}{\frac{-1}{2} \cdot \left(\varepsilon \cdot {x}^{2}\right)} \]
        7. Step-by-step derivation
          1. Applied rewrites — 98.5%

            \[\leadsto \mathsf{fma}\left(x, \color{blue}{x \cdot \left(\varepsilon \cdot -0.5\right)}, \varepsilon\right) \]
          2. Taylor expanded in x around 0

            \[\leadsto \varepsilon + \color{blue}{{x}^{2} \cdot \left(\frac{-1}{2} \cdot \varepsilon + {x}^{2} \cdot \left(\frac{-1}{720} \cdot \left(\varepsilon \cdot {x}^{2}\right) + \frac{1}{24} \cdot \varepsilon\right)\right)} \]
          3. Applied rewrites — 98.8%

            \[\leadsto \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), \color{blue}{\varepsilon \cdot \left(x \cdot x\right)}, \varepsilon\right) \]
          4. Add Preprocessing

          Alternative 8: 98.5% accurate, 7.4× speedup?

          \[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(0.041666666666666664, x \cdot x, -0.5\right), 1\right) \end{array} \]
          (FPCore (x eps)
           :precision binary64
           (* eps (fma (* x x) (fma 0.041666666666666664 (* x x) -0.5) 1.0)))
          double code(double x, double eps) {
          	return eps * fma((x * x), fma(0.041666666666666664, (x * x), -0.5), 1.0);
          }
          
          function code(x, eps)
          	return Float64(eps * fma(Float64(x * x), fma(0.041666666666666664, Float64(x * x), -0.5), 1.0))
          end
          
          code[x_, eps_] := N[(eps * N[(N[(x * x), $MachinePrecision] * N[(0.041666666666666664 * N[(x * x), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
          
          \begin{array}{l}
          
          \\
          \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(0.041666666666666664, x \cdot x, -0.5\right), 1\right)
          \end{array}
          
          Derivation
          1. Initial program 62.5%

            \[\sin \left(x + \varepsilon\right) - \sin x \]
          2. Add Preprocessing
          3. Taylor expanded in eps around 0

            \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
          4. Step-by-step derivation
            1. lower-*.f64N/A

              \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
            2. lower-cos.f64 — 99.3%

              \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
          5. Applied rewrites — 99.3%

            \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
          6. Taylor expanded in x around 0

            \[\leadsto \varepsilon \cdot \left(1 + \color{blue}{{x}^{2} \cdot \left(\frac{1}{24} \cdot {x}^{2} - \frac{1}{2}\right)}\right) \]
          7. Step-by-step derivation
            1. Applied rewrites — 98.6%

              \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left(0.041666666666666664, x \cdot x, -0.5\right)}, 1\right) \]
            2. Add Preprocessing

            Alternative 9: 98.5% accurate, 10.4× speedup?

            \[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(x, -0.5 \cdot \left(\varepsilon + x\right), 1\right) \end{array} \]
            (FPCore (x eps) :precision binary64 (* eps (fma x (* -0.5 (+ eps x)) 1.0)))
            double code(double x, double eps) {
            	return eps * fma(x, (-0.5 * (eps + x)), 1.0);
            }
            
            function code(x, eps)
            	return Float64(eps * fma(x, Float64(-0.5 * Float64(eps + x)), 1.0))
            end
            
            code[x_, eps_] := N[(eps * N[(x * N[(-0.5 * N[(eps + x), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
            
            \begin{array}{l}
            
            \\
            \varepsilon \cdot \mathsf{fma}\left(x, -0.5 \cdot \left(\varepsilon + x\right), 1\right)
            \end{array}
            
            Derivation
            1. Initial program 62.5%

              \[\sin \left(x + \varepsilon\right) - \sin x \]
            2. Add Preprocessing
            3. Taylor expanded in eps around 0

              \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \left(\varepsilon \cdot \sin x\right)\right)} \]
            4. Step-by-step derivation
              1. *-commutativeN/A

                \[\leadsto \varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \color{blue}{\left(\sin x \cdot \varepsilon\right)}\right) \]
              2. associate-*r*N/A

                \[\leadsto \varepsilon \cdot \left(\cos x + \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon}\right) \]
              3. lower-*.f64N/A

                \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon\right)} \]
              4. +-commutativeN/A

                \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon + \cos x\right)} \]
              5. associate-*r*N/A

                \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\sin x \cdot \varepsilon\right)} + \cos x\right) \]
              6. *-commutativeN/A

                \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \sin x\right)} + \cos x\right) \]
              7. *-commutativeN/A

                \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \sin x\right) \cdot \frac{-1}{2}} + \cos x\right) \]
              8. associate-*r*N/A

                \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\sin x \cdot \frac{-1}{2}\right)} + \cos x\right) \]
              9. *-commutativeN/A

                \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right)} + \cos x\right) \]
              10. lower-fma.f64N/A

                \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \sin x, \cos x\right)} \]
              11. lower-*.f64N/A

                \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot \sin x}, \cos x\right) \]
              12. lower-sin.f64N/A

                \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\sin x}, \cos x\right) \]
              13. lower-cos.f64 — 99.7%

                \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \color{blue}{\cos x}\right) \]
            5. Applied rewrites — 99.7%

              \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \cos x\right)} \]
            6. Taylor expanded in x around 0

              \[\leadsto \varepsilon \cdot \left(1 + \color{blue}{x \cdot \left(\frac{-1}{2} \cdot \varepsilon + \frac{-1}{2} \cdot x\right)}\right) \]
            7. Step-by-step derivation
              1. Applied rewrites — 98.6%

                \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x, \color{blue}{-0.5 \cdot \left(x + \varepsilon\right)}, 1\right) \]
              2. Final simplification — 98.6%

                \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x, -0.5 \cdot \left(\varepsilon + x\right), 1\right) \]
              3. Add Preprocessing

              Alternative 10: 98.4% accurate, 12.2× speedup?

              \[\begin{array}{l} \\ \mathsf{fma}\left(x, x \cdot \left(\varepsilon \cdot -0.5\right), \varepsilon\right) \end{array} \]
              (FPCore (x eps) :precision binary64 (fma x (* x (* eps -0.5)) eps))
              double code(double x, double eps) {
              	return fma(x, (x * (eps * -0.5)), eps);
              }
              
              function code(x, eps)
              	return fma(x, Float64(x * Float64(eps * -0.5)), eps)
              end
              
              code[x_, eps_] := N[(x * N[(x * N[(eps * -0.5), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
              
              \begin{array}{l}
              
              \\
              \mathsf{fma}\left(x, x \cdot \left(\varepsilon \cdot -0.5\right), \varepsilon\right)
              \end{array}
              
              Derivation
              1. Initial program 62.5%

                \[\sin \left(x + \varepsilon\right) - \sin x \]
              2. Add Preprocessing
              3. Taylor expanded in eps around 0

                \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
              4. Step-by-step derivation
                1. lower-*.f64N/A

                  \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
                2. lower-cos.f64 — 99.3%

                  \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
              5. Applied rewrites — 99.3%

                \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
              6. Taylor expanded in x around 0

                \[\leadsto \varepsilon + \color{blue}{\frac{-1}{2} \cdot \left(\varepsilon \cdot {x}^{2}\right)} \]
              7. Step-by-step derivation
                1. Applied rewrites — 98.5%

                  \[\leadsto \mathsf{fma}\left(x, \color{blue}{x \cdot \left(\varepsilon \cdot -0.5\right)}, \varepsilon\right) \]
                2. Add Preprocessing

                Alternative 11: 98.0% accurate, 34.5× speedup?

                \[\begin{array}{l} \\ \varepsilon \cdot 1 \end{array} \]
                (FPCore (x eps) :precision binary64 (* eps 1.0))
                double code(double x, double eps) {
                	return eps * 1.0;
                }
                
                real(8) function code(x, eps)
                    real(8), intent (in) :: x
                    real(8), intent (in) :: eps
                    code = eps * 1.0d0
                end function
                
                public static double code(double x, double eps) {
                	return eps * 1.0;
                }
                
                def code(x, eps):
                	return eps * 1.0
                
                function code(x, eps)
                	return Float64(eps * 1.0)
                end
                
                function tmp = code(x, eps)
                	tmp = eps * 1.0;
                end
                
                code[x_, eps_] := N[(eps * 1.0), $MachinePrecision]
                
                \begin{array}{l}
                
                \\
                \varepsilon \cdot 1
                \end{array}
                
                Derivation
                1. Initial program 62.5%

                  \[\sin \left(x + \varepsilon\right) - \sin x \]
                2. Add Preprocessing
                3. Taylor expanded in eps around 0

                  \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \left(\varepsilon \cdot \sin x\right)\right)} \]
                4. Step-by-step derivation
                  1. *-commutativeN/A

                    \[\leadsto \varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \color{blue}{\left(\sin x \cdot \varepsilon\right)}\right) \]
                  2. associate-*r*N/A

                    \[\leadsto \varepsilon \cdot \left(\cos x + \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon}\right) \]
                  3. lower-*.f64N/A

                    \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon\right)} \]
                  4. +-commutativeN/A

                    \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon + \cos x\right)} \]
                  5. associate-*r*N/A

                    \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\sin x \cdot \varepsilon\right)} + \cos x\right) \]
                  6. *-commutativeN/A

                    \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \sin x\right)} + \cos x\right) \]
                  7. *-commutativeN/A

                    \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \sin x\right) \cdot \frac{-1}{2}} + \cos x\right) \]
                  8. associate-*r*N/A

                    \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\sin x \cdot \frac{-1}{2}\right)} + \cos x\right) \]
                  9. *-commutativeN/A

                    \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right)} + \cos x\right) \]
                  10. lower-fma.f64N/A

                    \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \sin x, \cos x\right)} \]
                  11. lower-*.f64N/A

                    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot \sin x}, \cos x\right) \]
                  12. lower-sin.f64N/A

                    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\sin x}, \cos x\right) \]
                  13. lower-cos.f64 — 99.7%

                    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \color{blue}{\cos x}\right) \]
                5. Applied rewrites — 99.7%

                  \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \cos x\right)} \]
                6. Taylor expanded in x around 0

                  \[\leadsto \varepsilon \cdot 1 \]
                7. Step-by-step derivation
                  1. Applied rewrites — 98.1%

                    \[\leadsto \varepsilon \cdot 1 \]
                  2. Add Preprocessing

                  Developer Target 1: 99.9% accurate, 0.9× speedup?

                  \[\begin{array}{l} \\ \left(2 \cdot \cos \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right) \end{array} \]
                  (FPCore (x eps)
                   :precision binary64
                   (* (* 2.0 (cos (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
                  double code(double x, double eps) {
                  	return (2.0 * cos((x + (eps / 2.0)))) * sin((eps / 2.0));
                  }
                  
                  real(8) function code(x, eps)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: eps
                      code = (2.0d0 * cos((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
                  end function
                  
                  public static double code(double x, double eps) {
                  	return (2.0 * Math.cos((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
                  }
                  
                  def code(x, eps):
                  	return (2.0 * math.cos((x + (eps / 2.0)))) * math.sin((eps / 2.0))
                  
                  function code(x, eps)
                  	return Float64(Float64(2.0 * cos(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0)))
                  end
                  
                  function tmp = code(x, eps)
                  	tmp = (2.0 * cos((x + (eps / 2.0)))) * sin((eps / 2.0));
                  end
                  
                  code[x_, eps_] := N[(N[(2.0 * N[Cos[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
                  
                  \begin{array}{l}
                  
                  \\
                  \left(2 \cdot \cos \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
                  \end{array}
                  

                  Developer Target 2: 99.6% accurate, 0.5× speedup?

                  \[\begin{array}{l} \\ \sin x \cdot \left(\cos \varepsilon - 1\right) + \cos x \cdot \sin \varepsilon \end{array} \]
                  (FPCore (x eps)
                   :precision binary64
                   (+ (* (sin x) (- (cos eps) 1.0)) (* (cos x) (sin eps))))
                  double code(double x, double eps) {
                  	return (sin(x) * (cos(eps) - 1.0)) + (cos(x) * sin(eps));
                  }
                  
                  real(8) function code(x, eps)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: eps
                      code = (sin(x) * (cos(eps) - 1.0d0)) + (cos(x) * sin(eps))
                  end function
                  
                  public static double code(double x, double eps) {
                  	return (Math.sin(x) * (Math.cos(eps) - 1.0)) + (Math.cos(x) * Math.sin(eps));
                  }
                  
                  def code(x, eps):
                  	return (math.sin(x) * (math.cos(eps) - 1.0)) + (math.cos(x) * math.sin(eps))
                  
                  function code(x, eps)
                  	return Float64(Float64(sin(x) * Float64(cos(eps) - 1.0)) + Float64(cos(x) * sin(eps)))
                  end
                  
                  function tmp = code(x, eps)
                  	tmp = (sin(x) * (cos(eps) - 1.0)) + (cos(x) * sin(eps));
                  end
                  
                  code[x_, eps_] := N[(N[(N[Sin[x], $MachinePrecision] * N[(N[Cos[eps], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[Cos[x], $MachinePrecision] * N[Sin[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
                  
                  \begin{array}{l}
                  
                  \\
                  \sin x \cdot \left(\cos \varepsilon - 1\right) + \cos x \cdot \sin \varepsilon
                  \end{array}
                  

                  Developer Target 3: 99.9% accurate, 0.9× speedup?

                  \[\begin{array}{l} \\ \left(\cos \left(0.5 \cdot \left(\varepsilon - -2 \cdot x\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot 2 \end{array} \]
                  (FPCore (x eps)
                   :precision binary64
                   (* (* (cos (* 0.5 (- eps (* -2.0 x)))) (sin (* 0.5 eps))) 2.0))
                  double code(double x, double eps) {
                  	return (cos((0.5 * (eps - (-2.0 * x)))) * sin((0.5 * eps))) * 2.0;
                  }
                  
                  real(8) function code(x, eps)
                      real(8), intent (in) :: x
                      real(8), intent (in) :: eps
                      code = (cos((0.5d0 * (eps - ((-2.0d0) * x)))) * sin((0.5d0 * eps))) * 2.0d0
                  end function
                  
                  public static double code(double x, double eps) {
                  	return (Math.cos((0.5 * (eps - (-2.0 * x)))) * Math.sin((0.5 * eps))) * 2.0;
                  }
                  
                  def code(x, eps):
                  	return (math.cos((0.5 * (eps - (-2.0 * x)))) * math.sin((0.5 * eps))) * 2.0
                  
                  function code(x, eps)
                  	return Float64(Float64(cos(Float64(0.5 * Float64(eps - Float64(-2.0 * x)))) * sin(Float64(0.5 * eps))) * 2.0)
                  end
                  
                  function tmp = code(x, eps)
                  	tmp = (cos((0.5 * (eps - (-2.0 * x)))) * sin((0.5 * eps))) * 2.0;
                  end
                  
                  code[x_, eps_] := N[(N[(N[Cos[N[(0.5 * N[(eps - N[(-2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
                  
                  \begin{array}{l}
                  
                  \\
                  \left(\cos \left(0.5 \cdot \left(\varepsilon - -2 \cdot x\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot 2
                  \end{array}
                  

                  Reproduce

                  ?
                  herbie shell --seed 2024235 
                  (FPCore (x eps)
                    :name "2sin (example 3.3)"
                    :precision binary64
                    :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
                  
                    :alt
                    (! :herbie-platform default (* 2 (cos (+ x (/ eps 2))) (sin (/ eps 2))))
                  
                    :alt
                    (! :herbie-platform default (+ (* (sin x) (- (cos eps) 1)) (* (cos x) (sin eps))))
                  
                    :alt
                    (! :herbie-platform default (* (cos (* 1/2 (- eps (* -2 x)))) (sin (* 1/2 eps)) 2))
                  
                    (- (sin (+ x eps)) (sin x)))