2sin (example 3.3)

Percentage Accurate: 62.6% → 99.7%
Time: 14.4s
Alternatives: 13
Speedup: 207.0×

Specification

?
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \sin \left(x + \varepsilon\right) - \sin x \end{array} \]
; Specification: sin(x + eps) - sin(x) in binary64.
(FPCore (x eps) :precision binary64 (- (sin (+ x eps)) (sin x)))
double code(double x, double eps) {
	return sin((x + eps)) - sin(x);
}
! Computes sin(x + eps) - sin(x) directly in binary64.
! NOTE(review): direct subtraction cancels badly when |eps| << |x|.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = sin((x + eps)) - sin(x)
end function
public static double code(double x, double eps) {
	// Naive forward difference of sin; loses accuracy for tiny eps.
	double sinShifted = Math.sin(x + eps);
	double sinBase = Math.sin(x);
	return sinShifted - sinBase;
}
def code(x, eps):
	# Naive difference of sines; accuracy degrades when eps is much smaller than x.
	shifted = math.sin(x + eps)
	base = math.sin(x)
	return shifted - base
function code(x, eps)
	# Naive difference of sines; cancellation-prone for small eps.
	shifted = Float64(x + eps)
	return Float64(sin(shifted) - sin(x))
end
function tmp = code(x, eps)
	% Computes sin(x + eps) - sin(x) directly in double precision.
	% NOTE(review): subtraction cancels when |eps| << |x|.
	tmp = sin((x + eps)) - sin(x);
end
(* sin(x + eps) - sin(x), with each intermediate rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Sin[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\sin \left(x + \varepsilon\right) - \sin x
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 13 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 62.6% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \sin \left(x + \varepsilon\right) - \sin x \end{array} \]
; Initial program: sin(x + eps) - sin(x) in binary64.
(FPCore (x eps) :precision binary64 (- (sin (+ x eps)) (sin x)))
double code(double x, double eps) {
	return sin((x + eps)) - sin(x);
}
! Initial program: sin(x + eps) - sin(x) in binary64.
! NOTE(review): direct subtraction cancels badly when |eps| << |x|.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = sin((x + eps)) - sin(x)
end function
public static double code(double x, double eps) {
	// Initial, cancellation-prone form: difference of two nearby sines.
	double lhs = Math.sin(x + eps);
	double rhs = Math.sin(x);
	return lhs - rhs;
}
def code(x, eps):
	# Initial, cancellation-prone form: difference of two nearby sines.
	lhs = math.sin(x + eps)
	rhs = math.sin(x)
	return lhs - rhs
function code(x, eps)
	# Initial, cancellation-prone form: difference of two nearby sines.
	arg = Float64(x + eps)
	return Float64(sin(arg) - sin(x))
end
function tmp = code(x, eps)
	% Initial program: sin(x + eps) - sin(x), double precision.
	% NOTE(review): subtraction cancels when |eps| << |x|.
	tmp = sin((x + eps)) - sin(x);
end
(* Initial program: sin(x + eps) - sin(x), each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Sin[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\sin \left(x + \varepsilon\right) - \sin x
\end{array}

Alternative 1: 99.7% accurate, 1.2× speedup?

\[\begin{array}{l} \\ 2 \cdot \left(\frac{\varepsilon \cdot \mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right), 0.00043402777777777775, -0.25\right)}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, -0.5\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \end{array} \]
; Alternative 1: 2 * (rational series in eps) * cos(x + eps/2); avoids the sin-sin cancellation.
(FPCore (x eps)
 :precision binary64
 (*
  2.0
  (*
   (/
    (* eps (fma (* (* eps eps) (* eps eps)) 0.00043402777777777775 -0.25))
    (fma eps (* eps -0.020833333333333332) -0.5))
   (cos (* (fma x 2.0 eps) 0.5)))))
double code(double x, double eps) {
	return 2.0 * (((eps * fma(((eps * eps) * (eps * eps)), 0.00043402777777777775, -0.25)) / fma(eps, (eps * -0.020833333333333332), -0.5)) * cos((fma(x, 2.0, eps) * 0.5)));
}
function code(x, eps)
	# 2 * (rational series in eps for sin(eps/2)) * cos(x + eps/2); cancellation-free.
	e2 = Float64(eps * eps)
	numer = Float64(eps * fma(Float64(e2 * e2), 0.00043402777777777775, -0.25))
	denom = fma(eps, Float64(eps * -0.020833333333333332), -0.5)
	angle = Float64(fma(x, 2.0, eps) * 0.5)
	return Float64(2.0 * Float64(Float64(numer / denom) * cos(angle)))
end
(* Alternative 1: 2 * (rational series in eps) * cos(x + eps/2), rounded stepwise to $MachinePrecision. *)
code[x_, eps_] := N[(2.0 * N[(N[(N[(eps * N[(N[(N[(eps * eps), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision] * 0.00043402777777777775 + -0.25), $MachinePrecision]), $MachinePrecision] / N[(eps * N[(eps * -0.020833333333333332), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(N[(x * 2.0 + eps), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
2 \cdot \left(\frac{\varepsilon \cdot \mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right), 0.00043402777777777775, -0.25\right)}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, -0.5\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \sin \color{blue}{\left(x + \varepsilon\right)} - \sin x \]
    2. diff-sinN/A

      \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    3. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
    4. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
  4. Applied egg-rr99.9%

    \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
  5. Taylor expanded in eps around 0

    \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \left(\frac{1}{2} + \frac{-1}{48} \cdot {\varepsilon}^{2}\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
  6. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \left(\frac{1}{2} + \frac{-1}{48} \cdot {\varepsilon}^{2}\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    2. +-commutativeN/A

      \[\leadsto \left(\left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{48} \cdot {\varepsilon}^{2} + \frac{1}{2}\right)}\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    3. *-commutativeN/A

      \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{{\varepsilon}^{2} \cdot \frac{-1}{48}} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    4. unpow2N/A

      \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \frac{-1}{48} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    5. associate-*l*N/A

      \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    6. lower-fma.f64N/A

      \[\leadsto \left(\left(\varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{-1}{48}, \frac{1}{2}\right)}\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    7. lower-*.f6499.9

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot -0.020833333333333332}, 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
  7. Simplified99.9%

    \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
  8. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \left(\left(\varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \frac{-1}{48}\right)} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    2. lift-fma.f64N/A

      \[\leadsto \left(\left(\varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{-1}{48}, \frac{1}{2}\right)}\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    3. *-commutativeN/A

      \[\leadsto \left(\color{blue}{\left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{-1}{48}, \frac{1}{2}\right) \cdot \varepsilon\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    4. lift-fma.f64N/A

      \[\leadsto \left(\left(\color{blue}{\left(\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right) + \frac{1}{2}\right)} \cdot \varepsilon\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    5. flip-+N/A

      \[\leadsto \left(\left(\color{blue}{\frac{\left(\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)\right) \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)\right) - \frac{1}{2} \cdot \frac{1}{2}}{\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right) - \frac{1}{2}}} \cdot \varepsilon\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    6. associate-*l/N/A

      \[\leadsto \left(\color{blue}{\frac{\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)\right) \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)\right) - \frac{1}{2} \cdot \frac{1}{2}\right) \cdot \varepsilon}{\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right) - \frac{1}{2}}} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    7. lower-/.f64N/A

      \[\leadsto \left(\color{blue}{\frac{\left(\left(\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)\right) \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)\right) - \frac{1}{2} \cdot \frac{1}{2}\right) \cdot \varepsilon}{\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right) - \frac{1}{2}}} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
  9. Applied egg-rr99.9%

    \[\leadsto \left(\color{blue}{\frac{\mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right), 0.00043402777777777775, -0.25\right) \cdot \varepsilon}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, -0.5\right)}} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
  10. Final simplification99.9%

    \[\leadsto 2 \cdot \left(\frac{\varepsilon \cdot \mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot \left(\varepsilon \cdot \varepsilon\right), 0.00043402777777777775, -0.25\right)}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, -0.5\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \]
  11. Add Preprocessing

Alternative 2: 99.8% accurate, 1.6× speedup?

\[\begin{array}{l} \\ 2 \cdot \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)\right) \end{array} \]
; Alternative 2: 2 * (cubic series in eps for sin(eps/2)) * cos(x + eps/2).
(FPCore (x eps)
 :precision binary64
 (*
  2.0
  (*
   (* eps (fma eps (* eps -0.020833333333333332) 0.5))
   (cos (fma eps 0.5 x)))))
double code(double x, double eps) {
	return 2.0 * ((eps * fma(eps, (eps * -0.020833333333333332), 0.5)) * cos(fma(eps, 0.5, x)));
}
function code(x, eps)
	# 2 * (cubic series in eps for sin(eps/2)) * cos(x + eps/2); no cancellation.
	poly = Float64(eps * fma(eps, Float64(eps * -0.020833333333333332), 0.5))
	angle = fma(eps, 0.5, x)
	return Float64(2.0 * Float64(poly * cos(angle)))
end
(* Alternative 2: 2 * (cubic series in eps) * cos(x + eps/2), rounded stepwise to $MachinePrecision. *)
code[x_, eps_] := N[(2.0 * N[(N[(eps * N[(eps * N[(eps * -0.020833333333333332), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision] * N[Cos[N[(eps * 0.5 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
2 \cdot \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \sin \color{blue}{\left(x + \varepsilon\right)} - \sin x \]
    2. diff-sinN/A

      \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    3. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
    4. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
  4. Applied egg-rr99.9%

    \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
  5. Taylor expanded in eps around 0

    \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \left(\frac{1}{2} + \frac{-1}{48} \cdot {\varepsilon}^{2}\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
  6. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \left(\frac{1}{2} + \frac{-1}{48} \cdot {\varepsilon}^{2}\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    2. +-commutativeN/A

      \[\leadsto \left(\left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{48} \cdot {\varepsilon}^{2} + \frac{1}{2}\right)}\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    3. *-commutativeN/A

      \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{{\varepsilon}^{2} \cdot \frac{-1}{48}} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    4. unpow2N/A

      \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \frac{-1}{48} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    5. associate-*l*N/A

      \[\leadsto \left(\left(\varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{48}\right)} + \frac{1}{2}\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    6. lower-fma.f64N/A

      \[\leadsto \left(\left(\varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{-1}{48}, \frac{1}{2}\right)}\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    7. lower-*.f6499.9

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot -0.020833333333333332}, 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
  7. Simplified99.9%

    \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
  8. Taylor expanded in x around 0

    \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{-1}{48}, \frac{1}{2}\right)\right) \cdot \cos \color{blue}{\left(x + \frac{1}{2} \cdot \varepsilon\right)}\right) \cdot 2 \]
  9. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{-1}{48}, \frac{1}{2}\right)\right) \cdot \cos \color{blue}{\left(\frac{1}{2} \cdot \varepsilon + x\right)}\right) \cdot 2 \]
    2. *-commutativeN/A

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot \frac{-1}{48}, \frac{1}{2}\right)\right) \cdot \cos \left(\color{blue}{\varepsilon \cdot \frac{1}{2}} + x\right)\right) \cdot 2 \]
    3. lower-fma.f6499.9

      \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)}\right) \cdot 2 \]
  10. Simplified99.9%

    \[\leadsto \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right) \cdot \cos \color{blue}{\left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)}\right) \cdot 2 \]
  11. Final simplification99.9%

    \[\leadsto 2 \cdot \left(\left(\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot -0.020833333333333332, 0.5\right)\right) \cdot \cos \left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)\right) \]
  12. Add Preprocessing

Alternative 3: 99.5% accurate, 1.6× speedup?

\[\begin{array}{l} \\ 2 \cdot \left(\cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right) \cdot \left(\varepsilon \cdot 0.5\right)\right) \end{array} \]
; Alternative 3: leading-order form, 2 * cos(x + eps/2) * (eps/2).
(FPCore (x eps)
 :precision binary64
 (* 2.0 (* (cos (* (fma x 2.0 eps) 0.5)) (* eps 0.5))))
double code(double x, double eps) {
	return 2.0 * (cos((fma(x, 2.0, eps) * 0.5)) * (eps * 0.5));
}
function code(x, eps)
	# Leading-order form: 2 * cos(x + eps/2) * (eps/2).
	angle = Float64(fma(x, 2.0, eps) * 0.5)
	halfeps = Float64(eps * 0.5)
	return Float64(2.0 * Float64(cos(angle) * halfeps))
end
(* Alternative 3: 2 * cos(x + eps/2) * (eps/2), rounded stepwise to $MachinePrecision. *)
code[x_, eps_] := N[(2.0 * N[(N[Cos[N[(N[(x * 2.0 + eps), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision] * N[(eps * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
2 \cdot \left(\cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right) \cdot \left(\varepsilon \cdot 0.5\right)\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \sin \color{blue}{\left(x + \varepsilon\right)} - \sin x \]
    2. diff-sinN/A

      \[\leadsto \color{blue}{2 \cdot \left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right)} \]
    3. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
    4. lower-*.f64N/A

      \[\leadsto \color{blue}{\left(\sin \left(\frac{\left(x + \varepsilon\right) - x}{2}\right) \cdot \cos \left(\frac{\left(x + \varepsilon\right) + x}{2}\right)\right) \cdot 2} \]
  4. Applied egg-rr99.9%

    \[\leadsto \color{blue}{\left(\sin \left(\left(\varepsilon + 0\right) \cdot 0.5\right) \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2} \]
  5. Taylor expanded in eps around 0

    \[\leadsto \left(\color{blue}{\left(\frac{1}{2} \cdot \varepsilon\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
  6. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot \frac{1}{2}\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot \frac{1}{2}\right)\right) \cdot 2 \]
    2. lower-*.f6499.8

      \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot 0.5\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
  7. Simplified99.8%

    \[\leadsto \left(\color{blue}{\left(\varepsilon \cdot 0.5\right)} \cdot \cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right)\right) \cdot 2 \]
  8. Final simplification99.8%

    \[\leadsto 2 \cdot \left(\cos \left(\mathsf{fma}\left(x, 2, \varepsilon\right) \cdot 0.5\right) \cdot \left(\varepsilon \cdot 0.5\right)\right) \]
  9. Add Preprocessing

Alternative 4: 99.1% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \cos x \end{array} \]
; Alternative 4: first-order Taylor approximation, eps * cos(x).
(FPCore (x eps) :precision binary64 (* eps (cos x)))
double code(double x, double eps) {
	return eps * cos(x);
}
! First-order Taylor approximation: sin(x + eps) - sin(x) ~ eps * cos(x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * cos(x)
end function
public static double code(double x, double eps) {
	// First-order Taylor approximation: derivative of sin is cos.
	double derivative = Math.cos(x);
	return eps * derivative;
}
def code(x, eps):
	# First-order Taylor approximation: d/dx sin(x) = cos(x).
	slope = math.cos(x)
	return eps * slope
function code(x, eps)
	# First-order Taylor approximation: sin(x+eps) - sin(x) ~ eps * cos(x).
	slope = cos(x)
	return Float64(eps * slope)
end
function tmp = code(x, eps)
	% First-order Taylor approximation: eps * cos(x).
	tmp = eps * cos(x);
end
(* Alternative 4: first-order Taylor approximation, eps * cos(x). *)
code[x_, eps_] := N[(eps * N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \cos x
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
    2. lower-cos.f6499.6

      \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  6. Add Preprocessing

Alternative 5: 98.7% accurate, 3.3× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), x \cdot x, 1 + x \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.08333333333333333, x \cdot x, -0.5\right)\right)\right) \end{array} \]
; Alternative 5: eps * [truncated cos series in x + eps-correction term]; valid near x = 0.
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   (fma x (* x (fma (* x x) -0.001388888888888889 0.041666666666666664)) -0.5)
   (* x x)
   (+ 1.0 (* x (* eps (fma 0.08333333333333333 (* x x) -0.5)))))))
double code(double x, double eps) {
	return eps * fma(fma(x, (x * fma((x * x), -0.001388888888888889, 0.041666666666666664)), -0.5), (x * x), (1.0 + (x * (eps * fma(0.08333333333333333, (x * x), -0.5)))));
}
function code(x, eps)
	# eps * [truncated cos series in x + eps-correction term]; intended near x = 0.
	x2 = Float64(x * x)
	quartic = fma(x, Float64(x * fma(x2, -0.001388888888888889, 0.041666666666666664)), -0.5)
	epscorr = Float64(x * Float64(eps * fma(0.08333333333333333, x2, -0.5)))
	return Float64(eps * fma(quartic, x2, Float64(1.0 + epscorr)))
end
(* Alternative 5: eps * [truncated cos series in x + eps-correction], rounded stepwise to $MachinePrecision. *)
code[x_, eps_] := N[(eps * N[(N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + N[(1.0 + N[(x * N[(eps * N[(0.08333333333333333 * N[(x * x), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), x \cdot x, 1 + x \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.08333333333333333, x \cdot x, -0.5\right)\right)\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \left(\varepsilon \cdot \sin x\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \color{blue}{\left(\sin x \cdot \varepsilon\right)}\right) \]
    2. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon}\right) \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon\right)} \]
    4. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon + \cos x\right)} \]
    5. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\sin x \cdot \varepsilon\right)} + \cos x\right) \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \sin x\right)} + \cos x\right) \]
    7. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \sin x\right) \cdot \frac{-1}{2}} + \cos x\right) \]
    8. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\sin x \cdot \frac{-1}{2}\right)} + \cos x\right) \]
    9. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right)} + \cos x\right) \]
    10. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \sin x, \cos x\right)} \]
    11. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot \sin x}, \cos x\right) \]
    12. lower-sin.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\sin x}, \cos x\right) \]
    13. lower-cos.f6499.9

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \color{blue}{\cos x}\right) \]
  5. Simplified99.9%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \cos x\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{1}{12} \cdot {x}^{2} - \frac{1}{2}\right)}, \cos x\right) \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{1}{12} \cdot {x}^{2} - \frac{1}{2}\right)}, \cos x\right) \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}, \cos x\right) \]
    3. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{12}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    4. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    5. associate-*l*N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{x \cdot \left(x \cdot \frac{1}{12}\right)} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(x \cdot \left(x \cdot \frac{1}{12}\right) + \color{blue}{\frac{-1}{2}}\right), \cos x\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)}, \cos x\right) \]
    8. lower-*.f6499.4

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot 0.08333333333333333}, -0.5\right), \cos x\right) \]
  8. Simplified99.4%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right)}, \cos x\right) \]
  9. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right)}\right) \]
  10. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{{x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) + 1}\right) \]
    2. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{\mathsf{fma}\left({x}^{2}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)}\right) \]
    3. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    4. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    5. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right)\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \color{blue}{\frac{-1}{2}}, 1\right)\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right)}, 1\right)\right) \]
    8. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    9. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    10. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-1}{720} \cdot {x}^{2} + \frac{1}{24}}, \frac{-1}{2}\right), 1\right)\right) \]
    11. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \frac{-1}{720}} + \frac{1}{24}, \frac{-1}{2}\right), 1\right)\right) \]
    12. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{-1}{720}, \frac{1}{24}\right)}, \frac{-1}{2}\right), 1\right)\right) \]
    13. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right), 1\right)\right) \]
    14. lower-*.f64 — 98.8

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right) \]
  11. Simplified98.8%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)}\right) \]
  12. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \left(x \cdot \color{blue}{\left(x \cdot \frac{1}{12}\right)} + \frac{-1}{2}\right)\right) + \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \frac{-1}{720} + \frac{1}{24}\right) + \frac{-1}{2}\right) + 1\right)\right) \]
    2. lift-fma.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \color{blue}{\mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)}\right) + \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \frac{-1}{720} + \frac{1}{24}\right) + \frac{-1}{2}\right) + 1\right)\right) \]
    3. lift-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right)} + \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \frac{-1}{720} + \frac{1}{24}\right) + \frac{-1}{2}\right) + 1\right)\right) \]
    4. lift-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right) + \left(\color{blue}{\left(x \cdot x\right)} \cdot \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \frac{-1}{720} + \frac{1}{24}\right) + \frac{-1}{2}\right) + 1\right)\right) \]
    5. lift-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right) + \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \left(\left(x \cdot x\right) \cdot \frac{-1}{720} + \frac{1}{24}\right) + \frac{-1}{2}\right) + 1\right)\right) \]
    6. lift-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right) + \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \frac{-1}{720} + \frac{1}{24}\right) + \frac{-1}{2}\right) + 1\right)\right) \]
    7. lift-fma.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right) + \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \color{blue}{\mathsf{fma}\left(x \cdot x, \frac{-1}{720}, \frac{1}{24}\right)} + \frac{-1}{2}\right) + 1\right)\right) \]
    8. lift-fma.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right) + \left(\left(x \cdot x\right) \cdot \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right)} + 1\right)\right) \]
    9. lift-fma.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right) + \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right), 1\right)}\right) \]
    10. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right), 1\right) + \varepsilon \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)\right)\right)} \]
  13. Applied egg-rr98.8%

    \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), x \cdot x, 1 + x \cdot \left(\mathsf{fma}\left(0.08333333333333333, x \cdot x, -0.5\right) \cdot \varepsilon\right)\right)} \]
  14. Final simplification98.8%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), x \cdot x, 1 + x \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.08333333333333333, x \cdot x, -0.5\right)\right)\right) \]
  15. Add Preprocessing

Alternative 6: 98.7% accurate, 3.4× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right) \end{array} \]
; Herbie alternative 6 (98.7% accurate, 3.4x speedup):
; series approximation of sin(x+eps) - sin(x), factored as
;   eps * fma(eps, x*(x^2/12 - 1/2), cos-series(x))
; where cos-series(x) = 1 - x^2/2 + x^4/24 - x^6/720, evaluated with nested fma.
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   eps
   (* x (fma x (* x 0.08333333333333333) -0.5))
   (fma
    (* x x)
    (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5)
    1.0))))
double code(double x, double eps) {
	return eps * fma(eps, (x * fma(x, (x * 0.08333333333333333), -0.5)), fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0));
}
# Herbie alternative 6: series approximation of sin(x + eps) - sin(x).
# eps * fma(eps, x*(x^2/12 - 1/2), 1 - x^2/2 + x^4/24 - x^6/720),
# with all polynomial steps fused via fma for accuracy.
function code(x, eps)
	return Float64(eps * fma(eps, Float64(x * fma(x, Float64(x * 0.08333333333333333), -0.5)), fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0)))
end
code[x_, eps_] := N[(eps * N[(eps * N[(x * N[(x * N[(x * 0.08333333333333333), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \left(\varepsilon \cdot \sin x\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \color{blue}{\left(\sin x \cdot \varepsilon\right)}\right) \]
    2. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon}\right) \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon\right)} \]
    4. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon + \cos x\right)} \]
    5. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\sin x \cdot \varepsilon\right)} + \cos x\right) \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \sin x\right)} + \cos x\right) \]
    7. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \sin x\right) \cdot \frac{-1}{2}} + \cos x\right) \]
    8. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\sin x \cdot \frac{-1}{2}\right)} + \cos x\right) \]
    9. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right)} + \cos x\right) \]
    10. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \sin x, \cos x\right)} \]
    11. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot \sin x}, \cos x\right) \]
    12. lower-sin.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\sin x}, \cos x\right) \]
    13. lower-cos.f64 — 99.9

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \color{blue}{\cos x}\right) \]
  5. Simplified99.9%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \cos x\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{1}{12} \cdot {x}^{2} - \frac{1}{2}\right)}, \cos x\right) \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{1}{12} \cdot {x}^{2} - \frac{1}{2}\right)}, \cos x\right) \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}, \cos x\right) \]
    3. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{12}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    4. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    5. associate-*l*N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{x \cdot \left(x \cdot \frac{1}{12}\right)} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(x \cdot \left(x \cdot \frac{1}{12}\right) + \color{blue}{\frac{-1}{2}}\right), \cos x\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)}, \cos x\right) \]
    8. lower-*.f64 — 99.4

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot 0.08333333333333333}, -0.5\right), \cos x\right) \]
  8. Simplified99.4%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right)}, \cos x\right) \]
  9. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right)}\right) \]
  10. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{{x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) + 1}\right) \]
    2. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{\mathsf{fma}\left({x}^{2}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)}\right) \]
    3. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    4. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    5. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right)\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \color{blue}{\frac{-1}{2}}, 1\right)\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right)}, 1\right)\right) \]
    8. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    9. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    10. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-1}{720} \cdot {x}^{2} + \frac{1}{24}}, \frac{-1}{2}\right), 1\right)\right) \]
    11. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \frac{-1}{720}} + \frac{1}{24}, \frac{-1}{2}\right), 1\right)\right) \]
    12. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{-1}{720}, \frac{1}{24}\right)}, \frac{-1}{2}\right), 1\right)\right) \]
    13. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right), 1\right)\right) \]
    14. lower-*.f64 — 98.8

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right) \]
  11. Simplified98.8%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)}\right) \]
  12. Add Preprocessing

Alternative 7: 98.7% accurate, 4.1× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right) \end{array} \]
; Herbie alternative 7 (98.7% accurate, 4.1x speedup):
; like alternative 6, but the eps-order correction is truncated to -x/2,
; dropping the x^3/12 term:
;   eps * fma(eps, -x/2, 1 - x^2/2 + x^4/24 - x^6/720)
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   eps
   (* -0.5 x)
   (fma
    (* x x)
    (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5)
    1.0))))
double code(double x, double eps) {
	return eps * fma(eps, (-0.5 * x), fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0));
}
# Herbie alternative 7: cheaper series for sin(x + eps) - sin(x);
# eps-order correction truncated to -x/2, cosine series kept in full.
function code(x, eps)
	return Float64(eps * fma(eps, Float64(-0.5 * x), fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0)))
end
code[x_, eps_] := N[(eps * N[(eps * N[(-0.5 * x), $MachinePrecision] + N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \left(\varepsilon \cdot \sin x\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \color{blue}{\left(\sin x \cdot \varepsilon\right)}\right) \]
    2. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon}\right) \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon\right)} \]
    4. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon + \cos x\right)} \]
    5. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\sin x \cdot \varepsilon\right)} + \cos x\right) \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \sin x\right)} + \cos x\right) \]
    7. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \sin x\right) \cdot \frac{-1}{2}} + \cos x\right) \]
    8. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\sin x \cdot \frac{-1}{2}\right)} + \cos x\right) \]
    9. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right)} + \cos x\right) \]
    10. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \sin x, \cos x\right)} \]
    11. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot \sin x}, \cos x\right) \]
    12. lower-sin.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\sin x}, \cos x\right) \]
    13. lower-cos.f64 — 99.9

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \color{blue}{\cos x}\right) \]
  5. Simplified99.9%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \cos x\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot x}, \cos x\right) \]
  7. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \frac{-1}{2}}, \cos x\right) \]
    2. lower-*.f64 — 99.5

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot -0.5}, \cos x\right) \]
  8. Simplified99.5%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot -0.5}, \cos x\right) \]
  9. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \color{blue}{1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right)}\right) \]
  10. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \color{blue}{{x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) + 1}\right) \]
    2. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \color{blue}{\mathsf{fma}\left({x}^{2}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)}\right) \]
    3. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    4. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    5. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right)\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \color{blue}{\frac{-1}{2}}, 1\right)\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right)}, 1\right)\right) \]
    8. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    9. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    10. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-1}{720} \cdot {x}^{2} + \frac{1}{24}}, \frac{-1}{2}\right), 1\right)\right) \]
    11. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \frac{-1}{720}} + \frac{1}{24}, \frac{-1}{2}\right), 1\right)\right) \]
    12. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{-1}{720}, \frac{1}{24}\right)}, \frac{-1}{2}\right), 1\right)\right) \]
    13. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \frac{-1}{2}, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right), 1\right)\right) \]
    14. lower-*.f64 — 98.8

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot -0.5, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right) \]
  11. Simplified98.8%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot -0.5, \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)}\right) \]
  12. Final simplification98.8%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right) \]
  13. Add Preprocessing

Alternative 8: 98.6% accurate, 5.3× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), \varepsilon\right) \end{array} \]
; Herbie alternative 8 (98.6% accurate, 5.3x speedup):
; refactored so eps appears linearly:
;   fma(x^2, eps * (x^4/24 - x^6/720 ... - 1/2 form), eps)
; i.e. eps * (1 + x^2 * (x^2*(1/24 - x^2/720) - 1/2)) with one outer fma.
(FPCore (x eps)
 :precision binary64
 (fma
  (* x x)
  (*
   eps
   (fma x (* x (fma (* x x) -0.001388888888888889 0.041666666666666664)) -0.5))
  eps))
double code(double x, double eps) {
	return fma((x * x), (eps * fma(x, (x * fma((x * x), -0.001388888888888889, 0.041666666666666664)), -0.5)), eps);
}
# Herbie alternative 8: fma-based polynomial for sin(x + eps) - sin(x);
# the Float64(...) casts pin each intermediate product to binary64.
function code(x, eps)
	return fma(Float64(x * x), Float64(eps * fma(x, Float64(x * fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664)), -0.5)), eps)
end
(* Herbie alternative 8: each fma is written as a*b + c, with every intermediate rounded via N[..., $MachinePrecision]. *)
code[x_, eps_] := N[(N[(x * x), $MachinePrecision] * N[(eps * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), \varepsilon\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \left(\varepsilon \cdot \sin x\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \color{blue}{\left(\sin x \cdot \varepsilon\right)}\right) \]
    2. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon}\right) \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon\right)} \]
    4. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon + \cos x\right)} \]
    5. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\sin x \cdot \varepsilon\right)} + \cos x\right) \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \sin x\right)} + \cos x\right) \]
    7. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \sin x\right) \cdot \frac{-1}{2}} + \cos x\right) \]
    8. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\sin x \cdot \frac{-1}{2}\right)} + \cos x\right) \]
    9. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right)} + \cos x\right) \]
    10. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \sin x, \cos x\right)} \]
    11. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot \sin x}, \cos x\right) \]
    12. lower-sin.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\sin x}, \cos x\right) \]
    13. lower-cos.f6499.9

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \color{blue}{\cos x}\right) \]
  5. Simplified99.9%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \cos x\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{1}{12} \cdot {x}^{2} - \frac{1}{2}\right)}, \cos x\right) \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{1}{12} \cdot {x}^{2} - \frac{1}{2}\right)}, \cos x\right) \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(\frac{1}{12} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}, \cos x\right) \]
    3. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{12}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    4. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{12} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    5. associate-*l*N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(\color{blue}{x \cdot \left(x \cdot \frac{1}{12}\right)} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \cos x\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \left(x \cdot \left(x \cdot \frac{1}{12}\right) + \color{blue}{\frac{-1}{2}}\right), \cos x\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right)}, \cos x\right) \]
    8. lower-*.f6499.4

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot 0.08333333333333333}, -0.5\right), \cos x\right) \]
  8. Simplified99.4%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right)}, \cos x\right) \]
  9. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right)}\right) \]
  10. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{{x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) + 1}\right) \]
    2. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \color{blue}{\mathsf{fma}\left({x}^{2}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)}\right) \]
    3. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    4. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)\right) \]
    5. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right)\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \color{blue}{\frac{-1}{2}}, 1\right)\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right)}, 1\right)\right) \]
    8. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    9. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right)\right) \]
    10. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-1}{720} \cdot {x}^{2} + \frac{1}{24}}, \frac{-1}{2}\right), 1\right)\right) \]
    11. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \frac{-1}{720}} + \frac{1}{24}, \frac{-1}{2}\right), 1\right)\right) \]
    12. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{-1}{720}, \frac{1}{24}\right)}, \frac{-1}{2}\right), 1\right)\right) \]
    13. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot \frac{1}{12}, \frac{-1}{2}\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right), 1\right)\right) \]
    14. lower-*.f6498.8

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)\right) \]
  11. Simplified98.8%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, x \cdot \mathsf{fma}\left(x, x \cdot 0.08333333333333333, -0.5\right), \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)}\right) \]
  12. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right)\right)} \]
  13. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left({x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) + 1\right)} \]
    2. distribute-rgt-inN/A

      \[\leadsto \color{blue}{\left({x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right)\right) \cdot \varepsilon + 1 \cdot \varepsilon} \]
    3. associate-*l*N/A

      \[\leadsto \color{blue}{{x}^{2} \cdot \left(\left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) \cdot \varepsilon\right)} + 1 \cdot \varepsilon \]
    4. *-lft-identityN/A

      \[\leadsto {x}^{2} \cdot \left(\left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) \cdot \varepsilon\right) + \color{blue}{\varepsilon} \]
    5. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2}, \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) \cdot \varepsilon, \varepsilon\right)} \]
  14. Simplified98.8%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right) \cdot \varepsilon, \varepsilon\right)} \]
  15. Final simplification98.8%

    \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), \varepsilon\right) \]
  16. Add Preprocessing

Alternative 9: 98.6% accurate, 5.3× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right) \end{array} \]
; Alternative 9 (98.6% accurate, 5.3x speedup): eps times an even polynomial
; in x — the Taylor series of cos(x) truncated at x^6 (see derivation below).
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   (* x x)
   (fma (* x x) (fma (* x x) -0.001388888888888889 0.041666666666666664) -0.5)
   1.0)))
double code(double x, double eps) {
	return eps * fma((x * x), fma((x * x), fma((x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0);
}
# Herbie alternative 9: eps times a truncated cos(x) Taylor polynomial,
# evaluated as nested fma calls on binary64 intermediates.
function code(x, eps)
	return Float64(eps * fma(Float64(x * x), fma(Float64(x * x), fma(Float64(x * x), -0.001388888888888889, 0.041666666666666664), -0.5), 1.0))
end
(* Herbie alternative 9: fma written as a*b + c; eps times a truncated cos-series polynomial in x*x. *)
code[x_, eps_] := N[(eps * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
    2. lower-cos.f6499.6

      \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  6. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \color{blue}{\left(1 + {x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right)\right)} \]
  7. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left({x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}\right) + 1\right)} \]
    2. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right)} \]
    3. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right) \]
    4. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) - \frac{1}{2}, 1\right) \]
    5. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}, 1\right) \]
    6. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, {x}^{2} \cdot \left(\frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}\right) + \color{blue}{\frac{-1}{2}}, 1\right) \]
    7. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right)}, 1\right) \]
    8. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right) \]
    9. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24} + \frac{-1}{720} \cdot {x}^{2}, \frac{-1}{2}\right), 1\right) \]
    10. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\frac{-1}{720} \cdot {x}^{2} + \frac{1}{24}}, \frac{-1}{2}\right), 1\right) \]
    11. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{{x}^{2} \cdot \frac{-1}{720}} + \frac{1}{24}, \frac{-1}{2}\right), 1\right) \]
    12. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{-1}{720}, \frac{1}{24}\right)}, \frac{-1}{2}\right), 1\right) \]
    13. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{-1}{720}, \frac{1}{24}\right), \frac{-1}{2}\right), 1\right) \]
    14. lower-*.f6498.8

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(\color{blue}{x \cdot x}, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right) \]
  8. Simplified98.8%

    \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.041666666666666664\right), -0.5\right), 1\right)} \]
  9. Add Preprocessing

Alternative 10: 98.5% accurate, 7.4× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), \varepsilon\right) \end{array} \]
; Alternative 10 (98.5% accurate, 7.4x speedup): eps*cos(x) truncated at the
; x^4 term, arranged as nested fma (see derivation below).
(FPCore (x eps)
 :precision binary64
 (fma (* x x) (* eps (fma (* x x) 0.041666666666666664 -0.5)) eps))
double code(double x, double eps) {
	return fma((x * x), (eps * fma((x * x), 0.041666666666666664, -0.5)), eps);
}
# Herbie alternative 10: eps*cos(x) truncated at the x^4 term, via nested fma.
function code(x, eps)
	return fma(Float64(x * x), Float64(eps * fma(Float64(x * x), 0.041666666666666664, -0.5)), eps)
end
(* Herbie alternative 10: fma written as a*b + c; eps*cos(x) truncated at the x^4 term. *)
code[x_, eps_] := N[(N[(x * x), $MachinePrecision] * N[(eps * N[(N[(x * x), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), \varepsilon\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
    2. lower-cos.f6499.6

      \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\varepsilon + {x}^{2} \cdot \left(\frac{-1}{2} \cdot \varepsilon + \frac{1}{24} \cdot \left(\varepsilon \cdot {x}^{2}\right)\right)} \]
  7. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \color{blue}{{x}^{2} \cdot \left(\frac{-1}{2} \cdot \varepsilon + \frac{1}{24} \cdot \left(\varepsilon \cdot {x}^{2}\right)\right) + \varepsilon} \]
    2. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{-1}{2} \cdot \varepsilon + \frac{1}{24} \cdot \left(\varepsilon \cdot {x}^{2}\right), \varepsilon\right)} \]
    3. unpow2N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{-1}{2} \cdot \varepsilon + \frac{1}{24} \cdot \left(\varepsilon \cdot {x}^{2}\right), \varepsilon\right) \]
    4. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{-1}{2} \cdot \varepsilon + \frac{1}{24} \cdot \left(\varepsilon \cdot {x}^{2}\right), \varepsilon\right) \]
    5. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\varepsilon \cdot \frac{-1}{2}} + \frac{1}{24} \cdot \left(\varepsilon \cdot {x}^{2}\right), \varepsilon\right) \]
    6. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \frac{-1}{2} + \color{blue}{\left(\varepsilon \cdot {x}^{2}\right) \cdot \frac{1}{24}}, \varepsilon\right) \]
    7. associate-*l*N/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \frac{-1}{2} + \color{blue}{\varepsilon \cdot \left({x}^{2} \cdot \frac{1}{24}\right)}, \varepsilon\right) \]
    8. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \frac{-1}{2} + \varepsilon \cdot \color{blue}{\left(\frac{1}{24} \cdot {x}^{2}\right)}, \varepsilon\right) \]
    9. distribute-lft-outN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} + \frac{1}{24} \cdot {x}^{2}\right)}, \varepsilon\right) \]
    10. +-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \color{blue}{\left(\frac{1}{24} \cdot {x}^{2} + \frac{-1}{2}\right)}, \varepsilon\right) \]
    11. metadata-evalN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \left(\frac{1}{24} \cdot {x}^{2} + \color{blue}{\left(\mathsf{neg}\left(\frac{1}{2}\right)\right)}\right), \varepsilon\right) \]
    12. sub-negN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \color{blue}{\left(\frac{1}{24} \cdot {x}^{2} - \frac{1}{2}\right)}, \varepsilon\right) \]
    13. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \color{blue}{\varepsilon \cdot \left(\frac{1}{24} \cdot {x}^{2} - \frac{1}{2}\right)}, \varepsilon\right) \]
    14. sub-negN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \color{blue}{\left(\frac{1}{24} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}, \varepsilon\right) \]
    15. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{24}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right), \varepsilon\right) \]
    16. metadata-evalN/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \left({x}^{2} \cdot \frac{1}{24} + \color{blue}{\frac{-1}{2}}\right), \varepsilon\right) \]
    17. lower-fma.f64N/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \color{blue}{\mathsf{fma}\left({x}^{2}, \frac{1}{24}, \frac{-1}{2}\right)}, \varepsilon\right) \]
    18. unpow2N/A

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, \frac{1}{24}, \frac{-1}{2}\right), \varepsilon\right) \]
    19. lower-*.f6498.6

      \[\leadsto \mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(\color{blue}{x \cdot x}, 0.041666666666666664, -0.5\right), \varepsilon\right) \]
  8. Simplified98.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x \cdot x, \varepsilon \cdot \mathsf{fma}\left(x \cdot x, 0.041666666666666664, -0.5\right), \varepsilon\right)} \]
  9. Add Preprocessing

Alternative 11: 98.5% accurate, 10.4× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x, \varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon + x\right)\right), \varepsilon\right) \end{array} \]
; Alternative 11 (98.5% accurate, 10.4x speedup): second-order model
; eps - 0.5*x*eps*(eps + x), from the Taylor expansion in x around 0.
(FPCore (x eps) :precision binary64 (fma x (* eps (* -0.5 (+ eps x))) eps))
double code(double x, double eps) {
	return fma(x, (eps * (-0.5 * (eps + x))), eps);
}
# Herbie alternative 11: second-order model eps - 0.5*x*eps*(eps + x),
# expressed as a single fma.
function code(x, eps)
	return fma(x, Float64(eps * Float64(-0.5 * Float64(eps + x))), eps)
end
(* Herbie alternative 11: fma written as a*b + c; evaluates eps - 0.5*x*eps*(eps + x). *)
code[x_, eps_] := N[(x * N[(eps * N[(-0.5 * N[(eps + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x, \varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon + x\right)\right), \varepsilon\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \left(\varepsilon \cdot \sin x\right)\right)} \]
  4. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \frac{-1}{2} \cdot \color{blue}{\left(\sin x \cdot \varepsilon\right)}\right) \]
    2. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\cos x + \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon}\right) \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\cos x + \left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon\right)} \]
    4. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \sin x\right) \cdot \varepsilon + \cos x\right)} \]
    5. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\sin x \cdot \varepsilon\right)} + \cos x\right) \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \sin x\right)} + \cos x\right) \]
    7. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \sin x\right) \cdot \frac{-1}{2}} + \cos x\right) \]
    8. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\sin x \cdot \frac{-1}{2}\right)} + \cos x\right) \]
    9. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \sin x\right)} + \cos x\right) \]
    10. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \sin x, \cos x\right)} \]
    11. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\frac{-1}{2} \cdot \sin x}, \cos x\right) \]
    12. lower-sin.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\sin x}, \cos x\right) \]
    13. lower-cos.f6499.9

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \color{blue}{\cos x}\right) \]
  5. Simplified99.9%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, -0.5 \cdot \sin x, \cos x\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\varepsilon + x \cdot \left(\frac{-1}{2} \cdot \left(\varepsilon \cdot x\right) + \frac{-1}{2} \cdot {\varepsilon}^{2}\right)} \]
  7. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{2} \cdot \left(\varepsilon \cdot x\right) + \frac{-1}{2} \cdot {\varepsilon}^{2}\right) + \varepsilon} \]
    2. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \frac{-1}{2} \cdot \left(\varepsilon \cdot x\right) + \frac{-1}{2} \cdot {\varepsilon}^{2}, \varepsilon\right)} \]
    3. associate-*r*N/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\left(\frac{-1}{2} \cdot \varepsilon\right) \cdot x} + \frac{-1}{2} \cdot {\varepsilon}^{2}, \varepsilon\right) \]
    4. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\left(\varepsilon \cdot \frac{-1}{2}\right)} \cdot x + \frac{-1}{2} \cdot {\varepsilon}^{2}, \varepsilon\right) \]
    5. associate-*l*N/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} \cdot x\right)} + \frac{-1}{2} \cdot {\varepsilon}^{2}, \varepsilon\right) \]
    6. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{-1}{2} \cdot x\right) + \color{blue}{{\varepsilon}^{2} \cdot \frac{-1}{2}}, \varepsilon\right) \]
    7. unpow2N/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{-1}{2} \cdot x\right) + \color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \frac{-1}{2}, \varepsilon\right) \]
    8. associate-*l*N/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{-1}{2} \cdot x\right) + \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \frac{-1}{2}\right)}, \varepsilon\right) \]
    9. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{-1}{2} \cdot x\right) + \varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \varepsilon\right)}, \varepsilon\right) \]
    10. distribute-lft-outN/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} \cdot x + \frac{-1}{2} \cdot \varepsilon\right)}, \varepsilon\right) \]
    11. +-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \varepsilon + \frac{-1}{2} \cdot x\right)}, \varepsilon\right) \]
    12. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} \cdot \varepsilon + \frac{-1}{2} \cdot x\right)}, \varepsilon\right) \]
    13. distribute-lft-outN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \left(\varepsilon + x\right)\right)}, \varepsilon\right) \]
    14. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \left(\varepsilon + x\right)\right)}, \varepsilon\right) \]
    15. lower-+.f6498.6

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(-0.5 \cdot \color{blue}{\left(\varepsilon + x\right)}\right), \varepsilon\right) \]
  8. Simplified98.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon + x\right)\right), \varepsilon\right)} \]
  9. Add Preprocessing

Alternative 12: 98.4% accurate, 12.2× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(\varepsilon, x \cdot \left(-0.5 \cdot x\right), \varepsilon\right) \end{array} \]
; Alternative 12 (98.4% accurate, 12.2x speedup): eps*(1 - x^2/2), i.e.
; eps*cos(x) truncated after the quadratic term, as a single fma.
(FPCore (x eps) :precision binary64 (fma eps (* x (* -0.5 x)) eps))
double code(double x, double eps) {
	return fma(eps, (x * (-0.5 * x)), eps);
}
# Herbie alternative 12: eps*(1 - x^2/2) arranged as a single fma.
function code(x, eps)
	return fma(eps, Float64(x * Float64(-0.5 * x)), eps)
end
(* Herbie alternative 12: fma written as a*b + c; evaluates eps*(1 - x^2/2). *)
code[x_, eps_] := N[(eps * N[(x * N[(-0.5 * x), $MachinePrecision]), $MachinePrecision] + eps), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(\varepsilon, x \cdot \left(-0.5 \cdot x\right), \varepsilon\right)
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
    2. lower-cos.f6499.6

      \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
  5. Simplified99.6%

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\varepsilon + \frac{-1}{2} \cdot \left(\varepsilon \cdot {x}^{2}\right)} \]
  7. Step-by-step derivation
    1. associate-*r*N/A

      \[\leadsto \varepsilon + \color{blue}{\left(\frac{-1}{2} \cdot \varepsilon\right) \cdot {x}^{2}} \]
    2. +-commutativeN/A

      \[\leadsto \color{blue}{\left(\frac{-1}{2} \cdot \varepsilon\right) \cdot {x}^{2} + \varepsilon} \]
    3. associate-*r*N/A

      \[\leadsto \color{blue}{\frac{-1}{2} \cdot \left(\varepsilon \cdot {x}^{2}\right)} + \varepsilon \]
    4. *-commutativeN/A

      \[\leadsto \color{blue}{\left(\varepsilon \cdot {x}^{2}\right) \cdot \frac{-1}{2}} + \varepsilon \]
    5. associate-*l*N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left({x}^{2} \cdot \frac{-1}{2}\right)} + \varepsilon \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot {x}^{2}\right)} + \varepsilon \]
    7. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot {x}^{2}, \varepsilon\right)} \]
    8. unpow2N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \color{blue}{\left(x \cdot x\right)}, \varepsilon\right) \]
    9. associate-*r*N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{\left(\frac{-1}{2} \cdot x\right) \cdot x}, \varepsilon\right) \]
    10. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{-1}{2} \cdot x\right)}, \varepsilon\right) \]
    11. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, \color{blue}{x \cdot \left(\frac{-1}{2} \cdot x\right)}, \varepsilon\right) \]
    12. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(x \cdot \frac{-1}{2}\right)}, \varepsilon\right) \]
    13. lower-*.f6498.6

      \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \color{blue}{\left(x \cdot -0.5\right)}, \varepsilon\right) \]
  8. Simplified98.6%

    \[\leadsto \color{blue}{\mathsf{fma}\left(\varepsilon, x \cdot \left(x \cdot -0.5\right), \varepsilon\right)} \]
  9. Final simplification98.6%

    \[\leadsto \mathsf{fma}\left(\varepsilon, x \cdot \left(-0.5 \cdot x\right), \varepsilon\right) \]
  10. Add Preprocessing

Alternative 13: 97.9% accurate, 207.0× speedup?

\[\begin{array}{l} \\ \varepsilon \end{array} \]
; Alternative 13 (97.9% accurate, 207x speedup): leading Taylor term only —
; sin(x + eps) - sin(x) is approximated by eps; x is unused.
(FPCore (x eps) :precision binary64 eps)
/* Herbie alternative 13: sin(x + eps) - sin(x) ~= eps, from the Taylor
 * expansion eps*cos(x) followed by cos(x) ~= 1 (see derivation below).
 * 97.9% accurate on the sampled precondition, at 207x speedup. */
double code(double x, double eps) {
	return eps;
}
! Herbie alternative 13: approximates sin(x + eps) - sin(x) by eps
! (Taylor expansion in eps, then cos(x) ~= 1 per the derivation below).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps
end function
// Herbie alternative 13: sin(x + eps) - sin(x) ~= eps
// (first-order Taylor term in eps with cos(x) ~= 1; see derivation below).
public static double code(double x, double eps) {
	return eps;
}
def code(x, eps):
	"""Herbie alternative 13: approximate sin(x + eps) - sin(x) by eps itself."""
	approximation = eps
	return approximation
# Herbie alternative 13: sin(x + eps) - sin(x) ~= eps (Taylor in eps, cos(x) ~= 1).
function code(x, eps)
	return eps
end
% Herbie alternative 13: sin(x + eps) - sin(x) ~= eps (Taylor in eps, cos(x) ~= 1).
function tmp = code(x, eps)
	tmp = eps;
end
code[x_, eps_] := eps
\begin{array}{l}

\\
\varepsilon
\end{array}
Derivation
  1. Initial program 60.8%

    \[\sin \left(x + \varepsilon\right) - \sin x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  4. Step-by-step derivation
    1. lower-*.f64 N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
    2. lower-cos.f64 99.6

      \[\leadsto \varepsilon \cdot \color{blue}{\cos x} \]
  5. Simplified 99.6%

    \[\leadsto \color{blue}{\varepsilon \cdot \cos x} \]
  6. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \color{blue}{1} \]
  7. Step-by-step derivation
    1. Simplified 97.9%

      \[\leadsto \varepsilon \cdot \color{blue}{1} \]
    2. Step-by-step derivation
      1. *-rgt-identity 97.9

        \[\leadsto \color{blue}{\varepsilon} \]
    3. Applied egg-rr 97.9%

      \[\leadsto \color{blue}{\varepsilon} \]
    4. Add Preprocessing

    Developer Target 1: 99.9% accurate, 0.9× speedup?

    \[\begin{array}{l} \\ \left(2 \cdot \cos \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right) \end{array} \]
    ; Developer target 1: sum-to-product form 2*cos(x + eps/2)*sin(eps/2).
    (FPCore (x eps)
     :precision binary64
     (* (* 2.0 (cos (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
    /* Developer target 1: sin(x+eps) - sin(x) = 2*cos(x + eps/2)*sin(eps/2),
     * the sum-to-product identity, which avoids the cancellation of the
     * direct difference when eps is small relative to x. */
    double code(double x, double eps) {
    	return (2.0 * cos((x + (eps / 2.0)))) * sin((eps / 2.0));
    }
    
    ! Developer target 1: sum-to-product identity 2*cos(x + eps/2)*sin(eps/2).
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        code = (2.0d0 * cos((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
    end function
    
    // Developer target 1: sum-to-product identity 2*cos(x + eps/2)*sin(eps/2).
    public static double code(double x, double eps) {
    	return (2.0 * Math.cos((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
    }
    
    def code(x, eps):
    	"""Developer target 1: sin(x+eps) - sin(x) via the sum-to-product
    	identity 2*cos(x + eps/2)*sin(eps/2); same operation order as the
    	original, so results are bit-identical."""
    	half = eps / 2.0
    	return (2.0 * math.cos(x + half)) * math.sin(half)
    
    # Developer target 1: sum-to-product identity 2*cos(x + eps/2)*sin(eps/2).
    function code(x, eps)
    	return Float64(Float64(2.0 * cos(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0)))
    end
    
    % Developer target 1: sum-to-product identity 2*cos(x + eps/2)*sin(eps/2).
    function tmp = code(x, eps)
    	tmp = (2.0 * cos((x + (eps / 2.0)))) * sin((eps / 2.0));
    end
    
    code[x_, eps_] := N[(N[(2.0 * N[Cos[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(2 \cdot \cos \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
    \end{array}
    

    Developer Target 2: 99.6% accurate, 0.5× speedup?

    \[\begin{array}{l} \\ \sin x \cdot \left(\cos \varepsilon - 1\right) + \cos x \cdot \sin \varepsilon \end{array} \]
    ; Developer target 2: angle-addition form sin(x)*(cos(eps)-1) + cos(x)*sin(eps).
    (FPCore (x eps)
     :precision binary64
     (+ (* (sin x) (- (cos eps) 1.0)) (* (cos x) (sin eps))))
    /* Developer target 2: expand sin(x+eps) by angle addition, then subtract
     * sin(x): sin(x)*(cos(eps)-1) + cos(x)*sin(eps). */
    double code(double x, double eps) {
    	return (sin(x) * (cos(eps) - 1.0)) + (cos(x) * sin(eps));
    }
    
    ! Developer target 2: angle-addition form sin(x)*(cos(eps)-1) + cos(x)*sin(eps).
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        code = (sin(x) * (cos(eps) - 1.0d0)) + (cos(x) * sin(eps))
    end function
    
    // Developer target 2: angle-addition form sin(x)*(cos(eps)-1) + cos(x)*sin(eps).
    public static double code(double x, double eps) {
    	return (Math.sin(x) * (Math.cos(eps) - 1.0)) + (Math.cos(x) * Math.sin(eps));
    }
    
    def code(x, eps):
    	"""Developer target 2: sin(x+eps) - sin(x) via the angle-addition
    	expansion sin(x)*(cos(eps)-1) + cos(x)*sin(eps); identical operation
    	order to the original, so results are bit-identical."""
    	damped_sin = math.sin(x) * (math.cos(eps) - 1.0)
    	rotated_cos = math.cos(x) * math.sin(eps)
    	return damped_sin + rotated_cos
    
    # Developer target 2: angle-addition form sin(x)*(cos(eps)-1) + cos(x)*sin(eps).
    function code(x, eps)
    	return Float64(Float64(sin(x) * Float64(cos(eps) - 1.0)) + Float64(cos(x) * sin(eps)))
    end
    
    % Developer target 2: angle-addition form sin(x)*(cos(eps)-1) + cos(x)*sin(eps).
    function tmp = code(x, eps)
    	tmp = (sin(x) * (cos(eps) - 1.0)) + (cos(x) * sin(eps));
    end
    
    code[x_, eps_] := N[(N[(N[Sin[x], $MachinePrecision] * N[(N[Cos[eps], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision] + N[(N[Cos[x], $MachinePrecision] * N[Sin[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \sin x \cdot \left(\cos \varepsilon - 1\right) + \cos x \cdot \sin \varepsilon
    \end{array}
    

    Developer Target 3: 99.9% accurate, 0.9× speedup?

    \[\begin{array}{l} \\ \left(\cos \left(0.5 \cdot \left(\varepsilon - -2 \cdot x\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot 2 \end{array} \]
    ; Developer target 3: sum-to-product variant 2*cos(0.5*(eps + 2x))*sin(0.5*eps).
    (FPCore (x eps)
     :precision binary64
     (* (* (cos (* 0.5 (- eps (* -2.0 x)))) (sin (* 0.5 eps))) 2.0))
    /* Developer target 3: sum-to-product variant; 0.5*(eps - (-2*x)) is the
     * midpoint angle x + eps/2, so this equals 2*cos(x + eps/2)*sin(eps/2). */
    double code(double x, double eps) {
    	return (cos((0.5 * (eps - (-2.0 * x)))) * sin((0.5 * eps))) * 2.0;
    }
    
    ! Developer target 3: sum-to-product variant 2*cos(0.5*(eps + 2x))*sin(0.5*eps).
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        code = (cos((0.5d0 * (eps - ((-2.0d0) * x)))) * sin((0.5d0 * eps))) * 2.0d0
    end function
    
    // Developer target 3: sum-to-product variant 2*cos(0.5*(eps + 2x))*sin(0.5*eps).
    public static double code(double x, double eps) {
    	return (Math.cos((0.5 * (eps - (-2.0 * x)))) * Math.sin((0.5 * eps))) * 2.0;
    }
    
    def code(x, eps):
    	"""Developer target 3: sin(x+eps) - sin(x) as
    	2*cos(0.5*(eps + 2x))*sin(0.5*eps), keeping the original's exact
    	operation order (including eps - (-2*x)) for bit-identical results."""
    	mid_angle = 0.5 * (eps - (-2.0 * x))
    	half_eps = 0.5 * eps
    	return (math.cos(mid_angle) * math.sin(half_eps)) * 2.0
    
    # Developer target 3: sum-to-product variant 2*cos(0.5*(eps + 2x))*sin(0.5*eps).
    function code(x, eps)
    	return Float64(Float64(cos(Float64(0.5 * Float64(eps - Float64(-2.0 * x)))) * sin(Float64(0.5 * eps))) * 2.0)
    end
    
    % Developer target 3: sum-to-product variant 2*cos(0.5*(eps + 2x))*sin(0.5*eps).
    function tmp = code(x, eps)
    	tmp = (cos((0.5 * (eps - (-2.0 * x)))) * sin((0.5 * eps))) * 2.0;
    end
    
    code[x_, eps_] := N[(N[(N[Cos[N[(0.5 * N[(eps - N[(-2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * 2.0), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(\cos \left(0.5 \cdot \left(\varepsilon - -2 \cdot x\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot 2
    \end{array}
    

    Reproduce

    ?
    herbie shell --seed 2024208 
; FPCore input for reproducing this report with `herbie shell --seed 2024208`;
; the :alt clauses are the three developer targets listed in the report.
(FPCore (x eps)
  :name "2sin (example 3.3)"
  :precision binary64
  :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))

  :alt
  (! :herbie-platform default (* 2 (cos (+ x (/ eps 2))) (sin (/ eps 2))))

  :alt
  (! :herbie-platform default (+ (* (sin x) (- (cos eps) 1)) (* (cos x) (sin eps))))

  :alt
  (! :herbie-platform default (* (cos (* 1/2 (- eps (* -2 x)))) (sin (* 1/2 eps)) 2))

  (- (sin (+ x eps)) (sin x)))