2cos (problem 3.3.5)

Percentage Accurate: 51.4% → 99.5%
Time: 19.9s
Alternatives: 16
Speedup: 25.9×

Specification

Precondition (input domain):
\[\left(\left(-10000 \leq x \land x \leq 10000\right) \land 10^{-16} \cdot \left|x\right| < \varepsilon\right) \land \varepsilon < \left|x\right|\]
\[\begin{array}{l} \\ \cos \left(x + \varepsilon\right) - \cos x \end{array} \]
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
	return cos((x + eps)) - cos(x);
}
! Naive reference implementation: cos(x + eps) - cos(x).
! Subtracting nearly equal cosines cancels catastrophically for small eps.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = cos((x + eps)) - cos(x)
end function
// Naive evaluation of cos(x + eps) - cos(x); loses accuracy when eps is tiny
// because the two cosine values nearly cancel.
public static double code(double x, double eps) {
	double perturbed = Math.cos(x + eps);
	return perturbed - Math.cos(x);
}
def code(x, eps):
	"""Naive cos(x + eps) - cos(x); cancels badly when eps is tiny."""
	shifted = math.cos(x + eps)
	return shifted - math.cos(x)
# Naive reference implementation: cos(x + eps) - cos(x), rounded to Float64.
function code(x, eps)
	return Float64(cos(Float64(x + eps)) - cos(x))
end
% Naive reference implementation: cos(x + eps) - cos(x).
function tmp = code(x, eps)
	tmp = cos((x + eps)) - cos(x);
end
(* Naive reference implementation: cos(x + eps) - cos(x) at machine precision. *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Value

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 16 alternatives:

Alternative | Accuracy | Speedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 51.4% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \cos \left(x + \varepsilon\right) - \cos x \end{array} \]
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
// Initial program (repeated listing): naive cos(x + eps) - cos(x).
double code(double x, double eps) {
	return cos((x + eps)) - cos(x);
}
! Initial program (repeated listing): naive cos(x + eps) - cos(x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = cos((x + eps)) - cos(x)
end function
// Initial program (repeated listing): naive cos(x + eps) - cos(x).
public static double code(double x, double eps) {
	return Math.cos((x + eps)) - Math.cos(x);
}
# Initial program (repeated listing): naive cos(x + eps) - cos(x).
def code(x, eps):
	return math.cos((x + eps)) - math.cos(x)
# Initial program (repeated listing): naive cos(x + eps) - cos(x).
function code(x, eps)
	return Float64(cos(Float64(x + eps)) - cos(x))
end
% Initial program (repeated listing): naive cos(x + eps) - cos(x).
function tmp = code(x, eps)
	tmp = cos((x + eps)) - cos(x);
end
(* Initial program (repeated listing): naive cos(x + eps) - cos(x). *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}

Alternative 1: 99.5% accurate, 0.8× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   (sin x)
   (fma 0.16666666666666666 (* eps eps) -1.0)
   (* eps (* (cos x) (fma 0.041666666666666664 (* eps eps) -0.5))))))
// Alternative 1: third-order Taylor expansion of cos(x+eps)-cos(x) in eps,
// evaluated with fused multiply-adds; avoids the naive form's cancellation.
// Coefficients: 1/6 = 0.1666..., 1/24 = 0.041666...
double code(double x, double eps) {
	return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * (cos(x) * fma(0.041666666666666664, (eps * eps), -0.5))));
}
# Alternative 1: third-order Taylor expansion in eps, evaluated with fma.
function code(x, eps)
	return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * Float64(cos(x) * fma(0.041666666666666664, Float64(eps * eps), -0.5)))))
end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * N[(N[Cos[x], $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) + \frac{1}{6} \cdot \sin x\right)\right)\right)} \]
  7. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)\right)} \]
  8. Add Preprocessing

Alternative 2: 99.4% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot -0.5\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   (sin x)
   (fma 0.16666666666666666 (* eps eps) -1.0)
   (* eps (* (cos x) -0.5)))))
// Alternative 2: Taylor expansion truncated after the eps^2 * cos(x) term.
double code(double x, double eps) {
	return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * (cos(x) * -0.5)));
}
# Alternative 2: Taylor expansion truncated after the eps^2 * cos(x) term.
function code(x, eps)
	return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * Float64(cos(x) * -0.5))))
end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * N[(N[Cos[x], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot -0.5\right)\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right)\right)} \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right) + \frac{-1}{2} \cdot \cos x\right)}\right) \]
    3. distribute-lft-inN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)}\right) \]
    4. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \left(\color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon} + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)\right) \]
    5. associate-+r+N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\sin \left(\mathsf{neg}\left(x\right)\right) + \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)} \]
  8. Simplified99.3%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(-0.5 \cdot \cos x\right)\right)} \]
  9. Final simplification99.3%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot -0.5\right)\right) \]
  10. Add Preprocessing

Alternative 3: 99.4% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(-0.5, \varepsilon \cdot \cos x, \sin x \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   -0.5
   (* eps (cos x))
   (* (sin x) (fma eps (* eps 0.16666666666666666) -1.0)))))
// Alternative 3: same series regrouped so the -0.5*eps*cos(x) term leads the fma.
double code(double x, double eps) {
	return eps * fma(-0.5, (eps * cos(x)), (sin(x) * fma(eps, (eps * 0.16666666666666666), -1.0)));
}
# Alternative 3: same series regrouped so the -0.5*eps*cos(x) term leads the fma.
function code(x, eps)
	return Float64(eps * fma(-0.5, Float64(eps * cos(x)), Float64(sin(x) * fma(eps, Float64(eps * 0.16666666666666666), -1.0))))
end
code[x_, eps_] := N[(eps * N[(-0.5 * N[(eps * N[Cos[x], $MachinePrecision]), $MachinePrecision] + N[(N[Sin[x], $MachinePrecision] * N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(-0.5, \varepsilon \cdot \cos x, \sin x \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right)\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) - \sin x\right)} \]
    2. distribute-rgt-inN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\left(\frac{-1}{2} \cdot \cos x\right) \cdot \varepsilon + \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon\right)} - \sin x\right) \]
    3. associate--l+N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\frac{-1}{2} \cdot \cos x\right) \cdot \varepsilon + \left(\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon - \sin x\right)\right)} \]
    4. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\cos x \cdot \varepsilon\right)} + \left(\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon - \sin x\right)\right) \]
    5. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \cos x\right)} + \left(\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon - \sin x\right)\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon - \sin x\right)} \]
    7. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \color{blue}{\varepsilon \cdot \cos x}, \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon - \sin x\right) \]
    8. lower-cos.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \color{blue}{\cos x}, \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon - \sin x\right) \]
    9. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon + \left(\mathsf{neg}\left(\sin x\right)\right)}\right) \]
    10. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \color{blue}{\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right)} + \left(\mathsf{neg}\left(\sin x\right)\right)\right) \]
    11. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \varepsilon \cdot \color{blue}{\left(\left(\frac{1}{6} \cdot \varepsilon\right) \cdot \sin x\right)} + \left(\mathsf{neg}\left(\sin x\right)\right)\right) \]
    12. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot \varepsilon\right)\right) \cdot \sin x} + \left(\mathsf{neg}\left(\sin x\right)\right)\right) \]
    13. neg-mul-1N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \varepsilon\right)\right) \cdot \sin x + \color{blue}{-1 \cdot \sin x}\right) \]
    14. distribute-rgt-outN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \color{blue}{\sin x \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \varepsilon\right) + -1\right)}\right) \]
    15. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \color{blue}{\sin x \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \varepsilon\right) + -1\right)}\right) \]
    16. lower-sin.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \color{blue}{\sin x} \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \varepsilon\right) + -1\right)\right) \]
    17. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\frac{-1}{2}, \varepsilon \cdot \cos x, \sin x \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{1}{6} \cdot \varepsilon, -1\right)}\right) \]
  5. Simplified99.3%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(-0.5, \varepsilon \cdot \cos x, \sin x \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right)\right)} \]
  6. Add Preprocessing

Alternative 4: 99.3% accurate, 0.9× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* eps (- (* -0.5 (* eps (cos x))) (sin x))))
// Alternative 4: second-order series eps * (-0.5*eps*cos(x) - sin(x)); no fma needed.
double code(double x, double eps) {
	return eps * ((-0.5 * (eps * cos(x))) - sin(x));
}
! Alternative 4: second-order series eps * (-0.5*eps*cos(x) - sin(x)).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * (((-0.5d0) * (eps * cos(x))) - sin(x))
end function
// Alternative 4: second-order series eps * (-0.5*eps*cos(x) - sin(x)).
public static double code(double x, double eps) {
	return eps * ((-0.5 * (eps * Math.cos(x))) - Math.sin(x));
}
def code(x, eps):
	"""Second-order series for cos(x + eps) - cos(x): eps * (-0.5*eps*cos(x) - sin(x))."""
	quadratic_term = -0.5 * (eps * math.cos(x))
	return eps * (quadratic_term - math.sin(x))
# Alternative 4: second-order series eps * (-0.5*eps*cos(x) - sin(x)).
function code(x, eps)
	return Float64(eps * Float64(Float64(-0.5 * Float64(eps * cos(x))) - sin(x)))
end
% Alternative 4: second-order series eps * (-0.5*eps*cos(x) - sin(x)).
function tmp = code(x, eps)
	tmp = eps * ((-0.5 * (eps * cos(x))) - sin(x));
end
(* Alternative 4: second-order series eps * (-0.5*eps*cos(x) - sin(x)). *)
code[x_, eps_] := N[(eps * N[(N[(-0.5 * N[(eps * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)} \]
    2. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \cos x\right) \cdot \frac{-1}{2}} - \sin x\right) \]
    3. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\cos x \cdot \frac{-1}{2}\right)} - \sin x\right) \]
    4. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \cos x\right)} - \sin x\right) \]
    5. lower--.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right) - \sin x\right)} \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\cos x \cdot \frac{-1}{2}\right)} - \sin x\right) \]
    7. associate-*r*N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \cos x\right) \cdot \frac{-1}{2}} - \sin x\right) \]
    8. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\varepsilon \cdot \cos x\right)} - \sin x\right) \]
    9. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\frac{-1}{2} \cdot \left(\varepsilon \cdot \cos x\right)} - \sin x\right) \]
    10. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \color{blue}{\left(\varepsilon \cdot \cos x\right)} - \sin x\right) \]
    11. lower-cos.f64N/A

      \[\leadsto \varepsilon \cdot \left(\frac{-1}{2} \cdot \left(\varepsilon \cdot \color{blue}{\cos x}\right) - \sin x\right) \]
    12. lower-sin.f64 98.8%

      \[\leadsto \varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \color{blue}{\sin x}\right) \]
  5. Simplified98.8%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)} \]
  6. Add Preprocessing

Alternative 5: 98.8% accurate, 1.5× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   (sin x)
   (fma 0.16666666666666666 (* eps eps) -1.0)
   (* eps (fma eps (* eps 0.041666666666666664) -0.5)))))
// Alternative 5: like Alternative 1 but with cos(x) dropped from the eps^2 term
// (Taylor expansion in x around 0 replaced it with a constant factor).
double code(double x, double eps) {
	return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * fma(eps, (eps * 0.041666666666666664), -0.5)));
}
# Alternative 5: like Alternative 1 but with cos(x) dropped from the eps^2 term.
function code(x, eps)
	return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * fma(eps, Float64(eps * 0.041666666666666664), -0.5))))
end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * N[(eps * N[(eps * 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) + \frac{1}{6} \cdot \sin x\right)\right)\right)} \]
  7. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)\right)} \]
  8. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \color{blue}{\varepsilon \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)}\right) \]
  9. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \color{blue}{\varepsilon \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)}\right) \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \color{blue}{\left(\frac{1}{24} \cdot {\varepsilon}^{2} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)}\right) \]
    3. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\color{blue}{{\varepsilon}^{2} \cdot \frac{1}{24}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)\right) \]
    4. unpow2N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \varepsilon\right)} \cdot \frac{1}{24} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)\right) \]
    5. associate-*l*N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \frac{1}{24}\right)} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)\right) \]
    6. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{1}{24} \cdot \varepsilon\right)} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)\right) \]
    7. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\varepsilon \cdot \left(\frac{1}{24} \cdot \varepsilon\right) + \color{blue}{\frac{-1}{2}}\right)\right) \]
    8. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{1}{24} \cdot \varepsilon, \frac{-1}{2}\right)}\right) \]
    9. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot \frac{1}{24}}, \frac{-1}{2}\right)\right) \]
    10. lower-*.f64 98.2%

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \color{blue}{\varepsilon \cdot 0.041666666666666664}, -0.5\right)\right) \]
  10. Simplified98.2%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)}\right) \]
  11. Add Preprocessing

Alternative 6: 98.8% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot -0.5\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* eps (fma (sin x) (fma 0.16666666666666666 (* eps eps) -1.0) (* eps -0.5))))
// Alternative 6: simplest fma form; the eps^2 term keeps only the constant -0.5.
double code(double x, double eps) {
	return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * -0.5));
}
# Alternative 6: simplest fma form; the eps^2 term keeps only the constant -0.5.
function code(x, eps)
	return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * -0.5)))
end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot -0.5\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right)\right)} \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right) + \frac{-1}{2} \cdot \cos x\right)}\right) \]
    3. distribute-lft-inN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)}\right) \]
    4. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \left(\color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon} + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)\right) \]
    5. associate-+r+N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\sin \left(\mathsf{neg}\left(x\right)\right) + \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)} \]
  8. Simplified99.3%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(-0.5 \cdot \cos x\right)\right)} \]
  9. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \color{blue}{\frac{-1}{2} \cdot \varepsilon}\right) \]
  10. Step-by-step derivation
    1. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot \varepsilon, -1\right), \color{blue}{\varepsilon \cdot \frac{-1}{2}}\right) \]
    2. lower-*.f6498.2

      \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \color{blue}{\varepsilon \cdot -0.5}\right) \]
  11. Simplified98.2%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \color{blue}{\varepsilon \cdot -0.5}\right) \]
  12. Add Preprocessing

Alternative 7: 98.3% accurate, 2.1× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon \cdot x, \mathsf{fma}\left(-0.027777777777777776, \varepsilon \cdot \varepsilon, 0.16666666666666666\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(-0.5, \left(\varepsilon \cdot \varepsilon\right) \cdot 0.041666666666666664, 0.25\right)\right), \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (fma
  x
  (fma
   x
   (fma
    (* eps x)
    (fma -0.027777777777777776 (* eps eps) 0.16666666666666666)
    (* (* eps eps) (fma -0.5 (* (* eps eps) 0.041666666666666664) 0.25)))
   (* eps (fma 0.16666666666666666 (* eps eps) -1.0)))
  (* (* eps eps) (fma 0.041666666666666664 (* eps eps) -0.5))))
// Herbie alternative 7 (reported 98.3% accurate): C translation of the
// FPCore above. Evaluates a Taylor polynomial of cos(x + eps) - cos(x)
// in x and eps using nested fma calls; constants 0.0277... = 1/36,
// 0.1666... = 1/6, 0.0416... = 1/24 come from the series coefficients.
double code(double x, double eps) {
	return fma(x, fma(x, fma((eps * x), fma(-0.027777777777777776, (eps * eps), 0.16666666666666666), ((eps * eps) * fma(-0.5, ((eps * eps) * 0.041666666666666664), 0.25))), (eps * fma(0.16666666666666666, (eps * eps), -1.0))), ((eps * eps) * fma(0.041666666666666664, (eps * eps), -0.5)));
}
# Herbie alternative 7: Julia translation of the fma polynomial above.
# The explicit Float64(...) wrappers pin every intermediate to binary64,
# matching the FPCore :precision annotation.
function code(x, eps)
	return fma(x, fma(x, fma(Float64(eps * x), fma(-0.027777777777777776, Float64(eps * eps), 0.16666666666666666), Float64(Float64(eps * eps) * fma(-0.5, Float64(Float64(eps * eps) * 0.041666666666666664), 0.25))), Float64(eps * fma(0.16666666666666666, Float64(eps * eps), -1.0))), Float64(Float64(eps * eps) * fma(0.041666666666666664, Float64(eps * eps), -0.5)))
end
code[x_, eps_] := N[(x * N[(x * N[(N[(eps * x), $MachinePrecision] * N[(-0.027777777777777776 * N[(eps * eps), $MachinePrecision] + 0.16666666666666666), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * N[(-0.5 * N[(N[(eps * eps), $MachinePrecision] * 0.041666666666666664), $MachinePrecision] + 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(eps * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon \cdot x, \mathsf{fma}\left(-0.027777777777777776, \varepsilon \cdot \varepsilon, 0.16666666666666666\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(-0.5, \left(\varepsilon \cdot \varepsilon\right) \cdot 0.041666666666666664, 0.25\right)\right), \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64 — accuracy N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-neg — accuracy N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64 — accuracy N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified — 99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{x \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + x \cdot \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right) + \varepsilon \cdot \left(x \cdot \left(\frac{1}{6} + \frac{-1}{36} \cdot {\varepsilon}^{2}\right)\right)\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)} \]
  7. Step-by-step derivation
    1. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + x \cdot \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right) + \varepsilon \cdot \left(x \cdot \left(\frac{1}{6} + \frac{-1}{36} \cdot {\varepsilon}^{2}\right)\right)\right), {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)} \]
  8. Simplified — 97.4%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(x \cdot \varepsilon, \mathsf{fma}\left(-0.027777777777777776, \varepsilon \cdot \varepsilon, 0.16666666666666666\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(-0.5, 0.041666666666666664 \cdot \left(\varepsilon \cdot \varepsilon\right), 0.25\right)\right), \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)} \]
  9. Final simplification — 97.4%

    \[\leadsto \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon \cdot x, \mathsf{fma}\left(-0.027777777777777776, \varepsilon \cdot \varepsilon, 0.16666666666666666\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(-0.5, \left(\varepsilon \cdot \varepsilon\right) \cdot 0.041666666666666664, 0.25\right)\right), \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right) \]
  10. Add Preprocessing

Alternative 8: 98.1% accurate, 2.4× speedup?

\[\begin{array}{l} \\ \begin{array}{l} t_0 := \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\\ \varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, -0.5 \cdot t\_0, x \cdot \left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666\right)\right), -1\right)\right), \varepsilon \cdot t\_0\right) \end{array} \end{array} \]
; Herbie alternative 8: approximation of cos(x + eps) - cos(x) with a
; shared subterm t_0 = fma(eps, eps/24, -1/2), i.e. eps^2/24 - 1/2,
; used twice in the polynomial. Whole expression is scaled by eps.
(FPCore (x eps)
 :precision binary64
 (let* ((t_0 (fma eps (* eps 0.041666666666666664) -0.5)))
   (*
    eps
    (fma
     x
     (fma
      eps
      (* eps 0.16666666666666666)
      (fma
       x
       (fma
        eps
        (* -0.5 t_0)
        (*
         x
         (* (fma eps (* eps 0.16666666666666666) -1.0) -0.16666666666666666)))
       -1.0))
     (* eps t_0)))))
// Herbie alternative 8 (reported 98.1% accurate): C translation of the
// FPCore above. t_0 = eps^2/24 - 1/2 is hoisted as a common subexpression
// and reused twice; the result is the polynomial scaled by eps.
double code(double x, double eps) {
	double t_0 = fma(eps, (eps * 0.041666666666666664), -0.5);
	return eps * fma(x, fma(eps, (eps * 0.16666666666666666), fma(x, fma(eps, (-0.5 * t_0), (x * (fma(eps, (eps * 0.16666666666666666), -1.0) * -0.16666666666666666))), -1.0)), (eps * t_0));
}
# Herbie alternative 8: Julia translation of the fma polynomial above.
# t_0 = eps^2/24 - 1/2 is computed once and reused; Float64(...) wrappers
# force binary64 rounding at each step.
function code(x, eps)
	t_0 = fma(eps, Float64(eps * 0.041666666666666664), -0.5)
	return Float64(eps * fma(x, fma(eps, Float64(eps * 0.16666666666666666), fma(x, fma(eps, Float64(-0.5 * t_0), Float64(x * Float64(fma(eps, Float64(eps * 0.16666666666666666), -1.0) * -0.16666666666666666))), -1.0)), Float64(eps * t_0)))
end
code[x_, eps_] := Block[{t$95$0 = N[(eps * N[(eps * 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]}, N[(eps * N[(x * N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + N[(x * N[(eps * N[(-0.5 * t$95$0), $MachinePrecision] + N[(x * N[(N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + -1.0), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(eps * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\\
\varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, -0.5 \cdot t_0, x \cdot \left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666\right)\right), -1\right)\right), \varepsilon \cdot t_0\right)
\end{array}
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) + \frac{1}{6} \cdot \sin x\right)\right)\right)} \]
  7. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)\right)} \]
  8. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) + x \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + x \cdot \left(\frac{-1}{2} \cdot \left(\varepsilon \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right) + \frac{-1}{6} \cdot \left(x \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right)\right)\right) - 1\right)\right)} \]
  9. Simplified — 97.2%

    \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right) \cdot -0.5, x \cdot \left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666\right)\right), -1\right)\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\right)} \]
  10. Final simplification — 97.2%

    \[\leadsto \varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, -0.5 \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right), x \cdot \left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666\right)\right), -1\right)\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\right) \]
  11. Add Preprocessing

Alternative 9: 98.1% accurate, 3.4× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666, \varepsilon \cdot 0.25\right), -1\right)\right), \varepsilon \cdot -0.5\right) \end{array} \]
; Herbie alternative 9: shorter polynomial approximation of
; cos(x + eps) - cos(x), scaled by eps; drops the eps^2/24 term of
; alternative 8 in favor of a plain eps * -0.5 tail.
(FPCore (x eps)
 :precision binary64
 (*
  eps
  (fma
   x
   (fma
    eps
    (* eps 0.16666666666666666)
    (fma
     x
     (fma
      x
      (* (fma eps (* eps 0.16666666666666666) -1.0) -0.16666666666666666)
      (* eps 0.25))
     -1.0))
   (* eps -0.5))))
// Herbie alternative 9 (reported 98.1% accurate): C translation of the
// FPCore above. Nested fma evaluation of the eps-scaled polynomial;
// constants 1/6, 1/4, 1/2 come from the Taylor coefficients.
double code(double x, double eps) {
	return eps * fma(x, fma(eps, (eps * 0.16666666666666666), fma(x, fma(x, (fma(eps, (eps * 0.16666666666666666), -1.0) * -0.16666666666666666), (eps * 0.25)), -1.0)), (eps * -0.5));
}
# Herbie alternative 9: Julia translation of the fma polynomial above,
# with Float64(...) wrappers forcing binary64 rounding at each step.
function code(x, eps)
	return Float64(eps * fma(x, fma(eps, Float64(eps * 0.16666666666666666), fma(x, fma(x, Float64(fma(eps, Float64(eps * 0.16666666666666666), -1.0) * -0.16666666666666666), Float64(eps * 0.25)), -1.0)), Float64(eps * -0.5)))
end
code[x_, eps_] := N[(eps * N[(x * N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + N[(x * N[(x * N[(N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + -1.0), $MachinePrecision] * -0.16666666666666666), $MachinePrecision] + N[(eps * 0.25), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(eps * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666, \varepsilon \cdot 0.25\right), -1\right)\right), \varepsilon \cdot -0.5\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right)\right)} \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \varepsilon \cdot \color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right) + \frac{-1}{2} \cdot \cos x\right)}\right) \]
    3. distribute-lft-inN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)}\right) \]
    4. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\sin \left(\mathsf{neg}\left(x\right)\right) + \left(\color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon} + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)\right) \]
    5. associate-+r+N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\left(\sin \left(\mathsf{neg}\left(x\right)\right) + \left(\frac{1}{6} \cdot \left(\varepsilon \cdot \sin x\right)\right) \cdot \varepsilon\right) + \varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x\right)\right)} \]
  8. Simplified — 99.3%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(-0.5 \cdot \cos x\right)\right)} \]
  9. Taylor expanded in x around 0

    \[\leadsto \varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \varepsilon + x \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + x \cdot \left(\frac{-1}{6} \cdot \left(x \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + \frac{1}{4} \cdot \varepsilon\right)\right) - 1\right)\right)} \]
  10. Step-by-step derivation
    1. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(x \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + x \cdot \left(\frac{-1}{6} \cdot \left(x \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + \frac{1}{4} \cdot \varepsilon\right)\right) - 1\right) + \frac{-1}{2} \cdot \varepsilon\right)} \]
    2. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(x, \left(\frac{1}{6} \cdot {\varepsilon}^{2} + x \cdot \left(\frac{-1}{6} \cdot \left(x \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + \frac{1}{4} \cdot \varepsilon\right)\right) - 1, \frac{-1}{2} \cdot \varepsilon\right)} \]
  11. Simplified97.2%

    \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666, \varepsilon \cdot 0.25\right), -1\right)\right), \varepsilon \cdot -0.5\right)} \]
  12. Add Preprocessing

Alternative 10: 97.8% accurate, 4.1× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x, \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\varepsilon, 0.16666666666666666, x \cdot 0.25\right), -1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right) \end{array} \]
; Herbie alternative 10: compact approximation of cos(x + eps) - cos(x):
; x * eps * (eps*(eps/6 + x/4) - 1)  +  eps^2 * (eps^2/24 - 1/2),
; written with fma for the outer sum and inner coefficients.
(FPCore (x eps)
 :precision binary64
 (fma
  x
  (* eps (fma eps (fma eps 0.16666666666666666 (* x 0.25)) -1.0))
  (* (* eps eps) (fma 0.041666666666666664 (* eps eps) -0.5))))
// Herbie alternative 10 (reported 97.8% accurate): C translation of the
// FPCore above: fma(x, eps*(eps*(eps/6 + x/4) - 1), eps^2*(eps^2/24 - 1/2)).
double code(double x, double eps) {
	return fma(x, (eps * fma(eps, fma(eps, 0.16666666666666666, (x * 0.25)), -1.0)), ((eps * eps) * fma(0.041666666666666664, (eps * eps), -0.5)));
}
# Herbie alternative 10: Julia translation of the fma polynomial above,
# with Float64(...) wrappers forcing binary64 rounding at each step.
function code(x, eps)
	return fma(x, Float64(eps * fma(eps, fma(eps, 0.16666666666666666, Float64(x * 0.25)), -1.0)), Float64(Float64(eps * eps) * fma(0.041666666666666664, Float64(eps * eps), -0.5)))
end
code[x_, eps_] := N[(x * N[(eps * N[(eps * N[(eps * 0.16666666666666666 + N[(x * 0.25), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x, \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\varepsilon, 0.16666666666666666, x \cdot 0.25\right), -1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(x \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)} \]
  7. Step-by-step derivation
    1. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \left(x \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    2. *-commutativeN/A

      \[\leadsto x \cdot \left(\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left(\left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \cdot x\right)} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    3. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right) \cdot x} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    4. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right)} \cdot x + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    5. +-commutativeN/A

      \[\leadsto x \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) \cdot x\right)} + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) \cdot x, {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)} \]
  8. Simplified — 97.1%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right), x \cdot -0.5, \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)} \]
  9. Taylor expanded in eps around 0

    \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \varepsilon + \frac{1}{4} \cdot x\right) - 1\right)}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
  10. Step-by-step derivation
    1. distribute-lft-inN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot \varepsilon\right) + \varepsilon \cdot \left(\frac{1}{4} \cdot x\right)\right)} - 1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    2. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\left(\color{blue}{\left(\frac{1}{6} \cdot \varepsilon\right) \cdot \varepsilon} + \varepsilon \cdot \left(\frac{1}{4} \cdot x\right)\right) - 1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    3. associate-*r*N/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\left(\color{blue}{\frac{1}{6} \cdot \left(\varepsilon \cdot \varepsilon\right)} + \varepsilon \cdot \left(\frac{1}{4} \cdot x\right)\right) - 1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    4. unpow2N/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\left(\frac{1}{6} \cdot \color{blue}{{\varepsilon}^{2}} + \varepsilon \cdot \left(\frac{1}{4} \cdot x\right)\right) - 1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    5. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + \varepsilon \cdot \color{blue}{\left(x \cdot \frac{1}{4}\right)}\right) - 1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    6. associate-*r*N/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + \color{blue}{\left(\varepsilon \cdot x\right) \cdot \frac{1}{4}}\right) - 1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    7. *-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + \color{blue}{\frac{1}{4} \cdot \left(\varepsilon \cdot x\right)}\right) - 1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    8. lower-*.f64N/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + \frac{1}{4} \cdot \left(\varepsilon \cdot x\right)\right) - 1\right)}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    9. sub-negN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \color{blue}{\left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + \frac{1}{4} \cdot \left(\varepsilon \cdot x\right)\right) + \left(\mathsf{neg}\left(1\right)\right)\right)}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    10. metadata-evalN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\left(\frac{1}{6} \cdot {\varepsilon}^{2} + \frac{1}{4} \cdot \left(\varepsilon \cdot x\right)\right) + \color{blue}{-1}\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    11. +-commutativeN/A

      \[\leadsto \mathsf{fma}\left(x, \varepsilon \cdot \left(\color{blue}{\left(\frac{1}{4} \cdot \left(\varepsilon \cdot x\right) + \frac{1}{6} \cdot {\varepsilon}^{2}\right)} + -1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
  11. Simplified97.1%

    \[\leadsto \mathsf{fma}\left(x, \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\varepsilon, 0.16666666666666666, x \cdot 0.25\right), -1\right)}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right) \]
  12. Add Preprocessing

Alternative 11: 97.8% accurate, 6.9× speedup?

\[\begin{array}{l} \\ \mathsf{fma}\left(x, -\varepsilon, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right) \end{array} \]
; Herbie alternative 11: lowest-order approximation of
; cos(x + eps) - cos(x), i.e. -x*eps + eps^2 * (eps^2/24 - 1/2),
; expressed as a single fma.
(FPCore (x eps)
 :precision binary64
 (fma x (- eps) (* (* eps eps) (fma 0.041666666666666664 (* eps eps) -0.5))))
// Herbie alternative 11 (reported 97.8% accurate, fastest listed): C
// translation of the FPCore above: fma(x, -eps, eps^2*(eps^2/24 - 1/2)),
// i.e. -x*eps - eps^2/2 + eps^4/24.
double code(double x, double eps) {
	return fma(x, -eps, ((eps * eps) * fma(0.041666666666666664, (eps * eps), -0.5)));
}
# Herbie alternative 11: fma-based polynomial model of cos(x + eps) - cos(x),
# i.e. x*(-eps) + eps^2 * (eps^2/24 - 1/2) in binary64.
function code(x, eps)
	return fma(x, Float64(-eps), Float64(Float64(eps * eps) * fma(0.041666666666666664, Float64(eps * eps), -0.5)))
end
code[x_, eps_] := N[(x * (-eps) + N[(N[(eps * eps), $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x, -\varepsilon, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(x \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)} \]
  7. Step-by-step derivation
    1. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \left(x \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    2. *-commutativeN/A

      \[\leadsto x \cdot \left(\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left(\left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \cdot x\right)} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    3. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right) \cdot x} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    4. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right)} \cdot x + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    5. +-commutativeN/A

      \[\leadsto x \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) \cdot x\right)} + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) \cdot x, {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)} \]
  8. Simplified97.1%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right), x \cdot -0.5, \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)} \]
  9. Taylor expanded in eps around 0

    \[\leadsto \mathsf{fma}\left(x, \color{blue}{-1 \cdot \varepsilon}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
  10. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{\mathsf{neg}\left(\varepsilon\right)}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(\frac{1}{24}, \varepsilon \cdot \varepsilon, \frac{-1}{2}\right)\right) \]
    2. lower-neg.f64 97.0

      \[\leadsto \mathsf{fma}\left(x, \color{blue}{-\varepsilon}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right) \]
  11. Simplified97.0%

    \[\leadsto \mathsf{fma}\left(x, \color{blue}{-\varepsilon}, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right) \]
  12. Add Preprocessing

Alternative 12: 97.6% accurate, 8.3× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.25, -0.5\right) - x\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* eps (- (* eps (fma x (* x 0.25) -0.5)) x)))
/* Herbie alternative 12 (97.6% accurate): eps * (eps*(x^2/4 - 1/2) - x),
 * a Taylor-style model of cos(x + eps) - cos(x); fma is from <math.h>. */
double code(double x, double eps) {
	return eps * ((eps * fma(x, (x * 0.25), -0.5)) - x);
}
# Herbie alternative 12: eps * (eps*(x^2/4 - 1/2) - x),
# a polynomial model of cos(x + eps) - cos(x) in binary64.
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * fma(x, Float64(x * 0.25), -0.5)) - x))
end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(x * N[(x * 0.25), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.25, -0.5\right) - x\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
  4. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) - \sin x\right)} \]
    2. sub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right)\right) + \left(\mathsf{neg}\left(\sin x\right)\right)\right)} \]
    3. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\mathsf{fma}\left(\varepsilon, \frac{-1}{2} \cdot \cos x + \varepsilon \cdot \left(\frac{1}{24} \cdot \left(\varepsilon \cdot \cos x\right) - \frac{-1}{6} \cdot \sin x\right), \mathsf{neg}\left(\sin x\right)\right)} \]
  5. Simplified99.7%

    \[\leadsto \color{blue}{\varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\cos x, -0.5 + \varepsilon \cdot \left(\varepsilon \cdot 0.041666666666666664\right), \varepsilon \cdot \left(0.16666666666666666 \cdot \sin x\right)\right), \sin \left(-x\right)\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{x \cdot \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(x \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)} \]
  7. Step-by-step derivation
    1. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \left(x \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    2. *-commutativeN/A

      \[\leadsto x \cdot \left(\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \color{blue}{\left(\left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \cdot x\right)} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    3. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\left(\frac{-1}{2} \cdot {\varepsilon}^{2}\right) \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right) \cdot x} + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    4. associate-*r*N/A

      \[\leadsto x \cdot \left(\color{blue}{\left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right)} \cdot x + \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right)\right) + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    5. +-commutativeN/A

      \[\leadsto x \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) \cdot x\right)} + {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \color{blue}{\mathsf{fma}\left(x, \varepsilon \cdot \left(\frac{1}{6} \cdot {\varepsilon}^{2} - 1\right) + \left(\frac{-1}{2} \cdot \left({\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)\right) \cdot x, {\varepsilon}^{2} \cdot \left(\frac{1}{24} \cdot {\varepsilon}^{2} - \frac{1}{2}\right)\right)} \]
  8. Simplified97.1%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, \mathsf{fma}\left(\left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right), x \cdot -0.5, \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)} \]
  9. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(-1 \cdot x + \varepsilon \cdot \left(\frac{1}{4} \cdot {x}^{2} - \frac{1}{2}\right)\right)} \]
  10. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(-1 \cdot x + \varepsilon \cdot \left(\frac{1}{4} \cdot {x}^{2} - \frac{1}{2}\right)\right)} \]
    2. +-commutativeN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{4} \cdot {x}^{2} - \frac{1}{2}\right) + -1 \cdot x\right)} \]
    3. mul-1-negN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(\frac{1}{4} \cdot {x}^{2} - \frac{1}{2}\right) + \color{blue}{\left(\mathsf{neg}\left(x\right)\right)}\right) \]
    4. unsub-negN/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{4} \cdot {x}^{2} - \frac{1}{2}\right) - x\right)} \]
    5. lower--.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{4} \cdot {x}^{2} - \frac{1}{2}\right) - x\right)} \]
    6. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\frac{1}{4} \cdot {x}^{2} - \frac{1}{2}\right)} - x\right) \]
    7. sub-negN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{1}{4} \cdot {x}^{2} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} - x\right) \]
    8. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(\color{blue}{{x}^{2} \cdot \frac{1}{4}} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right) - x\right) \]
    9. unpow2N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(\color{blue}{\left(x \cdot x\right)} \cdot \frac{1}{4} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right) - x\right) \]
    10. associate-*l*N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(\color{blue}{x \cdot \left(x \cdot \frac{1}{4}\right)} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right) - x\right) \]
    11. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \color{blue}{\left(\frac{1}{4} \cdot x\right)} + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right) - x\right) \]
    12. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(x \cdot \left(\frac{1}{4} \cdot x\right) + \color{blue}{\frac{-1}{2}}\right) - x\right) \]
    13. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\mathsf{fma}\left(x, \frac{1}{4} \cdot x, \frac{-1}{2}\right)} - x\right) \]
    14. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot \frac{1}{4}}, \frac{-1}{2}\right) - x\right) \]
    15. lower-*.f64 96.9

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(x, \color{blue}{x \cdot 0.25}, -0.5\right) - x\right) \]
  11. Simplified96.9%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.25, -0.5\right) - x\right)} \]
  12. Add Preprocessing

Alternative 13: 97.6% accurate, 8.3× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot x, -0.5\right) - x\right) \end{array} \]
(FPCore (x eps)
 :precision binary64
 (* eps (- (* eps (fma 0.16666666666666666 (* eps x) -0.5)) x)))
/* Herbie alternative 13 (97.6% accurate): eps * (eps*(eps*x/6 - 1/2) - x),
 * a Taylor-style model of cos(x + eps) - cos(x); fma is from <math.h>. */
double code(double x, double eps) {
	return eps * ((eps * fma(0.16666666666666666, (eps * x), -0.5)) - x);
}
# Herbie alternative 13: eps * (eps*(eps*x/6 - 1/2) - x),
# a polynomial model of cos(x + eps) - cos(x) in binary64.
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * fma(0.16666666666666666, Float64(eps * x), -0.5)) - x))
end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(0.16666666666666666 * N[(eps * x), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot x, -0.5\right) - x\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\left(\cos \varepsilon + -1 \cdot \left(x \cdot \sin \varepsilon\right)\right) - 1} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \left(\cos \varepsilon + \color{blue}{\left(\mathsf{neg}\left(x \cdot \sin \varepsilon\right)\right)}\right) - 1 \]
    2. unsub-negN/A

      \[\leadsto \color{blue}{\left(\cos \varepsilon - x \cdot \sin \varepsilon\right)} - 1 \]
    3. associate--l-N/A

      \[\leadsto \color{blue}{\cos \varepsilon - \left(x \cdot \sin \varepsilon + 1\right)} \]
    4. lower--.f64N/A

      \[\leadsto \color{blue}{\cos \varepsilon - \left(x \cdot \sin \varepsilon + 1\right)} \]
    5. lower-cos.f64N/A

      \[\leadsto \color{blue}{\cos \varepsilon} - \left(x \cdot \sin \varepsilon + 1\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \cos \varepsilon - \color{blue}{\mathsf{fma}\left(x, \sin \varepsilon, 1\right)} \]
    7. lower-sin.f64 52.5

      \[\leadsto \cos \varepsilon - \mathsf{fma}\left(x, \color{blue}{\sin \varepsilon}, 1\right) \]
  5. Simplified52.5%

    \[\leadsto \color{blue}{\cos \varepsilon - \mathsf{fma}\left(x, \sin \varepsilon, 1\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot x\right) - \frac{1}{2}\right) - x\right)} \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot x\right) - \frac{1}{2}\right) - x\right)} \]
    2. lower--.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot x\right) - \frac{1}{2}\right) - x\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot x\right) - \frac{1}{2}\right)} - x\right) \]
    4. sub-negN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\left(\frac{1}{6} \cdot \left(\varepsilon \cdot x\right) + \left(\mathsf{neg}\left(\frac{1}{2}\right)\right)\right)} - x\right) \]
    5. metadata-evalN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \left(\frac{1}{6} \cdot \left(\varepsilon \cdot x\right) + \color{blue}{\frac{-1}{2}}\right) - x\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \color{blue}{\mathsf{fma}\left(\frac{1}{6}, \varepsilon \cdot x, \frac{-1}{2}\right)} - x\right) \]
    7. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(\frac{1}{6}, \color{blue}{x \cdot \varepsilon}, \frac{-1}{2}\right) - x\right) \]
    8. lower-*.f64 96.8

      \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \color{blue}{x \cdot \varepsilon}, -0.5\right) - x\right) \]
  8. Simplified96.8%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, x \cdot \varepsilon, -0.5\right) - x\right)} \]
  9. Final simplification96.8%

    \[\leadsto \varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot x, -0.5\right) - x\right) \]
  10. Add Preprocessing

Alternative 14: 97.6% accurate, 14.8× speedup?

\[\begin{array}{l} \\ \varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right) \end{array} \]
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) x)))
/* Herbie alternative 14 (97.6% accurate): second-order model of
 * cos(x + eps) - cos(x), evaluated as eps * (-eps/2 - x).
 * Operation order matches the generated expression exactly. */
double code(double x, double eps) {
	double half_term = eps * -0.5; /* -eps/2 */
	double bracket = half_term - x;
	return eps * bracket;
}
! Herbie alternative 14: eps * (eps*(-1/2) - x),
! a second-order model of cos(x + eps) - cos(x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = eps * ((eps * (-0.5d0)) - x)
end function
// Herbie alternative 14: eps * (eps*(-0.5) - x),
// a second-order model of cos(x + eps) - cos(x).
public static double code(double x, double eps) {
	return eps * ((eps * -0.5) - x);
}
def code(x, eps):
	"""Herbie alternative 14: eps * (eps * -0.5 - x), a second-order
	model of cos(x + eps) - cos(x); operation order is preserved."""
	half_term = eps * -0.5
	return eps * (half_term - x)
# Herbie alternative 14: eps * (eps * -0.5 - x),
# a second-order model of cos(x + eps) - cos(x) in binary64.
function code(x, eps)
	return Float64(eps * Float64(Float64(eps * -0.5) - x))
end
% Herbie alternative 14: eps * (eps * -0.5 - x),
% a second-order model of cos(x + eps) - cos(x).
function tmp = code(x, eps)
	tmp = eps * ((eps * -0.5) - x);
end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\left(\cos \varepsilon + -1 \cdot \left(x \cdot \sin \varepsilon\right)\right) - 1} \]
  4. Step-by-step derivation
    1. mul-1-negN/A

      \[\leadsto \left(\cos \varepsilon + \color{blue}{\left(\mathsf{neg}\left(x \cdot \sin \varepsilon\right)\right)}\right) - 1 \]
    2. unsub-negN/A

      \[\leadsto \color{blue}{\left(\cos \varepsilon - x \cdot \sin \varepsilon\right)} - 1 \]
    3. associate--l-N/A

      \[\leadsto \color{blue}{\cos \varepsilon - \left(x \cdot \sin \varepsilon + 1\right)} \]
    4. lower--.f64N/A

      \[\leadsto \color{blue}{\cos \varepsilon - \left(x \cdot \sin \varepsilon + 1\right)} \]
    5. lower-cos.f64N/A

      \[\leadsto \color{blue}{\cos \varepsilon} - \left(x \cdot \sin \varepsilon + 1\right) \]
    6. lower-fma.f64N/A

      \[\leadsto \cos \varepsilon - \color{blue}{\mathsf{fma}\left(x, \sin \varepsilon, 1\right)} \]
    7. lower-sin.f64 52.5

      \[\leadsto \cos \varepsilon - \mathsf{fma}\left(x, \color{blue}{\sin \varepsilon}, 1\right) \]
  5. Simplified52.5%

    \[\leadsto \color{blue}{\cos \varepsilon - \mathsf{fma}\left(x, \sin \varepsilon, 1\right)} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} \cdot \varepsilon - x\right)} \]
  7. Step-by-step derivation
    1. lower-*.f64N/A

      \[\leadsto \color{blue}{\varepsilon \cdot \left(\frac{-1}{2} \cdot \varepsilon - x\right)} \]
    2. lower--.f64N/A

      \[\leadsto \varepsilon \cdot \color{blue}{\left(\frac{-1}{2} \cdot \varepsilon - x\right)} \]
    3. *-commutativeN/A

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot \frac{-1}{2}} - x\right) \]
    4. lower-*.f64 96.8

      \[\leadsto \varepsilon \cdot \left(\color{blue}{\varepsilon \cdot -0.5} - x\right) \]
  8. Simplified96.8%

    \[\leadsto \color{blue}{\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)} \]
  9. Add Preprocessing

Alternative 15: 78.3% accurate, 25.9× speedup?

\[\begin{array}{l} \\ -\varepsilon \cdot x \end{array} \]
(FPCore (x eps) :precision binary64 (- (* eps x)))
/* Herbie alternative 15 (78.3% accurate): first-order model of
 * cos(x + eps) - cos(x), i.e. -(eps * x). Same operation order
 * as the generated expression. */
double code(double x, double eps) {
	double product = eps * x;
	return -product;
}
! Herbie alternative 15: -(eps * x),
! a first-order model of cos(x + eps) - cos(x).
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = -(eps * x)
end function
// Herbie alternative 15: -(eps * x),
// a first-order model of cos(x + eps) - cos(x).
public static double code(double x, double eps) {
	return -(eps * x);
}
def code(x, eps):
	"""Herbie alternative 15: -(eps * x), a first-order model of
	cos(x + eps) - cos(x); operation order is preserved."""
	product = eps * x
	return -product
# Herbie alternative 15: -(eps * x),
# a first-order model of cos(x + eps) - cos(x) in binary64.
function code(x, eps)
	return Float64(-Float64(eps * x))
end
% Herbie alternative 15: -(eps * x),
% a first-order model of cos(x + eps) - cos(x).
function tmp = code(x, eps)
	tmp = -(eps * x);
end
code[x_, eps_] := (-N[(eps * x), $MachinePrecision])
\begin{array}{l}

\\
-\varepsilon \cdot x
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{-1 \cdot \left(\varepsilon \cdot \sin x\right)} \]
  4. Step-by-step derivation
    1. associate-*r*N/A

      \[\leadsto \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot \sin x} \]
    2. *-commutativeN/A

      \[\leadsto \color{blue}{\sin x \cdot \left(-1 \cdot \varepsilon\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{\sin x \cdot \left(-1 \cdot \varepsilon\right)} \]
    4. lower-sin.f64N/A

      \[\leadsto \color{blue}{\sin x} \cdot \left(-1 \cdot \varepsilon\right) \]
    5. mul-1-negN/A

      \[\leadsto \sin x \cdot \color{blue}{\left(\mathsf{neg}\left(\varepsilon\right)\right)} \]
    6. lower-neg.f64 79.1

      \[\leadsto \sin x \cdot \color{blue}{\left(-\varepsilon\right)} \]
  5. Simplified79.1%

    \[\leadsto \color{blue}{\sin x \cdot \left(-\varepsilon\right)} \]
  6. Taylor expanded in x around 0

    \[\leadsto \color{blue}{-1 \cdot \left(\varepsilon \cdot x\right)} \]
  7. Step-by-step derivation
    1. associate-*r*N/A

      \[\leadsto \color{blue}{\left(-1 \cdot \varepsilon\right) \cdot x} \]
    2. *-commutativeN/A

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \varepsilon\right)} \]
    3. lower-*.f64N/A

      \[\leadsto \color{blue}{x \cdot \left(-1 \cdot \varepsilon\right)} \]
    4. mul-1-negN/A

      \[\leadsto x \cdot \color{blue}{\left(\mathsf{neg}\left(\varepsilon\right)\right)} \]
    5. lower-neg.f64 78.0

      \[\leadsto x \cdot \color{blue}{\left(-\varepsilon\right)} \]
  8. Simplified78.0%

    \[\leadsto \color{blue}{x \cdot \left(-\varepsilon\right)} \]
  9. Final simplification78.0%

    \[\leadsto -\varepsilon \cdot x \]
  10. Add Preprocessing

Alternative 16: 50.0% accurate, 51.8× speedup?

\[\begin{array}{l} \\ -1 + 1 \end{array} \]
(FPCore (x eps) :precision binary64 (+ -1.0 1.0))
/* Herbie alternative 16 (50.0% accurate): constant-order model of
 * cos(x + eps) - cos(x). Both arguments are unused; -1.0 + 1.0
 * evaluates to exactly +0.0. */
double code(double x, double eps) {
	double result = -1.0 + 1.0;
	return result;
}
! Herbie alternative 16: constant-order model of cos(x + eps) - cos(x);
! the arguments are unused and the result is exactly zero.
real(8) function code(x, eps)
    real(8), intent (in) :: x
    real(8), intent (in) :: eps
    code = (-1.0d0) + 1.0d0
end function
// Herbie alternative 16: constant-order model of cos(x + eps) - cos(x);
// the arguments are unused and the result is exactly zero.
public static double code(double x, double eps) {
	return -1.0 + 1.0;
}
def code(x, eps):
	"""Herbie alternative 16: constant-order model of cos(x + eps) - cos(x).

	Both arguments are unused; -1.0 + 1.0 evaluates to exactly +0.0."""
	result = -1.0 + 1.0
	return result
# Herbie alternative 16: constant-order model of cos(x + eps) - cos(x);
# the arguments are unused and the result is exactly zero.
function code(x, eps)
	return Float64(-1.0 + 1.0)
end
% Herbie alternative 16: constant-order model of cos(x + eps) - cos(x);
% the arguments are unused and the result is exactly zero.
function tmp = code(x, eps)
	tmp = -1.0 + 1.0;
end
code[x_, eps_] := N[(-1.0 + 1.0), $MachinePrecision]
\begin{array}{l}

\\
-1 + 1
\end{array}
Derivation
  1. Initial program 54.0%

    \[\cos \left(x + \varepsilon\right) - \cos x \]
  2. Add Preprocessing
  3. Taylor expanded in x around 0

    \[\leadsto \color{blue}{\cos \varepsilon - 1} \]
  4. Step-by-step derivation
    1. sub-negN/A

      \[\leadsto \color{blue}{\cos \varepsilon + \left(\mathsf{neg}\left(1\right)\right)} \]
    2. metadata-evalN/A

      \[\leadsto \cos \varepsilon + \color{blue}{-1} \]
    3. lower-+.f64N/A

      \[\leadsto \color{blue}{\cos \varepsilon + -1} \]
    4. lower-cos.f64 52.2

      \[\leadsto \color{blue}{\cos \varepsilon} + -1 \]
  5. Simplified52.2%

    \[\leadsto \color{blue}{\cos \varepsilon + -1} \]
  6. Taylor expanded in eps around 0

    \[\leadsto \color{blue}{1} + -1 \]
  7. Step-by-step derivation
    1. Simplified52.0%

      \[\leadsto \color{blue}{1} + -1 \]
    2. Final simplification52.0%

      \[\leadsto -1 + 1 \]
    3. Add Preprocessing

    Developer Target 1: 99.7% accurate, 0.9× speedup?

    \[\begin{array}{l} \\ \left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right) \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
    /* Developer target 1 (99.7% accurate): exact product-to-sum identity
     * cos(x + eps) - cos(x) = -2 sin(x + eps/2) sin(eps/2); sin is from <math.h>. */
    double code(double x, double eps) {
    	return (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
    }
    
    ! Developer target 1: exact identity
    ! cos(x + eps) - cos(x) = -2 sin(x + eps/2) sin(eps/2).
    real(8) function code(x, eps)
        real(8), intent (in) :: x
        real(8), intent (in) :: eps
        code = ((-2.0d0) * sin((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
    end function
    
    // Developer target 1: exact identity
    // cos(x + eps) - cos(x) = -2 sin(x + eps/2) sin(eps/2).
    public static double code(double x, double eps) {
    	return (-2.0 * Math.sin((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
    }
    
    # Developer target 1: exact identity
    # cos(x + eps) - cos(x) = -2 sin(x + eps/2) sin(eps/2).
    def code(x, eps):
    	return (-2.0 * math.sin((x + (eps / 2.0)))) * math.sin((eps / 2.0))
    
    # Developer target 1: exact identity
    # cos(x + eps) - cos(x) = -2 sin(x + eps/2) sin(eps/2).
    function code(x, eps)
    	return Float64(Float64(-2.0 * sin(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0)))
    end
    
    % Developer target 1: exact identity
    % cos(x + eps) - cos(x) = -2 sin(x + eps/2) sin(eps/2).
    function tmp = code(x, eps)
    	tmp = (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
    end
    
    code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
    \end{array}
    

    Developer Target 2: 98.7% accurate, 0.5× speedup?

    \[\begin{array}{l} \\ {\left(\sqrt[3]{\left(-2 \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)}\right)}^{3} \end{array} \]
    (FPCore (x eps)
     :precision binary64
     (pow (cbrt (* (* -2.0 (sin (* 0.5 (fma 2.0 x eps)))) (sin (* 0.5 eps)))) 3.0))
    /* Developer target 2 (98.7% accurate): the -2 sin(x + eps/2) sin(eps/2)
     * identity wrapped in cbrt(...)^3; pow, cbrt, sin, fma are from <math.h>. */
    double code(double x, double eps) {
    	return pow(cbrt(((-2.0 * sin((0.5 * fma(2.0, x, eps)))) * sin((0.5 * eps)))), 3.0);
    }
    
    # Developer target 2: the -2 sin(x + eps/2) sin(eps/2) identity
    # wrapped in cbrt(...)^3.
    function code(x, eps)
    	return cbrt(Float64(Float64(-2.0 * sin(Float64(0.5 * fma(2.0, x, eps)))) * sin(Float64(0.5 * eps)))) ^ 3.0
    end
    
    code[x_, eps_] := N[Power[N[Power[N[(N[(-2.0 * N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision], 3.0], $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    {\left(\sqrt[3]{\left(-2 \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)}\right)}^{3}
    \end{array}
    

    Reproduce

    ?
    herbie shell --seed 2024215 
    (FPCore (x eps)
      :name "2cos (problem 3.3.5)"
      :precision binary64
      :pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
    
      :alt
      (! :herbie-platform default (* -2 (sin (+ x (/ eps 2))) (sin (/ eps 2))))
    
      :alt
      (! :herbie-platform default (pow (cbrt (* -2 (sin (* 1/2 (fma 2 x eps))) (sin (* 1/2 eps)))) 3))
    
      (- (cos (+ x eps)) (cos x)))