
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
  !> Naive difference of cosines: cos(x + eps) - cos(x).
  !! Suffers catastrophic cancellation when eps is small relative to x.
  implicit none
  real(8), intent (in) :: x
  real(8), intent (in) :: eps
  code = cos((x + eps)) - cos(x)
end function
/** Naive difference of cosines: cos(x + eps) - cos(x). */
public static double code(double x, double eps) {
    final double shifted = Math.cos(x + eps);
    final double base = Math.cos(x);
    return shifted - base;
}
def code(x, eps):
    """Naive difference of cosines: cos(x + eps) - cos(x)."""
    shifted = math.cos(x + eps)
    return shifted - math.cos(x)
# Naive difference of cosines: cos(x + eps) - cos(x), forced to Float64.
function code(x, eps)
    shifted = cos(Float64(x + eps))
    return Float64(shifted - cos(x))
end
% Naive difference of cosines: cos(x + eps) - cos(x).
% NOTE: MATLAB does not allow statements on the same line as the
% function declaration, so the generated one-liner is split across lines.
function tmp = code(x, eps)
  tmp = cos((x + eps)) - cos(x);
end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
def code(x, eps): return math.cos((x + eps)) - math.cos(x)
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
(FPCore (x eps)
:precision binary64
(let* ((t_0 (* x (* 0.5 (/ eps x)))))
(*
(+ (* (sin x) (cos t_0)) (* (cos x) (sin t_0)))
(* -2.0 (sin (* 0.5 eps))))))
double code(double x, double eps) {
double t_0 = x * (0.5 * (eps / x));
return ((sin(x) * cos(t_0)) + (cos(x) * sin(t_0))) * (-2.0 * sin((0.5 * eps)));
}
real(8) function code(x, eps)
  !> cos(x + eps) - cos(x) rewritten as sin(x + eps/2) * (-2 * sin(eps/2)).
  !! NOTE(review): t_0 = x * (0.5 * (eps / x)) divides by x — assumes x /= 0.
  implicit none
  real(8), intent (in) :: x
  real(8), intent (in) :: eps
  real(8) :: t_0
  t_0 = x * (0.5d0 * (eps / x))
  code = ((sin(x) * cos(t_0)) + (cos(x) * sin(t_0))) * ((-2.0d0) * sin((0.5d0 * eps)))
end function
public static double code(double x, double eps) {
double t_0 = x * (0.5 * (eps / x));
return ((Math.sin(x) * Math.cos(t_0)) + (Math.cos(x) * Math.sin(t_0))) * (-2.0 * Math.sin((0.5 * eps)));
}
def code(x, eps):
    """cos(x + eps) - cos(x) via the angle-sum expansion.

    Fixes a SyntaxError in the generated one-liner, which fused two
    statements (``t_0 = ... return ...``) onto a single line.
    """
    # eps/2 formed as x * (0.5 * eps / x); assumes x != 0.
    t_0 = x * (0.5 * (eps / x))
    # sin(x)cos(t_0) + cos(x)sin(t_0) == sin(x + t_0), scaled by -2*sin(eps/2).
    return ((math.sin(x) * math.cos(t_0)) + (math.cos(x) * math.sin(t_0))) * (-2.0 * math.sin((0.5 * eps)))
# cos(x + eps) - cos(x) via the angle-sum expansion, forced to Float64.
# Fixes a parse error in the generated one-liner: two statements on one
# line without a `;` separator are invalid Julia.
function code(x, eps)
    # eps/2 formed as x * (0.5 * eps / x); assumes x != 0.
    t_0 = Float64(x * Float64(0.5 * Float64(eps / x)))
    return Float64(Float64(Float64(sin(x) * cos(t_0)) + Float64(cos(x) * sin(t_0))) * Float64(-2.0 * sin(Float64(0.5 * eps))))
end
function tmp = code(x, eps) t_0 = x * (0.5 * (eps / x)); tmp = ((sin(x) * cos(t_0)) + (cos(x) * sin(t_0))) * (-2.0 * sin((0.5 * eps))); end
code[x_, eps_] := Block[{t$95$0 = N[(x * N[(0.5 * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[Sin[x], $MachinePrecision] * N[Cos[t$95$0], $MachinePrecision]), $MachinePrecision] + N[(N[Cos[x], $MachinePrecision] * N[Sin[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(-2.0 * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(0.5 \cdot \frac{\varepsilon}{x}\right)\\
\left(\sin x \cdot \cos t\_0 + \cos x \cdot \sin t\_0\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right)
\end{array}
\end{array}
Initial program 55.2%
diff-cos 82.2%
div-inv 82.2%
associate--l+ 82.2%
metadata-eval 82.2%
div-inv 82.2%
+-commutative 82.2%
associate-+l+ 82.2%
metadata-eval 82.2%
Applied egg-rr 82.2%
associate-*r* 82.2%
*-commutative 82.2%
*-commutative 82.2%
+-commutative 82.2%
count-2 82.2%
fma-define 82.2%
*-commutative 82.2%
associate-+r- 82.2%
+-commutative 82.2%
associate--l+ 99.8%
+-inverses 99.8%
distribute-lft-in 99.8%
metadata-eval 99.8%
Simplified 99.8%
Taylor expanded in x around inf 99.6%
associate-*r/ 99.6%
Simplified 99.6%
distribute-rgt-in 99.8%
*-un-lft-identity 99.8%
sin-sum 99.8%
associate-/l* 99.8%
associate-/l* 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (x eps) :precision binary64 (* (* -2.0 (sin (* 0.5 eps))) (sin (* 0.5 (fma 2.0 x eps)))))
double code(double x, double eps) {
return (-2.0 * sin((0.5 * eps))) * sin((0.5 * fma(2.0, x, eps)));
}
function code(x, eps) return Float64(Float64(-2.0 * sin(Float64(0.5 * eps))) * sin(Float64(0.5 * fma(2.0, x, eps)))) end
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right) \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)
\end{array}
Initial program 55.2%
diff-cos82.2%
div-inv82.2%
associate--l+82.2%
metadata-eval82.2%
div-inv82.2%
+-commutative82.2%
associate-+l+82.2%
metadata-eval82.2%
Applied egg-rr82.2%
associate-*r*82.2%
*-commutative82.2%
*-commutative82.2%
+-commutative82.2%
count-282.2%
fma-define82.2%
*-commutative82.2%
associate-+r-82.2%
+-commutative82.2%
associate--l+99.8%
+-inverses99.8%
distribute-lft-in99.8%
metadata-eval99.8%
Simplified99.8%
Final simplification99.8%
(FPCore (x eps) :precision binary64 (* -2.0 (* (sin (* 0.5 eps)) (sin (* 0.5 (+ eps (+ x x)))))))
double code(double x, double eps) {
return -2.0 * (sin((0.5 * eps)) * sin((0.5 * (eps + (x + x)))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (-2.0d0) * (sin((0.5d0 * eps)) * sin((0.5d0 * (eps + (x + x)))))
end function
/** cos(x + eps) - cos(x) as -2 * sin(eps/2) * sin((eps + 2x)/2). */
public static double code(double x, double eps) {
    final double halfEpsSin = Math.sin(0.5 * eps);
    final double midAngleSin = Math.sin(0.5 * (eps + (x + x)));
    return -2.0 * (halfEpsSin * midAngleSin);
}
def code(x, eps):
    """cos(x + eps) - cos(x) as -2 * sin(eps/2) * sin((eps + 2x)/2)."""
    half_eps_sin = math.sin(0.5 * eps)
    mid_angle_sin = math.sin(0.5 * (eps + (x + x)))
    return -2.0 * (half_eps_sin * mid_angle_sin)
function code(x, eps) return Float64(-2.0 * Float64(sin(Float64(0.5 * eps)) * sin(Float64(0.5 * Float64(eps + Float64(x + x)))))) end
function tmp = code(x, eps) tmp = -2.0 * (sin((0.5 * eps)) * sin((0.5 * (eps + (x + x))))); end
code[x_, eps_] := N[(-2.0 * N[(N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision] * N[Sin[N[(0.5 * N[(eps + N[(x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right)
\end{array}
Initial program 55.2%
diff-cos82.2%
*-commutative82.2%
div-inv82.2%
associate--l+82.2%
metadata-eval82.2%
div-inv82.2%
+-commutative82.2%
associate-+l+82.2%
metadata-eval82.2%
Applied egg-rr82.2%
Taylor expanded in x around 0 99.7%
Final simplification99.7%
(FPCore (x eps) :precision binary64 (* eps (- (sin (* 0.5 (fma 2.0 x eps))))))
double code(double x, double eps) {
return eps * -sin((0.5 * fma(2.0, x, eps)));
}
function code(x, eps) return Float64(eps * Float64(-sin(Float64(0.5 * fma(2.0, x, eps))))) end
code[x_, eps_] := N[(eps * (-N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(-\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right)
\end{array}
Initial program 55.2%
diff-cos82.2%
div-inv82.2%
associate--l+82.2%
metadata-eval82.2%
div-inv82.2%
+-commutative82.2%
associate-+l+82.2%
metadata-eval82.2%
Applied egg-rr82.2%
associate-*r*82.2%
*-commutative82.2%
*-commutative82.2%
+-commutative82.2%
count-282.2%
fma-define82.2%
*-commutative82.2%
associate-+r-82.2%
+-commutative82.2%
associate--l+99.8%
+-inverses99.8%
distribute-lft-in99.8%
metadata-eval99.8%
Simplified99.8%
Taylor expanded in eps around 0 99.4%
mul-1-neg99.4%
Simplified99.4%
Final simplification99.4%
(FPCore (x eps) :precision binary64 (* -2.0 (* (* 0.5 eps) (sin (* 0.5 (+ eps (+ x x)))))))
double code(double x, double eps) {
return -2.0 * ((0.5 * eps) * sin((0.5 * (eps + (x + x)))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (-2.0d0) * ((0.5d0 * eps) * sin((0.5d0 * (eps + (x + x)))))
end function
public static double code(double x, double eps) {
return -2.0 * ((0.5 * eps) * Math.sin((0.5 * (eps + (x + x)))));
}
def code(x, eps): return -2.0 * ((0.5 * eps) * math.sin((0.5 * (eps + (x + x)))))
function code(x, eps) return Float64(-2.0 * Float64(Float64(0.5 * eps) * sin(Float64(0.5 * Float64(eps + Float64(x + x)))))) end
function tmp = code(x, eps) tmp = -2.0 * ((0.5 * eps) * sin((0.5 * (eps + (x + x))))); end
code[x_, eps_] := N[(-2.0 * N[(N[(0.5 * eps), $MachinePrecision] * N[Sin[N[(0.5 * N[(eps + N[(x + x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \left(\left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon + \left(x + x\right)\right)\right)\right)
\end{array}
Initial program 55.2%
diff-cos82.2%
*-commutative82.2%
div-inv82.2%
associate--l+82.2%
metadata-eval82.2%
div-inv82.2%
+-commutative82.2%
associate-+l+82.2%
metadata-eval82.2%
Applied egg-rr82.2%
Taylor expanded in x around 0 99.7%
Taylor expanded in eps around 0 99.4%
Final simplification99.4%
(FPCore (x eps) :precision binary64 (* (- eps) (sin (* x (+ 1.0 (/ (* 0.5 eps) x))))))
double code(double x, double eps) {
return -eps * sin((x * (1.0 + ((0.5 * eps) / x))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = -eps * sin((x * (1.0d0 + ((0.5d0 * eps) / x))))
end function
public static double code(double x, double eps) {
return -eps * Math.sin((x * (1.0 + ((0.5 * eps) / x))));
}
def code(x, eps): return -eps * math.sin((x * (1.0 + ((0.5 * eps) / x))))
function code(x, eps) return Float64(Float64(-eps) * sin(Float64(x * Float64(1.0 + Float64(Float64(0.5 * eps) / x))))) end
function tmp = code(x, eps) tmp = -eps * sin((x * (1.0 + ((0.5 * eps) / x)))); end
code[x_, eps_] := N[((-eps) * N[Sin[N[(x * N[(1.0 + N[(N[(0.5 * eps), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-\varepsilon\right) \cdot \sin \left(x \cdot \left(1 + \frac{0.5 \cdot \varepsilon}{x}\right)\right)
\end{array}
Initial program 55.2%
diff-cos82.2%
div-inv82.2%
associate--l+82.2%
metadata-eval82.2%
div-inv82.2%
+-commutative82.2%
associate-+l+82.2%
metadata-eval82.2%
Applied egg-rr82.2%
associate-*r*82.2%
*-commutative82.2%
*-commutative82.2%
+-commutative82.2%
count-282.2%
fma-define82.2%
*-commutative82.2%
associate-+r-82.2%
+-commutative82.2%
associate--l+99.8%
+-inverses99.8%
distribute-lft-in99.8%
metadata-eval99.8%
Simplified99.8%
Taylor expanded in x around inf 99.6%
associate-*r/99.6%
Simplified99.6%
Taylor expanded in eps around 0 99.3%
mul-1-neg99.4%
Simplified99.3%
Final simplification99.3%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) (sin x))))
double code(double x, double eps) {
return eps * ((eps * -0.5) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - sin(x))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - Math.sin(x));
}
def code(x, eps): return eps * ((eps * -0.5) - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - sin(x))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - \sin x\right)
\end{array}
Initial program 55.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.9%
Final simplification98.9%
(FPCore (x eps) :precision binary64 (* eps (+ (* eps -0.5) (* x (+ (* x (+ (* x 0.16666666666666666) (* eps 0.25))) -1.0)))))
double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * ((x * ((x * 0.16666666666666666) + (eps * 0.25))) + -1.0)));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) + (x * ((x * ((x * 0.16666666666666666d0) + (eps * 0.25d0))) + (-1.0d0))))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * ((x * ((x * 0.16666666666666666) + (eps * 0.25))) + -1.0)));
}
def code(x, eps): return eps * ((eps * -0.5) + (x * ((x * ((x * 0.16666666666666666) + (eps * 0.25))) + -1.0)))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(Float64(x * Float64(Float64(x * 0.16666666666666666) + Float64(eps * 0.25))) + -1.0)))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) + (x * ((x * ((x * 0.16666666666666666) + (eps * 0.25))) + -1.0))); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(N[(x * N[(N[(x * 0.16666666666666666), $MachinePrecision] + N[(eps * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666 + \varepsilon \cdot 0.25\right) + -1\right)\right)
\end{array}
Initial program 55.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.5%
Final simplification98.5%
(FPCore (x eps) :precision binary64 (* eps (+ (* eps -0.5) (* x (+ (* x (* x 0.16666666666666666)) -1.0)))))
double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * ((x * (x * 0.16666666666666666)) + -1.0)));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) + (x * ((x * (x * 0.16666666666666666d0)) + (-1.0d0))))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * ((x * (x * 0.16666666666666666)) + -1.0)));
}
def code(x, eps): return eps * ((eps * -0.5) + (x * ((x * (x * 0.16666666666666666)) + -1.0)))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(Float64(x * Float64(x * 0.16666666666666666)) + -1.0)))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) + (x * ((x * (x * 0.16666666666666666)) + -1.0))); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(N[(x * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(x \cdot \left(x \cdot 0.16666666666666666\right) + -1\right)\right)
\end{array}
Initial program 55.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.5%
Taylor expanded in x around inf 98.5%
*-commutative98.5%
Simplified98.5%
Final simplification98.5%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) x)))
/* Leading Taylor terms near the origin: eps * (-eps/2 - x). */
double code(double x, double eps) {
    const double neg_half_eps = eps * -0.5;
    return eps * (neg_half_eps - x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - x)
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
def code(x, eps): return eps * ((eps * -0.5) - x)
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - x)) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - x); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)
\end{array}
Initial program 55.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 97.8%
neg-mul-197.8%
+-commutative97.8%
unsub-neg97.8%
*-commutative97.8%
Simplified97.8%
(FPCore (x eps) :precision binary64 (* x (- eps)))
double code(double x, double eps) {
return x * -eps;
}
real(8) function code(x, eps)
  !> Leading-order Taylor term of cos(x + eps) - cos(x): x * (-eps).
  implicit none
  real(8), intent (in) :: x
  real(8), intent (in) :: eps
  ! "x * -eps" places two operators adjacently, which standard Fortran
  ! forbids (only accepted as a compiler extension); parenthesize the negation.
  code = x * (-eps)
end function
public static double code(double x, double eps) {
return x * -eps;
}
def code(x, eps): return x * -eps
function code(x, eps) return Float64(x * Float64(-eps)) end
function tmp = code(x, eps) tmp = x * -eps; end
code[x_, eps_] := N[(x * (-eps)), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(-\varepsilon\right)
\end{array}
Initial program 55.2%
Taylor expanded in eps around 0 80.5%
associate-*r*80.5%
mul-1-neg80.5%
Simplified80.5%
Taylor expanded in x around 0 79.8%
associate-*r*79.8%
mul-1-neg79.8%
Simplified79.8%
Final simplification79.8%
(FPCore (x eps) :precision binary64 0.0)
double code(double x, double eps) {
return 0.0;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = 0.0d0
end function
public static double code(double x, double eps) {
return 0.0;
}
def code(x, eps): return 0.0
function code(x, eps) return 0.0 end
function tmp = code(x, eps) tmp = 0.0; end
code[x_, eps_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 55.2%
Taylor expanded in eps around 0 80.5%
associate-*r*80.5%
mul-1-neg80.5%
Simplified80.5%
Applied egg-rr53.9%
(FPCore (x eps) :precision binary64 (* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
double code(double x, double eps) {
return (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((-2.0d0) * sin((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
end function
public static double code(double x, double eps) {
return (-2.0 * Math.sin((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
}
def code(x, eps): return (-2.0 * math.sin((x + (eps / 2.0)))) * math.sin((eps / 2.0))
function code(x, eps) return Float64(Float64(-2.0 * sin(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0))) end
function tmp = code(x, eps) tmp = (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0)); end
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
\end{array}
herbie shell --seed 2024123
(FPCore (x eps)
:name "2cos (problem 3.3.5)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (* -2 (sin (+ x (/ eps 2))) (sin (/ eps 2))))
(- (cos (+ x eps)) (cos x)))