
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
/* Direct evaluation of cos(x + eps) - cos(x).
 * Suffers catastrophic cancellation when eps is small relative to x
 * (the two cosines are nearly equal); reported accuracy is 52.3%. */
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
! Direct evaluation of cos(x + eps) - cos(x) in double precision.
! Loses accuracy to cancellation when eps is small relative to x.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
/** Direct evaluation of cos(x + eps) - cos(x); cancels badly for small eps. */
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
def code(x, eps): return math.cos((x + eps)) - math.cos(x)
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
def code(x, eps): return math.cos((x + eps)) - math.cos(x)
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
(FPCore (x eps) :precision binary64 (* eps (- (* eps (* -0.5 (cos x))) (sin x))))
/* Second-order Taylor expansion in eps around 0:
 * eps * (-(eps/2)*cos(x) - sin(x)).
 * Avoids the cancellation of the direct cos difference; ~99.8% accurate. */
double code(double x, double eps) {
return eps * ((eps * (-0.5 * cos(x))) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * ((-0.5d0) * cos(x))) - sin(x))
end function
public static double code(double x, double eps) {
return eps * ((eps * (-0.5 * Math.cos(x))) - Math.sin(x));
}
def code(x, eps): return eps * ((eps * (-0.5 * math.cos(x))) - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(eps * Float64(-0.5 * cos(x))) - sin(x))) end
function tmp = code(x, eps) tmp = eps * ((eps * (-0.5 * cos(x))) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(-0.5 * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot \left(-0.5 \cdot \cos x\right) - \sin x\right)
\end{array}
Initial program 52.3%
Taylor expanded in eps around 0
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.8
Applied rewrites99.8%
(FPCore (x eps) :precision binary64 (* (* (* eps 0.5) (sin (fma eps 0.5 x))) -2.0))
/* Half-angle product form, -2 * (eps/2) * sin(x + eps/2), with the shifted
 * argument x + eps/2 computed exactly-rounded via fma(eps, 0.5, x).
 * Mathematically close to the exact identity; ~99.8% accurate. */
double code(double x, double eps) {
return ((eps * 0.5) * sin(fma(eps, 0.5, x))) * -2.0;
}
function code(x, eps) return Float64(Float64(Float64(eps * 0.5) * sin(fma(eps, 0.5, x))) * -2.0) end
code[x_, eps_] := N[(N[(N[(eps * 0.5), $MachinePrecision] * N[Sin[N[(eps * 0.5 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * -2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\varepsilon \cdot 0.5\right) \cdot \sin \left(\mathsf{fma}\left(\varepsilon, 0.5, x\right)\right)\right) \cdot -2
\end{array}
Initial program 52.3%
lift--.f64N/A
lift-cos.f64N/A
lift-cos.f64N/A
diff-cosN/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in eps around inf
metadata-evalN/A
cancel-sign-sub-invN/A
lower-*.f64N/A
lower-sin.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-sin.f64N/A
cancel-sign-sub-invN/A
metadata-evalN/A
distribute-rgt-inN/A
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
*-rgt-identityN/A
lower-fma.f6499.8
Applied rewrites99.8%
Taylor expanded in eps around 0
Applied rewrites99.8%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) (sin x))))
/* Taylor form with the cos(x) factor dropped (cos(x) ~ 1):
 * eps * (-eps/2 - sin(x)). Derived by expanding in eps, then in x. */
double code(double x, double eps) {
return eps * ((eps * -0.5) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - sin(x))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - Math.sin(x));
}
def code(x, eps): return eps * ((eps * -0.5) - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - sin(x))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - \sin x\right)
\end{array}
Initial program 52.3%
Taylor expanded in eps around 0
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.8
Applied rewrites99.8%
Taylor expanded in x around 0
Applied rewrites99.7%
(FPCore (x eps) :precision binary64 (fma x (fma x (* eps (* x 0.16666666666666666)) (- eps)) (* eps (* eps -0.5))))
/* Small-x expansion -eps*(x - x^3/6) - eps^2/2 (sin x ~ x - x^3/6),
 * evaluated with nested fma calls for one rounding per step. */
double code(double x, double eps) {
return fma(x, fma(x, (eps * (x * 0.16666666666666666)), -eps), (eps * (eps * -0.5)));
}
function code(x, eps) return fma(x, fma(x, Float64(eps * Float64(x * 0.16666666666666666)), Float64(-eps)), Float64(eps * Float64(eps * -0.5))) end
code[x_, eps_] := N[(x * N[(x * N[(eps * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + (-eps)), $MachinePrecision] + N[(eps * N[(eps * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \varepsilon \cdot \left(x \cdot 0.16666666666666666\right), -\varepsilon\right), \varepsilon \cdot \left(\varepsilon \cdot -0.5\right)\right)
\end{array}
Initial program 52.3%
Taylor expanded in eps around 0
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.8
Applied rewrites99.8%
Taylor expanded in x around 0
Applied rewrites98.9%
Taylor expanded in eps around 0
Applied rewrites98.9%
(FPCore (x eps) :precision binary64 (fma x (* eps (fma 0.16666666666666666 (* x x) -1.0)) (* eps (* eps -0.5))))
/* Refactoring of the small-x expansion: x * eps * (x^2/6 - 1) - eps^2/2,
 * with the polynomial factor formed by fma(1/6, x*x, -1). */
double code(double x, double eps) {
return fma(x, (eps * fma(0.16666666666666666, (x * x), -1.0)), (eps * (eps * -0.5)));
}
function code(x, eps) return fma(x, Float64(eps * fma(0.16666666666666666, Float64(x * x), -1.0)), Float64(eps * Float64(eps * -0.5))) end
code[x_, eps_] := N[(x * N[(eps * N[(0.16666666666666666 * N[(x * x), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(eps * N[(eps * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, x \cdot x, -1\right), \varepsilon \cdot \left(\varepsilon \cdot -0.5\right)\right)
\end{array}
Initial program 52.3%
Taylor expanded in eps around 0
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.8
Applied rewrites99.8%
Taylor expanded in x around 0
Applied rewrites98.9%
Taylor expanded in eps around 0
Applied rewrites98.9%
(FPCore (x eps) :precision binary64 (fma x (- eps) (* eps (* eps -0.5))))
/* Leading terms only: -x*eps - eps^2/2, combined in a single fma. */
double code(double x, double eps) {
return fma(x, -eps, (eps * (eps * -0.5)));
}
function code(x, eps) return fma(x, Float64(-eps), Float64(eps * Float64(eps * -0.5))) end
code[x_, eps_] := N[(x * (-eps) + N[(eps * N[(eps * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, -\varepsilon, \varepsilon \cdot \left(\varepsilon \cdot -0.5\right)\right)
\end{array}
Initial program 52.3%
Taylor expanded in eps around 0
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.8
Applied rewrites99.8%
Taylor expanded in x around 0
Applied rewrites98.9%
Taylor expanded in x around 0
Applied rewrites98.1%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) x)))
/* Small-x, small-eps approximation eps * (-eps/2 - x). */
double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - x)
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
def code(x, eps): return eps * ((eps * -0.5) - x)
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - x)) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - x); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)
\end{array}
Initial program 52.3%
Taylor expanded in eps around 0
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.8
Applied rewrites99.8%
Taylor expanded in x around 0
Applied rewrites98.0%
(FPCore (x eps) :precision binary64 (- (* x eps)))
/* First-order term only: -(x * eps); reported accuracy drops to ~77.7%. */
double code(double x, double eps) {
return -(x * eps);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = -(x * eps)
end function
public static double code(double x, double eps) {
return -(x * eps);
}
def code(x, eps): return -(x * eps)
function code(x, eps) return Float64(-Float64(x * eps)) end
function tmp = code(x, eps) tmp = -(x * eps); end
code[x_, eps_] := (-N[(x * eps), $MachinePrecision])
\begin{array}{l}
\\
-x \cdot \varepsilon
\end{array}
Initial program 52.3%
Taylor expanded in eps around 0
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-sin.f64N/A
mul-1-negN/A
lower-neg.f6479.1
Applied rewrites79.1%
Taylor expanded in x around 0
Applied rewrites77.7%
Final simplification77.7%
(FPCore (x eps) :precision binary64 (+ -1.0 1.0))
/* Degenerate constant alternative produced by the expansion pipeline:
 * -1.0 + 1.0 == 0.0 regardless of inputs (all variable terms cancelled). */
double code(double x, double eps) {
return -1.0 + 1.0;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (-1.0d0) + 1.0d0
end function
public static double code(double x, double eps) {
return -1.0 + 1.0;
}
def code(x, eps): return -1.0 + 1.0
function code(x, eps) return Float64(-1.0 + 1.0) end
function tmp = code(x, eps) tmp = -1.0 + 1.0; end
code[x_, eps_] := N[(-1.0 + 1.0), $MachinePrecision]
\begin{array}{l}
\\
-1 + 1
\end{array}
Initial program 52.3%
Taylor expanded in x around 0
sub-negN/A
metadata-evalN/A
lower-+.f64N/A
lower-cos.f6451.9
Applied rewrites51.9%
Taylor expanded in eps around 0
Applied rewrites51.9%
Final simplification51.9%
(FPCore (x eps) :precision binary64 (* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
/* Werner product identity: cos(x+eps) - cos(x)
 * = -2 * sin(x + eps/2) * sin(eps/2).
 * An exact rewrite of the difference, so no cancellation occurs. */
double code(double x, double eps) {
return (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((-2.0d0) * sin((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
end function
public static double code(double x, double eps) {
return (-2.0 * Math.sin((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
}
def code(x, eps): return (-2.0 * math.sin((x + (eps / 2.0)))) * math.sin((eps / 2.0))
function code(x, eps) return Float64(Float64(-2.0 * sin(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0))) end
function tmp = code(x, eps) tmp = (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0)); end
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
\end{array}
(FPCore (x eps) :precision binary64 (pow (cbrt (* (* -2.0 (sin (* 0.5 (fma 2.0 x eps)))) (sin (* 0.5 eps)))) 3.0))
/* Werner product form routed through pow(cbrt(v), 3.0); the cube-root/
 * cube round trip preserves sign while the fma forms x + eps/2 as
 * 0.5 * fma(2, x, eps). */
double code(double x, double eps) {
return pow(cbrt(((-2.0 * sin((0.5 * fma(2.0, x, eps)))) * sin((0.5 * eps)))), 3.0);
}
function code(x, eps) return cbrt(Float64(Float64(-2.0 * sin(Float64(0.5 * fma(2.0, x, eps)))) * sin(Float64(0.5 * eps)))) ^ 3.0 end
code[x_, eps_] := N[Power[N[Power[N[(N[(-2.0 * N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision], 3.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\sqrt[3]{\left(-2 \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)}\right)}^{3}
\end{array}
herbie shell --seed 2024225
(FPCore (x eps)
:name "2cos (problem 3.3.5)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (* -2 (sin (+ x (/ eps 2))) (sin (/ eps 2))))
:alt
(! :herbie-platform default (pow (cbrt (* -2 (sin (* 1/2 (fma 2 x eps))) (sin (* 1/2 eps)))) 3))
(- (cos (+ x eps)) (cos x)))