
; Original program: cos(x + eps) - cos(x), evaluated in IEEE binary64.
; Direct subtraction cancels catastrophically when eps is small relative to x.
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
! Compute cos(x + eps) - cos(x) in double precision (direct form).
! The two cosines nearly cancel for small eps; see the report's rewrites.
real(8) function code(x, eps)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: eps
    code = cos((x + eps)) - cos(x)
end function
/** Direct evaluation of cos(x + eps) - cos(x); cancels for small eps. */
public static double code(double x, double eps) {
    double shifted = Math.cos(x + eps);
    double base = Math.cos(x);
    return shifted - base;
}
def code(x, eps):
    """Return cos(x + eps) - cos(x) (direct form; cancels for small eps)."""
    shifted = math.cos(x + eps)
    return shifted - math.cos(x)
# Julia: direct evaluation of cos(x + eps) - cos(x); Float64 casts mirror the
# FPCore binary64 rounding of each intermediate.
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
% MATLAB: direct evaluation of cos(x + eps) - cos(x).
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
(* Wolfram: direct evaluation, rounding each intermediate to $MachinePrecision. *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the original expression (reference baseline).
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
/* C: direct translation; cancels when eps is small relative to x. */
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
! Fortran: direct translation of the same expression.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
// Java: direct translation of the same expression.
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
# Python: direct translation of the same expression.
def code(x, eps): return math.cos((x + eps)) - math.cos(x)
# Julia: direct translation; Float64 casts mirror FPCore binary64 rounding.
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
% MATLAB: direct translation of the same expression.
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
(* Wolfram: each intermediate rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
; Alternative: rewrite via the angle-addition identity
;   cos(x+eps) - cos(x) = cos(x)*(cos(eps) - 1) - sin(x)*sin(eps),
; with (cos(eps) - 1) replaced by its Taylor polynomial
;   -e^2/2 + e^4/24 - e^6/720
; (0.041666666666666664 = 1/24, -0.001388888888888889 = -1/720),
; evaluated with fused multiply-adds to avoid the direct cancellation.
(FPCore (x eps)
:precision binary64
(fma
(sin x)
(- (sin eps))
(*
(*
(fma
(fma (* eps eps) -0.001388888888888889 0.041666666666666664)
(* eps eps)
-0.5)
(cos x))
(* eps eps))))
/* C translation of the FPCore above (C99 fma from <math.h>). */
double code(double x, double eps) {
return fma(sin(x), -sin(eps), ((fma(fma((eps * eps), -0.001388888888888889, 0.041666666666666664), (eps * eps), -0.5) * cos(x)) * (eps * eps)));
}
# Julia translation; Float64 casts mirror FPCore binary64 rounding.
function code(x, eps) return fma(sin(x), Float64(-sin(eps)), Float64(Float64(fma(fma(Float64(eps * eps), -0.001388888888888889, 0.041666666666666664), Float64(eps * eps), -0.5) * cos(x)) * Float64(eps * eps))) end
(* Wolfram translation; each step rounded to $MachinePrecision. *)
code[x_, eps_] := N[(N[Sin[x], $MachinePrecision] * (-N[Sin[eps], $MachinePrecision]) + N[(N[(N[(N[(N[(eps * eps), $MachinePrecision] * -0.001388888888888889 + 0.041666666666666664), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision] * N[Cos[x], $MachinePrecision]), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\sin x, -\sin \varepsilon, \left(\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.001388888888888889, 0.041666666666666664\right), \varepsilon \cdot \varepsilon, -0.5\right) \cdot \cos x\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)
\end{array}
Initial program 50.5%
lift--.f64  N/A
sub-neg  N/A
+-commutative  N/A
lift-cos.f64  N/A
lift-+.f64  N/A
cos-sum  N/A
associate-+r-  N/A
lower--.f64  N/A
neg-mul-1  N/A
lower-fma.f64  N/A
lift-cos.f64  N/A
*-commutative  N/A
lower-*.f64  N/A
lower-cos.f64  N/A
*-commutative  N/A
lower-*.f64  N/A
lower-sin.f64  N/A
lower-sin.f64  80.4
Applied rewrites  80.4%
Taylor expanded in eps around 0
associate-+r+N/A
distribute-rgt1-inN/A
metadata-evalN/A
mul0-lftN/A
+-lft-identityN/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.8%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
lift-sin.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
lower-fma.f64N/A
lift-sin.f64N/A
lower-neg.f64100.0
Applied rewrites100.0%
Final simplification100.0%
; Alternative: angle-addition form with sin(eps) kept exact and
; (cos(eps) - 1) truncated after e^4:
;   (e^2/24 - 1/2) * e^2 * cos(x) - sin(eps)*sin(x).
(FPCore (x eps) :precision binary64 (- (* (fma (* eps eps) 0.041666666666666664 -0.5) (* (* (cos x) eps) eps)) (* (sin eps) (sin x))))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return (fma((eps * eps), 0.041666666666666664, -0.5) * ((cos(x) * eps) * eps)) - (sin(eps) * sin(x));
}
# Julia translation.
function code(x, eps) return Float64(Float64(fma(Float64(eps * eps), 0.041666666666666664, -0.5) * Float64(Float64(cos(x) * eps) * eps)) - Float64(sin(eps) * sin(x))) end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(N[(N[(eps * eps), $MachinePrecision] * 0.041666666666666664 + -0.5), $MachinePrecision] * N[(N[(N[Cos[x], $MachinePrecision] * eps), $MachinePrecision] * eps), $MachinePrecision]), $MachinePrecision] - N[(N[Sin[eps], $MachinePrecision] * N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon \cdot \varepsilon, 0.041666666666666664, -0.5\right) \cdot \left(\left(\cos x \cdot \varepsilon\right) \cdot \varepsilon\right) - \sin \varepsilon \cdot \sin x
\end{array}
Initial program 50.5%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-cos.f64N/A
lift-+.f64N/A
cos-sumN/A
associate-+r-N/A
lower--.f64N/A
neg-mul-1N/A
lower-fma.f64N/A
lift-cos.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-sin.f64N/A
lower-sin.f6480.4
Applied rewrites80.4%
Taylor expanded in eps around 0
associate-+r+N/A
distribute-rgt1-inN/A
metadata-evalN/A
mul0-lftN/A
+-lft-identityN/A
distribute-lft-inN/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
distribute-rgt-outN/A
+-commutativeN/A
metadata-evalN/A
sub-negN/A
Applied rewrites99.8%
Final simplification99.8%
; Alternative: product identity (Herbie rule diff-cos)
;   cos(A) - cos(B) = -2 sin((A+B)/2) sin((A-B)/2),
; giving -2 * sin(eps/2) * sin(x + eps/2); the midpoint x + eps/2 is
; computed as fma(2, x, eps) * 0.5. No cancellation, 99.7% accuracy.
(FPCore (x eps) :precision binary64 (* -2.0 (* (sin (* 0.5 eps)) (sin (* (fma 2.0 x eps) 0.5)))))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return -2.0 * (sin((0.5 * eps)) * sin((fma(2.0, x, eps) * 0.5)));
}
# Julia translation.
function code(x, eps) return Float64(-2.0 * Float64(sin(Float64(0.5 * eps)) * sin(Float64(fma(2.0, x, eps) * 0.5)))) end
(* Wolfram translation. *)
code[x_, eps_] := N[(-2.0 * N[(N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision] * N[Sin[N[(N[(2.0 * x + eps), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \sin \left(\mathsf{fma}\left(2, x, \varepsilon\right) \cdot 0.5\right)\right)
\end{array}
Initial program 50.5%
lift--.f64N/A
lift-cos.f64N/A
lift-cos.f64N/A
diff-cosN/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Final simplification99.7%
; Alternative: the -2 sin(eps/2) sin(x + eps/2) product form, with sin(eps/2)
; replaced by its Taylor polynomial e/2 - e^3/48 + e^5/3840
; (0.020833333333333332 = 1/48, 0.00026041666666666666 = 1/3840),
; evaluated with nested fused multiply-adds.
(FPCore (x eps)
:precision binary64
(*
(*
(*
(fma
(fma 0.00026041666666666666 (* eps eps) -0.020833333333333332)
(* eps eps)
0.5)
eps)
(sin (* (fma 2.0 x eps) 0.5)))
-2.0))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return ((fma(fma(0.00026041666666666666, (eps * eps), -0.020833333333333332), (eps * eps), 0.5) * eps) * sin((fma(2.0, x, eps) * 0.5))) * -2.0;
}
# Julia translation.
function code(x, eps) return Float64(Float64(Float64(fma(fma(0.00026041666666666666, Float64(eps * eps), -0.020833333333333332), Float64(eps * eps), 0.5) * eps) * sin(Float64(fma(2.0, x, eps) * 0.5))) * -2.0) end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(N[(N[(N[(0.00026041666666666666 * N[(eps * eps), $MachinePrecision] + -0.020833333333333332), $MachinePrecision] * N[(eps * eps), $MachinePrecision] + 0.5), $MachinePrecision] * eps), $MachinePrecision] * N[Sin[N[(N[(2.0 * x + eps), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * -2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(0.00026041666666666666, \varepsilon \cdot \varepsilon, -0.020833333333333332\right), \varepsilon \cdot \varepsilon, 0.5\right) \cdot \varepsilon\right) \cdot \sin \left(\mathsf{fma}\left(2, x, \varepsilon\right) \cdot 0.5\right)\right) \cdot -2
\end{array}
Initial program 50.5%
lift--.f64N/A
lift-cos.f64N/A
lift-cos.f64N/A
diff-cosN/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.7
Applied rewrites99.7%
Final simplification99.7%
; Alternative: product form with the cubic truncation
;   sin(eps/2) ~ e/2 - e^3/48   (fma(e^2, -1/48, 1/2) * e),
; and the half-angle argument written as fma(0.5, eps, x) = x + eps/2.
(FPCore (x eps) :precision binary64 (* (* (* (fma (* eps eps) -0.020833333333333332 0.5) eps) (sin (fma 0.5 eps x))) -2.0))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return ((fma((eps * eps), -0.020833333333333332, 0.5) * eps) * sin(fma(0.5, eps, x))) * -2.0;
}
# Julia translation.
function code(x, eps) return Float64(Float64(Float64(fma(Float64(eps * eps), -0.020833333333333332, 0.5) * eps) * sin(fma(0.5, eps, x))) * -2.0) end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(N[(N[(N[(eps * eps), $MachinePrecision] * -0.020833333333333332 + 0.5), $MachinePrecision] * eps), $MachinePrecision] * N[Sin[N[(0.5 * eps + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * -2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\mathsf{fma}\left(\varepsilon \cdot \varepsilon, -0.020833333333333332, 0.5\right) \cdot \varepsilon\right) \cdot \sin \left(\mathsf{fma}\left(0.5, \varepsilon, x\right)\right)\right) \cdot -2
\end{array}
Initial program 50.5%
lift--.f64N/A
lift-cos.f64N/A
lift-cos.f64N/A
diff-cosN/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6499.7
Applied rewrites99.7%
Taylor expanded in eps around 0
+-commutativeN/A
lower-fma.f6499.7
Applied rewrites99.7%
Final simplification99.7%
; Alternative: product form keeping only the leading term sin(eps/2) ~ eps/2:
;   (0.5*eps) * sin((2x + eps)/2) * -2  =  -eps * sin(x + eps/2).
(FPCore (x eps) :precision binary64 (* (* (* 0.5 eps) (sin (* (fma 2.0 x eps) 0.5))) -2.0))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return ((0.5 * eps) * sin((fma(2.0, x, eps) * 0.5))) * -2.0;
}
# Julia translation.
function code(x, eps) return Float64(Float64(Float64(0.5 * eps) * sin(Float64(fma(2.0, x, eps) * 0.5))) * -2.0) end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(N[(0.5 * eps), $MachinePrecision] * N[Sin[N[(N[(2.0 * x + eps), $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * -2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(0.5 \cdot \varepsilon\right) \cdot \sin \left(\mathsf{fma}\left(2, x, \varepsilon\right) \cdot 0.5\right)\right) \cdot -2
\end{array}
Initial program 50.5%
lift--.f64N/A
lift-cos.f64N/A
lift-cos.f64N/A
diff-cosN/A
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.7%
Taylor expanded in eps around 0
lower-*.f6499.4
Applied rewrites99.4%
Final simplification99.4%
; Alternative: Taylor form (-eps/2 - sin(x)) * eps = -eps*sin(x) - eps^2/2;
; the cos(x) factor on the eps^2 term is dropped by the report's later
; "Taylor expanded in x around 0" step.
(FPCore (x eps) :precision binary64 (* (- (* -0.5 eps) (sin x)) eps))
double code(double x, double eps) {
return ((-0.5 * eps) - sin(x)) * eps;
}
! Series form of cos(x + eps) - cos(x): (-eps/2 - sin(x)) * eps.
real(8) function code(x, eps)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: eps
    code = (((-0.5d0) * eps) - sin(x)) * eps
end function
// Java: (-eps/2 - sin(x)) * eps.
public static double code(double x, double eps) {
return ((-0.5 * eps) - Math.sin(x)) * eps;
}
# Python translation of the same expression.
def code(x, eps): return ((-0.5 * eps) - math.sin(x)) * eps
# Julia translation.
function code(x, eps) return Float64(Float64(Float64(-0.5 * eps) - sin(x)) * eps) end
% MATLAB translation.
function tmp = code(x, eps) tmp = ((-0.5 * eps) - sin(x)) * eps; end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(N[(-0.5 * eps), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(-0.5 \cdot \varepsilon - \sin x\right) \cdot \varepsilon
\end{array}
Initial program 50.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.4
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites99.3%
; Alternative: third-order Taylor expansion in both x and eps, i.e.
;   eps*x^3/6 + eps^2*x^2/4 - eps*x - eps^2/2
; (0.16666666666666666 = 1/6), arranged as nested fused multiply-adds.
(FPCore (x eps) :precision binary64 (fma (* -0.5 eps) eps (* (fma (* (fma 0.16666666666666666 x (* 0.25 eps)) eps) x (- eps)) x)))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return fma((-0.5 * eps), eps, (fma((fma(0.16666666666666666, x, (0.25 * eps)) * eps), x, -eps) * x));
}
# Julia translation.
function code(x, eps) return fma(Float64(-0.5 * eps), eps, Float64(fma(Float64(fma(0.16666666666666666, x, Float64(0.25 * eps)) * eps), x, Float64(-eps)) * x)) end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(-0.5 * eps), $MachinePrecision] * eps + N[(N[(N[(N[(0.16666666666666666 * x + N[(0.25 * eps), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision] * x + (-eps)), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5 \cdot \varepsilon, \varepsilon, \mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x, 0.25 \cdot \varepsilon\right) \cdot \varepsilon, x, -\varepsilon\right) \cdot x\right)
\end{array}
Initial program 50.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.4
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites98.6%
Final simplification98.6%
; Alternative: uses sin(x) ~ x - x^3/6 (0.16666666666666666 = 1/6):
;   ((x^2/6 - 1)*x - eps/2) * eps  =  eps*x^3/6 - eps*x - eps^2/2.
(FPCore (x eps) :precision binary64 (* (fma (fma (* 0.16666666666666666 x) x -1.0) x (* -0.5 eps)) eps))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return fma(fma((0.16666666666666666 * x), x, -1.0), x, (-0.5 * eps)) * eps;
}
# Julia translation.
function code(x, eps) return Float64(fma(fma(Float64(0.16666666666666666 * x), x, -1.0), x, Float64(-0.5 * eps)) * eps) end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(N[(N[(0.16666666666666666 * x), $MachinePrecision] * x + -1.0), $MachinePrecision] * x + N[(-0.5 * eps), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666 \cdot x, x, -1\right), x, -0.5 \cdot \varepsilon\right) \cdot \varepsilon
\end{array}
Initial program 50.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.4
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites98.6%
Taylor expanded in eps around 0
Applied rewrites98.6%
; Alternative: first-order in x as well:
;   fma(-x, eps, -0.5*eps^2)  =  -x*eps - eps^2/2.
(FPCore (x eps) :precision binary64 (fma (- x) eps (* -0.5 (* eps eps))))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return fma(-x, eps, (-0.5 * (eps * eps)));
}
# Julia translation.
function code(x, eps) return fma(Float64(-x), eps, Float64(-0.5 * Float64(eps * eps))) end
(* Wolfram translation. *)
code[x_, eps_] := N[((-x) * eps + N[(-0.5 * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-x, \varepsilon, -0.5 \cdot \left(\varepsilon \cdot \varepsilon\right)\right)
\end{array}
Initial program 50.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.4
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites98.6%
Taylor expanded in x around 0
Applied rewrites98.0%
Final simplification98.0%
; Alternative: same polynomial -x*eps - eps^2/2 factored as
;   (fma(-0.5, eps, -x)) * eps  =  (-eps/2 - x) * eps.
(FPCore (x eps) :precision binary64 (* (fma -0.5 eps (- x)) eps))
/* C translation (C99 fma from <math.h>). */
double code(double x, double eps) {
return fma(-0.5, eps, -x) * eps;
}
# Julia translation.
function code(x, eps) return Float64(fma(-0.5, eps, Float64(-x)) * eps) end
(* Wolfram translation. *)
code[x_, eps_] := N[(N[(-0.5 * eps + (-x)), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.5, \varepsilon, -x\right) \cdot \varepsilon
\end{array}
Initial program 50.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6499.4
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites97.9%
; Alternative: leading-order approximation (-x) * eps (sin(x) ~ x, the
; eps^2 term dropped); accuracy falls to 79% per the log below.
(FPCore (x eps) :precision binary64 (* (- x) eps))
/* Leading-order approximation of cos(x + eps) - cos(x): (-x) * eps. */
double code(double x, double eps) {
    const double neg_x = -x;
    return neg_x * eps;
}
! Leading-order approximation of cos(x + eps) - cos(x): (-x) * eps.
real(8) function code(x, eps)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: eps
    code = -x * eps
end function
// Java: leading-order (-x) * eps.
public static double code(double x, double eps) {
return -x * eps;
}
# Python translation of the same expression.
def code(x, eps): return -x * eps
# Julia translation.
function code(x, eps) return Float64(Float64(-x) * eps) end
% MATLAB translation.
function tmp = code(x, eps) tmp = -x * eps; end
(* Wolfram translation. *)
code[x_, eps_] := N[((-x) * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(-x\right) \cdot \varepsilon
\end{array}
Initial program 50.5%
Taylor expanded in eps around 0
associate-*r*N/A
lower-*.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-sin.f6480.0
Applied rewrites80.0%
Taylor expanded in x around 0
Applied rewrites79.0%
; Alternative: constant 0 (every series term dropped via mul0-lft);
; accuracy 49.7% per the log below — listed only as a trivial baseline.
(FPCore (x eps) :precision binary64 0.0)
/* Trivial baseline: always returns 0 regardless of inputs. */
double code(double x, double eps) {
    (void)x;   /* parameters intentionally unused */
    (void)eps;
    return 0.0;
}
! Trivial baseline: always returns 0; x and eps are intentionally unused.
real(8) function code(x, eps)
    implicit none
    real(8), intent(in) :: x
    real(8), intent(in) :: eps
    code = 0.0d0
end function
// Java: constant-zero baseline.
public static double code(double x, double eps) {
return 0.0;
}
# Python translation.
def code(x, eps): return 0.0
# Julia translation.
function code(x, eps) return 0.0 end
% MATLAB translation.
function tmp = code(x, eps) tmp = 0.0; end
(* Wolfram translation. *)
code[x_, eps_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 50.5%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-cos.f64N/A
lift-+.f64N/A
cos-sumN/A
associate-+r-N/A
lower--.f64N/A
neg-mul-1N/A
lower-fma.f64N/A
lift-cos.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-cos.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-sin.f64N/A
lower-sin.f6480.4
Applied rewrites80.4%
Taylor expanded in eps around 0
distribute-rgt1-inN/A
metadata-evalN/A
mul0-lft49.7
Applied rewrites49.7%
; Alternative: the -2 sin((2x+eps)/2) sin(eps/2) product form wrapped as
; cbrt(...)^3 — algebraically an identity; this matches the :alt candidate
; listed in the job specification at the end of this file.
(FPCore (x eps) :precision binary64 (pow (cbrt (* (* -2.0 (sin (* 0.5 (fma 2.0 x eps)))) (sin (* 0.5 eps)))) 3.0))
/* C translation (cbrt, pow, fma from <math.h>). */
double code(double x, double eps) {
return pow(cbrt(((-2.0 * sin((0.5 * fma(2.0, x, eps)))) * sin((0.5 * eps)))), 3.0);
}
# Julia translation (cube root raised back to the third power).
function code(x, eps) return cbrt(Float64(Float64(-2.0 * sin(Float64(0.5 * fma(2.0, x, eps)))) * sin(Float64(0.5 * eps)))) ^ 3.0 end
(* Wolfram translation. *)
code[x_, eps_] := N[Power[N[Power[N[(N[(-2.0 * N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision], 3.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\sqrt[3]{\left(-2 \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)}\right)}^{3}
\end{array}
herbie shell --seed 2024275
; Herbie job input: problem "2cos (problem 3.3.5)" in binary64.
; Precondition restricts sampling to -10000 <= x <= 10000 and
; 1e-16*|x| < eps < |x|.  :alt supplies a candidate rewrite (the
; -2 sin product form under cbrt^3); the body is the original expression.
(FPCore (x eps)
:name "2cos (problem 3.3.5)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (pow (cbrt (* -2 (sin (* 1/2 (fma 2 x eps))) (sin (* 1/2 eps)))) 3))
(- (cos (+ x eps)) (cos x)))