
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
! Computes cos(x + eps) - cos(x) directly in double precision.
! NOTE(review): the two cosines are nearly equal for small eps, so the
! direct subtraction cancels badly; the rewritten alternatives in this
! report avoid that.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
/** Computes Math.cos(x + eps) - Math.cos(x) in double precision. */
public static double code(double x, double eps) {
    final double perturbed = Math.cos(x + eps);
    final double reference = Math.cos(x);
    return perturbed - reference;
}
def code(x, eps):
    """Return cos(x + eps) - cos(x), evaluated naively in float64."""
    perturbed = math.cos(x + eps)
    return perturbed - math.cos(x)
# Computes cos(x + eps) - cos(x) in Float64; the two cosines are nearly
# equal for small eps, so this direct form cancels.
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
% Computes cos(x + eps) - cos(x) in double precision; the direct
% subtraction of nearly equal cosines cancels for small eps.
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
(* Computes Cos[x + eps] - Cos[x], rounding each step to $MachinePrecision;
   the direct subtraction cancels for small eps. *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
def code(x, eps): return math.cos((x + eps)) - math.cos(x)
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
(FPCore (x eps) :precision binary64 (* (sin (* 0.5 (fma 2.0 x eps))) (* -2.0 (sin (* 0.5 eps)))))
double code(double x, double eps) {
return sin((0.5 * fma(2.0, x, eps))) * (-2.0 * sin((0.5 * eps)));
}
function code(x, eps) return Float64(sin(Float64(0.5 * fma(2.0, x, eps))) * Float64(-2.0 * sin(Float64(0.5 * eps)))) end
code[x_, eps_] := N[(N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(-2.0 * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right) \cdot \left(-2 \cdot \sin \left(0.5 \cdot \varepsilon\right)\right)
\end{array}
Initial program 54.2%
diff-cos 84.1%
div-inv 84.1%
associate--l+ 84.1%
metadata-eval 84.1%
div-inv 84.1%
+-commutative 84.1%
associate-+l+ 84.1%
metadata-eval 84.1%
Applied egg-rr 84.1%
associate-*r* 84.1%
*-commutative 84.1%
*-commutative 84.1%
+-commutative 84.1%
count-2 84.1%
fma-define 84.1%
associate-+r- 84.1%
+-commutative 84.1%
associate--l+ 99.7%
+-inverses 99.7%
+-commutative 99.7%
*-lft-identity 99.7%
metadata-eval 99.7%
cancel-sign-sub-inv 99.7%
neg-sub0 99.7%
mul-1-neg 99.7%
remove-double-neg 99.7%
Simplified 99.7%
Final simplification 99.7%
(FPCore (x eps) :precision binary64 (* -2.0 (* (sin (* 0.5 eps)) (sin (* 0.5 (- eps (* x -2.0)))))))
double code(double x, double eps) {
return -2.0 * (sin((0.5 * eps)) * sin((0.5 * (eps - (x * -2.0)))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (-2.0d0) * (sin((0.5d0 * eps)) * sin((0.5d0 * (eps - (x * (-2.0d0))))))
end function
public static double code(double x, double eps) {
return -2.0 * (Math.sin((0.5 * eps)) * Math.sin((0.5 * (eps - (x * -2.0)))));
}
def code(x, eps):
    """Return cos(x + eps) - cos(x) via -2*sin(eps/2)*sin((eps + 2x)/2)."""
    half_eps = math.sin(0.5 * eps)
    mid = math.sin(0.5 * (eps - (x * -2.0)))
    return -2.0 * (half_eps * mid)
function code(x, eps) return Float64(-2.0 * Float64(sin(Float64(0.5 * eps)) * sin(Float64(0.5 * Float64(eps - Float64(x * -2.0)))))) end
function tmp = code(x, eps) tmp = -2.0 * (sin((0.5 * eps)) * sin((0.5 * (eps - (x * -2.0))))); end
code[x_, eps_] := N[(-2.0 * N[(N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision] * N[Sin[N[(0.5 * N[(eps - N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \left(\sin \left(0.5 \cdot \varepsilon\right) \cdot \sin \left(0.5 \cdot \left(\varepsilon - x \cdot -2\right)\right)\right)
\end{array}
Initial program 54.2%
diff-cos 84.1%
*-commutative 84.1%
div-inv 84.1%
associate--l+ 84.1%
metadata-eval 84.1%
div-inv 84.1%
+-commutative 84.1%
associate-+l+ 84.1%
metadata-eval 84.1%
Applied egg-rr 84.1%
Taylor expanded in x around -inf 99.7%
Final simplification 99.7%
(FPCore (x eps) :precision binary64 (* eps (- (* (* eps -0.5) (cos x)) (sin x))))
double code(double x, double eps) {
return eps * (((eps * -0.5) * cos(x)) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * (((eps * (-0.5d0)) * cos(x)) - sin(x))
end function
public static double code(double x, double eps) {
return eps * (((eps * -0.5) * Math.cos(x)) - Math.sin(x));
}
def code(x, eps): return eps * (((eps * -0.5) * math.cos(x)) - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(Float64(eps * -0.5) * cos(x)) - sin(x))) end
function tmp = code(x, eps) tmp = eps * (((eps * -0.5) * cos(x)) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(N[(eps * -0.5), $MachinePrecision] * N[Cos[x], $MachinePrecision]), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\left(\varepsilon \cdot -0.5\right) \cdot \cos x - \sin x\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r* 99.4%
Simplified 99.4%
Final simplification 99.4%
(FPCore (x eps) :precision binary64 (* eps (- (sin (* 0.5 (- eps (* x -2.0)))))))
/* Approximates cos(x + eps) - cos(x) as eps * -sin((eps + 2x)/2): the
 * exact sine-product form with the sin(eps/2) factor replaced by eps/2
 * (small-eps Taylor step, per this alternative's derivation log). */
double code(double x, double eps) {
return eps * -sin((0.5 * (eps - (x * -2.0))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * -sin((0.5d0 * (eps - (x * (-2.0d0)))))
end function
public static double code(double x, double eps) {
return eps * -Math.sin((0.5 * (eps - (x * -2.0))));
}
def code(x, eps): return eps * -math.sin((0.5 * (eps - (x * -2.0))))
function code(x, eps) return Float64(eps * Float64(-sin(Float64(0.5 * Float64(eps - Float64(x * -2.0)))))) end
function tmp = code(x, eps) tmp = eps * -sin((0.5 * (eps - (x * -2.0)))); end
code[x_, eps_] := N[(eps * (-N[Sin[N[(0.5 * N[(eps - N[(x * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision])), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(-\sin \left(0.5 \cdot \left(\varepsilon - x \cdot -2\right)\right)\right)
\end{array}
Initial program 54.2%
diff-cos84.1%
div-inv84.1%
associate--l+84.1%
metadata-eval84.1%
div-inv84.1%
+-commutative84.1%
associate-+l+84.1%
metadata-eval84.1%
Applied egg-rr84.1%
associate-*r*84.1%
*-commutative84.1%
*-commutative84.1%
+-commutative84.1%
count-284.1%
fma-define84.1%
associate-+r-84.1%
+-commutative84.1%
associate--l+99.7%
+-inverses99.7%
+-commutative99.7%
*-lft-identity99.7%
metadata-eval99.7%
cancel-sign-sub-inv99.7%
neg-sub099.7%
mul-1-neg99.7%
remove-double-neg99.7%
Simplified99.7%
Taylor expanded in eps around 0 99.4%
Taylor expanded in x around -inf 99.4%
Final simplification99.4%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) (sin x))))
double code(double x, double eps) {
return eps * ((eps * -0.5) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - sin(x))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - Math.sin(x));
}
def code(x, eps): return eps * ((eps * -0.5) - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - sin(x))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - \sin x\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r* 99.4%
Simplified 99.4%
Taylor expanded in x around 0 99.1%
Simplified 99.1%
Final simplification 99.1%
(FPCore (x eps) :precision binary64 (* eps (+ (* eps -0.5) (* x (+ -1.0 (* x (+ (* x 0.16666666666666666) (* eps 0.25))))))))
/* Polynomial approximation of cos(x + eps) - cos(x): Taylor expansion in
 * eps around 0, then in x around 0 (per the derivation log below this
 * alternative), evaluated in Horner form in x and eps. */
double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * ((x * 0.16666666666666666) + (eps * 0.25))))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) + (x * ((-1.0d0) + (x * ((x * 0.16666666666666666d0) + (eps * 0.25d0))))))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * ((x * 0.16666666666666666) + (eps * 0.25))))));
}
def code(x, eps): return eps * ((eps * -0.5) + (x * (-1.0 + (x * ((x * 0.16666666666666666) + (eps * 0.25))))))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(-1.0 + Float64(x * Float64(Float64(x * 0.16666666666666666) + Float64(eps * 0.25))))))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) + (x * (-1.0 + (x * ((x * 0.16666666666666666) + (eps * 0.25)))))); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(-1.0 + N[(x * N[(N[(x * 0.16666666666666666), $MachinePrecision] + N[(eps * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(-1 + x \cdot \left(x \cdot 0.16666666666666666 + \varepsilon \cdot 0.25\right)\right)\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.5%
Final simplification98.5%
(FPCore (x eps) :precision binary64 (* eps (+ (* eps -0.5) (* x (+ -1.0 (* 0.25 (* x eps)))))))
double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (0.25 * (x * eps)))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) + (x * ((-1.0d0) + (0.25d0 * (x * eps)))))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (0.25 * (x * eps)))));
}
def code(x, eps): return eps * ((eps * -0.5) + (x * (-1.0 + (0.25 * (x * eps)))))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(-1.0 + Float64(0.25 * Float64(x * eps)))))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) + (x * (-1.0 + (0.25 * (x * eps))))); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(-1.0 + N[(0.25 * N[(x * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(-1 + 0.25 \cdot \left(x \cdot \varepsilon\right)\right)\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.2%
Final simplification98.2%
(FPCore (x eps) :precision binary64 (* eps (+ (* eps -0.5) (* x (+ -1.0 (* x (* x 0.16666666666666666)))))))
double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666)))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) + (x * ((-1.0d0) + (x * (x * 0.16666666666666666d0)))))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666)))));
}
def code(x, eps): return eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666)))))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(-1.0 + Float64(x * Float64(x * 0.16666666666666666)))))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666))))); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(-1.0 + N[(x * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(-1 + x \cdot \left(x \cdot 0.16666666666666666\right)\right)\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.5%
Taylor expanded in x around inf 98.5%
*-commutative98.5%
Simplified98.5%
Final simplification98.5%
(FPCore (x eps) :precision binary64 (* x (- (* -0.5 (* eps (/ eps x))) eps)))
/* Herbie alternative: x * (-0.5 * eps * (eps/x) - eps).
 * NOTE(review): this form divides by x, so it is undefined at x == 0;
 * the problem precondition (eps < fabs(x)) excludes that point, but any
 * reuse outside that precondition should guard it. */
double code(double x, double eps) {
return x * ((-0.5 * (eps * (eps / x))) - eps);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = x * (((-0.5d0) * (eps * (eps / x))) - eps)
end function
public static double code(double x, double eps) {
return x * ((-0.5 * (eps * (eps / x))) - eps);
}
def code(x, eps): return x * ((-0.5 * (eps * (eps / x))) - eps)
function code(x, eps) return Float64(x * Float64(Float64(-0.5 * Float64(eps * Float64(eps / x))) - eps)) end
function tmp = code(x, eps) tmp = x * ((-0.5 * (eps * (eps / x))) - eps); end
code[x_, eps_] := N[(x * N[(N[(-0.5 * N[(eps * N[(eps / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \frac{\varepsilon}{x}\right) - \varepsilon\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.1%
Taylor expanded in x around inf 98.2%
+-commutative98.2%
neg-mul-198.2%
unsub-neg98.2%
Simplified98.2%
unpow298.2%
*-un-lft-identity98.2%
times-frac98.2%
Applied egg-rr98.2%
Final simplification98.2%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) x)))
/* Approximates cos(x + eps) - cos(x) as eps * (-0.5*eps - x):
 * the eps-expansion with sin(x) further reduced to x near 0. */
double code(double x, double eps) {
    double quadratic = eps * -0.5;
    double series = quadratic - x;
    return eps * series;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - x)
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
def code(x, eps): return eps * ((eps * -0.5) - x)
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - x)) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - x); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.2%
Taylor expanded in x around 0 98.1%
mul-1-neg98.1%
+-commutative98.1%
sub-neg98.1%
*-commutative98.1%
unpow298.1%
associate-*r*98.1%
distribute-lft-out--98.1%
Simplified98.1%
Final simplification98.1%
(FPCore (x eps) :precision binary64 (* x (- eps)))
/* Crudest approximation of cos(x + eps) - cos(x): the leading term
 * -x*eps after Taylor expansion in eps and in x around 0 (see the
 * derivation log below this alternative). */
double code(double x, double eps) {
return x * -eps;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = x * -eps
end function
public static double code(double x, double eps) {
return x * -eps;
}
def code(x, eps): return x * -eps
function code(x, eps) return Float64(x * Float64(-eps)) end
function tmp = code(x, eps) tmp = x * -eps; end
code[x_, eps_] := N[(x * (-eps)), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(-\varepsilon\right)
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 99.4%
associate-*r*99.4%
Simplified99.4%
Taylor expanded in x around 0 98.2%
Taylor expanded in eps around 0 77.0%
mul-1-neg77.0%
distribute-rgt-neg-in77.0%
Simplified77.0%
Final simplification77.0%
(FPCore (x eps) :precision binary64 (* x eps))
/* Degenerate alternative: plain product x * eps (note the derivation log
 * below scores this rewrite at only 52.4%). */
double code(double x, double eps) {
    double product = x * eps;
    return product;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = x * eps
end function
public static double code(double x, double eps) {
return x * eps;
}
def code(x, eps): return x * eps
function code(x, eps) return Float64(x * eps) end
function tmp = code(x, eps) tmp = x * eps; end
code[x_, eps_] := N[(x * eps), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \varepsilon
\end{array}
Initial program 54.2%
Taylor expanded in eps around 0 77.7%
mul-1-neg77.7%
*-commutative77.7%
distribute-rgt-neg-in77.7%
Simplified77.7%
Taylor expanded in x around 0 77.0%
Simplified52.4%
Final simplification52.4%
(FPCore (x eps) :precision binary64 (* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
double code(double x, double eps) {
return (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((-2.0d0) * sin((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
end function
public static double code(double x, double eps) {
return (-2.0 * Math.sin((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
}
def code(x, eps): return (-2.0 * math.sin((x + (eps / 2.0)))) * math.sin((eps / 2.0))
function code(x, eps) return Float64(Float64(-2.0 * sin(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0))) end
function tmp = code(x, eps) tmp = (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0)); end
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
\end{array}
herbie shell --seed 2024100
; Herbie input spec for "2cos (problem 3.3.5)" in binary64.
; :pre restricts x to [-10000, 10000] and eps to 1e-16*|x| < eps < |x|.
; :alt records the known-good rewrite -2*sin(x + eps/2)*sin(eps/2);
; the final expression is the naive program cos(x + eps) - cos(x).
(FPCore (x eps)
:name "2cos (problem 3.3.5)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0)))
(- (cos (+ x eps)) (cos x)))