
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
/* Naive reference implementation of cos(x + eps) - cos(x).
 * Suffers catastrophic cancellation when eps is small relative to x,
 * since the two cosines are then nearly equal. */
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
! Naive reference: cos(x + eps) - cos(x).
! Cancels badly when eps is small relative to x.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
// Naive reference: Math.cos(x + eps) - Math.cos(x).
// Loses accuracy to cancellation when eps is small relative to x.
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
def code(x, eps):
    """Naive reference: cos(x + eps) - cos(x); cancels for small eps."""
    shifted = math.cos(x + eps)
    return shifted - math.cos(x)
# Naive reference: cos(x + eps) - cos(x); cancels when eps is small.
function code(x, eps)
    shifted = cos(Float64(x + eps))
    return Float64(shifted - cos(x))
end
% Naive reference: cos(x + eps) - cos(x); loses accuracy when eps is tiny.
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
def code(x, eps): return math.cos((x + eps)) - math.cos(x)
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
(FPCore (x eps) :precision binary64 (* eps (fma (sin x) (fma 0.16666666666666666 (* eps eps) -1.0) (* eps (* (cos x) (fma 0.041666666666666664 (* eps eps) -0.5))))))
/* Degree-4 Taylor expansion of cos(x + eps) - cos(x) in eps,
 * evaluated with fused multiply-adds:
 *   eps * ( sin(x)*(eps^2/6 - 1) + eps*cos(x)*(eps^2/24 - 1/2) )
 * = -eps*sin(x) + eps^3*sin(x)/6 - eps^2*cos(x)/2 + eps^4*cos(x)/24.
 * The literals 0.1666... and 0.041666... are 1/6 and 1/24.
 * Avoids the cancellation of the direct difference for small eps. */
double code(double x, double eps) {
return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * (cos(x) * fma(0.041666666666666664, (eps * eps), -0.5))));
}
function code(x, eps) return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * Float64(cos(x) * fma(0.041666666666666664, Float64(eps * eps), -0.5))))) end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * N[(N[Cos[x], $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in eps around 0
Simplified99.7%
(FPCore (x eps) :precision binary64 (* eps (fma (sin x) (fma 0.16666666666666666 (* eps eps) -1.0) (* eps (* (cos x) -0.5)))))
double code(double x, double eps) {
return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * (cos(x) * -0.5)));
}
function code(x, eps) return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * Float64(cos(x) * -0.5)))) end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * N[(N[Cos[x], $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \left(\cos x \cdot -0.5\right)\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
Simplified99.3%
Final simplification99.3%
(FPCore (x eps) :precision binary64 (* eps (fma -0.5 (* eps (cos x)) (* (sin x) (fma eps (* eps 0.16666666666666666) -1.0)))))
double code(double x, double eps) {
return eps * fma(-0.5, (eps * cos(x)), (sin(x) * fma(eps, (eps * 0.16666666666666666), -1.0)));
}
function code(x, eps) return Float64(eps * fma(-0.5, Float64(eps * cos(x)), Float64(sin(x) * fma(eps, Float64(eps * 0.16666666666666666), -1.0)))) end
code[x_, eps_] := N[(eps * N[(-0.5 * N[(eps * N[Cos[x], $MachinePrecision]), $MachinePrecision] + N[(N[Sin[x], $MachinePrecision] * N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(-0.5, \varepsilon \cdot \cos x, \sin x \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right)\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
distribute-rgt-inN/A
associate--l+N/A
associate-*r*N/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
sub-negN/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
neg-mul-1N/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-sin.f64N/A
lower-fma.f64N/A
Simplified99.3%
(FPCore (x eps) :precision binary64 (* eps (- (* -0.5 (* eps (cos x))) (sin x))))
double code(double x, double eps) {
return eps * ((-0.5 * (eps * cos(x))) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * (((-0.5d0) * (eps * cos(x))) - sin(x))
end function
public static double code(double x, double eps) {
return eps * ((-0.5 * (eps * Math.cos(x))) - Math.sin(x));
}
def code(x, eps):
    """Second-order series: eps * (-(eps / 2) * cos(x) - sin(x))."""
    quadratic = -0.5 * (eps * math.cos(x))
    return eps * (quadratic - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(-0.5 * Float64(eps * cos(x))) - sin(x))) end
function tmp = code(x, eps) tmp = eps * ((-0.5 * (eps * cos(x))) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(-0.5 * N[(eps * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(-0.5 \cdot \left(\varepsilon \cdot \cos x\right) - \sin x\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower--.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f64N/A
lower-cos.f64N/A
lower-sin.f6498.8
Simplified98.8%
(FPCore (x eps) :precision binary64 (* eps (fma (sin x) (fma 0.16666666666666666 (* eps eps) -1.0) (* eps (fma eps (* eps 0.041666666666666664) -0.5)))))
double code(double x, double eps) {
return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * fma(eps, (eps * 0.041666666666666664), -0.5)));
}
function code(x, eps) return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * fma(eps, Float64(eps * 0.041666666666666664), -0.5)))) end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * N[(eps * N[(eps * 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in eps around 0
Simplified99.7%
Taylor expanded in x around 0
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6498.2
Simplified98.2%
(FPCore (x eps) :precision binary64 (* eps (fma (sin x) (fma 0.16666666666666666 (* eps eps) -1.0) (* eps -0.5))))
double code(double x, double eps) {
return eps * fma(sin(x), fma(0.16666666666666666, (eps * eps), -1.0), (eps * -0.5));
}
function code(x, eps) return Float64(eps * fma(sin(x), fma(0.16666666666666666, Float64(eps * eps), -1.0), Float64(eps * -0.5))) end
code[x_, eps_] := N[(eps * N[(N[Sin[x], $MachinePrecision] * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision] + N[(eps * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(\sin x, \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right), \varepsilon \cdot -0.5\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
Simplified99.3%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f6498.2
Simplified98.2%
(FPCore (x eps)
:precision binary64
(fma
x
(fma
x
(fma
(* eps x)
(fma -0.027777777777777776 (* eps eps) 0.16666666666666666)
(* (* eps eps) (fma -0.5 (* (* eps eps) 0.041666666666666664) 0.25)))
(* eps (fma 0.16666666666666666 (* eps eps) -1.0)))
(* (* eps eps) (fma 0.041666666666666664 (* eps eps) -0.5))))
double code(double x, double eps) {
return fma(x, fma(x, fma((eps * x), fma(-0.027777777777777776, (eps * eps), 0.16666666666666666), ((eps * eps) * fma(-0.5, ((eps * eps) * 0.041666666666666664), 0.25))), (eps * fma(0.16666666666666666, (eps * eps), -1.0))), ((eps * eps) * fma(0.041666666666666664, (eps * eps), -0.5)));
}
function code(x, eps) return fma(x, fma(x, fma(Float64(eps * x), fma(-0.027777777777777776, Float64(eps * eps), 0.16666666666666666), Float64(Float64(eps * eps) * fma(-0.5, Float64(Float64(eps * eps) * 0.041666666666666664), 0.25))), Float64(eps * fma(0.16666666666666666, Float64(eps * eps), -1.0))), Float64(Float64(eps * eps) * fma(0.041666666666666664, Float64(eps * eps), -0.5))) end
code[x_, eps_] := N[(x * N[(x * N[(N[(eps * x), $MachinePrecision] * N[(-0.027777777777777776 * N[(eps * eps), $MachinePrecision] + 0.16666666666666666), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * N[(-0.5 * N[(N[(eps * eps), $MachinePrecision] * 0.041666666666666664), $MachinePrecision] + 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(eps * N[(0.16666666666666666 * N[(eps * eps), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon \cdot x, \mathsf{fma}\left(-0.027777777777777776, \varepsilon \cdot \varepsilon, 0.16666666666666666\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(-0.5, \left(\varepsilon \cdot \varepsilon\right) \cdot 0.041666666666666664, 0.25\right)\right), \varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot \varepsilon, -1\right)\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in x around 0
lower-fma.f64N/A
Simplified97.4%
Final simplification97.4%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (fma eps (* eps 0.041666666666666664) -0.5)))
(*
eps
(fma
x
(fma
eps
(* eps 0.16666666666666666)
(fma
x
(fma
eps
(* -0.5 t_0)
(*
x
(* (fma eps (* eps 0.16666666666666666) -1.0) -0.16666666666666666)))
-1.0))
(* eps t_0)))))
double code(double x, double eps) {
double t_0 = fma(eps, (eps * 0.041666666666666664), -0.5);
return eps * fma(x, fma(eps, (eps * 0.16666666666666666), fma(x, fma(eps, (-0.5 * t_0), (x * (fma(eps, (eps * 0.16666666666666666), -1.0) * -0.16666666666666666))), -1.0)), (eps * t_0));
}
function code(x, eps) t_0 = fma(eps, Float64(eps * 0.041666666666666664), -0.5) return Float64(eps * fma(x, fma(eps, Float64(eps * 0.16666666666666666), fma(x, fma(eps, Float64(-0.5 * t_0), Float64(x * Float64(fma(eps, Float64(eps * 0.16666666666666666), -1.0) * -0.16666666666666666))), -1.0)), Float64(eps * t_0))) end
code[x_, eps_] := Block[{t$95$0 = N[(eps * N[(eps * 0.041666666666666664), $MachinePrecision] + -0.5), $MachinePrecision]}, N[(eps * N[(x * N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + N[(x * N[(eps * N[(-0.5 * t$95$0), $MachinePrecision] + N[(x * N[(N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + -1.0), $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(eps * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.041666666666666664, -0.5\right)\\
\varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, -0.5 \cdot t\_0, x \cdot \left(\mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666\right)\right), -1\right)\right), \varepsilon \cdot t\_0\right)
\end{array}
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in eps around 0
Simplified99.7%
Taylor expanded in x around 0
Simplified97.2%
Final simplification97.2%
(FPCore (x eps)
:precision binary64
(*
eps
(fma
x
(fma
eps
(* eps 0.16666666666666666)
(fma
x
(fma
x
(* (fma eps (* eps 0.16666666666666666) -1.0) -0.16666666666666666)
(* eps 0.25))
-1.0))
(* eps -0.5))))
double code(double x, double eps) {
return eps * fma(x, fma(eps, (eps * 0.16666666666666666), fma(x, fma(x, (fma(eps, (eps * 0.16666666666666666), -1.0) * -0.16666666666666666), (eps * 0.25)), -1.0)), (eps * -0.5));
}
function code(x, eps) return Float64(eps * fma(x, fma(eps, Float64(eps * 0.16666666666666666), fma(x, fma(x, Float64(fma(eps, Float64(eps * 0.16666666666666666), -1.0) * -0.16666666666666666), Float64(eps * 0.25)), -1.0)), Float64(eps * -0.5))) end
code[x_, eps_] := N[(eps * N[(x * N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + N[(x * N[(x * N[(N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision] + -1.0), $MachinePrecision] * -0.16666666666666666), $MachinePrecision] + N[(eps * 0.25), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(eps * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, \mathsf{fma}\left(x, \mathsf{fma}\left(x, \mathsf{fma}\left(\varepsilon, \varepsilon \cdot 0.16666666666666666, -1\right) \cdot -0.16666666666666666, \varepsilon \cdot 0.25\right), -1\right)\right), \varepsilon \cdot -0.5\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
Simplified99.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Simplified97.2%
(FPCore (x eps) :precision binary64 (fma x (* eps (fma eps (fma eps 0.16666666666666666 (* x 0.25)) -1.0)) (* (* eps eps) (fma 0.041666666666666664 (* eps eps) -0.5))))
double code(double x, double eps) {
return fma(x, (eps * fma(eps, fma(eps, 0.16666666666666666, (x * 0.25)), -1.0)), ((eps * eps) * fma(0.041666666666666664, (eps * eps), -0.5)));
}
function code(x, eps) return fma(x, Float64(eps * fma(eps, fma(eps, 0.16666666666666666, Float64(x * 0.25)), -1.0)), Float64(Float64(eps * eps) * fma(0.041666666666666664, Float64(eps * eps), -0.5))) end
code[x_, eps_] := N[(x * N[(eps * N[(eps * N[(eps * 0.16666666666666666 + N[(x * 0.25), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(N[(eps * eps), $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, \varepsilon \cdot \mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\varepsilon, 0.16666666666666666, x \cdot 0.25\right), -1\right), \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in x around 0
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
+-commutativeN/A
lower-fma.f64N/A
Simplified97.1%
Taylor expanded in eps around 0
distribute-lft-inN/A
*-commutativeN/A
associate-*r*N/A
unpow2N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
Simplified97.1%
(FPCore (x eps) :precision binary64 (fma x (- eps) (* (* eps eps) (fma 0.041666666666666664 (* eps eps) -0.5))))
double code(double x, double eps) {
return fma(x, -eps, ((eps * eps) * fma(0.041666666666666664, (eps * eps), -0.5)));
}
function code(x, eps) return fma(x, Float64(-eps), Float64(Float64(eps * eps) * fma(0.041666666666666664, Float64(eps * eps), -0.5))) end
code[x_, eps_] := N[(x * (-eps) + N[(N[(eps * eps), $MachinePrecision] * N[(0.041666666666666664 * N[(eps * eps), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, -\varepsilon, \left(\varepsilon \cdot \varepsilon\right) \cdot \mathsf{fma}\left(0.041666666666666664, \varepsilon \cdot \varepsilon, -0.5\right)\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in x around 0
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
+-commutativeN/A
lower-fma.f64N/A
Simplified97.1%
Taylor expanded in eps around 0
mul-1-negN/A
lower-neg.f6497.0
Simplified97.0%
(FPCore (x eps) :precision binary64 (* eps (- (* eps (fma x (* x 0.25) -0.5)) x)))
double code(double x, double eps) {
return eps * ((eps * fma(x, (x * 0.25), -0.5)) - x);
}
function code(x, eps) return Float64(eps * Float64(Float64(eps * fma(x, Float64(x * 0.25), -0.5)) - x)) end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(x * N[(x * 0.25), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(x, x \cdot 0.25, -0.5\right) - x\right)
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
lower-*.f64N/A
sub-negN/A
lower-fma.f64N/A
Simplified99.7%
Taylor expanded in x around 0
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
+-commutativeN/A
lower-fma.f64N/A
Simplified97.1%
Taylor expanded in eps around 0
lower-*.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
lower--.f64N/A
lower-*.f64N/A
sub-negN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6496.9
Simplified96.9%
(FPCore (x eps) :precision binary64 (* eps (- (* eps (fma 0.16666666666666666 (* eps x) -0.5)) x)))
double code(double x, double eps) {
return eps * ((eps * fma(0.16666666666666666, (eps * x), -0.5)) - x);
}
function code(x, eps) return Float64(eps * Float64(Float64(eps * fma(0.16666666666666666, Float64(eps * x), -0.5)) - x)) end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(0.16666666666666666 * N[(eps * x), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot \mathsf{fma}\left(0.16666666666666666, \varepsilon \cdot x, -0.5\right) - x\right)
\end{array}
Initial program 54.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
lower-cos.f64N/A
lower-fma.f64N/A
lower-sin.f6452.5
Simplified52.5%
Taylor expanded in eps around 0
lower-*.f64N/A
lower--.f64N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6496.8
Simplified96.8%
Final simplification96.8%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) x)))
double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - x)
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
def code(x, eps):
    """Coarse alternative expanded in both variables: eps * (-eps/2 - x)."""
    inner = (eps * -0.5) - x
    return eps * inner
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - x)) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - x); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)
\end{array}
Initial program 54.0%
Taylor expanded in x around 0
mul-1-negN/A
unsub-negN/A
associate--l-N/A
lower--.f64N/A
lower-cos.f64N/A
lower-fma.f64N/A
lower-sin.f6452.5
Simplified52.5%
Taylor expanded in eps around 0
lower-*.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f6496.8
Simplified96.8%
(FPCore (x eps) :precision binary64 (- (* eps x)))
/* Coarse alternative: -(eps * x).
 * NOTE(review): produced by Taylor expansion in eps and then in x per the
 * surrounding trace, which reports only ~78% accuracy for this variant. */
double code(double x, double eps) {
return -(eps * x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = -(eps * x)
end function
public static double code(double x, double eps) {
return -(eps * x);
}
def code(x, eps):
    """Coarse alternative: the negated product -(eps * x)."""
    product = eps * x
    return -product
function code(x, eps) return Float64(-Float64(eps * x)) end
function tmp = code(x, eps) tmp = -(eps * x); end
code[x_, eps_] := (-N[(eps * x), $MachinePrecision])
\begin{array}{l}
\\
-\varepsilon \cdot x
\end{array}
Initial program 54.0%
Taylor expanded in eps around 0
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-sin.f64N/A
mul-1-negN/A
lower-neg.f6479.1
Simplified79.1%
Taylor expanded in x around 0
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
mul-1-negN/A
lower-neg.f6478.0
Simplified78.0%
Final simplification78.0%
(FPCore (x eps) :precision binary64 (+ -1.0 1.0))
/* Degenerate alternative: the constant expression -1.0 + 1.0, i.e. 0.0.
 * NOTE(review): approximates the whole difference by zero; the surrounding
 * trace reports only ~52% accuracy for this variant. */
double code(double x, double eps) {
return -1.0 + 1.0;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (-1.0d0) + 1.0d0
end function
public static double code(double x, double eps) {
return -1.0 + 1.0;
}
def code(x, eps): return -1.0 + 1.0
function code(x, eps) return Float64(-1.0 + 1.0) end
function tmp = code(x, eps) tmp = -1.0 + 1.0; end
code[x_, eps_] := N[(-1.0 + 1.0), $MachinePrecision]
\begin{array}{l}
\\
-1 + 1
\end{array}
Initial program 54.0%
Taylor expanded in x around 0
sub-negN/A
metadata-evalN/A
lower-+.f64N/A
lower-cos.f6452.2
Simplified52.2%
Taylor expanded in eps around 0
Simplified52.0%
Final simplification52.0%
(FPCore (x eps) :precision binary64 (* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
/* Exact product-to-sum rewrite:
 *   cos(x + eps) - cos(x) = -2 * sin(x + eps/2) * sin(eps/2)
 * Mathematically exact and free of the subtraction of two nearly
 * equal cosines, so it stays accurate for small eps. */
double code(double x, double eps) {
return (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((-2.0d0) * sin((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
end function
public static double code(double x, double eps) {
return (-2.0 * Math.sin((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
}
def code(x, eps): return (-2.0 * math.sin((x + (eps / 2.0)))) * math.sin((eps / 2.0))
function code(x, eps) return Float64(Float64(-2.0 * sin(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0))) end
function tmp = code(x, eps) tmp = (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0)); end
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
\end{array}
(FPCore (x eps) :precision binary64 (pow (cbrt (* (* -2.0 (sin (* 0.5 (fma 2.0 x eps)))) (sin (* 0.5 eps)))) 3.0))
/* Product-to-sum rewrite of cos(x + eps) - cos(x), here as
 *   -2 * sin(0.5 * fma(2, x, eps)) * sin(0.5 * eps)
 * wrapped in pow(cbrt(...), 3.0). Cubing the real cube root is a
 * mathematical identity (cbrt handles negative arguments), but the
 * round trip introduces extra rounding. */
double code(double x, double eps) {
return pow(cbrt(((-2.0 * sin((0.5 * fma(2.0, x, eps)))) * sin((0.5 * eps)))), 3.0);
}
function code(x, eps) return cbrt(Float64(Float64(-2.0 * sin(Float64(0.5 * fma(2.0, x, eps)))) * sin(Float64(0.5 * eps)))) ^ 3.0 end
code[x_, eps_] := N[Power[N[Power[N[(N[(-2.0 * N[Sin[N[(0.5 * N[(2.0 * x + eps), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(0.5 * eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision], 3.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\sqrt[3]{\left(-2 \cdot \sin \left(0.5 \cdot \mathsf{fma}\left(2, x, \varepsilon\right)\right)\right) \cdot \sin \left(0.5 \cdot \varepsilon\right)}\right)}^{3}
\end{array}
herbie shell --seed 2024215
(FPCore (x eps)
:name "2cos (problem 3.3.5)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (* -2 (sin (+ x (/ eps 2))) (sin (/ eps 2))))
:alt
(! :herbie-platform default (pow (cbrt (* -2 (sin (* 1/2 (fma 2 x eps))) (sin (* 1/2 eps)))) 3))
(- (cos (+ x eps)) (cos x)))