
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
! Naive double-precision evaluation of cos(x + eps) - cos(x).
! NOTE(review): the surrounding report rates this form at ~53% accuracy,
! presumably due to cancellation for small eps — see the alternatives below.
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
/** Naive binary64 evaluation of cos(x + eps) - cos(x). */
public static double code(double x, double eps) {
    double shifted = Math.cos(x + eps);
    double base = Math.cos(x);
    return shifted - base;
}
def code(x, eps):
    """Naive binary64 evaluation of cos(x + eps) - cos(x)."""
    shifted = math.cos(x + eps)
    return shifted - math.cos(x)
# Naive binary64 evaluation of cos(x + eps) - cos(x).
function code(x, eps)
    shifted = cos(Float64(x + eps))
    return Float64(shifted - cos(x))
end
% Naive double-precision evaluation of cos(x + eps) - cos(x).
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
(* Naive evaluation of Cos[x + eps] - Cos[x], rounding every intermediate to machine precision. *)
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (cos (+ x eps)) (cos x)))
double code(double x, double eps) {
return cos((x + eps)) - cos(x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = cos((x + eps)) - cos(x)
end function
public static double code(double x, double eps) {
return Math.cos((x + eps)) - Math.cos(x);
}
def code(x, eps): return math.cos((x + eps)) - math.cos(x)
function code(x, eps) return Float64(cos(Float64(x + eps)) - cos(x)) end
function tmp = code(x, eps) tmp = cos((x + eps)) - cos(x); end
code[x_, eps_] := N[(N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Cos[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\cos \left(x + \varepsilon\right) - \cos x
\end{array}
(FPCore (x eps) :precision binary64 (fma (* (sin x) (+ (* eps (* eps 0.16666666666666666)) -1.0)) eps (* (* -0.5 (cos x)) (* eps eps))))
double code(double x, double eps) {
return fma((sin(x) * ((eps * (eps * 0.16666666666666666)) + -1.0)), eps, ((-0.5 * cos(x)) * (eps * eps)));
}
function code(x, eps) return fma(Float64(sin(x) * Float64(Float64(eps * Float64(eps * 0.16666666666666666)) + -1.0)), eps, Float64(Float64(-0.5 * cos(x)) * Float64(eps * eps))) end
code[x_, eps_] := N[(N[(N[Sin[x], $MachinePrecision] * N[(N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] * eps + N[(N[(-0.5 * N[Cos[x], $MachinePrecision]), $MachinePrecision] * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\sin x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.16666666666666666\right) + -1\right), \varepsilon, \left(-0.5 \cdot \cos x\right) \cdot \left(\varepsilon \cdot \varepsilon\right)\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-+l+N/A
*-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
neg-mul-1N/A
Simplified99.7%
+-commutativeN/A
distribute-rgt-inN/A
fma-defineN/A
fma-lowering-fma.f64N/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f6499.9%
Applied egg-rr99.9%
(FPCore (x eps) :precision binary64 (* eps (+ (* (sin x) (+ (* eps (* eps 0.16666666666666666)) -1.0)) (* eps (* -0.5 (cos x))))))
double code(double x, double eps) {
return eps * ((sin(x) * ((eps * (eps * 0.16666666666666666)) + -1.0)) + (eps * (-0.5 * cos(x))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((sin(x) * ((eps * (eps * 0.16666666666666666d0)) + (-1.0d0))) + (eps * ((-0.5d0) * cos(x))))
end function
public static double code(double x, double eps) {
return eps * ((Math.sin(x) * ((eps * (eps * 0.16666666666666666)) + -1.0)) + (eps * (-0.5 * Math.cos(x))));
}
def code(x, eps): return eps * ((math.sin(x) * ((eps * (eps * 0.16666666666666666)) + -1.0)) + (eps * (-0.5 * math.cos(x))))
function code(x, eps) return Float64(eps * Float64(Float64(sin(x) * Float64(Float64(eps * Float64(eps * 0.16666666666666666)) + -1.0)) + Float64(eps * Float64(-0.5 * cos(x))))) end
function tmp = code(x, eps) tmp = eps * ((sin(x) * ((eps * (eps * 0.16666666666666666)) + -1.0)) + (eps * (-0.5 * cos(x)))); end
code[x_, eps_] := N[(eps * N[(N[(N[Sin[x], $MachinePrecision] * N[(N[(eps * N[(eps * 0.16666666666666666), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision] + N[(eps * N[(-0.5 * N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\sin x \cdot \left(\varepsilon \cdot \left(\varepsilon \cdot 0.16666666666666666\right) + -1\right) + \varepsilon \cdot \left(-0.5 \cdot \cos x\right)\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-+l+N/A
*-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
neg-mul-1N/A
Simplified99.7%
Final simplification99.7%
(FPCore (x eps) :precision binary64 (* (* (* eps (+ 0.5 (* (* eps eps) -0.020833333333333332))) (sin (/ (+ eps (* x 2.0)) 2.0))) -2.0))
double code(double x, double eps) {
return ((eps * (0.5 + ((eps * eps) * -0.020833333333333332))) * sin(((eps + (x * 2.0)) / 2.0))) * -2.0;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((eps * (0.5d0 + ((eps * eps) * (-0.020833333333333332d0)))) * sin(((eps + (x * 2.0d0)) / 2.0d0))) * (-2.0d0)
end function
public static double code(double x, double eps) {
return ((eps * (0.5 + ((eps * eps) * -0.020833333333333332))) * Math.sin(((eps + (x * 2.0)) / 2.0))) * -2.0;
}
def code(x, eps): return ((eps * (0.5 + ((eps * eps) * -0.020833333333333332))) * math.sin(((eps + (x * 2.0)) / 2.0))) * -2.0
function code(x, eps) return Float64(Float64(Float64(eps * Float64(0.5 + Float64(Float64(eps * eps) * -0.020833333333333332))) * sin(Float64(Float64(eps + Float64(x * 2.0)) / 2.0))) * -2.0) end
function tmp = code(x, eps) tmp = ((eps * (0.5 + ((eps * eps) * -0.020833333333333332))) * sin(((eps + (x * 2.0)) / 2.0))) * -2.0; end
code[x_, eps_] := N[(N[(N[(eps * N[(0.5 + N[(N[(eps * eps), $MachinePrecision] * -0.020833333333333332), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sin[N[(N[(eps + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * -2.0), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\varepsilon \cdot \left(0.5 + \left(\varepsilon \cdot \varepsilon\right) \cdot -0.020833333333333332\right)\right) \cdot \sin \left(\frac{\varepsilon + x \cdot 2}{2}\right)\right) \cdot -2
\end{array}
Initial program 53.3%
diff-cosN/A
*-commutativeN/A
*-lowering-*.f64N/A
Applied egg-rr99.7%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.7%
Simplified99.7%
Final simplification99.7%
(FPCore (x eps) :precision binary64 (* -2.0 (* (sin (/ (+ eps (* x 2.0)) 2.0)) (* eps 0.5))))
double code(double x, double eps) {
return -2.0 * (sin(((eps + (x * 2.0)) / 2.0)) * (eps * 0.5));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (-2.0d0) * (sin(((eps + (x * 2.0d0)) / 2.0d0)) * (eps * 0.5d0))
end function
public static double code(double x, double eps) {
return -2.0 * (Math.sin(((eps + (x * 2.0)) / 2.0)) * (eps * 0.5));
}
def code(x, eps): return -2.0 * (math.sin(((eps + (x * 2.0)) / 2.0)) * (eps * 0.5))
function code(x, eps) return Float64(-2.0 * Float64(sin(Float64(Float64(eps + Float64(x * 2.0)) / 2.0)) * Float64(eps * 0.5))) end
function tmp = code(x, eps) tmp = -2.0 * (sin(((eps + (x * 2.0)) / 2.0)) * (eps * 0.5)); end
code[x_, eps_] := N[(-2.0 * N[(N[Sin[N[(N[(eps + N[(x * 2.0), $MachinePrecision]), $MachinePrecision] / 2.0), $MachinePrecision]], $MachinePrecision] * N[(eps * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \left(\sin \left(\frac{\varepsilon + x \cdot 2}{2}\right) \cdot \left(\varepsilon \cdot 0.5\right)\right)
\end{array}
Initial program 53.3%
diff-cosN/A
*-commutativeN/A
*-lowering-*.f64N/A
Applied egg-rr99.7%
Taylor expanded in eps around 0
*-commutativeN/A
*-lowering-*.f6499.6%
Simplified99.6%
Final simplification99.6%
(FPCore (x eps) :precision binary64 (* eps (- (* eps (+ -0.5 (* x (* x 0.25)))) (sin x))))
double code(double x, double eps) {
return eps * ((eps * (-0.5 + (x * (x * 0.25)))) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * ((-0.5d0) + (x * (x * 0.25d0)))) - sin(x))
end function
public static double code(double x, double eps) {
return eps * ((eps * (-0.5 + (x * (x * 0.25)))) - Math.sin(x));
}
def code(x, eps): return eps * ((eps * (-0.5 + (x * (x * 0.25)))) - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(eps * Float64(-0.5 + Float64(x * Float64(x * 0.25)))) - sin(x))) end
function tmp = code(x, eps) tmp = eps * ((eps * (-0.5 + (x * (x * 0.25)))) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(-0.5 + N[(x * N[(x * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot \left(-0.5 + x \cdot \left(x \cdot 0.25\right)\right) - \sin x\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.6%
Simplified99.6%
Taylor expanded in x around 0
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
distribute-lft-outN/A
+-commutativeN/A
metadata-evalN/A
sub-negN/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.3%
Simplified99.3%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) (sin x))))
double code(double x, double eps) {
return eps * ((eps * -0.5) - sin(x));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - sin(x))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - Math.sin(x));
}
def code(x, eps): return eps * ((eps * -0.5) - math.sin(x))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - sin(x))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - sin(x)); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - N[Sin[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - \sin x\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.6%
Simplified99.6%
Taylor expanded in x around 0
*-commutativeN/A
*-lowering-*.f6499.2%
Simplified99.2%
(FPCore (x eps)
:precision binary64
(*
eps
(+
(* eps (+ -0.5 (* x (* x 0.25))))
(*
x
(-
-1.0
(*
x
(*
x
(+
-0.16666666666666666
(*
(* x x)
(+ 0.008333333333333333 (* (* x x) -0.0001984126984126984)))))))))))
double code(double x, double eps) {
return eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * (0.008333333333333333 + ((x * x) * -0.0001984126984126984)))))))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * ((-0.5d0) + (x * (x * 0.25d0)))) + (x * ((-1.0d0) - (x * (x * ((-0.16666666666666666d0) + ((x * x) * (0.008333333333333333d0 + ((x * x) * (-0.0001984126984126984d0))))))))))
end function
public static double code(double x, double eps) {
return eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * (0.008333333333333333 + ((x * x) * -0.0001984126984126984)))))))));
}
def code(x, eps): return eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * (0.008333333333333333 + ((x * x) * -0.0001984126984126984)))))))))
function code(x, eps) return Float64(eps * Float64(Float64(eps * Float64(-0.5 + Float64(x * Float64(x * 0.25)))) + Float64(x * Float64(-1.0 - Float64(x * Float64(x * Float64(-0.16666666666666666 + Float64(Float64(x * x) * Float64(0.008333333333333333 + Float64(Float64(x * x) * -0.0001984126984126984)))))))))) end
function tmp = code(x, eps) tmp = eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * (0.008333333333333333 + ((x * x) * -0.0001984126984126984))))))))); end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(-0.5 + N[(x * N[(x * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * N[(-1.0 - N[(x * N[(x * N[(-0.16666666666666666 + N[(N[(x * x), $MachinePrecision] * N[(0.008333333333333333 + N[(N[(x * x), $MachinePrecision] * -0.0001984126984126984), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot \left(-0.5 + x \cdot \left(x \cdot 0.25\right)\right) + x \cdot \left(-1 - x \cdot \left(x \cdot \left(-0.16666666666666666 + \left(x \cdot x\right) \cdot \left(0.008333333333333333 + \left(x \cdot x\right) \cdot -0.0001984126984126984\right)\right)\right)\right)\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.6%
Simplified99.6%
Taylor expanded in x around 0
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
distribute-lft-outN/A
+-commutativeN/A
metadata-evalN/A
sub-negN/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.3%
Simplified99.3%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.9%
Simplified98.9%
Final simplification98.9%
(FPCore (x eps)
:precision binary64
(*
eps
(+
(* eps (+ -0.5 (* x (* x 0.25))))
(*
x
(-
-1.0
(* x (* x (+ -0.16666666666666666 (* (* x x) 0.008333333333333333)))))))))
double code(double x, double eps) {
return eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * 0.008333333333333333)))))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * ((-0.5d0) + (x * (x * 0.25d0)))) + (x * ((-1.0d0) - (x * (x * ((-0.16666666666666666d0) + ((x * x) * 0.008333333333333333d0)))))))
end function
public static double code(double x, double eps) {
return eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * 0.008333333333333333)))))));
}
def code(x, eps): return eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * 0.008333333333333333)))))))
function code(x, eps) return Float64(eps * Float64(Float64(eps * Float64(-0.5 + Float64(x * Float64(x * 0.25)))) + Float64(x * Float64(-1.0 - Float64(x * Float64(x * Float64(-0.16666666666666666 + Float64(Float64(x * x) * 0.008333333333333333)))))))) end
function tmp = code(x, eps) tmp = eps * ((eps * (-0.5 + (x * (x * 0.25)))) + (x * (-1.0 - (x * (x * (-0.16666666666666666 + ((x * x) * 0.008333333333333333))))))); end
code[x_, eps_] := N[(eps * N[(N[(eps * N[(-0.5 + N[(x * N[(x * 0.25), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * N[(-1.0 - N[(x * N[(x * N[(-0.16666666666666666 + N[(N[(x * x), $MachinePrecision] * 0.008333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot \left(-0.5 + x \cdot \left(x \cdot 0.25\right)\right) + x \cdot \left(-1 - x \cdot \left(x \cdot \left(-0.16666666666666666 + \left(x \cdot x\right) \cdot 0.008333333333333333\right)\right)\right)\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.6%
Simplified99.6%
Taylor expanded in x around 0
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
associate-*l*N/A
distribute-lft-outN/A
+-commutativeN/A
metadata-evalN/A
sub-negN/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.3%
Simplified99.3%
Taylor expanded in x around 0
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6498.8%
Simplified98.8%
Final simplification98.8%
(FPCore (x eps) :precision binary64 (* eps (+ (* eps -0.5) (* x (+ -1.0 (* x (+ (* eps 0.25) (* x 0.16666666666666666))))))))
double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * ((eps * 0.25) + (x * 0.16666666666666666))))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) + (x * ((-1.0d0) + (x * ((eps * 0.25d0) + (x * 0.16666666666666666d0))))))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * ((eps * 0.25) + (x * 0.16666666666666666))))));
}
def code(x, eps): return eps * ((eps * -0.5) + (x * (-1.0 + (x * ((eps * 0.25) + (x * 0.16666666666666666))))))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(-1.0 + Float64(x * Float64(Float64(eps * 0.25) + Float64(x * 0.16666666666666666))))))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) + (x * (-1.0 + (x * ((eps * 0.25) + (x * 0.16666666666666666)))))); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(-1.0 + N[(x * N[(N[(eps * 0.25), $MachinePrecision] + N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(-1 + x \cdot \left(\varepsilon \cdot 0.25 + x \cdot 0.16666666666666666\right)\right)\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.6%
Simplified99.6%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6498.6%
Simplified98.6%
(FPCore (x eps) :precision binary64 (* eps (+ (* eps -0.5) (* x (+ -1.0 (* x (* x 0.16666666666666666)))))))
double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666)))));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) + (x * ((-1.0d0) + (x * (x * 0.16666666666666666d0)))))
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666)))));
}
def code(x, eps): return eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666)))))
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) + Float64(x * Float64(-1.0 + Float64(x * Float64(x * 0.16666666666666666)))))) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) + (x * (-1.0 + (x * (x * 0.16666666666666666))))); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] + N[(x * N[(-1.0 + N[(x * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 + x \cdot \left(-1 + x \cdot \left(x \cdot 0.16666666666666666\right)\right)\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.6%
Simplified99.6%
Taylor expanded in x around 0
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6498.6%
Simplified98.6%
Taylor expanded in eps around 0
*-commutativeN/A
*-lowering-*.f6498.6%
Simplified98.6%
(FPCore (x eps) :precision binary64 (if (<= x -1.8e-147) (* 0.5 (* x x)) (* -0.5 (* eps eps))))
/* Branching approximation of cos(x + eps) - cos(x):
 * 0.5 * x^2 when x <= -1.8e-147, otherwise -0.5 * eps^2. */
double code(double x, double eps) {
    if (x <= -1.8e-147) {
        return 0.5 * (x * x);
    }
    return -0.5 * (eps * eps);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: tmp
if (x <= (-1.8d-147)) then
tmp = 0.5d0 * (x * x)
else
tmp = (-0.5d0) * (eps * eps)
end if
code = tmp
end function
public static double code(double x, double eps) {
double tmp;
if (x <= -1.8e-147) {
tmp = 0.5 * (x * x);
} else {
tmp = -0.5 * (eps * eps);
}
return tmp;
}
def code(x, eps):
    """Branching approximation of cos(x + eps) - cos(x).

    Returns 0.5 * x**2 when x <= -1.8e-147, else -0.5 * eps**2.
    (The original line was an if/else/return sequence collapsed onto
    one line, which is not valid Python syntax; this restores the
    intended multi-line form without changing behavior.)
    """
    if x <= -1.8e-147:
        tmp = 0.5 * (x * x)
    else:
        tmp = -0.5 * (eps * eps)
    return tmp
# Branching approximation of cos(x + eps) - cos(x):
# 0.5 * x^2 when x <= -1.8e-147, otherwise -0.5 * eps^2.
# (The original collapsed the if/else onto one line with C-style
# juxtaposition after the condition, which is not valid Julia syntax;
# this restores the intended block form without changing behavior.)
function code(x, eps)
    tmp = 0.0
    if x <= -1.8e-147
        tmp = Float64(0.5 * Float64(x * x))
    else
        tmp = Float64(-0.5 * Float64(eps * eps))
    end
    return tmp
end
function tmp_2 = code(x, eps) tmp = 0.0; if (x <= -1.8e-147) tmp = 0.5 * (x * x); else tmp = -0.5 * (eps * eps); end tmp_2 = tmp; end
code[x_, eps_] := If[LessEqual[x, -1.8e-147], N[(0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision], N[(-0.5 * N[(eps * eps), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.8 \cdot 10^{-147}:\\
\;\;\;\;0.5 \cdot \left(x \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;-0.5 \cdot \left(\varepsilon \cdot \varepsilon\right)\\
\end{array}
\end{array}
if x < -1.80000000000000006e-147: Initial program 7.7%
Taylor expanded in x around 0
+-lowering-+.f64N/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f647.3%
Simplified7.3%
Taylor expanded in x around inf
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6412.3%
Simplified12.3%
if -1.80000000000000006e-147 < x Initial program 66.9%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-+l+N/A
*-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
neg-mul-1N/A
Simplified99.8%
+-commutativeN/A
distribute-rgt-inN/A
fma-defineN/A
fma-lowering-fma.f64N/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f6499.9%
Applied egg-rr99.9%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6468.1%
Simplified68.1%
(FPCore (x eps) :precision binary64 (* eps (- (* eps -0.5) x)))
double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps * ((eps * (-0.5d0)) - x)
end function
public static double code(double x, double eps) {
return eps * ((eps * -0.5) - x);
}
def code(x, eps): return eps * ((eps * -0.5) - x)
function code(x, eps) return Float64(eps * Float64(Float64(eps * -0.5) - x)) end
function tmp = code(x, eps) tmp = eps * ((eps * -0.5) - x); end
code[x_, eps_] := N[(eps * N[(N[(eps * -0.5), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon \cdot \left(\varepsilon \cdot -0.5 - x\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
sin-lowering-sin.f6499.6%
Simplified99.6%
Taylor expanded in x around 0
mul-1-negN/A
*-commutativeN/A
distribute-lft-neg-inN/A
mul-1-negN/A
unpow2N/A
associate-*r*N/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
*-commutativeN/A
*-lowering-*.f6498.1%
Simplified98.1%
(FPCore (x eps) :precision binary64 (* -0.5 (* eps eps)))
double code(double x, double eps) {
return -0.5 * (eps * eps);
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (-0.5d0) * (eps * eps)
end function
public static double code(double x, double eps) {
return -0.5 * (eps * eps);
}
def code(x, eps): return -0.5 * (eps * eps)
function code(x, eps) return Float64(-0.5 * Float64(eps * eps)) end
function tmp = code(x, eps) tmp = -0.5 * (eps * eps); end
code[x_, eps_] := N[(-0.5 * N[(eps * eps), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \left(\varepsilon \cdot \varepsilon\right)
\end{array}
Initial program 53.3%
Taylor expanded in eps around 0
*-lowering-*.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-+l+N/A
*-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
neg-mul-1N/A
Simplified99.7%
+-commutativeN/A
distribute-rgt-inN/A
fma-defineN/A
fma-lowering-fma.f64N/A
*-lowering-*.f64N/A
sin-lowering-sin.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
cos-lowering-cos.f64N/A
*-lowering-*.f6499.9%
Applied egg-rr99.9%
Taylor expanded in x around 0
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6453.4%
Simplified53.4%
(FPCore (x eps) :precision binary64 0.0)
double code(double x, double eps) {
return 0.0;
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = 0.0d0
end function
public static double code(double x, double eps) {
return 0.0;
}
def code(x, eps): return 0.0
function code(x, eps) return 0.0 end
function tmp = code(x, eps) tmp = 0.0; end
code[x_, eps_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 53.3%
Taylor expanded in x around 0
sub-negN/A
metadata-evalN/A
+-lowering-+.f64N/A
cos-lowering-cos.f6452.3%
Simplified52.3%
Taylor expanded in eps around 0
Simplified52.3%
metadata-eval52.3%
Applied egg-rr52.3%
(FPCore (x eps) :precision binary64 (* (* -2.0 (sin (+ x (/ eps 2.0)))) (sin (/ eps 2.0))))
double code(double x, double eps) {
return (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0));
}
real(8) function code(x, eps)
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = ((-2.0d0) * sin((x + (eps / 2.0d0)))) * sin((eps / 2.0d0))
end function
public static double code(double x, double eps) {
return (-2.0 * Math.sin((x + (eps / 2.0)))) * Math.sin((eps / 2.0));
}
def code(x, eps): return (-2.0 * math.sin((x + (eps / 2.0)))) * math.sin((eps / 2.0))
function code(x, eps) return Float64(Float64(-2.0 * sin(Float64(x + Float64(eps / 2.0)))) * sin(Float64(eps / 2.0))) end
function tmp = code(x, eps) tmp = (-2.0 * sin((x + (eps / 2.0)))) * sin((eps / 2.0)); end
code[x_, eps_] := N[(N[(-2.0 * N[Sin[N[(x + N[(eps / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Sin[N[(eps / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-2 \cdot \sin \left(x + \frac{\varepsilon}{2}\right)\right) \cdot \sin \left(\frac{\varepsilon}{2}\right)
\end{array}
herbie shell --seed 2024141
(FPCore (x eps)
:name "2cos (problem 3.3.5)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (* -2 (sin (+ x (/ eps 2))) (sin (/ eps 2))))
(- (cos (+ x eps)) (cos x)))