
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Original program: (1 - cos x) / x**2 in double precision.
! NOTE(review): numerator cancels catastrophically as x -> 0.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// Computes (1 - cos(x)) / x^2; loses accuracy to cancellation for small |x|.
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
def code(x):
    """Evaluate (1 - cos(x)) / x**2 in binary64; cancellation-prone near 0."""
    numerator = 1.0 - math.cos(x)
    return numerator / (x * x)
# (1 - cos(x)) / x^2, forced to Float64 at each step.
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
% (1 - cos(x)) / x^2. The extracted one-line form lacked the statement
% separators MATLAB requires; line breaks restored.
function tmp = code(x)
  tmp = (1.0 - cos(x)) / (x * x);
end
(* (1 - Cos[x]) / x^2, each subexpression rounded to machine precision. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
// Original program, repeated listing: (1 - cos(x)) / x^2 (cancels near 0).
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Original program, repeated listing: (1 - cos x) / x**2.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// Original program, repeated listing: (1 - cos(x)) / x^2.
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
# Original program, repeated listing: (1 - cos(x)) / x^2.
def code(x): return (1.0 - math.cos(x)) / (x * x)
# Original program, repeated listing: (1 - cos(x)) / x^2 in Float64.
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
% Original program, repeated listing: (1 - cos(x)) / x^2.
% Line breaks restored; the one-line form had no statement separators.
function tmp = code(x)
  tmp = (1.0 - cos(x)) / (x * x);
end
(* Original program, repeated listing: (1 - Cos[x]) / x^2 at machine precision. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
(FPCore (x) :precision binary64 (/ (* (/ (tan (* x 0.5)) x) (sin x)) x))
// Rewrite of (1 - cos x)/x^2 via the half-angle identity:
// (tan(x/2)/x) * sin(x) / x — avoids the 1 - cos(x) cancellation.
double code(double x) {
return ((tan((x * 0.5)) / x) * sin(x)) / x;
}
! Half-angle rewrite: (tan(x/2)/x) * sin(x) / x, cancellation-free.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = ((tan((x * 0.5d0)) / x) * sin(x)) / x
end function
// Half-angle rewrite: (tan(x/2)/x) * sin(x) / x, cancellation-free.
public static double code(double x) {
return ((Math.tan((x * 0.5)) / x) * Math.sin(x)) / x;
}
def code(x):
    """Half-angle rewrite of (1 - cos x)/x**2: tan(x/2)/x * sin(x) / x."""
    ratio = math.tan(x * 0.5) / x
    return (ratio * math.sin(x)) / x
# Half-angle rewrite: (tan(x/2)/x) * sin(x) / x, all in Float64.
function code(x) return Float64(Float64(Float64(tan(Float64(x * 0.5)) / x) * sin(x)) / x) end
% Half-angle rewrite: (tan(x/2)/x) * sin(x) / x. Line breaks restored;
% the collapsed one-line form was not valid MATLAB.
function tmp = code(x)
  tmp = ((tan((x * 0.5)) / x) * sin(x)) / x;
end
(* Half-angle rewrite: (Tan[x/2]/x) * Sin[x] / x at machine precision. *)
code[x_] := N[(N[(N[(N[Tan[N[(x * 0.5), $MachinePrecision]], $MachinePrecision] / x), $MachinePrecision] * N[Sin[x], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\tan \left(x \cdot 0.5\right)}{x} \cdot \sin x}{x}
\end{array}
Initial program 48.7%
flip-- 48.7%
div-inv 48.6%
metadata-eval 48.6%
1-sub-cos 76.7%
pow2 76.7%
Applied egg-rr 76.7%
unpow2 76.7%
associate-*l* 76.7%
associate-*r/ 76.7%
*-rgt-identity 76.7%
hang-0p-tan 76.9%
Simplified 76.9%
*-commutative 76.9%
times-frac 99.8%
div-inv 99.8%
metadata-eval 99.8%
Applied egg-rr 99.8%
associate-*r/ 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
(FPCore (x) :precision binary64 (* (/ (tan (* x 0.5)) x) (/ (sin x) x)))
double code(double x) {
return (tan((x * 0.5)) / x) * (sin(x) / x);
}
! Factored half-angle rewrite: (tan(x/2)/x) * (sin(x)/x).
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = (tan((x * 0.5d0)) / x) * (sin(x) / x)
end function
// Factored half-angle rewrite: (tan(x/2)/x) * (sin(x)/x).
public static double code(double x) {
return (Math.tan((x * 0.5)) / x) * (Math.sin(x) / x);
}
# Factored half-angle rewrite: (tan(x/2)/x) * (sin(x)/x).
def code(x): return (math.tan((x * 0.5)) / x) * (math.sin(x) / x)
# Factored half-angle rewrite: (tan(x/2)/x) * (sin(x)/x) in Float64.
function code(x) return Float64(Float64(tan(Float64(x * 0.5)) / x) * Float64(sin(x) / x)) end
% Factored half-angle rewrite: (tan(x/2)/x) * (sin(x)/x).
% Line breaks restored from the collapsed one-line form.
function tmp = code(x)
  tmp = (tan((x * 0.5)) / x) * (sin(x) / x);
end
(* Factored half-angle rewrite: (Tan[x/2]/x) * (Sin[x]/x). *)
code[x_] := N[(N[(N[Tan[N[(x * 0.5), $MachinePrecision]], $MachinePrecision] / x), $MachinePrecision] * N[(N[Sin[x], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\tan \left(x \cdot 0.5\right)}{x} \cdot \frac{\sin x}{x}
\end{array}
Initial program 48.7%
flip-- 48.7%
div-inv 48.6%
metadata-eval 48.6%
1-sub-cos 76.7%
pow2 76.7%
Applied egg-rr 76.7%
unpow2 76.7%
associate-*l* 76.7%
associate-*r/ 76.7%
*-rgt-identity 76.7%
hang-0p-tan 76.9%
Simplified 76.9%
*-commutative 76.9%
times-frac 99.8%
div-inv 99.8%
metadata-eval 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (x) :precision binary64 (if (<= x 0.00014) 0.5 (/ (- 1.0 (cos x)) (* x x))))
double code(double x) {
double tmp;
if (x <= 0.00014) {
tmp = 0.5;
} else {
tmp = (1.0 - cos(x)) / (x * x);
}
return tmp;
}
! 0.5 (the x -> 0 Taylor limit) below the crossover 0.00014,
! else the direct formula (1 - cos x) / x**2.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 0.00014d0) then
tmp = 0.5d0
else
tmp = (1.0d0 - cos(x)) / (x * x)
end if
code = tmp
end function
// 0.5 (the Taylor limit at 0) for x <= 0.00014, else the direct formula.
public static double code(double x) {
double tmp;
if (x <= 0.00014) {
tmp = 0.5;
} else {
tmp = (1.0 - Math.cos(x)) / (x * x);
}
return tmp;
}
def code(x):
    """0.5 (the Taylor limit at 0) for x <= 0.00014, else (1 - cos x)/x**2.

    Restored from a collapsed one-line form that was not valid Python.
    """
    if x <= 0.00014:
        tmp = 0.5
    else:
        tmp = (1.0 - math.cos(x)) / (x * x)
    return tmp
# 0.5 below the crossover 0.00014, else the direct formula.
# Line breaks restored; the collapsed one-line form did not parse.
function code(x)
    tmp = 0.0
    if (x <= 0.00014)
        tmp = 0.5
    else
        tmp = Float64(Float64(1.0 - cos(x)) / Float64(x * x))
    end
    return tmp
end
% 0.5 below the crossover 0.00014, else the direct formula.
% Line breaks restored from the collapsed one-line form.
function tmp_2 = code(x)
  tmp = 0.0;
  if (x <= 0.00014)
    tmp = 0.5;
  else
    tmp = (1.0 - cos(x)) / (x * x);
  end
  tmp_2 = tmp;
end
(* 0.5 (the Taylor limit at 0) for x <= 0.00014, else the direct formula. *)
code[x_] := If[LessEqual[x, 0.00014], 0.5, N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 0.00014:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x}{x \cdot x}\\
\end{array}
\end{array}
if x < 1.3999999999999999e-4: Initial program 33.9%
Taylor expanded in x around 0 68.3%
if 1.3999999999999999e-4 < x: Initial program 96.9%
Final simplification 75.0%
(FPCore (x) :precision binary64 (if (<= x 0.00014) 0.5 (/ (/ (- 1.0 (cos x)) x) x)))
// 0.5 below the crossover; else divide by x twice instead of forming x*x,
// which avoids premature overflow of x*x for huge |x|.
double code(double x) {
double tmp;
if (x <= 0.00014) {
tmp = 0.5;
} else {
tmp = ((1.0 - cos(x)) / x) / x;
}
return tmp;
}
! 0.5 below the crossover; else divide by x twice (no x*x overflow).
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 0.00014d0) then
tmp = 0.5d0
else
tmp = ((1.0d0 - cos(x)) / x) / x
end if
code = tmp
end function
// 0.5 below the crossover; else divide by x twice (no x*x overflow).
public static double code(double x) {
double tmp;
if (x <= 0.00014) {
tmp = 0.5;
} else {
tmp = ((1.0 - Math.cos(x)) / x) / x;
}
return tmp;
}
def code(x):
    """0.5 for x <= 0.00014, else ((1 - cos x)/x)/x.

    Restored from a collapsed one-line form that was not valid Python.
    """
    if x <= 0.00014:
        tmp = 0.5
    else:
        tmp = ((1.0 - math.cos(x)) / x) / x
    return tmp
# 0.5 below the crossover, else ((1 - cos x)/x)/x.
# Line breaks restored; the collapsed one-line form did not parse.
function code(x)
    tmp = 0.0
    if (x <= 0.00014)
        tmp = 0.5
    else
        tmp = Float64(Float64(Float64(1.0 - cos(x)) / x) / x)
    end
    return tmp
end
% 0.5 below the crossover, else ((1 - cos(x))/x)/x.
% Line breaks restored from the collapsed one-line form.
function tmp_2 = code(x)
  tmp = 0.0;
  if (x <= 0.00014)
    tmp = 0.5;
  else
    tmp = ((1.0 - cos(x)) / x) / x;
  end
  tmp_2 = tmp;
end
(* 0.5 for x <= 0.00014, else ((1 - Cos[x])/x)/x at machine precision. *)
code[x_] := If[LessEqual[x, 0.00014], 0.5, N[(N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] / x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 0.00014:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 - \cos x}{x}}{x}\\
\end{array}
\end{array}
if x < 1.3999999999999999e-4: Initial program 33.9%
Taylor expanded in x around 0 68.3%
if 1.3999999999999999e-4 < x: Initial program 96.9%
associate-/r* 99.2%
div-inv 99.2%
Applied egg-rr 99.2%
un-div-inv 99.2%
Applied egg-rr 99.2%
Final simplification 75.5%
(FPCore (x) :precision binary64 (if (<= x 8.2e+76) 0.5 (* (/ 1.0 x) (+ (/ 1.0 x) (/ -1.0 x)))))
// 0.5 for x <= 8.2e76; else (1/x)*((1/x) + (-1/x)).
// NOTE(review): for finite x the sum (1/x) + (-1/x) is exactly 0 in IEEE
// arithmetic, so the else branch returns 0.0; kept verbatim from the report.
double code(double x) {
double tmp;
if (x <= 8.2e+76) {
tmp = 0.5;
} else {
tmp = (1.0 / x) * ((1.0 / x) + (-1.0 / x));
}
return tmp;
}
! 0.5 for x <= 8.2e76; else (1/x)*((1/x) + (-1/x)).
! NOTE(review): the else branch is exactly 0 for finite x, since
! (1/x) + (-1/x) == 0 in IEEE arithmetic; kept verbatim from the report.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 8.2d+76) then
tmp = 0.5d0
else
tmp = (1.0d0 / x) * ((1.0d0 / x) + ((-1.0d0) / x))
end if
code = tmp
end function
// 0.5 for x <= 8.2e76; else (1/x)*((1/x) + (-1/x)), which is exactly 0
// for finite x. Kept verbatim from the generated report.
public static double code(double x) {
double tmp;
if (x <= 8.2e+76) {
tmp = 0.5;
} else {
tmp = (1.0 / x) * ((1.0 / x) + (-1.0 / x));
}
return tmp;
}
def code(x):
    """0.5 for x <= 8.2e76, else (1/x)*((1/x) + (-1/x)).

    Restored from a collapsed one-line form that was not valid Python.
    NOTE(review): the else branch evaluates to 0.0 for finite x, since
    (1/x) + (-1/x) == 0 exactly; kept verbatim from the report.
    """
    if x <= 8.2e+76:
        tmp = 0.5
    else:
        tmp = (1.0 / x) * ((1.0 / x) + (-1.0 / x))
    return tmp
# 0.5 for x <= 8.2e76, else (1/x)*((1/x) + (-1/x)) (exactly 0 for finite x).
# Line breaks restored; the collapsed one-line form did not parse.
function code(x)
    tmp = 0.0
    if (x <= 8.2e+76)
        tmp = 0.5
    else
        tmp = Float64(Float64(1.0 / x) * Float64(Float64(1.0 / x) + Float64(-1.0 / x)))
    end
    return tmp
end
% 0.5 for x <= 8.2e76, else (1/x)*((1/x) + (-1/x)) (exactly 0 for finite x).
% Line breaks restored from the collapsed one-line form.
function tmp_2 = code(x)
  tmp = 0.0;
  if (x <= 8.2e+76)
    tmp = 0.5;
  else
    tmp = (1.0 / x) * ((1.0 / x) + (-1.0 / x));
  end
  tmp_2 = tmp;
end
(* 0.5 for x <= 8.2e76, else (1/x)*((1/x) + (-1/x)); verbatim from the report. *)
code[x_] := If[LessEqual[x, 8.2e+76], 0.5, N[(N[(1.0 / x), $MachinePrecision] * N[(N[(1.0 / x), $MachinePrecision] + N[(-1.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 8.2 \cdot 10^{+76}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{x} \cdot \left(\frac{1}{x} + \frac{-1}{x}\right)\\
\end{array}
\end{array}
if x < 8.1999999999999997e76: Initial program 39.3%
Taylor expanded in x around 0 63.1%
if 8.1999999999999997e76 < x: Initial program 96.4%
associate-/r* 99.5%
div-inv 99.5%
Applied egg-rr 99.5%
div-sub 99.4%
Applied egg-rr 99.4%
Taylor expanded in x around 0 63.8%
Final simplification 63.2%
(FPCore (x) :precision binary64 (/ (/ 1.0 x) (+ (* x 0.16666666666666666) (* (/ 1.0 x) 2.0))))
// Rational rewrite: (1/x) / (x/6 + 2/x), derived by Taylor expansion.
double code(double x) {
return (1.0 / x) / ((x * 0.16666666666666666) + ((1.0 / x) * 2.0));
}
! Rational rewrite: (1/x) / (x/6 + 2/x), derived by Taylor expansion.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = (1.0d0 / x) / ((x * 0.16666666666666666d0) + ((1.0d0 / x) * 2.0d0))
end function
// Rational rewrite: (1/x) / (x/6 + 2/x), derived by Taylor expansion.
public static double code(double x) {
return (1.0 / x) / ((x * 0.16666666666666666) + ((1.0 / x) * 2.0));
}
def code(x):
    """Rational rewrite of (1 - cos x)/x**2: (1/x) / (x/6 + 2/x)."""
    inv = 1.0 / x
    denom = (x * 0.16666666666666666) + (inv * 2.0)
    return inv / denom
# Rational rewrite: (1/x) / (x/6 + 2/x) in Float64.
function code(x) return Float64(Float64(1.0 / x) / Float64(Float64(x * 0.16666666666666666) + Float64(Float64(1.0 / x) * 2.0))) end
% Rational rewrite: (1/x) / (x/6 + 2/x).
% Line breaks restored from the collapsed one-line form.
function tmp = code(x)
  tmp = (1.0 / x) / ((x * 0.16666666666666666) + ((1.0 / x) * 2.0));
end
(* Rational rewrite: (1/x) / (x/6 + 2/x) at machine precision. *)
code[x_] := N[(N[(1.0 / x), $MachinePrecision] / N[(N[(x * 0.16666666666666666), $MachinePrecision] + N[(N[(1.0 / x), $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{x}}{x \cdot 0.16666666666666666 + \frac{1}{x} \cdot 2}
\end{array}
Initial program 48.7%
associate-/r* 50.1%
div-inv 50.1%
Applied egg-rr 50.1%
*-commutative 50.1%
clear-num 50.1%
un-div-inv 50.1%
Applied egg-rr 50.1%
Taylor expanded in x around 0 80.3%
Final simplification 80.3%
(FPCore (x) :precision binary64 0.5)
// Constant approximation: the x -> 0 Taylor limit of (1 - cos x)/x^2.
double code(double x) {
return 0.5;
}
! Constant approximation: the x -> 0 Taylor limit of (1 - cos x)/x**2.
real(8) function code(x)
implicit none
real(8), intent (in) :: x
code = 0.5d0
end function
// Constant approximation: the x -> 0 Taylor limit of (1 - cos(x))/x^2.
public static double code(double x) {
return 0.5;
}
def code(x):
    """Constant approximation: the limit of (1 - cos x)/x**2 as x -> 0."""
    return 0.5
# Constant approximation: the limit of (1 - cos(x))/x^2 as x -> 0.
function code(x) return 0.5 end
% Constant approximation: the limit of (1 - cos(x))/x^2 as x -> 0.
% Line breaks restored from the collapsed one-line form.
function tmp = code(x)
  tmp = 0.5;
end
(* Constant approximation: the limit of (1 - Cos[x])/x^2 as x -> 0. *)
code[x_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 48.7%
Taylor expanded in x around 0 53.3%
Final simplification 53.3%
herbie shell --seed 2023201
(FPCore (x)
:name "cos2 (problem 3.4.1)"
:precision binary64
(/ (- 1.0 (cos x)) (* x x)))