
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Naive evaluation of (1 - cos x) / x^2; cancellation-prone near x = 0.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: numerator, denominator
numerator = 1.0d0 - cos(x)
denominator = x * x
code = numerator / denominator
end function
/** Naive evaluation of (1 - cos x) / x^2; loses precision near x = 0. */
public static double code(double x) {
    final double numerator = 1.0 - Math.cos(x);
    final double denominator = x * x;
    return numerator / denominator;
}
def code(x):
    # Naive evaluation of (1 - cos x) / x^2; cancellation-prone near x = 0.
    numerator = 1.0 - math.cos(x)
    return numerator / (x * x)
# Naive evaluation of (1 - cos x) / x^2; cancellation-prone near x = 0.
function code(x)
    numerator = Float64(1.0 - cos(x))
    return Float64(numerator / Float64(x * x))
end
% Naive evaluation of (1 - cos x) / x^2; cancellation-prone near x = 0.
function tmp = code(x)
    numerator = 1.0 - cos(x);
    tmp = numerator / (x * x);
end
(* Naive machine-precision form of (1 - cos x) / x^2; cancellation-prone near x = 0. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
/* Initial program restated for the alternatives table: (1 - cos x) / x^2.
   Cancellation-prone for |x| near 0. */
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Initial program restated for the alternatives table: (1 - cos x) / x^2.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// Initial program restated for the alternatives table: (1 - cos x) / x^2.
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
# Initial program restated for the alternatives table: (1 - cos x) / x^2.
def code(x): return (1.0 - math.cos(x)) / (x * x)
# Initial program restated for the alternatives table: (1 - cos x) / x^2.
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
% Initial program restated for the alternatives table: (1 - cos x) / x^2.
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
(* Initial program restated for the alternatives table: (1 - cos x) / x^2. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.104)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(* (/ (/ -1.0 x_m) x_m) (+ -1.0 (cos x_m)))))
x_m = fabs(x);
/* Herbie alternative 1 (caller precomputes x_m = fabs(x)).
   |x| <= 0.104: even Maclaurin polynomial of (1 - cos x)/x^2,
     0.5 - x^2/24 + x^4/720 - x^6/40320, evaluated with nested fma
     (the constants are -1/24, 1/720, -1/40320).
   |x| >  0.104: rearranged as ((-1/x)/x) * (cos x - 1), algebraically
     equal to the original (1 - cos x)/x^2 in real arithmetic. */
double code(double x_m) {
double tmp;
if (x_m <= 0.104) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = ((-1.0 / x_m) / x_m) * (-1.0 + cos(x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.104) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(Float64(-1.0 / x_m) / x_m) * Float64(-1.0 + cos(x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.104], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(-1.0 / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision] * N[(-1.0 + N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.104:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{-1}{x\_m}}{x\_m} \cdot \left(-1 + \cos x\_m\right)\\
\end{array}
\end{array}
if x < 0.103999999999999995Initial program 39.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites62.8%
if 0.103999999999999995 < x Initial program 97.8%
Applied rewrites97.8%
lift-/.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f6499.1
Applied rewrites99.1%
Final simplification74.3%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.104)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(/ (/ (- 1.0 (cos x_m)) x_m) x_m)))
x_m = fabs(x);
/* Herbie alternative 2 (caller precomputes x_m = fabs(x)).
   |x| <= 0.104: same Maclaurin polynomial as alternative 1.
   |x| >  0.104: ((1 - cos x)/x)/x — divides by x twice instead of by x*x,
     algebraically equal to the original expression. */
double code(double x_m) {
double tmp;
if (x_m <= 0.104) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = ((1.0 - cos(x_m)) / x_m) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.104) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.104], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.104:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 - \cos x\_m}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 0.103999999999999995Initial program 39.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites62.8%
if 0.103999999999999995 < x Initial program 97.8%
Applied rewrites97.8%
lift-*.f64N/A
lift-/.f64N/A
associate-*l/N/A
lift-+.f64N/A
+-commutativeN/A
distribute-lft-inN/A
metadata-evalN/A
neg-mul-1N/A
sub-negN/A
lift-cos.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lift-cos.f64N/A
lower--.f6499.0
Applied rewrites99.0%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.104)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(/ (- 1.0 (cos x_m)) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative 3 (caller precomputes x_m = fabs(x)).
   |x| <= 0.104: same Maclaurin polynomial as alternative 1.
   |x| >  0.104: the original expression (1 - cos x)/(x*x) unchanged. */
double code(double x_m) {
double tmp;
if (x_m <= 0.104) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = (1.0 - cos(x_m)) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.104) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(1.0 - cos(x_m)) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.104], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.104:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.103999999999999995Initial program 39.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites62.8%
if 0.103999999999999995 < x Initial program 97.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 4.2)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(/ (+ x_m (* x_m 1.0)) (* x_m (* x_m x_m)))))
x_m = fabs(x);
/* Herbie alternative 4 (caller precomputes x_m = fabs(x)).
   |x| <= 4.2: same degree-6 Maclaurin polynomial as alternative 1,
     now used over a much wider range.
   |x| >  4.2: (x + x*1)/(x*(x*x)), which simplifies to 2/x^2 in real
     arithmetic — the cos term is dropped entirely for large |x|. */
double code(double x_m) {
double tmp;
if (x_m <= 4.2) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = (x_m + (x_m * 1.0)) / (x_m * (x_m * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 4.2) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(x_m + Float64(x_m * 1.0)) / Float64(x_m * Float64(x_m * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 4.2], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(x$95$m + N[(x$95$m * 1.0), $MachinePrecision]), $MachinePrecision] / N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 4.2:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{x\_m + x\_m \cdot 1}{x\_m \cdot \left(x\_m \cdot x\_m\right)}\\
\end{array}
\end{array}
if x < 4.20000000000000018Initial program 39.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites62.8%
if 4.20000000000000018 < x Initial program 97.8%
Taylor expanded in x around 0
Applied rewrites52.8%
Applied rewrites59.3%
Final simplification61.7%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 2.75)
(fma
(* x_m x_m)
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
0.5)
(/ (+ x_m (* x_m 1.0)) (* x_m (* x_m x_m)))))
x_m = fabs(x);
/* Herbie alternative 5 (caller precomputes x_m = fabs(x)).
   |x| <= 2.75: truncated Maclaurin polynomial 0.5 - x^2/24 + x^4/720
     via two nested fma calls (constants are 1/720 and -1/24).
   |x| >  2.75: same 2/x^2 rearrangement as alternative 4. */
double code(double x_m) {
double tmp;
if (x_m <= 2.75) {
tmp = fma((x_m * x_m), fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), 0.5);
} else {
tmp = (x_m + (x_m * 1.0)) / (x_m * (x_m * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 2.75) tmp = fma(Float64(x_m * x_m), fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), 0.5); else tmp = Float64(Float64(x_m + Float64(x_m * 1.0)) / Float64(x_m * Float64(x_m * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 2.75], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(x$95$m + N[(x$95$m * 1.0), $MachinePrecision]), $MachinePrecision] / N[(x$95$m * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 2.75:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{x\_m + x\_m \cdot 1}{x\_m \cdot \left(x\_m \cdot x\_m\right)}\\
\end{array}
\end{array}
if x < 2.75Initial program 39.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6462.9
Applied rewrites62.9%
if 2.75 < x Initial program 97.8%
Taylor expanded in x around 0
Applied rewrites52.8%
Applied rewrites59.3%
Final simplification61.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 8e+38)
(fma
(* x_m x_m)
(fma (* x_m x_m) 0.001388888888888889 -0.041666666666666664)
0.5)
(/ (- 1.0 1.0) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative 6 (caller precomputes x_m = fabs(x)).
   |x| <= 8e38: degree-4 Maclaurin polynomial 0.5 - x^2/24 + x^4/720
     (wildly inaccurate for large |x|, per the report's accuracy figures).
   |x| >  8e38: (1 - 1)/x^2, i.e. the constant 0 — the asymptotic value
     Herbie substituted for huge arguments. */
double code(double x_m) {
double tmp;
if (x_m <= 8e+38) {
tmp = fma((x_m * x_m), fma((x_m * x_m), 0.001388888888888889, -0.041666666666666664), 0.5);
} else {
tmp = (1.0 - 1.0) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 8e+38) tmp = fma(Float64(x_m * x_m), fma(Float64(x_m * x_m), 0.001388888888888889, -0.041666666666666664), 0.5); else tmp = Float64(Float64(1.0 - 1.0) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 8e+38], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.001388888888888889 + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - 1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 8 \cdot 10^{+38}:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m \cdot x\_m, 0.001388888888888889, -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - 1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 7.99999999999999982e38Initial program 43.7%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
sub-negN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6458.7
Applied rewrites58.7%
if 7.99999999999999982e38 < x Initial program 97.4%
Taylor expanded in x around 0
Applied rewrites63.1%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 3.45) (fma -0.041666666666666664 (* x_m x_m) 0.5) (/ (- 1.0 1.0) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative 7 (caller precomputes x_m = fabs(x)).
   |x| <= 3.45: degree-2 Maclaurin polynomial 0.5 - x^2/24 as a single fma.
   |x| >  3.45: (1 - 1)/x^2, i.e. the constant 0. */
double code(double x_m) {
double tmp;
if (x_m <= 3.45) {
tmp = fma(-0.041666666666666664, (x_m * x_m), 0.5);
} else {
tmp = (1.0 - 1.0) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.45) tmp = fma(-0.041666666666666664, Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 - 1.0) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.45], N[(-0.041666666666666664 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - 1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.45:\\
\;\;\;\;\mathsf{fma}\left(-0.041666666666666664, x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - 1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 3.4500000000000002Initial program 39.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6462.4
Applied rewrites62.4%
if 3.4500000000000002 < x Initial program 97.8%
Taylor expanded in x around 0
Applied rewrites52.8%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 0.5)
x_m = fabs(x);
/* Herbie alternative 8: the constant 0.5, the leading Maclaurin
   coefficient of (1 - cos x)/x^2 — the argument is ignored. */
double code(double x_m) {
return 0.5;
}
x_m = abs(x)
! Herbie alternative 8 (Fortran): constant leading Maclaurin coefficient 0.5.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 0.5d0
end function
x_m = Math.abs(x);
// Herbie alternative 8 (Java): constant leading Maclaurin coefficient 0.5.
public static double code(double x_m) {
return 0.5;
}
x_m = math.fabs(x) def code(x_m): return 0.5
x_m = abs(x) function code(x_m) return 0.5 end
x_m = abs(x); function tmp = code(x_m) tmp = 0.5; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 0.5
\begin{array}{l}
x_m = \left|x\right|
\\
0.5
\end{array}
Initial program 57.8%
Taylor expanded in x around 0
Applied rewrites44.2%
herbie shell --seed 2024227
(FPCore (x)
:name "cos2 (problem 3.4.1)"
:precision binary64
(/ (- 1.0 (cos x)) (* x x)))