
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Naive double-precision evaluation of (1 - cos(x)) / x**2.
! Report shows poor accuracy for small |x| (1 - cos(x) cancels).
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
/**
 * Naive evaluation of (1 - cos x) / x^2 in double precision.
 * Loses accuracy near x = 0 where 1 - Math.cos(x) cancels.
 */
public static double code(double x) {
    final double numerator = 1.0 - Math.cos(x);
    final double denominator = x * x;
    return numerator / denominator;
}
# Python listing of the naive (1 - cos x)/x^2; cancels for small |x|.
def code(x): return (1.0 - math.cos(x)) / (x * x)
# Julia listing of the same naive expression.
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
% MATLAB listing of the same naive expression.
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
(* Mathematica listing of the same naive expression at machine precision. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
/* Repeated listing of the initial program: naive (1 - cos x)/x^2. */
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Repeated Fortran listing of the naive (1 - cos(x)) / x**2.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// Repeated Java listing of the naive (1 - cos x)/x^2.
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
# Repeated Python listing of the naive (1 - cos x)/x^2.
def code(x): return (1.0 - math.cos(x)) / (x * x)
# Repeated Julia listing of the same expression.
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
% Repeated MATLAB listing of the same expression.
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
(* Repeated Mathematica listing of the same expression. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.032)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(* (/ (- 1.0 (cos x_m)) x_m) (/ 1.0 x_m))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 0.032) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = ((1.0 - cos(x_m)) / x_m) * (1.0 / x_m);
}
return tmp;
}
# Julia: small-|x| Taylor branch vs. large-|x| rearranged quotient (x_m = |x|).
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.032) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) * Float64(1.0 / x_m)); end return tmp end
(* Mathematica version of the same branched rewrite. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.032], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] * N[(1.0 / x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.032:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m} \cdot \frac{1}{x\_m}\\
\end{array}
\end{array}
if x < 0.032000000000000001
Initial program 37.9%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lift--.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-/r/N/A
clear-numN/A
lower-+.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-neg.f6438.5
Applied rewrites38.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.8
Applied rewrites64.8%
if 0.032000000000000001 < x Initial program 96.7%
Applied rewrites96.7%
lift-*.f64N/A
lift-/.f64N/A
associate-*l/N/A
*-lft-identityN/A
lift--.f64N/A
div-subN/A
lift-*.f64N/A
associate-/r*N/A
lift-/.f64N/A
lift-/.f64N/A
remove-double-negN/A
neg-mul-1N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
metadata-evalN/A
rgt-mult-inverseN/A
lift-/.f64N/A
associate-*l*N/A
*-commutativeN/A
lift-/.f64N/A
un-div-invN/A
Applied rewrites99.2%
Final simplification74.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.032)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (/ (- 1.0 (cos x_m)) x_m) x_m)))
x_m = fabs(x);
/* Herbie alternative for (1 - cos x)/x^2 with x_m = |x|:
 * - |x| <= 0.032: Taylor polynomial 0.5 - x^2/24 + x^4/720 via fma
 *   (report: "Taylor expanded in x around 0");
 * - otherwise: two successive divisions by x_m instead of forming x*x. */
double code(double x_m) {
double tmp;
if (x_m <= 0.032) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = ((1.0 - cos(x_m)) / x_m) / x_m;
}
return tmp;
}
# Julia: Taylor branch for small |x|, else two successive divisions by x_m.
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.032) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) / x_m); end return tmp end
(* Mathematica version of the same divide-twice rewrite. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.032], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.032:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 - \cos x\_m}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 0.032000000000000001Initial program 37.9%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lift--.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-/r/N/A
clear-numN/A
lower-+.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-neg.f6438.5
Applied rewrites38.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.8
Applied rewrites64.8%
if 0.032000000000000001 < x Initial program 96.7%
Applied rewrites99.2%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.032)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (- 1.0 (cos x_m)) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative for (1 - cos x)/x^2 with x_m = |x|:
 * - |x| <= 0.032: Taylor polynomial 0.5 - x^2/24 + x^4/720 via fma;
 * - otherwise: the original quotient form with denominator x_m * x_m. */
double code(double x_m) {
double tmp;
if (x_m <= 0.032) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = (1.0 - cos(x_m)) / (x_m * x_m);
}
return tmp;
}
# Julia: Taylor branch for small |x|, else original quotient over x_m^2.
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.032) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 - cos(x_m)) / Float64(x_m * x_m)); end return tmp end
(* Mathematica version of the same branched rewrite. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.032], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.032:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.032000000000000001Initial program 37.9%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lift--.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-/r/N/A
clear-numN/A
lower-+.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-neg.f6438.5
Applied rewrites38.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.8
Applied rewrites64.8%
if 0.032000000000000001 < x Initial program 96.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (/ 1.0 x_m) (/ (fma 0.16666666666666666 (* x_m x_m) 2.0) x_m)))
x_m = fabs(x);
double code(double x_m) {
return (1.0 / x_m) / (fma(0.16666666666666666, (x_m * x_m), 2.0) / x_m);
}
# Julia: series-derived rational form (1/x) / ((2 + x^2/6)/x).
x_m = abs(x) function code(x_m) return Float64(Float64(1.0 / x_m) / Float64(fma(0.16666666666666666, Float64(x_m * x_m), 2.0) / x_m)) end
(* Mathematica version of the same rational form. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(1.0 / x$95$m), $MachinePrecision] / N[(N[(0.16666666666666666 * N[(x$95$m * x$95$m), $MachinePrecision] + 2.0), $MachinePrecision] / x$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\frac{1}{x\_m}}{\frac{\mathsf{fma}\left(0.16666666666666666, x\_m \cdot x\_m, 2\right)}{x\_m}}
\end{array}
Initial program 54.7%
Applied rewrites56.2%
Taylor expanded in x around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6476.6
Applied rewrites76.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 7e+38)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ (- 1.0 1.0) (* x_m x_m))))
x_m = fabs(x);
double code(double x_m) {
double tmp;
if (x_m <= 7e+38) {
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = (1.0 - 1.0) / (x_m * x_m);
}
return tmp;
}
# Julia: Taylor polynomial up to 7e38, zero numerator (1.0 - 1.0) above.
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 7e+38) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 - 1.0) / Float64(x_m * x_m)); end return tmp end
(* Mathematica version of the same 7e38-split rewrite. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 7e+38], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - 1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 7 \cdot 10^{+38}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - 1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 7.00000000000000003e38Initial program 41.9%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lift--.f64N/A
sub-negN/A
distribute-lft-inN/A
associate-/r/N/A
clear-numN/A
lower-+.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f64N/A
lower-neg.f6442.5
Applied rewrites42.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6460.9
Applied rewrites60.9%
if 7.00000000000000003e38 < x Initial program 96.3%
Taylor expanded in x around 0
Applied rewrites57.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 9.6e+76) 0.5 (/ (- 1.0 1.0) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative: the constant 0.5 (value of (1 - cos x)/x^2 at
 * x -> 0 per the Taylor expansion in the report) below 9.6e76, else a
 * numerator rewritten to 1.0 - 1.0, which makes the branch return 0. */
double code(double x_m) {
double tmp;
if (x_m <= 9.6e+76) {
tmp = 0.5;
} else {
tmp = (1.0 - 1.0) / (x_m * x_m);  /* 0 divided by a huge square */
}
return tmp;
}
x_m = abs(x)
! Fortran version: constant 0.5 below 9.6e76, zero numerator above.
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 9.6d+76) then
tmp = 0.5d0
else
tmp = (1.0d0 - 1.0d0) / (x_m * x_m)
end if
code = tmp
end function
x_m = Math.abs(x);
/**
 * Herbie alternative: returns the constant 0.5 for x_m <= 9.6e76;
 * beyond that the rewritten numerator (1.0 - 1.0) is zero, so the
 * quotient is 0.0.
 */
public static double code(double x_m) {
    if (x_m <= 9.6e+76) {
        return 0.5;
    }
    return (1.0 - 1.0) / (x_m * x_m);
}
# Python: constant 0.5 below 9.6e76, zero numerator (1.0 - 1.0) above.
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 9.6e+76: tmp = 0.5 else: tmp = (1.0 - 1.0) / (x_m * x_m) return tmp
# Julia version of the same 9.6e76-split rewrite.
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 9.6e+76) tmp = 0.5; else tmp = Float64(Float64(1.0 - 1.0) / Float64(x_m * x_m)); end return tmp end
% MATLAB version of the same rewrite.
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 9.6e+76) tmp = 0.5; else tmp = (1.0 - 1.0) / (x_m * x_m); end tmp_2 = tmp; end
(* Mathematica version of the same rewrite. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 9.6e+76], 0.5, N[(N[(1.0 - 1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 9.6 \cdot 10^{+76}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - 1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 9.5999999999999999e76Initial program 44.9%
Taylor expanded in x around 0
Applied rewrites58.0%
if 9.5999999999999999e76 < x Initial program 95.9%
Taylor expanded in x around 0
Applied rewrites69.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 0.5)
x_m = fabs(x);
/* Final collapsed alternative: the constant 0.5, the Taylor-series
 * value of (1 - cos x)/x^2 at x = 0 (report accuracy 47.5%). */
double code(double x_m) {
return 0.5;
}
x_m = abs(x)
! Fortran version of the constant-0.5 alternative.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 0.5d0
end function
x_m = Math.abs(x);
// Java version of the constant-0.5 alternative.
public static double code(double x_m) {
return 0.5;
}
# Python version of the constant-0.5 alternative.
x_m = math.fabs(x) def code(x_m): return 0.5
# Julia version of the constant-0.5 alternative.
x_m = abs(x) function code(x_m) return 0.5 end
% MATLAB version of the constant-0.5 alternative.
x_m = abs(x); function tmp = code(x_m) tmp = 0.5; end
(* Mathematica version of the constant-0.5 alternative. *)
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 0.5
\begin{array}{l}
x_m = \left|x\right|
\\
0.5
\end{array}
Initial program 54.7%
Taylor expanded in x around 0
Applied rewrites47.5%
herbie shell --seed 2024240
(FPCore (x)
:name "cos2 (problem 3.4.1)"
:precision binary64
(/ (- 1.0 (cos x)) (* x x)))