
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
/* Reference implementation of (1 - cos(x)) / x^2 in binary64.
 * NOTE(review): 1.0 - cos(x) cancels catastrophically for |x| << 1
 * (cos(x) ~ 1), losing most significant bits of the result. */
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Reference implementation of (1 - cos(x)) / x**2 in real(8).
! NOTE(review): 1.0d0 - cos(x) suffers catastrophic cancellation for |x| << 1.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// Reference implementation of (1 - cos(x)) / x^2 in double precision.
// NOTE(review): 1.0 - Math.cos(x) cancels catastrophically for |x| << 1.
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
def code(x):
    """Naive binary64 evaluation of (1 - cos(x)) / x**2.

    Note: the subtraction 1.0 - cos(x) cancels badly for |x| << 1.
    Operation order matches the original one-liner exactly.
    """
    numerator = 1.0 - math.cos(x)
    denominator = x * x
    return numerator / denominator
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
/* Reference implementation of (1 - cos(x)) / x^2 in binary64.
 * NOTE(review): 1.0 - cos(x) cancels catastrophically for |x| << 1. */
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Reference implementation of (1 - cos(x)) / x**2 in real(8).
! NOTE(review): 1.0d0 - cos(x) suffers catastrophic cancellation for |x| << 1.
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
// Reference implementation of (1 - cos(x)) / x^2 in double precision.
// NOTE(review): 1.0 - Math.cos(x) cancels catastrophically for |x| << 1.
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
def code(x):
    """Naive binary64 evaluation of (1 - cos(x)) / x**2.

    Note: the subtraction 1.0 - cos(x) cancels badly for |x| << 1.
    Operation order matches the original one-liner exactly.
    """
    top = 1.0 - math.cos(x)
    bottom = x * x
    return top / bottom
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 0.0003) (fma -0.041666666666666664 (* x_m x_m) 0.5) (/ (/ (* (tan (* x_m 0.5)) (sin x_m)) x_m) x_m)))
x_m = fabs(x);
/* Herbie alternative for (1 - cos(x)) / x^2, evaluated on x_m = |x|.
 * Small branch (x_m <= 3e-4): degree-2 Taylor polynomial
 *   1/2 - x^2/24  (the constant -0.041666... is -1/24), via fma.
 * Large branch: uses the identity 1 - cos(x) = tan(x/2) * sin(x),
 * avoiding the subtractive cancellation of the naive form. */
double code(double x_m) {
double tmp;
if (x_m <= 0.0003) {
tmp = fma(-0.041666666666666664, (x_m * x_m), 0.5);
} else {
tmp = ((tan((x_m * 0.5)) * sin(x_m)) / x_m) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.0003) tmp = fma(-0.041666666666666664, Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(tan(Float64(x_m * 0.5)) * sin(x_m)) / x_m) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.0003], N[(-0.041666666666666664 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(N[Tan[N[(x$95$m * 0.5), $MachinePrecision]], $MachinePrecision] * N[Sin[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.0003:\\
\;\;\;\;\mathsf{fma}\left(-0.041666666666666664, x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{\tan \left(x\_m \cdot 0.5\right) \cdot \sin x\_m}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 2.99999999999999974e-4Initial program 36.3%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6466.6
Applied rewrites66.6%
if 2.99999999999999974e-4 < x Initial program 98.2%
lift-cos.f64N/A
flip--N/A
lift-*.f64N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6499.5
Applied rewrites99.5%
lift-sin.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
associate-*l/N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-*.f6499.6
lift-/.f64N/A
div-invN/A
metadata-evalN/A
lower-*.f6499.6
Applied rewrites99.6%
Final simplification76.0%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.02)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(/ (* (sin x_m) (tan (/ x_m 2.0))) (* x_m x_m))))x_m = fabs(x);
/* Herbie alternative for (1 - cos(x)) / x^2, evaluated on x_m = |x|.
 * Small branch (x_m <= 0.02): degree-6 Taylor polynomial
 *   1/2 - x^2/24 + x^4/720 - x^6/40320
 * (coefficients -1/24, 1/720, -1/40320) in nested fma/Horner form.
 * Large branch: 1 - cos(x) = sin(x) * tan(x/2), cancellation-free. */
double code(double x_m) {
double tmp;
if (x_m <= 0.02) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = (sin(x_m) * tan((x_m / 2.0))) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.02) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(sin(x_m) * tan(Float64(x_m / 2.0))) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.02], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[Sin[x$95$m], $MachinePrecision] * N[Tan[N[(x$95$m / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.02:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\sin x\_m \cdot \tan \left(\frac{x\_m}{2}\right)}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.0200000000000000004Initial program 36.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites67.4%
if 0.0200000000000000004 < x Initial program 99.3%
lift-cos.f64N/A
flip--N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
associate-/l*N/A
lower-*.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.02)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(* (tan (/ x_m 2.0)) (/ (sin x_m) (* x_m x_m)))))x_m = fabs(x);
/* Herbie alternative for (1 - cos(x)) / x^2, evaluated on x_m = |x|.
 * Small branch (x_m <= 0.02): degree-6 Taylor polynomial
 *   1/2 - x^2/24 + x^4/720 - x^6/40320 in nested fma form.
 * Large branch: tan(x/2) * (sin(x) / x^2) — same identity
 * 1 - cos(x) = tan(x/2)*sin(x), different association order. */
double code(double x_m) {
double tmp;
if (x_m <= 0.02) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = tan((x_m / 2.0)) * (sin(x_m) / (x_m * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.02) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(tan(Float64(x_m / 2.0)) * Float64(sin(x_m) / Float64(x_m * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.02], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[Tan[N[(x$95$m / 2.0), $MachinePrecision]], $MachinePrecision] * N[(N[Sin[x$95$m], $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.02:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\tan \left(\frac{x\_m}{2}\right) \cdot \frac{\sin x\_m}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.0200000000000000004Initial program 36.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites67.4%
if 0.0200000000000000004 < x Initial program 99.3%
lift-cos.f64N/A
flip--N/A
lift-*.f64N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6499.5
Applied rewrites99.5%
Final simplification76.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (* (/ 1.0 x_m) (* (tan (* x_m 0.5)) (/ (sin x_m) x_m))))
x_m = fabs(x);
/* Herbie alternative (no branch), evaluated on x_m = |x|:
 * (1/x) * tan(x/2) * (sin(x)/x), equal to (1 - cos(x))/x^2 via
 * 1 - cos(x) = tan(x/2)*sin(x). Divides by x_m, so x_m = 0 yields
 * inf/nan rather than the limit 0.5. */
double code(double x_m) {
return (1.0 / x_m) * (tan((x_m * 0.5)) * (sin(x_m) / x_m));
}
x_m = abs(x)
! Herbie alternative (no branch), evaluated on x_m = |x|:
! (1/x) * tan(x/2) * (sin(x)/x) = (1 - cos(x))/x**2 via the
! identity 1 - cos(x) = tan(x/2)*sin(x).
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = (1.0d0 / x_m) * (tan((x_m * 0.5d0)) * (sin(x_m) / x_m))
end function
x_m = Math.abs(x);
// Herbie alternative (no branch), evaluated on x_m = |x|:
// (1/x) * tan(x/2) * (sin(x)/x) = (1 - cos(x))/x^2 via the
// identity 1 - cos(x) = tan(x/2)*sin(x).
public static double code(double x_m) {
return (1.0 / x_m) * (Math.tan((x_m * 0.5)) * (Math.sin(x_m) / x_m));
}
# Herbie alternative (Python): precompute x_m = |x|, then evaluate
# (1/x_m) * tan(x_m/2) * (sin(x_m)/x_m) = (1 - cos x)/x^2 via the
# identity 1 - cos(x) = tan(x/2)*sin(x).
# NOTE(review): the report generator collapsed the assignment and the
# def onto one line; this is not valid Python as written.
x_m = math.fabs(x) def code(x_m): return (1.0 / x_m) * (math.tan((x_m * 0.5)) * (math.sin(x_m) / x_m))
x_m = abs(x) function code(x_m) return Float64(Float64(1.0 / x_m) * Float64(tan(Float64(x_m * 0.5)) * Float64(sin(x_m) / x_m))) end
x_m = abs(x); function tmp = code(x_m) tmp = (1.0 / x_m) * (tan((x_m * 0.5)) * (sin(x_m) / x_m)); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(1.0 / x$95$m), $MachinePrecision] * N[(N[Tan[N[(x$95$m * 0.5), $MachinePrecision]], $MachinePrecision] * N[(N[Sin[x$95$m], $MachinePrecision] / x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{x\_m} \cdot \left(\tan \left(x\_m \cdot 0.5\right) \cdot \frac{\sin x\_m}{x\_m}\right)
\end{array}
Initial program 54.0%
lift-cos.f64N/A
flip--N/A
lift-*.f64N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6480.1
Applied rewrites80.1%
lift-sin.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
associate-*l/N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lower-*.f6480.3
lift-/.f64N/A
div-invN/A
metadata-evalN/A
lower-*.f6480.3
Applied rewrites80.3%
lift-sin.f64N/A
lift-*.f64N/A
lift-tan.f64N/A
lift-*.f64N/A
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lower-*.f64N/A
lower-/.f6480.2
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.02)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(* (sin x_m) (/ (tan (* x_m 0.5)) (* x_m x_m)))))x_m = fabs(x);
/* Herbie alternative for (1 - cos(x)) / x^2, evaluated on x_m = |x|.
 * Small branch (x_m <= 0.02): degree-6 Taylor polynomial
 *   1/2 - x^2/24 + x^4/720 - x^6/40320 in nested fma form.
 * Large branch: sin(x) * (tan(x/2) / x^2) — identity
 * 1 - cos(x) = tan(x/2)*sin(x), yet another association order. */
double code(double x_m) {
double tmp;
if (x_m <= 0.02) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = sin(x_m) * (tan((x_m * 0.5)) / (x_m * x_m));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.02) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(sin(x_m) * Float64(tan(Float64(x_m * 0.5)) / Float64(x_m * x_m))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.02], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[Sin[x$95$m], $MachinePrecision] * N[(N[Tan[N[(x$95$m * 0.5), $MachinePrecision]], $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.02:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\sin x\_m \cdot \frac{\tan \left(x\_m \cdot 0.5\right)}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.0200000000000000004Initial program 36.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites67.4%
if 0.0200000000000000004 < x Initial program 99.3%
lift-cos.f64N/A
flip--N/A
lift-*.f64N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6499.5
Applied rewrites99.5%
lift-sin.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-tan.f64N/A
associate-*l/N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6499.5
lift-/.f64N/A
div-invN/A
metadata-evalN/A
lower-*.f6499.5
Applied rewrites99.5%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.088)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(/ (/ -1.0 x_m) (/ x_m (+ -1.0 (cos x_m))))))x_m = fabs(x);
/* Herbie alternative for (1 - cos(x)) / x^2, evaluated on x_m = |x|.
 * Small branch (x_m <= 0.088): degree-6 Taylor polynomial
 *   1/2 - x^2/24 + x^4/720 - x^6/40320 in nested fma form.
 * Large branch: (-1/x) / (x / (-1 + cos x)), algebraically equal to
 * (1 - cos x)/x^2. The term -1.0 + cos(x_m) still cancels for small
 * arguments, hence the wider 0.088 guard on the Taylor branch. */
double code(double x_m) {
double tmp;
if (x_m <= 0.088) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = (-1.0 / x_m) / (x_m / (-1.0 + cos(x_m)));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.088) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(-1.0 / x_m) / Float64(x_m / Float64(-1.0 + cos(x_m)))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.088], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(-1.0 / x$95$m), $MachinePrecision] / N[(x$95$m / N[(-1.0 + N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.088:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{-1}{x\_m}}{\frac{x\_m}{-1 + \cos x\_m}}\\
\end{array}
\end{array}
if x < 0.087999999999999995Initial program 36.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites67.4%
if 0.087999999999999995 < x Initial program 99.3%
Applied rewrites99.4%
lift-cos.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-/.f64N/A
*-commutativeN/A
lift-/.f64N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lower-/.f6499.4
Applied rewrites99.4%
Final simplification76.2%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.088)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(/ (/ (- 1.0 (cos x_m)) x_m) x_m)))x_m = fabs(x);
/* Herbie alternative for (1 - cos(x)) / x^2, evaluated on x_m = |x|.
 * Small branch (x_m <= 0.088): degree-6 Taylor polynomial
 *   1/2 - x^2/24 + x^4/720 - x^6/40320 in nested fma form.
 * Large branch: the naive form with the two divisions performed
 * sequentially, ((1 - cos x)/x)/x; the 1.0 - cos(x_m) subtraction
 * still cancels for small x, hence the 0.088 guard. */
double code(double x_m) {
double tmp;
if (x_m <= 0.088) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = ((1.0 - cos(x_m)) / x_m) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.088) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.088], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.088:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 - \cos x\_m}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 0.087999999999999995Initial program 36.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites67.4%
if 0.087999999999999995 < x Initial program 99.3%
lift-cos.f64N/A
lift-*.f64N/A
div-subN/A
lift-*.f64N/A
associate-/r*N/A
frac-2negN/A
metadata-evalN/A
lift-*.f64N/A
distribute-rgt-neg-inN/A
associate-/r*N/A
frac-2negN/A
sub-divN/A
lower-/.f64N/A
lower--.f64N/A
lower-/.f64N/A
lower-neg.f64N/A
lower-/.f64N/A
lower-neg.f6499.4
Applied rewrites99.4%
lift-/.f64N/A
lift-cos.f64N/A
frac-2negN/A
lift-neg.f64N/A
distribute-neg-frac2N/A
neg-mul-1N/A
neg-mul-1N/A
times-fracN/A
metadata-evalN/A
metadata-evalN/A
lift-neg.f64N/A
distribute-neg-frac2N/A
lift-/.f64N/A
lift-neg.f64N/A
lift-neg.f64N/A
sub-divN/A
Applied rewrites99.3%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.088)
(fma
(* x_m x_m)
(fma
x_m
(* x_m (fma x_m (* x_m -2.48015873015873e-5) 0.001388888888888889))
-0.041666666666666664)
0.5)
(/ (- 1.0 (cos x_m)) (* x_m x_m))))x_m = fabs(x);
/* Herbie alternative for (1 - cos(x)) / x^2, evaluated on x_m = |x|.
 * Small branch (x_m <= 0.088): degree-6 Taylor polynomial
 *   1/2 - x^2/24 + x^4/720 - x^6/40320 in nested fma form.
 * Large branch: the naive expression unchanged; safe there because
 * the cancellation in 1.0 - cos(x_m) only matters for small x. */
double code(double x_m) {
double tmp;
if (x_m <= 0.088) {
tmp = fma((x_m * x_m), fma(x_m, (x_m * fma(x_m, (x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5);
} else {
tmp = (1.0 - cos(x_m)) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.088) tmp = fma(Float64(x_m * x_m), fma(x_m, Float64(x_m * fma(x_m, Float64(x_m * -2.48015873015873e-5), 0.001388888888888889)), -0.041666666666666664), 0.5); else tmp = Float64(Float64(1.0 - cos(x_m)) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.088], N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[(x$95$m * N[(x$95$m * N[(x$95$m * -2.48015873015873e-5), $MachinePrecision] + 0.001388888888888889), $MachinePrecision]), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.088:\\
\;\;\;\;\mathsf{fma}\left(x\_m \cdot x\_m, \mathsf{fma}\left(x\_m, x\_m \cdot \mathsf{fma}\left(x\_m, x\_m \cdot -2.48015873015873 \cdot 10^{-5}, 0.001388888888888889\right), -0.041666666666666664\right), 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.087999999999999995Initial program 36.9%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites67.4%
if 0.087999999999999995 < x Initial program 99.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 3.5) (fma -0.041666666666666664 (* x_m x_m) 0.5) (/ (+ 1.0 -1.0) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative (crude), evaluated on x_m = |x|.
 * x_m <= 3.5: degree-2 Taylor polynomial 1/2 - x^2/24 — far outside
 * its accurate range at this threshold.
 * Else: (1.0 + -1.0) / x^2, i.e. exactly 0 — a constant-zero stand-in
 * for the decaying tail of the true function. Accuracy-vs-speed
 * trade-off point from the report, not a faithful implementation. */
double code(double x_m) {
double tmp;
if (x_m <= 3.5) {
tmp = fma(-0.041666666666666664, (x_m * x_m), 0.5);
} else {
tmp = (1.0 + -1.0) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.5) tmp = fma(-0.041666666666666664, Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 + -1.0) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.5], N[(-0.041666666666666664 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 + -1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.5:\\
\;\;\;\;\mathsf{fma}\left(-0.041666666666666664, x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 + -1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 3.5Initial program 37.2%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6466.8
Applied rewrites66.8%
if 3.5 < x Initial program 99.4%
Taylor expanded in x around 0
Applied rewrites52.3%
Final simplification62.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 (fma x_m (* x_m 0.16666666666666666) 2.0)))
x_m = fabs(x);
/* Herbie alternative: rational approximation 1 / (2 + x^2/6),
 * evaluated with fma on x_m = |x|. Its Taylor expansion
 * 1/2 - x^2/24 + ... matches (1 - cos x)/x^2 through the x^2 term. */
double code(double x_m) {
return 1.0 / fma(x_m, (x_m * 0.16666666666666666), 2.0);
}
x_m = abs(x) function code(x_m) return Float64(1.0 / fma(x_m, Float64(x_m * 0.16666666666666666), 2.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[(x$95$m * N[(x$95$m * 0.16666666666666666), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\mathsf{fma}\left(x\_m, x\_m \cdot 0.16666666666666666, 2\right)}
\end{array}
Initial program 54.0%
Taylor expanded in x around 0
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f64N/A
Applied rewrites29.4%
Applied rewrites49.6%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f6476.3
Applied rewrites76.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 3.5) (fma -0.041666666666666664 (* x_m x_m) 0.5) 0.0))
x_m = fabs(x);
/* Herbie alternative (crude), evaluated on x_m = |x|.
 * x_m <= 3.5: degree-2 Taylor polynomial 1/2 - x^2/24.
 * Else: constant 0.0 stand-in for the decaying tail.
 * An accuracy-vs-speed trade-off point from the report. */
double code(double x_m) {
double tmp;
if (x_m <= 3.5) {
tmp = fma(-0.041666666666666664, (x_m * x_m), 0.5);
} else {
tmp = 0.0;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.5) tmp = fma(-0.041666666666666664, Float64(x_m * x_m), 0.5); else tmp = 0.0; end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.5], N[(-0.041666666666666664 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], 0.0]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.5:\\
\;\;\;\;\mathsf{fma}\left(-0.041666666666666664, x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if x < 3.5Initial program 37.2%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6466.8
Applied rewrites66.8%
if 3.5 < x Initial program 99.4%
Taylor expanded in x around 0
Applied rewrites52.3%
metadata-evalN/A
lift-*.f64N/A
div052.3
Applied rewrites52.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 8.2e+76) 0.5 0.0))
x_m = fabs(x);
/* Herbie alternative (cheapest non-trivial): a step function on
 * x_m = |x| — 0.5 (the x -> 0 limit of (1 - cos x)/x^2) up to a huge
 * threshold, 0.0 beyond. Speed-over-accuracy extreme of the report. */
double code(double x_m) {
double tmp;
if (x_m <= 8.2e+76) {
tmp = 0.5;
} else {
tmp = 0.0;
}
return tmp;
}
x_m = abs(x)
! Herbie alternative (cheapest non-trivial): a step function on
! x_m = |x| — 0.5 (the x -> 0 limit of (1 - cos x)/x**2) up to a huge
! threshold, 0.0 beyond. Speed-over-accuracy extreme of the report.
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 8.2d+76) then
tmp = 0.5d0
else
tmp = 0.0d0
end if
code = tmp
end function
x_m = Math.abs(x);
// Herbie alternative (cheapest non-trivial): a step function on
// x_m = |x| — 0.5 (the x -> 0 limit of (1 - cos x)/x^2) up to a huge
// threshold, 0.0 beyond. Speed-over-accuracy extreme of the report.
public static double code(double x_m) {
double tmp;
if (x_m <= 8.2e+76) {
tmp = 0.5;
} else {
tmp = 0.0;
}
return tmp;
}
# Herbie alternative (Python): step function — 0.5 for |x| up to a huge
# threshold, else 0.0.
# NOTE(review): the report generator collapsed the assignment and the
# def (plus its if/else body) onto one line; not valid Python as written.
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 8.2e+76: tmp = 0.5 else: tmp = 0.0 return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 8.2e+76) tmp = 0.5; else tmp = 0.0; end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 8.2e+76) tmp = 0.5; else tmp = 0.0; end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 8.2e+76], 0.5, 0.0]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 8.2 \cdot 10^{+76}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;0\\
\end{array}
\end{array}
if x < 8.1999999999999997e76Initial program 43.2%
Taylor expanded in x around 0
Applied rewrites60.6%
if 8.1999999999999997e76 < x Initial program 99.4%
Taylor expanded in x around 0
Applied rewrites72.0%
metadata-evalN/A
lift-*.f64N/A
div072.0
Applied rewrites72.0%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 0.0)
x_m = fabs(x);
/* Herbie alternative (degenerate): constant 0.0 regardless of input.
 * The zero-cost, lowest-accuracy end of the report's trade-off curve. */
double code(double x_m) {
return 0.0;
}
x_m = abs(x)
! Herbie alternative (degenerate): constant 0.0 regardless of input.
! The zero-cost, lowest-accuracy end of the report's trade-off curve.
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 0.0d0
end function
x_m = Math.abs(x);
// Herbie alternative (degenerate): constant 0.0 regardless of input.
// The zero-cost, lowest-accuracy end of the report's trade-off curve.
public static double code(double x_m) {
return 0.0;
}
# Herbie alternative (degenerate, Python): constant 0.0 regardless of input.
# NOTE(review): the report generator collapsed the assignment and the def
# onto one line; not valid Python as written.
x_m = math.fabs(x) def code(x_m): return 0.0
x_m = abs(x) function code(x_m) return 0.0 end
x_m = abs(x); function tmp = code(x_m) tmp = 0.0; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 0.0
\begin{array}{l}
x_m = \left|x\right|
\\
0
\end{array}
Initial program 54.0%
Taylor expanded in x around 0
Applied rewrites26.0%
metadata-evalN/A
lift-*.f64N/A
div026.5
Applied rewrites26.5%
herbie shell --seed 2024214
(FPCore (x)
:name "cos2 (problem 3.4.1)"
:precision binary64
(/ (- 1.0 (cos x)) (* x x)))