
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
! Computes (1 - cos(x)) / x**2 in double precision.
! Uses the half-angle identity 1 - cos(x) = 2*sin(x/2)**2 to avoid the
! catastrophic cancellation of the literal numerator for small |x|.
! Still 0/0 (NaN) at x = 0, matching the original.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: s
s = sin(0.5d0 * x)
code = 2.0d0 * (s / x) * (s / x)
end function
/**
 * Computes (1 - cos(x)) / x^2 in double precision.
 *
 * Uses the half-angle identity 1 - cos(x) = 2*sin^2(x/2) to avoid the
 * catastrophic cancellation of the literal form near x = 0.
 * Returns NaN at x == 0 (0/0), matching the original.
 */
public static double code(double x) {
    double s = Math.sin(0.5 * x);
    double r = s / x; // divide before squaring to avoid overflow
    return 2.0 * r * r;
}
def code(x):
    """Return (1 - cos(x)) / x**2 without catastrophic cancellation.

    Rewrites the numerator via the half-angle identity
    1 - cos(x) == 2*sin(x/2)**2, which stays accurate for small |x|
    where the naive subtraction loses nearly all digits.
    Raises ZeroDivisionError at x == 0, same as the naive form.
    """
    r = math.sin(0.5 * x) / x
    return 2.0 * r * r
# Return (1 - cos(x)) / x^2 computed stably: the half-angle identity
# 1 - cos(x) == 2*sin(x/2)^2 avoids the catastrophic cancellation of
# the naive numerator for small |x|.  Still NaN at x == 0 (0/0).
function code(x)
	r = Float64(Float64(sin(Float64(0.5 * x))) / x)
	return Float64(2.0 * Float64(r * r))
end
% Computes (1 - cos(x)) / x^2 using the half-angle identity
% 1 - cos(x) = 2*sin(x/2)^2 to avoid catastrophic cancellation near x = 0.
% Still NaN at x = 0 (0/0), matching the naive form.
function tmp = code(x)
	s = sin(0.5 * x);
	tmp = 2.0 * (s / x) * (s / x);
end
(* Naive machine-precision form of (1 - cos x)/x^2; the subtraction
   1.0 - Cos[x] cancels catastrophically for small |x|. *)
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
(FPCore (x) :precision binary64 (/ (- 1.0 (cos x)) (* x x)))
double code(double x) {
return (1.0 - cos(x)) / (x * x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 - cos(x)) / (x * x)
end function
public static double code(double x) {
return (1.0 - Math.cos(x)) / (x * x);
}
def code(x): return (1.0 - math.cos(x)) / (x * x)
function code(x) return Float64(Float64(1.0 - cos(x)) / Float64(x * x)) end
function tmp = code(x) tmp = (1.0 - cos(x)) / (x * x); end
code[x_] := N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] / N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1 - \cos x}{x \cdot x}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.05)
(fma
(fma
(fma -2.48015873015873e-5 (* x_m x_m) 0.001388888888888889)
(* x_m x_m)
-0.041666666666666664)
(* x_m x_m)
0.5)
(* (/ (sin x_m) (* x_m x_m)) (tan (* x_m 0.5)))))
x_m = fabs(x);
/* Herbie alternative for (1 - cos x)/x^2, with x_m = fabs(x) computed by
 * the caller (so x_m is assumed nonnegative).  Both branches avoid the
 * cancellation in 1 - cos(x). */
double code(double x_m) {
double tmp;
if (x_m <= 0.05) {
/* Degree-6 Taylor polynomial of (1 - cos x)/x^2 about 0, Horner form in
 * x^2 via fma:  1/2 - x^2/24 + x^4/720 - x^6/40320
 * (constants are -1/40320, 1/720, -1/24, 1/2). */
tmp = fma(fma(fma(-2.48015873015873e-5, (x_m * x_m), 0.001388888888888889), (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
/* Identity: sin(x) * tan(x/2) == 1 - cos(x), so this equals the original
 * expression without subtracting nearly-equal quantities. */
tmp = (sin(x_m) / (x_m * x_m)) * tan((x_m * 0.5));
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.05) tmp = fma(fma(fma(-2.48015873015873e-5, Float64(x_m * x_m), 0.001388888888888889), Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(sin(x_m) / Float64(x_m * x_m)) * tan(Float64(x_m * 0.5))); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.05], N[(N[(N[(-2.48015873015873e-5 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.001388888888888889), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[Sin[x$95$m], $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] * N[Tan[N[(x$95$m * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.05:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.48015873015873 \cdot 10^{-5}, x\_m \cdot x\_m, 0.001388888888888889\right), x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\sin x\_m}{x\_m \cdot x\_m} \cdot \tan \left(x\_m \cdot 0.5\right)\\
\end{array}
\end{array}
if x < 0.050000000000000003Initial program 37.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.0
Applied rewrites64.0%
if 0.050000000000000003 < x Initial program 99.1%
lift-/.f64N/A
lift--.f64N/A
flip--N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6499.5
Applied rewrites99.5%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
metadata-evalN/A
lower-*.f6499.5
Applied rewrites99.5%
Final simplification72.9%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (tan (* x_m 0.5)) (* (/ x_m (sin x_m)) x_m)))
x_m = fabs(x);
/* Herbie alternative for (1 - cos x)/x^2, with x_m = fabs(x) from the
 * caller.  tan(x/2) / ((x/sin x) * x) == sin(x)*tan(x/2) / x^2, and by the
 * half-angle identity sin(x)*tan(x/2) == 1 - cos(x), so this equals the
 * original expression with no cancelling subtraction. */
double code(double x_m) {
return tan((x_m * 0.5)) / ((x_m / sin(x_m)) * x_m);
}
x_m = abs(x)
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = tan((x_m * 0.5d0)) / ((x_m / sin(x_m)) * x_m)
end function
x_m = Math.abs(x);
public static double code(double x_m) {
return Math.tan((x_m * 0.5)) / ((x_m / Math.sin(x_m)) * x_m);
}
x_m = math.fabs(x) def code(x_m): return math.tan((x_m * 0.5)) / ((x_m / math.sin(x_m)) * x_m)
x_m = abs(x) function code(x_m) return Float64(tan(Float64(x_m * 0.5)) / Float64(Float64(x_m / sin(x_m)) * x_m)) end
x_m = abs(x); function tmp = code(x_m) tmp = tan((x_m * 0.5)) / ((x_m / sin(x_m)) * x_m); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Tan[N[(x$95$m * 0.5), $MachinePrecision]], $MachinePrecision] / N[(N[(x$95$m / N[Sin[x$95$m], $MachinePrecision]), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\tan \left(x\_m \cdot 0.5\right)}{\frac{x\_m}{\sin x\_m} \cdot x\_m}
\end{array}
Initial program 52.9%
lift-/.f64N/A
lift--.f64N/A
flip--N/A
associate-/l/N/A
metadata-evalN/A
lift-cos.f64N/A
lift-cos.f64N/A
1-sub-cosN/A
times-fracN/A
lower-*.f64N/A
lower-/.f64N/A
lower-sin.f64N/A
lift-cos.f64N/A
hang-0p-tanN/A
lower-tan.f64N/A
lower-/.f6475.2
Applied rewrites75.2%
lift-*.f64N/A
*-commutativeN/A
lift-/.f64N/A
clear-numN/A
un-div-invN/A
lower-/.f64N/A
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
metadata-evalN/A
lower-*.f64N/A
lift-*.f64N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f6499.6
Applied rewrites99.6%
Final simplification99.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.1)
(fma
(fma
(fma -2.48015873015873e-5 (* x_m x_m) 0.001388888888888889)
(* x_m x_m)
-0.041666666666666664)
(* x_m x_m)
0.5)
(* (- 1.0 (cos x_m)) (pow x_m -2.0))))
x_m = fabs(x);
/* Herbie alternative: Taylor polynomial for x_m <= 0.1, otherwise the
 * original expression with 1/x^2 written as pow(x_m, -2).
 * x_m = fabs(x) is computed by the caller. */
double code(double x_m) {
double tmp;
if (x_m <= 0.1) {
/* 1/2 - x^2/24 + x^4/720 - x^6/40320, Horner in x^2 via fma. */
tmp = fma(fma(fma(-2.48015873015873e-5, (x_m * x_m), 0.001388888888888889), (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
/* For x_m > 0.1 the subtraction 1 - cos(x) no longer cancels badly. */
tmp = (1.0 - cos(x_m)) * pow(x_m, -2.0);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.1) tmp = fma(fma(fma(-2.48015873015873e-5, Float64(x_m * x_m), 0.001388888888888889), Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 - cos(x_m)) * (x_m ^ -2.0)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.1], N[(N[(N[(-2.48015873015873e-5 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.001388888888888889), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Power[x$95$m, -2.0], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.1:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.48015873015873 \cdot 10^{-5}, x\_m \cdot x\_m, 0.001388888888888889\right), x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\left(1 - \cos x\_m\right) \cdot {x\_m}^{-2}\\
\end{array}
\end{array}
if x < 0.10000000000000001Initial program 37.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.0
Applied rewrites64.0%
if 0.10000000000000001 < x Initial program 99.1%
Applied rewrites98.9%
Final simplification72.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.1)
(fma
(fma
(fma -2.48015873015873e-5 (* x_m x_m) 0.001388888888888889)
(* x_m x_m)
-0.041666666666666664)
(* x_m x_m)
0.5)
(/ (/ (- 1.0 (cos x_m)) x_m) x_m)))
x_m = fabs(x);
/* Herbie alternative: Taylor polynomial for x_m <= 0.1, otherwise the
 * original expression with the division by x^2 split into two divisions
 * by x_m.  x_m = fabs(x) is computed by the caller. */
double code(double x_m) {
double tmp;
if (x_m <= 0.1) {
/* 1/2 - x^2/24 + x^4/720 - x^6/40320, Horner in x^2 via fma. */
tmp = fma(fma(fma(-2.48015873015873e-5, (x_m * x_m), 0.001388888888888889), (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
/* Dividing twice avoids forming x_m * x_m (which overflows for huge x). */
tmp = ((1.0 - cos(x_m)) / x_m) / x_m;
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.1) tmp = fma(fma(fma(-2.48015873015873e-5, Float64(x_m * x_m), 0.001388888888888889), Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(Float64(1.0 - cos(x_m)) / x_m) / x_m); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.1], N[(N[(N[(-2.48015873015873e-5 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.001388888888888889), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / x$95$m), $MachinePrecision] / x$95$m), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.1:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.48015873015873 \cdot 10^{-5}, x\_m \cdot x\_m, 0.001388888888888889\right), x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{1 - \cos x\_m}{x\_m}}{x\_m}\\
\end{array}
\end{array}
if x < 0.10000000000000001Initial program 37.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.0
Applied rewrites64.0%
if 0.10000000000000001 < x Initial program 99.1%
Applied rewrites98.9%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 0.1)
(fma
(fma
(fma -2.48015873015873e-5 (* x_m x_m) 0.001388888888888889)
(* x_m x_m)
-0.041666666666666664)
(* x_m x_m)
0.5)
(/ (- 1.0 (cos x_m)) (* x_m x_m))))
x_m = fabs(x);
/* Herbie alternative: Taylor polynomial for x_m <= 0.1, otherwise the
 * unmodified original expression (safe there, since the cancellation in
 * 1 - cos(x) only bites for small |x|).  x_m = fabs(x) from the caller. */
double code(double x_m) {
double tmp;
if (x_m <= 0.1) {
/* 1/2 - x^2/24 + x^4/720 - x^6/40320, Horner in x^2 via fma. */
tmp = fma(fma(fma(-2.48015873015873e-5, (x_m * x_m), 0.001388888888888889), (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
tmp = (1.0 - cos(x_m)) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 0.1) tmp = fma(fma(fma(-2.48015873015873e-5, Float64(x_m * x_m), 0.001388888888888889), Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(Float64(1.0 - cos(x_m)) / Float64(x_m * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 0.1], N[(N[(N[(-2.48015873015873e-5 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.001388888888888889), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(N[(1.0 - N[Cos[x$95$m], $MachinePrecision]), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 0.1:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-2.48015873015873 \cdot 10^{-5}, x\_m \cdot x\_m, 0.001388888888888889\right), x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - \cos x\_m}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 0.10000000000000001Initial program 37.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.0
Applied rewrites64.0%
if 0.10000000000000001 < x Initial program 99.1%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(if (<= x_m 4.8)
(fma
(fma 0.001388888888888889 (* x_m x_m) -0.041666666666666664)
(* x_m x_m)
0.5)
(/ 1.0 (* (* 0.16666666666666666 x_m) x_m))))
x_m = fabs(x);
/* Herbie speed-over-accuracy alternative: degree-4 Taylor polynomial for
 * x_m <= 4.8, and 6/x^2 beyond that (the report lists ~45% accuracy for
 * the large-x branch).  x_m = fabs(x) from the caller. */
double code(double x_m) {
double tmp;
if (x_m <= 4.8) {
/* 1/2 - x^2/24 + x^4/720, Horner in x^2 via fma. */
tmp = fma(fma(0.001388888888888889, (x_m * x_m), -0.041666666666666664), (x_m * x_m), 0.5);
} else {
/* 0.16666666666666666 == 1/6, so this computes 6/x^2. */
tmp = 1.0 / ((0.16666666666666666 * x_m) * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 4.8) tmp = fma(fma(0.001388888888888889, Float64(x_m * x_m), -0.041666666666666664), Float64(x_m * x_m), 0.5); else tmp = Float64(1.0 / Float64(Float64(0.16666666666666666 * x_m) * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 4.8], N[(N[(0.001388888888888889 * N[(x$95$m * x$95$m), $MachinePrecision] + -0.041666666666666664), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(1.0 / N[(N[(0.16666666666666666 * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 4.8:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x\_m \cdot x\_m, -0.041666666666666664\right), x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\left(0.16666666666666666 \cdot x\_m\right) \cdot x\_m}\\
\end{array}
\end{array}
if x < 4.79999999999999982Initial program 37.5%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
sub-negN/A
metadata-evalN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6464.3
Applied rewrites64.3%
if 4.79999999999999982 < x Initial program 99.1%
Applied rewrites98.9%
Taylor expanded in x around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6445.1
Applied rewrites45.1%
Taylor expanded in x around inf
Applied rewrites45.1%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 3.2) (fma -0.041666666666666664 (* x_m x_m) 0.5) (/ 1.0 (* (* 0.16666666666666666 x_m) x_m))))
x_m = fabs(x);
/* Herbie speed-over-accuracy alternative: degree-2 Taylor polynomial for
 * x_m <= 3.2, and 6/x^2 beyond that (report lists ~45% accuracy for the
 * large-x branch).  x_m = fabs(x) from the caller. */
double code(double x_m) {
double tmp;
if (x_m <= 3.2) {
/* 1/2 - x^2/24 via a single fma. */
tmp = fma(-0.041666666666666664, (x_m * x_m), 0.5);
} else {
/* 0.16666666666666666 == 1/6, so this computes 6/x^2. */
tmp = 1.0 / ((0.16666666666666666 * x_m) * x_m);
}
return tmp;
}
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 3.2) tmp = fma(-0.041666666666666664, Float64(x_m * x_m), 0.5); else tmp = Float64(1.0 / Float64(Float64(0.16666666666666666 * x_m) * x_m)); end return tmp end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 3.2], N[(-0.041666666666666664 * N[(x$95$m * x$95$m), $MachinePrecision] + 0.5), $MachinePrecision], N[(1.0 / N[(N[(0.16666666666666666 * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 3.2:\\
\;\;\;\;\mathsf{fma}\left(-0.041666666666666664, x\_m \cdot x\_m, 0.5\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\left(0.16666666666666666 \cdot x\_m\right) \cdot x\_m}\\
\end{array}
\end{array}
if x < 3.2000000000000002Initial program 37.5%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6463.9
Applied rewrites63.9%
if 3.2000000000000002 < x Initial program 99.1%
Applied rewrites98.9%
Taylor expanded in x around 0
lower-/.f64N/A
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6445.1
Applied rewrites45.1%
Taylor expanded in x around inf
Applied rewrites45.1%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.15e+77) 0.5 (/ (- 1.0 1.0) (* x_m x_m))))
x_m = fabs(x);
/* Herbie degenerate alternative: returns the x -> 0 limit, 0.5, over almost
 * the whole range; the else branch computes (1.0 - 1.0)/x^2 == 0.0 for
 * x_m > 1.15e77.  A fast, low-accuracy candidate from the report. */
double code(double x_m) {
double tmp;
if (x_m <= 1.15e+77) {
tmp = 0.5;
} else {
tmp = (1.0 - 1.0) / (x_m * x_m);
}
return tmp;
}
x_m = abs(x)
real(8) function code(x_m)
real(8), intent (in) :: x_m
real(8) :: tmp
if (x_m <= 1.15d+77) then
tmp = 0.5d0
else
tmp = (1.0d0 - 1.0d0) / (x_m * x_m)
end if
code = tmp
end function
x_m = Math.abs(x);
public static double code(double x_m) {
double tmp;
if (x_m <= 1.15e+77) {
tmp = 0.5;
} else {
tmp = (1.0 - 1.0) / (x_m * x_m);
}
return tmp;
}
x_m = math.fabs(x) def code(x_m): tmp = 0 if x_m <= 1.15e+77: tmp = 0.5 else: tmp = (1.0 - 1.0) / (x_m * x_m) return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.15e+77) tmp = 0.5; else tmp = Float64(Float64(1.0 - 1.0) / Float64(x_m * x_m)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1.15e+77) tmp = 0.5; else tmp = (1.0 - 1.0) / (x_m * x_m); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.15e+77], 0.5, N[(N[(1.0 - 1.0), $MachinePrecision] / N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.15 \cdot 10^{+77}:\\
\;\;\;\;0.5\\
\mathbf{else}:\\
\;\;\;\;\frac{1 - 1}{x\_m \cdot x\_m}\\
\end{array}
\end{array}
if x < 1.14999999999999997e77Initial program 43.8%
Taylor expanded in x around 0
Applied rewrites58.8%
if 1.14999999999999997e77 < x Initial program 99.1%
Taylor expanded in x around 0
Applied rewrites55.5%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ 1.0 (fma 0.16666666666666666 (* x_m x_m) 2.0)))
x_m = fabs(x);
/* Herbie alternative: smooth approximant 1 / (x^2/6 + 2) for
 * (1 - cos x)/x^2.  It matches the exact x -> 0 limit (1/2 at x_m = 0)
 * and decays like 6/x^2 for large x_m; branch-free but only moderately
 * accurate in between (report: ~72%).  x_m = fabs(x) from the caller. */
double code(double x_m) {
return 1.0 / fma(0.16666666666666666, (x_m * x_m), 2.0);
}
x_m = abs(x) function code(x_m) return Float64(1.0 / fma(0.16666666666666666, Float64(x_m * x_m), 2.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(1.0 / N[(0.16666666666666666 * N[(x$95$m * x$95$m), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{1}{\mathsf{fma}\left(0.16666666666666666, x\_m \cdot x\_m, 2\right)}
\end{array}
Initial program 52.9%
Applied rewrites53.6%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6472.2
Applied rewrites72.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 0.5)
x_m = fabs(x);
/* Herbie constant alternative: the x -> 0 limit of (1 - cos x)/x^2 is 1/2;
 * this returns 0.5 unconditionally (fastest, lowest accuracy). */
double code(double x_m) {
return 0.5;
}
x_m = abs(x)
real(8) function code(x_m)
real(8), intent (in) :: x_m
code = 0.5d0
end function
x_m = Math.abs(x);
public static double code(double x_m) {
return 0.5;
}
x_m = math.fabs(x) def code(x_m): return 0.5
x_m = abs(x) function code(x_m) return 0.5 end
x_m = abs(x); function tmp = code(x_m) tmp = 0.5; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := 0.5
\begin{array}{l}
x_m = \left|x\right|
\\
0.5
\end{array}
Initial program 52.9%
Taylor expanded in x around 0
Applied rewrites49.7%
herbie shell --seed 2024331
(FPCore (x)
:name "cos2 (problem 3.4.1)"
:precision binary64
(/ (- 1.0 (cos x)) (* x x)))