
;; Initial program: degree-7 Maclaurin-style series for erfi(|x|)
;; (see the named FPCore "Jmat.Real.erfi, branch x <= 0.5" at the end of this report):
;; |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
double code(double x) {
    /* Degree-7 series approximation of erfi(|x|):
     * |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
     * Powers are built by repeated multiplication in the same left-to-right
     * order as the generated original, so results are bit-identical. */
    double ax = fabs(x);
    double ax3 = ax * ax * ax;      /* (|x|*|x|)*|x| */
    double ax5 = ax3 * ax * ax;     /* (ax3*|x|)*|x| */
    double ax7 = ax5 * ax * ax;     /* (ax5*|x|)*|x| */
    double series = 2.0 * ax + (2.0 / 3.0) * ax3 + (1.0 / 5.0) * ax5 + (1.0 / 21.0) * ax7;
    return fabs((1.0 / sqrt(M_PI)) * series);
}
public static double code(double x) {
    // Degree-7 series approximation of erfi(|x|):
    // |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
    // Powers are accumulated in the same left-to-right order as the
    // generated original, so the result is bit-identical.
    double ax = Math.abs(x);
    double ax3 = ax * ax * ax;
    double ax5 = ax3 * ax * ax;
    double ax7 = ax5 * ax * ax;
    double series = 2.0 * ax + (2.0 / 3.0) * ax3 + (1.0 / 5.0) * ax5 + (1.0 / 21.0) * ax7;
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * series);
}
def code(x): t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x) t_1 = (t_0 * math.fabs(x)) * math.fabs(x) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; Alternative 1: identical to the initial program — degree-7 series for
;; erfi(|x|): |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
double code(double x) {
    /* Degree-7 series for erfi(|x|); same arithmetic as the generated
     * original, restructured with one named power per odd term. */
    double absX = fabs(x);
    double cube = (absX * absX) * absX;
    double fifth = (cube * absX) * absX;
    double seventh = (fifth * absX) * absX;
    double sum = ((2.0 * absX + (2.0 / 3.0) * cube) + (1.0 / 5.0) * fifth) + (1.0 / 21.0) * seventh;
    return fabs((1.0 / sqrt(M_PI)) * sum);
}
public static double code(double x) {
    // Degree-7 series for erfi(|x|); same arithmetic as the generated
    // original, restructured with one named power per odd term.
    final double absX = Math.abs(x);
    final double cube = (absX * absX) * absX;
    final double fifth = (cube * absX) * absX;
    final double seventh = (fifth * absX) * absX;
    final double sum = ((2.0 * absX + (2.0 / 3.0) * cube) + (1.0 / 5.0) * fifth) + (1.0 / 21.0) * seventh;
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * sum);
}
def code(x): t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x) t_1 = (t_0 * math.fabs(x)) * math.fabs(x) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
;; Alternative: the same degree-7 series evaluated with fused multiply-adds
;; (Herbie's fma-udef rewrite); 1/21 and 2/3 are pre-evaluated constants.
(FPCore (x)
:precision binary64
(fabs
(*
(pow PI -0.5)
(fma
0.047619047619047616
(pow x 7.0)
(fma 2.0 x (fma 0.6666666666666666 (pow x 3.0) (* 0.2 (pow x 5.0))))))))
double code(double x) {
    /* Degree-7 erfi series evaluated with fused multiply-adds:
     * pi^-0.5 * fma(1/21, x^7, fma(2, x, fma(2/3, x^3, x^5/5))).
     * Same call tree as the generated original, with named intermediates. */
    double p3 = pow(x, 3.0);
    double p5 = pow(x, 5.0);
    double p7 = pow(x, 7.0);
    double inner = fma(0.6666666666666666, p3, 0.2 * p5);
    double series = fma(0.047619047619047616, p7, fma(2.0, x, inner));
    return fabs(pow(M_PI, -0.5) * series);
}
function code(x) return abs(Float64((pi ^ -0.5) * fma(0.047619047619047616, (x ^ 7.0), fma(2.0, x, fma(0.6666666666666666, (x ^ 3.0), Float64(0.2 * (x ^ 5.0))))))) end
code[x_] := N[Abs[N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(0.047619047619047616 * N[Power[x, 7.0], $MachinePrecision] + N[(2.0 * x + N[(0.6666666666666666 * N[Power[x, 3.0], $MachinePrecision] + N[(0.2 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|{\pi}^{-0.5} \cdot \mathsf{fma}\left(0.047619047619047616, {x}^{7}, \mathsf{fma}\left(2, x, \mathsf{fma}\left(0.6666666666666666, {x}^{3}, 0.2 \cdot {x}^{5}\right)\right)\right)\right|
\end{array}
Initial program 99.9%
distribute-lft-in 99.9%
Applied egg-rr 99.9%
distribute-lft-out 99.9%
+-commutative 99.9%
fma-udef 99.9%
*-commutative 99.9%
pow-plus 99.9%
metadata-eval 99.9%
Simplified 99.9%
Final simplification 99.9%
;; Alternative: series with the cubic term dropped —
;; |pi^-0.5 * ((x^5/5 + x^7/21) + 2x)|.
(FPCore (x)
:precision binary64
(fabs
(*
(pow PI -0.5)
(+
(+ (* 0.2 (pow x 5.0)) (* 0.047619047619047616 (pow x 7.0)))
(* x 2.0)))))
double code(double x) {
    /* |pi^-0.5 * ((x^5/5 + x^7/21) + 2x)| — cubic term omitted.
     * Same operation order as the generated original. */
    double oddTerms = (0.2 * pow(x, 5.0)) + (0.047619047619047616 * pow(x, 7.0));
    return fabs(pow(M_PI, -0.5) * (oddTerms + (x * 2.0)));
}
public static double code(double x) {
    // |pi^-0.5 * ((x^5/5 + x^7/21) + 2x)| — cubic term omitted.
    // Same operation order as the generated original.
    double oddTerms = (0.2 * Math.pow(x, 5.0)) + (0.047619047619047616 * Math.pow(x, 7.0));
    return Math.abs(Math.pow(Math.PI, -0.5) * (oddTerms + (x * 2.0)));
}
def code(x): return math.fabs((math.pow(math.pi, -0.5) * (((0.2 * math.pow(x, 5.0)) + (0.047619047619047616 * math.pow(x, 7.0))) + (x * 2.0))))
function code(x) return abs(Float64((pi ^ -0.5) * Float64(Float64(Float64(0.2 * (x ^ 5.0)) + Float64(0.047619047619047616 * (x ^ 7.0))) + Float64(x * 2.0)))) end
function tmp = code(x) tmp = abs(((pi ^ -0.5) * (((0.2 * (x ^ 5.0)) + (0.047619047619047616 * (x ^ 7.0))) + (x * 2.0)))); end
code[x_] := N[Abs[N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(N[(N[(0.2 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|{\pi}^{-0.5} \cdot \left(\left(0.2 \cdot {x}^{5} + 0.047619047619047616 \cdot {x}^{7}\right) + x \cdot 2\right)\right|
\end{array}
Initial program 99.9%
distribute-lft-in99.9%
Applied egg-rr99.9%
+-commutative99.9%
distribute-lft-out99.9%
fma-udef99.9%
Simplified99.9%
Taylor expanded in x around inf 99.5%
Taylor expanded in x around 0 99.5%
Final simplification99.5%
;; Piecewise alternative: for x <= -1.6 keep only the x^5 and x^7 terms;
;; otherwise keep only the linear term |pi^-0.5 * 2x|.
(FPCore (x)
:precision binary64
(if (<= x -1.6)
(fabs
(/ (+ (* 0.2 (pow x 5.0)) (* 0.047619047619047616 (pow x 7.0))) (sqrt PI)))
(fabs (* (pow PI -0.5) (* x 2.0)))))
double code(double x) {
    /* Piecewise: for x <= -1.6 only the x^5 and x^7 terms (over sqrt(pi));
     * otherwise only the linear term pi^-0.5 * 2x.  Early returns replace
     * the original's tmp variable; arithmetic is unchanged. */
    if (x <= -1.6) {
        double poly = (0.2 * pow(x, 5.0)) + (0.047619047619047616 * pow(x, 7.0));
        return fabs(poly / sqrt(M_PI));
    }
    return fabs(pow(M_PI, -0.5) * (x * 2.0));
}
public static double code(double x) {
    // Piecewise: for x <= -1.6 only the x^5 and x^7 terms (over sqrt(pi));
    // otherwise only the linear term pi^-0.5 * 2x.  Early returns replace
    // the original's tmp variable; arithmetic is unchanged.
    if (x <= -1.6) {
        double poly = (0.2 * Math.pow(x, 5.0)) + (0.047619047619047616 * Math.pow(x, 7.0));
        return Math.abs(poly / Math.sqrt(Math.PI));
    }
    return Math.abs(Math.pow(Math.PI, -0.5) * (x * 2.0));
}
def code(x): tmp = 0 if x <= -1.6: tmp = math.fabs((((0.2 * math.pow(x, 5.0)) + (0.047619047619047616 * math.pow(x, 7.0))) / math.sqrt(math.pi))) else: tmp = math.fabs((math.pow(math.pi, -0.5) * (x * 2.0))) return tmp
function code(x) tmp = 0.0 if (x <= -1.6) tmp = abs(Float64(Float64(Float64(0.2 * (x ^ 5.0)) + Float64(0.047619047619047616 * (x ^ 7.0))) / sqrt(pi))); else tmp = abs(Float64((pi ^ -0.5) * Float64(x * 2.0))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -1.6) tmp = abs((((0.2 * (x ^ 5.0)) + (0.047619047619047616 * (x ^ 7.0))) / sqrt(pi))); else tmp = abs(((pi ^ -0.5) * (x * 2.0))); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -1.6], N[Abs[N[(N[(N[(0.2 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.6:\\
\;\;\;\;\left|\frac{0.2 \cdot {x}^{5} + 0.047619047619047616 \cdot {x}^{7}}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{\pi}^{-0.5} \cdot \left(x \cdot 2\right)\right|\\
\end{array}
\end{array}
if x < -1.6000000000000001: Initial program 99.8%
distribute-rgt-in99.8%
Applied egg-rr99.9%
+-commutative99.9%
*-rgt-identity99.9%
associate-*l/99.9%
*-lft-identity99.9%
times-frac99.9%
/-rgt-identity99.9%
*-rgt-identity99.9%
*-lft-identity99.9%
times-frac99.9%
/-rgt-identity99.9%
Simplified99.9%
Taylor expanded in x around inf 99.1%
if -1.6000000000000001 < x Initial program 99.9%
distribute-lft-in99.9%
Applied egg-rr99.9%
+-commutative99.9%
distribute-lft-out99.9%
fma-udef99.9%
Simplified99.9%
Taylor expanded in x around 0 99.7%
associate-*r*99.7%
*-commutative99.7%
unpow-199.7%
metadata-eval99.7%
pow-sqr99.7%
rem-sqrt-square99.7%
metadata-eval99.7%
pow-sqr99.7%
fabs-sqr99.7%
pow-sqr99.7%
metadata-eval99.7%
*-commutative99.7%
Simplified99.7%
Final simplification99.5%
(FPCore (x) :precision binary64 (if (<= x -1.86) (fabs (/ 0.047619047619047616 (* (sqrt PI) (pow x -7.0)))) (fabs (* (pow PI -0.5) (* x 2.0)))))
double code(double x) {
    /* For x <= -1.86: the x^7/21 term written as a division by x^-7
     * (Herbie's pow-flip rewrite); otherwise the linear term pi^-0.5 * 2x.
     * Early returns replace the original's tmp variable. */
    if (x <= -1.86) {
        return fabs(0.047619047619047616 / (sqrt(M_PI) * pow(x, -7.0)));
    }
    return fabs(pow(M_PI, -0.5) * (x * 2.0));
}
public static double code(double x) {
    // For x <= -1.86: the x^7/21 term written as a division by x^-7
    // (Herbie's pow-flip rewrite); otherwise the linear term pi^-0.5 * 2x.
    // Early returns replace the original's tmp variable.
    if (x <= -1.86) {
        return Math.abs(0.047619047619047616 / (Math.sqrt(Math.PI) * Math.pow(x, -7.0)));
    }
    return Math.abs(Math.pow(Math.PI, -0.5) * (x * 2.0));
}
def code(x): tmp = 0 if x <= -1.86: tmp = math.fabs((0.047619047619047616 / (math.sqrt(math.pi) * math.pow(x, -7.0)))) else: tmp = math.fabs((math.pow(math.pi, -0.5) * (x * 2.0))) return tmp
function code(x) tmp = 0.0 if (x <= -1.86) tmp = abs(Float64(0.047619047619047616 / Float64(sqrt(pi) * (x ^ -7.0)))); else tmp = abs(Float64((pi ^ -0.5) * Float64(x * 2.0))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -1.86) tmp = abs((0.047619047619047616 / (sqrt(pi) * (x ^ -7.0)))); else tmp = abs(((pi ^ -0.5) * (x * 2.0))); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -1.86], N[Abs[N[(0.047619047619047616 / N[(N[Sqrt[Pi], $MachinePrecision] * N[Power[x, -7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.86:\\
\;\;\;\;\left|\frac{0.047619047619047616}{\sqrt{\pi} \cdot {x}^{-7}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{\pi}^{-0.5} \cdot \left(x \cdot 2\right)\right|\\
\end{array}
\end{array}
if x < -1.8600000000000001: Initial program 99.8%
distribute-rgt-in99.8%
Applied egg-rr99.9%
+-commutative99.9%
*-rgt-identity99.9%
associate-*l/99.9%
*-lft-identity99.9%
times-frac99.9%
/-rgt-identity99.9%
*-rgt-identity99.9%
*-lft-identity99.9%
times-frac99.9%
/-rgt-identity99.9%
Simplified99.9%
Taylor expanded in x around inf 98.8%
expm1-log1p-u0.0%
expm1-udef0.0%
Applied egg-rr0.0%
expm1-def0.0%
expm1-log1p98.8%
associate-/l*98.8%
Simplified98.8%
div-inv98.8%
pow-flip98.8%
metadata-eval98.8%
Applied egg-rr98.8%
if -1.8600000000000001 < x Initial program 99.9%
distribute-lft-in99.9%
Applied egg-rr99.9%
+-commutative99.9%
distribute-lft-out99.9%
fma-udef99.9%
Simplified99.9%
Taylor expanded in x around 0 99.7%
associate-*r*99.7%
*-commutative99.7%
unpow-199.7%
metadata-eval99.7%
pow-sqr99.7%
rem-sqrt-square99.7%
metadata-eval99.7%
pow-sqr99.7%
fabs-sqr99.7%
pow-sqr99.7%
metadata-eval99.7%
*-commutative99.7%
Simplified99.7%
Final simplification99.4%
(FPCore (x) :precision binary64 (fabs (* (pow PI -0.5) (* x 2.0))))
double code(double x) {
    /* Linear term only: |pi^-0.5 * 2x|, in the reciprocal-power form
     * Herbie emitted. */
    double invSqrtPi = pow(M_PI, -0.5);
    double doubled = x * 2.0;
    return fabs(invSqrtPi * doubled);
}
public static double code(double x) {
    // Linear term only: |pi^-0.5 * 2x|, in the reciprocal-power form
    // Herbie emitted.
    double invSqrtPi = Math.pow(Math.PI, -0.5);
    double doubled = x * 2.0;
    return Math.abs(invSqrtPi * doubled);
}
def code(x): return math.fabs((math.pow(math.pi, -0.5) * (x * 2.0)))
function code(x) return abs(Float64((pi ^ -0.5) * Float64(x * 2.0))) end
function tmp = code(x) tmp = abs(((pi ^ -0.5) * (x * 2.0))); end
code[x_] := N[Abs[N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x * 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|{\pi}^{-0.5} \cdot \left(x \cdot 2\right)\right|
\end{array}
Initial program 99.9%
distribute-lft-in99.9%
Applied egg-rr99.9%
+-commutative99.9%
distribute-lft-out99.9%
fma-udef99.9%
Simplified99.9%
Taylor expanded in x around 0 68.6%
associate-*r*68.6%
*-commutative68.6%
unpow-168.6%
metadata-eval68.6%
pow-sqr68.6%
rem-sqrt-square68.6%
metadata-eval68.6%
pow-sqr68.6%
fabs-sqr68.6%
pow-sqr68.6%
metadata-eval68.6%
*-commutative68.6%
Simplified68.6%
Final simplification68.6%
(FPCore (x) :precision binary64 (fabs (/ (* x 2.0) (sqrt PI))))
double code(double x) {
    /* Linear term as an explicit quotient: |2x / sqrt(pi)|. */
    double doubled = x * 2.0;
    return fabs(doubled / sqrt(M_PI));
}
public static double code(double x) {
    // Linear term as an explicit quotient: |2x / sqrt(pi)|.
    double doubled = x * 2.0;
    return Math.abs(doubled / Math.sqrt(Math.PI));
}
def code(x): return math.fabs(((x * 2.0) / math.sqrt(math.pi)))
function code(x) return abs(Float64(Float64(x * 2.0) / sqrt(pi))) end
function tmp = code(x) tmp = abs(((x * 2.0) / sqrt(pi))); end
code[x_] := N[Abs[N[(N[(x * 2.0), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{x \cdot 2}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.9%
distribute-rgt-in99.9%
Applied egg-rr99.4%
+-commutative99.4%
*-rgt-identity99.4%
associate-*l/99.4%
*-lft-identity99.4%
times-frac99.4%
/-rgt-identity99.4%
*-rgt-identity99.4%
*-lft-identity99.4%
times-frac99.9%
/-rgt-identity99.9%
Simplified99.4%
Taylor expanded in x around 0 68.1%
*-commutative68.1%
Simplified68.1%
Final simplification68.1%
herbie shell --seed 2023174
;; Source problem as given to Herbie: degree-7 series for erfi(|x|) on the
;; branch x <= 0.5, with the powers spelled out as repeated |x| products.
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))