
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/*
 * Herbie-generated binary64 kernel.
 *
 * Evaluates |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
 * The coefficients equal 2*(1, 1/3, 1/10, 1/42), i.e. the truncated
 * Maclaurin series of erfi(|x|) (NOTE(review): confirm against the spec
 * this report was generated from). Result is always non-negative.
 */
double code(double x) {
    double ax = fabs(x);            /* |x|, reused for every odd power */
    double ax3 = (ax * ax) * ax;    /* |x|^3, same association as original t_0 */
    double ax5 = (ax3 * ax) * ax;   /* |x|^5, same association as original t_1 */
    double ax7 = (ax5 * ax) * ax;   /* |x|^7 */
    double series = ((2.0 * ax + (2.0 / 3.0) * ax3) + (1.0 / 5.0) * ax5)
                    + (1.0 / 21.0) * ax7;
    return fabs((1.0 / sqrt((double) M_PI)) * series);
}
/**
 * Herbie-generated binary64 kernel.
 *
 * <p>Evaluates |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
 * Association order of the sums and products matches the FPCore above, so the
 * rounding behavior is unchanged.
 *
 * @param x any finite or non-finite double; only |x| is used
 * @return the non-negative series value
 */
public static double code(double x) {
    final double ax = Math.abs(x);          // |x|
    final double ax3 = (ax * ax) * ax;      // |x|^3 (original t_0)
    final double ax5 = (ax3 * ax) * ax;     // |x|^5 (original t_1)
    final double ax7 = (ax5 * ax) * ax;     // |x|^7
    double series = ((2.0 * ax + (2.0 / 3.0) * ax3) + (1.0 / 5.0) * ax5)
            + (1.0 / 21.0) * ax7;
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * series);
}
def code(x):
    """Herbie-generated binary64 kernel (fixed: source was collapsed onto one
    line, which is not valid Python).

    Evaluates ``|(1/sqrt(pi)) * (2|x| + (2/3)|x|**3 + (1/5)|x|**5 + (1/21)|x|**7)|``.
    The coefficients equal 2*(1, 1/3, 1/10, 1/42), matching the truncated
    Maclaurin series of erfi(|x|) (NOTE(review): confirm against the original
    spec).

    Args:
        x: a real number; only ``|x|`` is used.

    Returns:
        A non-negative float.
    """
    # t_0 = |x|^3, t_1 = |x|^5 — association order preserved from the FPCore.
    t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x)
    t_1 = (t_0 * math.fabs(x)) * math.fabs(x)
    return math.fabs(
        (1.0 / math.sqrt(math.pi))
        * (
            ((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0))
            + ((1.0 / 5.0) * t_1)
            + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x)))
        )
    )
# Herbie-generated binary64 kernel (fixed: source was collapsed onto a single
# line with no statement separators, which is not valid Julia).
# Evaluates |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|;
# every intermediate is forced to Float64 to mirror double rounding.
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))   # |x|^3
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))      # |x|^5
    return abs(Float64(Float64(1.0 / sqrt(pi)) *
        Float64(Float64(Float64(Float64(2.0 * abs(x)) +
                                Float64(Float64(2.0 / 3.0) * t_0)) +
                        Float64(Float64(1.0 / 5.0) * t_1)) +
                Float64(Float64(1.0 / 21.0) *
                        Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
% Herbie-generated binary64 kernel:
% |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
function tmp = code(x)
  ax = abs(x);                 % |x|, hoisted and reused
  t_0 = (ax * ax) * ax;        % |x|^3
  t_1 = (t_0 * ax) * ax;       % |x|^5
  tmp = abs((1.0 / sqrt(pi)) * ((((2.0 * ax) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * ax) * ax))));
end
(* Herbie-generated Mathematica form of the binary64 kernel:
   |(1/Sqrt[Pi]) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
   Every intermediate is wrapped in N[..., $MachinePrecision] to mirror the
   per-operation rounding of IEEE double arithmetic; t$95$0 = |x|^3 and
   t$95$1 = |x|^5 correspond to t_0 and t_1 in the FPCore above. *)
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* First listed alternative — byte-identical to the initial program:
   |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|,
   with t_0 = |x|^3 and t_1 = |x|^5. Herbie appears to re-emit the input
   program as its first alternative. */
double code(double x) {
double t_0 = (fabs(x) * fabs(x)) * fabs(x);
double t_1 = (t_0 * fabs(x)) * fabs(x);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
// Java rendering of the same kernel as the C version above:
// |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|,
// with t_0 = |x|^3 and t_1 = |x|^5. Association order matches the FPCore.
public static double code(double x) {
double t_0 = (Math.abs(x) * Math.abs(x)) * Math.abs(x);
double t_1 = (t_0 * Math.abs(x)) * Math.abs(x);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * Math.abs(x)) * Math.abs(x))))));
}
def code(x):
    """Herbie-generated binary64 kernel (fixed: source was collapsed onto one
    line, which is not valid Python).

    Evaluates ``|(1/sqrt(pi)) * (2|x| + (2/3)|x|**3 + (1/5)|x|**5 + (1/21)|x|**7)|``
    with ``t_0 = |x|**3`` and ``t_1 = |x|**5``; association order is preserved
    from the FPCore so rounding behavior is unchanged.

    Args:
        x: a real number; only ``|x|`` is used.

    Returns:
        A non-negative float.
    """
    t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x)
    t_1 = (t_0 * math.fabs(x)) * math.fabs(x)
    return math.fabs(
        (1.0 / math.sqrt(math.pi))
        * (
            ((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0))
            + ((1.0 / 5.0) * t_1)
            + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x)))
        )
    )
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (* (fabs x) (* x x))) (t_1 (* (fabs x) (* (fabs x) t_0))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* 0.6666666666666666 t_0)) (* 0.2 t_1))
(* 0.047619047619047616 (* (fabs x) (* (fabs x) t_1))))))))
/* Alternative kernel: same series, but t_0 = |x| * (x*x) (equal to |x|^3
   since x*x is non-negative) and t_1 = |x|^2 * t_0 = |x|^5, with the
   rational coefficients pre-evaluated to decimal literals
   (0.666... = 2/3, 0.2 = 1/5, 0.047619... = 1/21). */
double code(double x) {
double t_0 = fabs(x) * (x * x);
double t_1 = fabs(x) * (fabs(x) * t_0);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + (0.6666666666666666 * t_0)) + (0.2 * t_1)) + (0.047619047619047616 * (fabs(x) * (fabs(x) * t_1))))));
}
// Java rendering of the alternative above: t_0 = |x| * (x*x) == |x|^3,
// t_1 = |x|^2 * t_0 == |x|^5, coefficients as decimal literals
// (0.666... = 2/3, 0.2 = 1/5, 0.047619... = 1/21).
public static double code(double x) {
double t_0 = Math.abs(x) * (x * x);
double t_1 = Math.abs(x) * (Math.abs(x) * t_0);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + (0.6666666666666666 * t_0)) + (0.2 * t_1)) + (0.047619047619047616 * (Math.abs(x) * (Math.abs(x) * t_1))))));
}
def code(x): t_0 = math.fabs(x) * (x * x) t_1 = math.fabs(x) * (math.fabs(x) * t_0) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + (0.6666666666666666 * t_0)) + (0.2 * t_1)) + (0.047619047619047616 * (math.fabs(x) * (math.fabs(x) * t_1))))))
function code(x) t_0 = Float64(abs(x) * Float64(x * x)) t_1 = Float64(abs(x) * Float64(abs(x) * t_0)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(0.6666666666666666 * t_0)) + Float64(0.2 * t_1)) + Float64(0.047619047619047616 * Float64(abs(x) * Float64(abs(x) * t_1)))))) end
function tmp = code(x) t_0 = abs(x) * (x * x); t_1 = abs(x) * (abs(x) * t_0); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + (0.6666666666666666 * t_0)) + (0.2 * t_1)) + (0.047619047619047616 * (abs(x) * (abs(x) * t_1)))))); end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.2 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left|x\right| \cdot \left(x \cdot x\right)\\
t_1 := \left|x\right| \cdot \left(\left|x\right| \cdot t_0\right)\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + 0.6666666666666666 \cdot t_0\right) + 0.2 \cdot t_1\right) + 0.047619047619047616 \cdot \left(\left|x\right| \cdot \left(\left|x\right| \cdot t_1\right)\right)\right)\right|
\end{array}
\end{array}
Initial program 99.5%
Final simplification 99.5%
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* x x) (* x x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+
(fma 2.0 (fabs x) (* 0.6666666666666666 (* (fabs x) (* x x))))
(* 0.2 (* (fabs x) t_0)))
(* 0.047619047619047616 (* (fabs x) (* (* x x) t_0))))))))
/* Alternative using a fused multiply-add: the 2|x| + (2/3)|x|^3 head is
   computed as fma(2.0, |x|, ...), avoiding one intermediate rounding.
   t_0 = (x*x)*(x*x) = x^4 feeds both the |x|^5 term (|x| * t_0) and the
   |x|^7 term (|x| * x^2 * t_0). Requires C99 fma() from <math.h>. */
double code(double x) {
double t_0 = (x * x) * (x * x);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((fma(2.0, fabs(x), (0.6666666666666666 * (fabs(x) * (x * x)))) + (0.2 * (fabs(x) * t_0))) + (0.047619047619047616 * (fabs(x) * ((x * x) * t_0))))));
}
function code(x) t_0 = Float64(Float64(x * x) * Float64(x * x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(fma(2.0, abs(x), Float64(0.6666666666666666 * Float64(abs(x) * Float64(x * x)))) + Float64(0.2 * Float64(abs(x) * t_0))) + Float64(0.047619047619047616 * Float64(abs(x) * Float64(Float64(x * x) * t_0)))))) end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision] + N[(0.6666666666666666 * N[(N[Abs[x], $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2 * N[(N[Abs[x], $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\mathsf{fma}\left(2, \left|x\right|, 0.6666666666666666 \cdot \left(\left|x\right| \cdot \left(x \cdot x\right)\right)\right) + 0.2 \cdot \left(\left|x\right| \cdot t_0\right)\right) + 0.047619047619047616 \cdot \left(\left|x\right| \cdot \left(\left(x \cdot x\right) \cdot t_0\right)\right)\right)\right|
\end{array}
\end{array}
Initial program 99.5%
Simplified99.5%
Final simplification99.5%
(FPCore (x)
:precision binary64
(fabs
(*
(/ x (sqrt PI))
(+
(+ 2.0 (* 0.6666666666666666 (* x x)))
(+ (* 0.2 (pow x 4.0)) (* 0.047619047619047616 (pow x 6.0)))))))
/*
 * Polynomial alternative: |x/sqrt(pi) * (2 + (2/3)x^2 + (1/5)x^4 + (1/21)x^6)|.
 * Only even powers appear inside the parentheses, so the inner fabs calls of
 * the baseline form are unnecessary; the outer fabs handles the sign of x.
 */
double code(double x) {
    double scaled = x / sqrt((double) M_PI);
    double quadratic = 2.0 + 0.6666666666666666 * (x * x);
    double quartic = 0.2 * pow(x, 4.0);
    double sextic = 0.047619047619047616 * pow(x, 6.0);
    return fabs(scaled * (quadratic + (quartic + sextic)));
}
// Polynomial alternative: |x/sqrt(pi) * (2 + (2/3)x^2 + (1/5)x^4 + (1/21)x^6)|.
// Even powers only inside the sum; the outer Math.abs handles the sign of x.
public static double code(double x) {
return Math.abs(((x / Math.sqrt(Math.PI)) * ((2.0 + (0.6666666666666666 * (x * x))) + ((0.2 * Math.pow(x, 4.0)) + (0.047619047619047616 * Math.pow(x, 6.0))))));
}
def code(x):
    """Polynomial alternative of the binary64 kernel.

    Evaluates ``|x/sqrt(pi) * (2 + (2/3)x**2 + (1/5)x**4 + (1/21)x**6)|``;
    behaviorally identical to the original one-line form, association
    order preserved.
    """
    scaled = x / math.sqrt(math.pi)
    quadratic = 2.0 + (0.6666666666666666 * (x * x))
    tail = (0.2 * math.pow(x, 4.0)) + (0.047619047619047616 * math.pow(x, 6.0))
    return math.fabs(scaled * (quadratic + tail))
function code(x) return abs(Float64(Float64(x / sqrt(pi)) * Float64(Float64(2.0 + Float64(0.6666666666666666 * Float64(x * x))) + Float64(Float64(0.2 * (x ^ 4.0)) + Float64(0.047619047619047616 * (x ^ 6.0)))))) end
function tmp = code(x) tmp = abs(((x / sqrt(pi)) * ((2.0 + (0.6666666666666666 * (x * x))) + ((0.2 * (x ^ 4.0)) + (0.047619047619047616 * (x ^ 6.0)))))); end
code[x_] := N[Abs[N[(N[(x / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 + N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{x}{\sqrt{\pi}} \cdot \left(\left(2 + 0.6666666666666666 \cdot \left(x \cdot x\right)\right) + \left(0.2 \cdot {x}^{4} + 0.047619047619047616 \cdot {x}^{6}\right)\right)\right|
\end{array}
Initial program 99.5%
Simplified99.3%
*-un-lft-identity99.3%
Applied egg-rr99.3%
*-lft-identity99.3%
unpow199.3%
sqr-pow32.1%
fabs-sqr32.1%
sqr-pow99.3%
unpow199.3%
Simplified99.3%
Taylor expanded in x around 0 99.3%
fma-udef87.6%
Applied egg-rr99.3%
Final simplification99.3%
(FPCore (x) :precision binary64 (fabs (* (/ x (sqrt PI)) (+ 2.0 (+ (* 0.2 (pow x 4.0)) (* 0.047619047619047616 (pow x 6.0)))))))
/* Truncated alternative: the (2/3)x^2 term is dropped relative to the
   polynomial form above, leaving |x/sqrt(pi) * (2 + (1/5)x^4 + (1/21)x^6)|
   (accuracy traded for one fewer term, per the surrounding report). */
double code(double x) {
return fabs(((x / sqrt(((double) M_PI))) * (2.0 + ((0.2 * pow(x, 4.0)) + (0.047619047619047616 * pow(x, 6.0))))));
}
public static double code(double x) {
return Math.abs(((x / Math.sqrt(Math.PI)) * (2.0 + ((0.2 * Math.pow(x, 4.0)) + (0.047619047619047616 * Math.pow(x, 6.0))))));
}
def code(x): return math.fabs(((x / math.sqrt(math.pi)) * (2.0 + ((0.2 * math.pow(x, 4.0)) + (0.047619047619047616 * math.pow(x, 6.0))))))
function code(x) return abs(Float64(Float64(x / sqrt(pi)) * Float64(2.0 + Float64(Float64(0.2 * (x ^ 4.0)) + Float64(0.047619047619047616 * (x ^ 6.0)))))) end
function tmp = code(x) tmp = abs(((x / sqrt(pi)) * (2.0 + ((0.2 * (x ^ 4.0)) + (0.047619047619047616 * (x ^ 6.0)))))); end
code[x_] := N[Abs[N[(N[(x / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(2.0 + N[(N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{x}{\sqrt{\pi}} \cdot \left(2 + \left(0.2 \cdot {x}^{4} + 0.047619047619047616 \cdot {x}^{6}\right)\right)\right|
\end{array}
Initial program 99.5%
Simplified99.3%
*-un-lft-identity99.3%
Applied egg-rr99.3%
*-lft-identity99.3%
unpow199.3%
sqr-pow32.1%
fabs-sqr32.1%
sqr-pow99.3%
unpow199.3%
Simplified99.3%
Taylor expanded in x around 0 99.3%
Taylor expanded in x around 0 98.5%
Final simplification98.5%
(FPCore (x) :precision binary64 (fabs (/ (fma 2.0 x (* 0.047619047619047616 (pow x 7.0))) (sqrt PI))))
/* Two-term alternative: |fma(2, x, (1/21)x^7) / sqrt(pi)| — only the linear
   and x^7 terms remain, fused into a single fma. Requires C99 fma(). */
double code(double x) {
return fabs((fma(2.0, x, (0.047619047619047616 * pow(x, 7.0))) / sqrt(((double) M_PI))));
}
function code(x) return abs(Float64(fma(2.0, x, Float64(0.047619047619047616 * (x ^ 7.0))) / sqrt(pi))) end
code[x_] := N[Abs[N[(N[(2.0 * x + N[(0.047619047619047616 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{\mathsf{fma}\left(2, x, 0.047619047619047616 \cdot {x}^{7}\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.5%
Simplified99.0%
Taylor expanded in x around inf 98.1%
Final simplification98.1%
(FPCore (x)
:precision binary64
(if (<= x -2.6)
(fabs (* x (/ (* 0.047619047619047616 (pow x 6.0)) (sqrt PI))))
(fabs
(*
(* x (pow PI -0.5))
(+ (+ 2.0 (* 0.6666666666666666 (* x x))) (* 0.2 (pow x 4.0)))))))
/* Piecewise alternative, split at x = -2.6:
   - x <= -2.6: only the dominant (1/21)x^7/sqrt(pi) term is kept
     (written as x * ((1/21)x^6 / sqrt(pi)));
   - otherwise: the degree-5 truncation x * pi^-0.5 * (2 + (2/3)x^2 + (1/5)x^4).
   NOTE(review): the split threshold comes from Herbie's regime search; the
   branches are approximations of the same series, not exactly equal at -2.6. */
double code(double x) {
double tmp;
if (x <= -2.6) {
tmp = fabs((x * ((0.047619047619047616 * pow(x, 6.0)) / sqrt(((double) M_PI)))));
} else {
tmp = fabs(((x * pow(((double) M_PI), -0.5)) * ((2.0 + (0.6666666666666666 * (x * x))) + (0.2 * pow(x, 4.0)))));
}
return tmp;
}
// Piecewise alternative, split at x = -2.6 (Java rendering):
// x <= -2.6 keeps only the (1/21)x^7/sqrt(pi) term; otherwise the degree-5
// polynomial x * pi^-0.5 * (2 + (2/3)x^2 + (1/5)x^4) is used.
public static double code(double x) {
double tmp;
if (x <= -2.6) {
tmp = Math.abs((x * ((0.047619047619047616 * Math.pow(x, 6.0)) / Math.sqrt(Math.PI))));
} else {
tmp = Math.abs(((x * Math.pow(Math.PI, -0.5)) * ((2.0 + (0.6666666666666666 * (x * x))) + (0.2 * Math.pow(x, 4.0)))));
}
return tmp;
}
def code(x):
    """Piecewise binary64 kernel (fixed: source had the if/else collapsed onto
    one line, which is not valid Python), split at ``x = -2.6``.

    For ``x <= -2.6`` only the dominant ``(1/21)*x**7/sqrt(pi)`` term is kept;
    otherwise the degree-5 truncation ``x * pi**-0.5 * (2 + (2/3)x**2 + (1/5)x**4)``
    is used. Returns a non-negative float.
    """
    tmp = 0
    if x <= -2.6:
        tmp = math.fabs(
            (x * ((0.047619047619047616 * math.pow(x, 6.0)) / math.sqrt(math.pi)))
        )
    else:
        tmp = math.fabs(
            (x * math.pow(math.pi, -0.5))
            * ((2.0 + (0.6666666666666666 * (x * x))) + (0.2 * math.pow(x, 4.0)))
        )
    return tmp
function code(x) tmp = 0.0 if (x <= -2.6) tmp = abs(Float64(x * Float64(Float64(0.047619047619047616 * (x ^ 6.0)) / sqrt(pi)))); else tmp = abs(Float64(Float64(x * (pi ^ -0.5)) * Float64(Float64(2.0 + Float64(0.6666666666666666 * Float64(x * x))) + Float64(0.2 * (x ^ 4.0))))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -2.6) tmp = abs((x * ((0.047619047619047616 * (x ^ 6.0)) / sqrt(pi)))); else tmp = abs(((x * (pi ^ -0.5)) * ((2.0 + (0.6666666666666666 * (x * x))) + (0.2 * (x ^ 4.0))))); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -2.6], N[Abs[N[(x * N[(N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[(x * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 + N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.6:\\
\;\;\;\;\left|x \cdot \frac{0.047619047619047616 \cdot {x}^{6}}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\left(x \cdot {\pi}^{-0.5}\right) \cdot \left(\left(2 + 0.6666666666666666 \cdot \left(x \cdot x\right)\right) + 0.2 \cdot {x}^{4}\right)\right|\\
\end{array}
\end{array}
if x < -2.60000000000000009: Initial program 98.8%
Simplified98.7%
Taylor expanded in x around inf 98.3%
*-commutative98.3%
associate-*l*98.4%
associate-*l*99.3%
*-commutative99.3%
Simplified99.3%
expm1-log1p-u97.8%
expm1-udef97.8%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt0.0%
*-commutative0.0%
*-commutative0.0%
sqrt-div0.0%
metadata-eval0.0%
un-div-inv0.0%
Applied egg-rr0.0%
expm1-def0.0%
expm1-log1p99.4%
associate-*r/99.4%
Simplified99.4%
if -2.60000000000000009 < x Initial program 99.9%
Simplified99.1%
*-un-lft-identity99.1%
Applied egg-rr99.1%
*-lft-identity99.1%
unpow199.1%
sqr-pow46.6%
fabs-sqr46.6%
sqr-pow99.1%
unpow199.1%
Simplified99.1%
Taylor expanded in x around 0 98.7%
fma-udef99.4%
Applied egg-rr98.7%
*-un-lft-identity98.7%
associate-*l/99.5%
pow1/299.5%
pow-flip99.5%
metadata-eval99.5%
Applied egg-rr99.5%
Final simplification99.5%
(FPCore (x)
:precision binary64
(if (<= x -2.2)
(fabs (* x (/ (* 0.047619047619047616 (pow x 6.0)) (sqrt PI))))
(fabs
(* (sqrt (/ 1.0 PI)) (+ (* x (* 0.6666666666666666 (* x x))) (* 2.0 x))))))
/* Piecewise alternative, split at x = -2.2:
   - x <= -2.2: only the (1/21)x^7/sqrt(pi) term, as x * ((1/21)x^6 / sqrt(pi));
   - otherwise: cubic truncation sqrt(1/pi) * ((2/3)x^3 + 2x). */
double code(double x) {
double tmp;
if (x <= -2.2) {
tmp = fabs((x * ((0.047619047619047616 * pow(x, 6.0)) / sqrt(((double) M_PI)))));
} else {
tmp = fabs((sqrt((1.0 / ((double) M_PI))) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
}
return tmp;
}
public static double code(double x) {
double tmp;
if (x <= -2.2) {
tmp = Math.abs((x * ((0.047619047619047616 * Math.pow(x, 6.0)) / Math.sqrt(Math.PI))));
} else {
tmp = Math.abs((Math.sqrt((1.0 / Math.PI)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
}
return tmp;
}
def code(x): tmp = 0 if x <= -2.2: tmp = math.fabs((x * ((0.047619047619047616 * math.pow(x, 6.0)) / math.sqrt(math.pi)))) else: tmp = math.fabs((math.sqrt((1.0 / math.pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x)))) return tmp
function code(x) tmp = 0.0 if (x <= -2.2) tmp = abs(Float64(x * Float64(Float64(0.047619047619047616 * (x ^ 6.0)) / sqrt(pi)))); else tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(x * Float64(0.6666666666666666 * Float64(x * x))) + Float64(2.0 * x)))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -2.2) tmp = abs((x * ((0.047619047619047616 * (x ^ 6.0)) / sqrt(pi)))); else tmp = abs((sqrt((1.0 / pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x)))); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -2.2], N[Abs[N[(x * N[(N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.2:\\
\;\;\;\;\left|x \cdot \frac{0.047619047619047616 \cdot {x}^{6}}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(0.6666666666666666 \cdot \left(x \cdot x\right)\right) + 2 \cdot x\right)\right|\\
\end{array}
\end{array}
if x < -2.2000000000000002Initial program 98.8%
Simplified98.7%
Taylor expanded in x around inf 98.3%
*-commutative98.3%
associate-*l*98.4%
associate-*l*99.3%
*-commutative99.3%
Simplified99.3%
expm1-log1p-u97.8%
expm1-udef97.8%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt0.0%
*-commutative0.0%
*-commutative0.0%
sqrt-div0.0%
metadata-eval0.0%
un-div-inv0.0%
Applied egg-rr0.0%
expm1-def0.0%
expm1-log1p99.4%
associate-*r/99.4%
Simplified99.4%
if -2.2000000000000002 < x Initial program 99.9%
Simplified99.9%
Taylor expanded in x around 0 99.4%
associate-*r*99.4%
unpow299.4%
associate-*r*99.4%
distribute-rgt-out99.4%
+-commutative99.4%
*-commutative99.4%
associate-*l*99.4%
*-commutative99.4%
*-commutative99.4%
distribute-lft-in99.4%
fma-udef99.4%
Simplified99.4%
fma-udef99.4%
distribute-rgt-in99.4%
Applied egg-rr99.4%
Final simplification99.4%
(FPCore (x)
:precision binary64
(if (<= x -2.2)
(fabs (* 0.047619047619047616 (/ (pow x 7.0) (sqrt PI))))
(fabs
(* (sqrt (/ 1.0 PI)) (+ (* x (* 0.6666666666666666 (* x x))) (* 2.0 x))))))
/* Variant of the x = -2.2 split above: the large-|x| branch is reassociated
   as (1/21) * (x^7 / sqrt(pi)) — coefficient applied after the division —
   while the else branch is the same cubic sqrt(1/pi) * ((2/3)x^3 + 2x). */
double code(double x) {
double tmp;
if (x <= -2.2) {
tmp = fabs((0.047619047619047616 * (pow(x, 7.0) / sqrt(((double) M_PI)))));
} else {
tmp = fabs((sqrt((1.0 / ((double) M_PI))) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
}
return tmp;
}
public static double code(double x) {
double tmp;
if (x <= -2.2) {
tmp = Math.abs((0.047619047619047616 * (Math.pow(x, 7.0) / Math.sqrt(Math.PI))));
} else {
tmp = Math.abs((Math.sqrt((1.0 / Math.PI)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
}
return tmp;
}
def code(x): tmp = 0 if x <= -2.2: tmp = math.fabs((0.047619047619047616 * (math.pow(x, 7.0) / math.sqrt(math.pi)))) else: tmp = math.fabs((math.sqrt((1.0 / math.pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x)))) return tmp
function code(x) tmp = 0.0 if (x <= -2.2) tmp = abs(Float64(0.047619047619047616 * Float64((x ^ 7.0) / sqrt(pi)))); else tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(x * Float64(0.6666666666666666 * Float64(x * x))) + Float64(2.0 * x)))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -2.2) tmp = abs((0.047619047619047616 * ((x ^ 7.0) / sqrt(pi)))); else tmp = abs((sqrt((1.0 / pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x)))); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -2.2], N[Abs[N[(0.047619047619047616 * N[(N[Power[x, 7.0], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.2:\\
\;\;\;\;\left|0.047619047619047616 \cdot \frac{{x}^{7}}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(0.6666666666666666 \cdot \left(x \cdot x\right)\right) + 2 \cdot x\right)\right|\\
\end{array}
\end{array}
if x < -2.2000000000000002Initial program 98.8%
Simplified98.9%
Taylor expanded in x around inf 98.3%
associate-*r*98.3%
Simplified98.3%
expm1-log1p-u0.0%
expm1-udef0.0%
associate-*l*0.0%
sqrt-div0.0%
metadata-eval0.0%
un-div-inv0.0%
Applied egg-rr0.0%
expm1-def0.0%
expm1-log1p98.4%
Simplified98.4%
if -2.2000000000000002 < x Initial program 99.9%
Simplified99.9%
Taylor expanded in x around 0 99.4%
associate-*r*99.4%
unpow299.4%
associate-*r*99.4%
distribute-rgt-out99.4%
+-commutative99.4%
*-commutative99.4%
associate-*l*99.4%
*-commutative99.4%
*-commutative99.4%
distribute-lft-in99.4%
fma-udef99.4%
Simplified99.4%
fma-udef99.4%
distribute-rgt-in99.4%
Applied egg-rr99.4%
Final simplification99.1%
(FPCore (x)
:precision binary64
(if (<= x -2.2)
(fabs (/ (* 0.047619047619047616 (pow x 7.0)) (sqrt PI)))
(fabs
(* (sqrt (/ 1.0 PI)) (+ (* x (* 0.6666666666666666 (* x x))) (* 2.0 x))))))
/* Third association of the x = -2.2 split: large-|x| branch computed as
   ((1/21) * x^7) / sqrt(pi) — multiply first, divide last; else branch is the
   same cubic sqrt(1/pi) * ((2/3)x^3 + 2x). NOTE(review): ((1/21)*x^7) can
   overflow for |x| slightly smaller than where x^7/sqrt(pi) would — the three
   associations differ only in rounding/overflow at extremes. */
double code(double x) {
double tmp;
if (x <= -2.2) {
tmp = fabs(((0.047619047619047616 * pow(x, 7.0)) / sqrt(((double) M_PI))));
} else {
tmp = fabs((sqrt((1.0 / ((double) M_PI))) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
}
return tmp;
}
public static double code(double x) {
double tmp;
if (x <= -2.2) {
tmp = Math.abs(((0.047619047619047616 * Math.pow(x, 7.0)) / Math.sqrt(Math.PI)));
} else {
tmp = Math.abs((Math.sqrt((1.0 / Math.PI)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
}
return tmp;
}
def code(x): tmp = 0 if x <= -2.2: tmp = math.fabs(((0.047619047619047616 * math.pow(x, 7.0)) / math.sqrt(math.pi))) else: tmp = math.fabs((math.sqrt((1.0 / math.pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x)))) return tmp
function code(x) tmp = 0.0 if (x <= -2.2) tmp = abs(Float64(Float64(0.047619047619047616 * (x ^ 7.0)) / sqrt(pi))); else tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(x * Float64(0.6666666666666666 * Float64(x * x))) + Float64(2.0 * x)))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= -2.2) tmp = abs(((0.047619047619047616 * (x ^ 7.0)) / sqrt(pi))); else tmp = abs((sqrt((1.0 / pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x)))); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, -2.2], N[Abs[N[(N[(0.047619047619047616 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -2.2:\\
\;\;\;\;\left|\frac{0.047619047619047616 \cdot {x}^{7}}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(0.6666666666666666 \cdot \left(x \cdot x\right)\right) + 2 \cdot x\right)\right|\\
\end{array}
\end{array}
if x < -2.2000000000000002Initial program 98.8%
Simplified98.9%
Taylor expanded in x around inf 98.3%
associate-*r*98.3%
Simplified98.3%
sqrt-div98.3%
metadata-eval98.3%
un-div-inv98.5%
Applied egg-rr98.5%
if -2.2000000000000002 < x Initial program 99.9%
Simplified99.9%
Taylor expanded in x around 0 99.4%
associate-*r*99.4%
unpow299.4%
associate-*r*99.4%
distribute-rgt-out99.4%
+-commutative99.4%
*-commutative99.4%
associate-*l*99.4%
*-commutative99.4%
*-commutative99.4%
distribute-lft-in99.4%
fma-udef99.4%
Simplified99.4%
fma-udef99.4%
distribute-rgt-in99.4%
Applied egg-rr99.4%
Final simplification99.1%
(FPCore (x)
:precision binary64
(let* ((t_0 (* 0.6666666666666666 (* x x))))
(if (<= x -1.35e+154)
(fabs (* 2.0 (sqrt (/ (* x x) PI))))
(fabs (* (sqrt (/ 1.0 PI)) (* x (/ (- (* t_0 t_0) 4.0) (- t_0 2.0))))))))
/* Piecewise alternative, split at x = -1.35e+154:
   - x <= -1.35e154: 2*sqrt(x^2/pi) == 2|x|/sqrt(pi), the leading term.
     NOTE(review): (1.35e154)^2 ~ 1.82e308 exceeds DBL_MAX (~1.80e308), so
     x*x overflows to +inf in this branch and the result is +inf — confirm
     this regime is intentional (inputs there may round to inf anyway).
   - otherwise: sqrt(1/pi) * x * ((t_0^2 - 4)/(t_0 - 2)) with
     t_0 = (2/3)x^2; algebraically (t_0^2-4)/(t_0-2) == t_0 + 2, i.e. the
     cubic truncation. NOTE(review): if t_0 rounds to exactly 2.0 this is
     0/0 = NaN — verify no double x in the regime produces t_0 == 2.0. */
double code(double x) {
double t_0 = 0.6666666666666666 * (x * x);
double tmp;
if (x <= -1.35e+154) {
tmp = fabs((2.0 * sqrt(((x * x) / ((double) M_PI)))));
} else {
tmp = fabs((sqrt((1.0 / ((double) M_PI))) * (x * (((t_0 * t_0) - 4.0) / (t_0 - 2.0)))));
}
return tmp;
}
public static double code(double x) {
double t_0 = 0.6666666666666666 * (x * x);
double tmp;
if (x <= -1.35e+154) {
tmp = Math.abs((2.0 * Math.sqrt(((x * x) / Math.PI))));
} else {
tmp = Math.abs((Math.sqrt((1.0 / Math.PI)) * (x * (((t_0 * t_0) - 4.0) / (t_0 - 2.0)))));
}
return tmp;
}
def code(x): t_0 = 0.6666666666666666 * (x * x) tmp = 0 if x <= -1.35e+154: tmp = math.fabs((2.0 * math.sqrt(((x * x) / math.pi)))) else: tmp = math.fabs((math.sqrt((1.0 / math.pi)) * (x * (((t_0 * t_0) - 4.0) / (t_0 - 2.0))))) return tmp
function code(x) t_0 = Float64(0.6666666666666666 * Float64(x * x)) tmp = 0.0 if (x <= -1.35e+154) tmp = abs(Float64(2.0 * sqrt(Float64(Float64(x * x) / pi)))); else tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(x * Float64(Float64(Float64(t_0 * t_0) - 4.0) / Float64(t_0 - 2.0))))); end return tmp end
function tmp_2 = code(x) t_0 = 0.6666666666666666 * (x * x); tmp = 0.0; if (x <= -1.35e+154) tmp = abs((2.0 * sqrt(((x * x) / pi)))); else tmp = abs((sqrt((1.0 / pi)) * (x * (((t_0 * t_0) - 4.0) / (t_0 - 2.0))))); end tmp_2 = tmp; end
code[x_] := Block[{t$95$0 = N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -1.35e+154], N[Abs[N[(2.0 * N[Sqrt[N[(N[(x * x), $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(x * N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] - 4.0), $MachinePrecision] / N[(t$95$0 - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 0.6666666666666666 \cdot \left(x \cdot x\right)\\
\mathbf{if}\;x \leq -1.35 \cdot 10^{+154}:\\
\;\;\;\;\left|2 \cdot \sqrt{\frac{x \cdot x}{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \frac{t_0 \cdot t_0 - 4}{t_0 - 2}\right)\right|\\
\end{array}
\end{array}
if x < -1.35000000000000003e154Initial program 100.0%
Simplified100.0%
Taylor expanded in x around 0 7.2%
*-commutative7.2%
unpow17.2%
sqr-pow0.0%
fabs-sqr0.0%
sqr-pow7.2%
unpow17.2%
*-commutative7.2%
Simplified7.2%
sqrt-div7.2%
metadata-eval7.2%
div-inv7.2%
clear-num7.2%
Applied egg-rr7.2%
expm1-log1p-u0.0%
expm1-udef0.0%
clear-num0.0%
Applied egg-rr0.0%
expm1-def0.0%
expm1-log1p7.2%
Simplified7.2%
add-sqr-sqrt0.0%
sqrt-unprod100.0%
frac-times100.0%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
if -1.35000000000000003e154 < x Initial program 99.5%
Simplified99.5%
Taylor expanded in x around 0 85.6%
associate-*r*85.6%
unpow285.6%
associate-*r*85.6%
distribute-rgt-out85.6%
+-commutative85.6%
*-commutative85.6%
associate-*l*85.6%
*-commutative85.6%
*-commutative85.6%
distribute-lft-in85.6%
fma-udef85.6%
Simplified85.6%
fma-udef85.6%
flip-+88.8%
metadata-eval88.8%
Applied egg-rr88.8%
Final simplification90.4%
(FPCore (x) :precision binary64 (fabs (* (sqrt (/ 1.0 PI)) (+ (* x (* 0.6666666666666666 (* x x))) (* 2.0 x)))))
/*
 * Cubic-only alternative: |sqrt(1/pi) * ((2/3)x^3 + 2x)|.
 * Behaviorally identical rewrite of the single-expression original;
 * association order of every operation is preserved.
 */
double code(double x) {
    double inv_sqrt_pi = sqrt(1.0 / (double) M_PI);
    double cubic_term = x * (0.6666666666666666 * (x * x));
    double linear_term = 2.0 * x;
    return fabs(inv_sqrt_pi * (cubic_term + linear_term));
}
public static double code(double x) {
return Math.abs((Math.sqrt((1.0 / Math.PI)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
}
def code(x): return math.fabs((math.sqrt((1.0 / math.pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))))
function code(x) return abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(x * Float64(0.6666666666666666 * Float64(x * x))) + Float64(2.0 * x)))) end
function tmp = code(x)
% CODE  |sqrt(1/pi) * ((2/3)*x^3 + 2*x)| — cubic truncation of the erfi series.
% Reformatted: the generated one-line layout (statements on the declaration
% line) is not valid MATLAB syntax. Arithmetic is unchanged.
    tmp = abs((sqrt((1.0 / pi)) * ((x * (0.6666666666666666 * (x * x))) + (2.0 * x))));
end
code[x_] := N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(0.6666666666666666 \cdot \left(x \cdot x\right)\right) + 2 \cdot x\right)\right|
\end{array}
Initial program 99.5%
Simplified99.5%
Taylor expanded in x around 0 87.6%
associate-*r*87.6%
unpow287.6%
associate-*r*87.6%
distribute-rgt-out87.6%
+-commutative87.6%
*-commutative87.6%
associate-*l*87.6%
*-commutative87.6%
*-commutative87.6%
distribute-lft-in87.6%
fma-udef87.6%
Simplified87.6%
fma-udef87.6%
distribute-rgt-in87.6%
Applied egg-rr87.6%
Final simplification87.6%
(FPCore (x) :precision binary64 (fabs (* (sqrt (/ 1.0 PI)) (* x (+ 2.0 (* 0.6666666666666666 (* x x)))))))
/* |sqrt(1/pi) * x * (2 + (2/3)*x^2)| — Horner-factored cubic truncation of
 * the erfi series. Same operations/order as the report's expression. */
double code(double x) {
    double scale = sqrt(1.0 / ((double) M_PI));
    double horner = x * (2.0 + (0.6666666666666666 * (x * x)));
    return fabs(scale * horner);
}
public static double code(double x) {
    // |sqrt(1/pi) * x * (2 + (2/3)*x^2)| — Horner form of the cubic erfi truncation.
    double scale = Math.sqrt(1.0 / Math.PI);
    double horner = x * (2.0 + (0.6666666666666666 * (x * x)));
    return Math.abs(scale * horner);
}
def code(x):
    """Return |sqrt(1/pi) * x * (2 + (2/3)*x**2)| — Horner form of the cubic erfi truncation."""
    scale = math.sqrt(1.0 / math.pi)
    horner = x * (2.0 + (0.6666666666666666 * (x * x)))
    return math.fabs(scale * horner)
function code(x)
    # |sqrt(1/pi) * x * (2 + (2/3)x^2)| — Horner form of the cubic erfi truncation.
    scale = sqrt(Float64(1.0 / pi))
    horner = Float64(x * Float64(2.0 + Float64(0.6666666666666666 * Float64(x * x))))
    return abs(Float64(scale * horner))
end
function tmp = code(x)
% CODE  |sqrt(1/pi) * x * (2 + (2/3)*x^2)| — Horner form of the cubic erfi truncation.
% Reformatted: the generated one-line layout is not valid MATLAB syntax.
    tmp = abs((sqrt((1.0 / pi)) * (x * (2.0 + (0.6666666666666666 * (x * x))))));
end
code[x_] := N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(x * N[(2.0 + N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(2 + 0.6666666666666666 \cdot \left(x \cdot x\right)\right)\right)\right|
\end{array}
Initial program 99.5%
Simplified99.5%
Taylor expanded in x around 0 87.6%
associate-*r*87.6%
unpow287.6%
associate-*r*87.6%
distribute-rgt-out87.6%
+-commutative87.6%
*-commutative87.6%
associate-*l*87.6%
*-commutative87.6%
*-commutative87.6%
distribute-lft-in87.6%
fma-udef87.6%
Simplified87.6%
fma-udef87.6%
Applied egg-rr87.6%
Final simplification87.6%
(FPCore (x) :precision binary64 (if (<= x -1.75) (fabs (* 0.6666666666666666 (* (* x x) (/ x (sqrt PI))))) (fabs (* 2.0 (* x (pow PI -0.5))))))
/* Two-regime Herbie candidate: for x <= -1.75 use the cubic term
 * (2/3)*x^2*(x/sqrt(pi)); otherwise the linear term 2*x*pi^-0.5.
 * Guard-clause style; same arithmetic per branch as the report. */
double code(double x) {
    if (x <= -1.75) {
        return fabs(0.6666666666666666 * ((x * x) * (x / sqrt(((double) M_PI)))));
    }
    return fabs(2.0 * (x * pow(((double) M_PI), -0.5)));
}
public static double code(double x) {
    // Two-regime candidate: cubic term below -1.75, linear term otherwise.
    if (x <= -1.75) {
        return Math.abs(0.6666666666666666 * ((x * x) * (x / Math.sqrt(Math.PI))));
    }
    return Math.abs(2.0 * (x * Math.pow(Math.PI, -0.5)));
}
def code(x):
    """Two-regime candidate: |(2/3)*x^2*(x/sqrt(pi))| for x <= -1.75,
    else |2*x*pi**-0.5|.

    Reformatted: the generated one-line layout (assignment, if/else and
    return fused on one line) is not valid Python syntax. Arithmetic per
    branch is unchanged.
    """
    if x <= -1.75:
        return math.fabs(0.6666666666666666 * ((x * x) * (x / math.sqrt(math.pi))))
    return math.fabs(2.0 * (x * math.pow(math.pi, -0.5)))
function code(x)
    # Two-regime candidate (reformatted: one-line layout with unseparated
    # statements is not valid Julia). Arithmetic per branch is unchanged.
    if x <= -1.75
        return abs(Float64(0.6666666666666666 * Float64(Float64(x * x) * Float64(x / sqrt(pi)))))
    else
        return abs(Float64(2.0 * Float64(x * (pi ^ -0.5))))
    end
end
function tmp_2 = code(x)
% CODE  Two-regime candidate: cubic term below -1.75, linear term otherwise.
% Reformatted: the generated one-line layout is not valid MATLAB syntax; the
% redundant intermediate tmp was folded into the output variable.
    if (x <= -1.75)
        tmp_2 = abs((0.6666666666666666 * ((x * x) * (x / sqrt(pi)))));
    else
        tmp_2 = abs((2.0 * (x * (pi ^ -0.5))));
    end
end
code[x_] := If[LessEqual[x, -1.75], N[Abs[N[(0.6666666666666666 * N[(N[(x * x), $MachinePrecision] * N[(x / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(2.0 * N[(x * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.75:\\
\;\;\;\;\left|0.6666666666666666 \cdot \left(\left(x \cdot x\right) \cdot \frac{x}{\sqrt{\pi}}\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|2 \cdot \left(x \cdot {\pi}^{-0.5}\right)\right|\\
\end{array}
\end{array}
if x < -1.75Initial program 98.8%
Simplified98.7%
Taylor expanded in x around 0 61.5%
associate-*r*61.5%
unpow261.5%
associate-*r*61.5%
distribute-rgt-out61.5%
+-commutative61.5%
*-commutative61.5%
associate-*l*61.5%
*-commutative61.5%
*-commutative61.5%
distribute-lft-in61.5%
fma-udef61.5%
Simplified61.5%
Taylor expanded in x around inf 61.5%
unpow261.5%
*-commutative61.5%
associate-*l*61.5%
Simplified61.5%
associate-*r*61.5%
sqrt-div61.5%
metadata-eval61.5%
associate-/r/61.5%
associate-*l/61.5%
*-un-lft-identity61.5%
Applied egg-rr61.5%
div-inv61.5%
clear-num61.5%
associate-*r*61.5%
*-commutative61.5%
associate-*l*61.5%
Applied egg-rr61.5%
if -1.75 < x Initial program 99.9%
Simplified99.9%
Taylor expanded in x around 0 98.7%
*-commutative98.7%
unpow198.7%
sqr-pow46.4%
fabs-sqr46.4%
sqr-pow98.7%
unpow198.7%
*-commutative98.7%
Simplified98.7%
sqrt-div98.7%
metadata-eval98.7%
div-inv97.9%
clear-num97.9%
Applied egg-rr97.9%
associate-/r/98.7%
pow1/298.7%
pow-flip98.7%
metadata-eval98.7%
Applied egg-rr98.7%
Final simplification87.1%
(FPCore (x) :precision binary64 (if (<= x -1.5e-5) (fabs (* 2.0 (sqrt (/ (* x x) PI)))) (fabs (* 2.0 (* x (pow PI -0.5))))))
/* Two-regime Herbie candidate split at -1.5e-5: sqrt(x^2/pi) form for the
 * negative regime, 2*x*pi^-0.5 otherwise. Guard-clause style; same
 * arithmetic per branch as the report. */
double code(double x) {
    if (x <= -1.5e-5) {
        return fabs(2.0 * sqrt((x * x) / ((double) M_PI)));
    }
    return fabs(2.0 * (x * pow(((double) M_PI), -0.5)));
}
public static double code(double x) {
    // Two-regime candidate split at -1.5e-5; sqrt(x*x/pi) form on the left.
    if (x <= -1.5e-5) {
        return Math.abs(2.0 * Math.sqrt((x * x) / Math.PI));
    }
    return Math.abs(2.0 * (x * Math.pow(Math.PI, -0.5)));
}
def code(x):
    """Two-regime candidate split at -1.5e-5: |2*sqrt(x*x/pi)| on the left,
    |2*x*pi**-0.5| otherwise.

    Reformatted: the generated one-line layout (assignment, if/else and
    return fused on one line) is not valid Python syntax. Arithmetic per
    branch is unchanged.
    """
    if x <= -1.5e-5:
        return math.fabs(2.0 * math.sqrt((x * x) / math.pi))
    return math.fabs(2.0 * (x * math.pow(math.pi, -0.5)))
function code(x)
    # Two-regime candidate split at -1.5e-5 (reformatted: one-line layout
    # with unseparated statements is not valid Julia). Arithmetic unchanged.
    if x <= -1.5e-5
        return abs(Float64(2.0 * sqrt(Float64(Float64(x * x) / pi))))
    else
        return abs(Float64(2.0 * Float64(x * (pi ^ -0.5))))
    end
end
function tmp_2 = code(x)
% CODE  Two-regime candidate split at -1.5e-5; sqrt(x*x/pi) form on the left.
% Reformatted: the generated one-line layout is not valid MATLAB syntax; the
% redundant intermediate tmp was folded into the output variable.
    if (x <= -1.5e-5)
        tmp_2 = abs((2.0 * sqrt(((x * x) / pi))));
    else
        tmp_2 = abs((2.0 * (x * (pi ^ -0.5))));
    end
end
code[x_] := If[LessEqual[x, -1.5e-5], N[Abs[N[(2.0 * N[Sqrt[N[(N[(x * x), $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(2.0 * N[(x * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.5 \cdot 10^{-5}:\\
\;\;\;\;\left|2 \cdot \sqrt{\frac{x \cdot x}{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|2 \cdot \left(x \cdot {\pi}^{-0.5}\right)\right|\\
\end{array}
\end{array}
if x < -1.50000000000000004e-5Initial program 98.8%
Simplified98.8%
Taylor expanded in x around 0 6.9%
*-commutative6.9%
unpow16.9%
sqr-pow0.0%
fabs-sqr0.0%
sqr-pow6.9%
unpow16.9%
*-commutative6.9%
Simplified6.9%
sqrt-div6.9%
metadata-eval6.9%
div-inv6.9%
clear-num6.9%
Applied egg-rr6.9%
expm1-log1p-u1.5%
expm1-udef1.5%
clear-num1.5%
Applied egg-rr1.5%
expm1-def1.5%
expm1-log1p6.9%
Simplified6.9%
add-sqr-sqrt0.0%
sqrt-unprod46.5%
frac-times46.5%
add-sqr-sqrt46.5%
Applied egg-rr46.5%
if -1.50000000000000004e-5 < x Initial program 99.9%
Simplified99.9%
Taylor expanded in x around 0 99.1%
*-commutative99.1%
unpow199.1%
sqr-pow46.9%
fabs-sqr46.9%
sqr-pow99.1%
unpow199.1%
*-commutative99.1%
Simplified99.1%
sqrt-div99.1%
metadata-eval99.1%
div-inv98.3%
clear-num98.4%
Applied egg-rr98.4%
associate-/r/99.1%
pow1/299.1%
pow-flip99.1%
metadata-eval99.1%
Applied egg-rr99.1%
Final simplification82.3%
(FPCore (x) :precision binary64 (fabs (* 2.0 (* x (pow PI -0.5)))))
/* Linear approximation |2 * x * pi^-0.5| (first erfi series term). */
double code(double x) {
    double inv_sqrt_pi = pow(((double) M_PI), -0.5);
    return fabs(2.0 * (x * inv_sqrt_pi));
}
public static double code(double x) {
    // Linear approximation |2 * x * pi^-0.5| (first erfi series term).
    double invSqrtPi = Math.pow(Math.PI, -0.5);
    return Math.abs(2.0 * (x * invSqrtPi));
}
def code(x):
    """Return |2 * x * pi**-0.5| — first term of the erfi series."""
    inv_sqrt_pi = math.pow(math.pi, -0.5)
    return math.fabs(2.0 * (x * inv_sqrt_pi))
function code(x)
    # |2 * x * pi^-0.5| — first term of the erfi series.
    inv_sqrt_pi = pi ^ -0.5
    return abs(Float64(2.0 * Float64(x * inv_sqrt_pi)))
end
function tmp = code(x)
% CODE  |2 * x * pi^-0.5| — first term of the erfi series.
% Reformatted: the generated one-line layout is not valid MATLAB syntax.
    tmp = abs((2.0 * (x * (pi ^ -0.5))));
end
code[x_] := N[Abs[N[(2.0 * N[(x * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|2 \cdot \left(x \cdot {\pi}^{-0.5}\right)\right|
\end{array}
Initial program 99.5%
Simplified99.5%
Taylor expanded in x around 0 69.6%
*-commutative69.6%
unpow169.6%
sqr-pow31.9%
fabs-sqr31.9%
sqr-pow69.6%
unpow169.6%
*-commutative69.6%
Simplified69.6%
sqrt-div69.6%
metadata-eval69.6%
div-inv69.0%
clear-num69.1%
Applied egg-rr69.1%
associate-/r/69.6%
pow1/269.6%
pow-flip69.6%
metadata-eval69.6%
Applied egg-rr69.6%
Final simplification69.6%
(FPCore (x) :precision binary64 (fabs (* 2.0 (/ x (sqrt PI)))))
/* Linear approximation |2 * x / sqrt(pi)| (division form). */
double code(double x) {
    double scaled = x / sqrt(((double) M_PI));
    return fabs(2.0 * scaled);
}
public static double code(double x) {
    // Linear approximation |2 * x / sqrt(pi)| (division form).
    double scaled = x / Math.sqrt(Math.PI);
    return Math.abs(2.0 * scaled);
}
def code(x):
    """Return |2 * x / sqrt(pi)| — first term of the erfi series, division form."""
    scaled = x / math.sqrt(math.pi)
    return math.fabs(2.0 * scaled)
function code(x)
    # |2 * x / sqrt(pi)| — first term of the erfi series, division form.
    scaled = Float64(x / sqrt(pi))
    return abs(Float64(2.0 * scaled))
end
function tmp = code(x)
% CODE  |2 * x / sqrt(pi)| — first term of the erfi series, division form.
% Reformatted: the generated one-line layout is not valid MATLAB syntax.
    tmp = abs((2.0 * (x / sqrt(pi))));
end
code[x_] := N[Abs[N[(2.0 * N[(x / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|2 \cdot \frac{x}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.5%
Simplified99.5%
Taylor expanded in x around 0 69.6%
*-commutative69.6%
unpow169.6%
sqr-pow31.9%
fabs-sqr31.9%
sqr-pow69.6%
unpow169.6%
*-commutative69.6%
Simplified69.6%
sqrt-div69.6%
metadata-eval69.6%
div-inv69.0%
clear-num69.1%
Applied egg-rr69.1%
expm1-log1p-u67.3%
expm1-udef5.9%
clear-num5.9%
Applied egg-rr5.9%
expm1-def67.3%
expm1-log1p69.0%
Simplified69.0%
Final simplification69.0%
herbie shell --seed 2023189
; Original Herbie input: truncated Maclaurin series of erfi(|x|), i.e.
; (1/sqrt(PI)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)
; = (2/sqrt(PI)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42),
; the branch of Jmat.Real.erfi used when x <= 0.5 (see :pre below).
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))