
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* Truncated Maclaurin series for erfi(|x|) in binary64:
 * (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
 * Same operations and association order as the generated original. */
double code(double x) {
    double ax = fabs(x);
    double cube = (ax * ax) * ax;        /* |x|^3 */
    double fifth = (cube * ax) * ax;     /* |x|^5 */
    double seventh = (fifth * ax) * ax;  /* |x|^7 */
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * cube)) + ((1.0 / 5.0) * fifth);
    sum = sum + ((1.0 / 21.0) * seventh);
    return fabs((1.0 / sqrt((double) M_PI)) * sum);
}
/**
 * Truncated Maclaurin series for erfi(|x|) in binary64:
 * (1/sqrt(PI)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
 * Same operations and association order as the generated original.
 */
public static double code(double x) {
    double ax = Math.abs(x);
    double cube = (ax * ax) * ax;        // |x|^3
    double fifth = (cube * ax) * ax;     // |x|^5
    double seventh = (fifth * ax) * ax;  // |x|^7
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * cube)) + ((1.0 / 5.0) * fifth);
    sum = sum + ((1.0 / 21.0) * seventh);
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * sum);
}
def code(x):
    """Truncated Maclaurin series for erfi(|x|) in binary64.

    Evaluates (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
    Fix: the generated source had all statements collapsed onto one physical
    line, which is a SyntaxError in Python; arithmetic is unchanged.
    """
    ax = math.fabs(x)
    t_0 = (ax * ax) * ax      # |x|^3
    t_1 = (t_0 * ax) * ax     # |x|^5
    return math.fabs(
        (1.0 / math.sqrt(math.pi))
        * ((((2.0 * ax) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1))
           + ((1.0 / 21.0) * ((t_1 * ax) * ax)))
    )
# Truncated Maclaurin series for erfi(|x|) in Float64:
# (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
# Fix: the generated source had all statements jammed onto one line without
# separators, which is invalid Julia; arithmetic is unchanged.
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))   # |x|^3
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))      # |x|^5
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
% Truncated Maclaurin series for erfi(|x|):
% (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
function tmp = code(x)
    ax = abs(x);
    t_0 = (ax * ax) * ax;   % |x|^3
    t_1 = (t_0 * ax) * ax;  % |x|^5
    tmp = abs((1.0 / sqrt(pi)) * ((((2.0 * ax) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * ax) * ax))));
end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* Truncated Maclaurin series for erfi(|x|) in binary64:
 * (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7). */
double code(double x) {
double t_0 = (fabs(x) * fabs(x)) * fabs(x); /* |x|^3 */
double t_1 = (t_0 * fabs(x)) * fabs(x); /* |x|^5 */
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
/** Truncated Maclaurin series for erfi(|x|) in binary64:
 *  (1/sqrt(PI)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7). */
public static double code(double x) {
double t_0 = (Math.abs(x) * Math.abs(x)) * Math.abs(x); // |x|^3
double t_1 = (t_0 * Math.abs(x)) * Math.abs(x); // |x|^5
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * Math.abs(x)) * Math.abs(x))))));
}
def code(x):
    """Truncated Maclaurin series for erfi(|x|) in binary64.

    Evaluates (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
    Fix: the generated source had all statements collapsed onto one physical
    line, which is a SyntaxError in Python; arithmetic is unchanged.
    """
    ax = math.fabs(x)
    t_0 = (ax * ax) * ax      # |x|^3
    t_1 = (t_0 * ax) * ax     # |x|^5
    return math.fabs(
        (1.0 / math.sqrt(math.pi))
        * ((((2.0 * ax) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1))
           + ((1.0 / 21.0) * ((t_1 * ax) * ax)))
    )
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (* (fabs x_m) (* x_m x_m))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(fma 2.0 (fabs x_m) (* 0.6666666666666666 t_0))
(+
(* 0.2 (* (* x_m x_m) t_0))
(* 0.047619047619047616 (* (* x_m x_m) (* x_m (pow x_m 4.0))))))))))x_m = fabs(x);
/* Herbie alternative: same erfi series using fma and pow.
 * Expects x_m = fabs(x) (assigned by the report just before this block). */
double code(double x_m) {
    double sq = x_m * x_m;            /* x_m^2 */
    double t_0 = fabs(x_m) * sq;      /* |x_m| * x_m^2 */
    double low = fma(2.0, fabs(x_m), 0.6666666666666666 * t_0);
    double high = (0.2 * (sq * t_0))
                + (0.047619047619047616 * (sq * (x_m * pow(x_m, 4.0))));
    return fabs((1.0 / sqrt((double) M_PI)) * (low + high));
}
x_m = abs(x) function code(x_m) t_0 = Float64(abs(x_m) * Float64(x_m * x_m)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(fma(2.0, abs(x_m), Float64(0.6666666666666666 * t_0)) + Float64(Float64(0.2 * Float64(Float64(x_m * x_m) * t_0)) + Float64(0.047619047619047616 * Float64(Float64(x_m * x_m) * Float64(x_m * (x_m ^ 4.0)))))))) end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(N[Abs[x$95$m], $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(2.0 * N[Abs[x$95$m], $MachinePrecision] + N[(0.6666666666666666 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(0.2 * N[(N[(x$95$m * x$95$m), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \left|x_m\right| \cdot \left(x_m \cdot x_m\right)\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\mathsf{fma}\left(2, \left|x_m\right|, 0.6666666666666666 \cdot t_0\right) + \left(0.2 \cdot \left(\left(x_m \cdot x_m\right) \cdot t_0\right) + 0.047619047619047616 \cdot \left(\left(x_m \cdot x_m\right) \cdot \left(x_m \cdot {x_m}^{4}\right)\right)\right)\right)\right|
\end{array}
\end{array}
Initial program 99.9%
Simplified99.9%
Taylor expanded in x around 0 99.9%
*-commutative99.9%
rem-square-sqrt37.5%
fabs-sqr37.5%
rem-square-sqrt74.9%
Simplified74.9%
Final simplification74.9%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (sqrt (/ 1.0 PI))))
(+
(* t_0 (+ (* 0.6666666666666666 (pow x_m 3.0)) (* 2.0 x_m)))
(* t_0 (+ (* 0.2 (pow x_m 5.0)) (* 0.047619047619047616 (pow x_m 7.0)))))))x_m = fabs(x);
/* Herbie alternative: erfi series split into low/high-order halves,
 * each scaled by sqrt(1/pi). Expects x_m = fabs(x). */
double code(double x_m) {
    double inv_sqrt_pi = sqrt(1.0 / (double) M_PI);
    double low = (0.6666666666666666 * pow(x_m, 3.0)) + (2.0 * x_m);
    double high = (0.2 * pow(x_m, 5.0)) + (0.047619047619047616 * pow(x_m, 7.0));
    return (inv_sqrt_pi * low) + (inv_sqrt_pi * high);
}
x_m = Math.abs(x);
/** Herbie alternative: erfi series split into low/high-order halves,
 *  each scaled by sqrt(1/PI). Expects x_m = Math.abs(x). */
public static double code(double x_m) {
double t_0 = Math.sqrt((1.0 / Math.PI)); // common factor 1/sqrt(pi)
return (t_0 * ((0.6666666666666666 * Math.pow(x_m, 3.0)) + (2.0 * x_m))) + (t_0 * ((0.2 * Math.pow(x_m, 5.0)) + (0.047619047619047616 * Math.pow(x_m, 7.0))));
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: erfi series in two halves scaled by sqrt(1/pi).

    Fix: the generated source collapsed the assignment and return onto one
    line, which is a SyntaxError in Python; arithmetic is unchanged.
    """
    t_0 = math.sqrt(1.0 / math.pi)
    return (t_0 * ((0.6666666666666666 * math.pow(x_m, 3.0)) + (2.0 * x_m))) + (
        t_0 * ((0.2 * math.pow(x_m, 5.0)) + (0.047619047619047616 * math.pow(x_m, 7.0)))
    )
x_m = abs(x) function code(x_m) t_0 = sqrt(Float64(1.0 / pi)) return Float64(Float64(t_0 * Float64(Float64(0.6666666666666666 * (x_m ^ 3.0)) + Float64(2.0 * x_m))) + Float64(t_0 * Float64(Float64(0.2 * (x_m ^ 5.0)) + Float64(0.047619047619047616 * (x_m ^ 7.0))))) end
x_m = abs(x); function tmp = code(x_m) t_0 = sqrt((1.0 / pi)); tmp = (t_0 * ((0.6666666666666666 * (x_m ^ 3.0)) + (2.0 * x_m))) + (t_0 * ((0.2 * (x_m ^ 5.0)) + (0.047619047619047616 * (x_m ^ 7.0)))); end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision]}, N[(N[(t$95$0 * N[(N[(0.6666666666666666 * N[Power[x$95$m, 3.0], $MachinePrecision]), $MachinePrecision] + N[(2.0 * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(t$95$0 * N[(N[(0.2 * N[Power[x$95$m, 5.0], $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[Power[x$95$m, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \sqrt{\frac{1}{\pi}}\\
t_0 \cdot \left(0.6666666666666666 \cdot {x_m}^{3} + 2 \cdot x_m\right) + t_0 \cdot \left(0.2 \cdot {x_m}^{5} + 0.047619047619047616 \cdot {x_m}^{7}\right)
\end{array}
\end{array}
Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
Taylor expanded in x around 0 38.6%
+-commutative38.6%
+-commutative38.6%
associate-+l+38.6%
Simplified38.6%
Final simplification38.6%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(*
x_m
(/
(+
(+ (* 0.047619047619047616 (pow x_m 6.0)) (* 0.2 (pow x_m 4.0)))
(+ 2.0 (* 0.6666666666666666 (pow x_m 2.0))))
(sqrt PI))))x_m = fabs(x);
/* Herbie alternative: erfi series factored as x_m * poly(x_m^2) / sqrt(pi).
 * Expects x_m = fabs(x). */
double code(double x_m) {
    double poly = ((0.047619047619047616 * pow(x_m, 6.0)) + (0.2 * pow(x_m, 4.0)))
                + (2.0 + (0.6666666666666666 * pow(x_m, 2.0)));
    return x_m * (poly / sqrt((double) M_PI));
}
x_m = Math.abs(x);
/** Herbie alternative: erfi series factored as x_m * poly(x_m^2) / sqrt(PI).
 *  Expects x_m = Math.abs(x). */
public static double code(double x_m) {
return x_m * ((((0.047619047619047616 * Math.pow(x_m, 6.0)) + (0.2 * Math.pow(x_m, 4.0))) + (2.0 + (0.6666666666666666 * Math.pow(x_m, 2.0)))) / Math.sqrt(Math.PI));
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: erfi series factored as x_m * poly(x_m^2) / sqrt(pi)."""
    poly = ((0.047619047619047616 * math.pow(x_m, 6.0)) + (0.2 * math.pow(x_m, 4.0))) + (
        2.0 + (0.6666666666666666 * math.pow(x_m, 2.0))
    )
    return x_m * (poly / math.sqrt(math.pi))
x_m = abs(x) function code(x_m) return Float64(x_m * Float64(Float64(Float64(Float64(0.047619047619047616 * (x_m ^ 6.0)) + Float64(0.2 * (x_m ^ 4.0))) + Float64(2.0 + Float64(0.6666666666666666 * (x_m ^ 2.0)))) / sqrt(pi))) end
x_m = abs(x); function tmp = code(x_m) tmp = x_m * ((((0.047619047619047616 * (x_m ^ 6.0)) + (0.2 * (x_m ^ 4.0))) + (2.0 + (0.6666666666666666 * (x_m ^ 2.0)))) / sqrt(pi)); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(x$95$m * N[(N[(N[(N[(0.047619047619047616 * N[Power[x$95$m, 6.0], $MachinePrecision]), $MachinePrecision] + N[(0.2 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(2.0 + N[(0.6666666666666666 * N[Power[x$95$m, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
x_m \cdot \frac{\left(0.047619047619047616 \cdot {x_m}^{6} + 0.2 \cdot {x_m}^{4}\right) + \left(2 + 0.6666666666666666 \cdot {x_m}^{2}\right)}{\sqrt{\pi}}
\end{array}
Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
fma-udef38.6%
Applied egg-rr38.6%
fma-udef38.6%
Applied egg-rr38.6%
Final simplification38.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (* x_m (/ (+ 2.0 (+ (* 0.047619047619047616 (pow x_m 6.0)) (* 0.2 (pow x_m 4.0)))) (sqrt PI))))
x_m = fabs(x);
/* Herbie alternative: erfi series with the x^2 term dropped
 * (x_m * (2 + poly_even(x_m)) / sqrt(pi)). Expects x_m = fabs(x). */
double code(double x_m) {
    double even = (0.047619047619047616 * pow(x_m, 6.0)) + (0.2 * pow(x_m, 4.0));
    return x_m * ((2.0 + even) / sqrt((double) M_PI));
}
x_m = Math.abs(x);
/** Herbie alternative: erfi series with the x^2 term dropped.
 *  Expects x_m = Math.abs(x). */
public static double code(double x_m) {
return x_m * ((2.0 + ((0.047619047619047616 * Math.pow(x_m, 6.0)) + (0.2 * Math.pow(x_m, 4.0)))) / Math.sqrt(Math.PI));
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: erfi series with the x^2 term dropped."""
    even = (0.047619047619047616 * math.pow(x_m, 6.0)) + (0.2 * math.pow(x_m, 4.0))
    return x_m * ((2.0 + even) / math.sqrt(math.pi))
x_m = abs(x) function code(x_m) return Float64(x_m * Float64(Float64(2.0 + Float64(Float64(0.047619047619047616 * (x_m ^ 6.0)) + Float64(0.2 * (x_m ^ 4.0)))) / sqrt(pi))) end
x_m = abs(x); function tmp = code(x_m) tmp = x_m * ((2.0 + ((0.047619047619047616 * (x_m ^ 6.0)) + (0.2 * (x_m ^ 4.0)))) / sqrt(pi)); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(x$95$m * N[(N[(2.0 + N[(N[(0.047619047619047616 * N[Power[x$95$m, 6.0], $MachinePrecision]), $MachinePrecision] + N[(0.2 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
x_m \cdot \frac{2 + \left(0.047619047619047616 \cdot {x_m}^{6} + 0.2 \cdot {x_m}^{4}\right)}{\sqrt{\pi}}
\end{array}
Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
fma-udef38.6%
Applied egg-rr38.6%
Taylor expanded in x around 0 38.6%
Final simplification38.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= (fabs x_m) 0.02) (* (sqrt (/ 1.0 PI)) (+ (* 0.6666666666666666 (pow x_m 3.0)) (* 2.0 x_m))) (* 0.047619047619047616 (* (pow x_m 7.0) (pow PI -0.5)))))
x_m = fabs(x);
/* Herbie alternative: branch at |x_m| = 0.02 between the two lowest-order
 * series terms (small inputs) and the x^7 term alone (otherwise).
 * Expects x_m = fabs(x). */
double code(double x_m) {
    if (fabs(x_m) <= 0.02) {
        return sqrt(1.0 / (double) M_PI)
             * ((0.6666666666666666 * pow(x_m, 3.0)) + (2.0 * x_m));
    }
    return 0.047619047619047616 * (pow(x_m, 7.0) * pow((double) M_PI, -0.5));
}
x_m = Math.abs(x);
/** Herbie alternative: branch at |x_m| = 0.02 between the two lowest-order
 *  series terms (small inputs) and the x^7 term alone (otherwise).
 *  Expects x_m = Math.abs(x). */
public static double code(double x_m) {
double tmp;
if (Math.abs(x_m) <= 0.02) {
tmp = Math.sqrt((1.0 / Math.PI)) * ((0.6666666666666666 * Math.pow(x_m, 3.0)) + (2.0 * x_m)); // small-input branch
} else {
tmp = 0.047619047619047616 * (Math.pow(x_m, 7.0) * Math.pow(Math.PI, -0.5)); // x^7 term only
}
return tmp;
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: branch at |x_m| = 0.02 between the two lowest-order
    series terms and the x^7 term alone.

    Fix: the generated source collapsed the if/else onto one physical line,
    which is a SyntaxError in Python; arithmetic is unchanged.
    """
    if math.fabs(x_m) <= 0.02:
        return math.sqrt(1.0 / math.pi) * (
            (0.6666666666666666 * math.pow(x_m, 3.0)) + (2.0 * x_m)
        )
    return 0.047619047619047616 * (math.pow(x_m, 7.0) * math.pow(math.pi, -0.5))
x_m = abs(x) function code(x_m) tmp = 0.0 if (abs(x_m) <= 0.02) tmp = Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(0.6666666666666666 * (x_m ^ 3.0)) + Float64(2.0 * x_m))); else tmp = Float64(0.047619047619047616 * Float64((x_m ^ 7.0) * (pi ^ -0.5))); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (abs(x_m) <= 0.02) tmp = sqrt((1.0 / pi)) * ((0.6666666666666666 * (x_m ^ 3.0)) + (2.0 * x_m)); else tmp = 0.047619047619047616 * ((x_m ^ 7.0) * (pi ^ -0.5)); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[N[Abs[x$95$m], $MachinePrecision], 0.02], N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(0.6666666666666666 * N[Power[x$95$m, 3.0], $MachinePrecision]), $MachinePrecision] + N[(2.0 * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.047619047619047616 * N[(N[Power[x$95$m, 7.0], $MachinePrecision] * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;\left|x_m\right| \leq 0.02:\\
\;\;\;\;\sqrt{\frac{1}{\pi}} \cdot \left(0.6666666666666666 \cdot {x_m}^{3} + 2 \cdot x_m\right)\\
\mathbf{else}:\\
\;\;\;\;0.047619047619047616 \cdot \left({x_m}^{7} \cdot {\pi}^{-0.5}\right)\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0200000000000000004Initial program 99.9%
Simplified99.1%
div-inv99.8%
*-commutative99.8%
Applied egg-rr55.8%
Taylor expanded in x around 0 55.8%
+-commutative55.8%
associate-*r*55.8%
associate-*r*55.8%
distribute-rgt-out55.8%
Simplified55.8%
if 0.0200000000000000004 < (fabs.f64 x) Initial program 99.9%
Simplified99.9%
div-inv99.9%
*-commutative99.9%
Applied egg-rr0.1%
Taylor expanded in x around inf 0.1%
+-commutative0.1%
associate-*r*0.1%
associate-*r*0.1%
distribute-rgt-out0.1%
Simplified0.1%
Taylor expanded in x around inf 0.1%
associate-*r*0.1%
Simplified0.1%
expm1-log1p-u0.0%
expm1-udef0.0%
inv-pow0.0%
sqrt-pow10.0%
metadata-eval0.0%
Applied egg-rr0.0%
expm1-def0.0%
expm1-log1p0.1%
associate-*l*0.1%
Simplified0.1%
Final simplification38.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (* (/ x_m (sqrt PI)) (fma 0.047619047619047616 (pow x_m 6.0) 2.0)))
x_m = fabs(x);
double code(double x_m) {
return (x_m / sqrt(((double) M_PI))) * fma(0.047619047619047616, pow(x_m, 6.0), 2.0);
}
x_m = abs(x) function code(x_m) return Float64(Float64(x_m / sqrt(pi)) * fma(0.047619047619047616, (x_m ^ 6.0), 2.0)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(x$95$m / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(0.047619047619047616 * N[Power[x$95$m, 6.0], $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{x_m}{\sqrt{\pi}} \cdot \mathsf{fma}\left(0.047619047619047616, {x_m}^{6}, 2\right)
\end{array}
Initial program 99.9%
Simplified99.4%
Taylor expanded in x around inf 99.0%
Taylor expanded in x around 0 98.8%
Taylor expanded in x around 0 98.8%
fabs-neg98.8%
associate-*r/98.8%
+-commutative98.8%
fma-udef98.8%
fabs-div98.8%
*-rgt-identity98.8%
fabs-div98.8%
fabs-div98.8%
associate-/r/98.8%
associate-*l/98.8%
associate-*r/99.3%
distribute-lft-neg-in99.3%
fabs-neg99.3%
rem-square-sqrt37.2%
Simplified38.3%
Final simplification38.3%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 2.3) (* x_m (/ (+ 2.0 (* 0.2 (pow x_m 4.0))) (sqrt PI))) (* 0.047619047619047616 (* (pow x_m 7.0) (pow PI -0.5)))))
x_m = fabs(x);
/* Herbie alternative: branch at x_m = 2.3 between a short even polynomial
 * and the x^7 term alone. Expects x_m = fabs(x) (nonnegative). */
double code(double x_m) {
    if (x_m <= 2.3) {
        return x_m * ((2.0 + (0.2 * pow(x_m, 4.0))) / sqrt((double) M_PI));
    }
    return 0.047619047619047616 * (pow(x_m, 7.0) * pow((double) M_PI, -0.5));
}
x_m = Math.abs(x);
/** Herbie alternative: branch at x_m = 2.3 between a short even polynomial
 *  and the x^7 term alone. Expects x_m = Math.abs(x). */
public static double code(double x_m) {
double tmp;
if (x_m <= 2.3) {
tmp = x_m * ((2.0 + (0.2 * Math.pow(x_m, 4.0))) / Math.sqrt(Math.PI)); // moderate inputs
} else {
tmp = 0.047619047619047616 * (Math.pow(x_m, 7.0) * Math.pow(Math.PI, -0.5)); // x^7 term only
}
return tmp;
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: branch at x_m = 2.3 between a short even
    polynomial and the x^7 term alone.

    Fix: the generated source collapsed the if/else onto one physical line,
    which is a SyntaxError in Python; arithmetic is unchanged.
    """
    if x_m <= 2.3:
        return x_m * ((2.0 + (0.2 * math.pow(x_m, 4.0))) / math.sqrt(math.pi))
    return 0.047619047619047616 * (math.pow(x_m, 7.0) * math.pow(math.pi, -0.5))
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 2.3) tmp = Float64(x_m * Float64(Float64(2.0 + Float64(0.2 * (x_m ^ 4.0))) / sqrt(pi))); else tmp = Float64(0.047619047619047616 * Float64((x_m ^ 7.0) * (pi ^ -0.5))); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 2.3) tmp = x_m * ((2.0 + (0.2 * (x_m ^ 4.0))) / sqrt(pi)); else tmp = 0.047619047619047616 * ((x_m ^ 7.0) * (pi ^ -0.5)); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 2.3], N[(x$95$m * N[(N[(2.0 + N[(0.2 * N[Power[x$95$m, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.047619047619047616 * N[(N[Power[x$95$m, 7.0], $MachinePrecision] * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x_m \leq 2.3:\\
\;\;\;\;x_m \cdot \frac{2 + 0.2 \cdot {x_m}^{4}}{\sqrt{\pi}}\\
\mathbf{else}:\\
\;\;\;\;0.047619047619047616 \cdot \left({x_m}^{7} \cdot {\pi}^{-0.5}\right)\\
\end{array}
\end{array}
if x < 2.2999999999999998Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
Taylor expanded in x around 0 38.6%
*-commutative38.6%
Simplified38.6%
Taylor expanded in x around 0 38.6%
if 2.2999999999999998 < x Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
Taylor expanded in x around inf 3.8%
+-commutative3.8%
associate-*r*3.8%
associate-*r*3.8%
distribute-rgt-out3.8%
Simplified3.8%
Taylor expanded in x around inf 3.8%
associate-*r*3.8%
Simplified3.8%
expm1-log1p-u3.7%
expm1-udef3.7%
inv-pow3.7%
sqrt-pow13.7%
metadata-eval3.7%
Applied egg-rr3.7%
expm1-def3.7%
expm1-log1p3.8%
associate-*l*3.8%
Simplified3.8%
Final simplification38.6%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.9) (* (pow PI -0.5) (* 2.0 x_m)) (* 0.047619047619047616 (* (pow x_m 7.0) (pow PI -0.5)))))
x_m = fabs(x);
/* Herbie alternative: branch at x_m = 1.9 between the linear leading term
 * 2*x_m/sqrt(pi) and the x^7 term alone. Expects x_m = fabs(x). */
double code(double x_m) {
    if (x_m <= 1.9) {
        return pow((double) M_PI, -0.5) * (2.0 * x_m);
    }
    return 0.047619047619047616 * (pow(x_m, 7.0) * pow((double) M_PI, -0.5));
}
x_m = Math.abs(x);
/** Herbie alternative: branch at x_m = 1.9 between the linear leading term
 *  2*x_m/sqrt(PI) and the x^7 term alone. Expects x_m = Math.abs(x). */
public static double code(double x_m) {
double tmp;
if (x_m <= 1.9) {
tmp = Math.pow(Math.PI, -0.5) * (2.0 * x_m); // linear leading term
} else {
tmp = 0.047619047619047616 * (Math.pow(x_m, 7.0) * Math.pow(Math.PI, -0.5)); // x^7 term only
}
return tmp;
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: branch at x_m = 1.9 between the linear leading
    term 2*x_m/sqrt(pi) and the x^7 term alone.

    Fix: the generated source collapsed the if/else onto one physical line,
    which is a SyntaxError in Python; arithmetic is unchanged.
    """
    if x_m <= 1.9:
        return math.pow(math.pi, -0.5) * (2.0 * x_m)
    return 0.047619047619047616 * (math.pow(x_m, 7.0) * math.pow(math.pi, -0.5))
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.9) tmp = Float64((pi ^ -0.5) * Float64(2.0 * x_m)); else tmp = Float64(0.047619047619047616 * Float64((x_m ^ 7.0) * (pi ^ -0.5))); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1.9) tmp = (pi ^ -0.5) * (2.0 * x_m); else tmp = 0.047619047619047616 * ((x_m ^ 7.0) * (pi ^ -0.5)); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.9], N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(2.0 * x$95$m), $MachinePrecision]), $MachinePrecision], N[(0.047619047619047616 * N[(N[Power[x$95$m, 7.0], $MachinePrecision] * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x_m \leq 1.9:\\
\;\;\;\;{\pi}^{-0.5} \cdot \left(2 \cdot x_m\right)\\
\mathbf{else}:\\
\;\;\;\;0.047619047619047616 \cdot \left({x_m}^{7} \cdot {\pi}^{-0.5}\right)\\
\end{array}
\end{array}
if x < 1.8999999999999999Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
Taylor expanded in x around 0 38.7%
associate-*r*38.7%
Simplified38.7%
sqrt-div38.7%
metadata-eval38.7%
un-div-inv38.4%
*-commutative38.4%
Applied egg-rr38.4%
div-inv38.7%
pow1/238.7%
pow-flip38.7%
metadata-eval38.7%
Applied egg-rr38.7%
if 1.8999999999999999 < x Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
Taylor expanded in x around inf 3.8%
+-commutative3.8%
associate-*r*3.8%
associate-*r*3.8%
distribute-rgt-out3.8%
Simplified3.8%
Taylor expanded in x around inf 3.8%
associate-*r*3.8%
Simplified3.8%
expm1-log1p-u3.7%
expm1-udef3.7%
inv-pow3.7%
sqrt-pow13.7%
metadata-eval3.7%
Applied egg-rr3.7%
expm1-def3.7%
expm1-log1p3.8%
associate-*l*3.8%
Simplified3.8%
Final simplification38.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1e-41) (* (pow PI -0.5) (* 2.0 x_m)) (sqrt (/ (pow (* 2.0 x_m) 2.0) PI))))
x_m = fabs(x);
/* Herbie alternative: for tiny inputs (<= 1e-41) use 2*x_m/sqrt(pi) directly
 * to avoid squaring underflow; otherwise sqrt((2*x_m)^2 / pi).
 * Expects x_m = fabs(x) (nonnegative). */
double code(double x_m) {
    if (x_m <= 1e-41) {
        return pow((double) M_PI, -0.5) * (2.0 * x_m);
    }
    return sqrt(pow(2.0 * x_m, 2.0) / (double) M_PI);
}
x_m = Math.abs(x);
/** Herbie alternative: for tiny inputs (<= 1e-41) use 2*x_m/sqrt(PI)
 *  directly; otherwise sqrt((2*x_m)^2 / PI). Expects x_m = Math.abs(x). */
public static double code(double x_m) {
double tmp;
if (x_m <= 1e-41) {
tmp = Math.pow(Math.PI, -0.5) * (2.0 * x_m); // avoids squaring tiny values
} else {
tmp = Math.sqrt((Math.pow((2.0 * x_m), 2.0) / Math.PI));
}
return tmp;
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: for tiny inputs (<= 1e-41) use 2*x_m/sqrt(pi)
    directly; otherwise sqrt((2*x_m)^2 / pi).

    Fix: the generated source collapsed the if/else onto one physical line,
    which is a SyntaxError in Python; arithmetic is unchanged.
    """
    if x_m <= 1e-41:
        return math.pow(math.pi, -0.5) * (2.0 * x_m)
    return math.sqrt(math.pow(2.0 * x_m, 2.0) / math.pi)
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1e-41) tmp = Float64((pi ^ -0.5) * Float64(2.0 * x_m)); else tmp = sqrt(Float64((Float64(2.0 * x_m) ^ 2.0) / pi)); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1e-41) tmp = (pi ^ -0.5) * (2.0 * x_m); else tmp = sqrt((((2.0 * x_m) ^ 2.0) / pi)); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1e-41], N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(2.0 * x$95$m), $MachinePrecision]), $MachinePrecision], N[Sqrt[N[(N[Power[N[(2.0 * x$95$m), $MachinePrecision], 2.0], $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x_m \leq 10^{-41}:\\
\;\;\;\;{\pi}^{-0.5} \cdot \left(2 \cdot x_m\right)\\
\mathbf{else}:\\
\;\;\;\;\sqrt{\frac{{\left(2 \cdot x_m\right)}^{2}}{\pi}}\\
\end{array}
\end{array}
if x < 1.00000000000000001e-41Initial program 99.9%
Simplified99.4%
div-inv99.8%
*-commutative99.8%
Applied egg-rr35.8%
Taylor expanded in x around 0 35.9%
associate-*r*35.9%
Simplified35.9%
sqrt-div35.9%
metadata-eval35.9%
un-div-inv35.7%
*-commutative35.7%
Applied egg-rr35.7%
div-inv35.9%
pow1/235.9%
pow-flip35.9%
metadata-eval35.9%
Applied egg-rr35.9%
if 1.00000000000000001e-41 < x Initial program 100.0%
Simplified99.3%
div-inv100.0%
*-commutative100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0 100.0%
associate-*r*100.0%
Simplified100.0%
sqrt-div100.0%
metadata-eval100.0%
un-div-inv99.3%
*-commutative99.3%
Applied egg-rr99.3%
add-sqr-sqrt98.9%
sqrt-unprod99.3%
frac-times99.6%
pow299.6%
add-sqr-sqrt100.0%
Applied egg-rr100.0%
Final simplification38.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (* (pow PI -0.5) (* 2.0 x_m)))
x_m = fabs(x);
/* Herbie alternative: leading term only, pi^(-1/2) * 2*x_m.
 * Expects x_m = fabs(x). */
double code(double x_m) {
    double doubled = 2.0 * x_m;
    return pow((double) M_PI, -0.5) * doubled;
}
x_m = Math.abs(x);
/** Herbie alternative: leading term only, PI^(-1/2) * 2*x_m.
 *  Expects x_m = Math.abs(x). */
public static double code(double x_m) {
return Math.pow(Math.PI, -0.5) * (2.0 * x_m);
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: leading term only, pi**-0.5 * (2*x_m)."""
    inv_sqrt_pi = math.pow(math.pi, -0.5)
    return inv_sqrt_pi * (2.0 * x_m)
x_m = abs(x) function code(x_m) return Float64((pi ^ -0.5) * Float64(2.0 * x_m)) end
x_m = abs(x); function tmp = code(x_m) tmp = (pi ^ -0.5) * (2.0 * x_m); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(2.0 * x$95$m), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
{\pi}^{-0.5} \cdot \left(2 \cdot x_m\right)
\end{array}
Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
Taylor expanded in x around 0 38.7%
associate-*r*38.7%
Simplified38.7%
sqrt-div38.7%
metadata-eval38.7%
un-div-inv38.4%
*-commutative38.4%
Applied egg-rr38.4%
div-inv38.7%
pow1/238.7%
pow-flip38.7%
metadata-eval38.7%
Applied egg-rr38.7%
Final simplification38.7%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (* 2.0 x_m) (sqrt PI)))
x_m = fabs(x);
/* Herbie alternative: leading term only, (2*x_m) / sqrt(pi).
 * Expects x_m = fabs(x). */
double code(double x_m) {
    double doubled = 2.0 * x_m;
    return doubled / sqrt((double) M_PI);
}
x_m = Math.abs(x);
/** Herbie alternative: leading term only, (2*x_m) / sqrt(PI).
 *  Expects x_m = Math.abs(x). */
public static double code(double x_m) {
return (2.0 * x_m) / Math.sqrt(Math.PI);
}
# Note: the report assigns x_m = math.fabs(x) before calling this function.
def code(x_m):
    """Herbie alternative: leading term only, (2*x_m) / sqrt(pi)."""
    doubled = 2.0 * x_m
    return doubled / math.sqrt(math.pi)
x_m = abs(x) function code(x_m) return Float64(Float64(2.0 * x_m) / sqrt(pi)) end
x_m = abs(x); function tmp = code(x_m) tmp = (2.0 * x_m) / sqrt(pi); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[(2.0 * x$95$m), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{2 \cdot x_m}{\sqrt{\pi}}
\end{array}
Initial program 99.9%
Simplified99.4%
div-inv99.9%
*-commutative99.9%
Applied egg-rr38.6%
Taylor expanded in x around 0 38.7%
associate-*r*38.7%
Simplified38.7%
sqrt-div38.7%
metadata-eval38.7%
un-div-inv38.4%
*-commutative38.4%
Applied egg-rr38.4%
Final simplification38.4%
herbie shell --seed 2023332
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))