
; Reference FPCore (binary64):
;   |(1/sqrt(PI)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|
; where t_0 = |x|^3 and t_1 = |x|^5; the last product (t_1*|x|*|x|) is |x|^7.
; NOTE(review): looks like a truncated Maclaurin series for erfi(x) -- confirm
; against the named benchmark ("Jmat.Real.erfi ...") at the end of the report.
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
double code(double x) {
    /* Odd-power series in |x| scaled by 1/sqrt(pi):
       (1/sqrt(pi)) * (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7), a = |x|.
       Restyled: the magnitude is computed once instead of on every use;
       the arithmetic grouping is unchanged, so the result is bit-identical. */
    double a = fabs(x);
    double a3 = (a * a) * a;   /* |x|^3 */
    double a5 = (a3 * a) * a;  /* |x|^5 */
    double a7 = (a5 * a) * a;  /* |x|^7 */
    double series = (((2.0 * a) + ((2.0 / 3.0) * a3)) + ((1.0 / 5.0) * a5))
                    + ((1.0 / 21.0) * a7);
    return fabs((1.0 / sqrt((double) M_PI)) * series);
}
/**
 * Odd-power series in |x| scaled by 1/sqrt(pi):
 * (1/sqrt(pi)) * (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7) with a = |x|.
 * Restyled so Math.abs(x) is evaluated once; grouping of the floating-point
 * operations is unchanged, so the returned value is bit-identical.
 */
public static double code(double x) {
    double a = Math.abs(x);
    double a3 = (a * a) * a;   // |x|^3
    double a5 = (a3 * a) * a;  // |x|^5
    double a7 = (a5 * a) * a;  // |x|^7
    double series = (((2.0 * a) + ((2.0 / 3.0) * a3)) + ((1.0 / 5.0) * a5))
                    + ((1.0 / 21.0) * a7);
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * series);
}
def code(x):
    """Odd-power series in |x| scaled by 1/sqrt(pi).

    Computes |(1/sqrt(pi)) * (2a + (2/3)a**3 + (1/5)a**5 + (1/21)a**7)|
    with a = |x|.  Presumably a truncated erfi(x) Maclaurin series --
    see the named FPCore benchmark at the end of the report; confirm there.

    Fix: the report collapsed this definition onto a single line, which is
    a SyntaxError in Python; reformatted into a valid function.  The
    arithmetic grouping matches the original expression exactly.
    """
    a = math.fabs(x)
    t_0 = (a * a) * a    # |x|**3
    t_1 = (t_0 * a) * a  # |x|**5
    t_2 = (t_1 * a) * a  # |x|**7
    series = ((2.0 * a) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)
    return math.fabs((1.0 / math.sqrt(math.pi)) * (series + (1.0 / 21.0) * t_2))
# Odd-power series in |x| scaled by 1/sqrt(pi), every intermediate rounded
# to Float64 (mirrors the binary64 FPCore).
# Fix: the report collapsed the body onto one line with no `;`/newline
# separators, which does not parse; reformatted into a valid definition.
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))   # |x|^3
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))      # |x|^5
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
function tmp = code(x)
    % Odd-power series in |x| scaled by 1/sqrt(pi):
    % |(1/sqrt(pi)) * (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7)|, a = |x|.
    % Fix: the report collapsed this function onto a single line, which is
    % not a valid MATLAB function file; reformatted.  Arithmetic unchanged.
    t_0 = (abs(x) * abs(x)) * abs(x);   % |x|^3
    t_1 = (t_0 * abs(x)) * abs(x);      % |x|^5
    tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x))))));
end
(* Mathematica port of the reference FPCore: odd-power series in Abs[x]
   scaled by 1/Sqrt[Pi]; every intermediate is rounded with
   N[..., $MachinePrecision] to mimic binary64 rounding.
   t$95$0 = |x|^3, t$95$1 = |x|^5. *)
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
double code(double x) {
double t_0 = (fabs(x) * fabs(x)) * fabs(x);
double t_1 = (t_0 * fabs(x)) * fabs(x);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
public static double code(double x) {
double t_0 = (Math.abs(x) * Math.abs(x)) * Math.abs(x);
double t_1 = (t_0 * Math.abs(x)) * Math.abs(x);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * Math.abs(x)) * Math.abs(x))))));
}
def code(x): t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x) t_1 = (t_0 * math.fabs(x)) * math.fabs(x) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (* (fabs x) (* x x))) (t_1 (* (fabs x) (* (fabs x) t_0))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* 0.6666666666666666 t_0)) (* 0.2 t_1))
(* 0.047619047619047616 (* (fabs x) (* (fabs x) t_1))))))))
double code(double x) {
    /* Same series as the reference port, with |x|^3 built from x*x
       (already non-negative) and decimal constants for the coefficients.
       Restyled: fabs(x) hoisted into one local; grouping unchanged, so
       the result is bit-identical. */
    double a = fabs(x);
    double cube = a * (x * x);         /* |x|^3 */
    double fifth = a * (a * cube);     /* |x|^5 */
    double seventh = a * (a * fifth);  /* |x|^7 */
    double poly = (((2.0 * a) + (0.6666666666666666 * cube)) + (0.2 * fifth))
                  + (0.047619047619047616 * seventh);
    return fabs((1.0 / sqrt((double) M_PI)) * poly);
}
public static double code(double x) {
double t_0 = Math.abs(x) * (x * x);
double t_1 = Math.abs(x) * (Math.abs(x) * t_0);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + (0.6666666666666666 * t_0)) + (0.2 * t_1)) + (0.047619047619047616 * (Math.abs(x) * (Math.abs(x) * t_1))))));
}
def code(x):
    """Odd-power series in |x| scaled by 1/sqrt(pi) (decimal coefficients).

    |x|^3 is formed as |x| * (x*x); x*x is already non-negative, so this
    equals the all-fabs variant bit-for-bit.

    Fix: the report collapsed this definition onto a single line, which is
    a SyntaxError in Python; reformatted.  Arithmetic grouping unchanged.
    """
    a = math.fabs(x)
    t_0 = a * (x * x)   # |x|**3
    t_1 = a * (a * t_0) # |x|**5
    poly = ((2.0 * a) + (0.6666666666666666 * t_0)) + (0.2 * t_1)
    tail = 0.047619047619047616 * (a * (a * t_1))  # (1/21)|x|**7
    return math.fabs((1.0 / math.sqrt(math.pi)) * (poly + tail))
function code(x) t_0 = Float64(abs(x) * Float64(x * x)) t_1 = Float64(abs(x) * Float64(abs(x) * t_0)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(0.6666666666666666 * t_0)) + Float64(0.2 * t_1)) + Float64(0.047619047619047616 * Float64(abs(x) * Float64(abs(x) * t_1)))))) end
function tmp = code(x) t_0 = abs(x) * (x * x); t_1 = abs(x) * (abs(x) * t_0); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + (0.6666666666666666 * t_0)) + (0.2 * t_1)) + (0.047619047619047616 * (abs(x) * (abs(x) * t_1)))))); end
code[x_] := Block[{t$95$0 = N[(N[Abs[x], $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(0.2 * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left|x\right| \cdot \left(x \cdot x\right)\\
t_1 := \left|x\right| \cdot \left(\left|x\right| \cdot t\_0\right)\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + 0.6666666666666666 \cdot t\_0\right) + 0.2 \cdot t\_1\right) + 0.047619047619047616 \cdot \left(\left|x\right| \cdot \left(\left|x\right| \cdot t\_1\right)\right)\right)\right|
\end{array}
\end{array}
Initial program 99.9%
Final simplification 99.9%
(FPCore (x)
:precision binary64
(fabs
(*
(sqrt (/ 1.0 PI))
(+
(*
(fabs x)
(+ 2.0 (fma 0.2 (pow x 4.0) (* 0.047619047619047616 (pow x 6.0)))))
(* 0.6666666666666666 (pow x 3.0))))))
double code(double x) {
return fabs((sqrt((1.0 / ((double) M_PI))) * ((fabs(x) * (2.0 + fma(0.2, pow(x, 4.0), (0.047619047619047616 * pow(x, 6.0))))) + (0.6666666666666666 * pow(x, 3.0)))));
}
function code(x) return abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(abs(x) * Float64(2.0 + fma(0.2, (x ^ 4.0), Float64(0.047619047619047616 * (x ^ 6.0))))) + Float64(0.6666666666666666 * (x ^ 3.0))))) end
code[x_] := N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(N[Abs[x], $MachinePrecision] * N[(2.0 + N[(0.2 * N[Power[x, 4.0], $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\sqrt{\frac{1}{\pi}} \cdot \left(\left|x\right| \cdot \left(2 + \mathsf{fma}\left(0.2, {x}^{4}, 0.047619047619047616 \cdot {x}^{6}\right)\right) + 0.6666666666666666 \cdot {x}^{3}\right)\right|
\end{array}
Initial program 99.9%
Simplified99.3%
Taylor expanded in x around 0 99.9%
Simplified79.4%
Final simplification79.4%
(FPCore (x)
:precision binary64
(*
(*
x
(+
(fma 0.2 (pow x 4.0) (* 0.047619047619047616 (pow x 6.0)))
(fma 0.6666666666666666 (pow x 2.0) 2.0)))
(pow PI -0.5)))
double code(double x) {
return (x * (fma(0.2, pow(x, 4.0), (0.047619047619047616 * pow(x, 6.0))) + fma(0.6666666666666666, pow(x, 2.0), 2.0))) * pow(((double) M_PI), -0.5);
}
function code(x) return Float64(Float64(x * Float64(fma(0.2, (x ^ 4.0), Float64(0.047619047619047616 * (x ^ 6.0))) + fma(0.6666666666666666, (x ^ 2.0), 2.0))) * (pi ^ -0.5)) end
code[x_] := N[(N[(x * N[(N[(0.2 * N[Power[x, 4.0], $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[Power[x, 2.0], $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot \left(\mathsf{fma}\left(0.2, {x}^{4}, 0.047619047619047616 \cdot {x}^{6}\right) + \mathsf{fma}\left(0.6666666666666666, {x}^{2}, 2\right)\right)\right) \cdot {\pi}^{-0.5}
\end{array}
Initial program 99.9%
Simplified99.3%
Applied egg-rr42.1%
Final simplification42.1%
(FPCore (x)
:precision binary64
(*
x
(/
(+
(fma 0.2 (pow x 4.0) (* 0.047619047619047616 (pow x 6.0)))
(fma 0.6666666666666666 (pow x 2.0) 2.0))
(sqrt PI))))
double code(double x) {
return x * ((fma(0.2, pow(x, 4.0), (0.047619047619047616 * pow(x, 6.0))) + fma(0.6666666666666666, pow(x, 2.0), 2.0)) / sqrt(((double) M_PI)));
}
function code(x) return Float64(x * Float64(Float64(fma(0.2, (x ^ 4.0), Float64(0.047619047619047616 * (x ^ 6.0))) + fma(0.6666666666666666, (x ^ 2.0), 2.0)) / sqrt(pi))) end
code[x_] := N[(x * N[(N[(N[(0.2 * N[Power[x, 4.0], $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[Power[x, 2.0], $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{\mathsf{fma}\left(0.2, {x}^{4}, 0.047619047619047616 \cdot {x}^{6}\right) + \mathsf{fma}\left(0.6666666666666666, {x}^{2}, 2\right)}{\sqrt{\pi}}
\end{array}
Initial program 99.9%
Simplified99.3%
Taylor expanded in x around 0 99.3%
associate-*r/99.3%
+-commutative99.3%
associate-+r+99.3%
+-commutative99.3%
fma-define99.3%
associate-+r+99.3%
fma-undefine99.3%
*-rgt-identity99.3%
Simplified42.1%
Final simplification42.1%
(FPCore (x)
:precision binary64
(fabs
(*
(/ x (sqrt PI))
(+
(+ (* 0.047619047619047616 (pow x 6.0)) (* 0.2 (pow x 4.0)))
(fma 0.6666666666666666 (* x x) 2.0)))))
double code(double x) {
return fabs(((x / sqrt(((double) M_PI))) * (((0.047619047619047616 * pow(x, 6.0)) + (0.2 * pow(x, 4.0))) + fma(0.6666666666666666, (x * x), 2.0))));
}
function code(x) return abs(Float64(Float64(x / sqrt(pi)) * Float64(Float64(Float64(0.047619047619047616 * (x ^ 6.0)) + Float64(0.2 * (x ^ 4.0))) + fma(0.6666666666666666, Float64(x * x), 2.0)))) end
code[x_] := N[Abs[N[(N[(x / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{x}{\sqrt{\pi}} \cdot \left(\left(0.047619047619047616 \cdot {x}^{6} + 0.2 \cdot {x}^{4}\right) + \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)\right)\right|
\end{array}
Initial program 99.9%
Simplified99.3%
*-un-lft-identity99.3%
add-sqr-sqrt40.5%
fabs-sqr40.5%
add-sqr-sqrt99.3%
Applied egg-rr99.3%
*-lft-identity99.3%
Simplified99.3%
metadata-eval99.3%
fma-undefine99.3%
metadata-eval99.3%
Applied egg-rr99.3%
Final simplification99.3%
(FPCore (x)
:precision binary64
(if (<= (fabs x) 0.1)
(*
(pow PI -0.5)
(/
x
(+
0.5
(+
(* (pow x 2.0) -0.16666666666666666)
(* (pow x 4.0) 0.005555555555555556)))))
(/ (- (pow x 7.0)) (* (sqrt PI) -21.0))))
double code(double x) {
    /* Two-regime evaluation (per the accompanying FPCore): a rational
       approximation near zero, and the single x^7 term elsewhere.
       Restyled with guard clauses instead of a tmp variable; each branch
       computes exactly the same expression as before. */
    if (fabs(x) <= 0.1) {
        double denom = 0.5 + ((pow(x, 2.0) * -0.16666666666666666)
                              + (pow(x, 4.0) * 0.005555555555555556));
        return pow((double) M_PI, -0.5) * (x / denom);
    }
    return -pow(x, 7.0) / (sqrt((double) M_PI) * -21.0);
}
/**
 * Two-regime evaluation: a rational approximation for |x| <= 0.1 and the
 * single x^7 term otherwise.  Restyled with early returns in place of the
 * tmp variable; each branch's expression is unchanged, so results are
 * bit-identical.
 */
public static double code(double x) {
    if (Math.abs(x) <= 0.1) {
        double denom = 0.5 + ((Math.pow(x, 2.0) * -0.16666666666666666)
                              + (Math.pow(x, 4.0) * 0.005555555555555556));
        return Math.pow(Math.PI, -0.5) * (x / denom);
    }
    return -Math.pow(x, 7.0) / (Math.sqrt(Math.PI) * -21.0);
}
def code(x):
    """Two-regime evaluation: rational form for |x| <= 0.1, x^7 term otherwise.

    Fix: the report collapsed this if/else function onto a single line,
    which is a SyntaxError in Python; reformatted.  Each branch evaluates
    exactly the original expression.
    """
    if math.fabs(x) <= 0.1:
        denom = 0.5 + ((math.pow(x, 2.0) * -0.16666666666666666)
                       + (math.pow(x, 4.0) * 0.005555555555555556))
        return math.pow(math.pi, -0.5) * (x / denom)
    return -math.pow(x, 7.0) / (math.sqrt(math.pi) * -21.0)
function code(x) tmp = 0.0 if (abs(x) <= 0.1) tmp = Float64((pi ^ -0.5) * Float64(x / Float64(0.5 + Float64(Float64((x ^ 2.0) * -0.16666666666666666) + Float64((x ^ 4.0) * 0.005555555555555556))))); else tmp = Float64(Float64(-(x ^ 7.0)) / Float64(sqrt(pi) * -21.0)); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (abs(x) <= 0.1) tmp = (pi ^ -0.5) * (x / (0.5 + (((x ^ 2.0) * -0.16666666666666666) + ((x ^ 4.0) * 0.005555555555555556)))); else tmp = -(x ^ 7.0) / (sqrt(pi) * -21.0); end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.1], N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x / N[(0.5 + N[(N[(N[Power[x, 2.0], $MachinePrecision] * -0.16666666666666666), $MachinePrecision] + N[(N[Power[x, 4.0], $MachinePrecision] * 0.005555555555555556), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[((-N[Power[x, 7.0], $MachinePrecision]) / N[(N[Sqrt[Pi], $MachinePrecision] * -21.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.1:\\
\;\;\;\;{\pi}^{-0.5} \cdot \frac{x}{0.5 + \left({x}^{2} \cdot -0.16666666666666666 + {x}^{4} \cdot 0.005555555555555556\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{-{x}^{7}}{\sqrt{\pi} \cdot -21}\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.10000000000000001: Initial program 99.9%
Simplified99.1%
Taylor expanded in x around 0 98.7%
add-sqr-sqrt55.0%
fabs-sqr55.0%
add-sqr-sqrt56.7%
*-un-lft-identity56.7%
add-sqr-sqrt57.0%
fabs-sqr57.0%
add-sqr-sqrt56.7%
div-inv56.7%
times-frac57.2%
pow1/257.2%
pow-flip57.2%
metadata-eval57.2%
Applied egg-rr57.2%
Taylor expanded in x around 0 57.2%
if 0.10000000000000001 < (fabs.f64 x) Initial program 99.9%
Simplified99.9%
Taylor expanded in x around inf 99.9%
associate-*r*99.9%
*-commutative99.9%
associate-*r/99.9%
metadata-eval99.9%
Simplified99.9%
associate-*r/99.9%
Applied egg-rr99.9%
frac-2neg99.9%
div-inv100.0%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt0.1%
add-sqr-sqrt0.1%
fabs-sqr0.1%
add-sqr-sqrt0.1%
distribute-neg-frac0.1%
distribute-rgt-neg-in0.1%
metadata-eval0.1%
Applied egg-rr0.1%
associate-*r/0.1%
*-rgt-identity0.1%
distribute-neg-frac0.1%
associate-/r/0.1%
distribute-rgt-neg-out0.1%
associate-*l/0.1%
distribute-rgt-neg-out0.1%
*-commutative0.1%
pow-plus0.1%
metadata-eval0.1%
Simplified0.1%
Final simplification41.8%
(FPCore (x) :precision binary64 (if (<= (fabs x) 0.1) (/ (* x (pow PI -0.5)) (fma (pow x 2.0) -0.16666666666666666 0.5)) (/ (- (pow x 7.0)) (* (sqrt PI) -21.0))))
double code(double x) {
double tmp;
if (fabs(x) <= 0.1) {
tmp = (x * pow(((double) M_PI), -0.5)) / fma(pow(x, 2.0), -0.16666666666666666, 0.5);
} else {
tmp = -pow(x, 7.0) / (sqrt(((double) M_PI)) * -21.0);
}
return tmp;
}
function code(x) tmp = 0.0 if (abs(x) <= 0.1) tmp = Float64(Float64(x * (pi ^ -0.5)) / fma((x ^ 2.0), -0.16666666666666666, 0.5)); else tmp = Float64(Float64(-(x ^ 7.0)) / Float64(sqrt(pi) * -21.0)); end return tmp end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.1], N[(N[(x * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision] / N[(N[Power[x, 2.0], $MachinePrecision] * -0.16666666666666666 + 0.5), $MachinePrecision]), $MachinePrecision], N[((-N[Power[x, 7.0], $MachinePrecision]) / N[(N[Sqrt[Pi], $MachinePrecision] * -21.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.1:\\
\;\;\;\;\frac{x \cdot {\pi}^{-0.5}}{\mathsf{fma}\left({x}^{2}, -0.16666666666666666, 0.5\right)}\\
\mathbf{else}:\\
\;\;\;\;\frac{-{x}^{7}}{\sqrt{\pi} \cdot -21}\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.10000000000000001Initial program 99.9%
Simplified99.1%
Taylor expanded in x around 0 98.7%
Taylor expanded in x around 0 98.4%
associate-*r*98.4%
distribute-rgt-out98.4%
*-commutative98.4%
Simplified98.4%
add-sqr-sqrt54.7%
fabs-sqr54.7%
add-sqr-sqrt56.4%
*-un-lft-identity56.4%
add-sqr-sqrt56.7%
fabs-sqr56.7%
add-sqr-sqrt56.4%
times-frac56.8%
pow1/256.8%
pow-flip56.8%
metadata-eval56.8%
fma-define56.8%
Applied egg-rr56.8%
associate-*r/56.8%
*-commutative56.8%
Simplified56.8%
if 0.10000000000000001 < (fabs.f64 x) Initial program 99.9%
Simplified99.9%
Taylor expanded in x around inf 99.9%
associate-*r*99.9%
*-commutative99.9%
associate-*r/99.9%
metadata-eval99.9%
Simplified99.9%
associate-*r/99.9%
Applied egg-rr99.9%
frac-2neg99.9%
div-inv100.0%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt0.1%
add-sqr-sqrt0.1%
fabs-sqr0.1%
add-sqr-sqrt0.1%
distribute-neg-frac0.1%
distribute-rgt-neg-in0.1%
metadata-eval0.1%
Applied egg-rr0.1%
associate-*r/0.1%
*-rgt-identity0.1%
distribute-neg-frac0.1%
associate-/r/0.1%
distribute-rgt-neg-out0.1%
associate-*l/0.1%
distribute-rgt-neg-out0.1%
*-commutative0.1%
pow-plus0.1%
metadata-eval0.1%
Simplified0.1%
Final simplification41.5%
(FPCore (x) :precision binary64 (if (<= (fabs x) 0.1) (* (pow PI -0.5) (/ x (+ 0.5 (* (pow x 2.0) -0.16666666666666666)))) (/ (- (pow x 7.0)) (* (sqrt PI) -21.0))))
double code(double x) {
double tmp;
if (fabs(x) <= 0.1) {
tmp = pow(((double) M_PI), -0.5) * (x / (0.5 + (pow(x, 2.0) * -0.16666666666666666)));
} else {
tmp = -pow(x, 7.0) / (sqrt(((double) M_PI)) * -21.0);
}
return tmp;
}
public static double code(double x) {
double tmp;
if (Math.abs(x) <= 0.1) {
tmp = Math.pow(Math.PI, -0.5) * (x / (0.5 + (Math.pow(x, 2.0) * -0.16666666666666666)));
} else {
tmp = -Math.pow(x, 7.0) / (Math.sqrt(Math.PI) * -21.0);
}
return tmp;
}
def code(x): tmp = 0 if math.fabs(x) <= 0.1: tmp = math.pow(math.pi, -0.5) * (x / (0.5 + (math.pow(x, 2.0) * -0.16666666666666666))) else: tmp = -math.pow(x, 7.0) / (math.sqrt(math.pi) * -21.0) return tmp
function code(x) tmp = 0.0 if (abs(x) <= 0.1) tmp = Float64((pi ^ -0.5) * Float64(x / Float64(0.5 + Float64((x ^ 2.0) * -0.16666666666666666)))); else tmp = Float64(Float64(-(x ^ 7.0)) / Float64(sqrt(pi) * -21.0)); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (abs(x) <= 0.1) tmp = (pi ^ -0.5) * (x / (0.5 + ((x ^ 2.0) * -0.16666666666666666))); else tmp = -(x ^ 7.0) / (sqrt(pi) * -21.0); end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.1], N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x / N[(0.5 + N[(N[Power[x, 2.0], $MachinePrecision] * -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[((-N[Power[x, 7.0], $MachinePrecision]) / N[(N[Sqrt[Pi], $MachinePrecision] * -21.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.1:\\
\;\;\;\;{\pi}^{-0.5} \cdot \frac{x}{0.5 + {x}^{2} \cdot -0.16666666666666666}\\
\mathbf{else}:\\
\;\;\;\;\frac{-{x}^{7}}{\sqrt{\pi} \cdot -21}\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.10000000000000001Initial program 99.9%
Simplified99.1%
Taylor expanded in x around 0 98.7%
add-sqr-sqrt55.0%
fabs-sqr55.0%
add-sqr-sqrt56.7%
*-un-lft-identity56.7%
add-sqr-sqrt57.0%
fabs-sqr57.0%
add-sqr-sqrt56.7%
div-inv56.7%
times-frac57.2%
pow1/257.2%
pow-flip57.2%
metadata-eval57.2%
Applied egg-rr57.2%
Taylor expanded in x around 0 56.8%
if 0.10000000000000001 < (fabs.f64 x) Initial program 99.9%
Simplified99.9%
Taylor expanded in x around inf 99.9%
associate-*r*99.9%
*-commutative99.9%
associate-*r/99.9%
metadata-eval99.9%
Simplified99.9%
associate-*r/99.9%
Applied egg-rr99.9%
frac-2neg99.9%
div-inv100.0%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt0.1%
add-sqr-sqrt0.1%
fabs-sqr0.1%
add-sqr-sqrt0.1%
distribute-neg-frac0.1%
distribute-rgt-neg-in0.1%
metadata-eval0.1%
Applied egg-rr0.1%
associate-*r/0.1%
*-rgt-identity0.1%
distribute-neg-frac0.1%
associate-/r/0.1%
distribute-rgt-neg-out0.1%
associate-*l/0.1%
distribute-rgt-neg-out0.1%
*-commutative0.1%
pow-plus0.1%
metadata-eval0.1%
Simplified0.1%
Final simplification41.5%
(FPCore (x) :precision binary64 (if (<= x 1.85) (* x (/ 2.0 (sqrt PI))) (/ (- (pow x 7.0)) (* (sqrt PI) -21.0))))
double code(double x) {
    /* Piecewise form: linear 2x/sqrt(pi) for x <= 1.85, and the x^7 tail
       term otherwise.  Restyled with early returns instead of a tmp
       variable; the per-branch expressions are unchanged. */
    if (x <= 1.85) {
        return x * (2.0 / sqrt((double) M_PI));
    }
    return -pow(x, 7.0) / (sqrt((double) M_PI) * -21.0);
}
public static double code(double x) {
double tmp;
if (x <= 1.85) {
tmp = x * (2.0 / Math.sqrt(Math.PI));
} else {
tmp = -Math.pow(x, 7.0) / (Math.sqrt(Math.PI) * -21.0);
}
return tmp;
}
def code(x):
    """Piecewise form: 2x/sqrt(pi) for x <= 1.85, x^7 tail term otherwise.

    Fix: the report collapsed this if/else function onto a single line,
    which is a SyntaxError in Python; reformatted.  Each branch evaluates
    exactly the original expression.
    """
    if x <= 1.85:
        return x * (2.0 / math.sqrt(math.pi))
    return -math.pow(x, 7.0) / (math.sqrt(math.pi) * -21.0)
function code(x) tmp = 0.0 if (x <= 1.85) tmp = Float64(x * Float64(2.0 / sqrt(pi))); else tmp = Float64(Float64(-(x ^ 7.0)) / Float64(sqrt(pi) * -21.0)); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= 1.85) tmp = x * (2.0 / sqrt(pi)); else tmp = -(x ^ 7.0) / (sqrt(pi) * -21.0); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, 1.85], N[(x * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[((-N[Power[x, 7.0], $MachinePrecision]) / N[(N[Sqrt[Pi], $MachinePrecision] * -21.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.85:\\
\;\;\;\;x \cdot \frac{2}{\sqrt{\pi}}\\
\mathbf{else}:\\
\;\;\;\;\frac{-{x}^{7}}{\sqrt{\pi} \cdot -21}\\
\end{array}
\end{array}
if x < 1.8500000000000001Initial program 99.9%
Simplified99.3%
Taylor expanded in x around 0 72.7%
*-commutative72.7%
Simplified72.7%
Taylor expanded in x around 0 72.7%
fabs-neg72.7%
*-commutative72.7%
fabs-div72.7%
neg-mul-172.7%
*-commutative72.7%
times-frac72.7%
metadata-eval72.7%
metadata-eval72.7%
distribute-lft-neg-in72.7%
fabs-neg72.7%
rem-square-sqrt39.4%
fabs-sqr39.4%
rem-square-sqrt40.7%
*-commutative40.7%
metadata-eval40.7%
times-frac40.7%
associate-*r/41.0%
*-commutative41.0%
associate-/r*41.0%
metadata-eval41.0%
Simplified41.0%
if 1.8500000000000001 < x Initial program 99.9%
Simplified99.3%
Taylor expanded in x around inf 31.0%
associate-*r*31.0%
*-commutative31.0%
associate-*r/31.0%
metadata-eval31.0%
Simplified31.0%
associate-*r/31.0%
Applied egg-rr31.0%
frac-2neg31.0%
div-inv31.0%
add-sqr-sqrt2.3%
fabs-sqr2.3%
add-sqr-sqrt3.9%
add-sqr-sqrt3.9%
fabs-sqr3.9%
add-sqr-sqrt3.9%
distribute-neg-frac3.9%
distribute-rgt-neg-in3.9%
metadata-eval3.9%
Applied egg-rr3.9%
associate-*r/3.9%
*-rgt-identity3.9%
distribute-neg-frac3.9%
associate-/r/3.9%
distribute-rgt-neg-out3.9%
associate-*l/3.9%
distribute-rgt-neg-out3.9%
*-commutative3.9%
pow-plus3.9%
metadata-eval3.9%
Simplified3.9%
Final simplification41.0%
(FPCore (x) :precision binary64 (if (<= x 1.85) (* x (/ 2.0 (sqrt PI))) (* 0.047619047619047616 (/ (pow x 7.0) (sqrt PI)))))
double code(double x) {
double tmp;
if (x <= 1.85) {
tmp = x * (2.0 / sqrt(((double) M_PI)));
} else {
tmp = 0.047619047619047616 * (pow(x, 7.0) / sqrt(((double) M_PI)));
}
return tmp;
}
public static double code(double x) {
double tmp;
if (x <= 1.85) {
tmp = x * (2.0 / Math.sqrt(Math.PI));
} else {
tmp = 0.047619047619047616 * (Math.pow(x, 7.0) / Math.sqrt(Math.PI));
}
return tmp;
}
def code(x): tmp = 0 if x <= 1.85: tmp = x * (2.0 / math.sqrt(math.pi)) else: tmp = 0.047619047619047616 * (math.pow(x, 7.0) / math.sqrt(math.pi)) return tmp
function code(x) tmp = 0.0 if (x <= 1.85) tmp = Float64(x * Float64(2.0 / sqrt(pi))); else tmp = Float64(0.047619047619047616 * Float64((x ^ 7.0) / sqrt(pi))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= 1.85) tmp = x * (2.0 / sqrt(pi)); else tmp = 0.047619047619047616 * ((x ^ 7.0) / sqrt(pi)); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, 1.85], N[(x * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.047619047619047616 * N[(N[Power[x, 7.0], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.85:\\
\;\;\;\;x \cdot \frac{2}{\sqrt{\pi}}\\
\mathbf{else}:\\
\;\;\;\;0.047619047619047616 \cdot \frac{{x}^{7}}{\sqrt{\pi}}\\
\end{array}
\end{array}
if x < 1.8500000000000001Initial program 99.9%
Simplified99.3%
Taylor expanded in x around 0 72.7%
*-commutative72.7%
Simplified72.7%
Taylor expanded in x around 0 72.7%
fabs-neg72.7%
*-commutative72.7%
fabs-div72.7%
neg-mul-172.7%
*-commutative72.7%
times-frac72.7%
metadata-eval72.7%
metadata-eval72.7%
distribute-lft-neg-in72.7%
fabs-neg72.7%
rem-square-sqrt39.4%
fabs-sqr39.4%
rem-square-sqrt40.7%
*-commutative40.7%
metadata-eval40.7%
times-frac40.7%
associate-*r/41.0%
*-commutative41.0%
associate-/r*41.0%
metadata-eval41.0%
Simplified41.0%
if 1.8500000000000001 < x Initial program 99.9%
Simplified99.3%
Taylor expanded in x around inf 31.0%
associate-*r*31.0%
*-commutative31.0%
associate-*r/31.0%
metadata-eval31.0%
Simplified31.0%
associate-*r/31.0%
Applied egg-rr31.0%
Taylor expanded in x around 0 31.0%
fabs-mul31.0%
metadata-eval31.0%
associate-/r*31.0%
associate-*l/31.0%
*-lft-identity31.0%
associate-/r*31.0%
fabs-neg31.0%
metadata-eval31.0%
fabs-mul31.0%
fabs-div31.0%
neg-mul-131.0%
times-frac31.0%
metadata-eval31.0%
metadata-eval31.0%
associate-/l*30.9%
Simplified3.9%
Final simplification41.0%
(FPCore (x) :precision binary64 (* 2.0 (/ x (sqrt PI))))
double code(double x) {
    /* 2 * (x / sqrt(pi)); the division happens first, exactly as in the
       original, so rounding (and thus the result) is unchanged. */
    double scaled = x / sqrt((double) M_PI);
    return 2.0 * scaled;
}
public static double code(double x) {
return 2.0 * (x / Math.sqrt(Math.PI));
}
def code(x):
    """Return 2 * (x / sqrt(pi)), dividing first exactly as the original
    expression does, so the rounded result is unchanged."""
    scaled = x / math.sqrt(math.pi)
    return 2.0 * scaled
function code(x) return Float64(2.0 * Float64(x / sqrt(pi))) end
function tmp = code(x) tmp = 2.0 * (x / sqrt(pi)); end
code[x_] := N[(2.0 * N[(x / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
2 \cdot \frac{x}{\sqrt{\pi}}
\end{array}
Initial program 99.9%
Simplified99.3%
Taylor expanded in x around 0 72.7%
*-commutative72.7%
Simplified72.7%
add-sqr-sqrt39.4%
fabs-sqr39.4%
add-sqr-sqrt39.5%
fabs-sqr39.5%
add-sqr-sqrt40.9%
*-un-lft-identity40.9%
add-sqr-sqrt40.7%
*-commutative40.7%
times-frac40.7%
metadata-eval40.7%
Applied egg-rr40.7%
Final simplification40.7%
(FPCore (x) :precision binary64 (* x (/ 2.0 (sqrt PI))))
double code(double x) {
return x * (2.0 / sqrt(((double) M_PI)));
}
public static double code(double x) {
return x * (2.0 / Math.sqrt(Math.PI));
}
def code(x):
    """Return x * (2 / sqrt(pi)); the constant factor is formed first,
    exactly as in the original expression, so the result is unchanged."""
    factor = 2.0 / math.sqrt(math.pi)
    return x * factor
function code(x) return Float64(x * Float64(2.0 / sqrt(pi))) end
function tmp = code(x) tmp = x * (2.0 / sqrt(pi)); end
code[x_] := N[(x * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{2}{\sqrt{\pi}}
\end{array}
Initial program 99.9%
Simplified99.3%
Taylor expanded in x around 0 72.7%
*-commutative72.7%
Simplified72.7%
Taylor expanded in x around 0 72.7%
fabs-neg72.7%
*-commutative72.7%
fabs-div72.7%
neg-mul-172.7%
*-commutative72.7%
times-frac72.7%
metadata-eval72.7%
metadata-eval72.7%
distribute-lft-neg-in72.7%
fabs-neg72.7%
rem-square-sqrt39.4%
fabs-sqr39.4%
rem-square-sqrt40.7%
*-commutative40.7%
metadata-eval40.7%
times-frac40.7%
associate-*r/41.0%
*-commutative41.0%
associate-/r*41.0%
metadata-eval41.0%
Simplified41.0%
Final simplification41.0%
herbie shell --seed 2024032
; Input benchmark exactly as given to Herbie (seed on the line above):
; the x <= 0.5 branch of Jmat.Real.erfi, written as a fully expanded
; odd-power polynomial in |x| scaled by 1/sqrt(PI).
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))