
;; Original program: truncated Maclaurin series for erfi(|x|),
;; (2/sqrt(pi)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42).
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* Truncated Maclaurin series for erfi(|x|):
 *   (2/sqrt(pi)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42)
 * evaluated with exactly the same operation order as the FPCore spec above,
 * so binary64 rounding is unchanged. */
double code(double x) {
  double a = fabs(x);
  double a3 = (a * a) * a;   /* |x|^3 */
  double a5 = (a3 * a) * a;  /* |x|^5 */
  double a7 = (a5 * a) * a;  /* |x|^7 */
  double series = (((2.0 * a) + ((2.0 / 3.0) * a3)) + ((1.0 / 5.0) * a5))
                  + ((1.0 / 21.0) * a7);
  return fabs((1.0 / sqrt((double) M_PI)) * series);
}
/**
 * Truncated Maclaurin series for erfi(|x|):
 * (2/sqrt(pi)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42).
 * Operation order matches the generated FPCore spec, so rounding is identical.
 */
public static double code(double x) {
  double a = Math.abs(x);
  double a3 = (a * a) * a;   // |x|^3
  double a5 = (a3 * a) * a;  // |x|^5
  double a7 = (a5 * a) * a;  // |x|^7
  double series = (((2.0 * a) + ((2.0 / 3.0) * a3)) + ((1.0 / 5.0) * a5))
      + ((1.0 / 21.0) * a7);
  return Math.abs((1.0 / Math.sqrt(Math.PI)) * series);
}
def code(x):
    # Truncated Maclaurin series for erfi(|x|):
    #   (2/sqrt(pi)) * (|x| + |x|**3/3 + |x|**5/10 + |x|**7/42)
    # with the exact operation order of the FPCore spec (rounding unchanged).
    a = math.fabs(x)
    a3 = (a * a) * a
    a5 = (a3 * a) * a
    a7 = (a5 * a) * a
    series = (((2.0 * a) + ((2.0 / 3.0) * a3)) + ((1.0 / 5.0) * a5)) \
        + ((1.0 / 21.0) * a7)
    return math.fabs((1.0 / math.sqrt(math.pi)) * series)
# Truncated Maclaurin series for erfi(|x|); every Float64(...) wrapper pins the
# intermediate to binary64, mirroring the FPCore spec.
function code(x)
    a = abs(x)
    a3 = Float64(Float64(a * a) * a)
    a5 = Float64(Float64(a3 * a) * a)
    a7 = Float64(Float64(a5 * a) * a)
    series = Float64(Float64(Float64(Float64(2.0 * a) + Float64(Float64(2.0 / 3.0) * a3)) +
                             Float64(Float64(1.0 / 5.0) * a5)) +
                     Float64(Float64(1.0 / 21.0) * a7))
    return abs(Float64(Float64(1.0 / sqrt(pi)) * series))
end
function tmp = code(x)
  % Truncated Maclaurin series for erfi(|x|):
  % (2/sqrt(pi)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42).
  a = abs(x);
  a3 = (a * a) * a;
  a5 = (a3 * a) * a;
  a7 = (a5 * a) * a;
  series = (((2.0 * a) + ((2.0 / 3.0) * a3)) + ((1.0 / 5.0) * a5)) + ((1.0 / 21.0) * a7);
  tmp = abs((1.0 / sqrt(pi)) * series);
end
(* Herbie-generated Mathematica form of the erfi series; each N[..., $MachinePrecision] mirrors one binary64 rounding step of the FPCore spec. *)
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* Verbatim duplicate of the original program: truncated Maclaurin series for
 * erfi(|x|), (2/sqrt(pi)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42). */
double code(double x) {
double t_0 = (fabs(x) * fabs(x)) * fabs(x);
double t_1 = (t_0 * fabs(x)) * fabs(x);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
/** Truncated Maclaurin series for erfi(|x|): (2/sqrt(pi)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42). */
public static double code(double x) {
double t_0 = (Math.abs(x) * Math.abs(x)) * Math.abs(x);
double t_1 = (t_0 * Math.abs(x)) * Math.abs(x);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * Math.abs(x)) * Math.abs(x))))));
}
def code(x):
    # Truncated Maclaurin series for erfi(|x|),
    # (2/sqrt(pi)) * (|x| + |x|**3/3 + |x|**5/10 + |x|**7/42),
    # evaluated in the same operation order as the FPCore spec.
    a = math.fabs(x)
    t_0 = (a * a) * a
    t_1 = (t_0 * a) * a
    tail = (1.0 / 21.0) * ((t_1 * a) * a)
    head = ((2.0 * a) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)
    return math.fabs((1.0 / math.sqrt(math.pi)) * (head + tail))
function code(x)
    # Truncated Maclaurin series for erfi(|x|); Float64(...) pins each
    # intermediate to binary64, as in the FPCore spec.
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
function tmp = code(x)
  % Truncated Maclaurin series for erfi(|x|).
  t_0 = (abs(x) * abs(x)) * abs(x);
  t_1 = (t_0 * abs(x)) * abs(x);
  tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x))))));
end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(fabs
(*
x
(/
1.0
(/
(sqrt PI)
(+
(fma (* x 0.6666666666666666) x 2.0)
(fma 0.2 (pow x 4.0) (* 0.047619047619047616 (pow x 6.0)))))))))
/* Herbie alternative: same series written as
 * x / (sqrt(pi) / (fma(x*2/3, x, 2) + fma(0.2, x^4, x^6/21))). */
double code(double x) {
return fabs((x * (1.0 / (sqrt(((double) M_PI)) / (fma((x * 0.6666666666666666), x, 2.0) + fma(0.2, pow(x, 4.0), (0.047619047619047616 * pow(x, 6.0))))))));
}
function code(x)
    # erfi series via fma: x / (sqrt(pi) / (fma(x*2/3, x, 2) + fma(0.2, x^4, x^6/21))).
    denom = Float64(fma(Float64(x * 0.6666666666666666), x, 2.0) + fma(0.2, (x ^ 4.0), Float64(0.047619047619047616 * (x ^ 6.0))))
    return abs(Float64(x * Float64(1.0 / Float64(sqrt(pi) / denom))))
end
code[x_] := N[Abs[N[(x * N[(1.0 / N[(N[Sqrt[Pi], $MachinePrecision] / N[(N[(N[(x * 0.6666666666666666), $MachinePrecision] * x + 2.0), $MachinePrecision] + N[(0.2 * N[Power[x, 4.0], $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|x \cdot \frac{1}{\frac{\sqrt{\pi}}{\mathsf{fma}\left(x \cdot 0.6666666666666666, x, 2\right) + \mathsf{fma}\left(0.2, {x}^{4}, 0.047619047619047616 \cdot {x}^{6}\right)}}\right|
\end{array}
Initial program 99.5%
Simplified99.4%
associate-/l*99.4%
add-sqr-sqrt34.0%
fabs-sqr34.0%
add-sqr-sqrt99.4%
div-inv99.9%
add-sqr-sqrt34.3%
fabs-sqr34.3%
add-sqr-sqrt99.9%
add-sqr-sqrt99.8%
pow299.8%
Applied egg-rr99.9%
Final simplification99.9%
(FPCore (x)
:precision binary64
(fabs
(*
(*
x
(+
(+ 2.0 (* 0.6666666666666666 (* x x)))
(+ (* 0.047619047619047616 (pow x 6.0)) (* 0.2 (pow x 4.0)))))
(pow PI -0.5))))
/* Herbie alternative: polynomial form x*(2 + (2/3)x^2 + x^6/21 + x^4/5)
 * scaled by pow(pi, -0.5). */
double code(double x) {
return fabs(((x * ((2.0 + (0.6666666666666666 * (x * x))) + ((0.047619047619047616 * pow(x, 6.0)) + (0.2 * pow(x, 4.0))))) * pow(((double) M_PI), -0.5)));
}
/** Herbie alternative: x * (2 + (2/3)x^2 + x^6/21 + x^4/5), scaled by pi^-0.5. */
public static double code(double x) {
return Math.abs(((x * ((2.0 + (0.6666666666666666 * (x * x))) + ((0.047619047619047616 * Math.pow(x, 6.0)) + (0.2 * Math.pow(x, 4.0))))) * Math.pow(Math.PI, -0.5)));
}
def code(x):
    # Herbie alternative: x * (2 + (2/3)x^2 + x^6/21 + x^4/5), scaled by pi**-0.5.
    poly = (2.0 + (0.6666666666666666 * (x * x))) \
        + ((0.047619047619047616 * math.pow(x, 6.0)) + (0.2 * math.pow(x, 4.0)))
    return math.fabs((x * poly) * math.pow(math.pi, -0.5))
function code(x)
    # Polynomial form x*(2 + (2/3)x^2 + x^6/21 + x^4/5) scaled by pi^-0.5.
    poly = Float64(Float64(2.0 + Float64(0.6666666666666666 * Float64(x * x))) + Float64(Float64(0.047619047619047616 * (x ^ 6.0)) + Float64(0.2 * (x ^ 4.0))))
    return abs(Float64(Float64(x * poly) * (pi ^ -0.5)))
end
function tmp = code(x)
  % Polynomial form x*(2 + (2/3)x^2 + x^6/21 + x^4/5) scaled by pi^-0.5.
  tmp = abs(((x * ((2.0 + (0.6666666666666666 * (x * x))) + ((0.047619047619047616 * (x ^ 6.0)) + (0.2 * (x ^ 4.0))))) * (pi ^ -0.5)));
end
code[x_] := N[Abs[N[(N[(x * N[(N[(2.0 + N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\left(x \cdot \left(\left(2 + 0.6666666666666666 \cdot \left(x \cdot x\right)\right) + \left(0.047619047619047616 \cdot {x}^{6} + 0.2 \cdot {x}^{4}\right)\right)\right) \cdot {\pi}^{-0.5}\right|
\end{array}
Initial program 99.5%
Simplified99.4%
div-inv99.8%
Applied egg-rr99.8%
Taylor expanded in x around 0 99.8%
fma-udef92.3%
*-commutative92.3%
associate-*r*92.3%
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (x)
:precision binary64
(fabs
(*
(pow PI -0.5)
(*
x
(+
(fma (* x 0.6666666666666666) x 2.0)
(* 0.047619047619047616 (pow x 6.0)))))))
/* Herbie alternative: keeps the 2x, (2/3)x^3 and x^7/21 series terms (the
 * x^5 term is dropped), scaled by pow(pi, -0.5). */
double code(double x) {
return fabs((pow(((double) M_PI), -0.5) * (x * (fma((x * 0.6666666666666666), x, 2.0) + (0.047619047619047616 * pow(x, 6.0))))));
}
function code(x)
    # Keeps the 2x, (2/3)x^3 and x^7/21 terms (x^5 dropped), scaled by pi^-0.5.
    inner = Float64(fma(Float64(x * 0.6666666666666666), x, 2.0) + Float64(0.047619047619047616 * (x ^ 6.0)))
    return abs(Float64((pi ^ -0.5) * Float64(x * inner)))
end
code[x_] := N[Abs[N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x * N[(N[(N[(x * 0.6666666666666666), $MachinePrecision] * x + 2.0), $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|{\pi}^{-0.5} \cdot \left(x \cdot \left(\mathsf{fma}\left(x \cdot 0.6666666666666666, x, 2\right) + 0.047619047619047616 \cdot {x}^{6}\right)\right)\right|
\end{array}
Initial program 99.5%
Simplified99.4%
div-inv99.8%
Applied egg-rr99.8%
Taylor expanded in x around inf 98.9%
Final simplification98.9%
(FPCore (x)
:precision binary64
(if (<= x 2.7)
(fabs
(*
(pow PI -0.5)
(* x (+ (+ 2.0 (* 0.6666666666666666 (* x x))) (* 0.2 (pow x 4.0))))))
(fabs (* (pow x 7.0) (/ 0.047619047619047616 (sqrt PI))))))
/* Branched alternative: for x <= 2.7 use the series through x^5 (scaled by
 * pi^-0.5); for larger x keep only the dominant x^7/21 term. */
double code(double x) {
  if (x <= 2.7) {
    double poly = (2.0 + (0.6666666666666666 * (x * x))) + (0.2 * pow(x, 4.0));
    return fabs(pow((double) M_PI, -0.5) * (x * poly));
  }
  return fabs(pow(x, 7.0) * (0.047619047619047616 / sqrt((double) M_PI)));
}
/** Branch at x = 2.7: series through x^5 (scaled by pi^-0.5) for small x; only the x^7/21 term for large x. */
public static double code(double x) {
double tmp;
if (x <= 2.7) {
tmp = Math.abs((Math.pow(Math.PI, -0.5) * (x * ((2.0 + (0.6666666666666666 * (x * x))) + (0.2 * Math.pow(x, 4.0))))));
} else {
tmp = Math.abs((Math.pow(x, 7.0) * (0.047619047619047616 / Math.sqrt(Math.PI))));
}
return tmp;
}
def code(x):
    # x <= 2.7: series through x^5 (scaled by pi**-0.5); otherwise only the
    # dominant x^7/21 term is kept.
    if x <= 2.7:
        poly = (2.0 + (0.6666666666666666 * (x * x))) + (0.2 * math.pow(x, 4.0))
        return math.fabs(math.pow(math.pi, -0.5) * (x * poly))
    return math.fabs(math.pow(x, 7.0) * (0.047619047619047616 / math.sqrt(math.pi)))
function code(x)
    # x <= 2.7: series through x^5 (scaled by pi^-0.5); else only the x^7/21 term.
    tmp = 0.0
    if (x <= 2.7)
        tmp = abs(Float64((pi ^ -0.5) * Float64(x * Float64(Float64(2.0 + Float64(0.6666666666666666 * Float64(x * x))) + Float64(0.2 * (x ^ 4.0))))))
    else
        tmp = abs(Float64((x ^ 7.0) * Float64(0.047619047619047616 / sqrt(pi))))
    end
    return tmp
end
function tmp_2 = code(x)
  % x <= 2.7: series through x^5 (scaled by pi^-0.5); else only the x^7/21 term.
  tmp = 0.0;
  if (x <= 2.7)
    tmp = abs(((pi ^ -0.5) * (x * ((2.0 + (0.6666666666666666 * (x * x))) + (0.2 * (x ^ 4.0))))));
  else
    tmp = abs(((x ^ 7.0) * (0.047619047619047616 / sqrt(pi))));
  end
  tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 2.7], N[Abs[N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x * N[(N[(2.0 + N[(0.6666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[x, 7.0], $MachinePrecision] * N[(0.047619047619047616 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.7:\\
\;\;\;\;\left|{\pi}^{-0.5} \cdot \left(x \cdot \left(\left(2 + 0.6666666666666666 \cdot \left(x \cdot x\right)\right) + 0.2 \cdot {x}^{4}\right)\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{x}^{7} \cdot \frac{0.047619047619047616}{\sqrt{\pi}}\right|\\
\end{array}
\end{array}
if x < 2.7000000000000002Initial program 99.5%
Simplified99.4%
div-inv99.8%
Applied egg-rr99.8%
Taylor expanded in x around 0 92.3%
fma-udef92.3%
*-commutative92.3%
associate-*r*92.3%
Applied egg-rr92.3%
if 2.7000000000000002 < x Initial program 99.5%
Simplified99.4%
Taylor expanded in x around 0 98.3%
*-commutative98.3%
*-commutative98.3%
associate-+r+98.3%
distribute-lft-in98.3%
fma-def98.3%
rem-square-sqrt34.0%
fabs-sqr34.0%
rem-square-sqrt76.0%
+-commutative76.0%
fma-def76.0%
rem-square-sqrt34.2%
fabs-sqr34.2%
rem-square-sqrt76.0%
*-commutative76.0%
Simplified98.0%
Taylor expanded in x around inf 34.8%
expm1-log1p-u3.9%
expm1-udef3.7%
sqrt-div3.7%
metadata-eval3.7%
Applied egg-rr3.7%
expm1-def3.9%
expm1-log1p34.8%
associate-*l/34.8%
*-lft-identity34.8%
*-commutative34.8%
associate-*r/34.8%
Simplified34.8%
Final simplification92.3%
(FPCore (x)
:precision binary64
(if (<= x 2.2)
(fabs
(* (sqrt (/ 1.0 PI)) (+ (* x 2.0) (* 0.6666666666666666 (pow x 3.0)))))
(fabs (* (pow x 7.0) (/ 0.047619047619047616 (sqrt PI))))))
/* Branch at x = 2.2: series through x^3, sqrt(1/pi) * (2x + (2/3)x^3), for
 * small x; only the dominant x^7/21 term for large x. */
double code(double x) {
double tmp;
if (x <= 2.2) {
tmp = fabs((sqrt((1.0 / ((double) M_PI))) * ((x * 2.0) + (0.6666666666666666 * pow(x, 3.0)))));
} else {
tmp = fabs((pow(x, 7.0) * (0.047619047619047616 / sqrt(((double) M_PI)))));
}
return tmp;
}
/** Branch at x = 2.2: sqrt(1/pi) * (2x + (2/3)x^3) for small x; only the x^7/21 term for large x. */
public static double code(double x) {
double tmp;
if (x <= 2.2) {
tmp = Math.abs((Math.sqrt((1.0 / Math.PI)) * ((x * 2.0) + (0.6666666666666666 * Math.pow(x, 3.0)))));
} else {
tmp = Math.abs((Math.pow(x, 7.0) * (0.047619047619047616 / Math.sqrt(Math.PI))));
}
return tmp;
}
def code(x):
    # x <= 2.2: first two series terms, sqrt(1/pi) * (2x + (2/3)x^3);
    # otherwise only the x^7/21 term.
    if x <= 2.2:
        return math.fabs(math.sqrt(1.0 / math.pi)
                         * ((x * 2.0) + (0.6666666666666666 * math.pow(x, 3.0))))
    return math.fabs(math.pow(x, 7.0) * (0.047619047619047616 / math.sqrt(math.pi)))
function code(x)
    # x <= 2.2: sqrt(1/pi) * (2x + (2/3)x^3); else only the x^7/21 term.
    tmp = 0.0
    if (x <= 2.2)
        tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(x * 2.0) + Float64(0.6666666666666666 * (x ^ 3.0)))))
    else
        tmp = abs(Float64((x ^ 7.0) * Float64(0.047619047619047616 / sqrt(pi))))
    end
    return tmp
end
function tmp_2 = code(x)
  % x <= 2.2: sqrt(1/pi) * (2x + (2/3)x^3); else only the x^7/21 term.
  tmp = 0.0;
  if (x <= 2.2)
    tmp = abs((sqrt((1.0 / pi)) * ((x * 2.0) + (0.6666666666666666 * (x ^ 3.0)))));
  else
    tmp = abs(((x ^ 7.0) * (0.047619047619047616 / sqrt(pi))));
  end
  tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 2.2], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * 2.0), $MachinePrecision] + N[(0.6666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[x, 7.0], $MachinePrecision] * N[(0.047619047619047616 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.2:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot 2 + 0.6666666666666666 \cdot {x}^{3}\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{x}^{7} \cdot \frac{0.047619047619047616}{\sqrt{\pi}}\right|\\
\end{array}
\end{array}
if x < 2.2000000000000002Initial program 99.5%
Simplified99.4%
associate-/l*99.4%
add-sqr-sqrt34.0%
fabs-sqr34.0%
add-sqr-sqrt99.4%
div-inv99.9%
add-sqr-sqrt34.3%
fabs-sqr34.3%
add-sqr-sqrt99.9%
add-sqr-sqrt99.8%
pow299.8%
Applied egg-rr99.9%
Taylor expanded in x around 0 88.3%
+-commutative88.3%
associate-*r*88.3%
associate-*r*88.3%
distribute-rgt-out88.3%
Simplified88.3%
if 2.2000000000000002 < x Initial program 99.5%
Simplified99.4%
Taylor expanded in x around 0 98.3%
*-commutative98.3%
*-commutative98.3%
associate-+r+98.3%
distribute-lft-in98.3%
fma-def98.3%
rem-square-sqrt34.0%
fabs-sqr34.0%
rem-square-sqrt76.0%
+-commutative76.0%
fma-def76.0%
rem-square-sqrt34.2%
fabs-sqr34.2%
rem-square-sqrt76.0%
*-commutative76.0%
Simplified98.0%
Taylor expanded in x around inf 34.8%
expm1-log1p-u3.9%
expm1-udef3.7%
sqrt-div3.7%
metadata-eval3.7%
Applied egg-rr3.7%
expm1-def3.9%
expm1-log1p34.8%
associate-*l/34.8%
*-lft-identity34.8%
*-commutative34.8%
associate-*r/34.8%
Simplified34.8%
Final simplification88.3%
(FPCore (x) :precision binary64 (if (<= x 2.2) (fabs (* x (* (pow PI -0.5) (fma x (* x 0.6666666666666666) 2.0)))) (fabs (* (pow x 7.0) (/ 0.047619047619047616 (sqrt PI))))))
/* Branch at x = 2.2: fma form x * (pi^-0.5 * fma(x, x*2/3, 2)) for small x;
 * only the x^7/21 term for large x. */
double code(double x) {
double tmp;
if (x <= 2.2) {
tmp = fabs((x * (pow(((double) M_PI), -0.5) * fma(x, (x * 0.6666666666666666), 2.0))));
} else {
tmp = fabs((pow(x, 7.0) * (0.047619047619047616 / sqrt(((double) M_PI)))));
}
return tmp;
}
function code(x)
    # x <= 2.2: x * (pi^-0.5 * fma(x, x*2/3, 2)); else only the x^7/21 term.
    tmp = 0.0
    if (x <= 2.2)
        tmp = abs(Float64(x * Float64((pi ^ -0.5) * fma(x, Float64(x * 0.6666666666666666), 2.0))))
    else
        tmp = abs(Float64((x ^ 7.0) * Float64(0.047619047619047616 / sqrt(pi))))
    end
    return tmp
end
code[x_] := If[LessEqual[x, 2.2], N[Abs[N[(x * N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(x * N[(x * 0.6666666666666666), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[x, 7.0], $MachinePrecision] * N[(0.047619047619047616 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.2:\\
\;\;\;\;\left|x \cdot \left({\pi}^{-0.5} \cdot \mathsf{fma}\left(x, x \cdot 0.6666666666666666, 2\right)\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{x}^{7} \cdot \frac{0.047619047619047616}{\sqrt{\pi}}\right|\\
\end{array}
\end{array}
if x < 2.2000000000000002Initial program 99.5%
Simplified99.4%
associate-/l*99.4%
add-sqr-sqrt34.0%
fabs-sqr34.0%
add-sqr-sqrt99.4%
div-inv99.9%
add-sqr-sqrt34.3%
fabs-sqr34.3%
add-sqr-sqrt99.9%
add-sqr-sqrt99.8%
pow299.8%
Applied egg-rr99.9%
Taylor expanded in x around 0 88.3%
+-commutative88.3%
associate-*r*88.3%
associate-*r*88.3%
distribute-rgt-out88.3%
unpow388.3%
unpow288.3%
associate-*r*88.3%
unpow288.3%
associate-*l*88.3%
*-commutative88.3%
distribute-rgt-out88.3%
+-commutative88.3%
*-commutative88.3%
fma-def88.3%
Simplified88.3%
expm1-log1p-u67.5%
expm1-udef5.9%
pow1/25.9%
inv-pow5.9%
pow-pow5.9%
metadata-eval5.9%
Applied egg-rr5.9%
expm1-def67.5%
expm1-log1p88.3%
*-commutative88.3%
associate-*l*88.3%
Simplified88.3%
if 2.2000000000000002 < x Initial program 99.5%
Simplified99.4%
Taylor expanded in x around 0 98.3%
*-commutative98.3%
*-commutative98.3%
associate-+r+98.3%
distribute-lft-in98.3%
fma-def98.3%
rem-square-sqrt34.0%
fabs-sqr34.0%
rem-square-sqrt76.0%
+-commutative76.0%
fma-def76.0%
rem-square-sqrt34.2%
fabs-sqr34.2%
rem-square-sqrt76.0%
*-commutative76.0%
Simplified98.0%
Taylor expanded in x around inf 34.8%
expm1-log1p-u3.9%
expm1-udef3.7%
sqrt-div3.7%
metadata-eval3.7%
Applied egg-rr3.7%
expm1-def3.9%
expm1-log1p34.8%
associate-*l/34.8%
*-lft-identity34.8%
*-commutative34.8%
associate-*r/34.8%
Simplified34.8%
Final simplification88.3%
(FPCore (x) :precision binary64 (if (<= x 2.2) (fabs (* (sqrt (/ 1.0 PI)) (* x (+ 2.0 (* x (* x 0.6666666666666666)))))) (fabs (* (pow x 7.0) (/ 0.047619047619047616 (sqrt PI))))))
/* Branch at x = 2.2: sqrt(1/pi) * x * (2 + (2/3)x^2) for small x; only the
 * x^7/21 term for large x. */
double code(double x) {
double tmp;
if (x <= 2.2) {
tmp = fabs((sqrt((1.0 / ((double) M_PI))) * (x * (2.0 + (x * (x * 0.6666666666666666))))));
} else {
tmp = fabs((pow(x, 7.0) * (0.047619047619047616 / sqrt(((double) M_PI)))));
}
return tmp;
}
/** Branch at x = 2.2: sqrt(1/pi) * x * (2 + (2/3)x^2) for small x; only the x^7/21 term for large x. */
public static double code(double x) {
double tmp;
if (x <= 2.2) {
tmp = Math.abs((Math.sqrt((1.0 / Math.PI)) * (x * (2.0 + (x * (x * 0.6666666666666666))))));
} else {
tmp = Math.abs((Math.pow(x, 7.0) * (0.047619047619047616 / Math.sqrt(Math.PI))));
}
return tmp;
}
def code(x):
    # x <= 2.2: sqrt(1/pi) * x * (2 + x*(x*2/3)); otherwise only the
    # x^7/21 term.
    if x <= 2.2:
        inner = 2.0 + (x * (x * 0.6666666666666666))
        return math.fabs(math.sqrt(1.0 / math.pi) * (x * inner))
    return math.fabs(math.pow(x, 7.0) * (0.047619047619047616 / math.sqrt(math.pi)))
function code(x)
    # x <= 2.2: sqrt(1/pi) * x * (2 + (2/3)x^2); else only the x^7/21 term.
    tmp = 0.0
    if (x <= 2.2)
        tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(x * Float64(2.0 + Float64(x * Float64(x * 0.6666666666666666))))))
    else
        tmp = abs(Float64((x ^ 7.0) * Float64(0.047619047619047616 / sqrt(pi))))
    end
    return tmp
end
function tmp_2 = code(x)
  % x <= 2.2: sqrt(1/pi) * x * (2 + (2/3)x^2); else only the x^7/21 term.
  tmp = 0.0;
  if (x <= 2.2)
    tmp = abs((sqrt((1.0 / pi)) * (x * (2.0 + (x * (x * 0.6666666666666666))))));
  else
    tmp = abs(((x ^ 7.0) * (0.047619047619047616 / sqrt(pi))));
  end
  tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 2.2], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(x * N[(2.0 + N[(x * N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[x, 7.0], $MachinePrecision] * N[(0.047619047619047616 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.2:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(2 + x \cdot \left(x \cdot 0.6666666666666666\right)\right)\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{x}^{7} \cdot \frac{0.047619047619047616}{\sqrt{\pi}}\right|\\
\end{array}
\end{array}
if x < 2.2000000000000002Initial program 99.5%
Simplified99.4%
associate-/l*99.4%
add-sqr-sqrt34.0%
fabs-sqr34.0%
add-sqr-sqrt99.4%
div-inv99.9%
add-sqr-sqrt34.3%
fabs-sqr34.3%
add-sqr-sqrt99.9%
add-sqr-sqrt99.8%
pow299.8%
Applied egg-rr99.9%
Taylor expanded in x around 0 88.3%
+-commutative88.3%
associate-*r*88.3%
associate-*r*88.3%
distribute-rgt-out88.3%
unpow388.3%
unpow288.3%
associate-*r*88.3%
unpow288.3%
associate-*l*88.3%
*-commutative88.3%
distribute-rgt-out88.3%
+-commutative88.3%
*-commutative88.3%
fma-def88.3%
Simplified88.3%
fma-udef88.3%
Applied egg-rr88.3%
if 2.2000000000000002 < x Initial program 99.5%
Simplified99.4%
Taylor expanded in x around 0 98.3%
*-commutative98.3%
*-commutative98.3%
associate-+r+98.3%
distribute-lft-in98.3%
fma-def98.3%
rem-square-sqrt34.0%
fabs-sqr34.0%
rem-square-sqrt76.0%
+-commutative76.0%
fma-def76.0%
rem-square-sqrt34.2%
fabs-sqr34.2%
rem-square-sqrt76.0%
*-commutative76.0%
Simplified98.0%
Taylor expanded in x around inf 34.8%
expm1-log1p-u3.9%
expm1-udef3.7%
sqrt-div3.7%
metadata-eval3.7%
Applied egg-rr3.7%
expm1-def3.9%
expm1-log1p34.8%
associate-*l/34.8%
*-lft-identity34.8%
*-commutative34.8%
associate-*r/34.8%
Simplified34.8%
Final simplification88.3%
(FPCore (x) :precision binary64 (if (<= x 1.75) (fabs (* x (/ 2.0 (sqrt PI)))) (fabs (* (sqrt (/ 1.0 PI)) (* x (* x (* x 0.6666666666666666)))))))
/* Branch at x = 1.75: leading term 2x/sqrt(pi) for small x; the cubic term
 * sqrt(1/pi) * (2/3)x^3 for large x.  Mirrors the generated FPCore above. */
double code(double x) {
  if (x <= 1.75) {
    return fabs(x * (2.0 / sqrt((double) M_PI)));
  }
  return fabs(sqrt(1.0 / (double) M_PI) * (x * (x * (x * 0.6666666666666666))));
}
/** Branch at x = 1.75: leading term 2x/sqrt(pi) for small x; cubic term sqrt(1/pi) * (2/3)x^3 for large x. */
public static double code(double x) {
double tmp;
if (x <= 1.75) {
tmp = Math.abs((x * (2.0 / Math.sqrt(Math.PI))));
} else {
tmp = Math.abs((Math.sqrt((1.0 / Math.PI)) * (x * (x * (x * 0.6666666666666666)))));
}
return tmp;
}
def code(x):
    # x <= 1.75: leading term 2x/sqrt(pi); otherwise the cubic term
    # sqrt(1/pi) * (2/3)x^3.
    if x <= 1.75:
        return math.fabs(x * (2.0 / math.sqrt(math.pi)))
    return math.fabs(math.sqrt(1.0 / math.pi)
                     * (x * (x * (x * 0.6666666666666666))))
function code(x)
    # x <= 1.75: 2x/sqrt(pi); else sqrt(1/pi) * (2/3)x^3.
    tmp = 0.0
    if (x <= 1.75)
        tmp = abs(Float64(x * Float64(2.0 / sqrt(pi))))
    else
        tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(x * Float64(x * Float64(x * 0.6666666666666666)))))
    end
    return tmp
end
function tmp_2 = code(x)
  % x <= 1.75: 2x/sqrt(pi); else sqrt(1/pi) * (2/3)x^3.
  tmp = 0.0;
  if (x <= 1.75)
    tmp = abs((x * (2.0 / sqrt(pi))));
  else
    tmp = abs((sqrt((1.0 / pi)) * (x * (x * (x * 0.6666666666666666)))));
  end
  tmp_2 = tmp;
end
code[x_] := If[LessEqual[x, 1.75], N[Abs[N[(x * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(x * N[(x * N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.75:\\
\;\;\;\;\left|x \cdot \frac{2}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(x \cdot \left(x \cdot 0.6666666666666666\right)\right)\right)\right|\\
\end{array}
\end{array}
if x < 1.75Initial program 99.5%
Simplified99.4%
Taylor expanded in x around 0 98.3%
*-commutative98.3%
*-commutative98.3%
associate-+r+98.3%
distribute-lft-in98.3%
fma-def98.3%
rem-square-sqrt34.0%
fabs-sqr34.0%
rem-square-sqrt76.0%
+-commutative76.0%
fma-def76.0%
rem-square-sqrt34.2%
fabs-sqr34.2%
rem-square-sqrt76.0%
*-commutative76.0%
Simplified98.0%
Taylor expanded in x around 0 68.8%
expm1-log1p-u67.0%
expm1-udef5.6%
sqrt-div5.6%
metadata-eval5.6%
*-commutative5.6%
Applied egg-rr5.6%
expm1-def67.0%
expm1-log1p68.8%
associate-*l/68.4%
*-lft-identity68.4%
*-commutative68.4%
Simplified68.4%
expm1-log1p-u66.6%
expm1-udef5.6%
associate-/l*5.6%
Applied egg-rr5.6%
expm1-def66.6%
expm1-log1p68.3%
associate-/r/68.8%
Simplified68.8%
if 1.75 < x Initial program 99.5%
Simplified99.4%
associate-/l*99.4%
add-sqr-sqrt34.0%
fabs-sqr34.0%
add-sqr-sqrt99.4%
div-inv99.9%
add-sqr-sqrt34.3%
fabs-sqr34.3%
add-sqr-sqrt99.9%
add-sqr-sqrt99.8%
pow299.8%
Applied egg-rr99.9%
Taylor expanded in x around 0 88.3%
+-commutative88.3%
associate-*r*88.3%
associate-*r*88.3%
distribute-rgt-out88.3%
unpow388.3%
unpow288.3%
associate-*r*88.3%
unpow288.3%
associate-*l*88.3%
*-commutative88.3%
distribute-rgt-out88.3%
+-commutative88.3%
*-commutative88.3%
fma-def88.3%
Simplified88.3%
Taylor expanded in x around inf 25.0%
unpow225.0%
*-commutative25.0%
associate-*r*25.0%
Simplified25.0%
Final simplification68.8%
(FPCore (x) :precision binary64 (fabs (* (sqrt (/ 1.0 PI)) (* x (+ 2.0 (* x (* x 0.6666666666666666)))))))
/* No-branch form of the x <= 2.2 case: sqrt(1/pi) * x * (2 + (2/3)x^2). */
double code(double x) {
return fabs((sqrt((1.0 / ((double) M_PI))) * (x * (2.0 + (x * (x * 0.6666666666666666))))));
}
/** No-branch form: sqrt(1/pi) * x * (2 + (2/3)x^2) — series through x^3. */
public static double code(double x) {
return Math.abs((Math.sqrt((1.0 / Math.PI)) * (x * (2.0 + (x * (x * 0.6666666666666666))))));
}
def code(x):
    # Series through x^3: sqrt(1/pi) * x * (2 + x*(x*2/3)).
    return math.fabs(math.sqrt(1.0 / math.pi)
                     * (x * (2.0 + (x * (x * 0.6666666666666666)))))
function code(x)
    # Series through x^3: sqrt(1/pi) * x * (2 + (2/3)x^2).
    return abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(x * Float64(2.0 + Float64(x * Float64(x * 0.6666666666666666))))))
end
function tmp = code(x)
  % Series through x^3: sqrt(1/pi) * x * (2 + (2/3)x^2).
  tmp = abs((sqrt((1.0 / pi)) * (x * (2.0 + (x * (x * 0.6666666666666666))))));
end
code[x_] := N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(x * N[(2.0 + N[(x * N[(x * 0.6666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \left(2 + x \cdot \left(x \cdot 0.6666666666666666\right)\right)\right)\right|
\end{array}
Initial program 99.5%
Simplified99.4%
associate-/l*99.4%
add-sqr-sqrt34.0%
fabs-sqr34.0%
add-sqr-sqrt99.4%
div-inv99.9%
add-sqr-sqrt34.3%
fabs-sqr34.3%
add-sqr-sqrt99.9%
add-sqr-sqrt99.8%
pow299.8%
Applied egg-rr99.9%
Taylor expanded in x around 0 88.3%
+-commutative88.3%
associate-*r*88.3%
associate-*r*88.3%
distribute-rgt-out88.3%
unpow388.3%
unpow288.3%
associate-*r*88.3%
unpow288.3%
associate-*l*88.3%
*-commutative88.3%
distribute-rgt-out88.3%
+-commutative88.3%
*-commutative88.3%
fma-def88.3%
Simplified88.3%
fma-udef88.3%
Applied egg-rr88.3%
Final simplification88.3%
(FPCore (x) :precision binary64 (fabs (* x (/ 2.0 (sqrt PI)))))
/* Leading series term only: |x| * 2/sqrt(pi). */
double code(double x) {
  double scale = 2.0 / sqrt((double) M_PI);
  return fabs(x * scale);
}
/** Leading series term only: |x| * 2/sqrt(pi). */
public static double code(double x) {
return Math.abs((x * (2.0 / Math.sqrt(Math.PI))));
}
def code(x):
    # Leading series term only: |x| * 2/sqrt(pi).
    return math.fabs(x * (2.0 / math.sqrt(math.pi)))
function code(x)
    # Leading series term only: |x| * 2/sqrt(pi).
    return abs(Float64(x * Float64(2.0 / sqrt(pi))))
end
function tmp = code(x)
  % Leading series term only: |x| * 2/sqrt(pi).
  tmp = abs((x * (2.0 / sqrt(pi))));
end
code[x_] := N[Abs[N[(x * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|x \cdot \frac{2}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.5%
Simplified99.4%
Taylor expanded in x around 0 98.3%
*-commutative98.3%
*-commutative98.3%
associate-+r+98.3%
distribute-lft-in98.3%
fma-def98.3%
rem-square-sqrt34.0%
fabs-sqr34.0%
rem-square-sqrt76.0%
+-commutative76.0%
fma-def76.0%
rem-square-sqrt34.2%
fabs-sqr34.2%
rem-square-sqrt76.0%
*-commutative76.0%
Simplified98.0%
Taylor expanded in x around 0 68.8%
expm1-log1p-u67.0%
expm1-udef5.6%
sqrt-div5.6%
metadata-eval5.6%
*-commutative5.6%
Applied egg-rr5.6%
expm1-def67.0%
expm1-log1p68.8%
associate-*l/68.4%
*-lft-identity68.4%
*-commutative68.4%
Simplified68.4%
expm1-log1p-u66.6%
expm1-udef5.6%
associate-/l*5.6%
Applied egg-rr5.6%
expm1-def66.6%
expm1-log1p68.3%
associate-/r/68.8%
Simplified68.8%
Final simplification68.8%
herbie shell --seed 2023263
;; Source FPCore problem statement (input to Herbie): Maclaurin-series
;; approximation of erfi on the branch x <= 0.5, from Jmat.Real.erfi.
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))