
; Degree-7 Maclaurin series of erfi(|x|) (see the :name'd input program below):
; |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x))) ; |x|^3
(t_1 (* (* t_0 (fabs x)) (fabs x)))) ; |x|^5
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x)))))))) ; last term: (1/21)|x|^7
/* Degree-7 Maclaurin-series approximation of erfi(|x|):
 * |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
 * fabs is exact, so hoisting |x| into a local preserves results bit-for-bit;
 * multiplication associations match the original. */
double code(double x) {
    double ax = fabs(x);          /* |x| */
    double ax3 = (ax * ax) * ax;  /* |x|^3 */
    double ax5 = (ax3 * ax) * ax; /* |x|^5 */
    double ax7 = (ax5 * ax) * ax; /* |x|^7 */
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * ax3)) + ((1.0 / 5.0) * ax5);
    sum = sum + ((1.0 / 21.0) * ax7);
    return fabs((1.0 / sqrt((double) M_PI)) * sum);
}
/** Degree-7 Maclaurin-series approximation of erfi(|x|):
 *  |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
 *  Math.abs is exact, so hoisting |x| preserves results; associations match the original. */
public static double code(double x) {
    double ax = Math.abs(x);      // |x|
    double ax3 = (ax * ax) * ax;  // |x|^3
    double ax5 = (ax3 * ax) * ax; // |x|^5
    double ax7 = (ax5 * ax) * ax; // |x|^7
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * ax3)) + ((1.0 / 5.0) * ax5);
    sum = sum + ((1.0 / 21.0) * ax7);
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * sum);
}
def code(x):
    """Degree-7 Maclaurin-series approximation of erfi(|x|).

    Computes |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
    Reformatted onto separate statements: the report collapsed the body onto
    one line, which is not valid Python syntax.
    """
    t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x)  # |x|^3
    t_1 = (t_0 * math.fabs(x)) * math.fabs(x)           # |x|^5
    return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
# Degree-7 Maclaurin-series approximation of erfi(|x|).
# Reformatted from a single line: juxtaposed statements do not parse in Julia.
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))  # |x|^3
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))     # |x|^5
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
% Degree-7 Maclaurin-series approximation of erfi(|x|):
% |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
% Reformatted from a single line (invalid after the function header in MATLAB).
function tmp = code(x)
  t_0 = (abs(x) * abs(x)) * abs(x);  % |x|^3
  t_1 = (t_0 * abs(x)) * abs(x);     % |x|^5
  tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x))))));
end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t\_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t\_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Degree-7 Maclaurin series of erfi(|x|):
; |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x))) ; |x|^3
(t_1 (* (* t_0 (fabs x)) (fabs x)))) ; |x|^5
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x)))))))) ; last term: (1/21)|x|^7
/* Degree-7 Maclaurin-series approximation of erfi(|x|):
 * |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
 * fabs is exact, so hoisting |x| into a local preserves results bit-for-bit;
 * multiplication associations match the original. */
double code(double x) {
    double ax = fabs(x);          /* |x| */
    double ax3 = (ax * ax) * ax;  /* |x|^3 */
    double ax5 = (ax3 * ax) * ax; /* |x|^5 */
    double ax7 = (ax5 * ax) * ax; /* |x|^7 */
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * ax3)) + ((1.0 / 5.0) * ax5);
    sum = sum + ((1.0 / 21.0) * ax7);
    return fabs((1.0 / sqrt((double) M_PI)) * sum);
}
/** Degree-7 Maclaurin-series approximation of erfi(|x|):
 *  |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
 *  Math.abs is exact, so hoisting |x| preserves results; associations match the original. */
public static double code(double x) {
    double ax = Math.abs(x);      // |x|
    double ax3 = (ax * ax) * ax;  // |x|^3
    double ax5 = (ax3 * ax) * ax; // |x|^5
    double ax7 = (ax5 * ax) * ax; // |x|^7
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * ax3)) + ((1.0 / 5.0) * ax5);
    sum = sum + ((1.0 / 21.0) * ax7);
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * sum);
}
def code(x):
    """Degree-7 Maclaurin-series approximation of erfi(|x|).

    Computes |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
    Reformatted onto separate statements: the report collapsed the body onto
    one line, which is not valid Python syntax.
    """
    t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x)  # |x|^3
    t_1 = (t_0 * math.fabs(x)) * math.fabs(x)           # |x|^5
    return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
# Degree-7 Maclaurin-series approximation of erfi(|x|).
# Reformatted from a single line: juxtaposed statements do not parse in Julia.
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))  # |x|^3
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))     # |x|^5
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
% Degree-7 Maclaurin-series approximation of erfi(|x|):
% |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|.
% Reformatted from a single line (invalid after the function header in MATLAB).
function tmp = code(x)
  t_0 = (abs(x) * abs(x)) * abs(x);  % |x|^3
  t_1 = (t_0 * abs(x)) * abs(x);     % |x|^5
  tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x))))));
end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t\_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t\_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
; Herbie alternative: same series rearranged as |x| * |p(x)/sqrt(pi)| with
; p(x) = 0.2 x^4 + (1/21) x^6 + fma(2/3, x^2, 2); 0.047619... = 1/21.
(FPCore (x)
:precision binary64
(*
(fabs x)
(fabs
(/
(+
(+ (* 0.2 (pow x 4.0)) (* 0.047619047619047616 (pow x 6.0)))
(fma 0.6666666666666666 (* x x) 2.0))
(sqrt PI)))))
/* Herbie alternative: |x| * |p(x)/sqrt(pi)| with
 * p(x) = 0.2 x^4 + (1/21) x^6 + fma(2/3, x^2, 2).
 * Named subexpressions only; operation order matches the original. */
double code(double x) {
    double quartic = 0.2 * pow(x, 4.0);
    double sextic = 0.047619047619047616 * pow(x, 6.0); /* 1/21 */
    double low = fma(0.6666666666666666, x * x, 2.0);
    double poly = (quartic + sextic) + low;
    return fabs(x) * fabs(poly / sqrt((double) M_PI));
}
# Herbie alternative: |x| * |p(x)/sqrt(pi)| with
# p(x) = 0.2 x^4 + (1/21) x^6 + fma(2/3, x^2, 2).
# Same operations as the original one-liner, split over named locals.
function code(x)
    quartic = Float64(0.2 * (x ^ 4.0))
    sextic = Float64(0.047619047619047616 * (x ^ 6.0))
    low = fma(0.6666666666666666, Float64(x * x), 2.0)
    poly = Float64(Float64(quartic + sextic) + low)
    return Float64(abs(x) * abs(Float64(poly / sqrt(pi))))
end
code[x_] := N[(N[Abs[x], $MachinePrecision] * N[Abs[N[(N[(N[(N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left|x\right| \cdot \left|\frac{\left(0.2 \cdot {x}^{4} + 0.047619047619047616 \cdot {x}^{6}\right) + \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Simplified 99.9%
fma-undefine 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
; Herbie alternative with a branch at |x| = 0.02:
; small |x|: x * pi^-0.5 * (0.2 x^4 + fma(2/3, x^2, 2));
; larger |x|: x^6 * sqrt(1/pi) * (0.2/x + |x|/21).
(FPCore (x)
:precision binary64
(if (<= (fabs x) 0.02)
(fabs
(*
(* x (pow PI -0.5))
(+ (* 0.2 (pow x 4.0)) (fma 0.6666666666666666 (* x x) 2.0))))
(fabs
(*
(pow x 6.0)
(* (sqrt (/ 1.0 PI)) (+ (/ 0.2 x) (* (fabs x) 0.047619047619047616)))))))
/* Branching Herbie alternative, split at |x| = 0.02:
 * small |x|: x * pi^-0.5 * (0.2 x^4 + fma(2/3, x^2, 2));
 * larger |x|: x^6 * sqrt(1/pi) * (0.2/x + |x|/21).
 * Early returns replace the tmp variable; operation order is unchanged. */
double code(double x) {
    if (fabs(x) <= 0.02) {
        double scaled = x * pow((double) M_PI, -0.5);
        double poly = (0.2 * pow(x, 4.0)) + fma(0.6666666666666666, x * x, 2.0);
        return fabs(scaled * poly);
    }
    double tail = (0.2 / x) + (fabs(x) * 0.047619047619047616); /* 0.2/x + |x|/21 */
    return fabs(pow(x, 6.0) * (sqrt(1.0 / (double) M_PI) * tail));
}
# Branching Herbie alternative, split at |x| = 0.02.
# Reformatted from a single line: juxtaposed statements do not parse in Julia.
function code(x)
    tmp = 0.0
    if (abs(x) <= 0.02)
        tmp = abs(Float64(Float64(x * (pi ^ -0.5)) * Float64(Float64(0.2 * (x ^ 4.0)) + fma(0.6666666666666666, Float64(x * x), 2.0))))
    else
        tmp = abs(Float64((x ^ 6.0) * Float64(sqrt(Float64(1.0 / pi)) * Float64(Float64(0.2 / x) + Float64(abs(x) * 0.047619047619047616)))))
    end
    return tmp
end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.02], N[Abs[N[(N[(x * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision] * N[(N[(0.2 * N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[x, 6.0], $MachinePrecision] * N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(0.2 / x), $MachinePrecision] + N[(N[Abs[x], $MachinePrecision] * 0.047619047619047616), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.02:\\
\;\;\;\;\left|\left(x \cdot {\pi}^{-0.5}\right) \cdot \left(0.2 \cdot {x}^{4} + \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{x}^{6} \cdot \left(\sqrt{\frac{1}{\pi}} \cdot \left(\frac{0.2}{x} + \left|x\right| \cdot 0.047619047619047616\right)\right)\right|\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0200000000000000004Initial program 99.8%
Simplified99.1%
Taylor expanded in x around 0 99.1%
div-inv99.7%
pow1/299.7%
pow-flip99.7%
metadata-eval99.7%
Applied egg-rr99.7%
pow199.7%
add-sqr-sqrt43.7%
fabs-sqr43.7%
add-sqr-sqrt99.7%
Applied egg-rr99.7%
unpow199.7%
Simplified99.7%
if 0.0200000000000000004 < (fabs.f64 x) Initial program 99.8%
Simplified99.8%
Taylor expanded in x around inf 99.8%
+-commutative99.8%
associate-*r*99.8%
*-commutative99.8%
*-commutative99.8%
associate-*l*99.9%
distribute-lft-out99.9%
Simplified99.9%
expm1-log1p-u99.9%
expm1-undefine99.9%
pow299.9%
div-inv99.9%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt99.8%
pow299.8%
pow-flip99.8%
metadata-eval99.8%
Applied egg-rr99.8%
sub-neg99.8%
metadata-eval99.8%
+-commutative99.8%
log1p-undefine99.8%
rem-exp-log99.8%
associate-+r+99.8%
metadata-eval99.8%
metadata-eval99.8%
distribute-lft-in99.8%
+-lft-identity99.8%
*-commutative99.8%
pow-plus99.8%
metadata-eval99.8%
unpow-199.8%
*-inverses99.8%
associate-/r*99.8%
unpow299.8%
remove-double-neg99.8%
distribute-frac-neg299.8%
distribute-frac-neg299.8%
remove-double-neg99.8%
unpow299.8%
associate-/r*99.8%
Simplified99.8%
Final simplification99.8%
; Branching alternative with the shared factor t_0 = sqrt(1/pi) hoisted:
; small |x|: t_0 * x * fma(2/3, x^2, 2); larger |x|: x^6 * t_0 * (0.2/x + |x|/21).
(FPCore (x)
:precision binary64
(let* ((t_0 (sqrt (/ 1.0 PI)))) ; 1/sqrt(pi), shared by both branches
(if (<= (fabs x) 0.02)
(fabs (* t_0 (* x (fma 0.6666666666666666 (* x x) 2.0))))
(fabs
(*
(pow x 6.0)
(* t_0 (+ (/ 0.2 x) (* (fabs x) 0.047619047619047616))))))))
/* Branching Herbie alternative with the shared factor sqrt(1/pi) hoisted:
 * small |x|: sqrt(1/pi) * x * fma(2/3, x^2, 2);
 * larger |x|: x^6 * sqrt(1/pi) * (0.2/x + |x|/21).
 * Early returns replace the tmp variable; operation order is unchanged. */
double code(double x) {
    double inv_sqrt_pi = sqrt(1.0 / (double) M_PI);
    if (fabs(x) <= 0.02) {
        return fabs(inv_sqrt_pi * (x * fma(0.6666666666666666, x * x, 2.0)));
    }
    double tail = (0.2 / x) + (fabs(x) * 0.047619047619047616); /* 0.2/x + |x|/21 */
    return fabs(pow(x, 6.0) * (inv_sqrt_pi * tail));
}
# Branching alternative with the shared factor t_0 = sqrt(1/pi) hoisted.
# Reformatted from a single line: juxtaposed statements do not parse in Julia.
function code(x)
    t_0 = sqrt(Float64(1.0 / pi))
    tmp = 0.0
    if (abs(x) <= 0.02)
        tmp = abs(Float64(t_0 * Float64(x * fma(0.6666666666666666, Float64(x * x), 2.0))))
    else
        tmp = abs(Float64((x ^ 6.0) * Float64(t_0 * Float64(Float64(0.2 / x) + Float64(abs(x) * 0.047619047619047616)))))
    end
    return tmp
end
code[x_] := Block[{t$95$0 = N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.02], N[Abs[N[(t$95$0 * N[(x * N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[Power[x, 6.0], $MachinePrecision] * N[(t$95$0 * N[(N[(0.2 / x), $MachinePrecision] + N[(N[Abs[x], $MachinePrecision] * 0.047619047619047616), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t\_0 := \sqrt{\frac{1}{\pi}}\\
\mathbf{if}\;\left|x\right| \leq 0.02:\\
\;\;\;\;\left|t\_0 \cdot \left(x \cdot \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|{x}^{6} \cdot \left(t\_0 \cdot \left(\frac{0.2}{x} + \left|x\right| \cdot 0.047619047619047616\right)\right)\right|\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0200000000000000004Initial program 99.8%
Simplified99.1%
Taylor expanded in x around 0 99.1%
div-inv99.7%
pow1/299.7%
pow-flip99.7%
metadata-eval99.7%
Applied egg-rr99.7%
Taylor expanded in x around 0 99.6%
associate-*r*99.6%
*-commutative99.6%
associate-*r*99.6%
distribute-rgt-out99.6%
associate-*r*99.6%
rem-square-sqrt43.9%
fabs-sqr43.9%
rem-square-sqrt99.4%
rem-square-sqrt43.6%
fabs-sqr43.6%
rem-square-sqrt99.6%
distribute-rgt-in99.6%
fma-undefine99.6%
Simplified99.6%
pow299.6%
Applied egg-rr99.6%
if 0.0200000000000000004 < (fabs.f64 x) Initial program 99.8%
Simplified99.8%
Taylor expanded in x around inf 99.8%
+-commutative99.8%
associate-*r*99.8%
*-commutative99.8%
*-commutative99.8%
associate-*l*99.9%
distribute-lft-out99.9%
Simplified99.9%
expm1-log1p-u99.9%
expm1-undefine99.9%
pow299.9%
div-inv99.9%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt99.8%
pow299.8%
pow-flip99.8%
metadata-eval99.8%
Applied egg-rr99.8%
sub-neg99.8%
metadata-eval99.8%
+-commutative99.8%
log1p-undefine99.8%
rem-exp-log99.8%
associate-+r+99.8%
metadata-eval99.8%
metadata-eval99.8%
distribute-lft-in99.8%
+-lft-identity99.8%
*-commutative99.8%
pow-plus99.8%
metadata-eval99.8%
unpow-199.8%
*-inverses99.8%
associate-/r*99.8%
unpow299.8%
remove-double-neg99.8%
distribute-frac-neg299.8%
distribute-frac-neg299.8%
remove-double-neg99.8%
unpow299.8%
associate-/r*99.8%
Simplified99.8%
Final simplification99.7%
; Single-branch alternative: |x| * |q(x)/sqrt(pi)| with
; q(x) = (1/21) x^6 + fma(2/3, x^2, 2) (the 0.2 x^4 term dropped).
(FPCore (x)
:precision binary64
(*
(fabs x)
(fabs
(/
(+
(* 0.047619047619047616 (pow x 6.0))
(fma 0.6666666666666666 (* x x) 2.0))
(sqrt PI)))))
/* Single-branch Herbie alternative: |x| * |q(x)/sqrt(pi)| with
 * q(x) = (1/21) x^6 + fma(2/3, x^2, 2).
 * Named subexpressions only; operation order matches the original. */
double code(double x) {
    double sextic = 0.047619047619047616 * pow(x, 6.0); /* (1/21) x^6 */
    double low = fma(0.6666666666666666, x * x, 2.0);
    return fabs(x) * fabs((sextic + low) / sqrt((double) M_PI));
}
# Single-branch alternative: |x| * |q(x)/sqrt(pi)| with
# q(x) = (1/21) x^6 + fma(2/3, x^2, 2).
# Same operations as the original one-liner, split over named locals.
function code(x)
    sextic = Float64(0.047619047619047616 * (x ^ 6.0))
    low = fma(0.6666666666666666, Float64(x * x), 2.0)
    ratio = Float64(Float64(sextic + low) / sqrt(pi))
    return Float64(abs(x) * abs(ratio))
end
code[x_] := N[(N[Abs[x], $MachinePrecision] * N[Abs[N[(N[(N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left|x\right| \cdot \left|\frac{0.047619047619047616 \cdot {x}^{6} + \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Simplified 99.9%
Taylor expanded in x around inf 99.7%
Final simplification 99.7%
; Branching alternative: sqrt(1/pi) * x * fma(2/3, x^2, 2) for |x| <= 0.02,
; otherwise (1/21)/sqrt(pi) * x^7.
(FPCore (x) :precision binary64 (if (<= (fabs x) 0.02) (fabs (* (sqrt (/ 1.0 PI)) (* x (fma 0.6666666666666666 (* x x) 2.0)))) (fabs (* (/ 0.047619047619047616 (sqrt PI)) (pow x 7.0)))))
/* Branching Herbie alternative, split at |x| = 0.02:
 * small |x|: sqrt(1/pi) * x * fma(2/3, x^2, 2);
 * larger |x|: (1/21)/sqrt(pi) * x^7.
 * Early returns replace the tmp variable; operation order is unchanged. */
double code(double x) {
    if (fabs(x) <= 0.02) {
        return fabs(sqrt(1.0 / (double) M_PI) * (x * fma(0.6666666666666666, x * x, 2.0)));
    }
    return fabs((0.047619047619047616 / sqrt((double) M_PI)) * pow(x, 7.0));
}
# Branching alternative: sqrt(1/pi) * x * fma(2/3, x^2, 2) for small |x|,
# else (1/21)/sqrt(pi) * x^7.
# Reformatted from a single line: juxtaposed statements do not parse in Julia.
function code(x)
    tmp = 0.0
    if (abs(x) <= 0.02)
        tmp = abs(Float64(sqrt(Float64(1.0 / pi)) * Float64(x * fma(0.6666666666666666, Float64(x * x), 2.0))))
    else
        tmp = abs(Float64(Float64(0.047619047619047616 / sqrt(pi)) * (x ^ 7.0)))
    end
    return tmp
end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.02], N[Abs[N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(x * N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[(0.047619047619047616 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.02:\\
\;\;\;\;\left|\sqrt{\frac{1}{\pi}} \cdot \left(x \cdot \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)\right)\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\frac{0.047619047619047616}{\sqrt{\pi}} \cdot {x}^{7}\right|\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0200000000000000004Initial program 99.8%
Simplified99.1%
Taylor expanded in x around 0 99.1%
div-inv99.7%
pow1/299.7%
pow-flip99.7%
metadata-eval99.7%
Applied egg-rr99.7%
Taylor expanded in x around 0 99.6%
associate-*r*99.6%
*-commutative99.6%
associate-*r*99.6%
distribute-rgt-out99.6%
associate-*r*99.6%
rem-square-sqrt43.9%
fabs-sqr43.9%
rem-square-sqrt99.4%
rem-square-sqrt43.6%
fabs-sqr43.6%
rem-square-sqrt99.6%
distribute-rgt-in99.6%
fma-undefine99.6%
Simplified99.6%
pow299.6%
Applied egg-rr99.6%
if 0.0200000000000000004 < (fabs.f64 x) Initial program 99.8%
Simplified99.8%
Taylor expanded in x around inf 99.8%
*-commutative99.8%
*-commutative99.8%
associate-*r*99.8%
associate-*l*99.8%
*-commutative99.8%
associate-*l*99.8%
*-commutative99.8%
Simplified99.8%
pow199.8%
*-commutative99.8%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt99.8%
sqrt-div99.8%
metadata-eval99.8%
un-div-inv99.8%
Applied egg-rr99.8%
unpow199.8%
*-commutative99.8%
associate-*l/99.8%
associate-*r/99.9%
Simplified99.9%
associate-*r/99.8%
clear-num99.8%
associate-*r*99.8%
Applied egg-rr99.8%
associate-/r/99.9%
associate-*l*99.8%
associate-*r*99.8%
associate-*l/99.8%
metadata-eval99.8%
distribute-lft-neg-in99.8%
neg-mul-199.8%
remove-double-neg99.8%
/-rgt-identity99.8%
associate-*r/99.8%
times-frac99.8%
*-rgt-identity99.8%
associate-*r/99.9%
associate-*r/99.8%
*-commutative99.8%
associate-*r/99.9%
associate-*l/99.8%
associate-*l*99.9%
pow-plus99.9%
metadata-eval99.9%
Simplified99.9%
Final simplification99.7%
; Branching alternative: linear term 2|x|/sqrt(pi) for |x| <= 0.02,
; otherwise (1/21)/sqrt(pi) * x^7 (0.047619... = 1/21).
(FPCore (x) :precision binary64 (if (<= (fabs x) 0.02) (fabs (* x (/ 2.0 (sqrt PI)))) (fabs (* (/ 0.047619047619047616 (sqrt PI)) (pow x 7.0)))))
/* Branching Herbie alternative, split at |x| = 0.02:
 * small |x|: 2|x|/sqrt(pi); larger |x|: (1/21)/sqrt(pi) * x^7.
 * Early returns replace the tmp variable; operation order is unchanged. */
double code(double x) {
    if (fabs(x) <= 0.02) {
        return fabs(x * (2.0 / sqrt((double) M_PI)));
    }
    return fabs((0.047619047619047616 / sqrt((double) M_PI)) * pow(x, 7.0));
}
/** Branching Herbie alternative, split at |x| = 0.02:
 *  small |x|: 2|x|/sqrt(pi); larger |x|: (1/21)/sqrt(pi) * x^7.
 *  Early returns replace the tmp variable; operation order is unchanged. */
public static double code(double x) {
    if (Math.abs(x) <= 0.02) {
        return Math.abs(x * (2.0 / Math.sqrt(Math.PI)));
    }
    return Math.abs((0.047619047619047616 / Math.sqrt(Math.PI)) * Math.pow(x, 7.0));
}
def code(x):
    """Piecewise approximation: 2|x|/sqrt(pi) for |x| <= 0.02, else |x^7|/(21*sqrt(pi)).

    Reformatted onto separate statements: the report collapsed the body onto
    one line, which is not valid Python syntax.
    """
    tmp = 0
    if math.fabs(x) <= 0.02:
        tmp = math.fabs((x * (2.0 / math.sqrt(math.pi))))
    else:
        tmp = math.fabs(((0.047619047619047616 / math.sqrt(math.pi)) * math.pow(x, 7.0)))
    return tmp
# Piecewise: 2|x|/sqrt(pi) for |x| <= 0.02, else |x^7|/(21 sqrt(pi)).
# Reformatted from a single line: juxtaposed statements do not parse in Julia.
function code(x)
    tmp = 0.0
    if (abs(x) <= 0.02)
        tmp = abs(Float64(x * Float64(2.0 / sqrt(pi))))
    else
        tmp = abs(Float64(Float64(0.047619047619047616 / sqrt(pi)) * (x ^ 7.0)))
    end
    return tmp
end
% Piecewise: 2|x|/sqrt(pi) for |x| <= 0.02, else |x^7|/(21*sqrt(pi)).
% Reformatted from a single line (invalid after the function header in MATLAB).
function tmp_2 = code(x)
  tmp = 0.0;
  if (abs(x) <= 0.02)
    tmp = abs((x * (2.0 / sqrt(pi))));
  else
    tmp = abs(((0.047619047619047616 / sqrt(pi)) * (x ^ 7.0)));
  end
  tmp_2 = tmp;
end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.02], N[Abs[N[(x * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[(0.047619047619047616 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.02:\\
\;\;\;\;\left|x \cdot \frac{2}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\frac{0.047619047619047616}{\sqrt{\pi}} \cdot {x}^{7}\right|\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0200000000000000004Initial program 99.8%
Simplified99.8%
Taylor expanded in x around 0 99.1%
associate-*r*99.1%
Simplified99.1%
pow199.1%
*-commutative99.1%
add-sqr-sqrt43.3%
fabs-sqr43.3%
add-sqr-sqrt99.1%
sqrt-div99.1%
metadata-eval99.1%
un-div-inv99.1%
Applied egg-rr99.1%
unpow199.1%
Simplified99.1%
if 0.0200000000000000004 < (fabs.f64 x) Initial program 99.8%
Simplified99.8%
Taylor expanded in x around inf 99.8%
*-commutative99.8%
*-commutative99.8%
associate-*r*99.8%
associate-*l*99.8%
*-commutative99.8%
associate-*l*99.8%
*-commutative99.8%
Simplified99.8%
pow199.8%
*-commutative99.8%
add-sqr-sqrt0.0%
fabs-sqr0.0%
add-sqr-sqrt99.8%
sqrt-div99.8%
metadata-eval99.8%
un-div-inv99.8%
Applied egg-rr99.8%
unpow199.8%
*-commutative99.8%
associate-*l/99.8%
associate-*r/99.9%
Simplified99.9%
associate-*r/99.8%
clear-num99.8%
associate-*r*99.8%
Applied egg-rr99.8%
associate-/r/99.9%
associate-*l*99.8%
associate-*r*99.8%
associate-*l/99.8%
metadata-eval99.8%
distribute-lft-neg-in99.8%
neg-mul-199.8%
remove-double-neg99.8%
/-rgt-identity99.8%
associate-*r/99.8%
times-frac99.8%
*-rgt-identity99.8%
associate-*r/99.9%
associate-*r/99.8%
*-commutative99.8%
associate-*r/99.9%
associate-*l/99.8%
associate-*l*99.9%
pow-plus99.9%
metadata-eval99.9%
Simplified99.9%
Final simplification99.4%
; Simplest alternative: the linear term 2|x|/sqrt(pi) only.
(FPCore (x) :precision binary64 (fabs (* x (/ 2.0 (sqrt PI)))))
/* Simplest Herbie alternative: the linear term 2|x|/sqrt(pi). */
double code(double x) {
    double scale = 2.0 / sqrt((double) M_PI);
    return fabs(x * scale);
}
/** Simplest Herbie alternative: the linear term 2|x|/sqrt(pi). */
public static double code(double x) {
    double scale = 2.0 / Math.sqrt(Math.PI);
    return Math.abs(x * scale);
}
def code(x):
    """Return |x| * 2/sqrt(pi) — the linear-term approximation."""
    scale = 2.0 / math.sqrt(math.pi)
    return math.fabs(x * scale)
# Simplest alternative: the linear term 2|x|/sqrt(pi).
# Same operations as the original one-liner, split over a named local.
function code(x)
    scale = Float64(2.0 / sqrt(pi))
    return abs(Float64(x * scale))
end
% Simplest alternative: the linear term 2|x|/sqrt(pi).
% Reformatted from a single line (invalid after the function header in MATLAB).
function tmp = code(x)
  tmp = abs((x * (2.0 / sqrt(pi))));
end
(* Simplest alternative: |x| * 2/sqrt(pi); each N[..., $MachinePrecision] rounds one step. *)
code[x_] := N[Abs[N[(x * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|x \cdot \frac{2}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Simplified99.8%
Taylor expanded in x around 0 65.2%
associate-*r*65.2%
Simplified65.2%
pow165.2%
*-commutative65.2%
add-sqr-sqrt27.6%
fabs-sqr27.6%
add-sqr-sqrt65.2%
sqrt-div65.2%
metadata-eval65.2%
un-div-inv65.2%
Applied egg-rr65.2%
unpow165.2%
Simplified65.2%
Final simplification65.2%
herbie shell --seed 2024053
; Original input program: degree-7 series used by Jmat.Real.erfi on the x <= 0.5 branch.
; Equivalent to (2/sqrt(pi)) * (|x| + |x|^3/3 + |x|^5/10 + |x|^7/42).
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))