
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* Truncated series for erfi(|x|), scaled by 1/sqrt(pi):
 * (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7) / sqrt(pi), with a = |x|.
 * Even in x, so code(-x) == code(x).
 * Fix: hoist the 8 repeated fabs(x) calls; fabs is exact, so the
 * floating-point result is bit-identical to the original. */
double code(double x) {
    double ax = fabs(x);
    double t_0 = (ax * ax) * ax;   /* |x|^3 */
    double t_1 = (t_0 * ax) * ax;  /* |x|^5 */
    return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * ax) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * ax) * ax)))));
}
// Truncated series for erfi(|x|)/sqrt(pi):
// (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7) / sqrt(pi), with a = |x|.
// Even in x, so code(-x) == code(x).
// Fix: hoist the repeated Math.abs(x) calls; abs is exact, result unchanged.
public static double code(double x) {
    double ax = Math.abs(x);
    double t_0 = (ax * ax) * ax;   // |x|^3
    double t_1 = (t_0 * ax) * ax;  // |x|^5
    return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * ax) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * ax) * ax)))));
}
def code(x):
    """Truncated series for erfi(|x|)/sqrt(pi).

    Computes (2a + (2/3)a**3 + (1/5)a**5 + (1/21)a**7) / sqrt(pi) with a = |x|.
    Even in x, so code(-x) == code(x).

    Fix: the original had all statements jammed on one line (invalid Python);
    also hoists the repeated math.fabs(x) calls (exact op, result unchanged).
    """
    ax = math.fabs(x)
    t_0 = (ax * ax) * ax   # |x|**3
    t_1 = (t_0 * ax) * ax  # |x|**5
    return math.fabs((1.0 / math.sqrt(math.pi)) * ((((2.0 * ax) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * ax) * ax))))
# Truncated series for erfi(|x|)/sqrt(pi): (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7)/sqrt(pi), a = |x|.
# Fix: statements were jammed on one line without separators, which is not
# valid Julia; only line breaks were introduced — expressions are unchanged.
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
% Truncated series for erfi(|x|)/sqrt(pi); even in x, so code(-x) == code(x).
% Fix: the declaration and body statements were jammed on one line, which is
% not a valid MATLAB function definition; only line breaks were introduced.
function tmp = code(x)
    t_0 = (abs(x) * abs(x)) * abs(x);   % |x|^3
    t_1 = (t_0 * abs(x)) * abs(x);      % |x|^5
    tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x))))));
end
(* Truncated erfi series at $MachinePrecision: t$95$0 = |x|^3, t$95$1 = |x|^5
   (names mangled from t_0 / t_1 by the report generator); result is
   |(2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7) / Sqrt[Pi]|. *)
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* Truncated erfi series: (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7) / sqrt(pi),
 * with a = |x|. Same operation sequence as the reference rendering. */
double code(double x) {
    double cube = (fabs(x) * fabs(x)) * fabs(x);
    double fifth = (cube * fabs(x)) * fabs(x);
    double sum = (((2.0 * fabs(x)) + ((2.0 / 3.0) * cube)) + ((1.0 / 5.0) * fifth)) + ((1.0 / 21.0) * ((fifth * fabs(x)) * fabs(x)));
    double inv_sqrt_pi = 1.0 / sqrt(((double) M_PI));
    return fabs(inv_sqrt_pi * sum);
}
// Truncated erfi series: (2a + (2/3)a^3 + (1/5)a^5 + (1/21)a^7) / sqrt(pi), a = |x|.
public static double code(double x) {
    double cube = (Math.abs(x) * Math.abs(x)) * Math.abs(x);
    double fifth = (cube * Math.abs(x)) * Math.abs(x);
    double sum = (((2.0 * Math.abs(x)) + ((2.0 / 3.0) * cube)) + ((1.0 / 5.0) * fifth)) + ((1.0 / 21.0) * ((fifth * Math.abs(x)) * Math.abs(x)));
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * sum);
}
def code(x):
    """Truncated series for erfi(|x|)/sqrt(pi); even in x.

    Fix: the original had all statements jammed on one line (invalid Python);
    expressions themselves are unchanged.
    """
    t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x)   # |x|**3
    t_1 = (t_0 * math.fabs(x)) * math.fabs(x)            # |x|**5
    return math.fabs((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x)))))
# Truncated erfi series; even in x.
# Fix: statements were jammed on one line without separators (invalid Julia);
# only line breaks were introduced.
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
% Truncated erfi series; even in x.
% Fix: declaration and body were jammed on one line (invalid MATLAB function
% definition); only line breaks were introduced.
function tmp = code(x)
    t_0 = (abs(x) * abs(x)) * abs(x);
    t_1 = (t_0 * abs(x)) * abs(x);
    tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x))))));
end
(* Truncated erfi series at $MachinePrecision: t$95$0 = |x|^3, t$95$1 = |x|^5
   (names mangled from t_0 / t_1 by the report generator). *)
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(fma
(* (fabs x) (* t_0 t_0))
0.047619047619047616
(fma
(fabs x)
(* 0.2 (* x t_0))
(* (fabs x) (fma 0.6666666666666666 (* x x) 2.0))))))))
/* Herbie alternative: same erfi series refactored with fused multiply-adds.
 * Constants: 0.047619047619047616 == 1/21, 0.2 == 1/5, 0.666... == 2/3.
 * Fix: hoist the 3 repeated fabs(x) calls; fabs is exact, result unchanged. */
double code(double x) {
    double ax = fabs(x);
    double t_0 = x * (x * x);  /* signed x^3 */
    return fabs(((1.0 / sqrt(((double) M_PI))) * fma((ax * (t_0 * t_0)), 0.047619047619047616, fma(ax, (0.2 * (x * t_0)), (ax * fma(0.6666666666666666, (x * x), 2.0))))));
}
# fma-based erfi series variant.
# Fix: statements were jammed on one line without separators (invalid Julia);
# only line breaks were introduced.
function code(x)
    t_0 = Float64(x * Float64(x * x))
    return abs(Float64(Float64(1.0 / sqrt(pi)) * fma(Float64(abs(x) * Float64(t_0 * t_0)), 0.047619047619047616, fma(abs(x), Float64(0.2 * Float64(x * t_0)), Float64(abs(x) * fma(0.6666666666666666, Float64(x * x), 2.0))))))
end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[Abs[x], $MachinePrecision] * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision] * 0.047619047619047616 + N[(N[Abs[x], $MachinePrecision] * N[(0.2 * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[Abs[x], $MachinePrecision] * N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot x\right)\\
\left|\frac{1}{\sqrt{\pi}} \cdot \mathsf{fma}\left(\left|x\right| \cdot \left(t\_0 \cdot t\_0\right), 0.047619047619047616, \mathsf{fma}\left(\left|x\right|, 0.2 \cdot \left(x \cdot t\_0\right), \left|x\right| \cdot \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)\right)\right)\right|
\end{array}
\end{array}
Initial program 99.8%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.9%
(FPCore (x)
:precision binary64
(*
(/ 1.0 (sqrt PI))
(fabs
(fma
(fma x (* x (fma x (* x 0.047619047619047616) 0.2)) 0.6666666666666666)
(* x (* x x))
(* (fabs x) 2.0)))))
/* (1/sqrt(pi)) * |fma(poly, x^3, 2|x|)|, where poly = 2/3 + x^2/5 + x^4/21
 * is built from nested fma calls. Same operation sequence as the one-liner. */
double code(double x) {
    double inv_sqrt_pi = 1.0 / sqrt(((double) M_PI));
    double poly = fma(x, (x * fma(x, (x * 0.047619047619047616), 0.2)), 0.6666666666666666);
    double cube = x * (x * x);
    return inv_sqrt_pi * fabs(fma(poly, cube, (fabs(x) * 2.0)));
}
function code(x) return Float64(Float64(1.0 / sqrt(pi)) * abs(fma(fma(x, Float64(x * fma(x, Float64(x * 0.047619047619047616), 0.2)), 0.6666666666666666), Float64(x * Float64(x * x)), Float64(abs(x) * 2.0)))) end
code[x_] := N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Abs[N[(N[(x * N[(x * N[(x * N[(x * 0.047619047619047616), $MachinePrecision] + 0.2), $MachinePrecision]), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(N[Abs[x], $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{\pi}} \cdot \left|\mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.047619047619047616, 0.2\right), 0.6666666666666666\right), x \cdot \left(x \cdot x\right), \left|x\right| \cdot 2\right)\right|
\end{array}
Initial program 99.8%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.9%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.8%
fabs-mulN/A
fabs-divN/A
metadata-evalN/A
rem-sqrt-squareN/A
add-sqr-sqrtN/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f64N/A
Applied egg-rr99.9%
associate-*r*N/A
fabs-sqrN/A
fabs-mulN/A
pow3N/A
sqr-powN/A
fabs-sqrN/A
sqr-powN/A
pow3N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.8
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (x)
:precision binary64
(/
(fabs
(fma
(fma x (* x (fma x (* x 0.047619047619047616) 0.2)) 0.6666666666666666)
(* x (* x (fabs x)))
(* (fabs x) 2.0)))
(sqrt PI)))
/* |fma(poly, x^2*|x|, 2|x|)| / sqrt(pi), where poly = 2/3 + x^2/5 + x^4/21. */
double code(double x) {
    double poly = fma(x, (x * fma(x, (x * 0.047619047619047616), 0.2)), 0.6666666666666666);
    double odd_cube = x * (x * fabs(x));
    double numer = fabs(fma(poly, odd_cube, (fabs(x) * 2.0)));
    return numer / sqrt(((double) M_PI));
}
function code(x) return Float64(abs(fma(fma(x, Float64(x * fma(x, Float64(x * 0.047619047619047616), 0.2)), 0.6666666666666666), Float64(x * Float64(x * abs(x))), Float64(abs(x) * 2.0))) / sqrt(pi)) end
code[x_] := N[(N[Abs[N[(N[(x * N[(x * N[(x * N[(x * 0.047619047619047616), $MachinePrecision] + 0.2), $MachinePrecision]), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * N[(x * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[Abs[x], $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left|\mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.047619047619047616, 0.2\right), 0.6666666666666666\right), x \cdot \left(x \cdot \left|x\right|\right), \left|x\right| \cdot 2\right)\right|}{\sqrt{\pi}}
\end{array}
Initial program 99.8%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.9%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.8%
associate-*l/N/A
fabs-divN/A
*-lft-identityN/A
rem-sqrt-squareN/A
add-sqr-sqrtN/A
/-lowering-/.f64N/A
Applied egg-rr99.4%
(FPCore (x)
:precision binary64
(fabs
(/
(fma
(* x x)
(*
(fabs x)
(fma (* x x) (fma (* x x) 0.047619047619047616 0.2) 0.6666666666666666))
(* (fabs x) 2.0))
(sqrt PI))))
/* |(x^2 * |x| * poly + 2|x|) / sqrt(pi)|, poly = 2/3 + x^2/5 + x^4/21. */
double code(double x) {
    double sq = x * x;
    double poly = fma(sq, fma(sq, 0.047619047619047616, 0.2), 0.6666666666666666);
    double numer = fma(sq, (fabs(x) * poly), (fabs(x) * 2.0));
    return fabs((numer / sqrt(((double) M_PI))));
}
function code(x) return abs(Float64(fma(Float64(x * x), Float64(abs(x) * fma(Float64(x * x), fma(Float64(x * x), 0.047619047619047616, 0.2), 0.6666666666666666)), Float64(abs(x) * 2.0)) / sqrt(pi))) end
code[x_] := N[Abs[N[(N[(N[(x * x), $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.047619047619047616 + 0.2), $MachinePrecision] + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + N[(N[Abs[x], $MachinePrecision] * 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{\mathsf{fma}\left(x \cdot x, \left|x\right| \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.047619047619047616, 0.2\right), 0.6666666666666666\right), \left|x\right| \cdot 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Applied egg-rr99.4%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.4%
Final simplification99.4%
(FPCore (x)
:precision binary64
(if (<= (fabs x) 0.04)
(*
(fabs x)
(/
(fabs (fma x (* x (fma (* x x) 0.2 0.6666666666666666)) 2.0))
(sqrt PI)))
(*
0.047619047619047616
(/ (* (fabs x) (* (* x x) (* x (* x (* x x))))) (sqrt PI)))))
/* Branch at |x| = 0.04: fma-based cubic correction for small |x|,
 * the |x|^7/21 leading term for large |x|. Early returns replace the
 * temporary; each branch's expression is unchanged. */
double code(double x) {
    if (fabs(x) <= 0.04) {
        return fabs(x) * (fabs(fma(x, (x * fma((x * x), 0.2, 0.6666666666666666)), 2.0)) / sqrt(((double) M_PI)));
    }
    double sixth = (x * x) * (x * (x * (x * x)));
    return 0.047619047619047616 * ((fabs(x) * sixth) / sqrt(((double) M_PI)));
}
function code(x) tmp = 0.0 if (abs(x) <= 0.04) tmp = Float64(abs(x) * Float64(abs(fma(x, Float64(x * fma(Float64(x * x), 0.2, 0.6666666666666666)), 2.0)) / sqrt(pi))); else tmp = Float64(0.047619047619047616 * Float64(Float64(abs(x) * Float64(Float64(x * x) * Float64(x * Float64(x * Float64(x * x))))) / sqrt(pi))); end return tmp end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.04], N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.2 + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.047619047619047616 * N[(N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.04:\\
\;\;\;\;\left|x\right| \cdot \frac{\left|\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.2, 0.6666666666666666\right), 2\right)\right|}{\sqrt{\pi}}\\
\mathbf{else}:\\
\;\;\;\;0.047619047619047616 \cdot \frac{\left|x\right| \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)}{\sqrt{\pi}}\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0400000000000000008Initial program 99.9%
Applied egg-rr99.2%
Taylor expanded in x around 0
+-commutativeN/A
Simplified99.1%
associate-/l*N/A
*-commutativeN/A
fabs-mulN/A
fabs-fabsN/A
*-lowering-*.f64N/A
Applied egg-rr99.7%
if 0.0400000000000000008 < (fabs.f64 x) Initial program 99.8%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.9%
Taylor expanded in x around inf
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
*-commutativeN/A
metadata-evalN/A
pow-sqrN/A
cube-prodN/A
unpow2N/A
unpow3N/A
pow-sqrN/A
metadata-evalN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.4
Simplified99.4%
associate-*l/N/A
fabs-divN/A
Applied egg-rr99.4%
Final simplification99.6%
(FPCore (x)
:precision binary64
(if (<= (fabs x) 0.04)
(*
(fabs x)
(/
(fabs (fma x (* x (fma (* x x) 0.2 0.6666666666666666)) 2.0))
(sqrt PI)))
(*
(* 0.047619047619047616 (* (* x x) (* x (* x (* x x)))))
(fabs (/ x (sqrt PI))))))
/* Branch at |x| = 0.04: fma-based form for small |x|; for large |x| the
 * x^6 factor times |x/sqrt(pi)|. Early returns replace the temporary. */
double code(double x) {
    if (fabs(x) <= 0.04) {
        return fabs(x) * (fabs(fma(x, (x * fma((x * x), 0.2, 0.6666666666666666)), 2.0)) / sqrt(((double) M_PI)));
    }
    double sixth = (x * x) * (x * (x * (x * x)));
    return (0.047619047619047616 * sixth) * fabs((x / sqrt(((double) M_PI))));
}
function code(x) tmp = 0.0 if (abs(x) <= 0.04) tmp = Float64(abs(x) * Float64(abs(fma(x, Float64(x * fma(Float64(x * x), 0.2, 0.6666666666666666)), 2.0)) / sqrt(pi))); else tmp = Float64(Float64(0.047619047619047616 * Float64(Float64(x * x) * Float64(x * Float64(x * Float64(x * x))))) * abs(Float64(x / sqrt(pi)))); end return tmp end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.04], N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.2 + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(0.047619047619047616 * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Abs[N[(x / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.04:\\
\;\;\;\;\left|x\right| \cdot \frac{\left|\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.2, 0.6666666666666666\right), 2\right)\right|}{\sqrt{\pi}}\\
\mathbf{else}:\\
\;\;\;\;\left(0.047619047619047616 \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)\right) \cdot \left|\frac{x}{\sqrt{\pi}}\right|\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0400000000000000008Initial program 99.9%
Applied egg-rr99.2%
Taylor expanded in x around 0
+-commutativeN/A
Simplified99.1%
associate-/l*N/A
*-commutativeN/A
fabs-mulN/A
fabs-fabsN/A
*-lowering-*.f64N/A
Applied egg-rr99.7%
if 0.0400000000000000008 < (fabs.f64 x) Initial program 99.8%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.9%
Taylor expanded in x around inf
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
*-commutativeN/A
metadata-evalN/A
pow-sqrN/A
cube-prodN/A
unpow2N/A
unpow3N/A
pow-sqrN/A
metadata-evalN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.4
Simplified99.4%
Applied egg-rr99.4%
Final simplification99.6%
(FPCore (x)
:precision binary64
(if (<= (fabs x) 0.04)
(*
(fabs x)
(/
(fabs (fma x (* x (fma (* x x) 0.2 0.6666666666666666)) 2.0))
(sqrt PI)))
(*
(* (* x x) (* x (* x x)))
(/ (* (* x x) 0.047619047619047616) (sqrt PI)))))
/* Branch at |x| = 0.04: fma-based form for small |x|; for large |x|,
 * x^5 times (x^2/21)/sqrt(pi) — note this branch is signed in x, exactly
 * as in the generated original. Early returns replace the temporary. */
double code(double x) {
    if (fabs(x) <= 0.04) {
        return fabs(x) * (fabs(fma(x, (x * fma((x * x), 0.2, 0.6666666666666666)), 2.0)) / sqrt(((double) M_PI)));
    }
    double fifth = (x * x) * (x * (x * x));
    return fifth * (((x * x) * 0.047619047619047616) / sqrt(((double) M_PI)));
}
function code(x) tmp = 0.0 if (abs(x) <= 0.04) tmp = Float64(abs(x) * Float64(abs(fma(x, Float64(x * fma(Float64(x * x), 0.2, 0.6666666666666666)), 2.0)) / sqrt(pi))); else tmp = Float64(Float64(Float64(x * x) * Float64(x * Float64(x * x))) * Float64(Float64(Float64(x * x) * 0.047619047619047616) / sqrt(pi))); end return tmp end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.04], N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.2 + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(x * x), $MachinePrecision] * 0.047619047619047616), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.04:\\
\;\;\;\;\left|x\right| \cdot \frac{\left|\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.2, 0.6666666666666666\right), 2\right)\right|}{\sqrt{\pi}}\\
\mathbf{else}:\\
\;\;\;\;\left(\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot \frac{\left(x \cdot x\right) \cdot 0.047619047619047616}{\sqrt{\pi}}\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0400000000000000008Initial program 99.9%
Applied egg-rr99.2%
Taylor expanded in x around 0
+-commutativeN/A
Simplified99.1%
associate-/l*N/A
*-commutativeN/A
fabs-mulN/A
fabs-fabsN/A
*-lowering-*.f64N/A
Applied egg-rr99.7%
if 0.0400000000000000008 < (fabs.f64 x) Initial program 99.8%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.9%
Taylor expanded in x around inf
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
*-commutativeN/A
metadata-evalN/A
pow-sqrN/A
cube-prodN/A
unpow2N/A
unpow3N/A
pow-sqrN/A
metadata-evalN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.4
Simplified99.4%
associate-*l/N/A
fabs-divN/A
*-lft-identityN/A
rem-sqrt-squareN/A
add-sqr-sqrtN/A
div-invN/A
Applied egg-rr0.1%
Final simplification65.1%
(FPCore (x)
:precision binary64
(fabs
(/
(*
(fabs x)
(fma
(* x x)
(fma x (* x (fma (* x x) 0.047619047619047616 0.2)) 0.6666666666666666)
2.0))
(sqrt PI))))
/* ||x| * (2 + x^2*(2/3 + x^2/5 + x^4/21)) / sqrt(pi)| via nested fma. */
double code(double x) {
    double sq = x * x;
    double poly = fma(sq, fma(x, (x * fma(sq, 0.047619047619047616, 0.2)), 0.6666666666666666), 2.0);
    return fabs(((fabs(x) * poly) / sqrt(((double) M_PI))));
}
function code(x) return abs(Float64(Float64(abs(x) * fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), 0.047619047619047616, 0.2)), 0.6666666666666666), 2.0)) / sqrt(pi))) end
code[x_] := N[Abs[N[(N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.047619047619047616 + 0.2), $MachinePrecision]), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{\left|x\right| \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.047619047619047616, 0.2\right), 0.6666666666666666\right), 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Applied egg-rr99.4%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.4%
Taylor expanded in x around 0
Simplified99.4%
(FPCore (x) :precision binary64 (if (<= (fabs x) 0.04) (* (fabs x) (fabs (/ (fma x (* x 0.6666666666666666) 2.0) (sqrt PI)))) (* (* (* x x) (* x (* x x))) (/ 0.2 (sqrt PI)))))
/* Branch at |x| = 0.04: quadratic fma correction for small |x|;
 * x^5 * (1/5)/sqrt(pi) for large |x| (signed, as generated).
 * Early returns replace the temporary. */
double code(double x) {
    if (fabs(x) <= 0.04) {
        return fabs(x) * fabs((fma(x, (x * 0.6666666666666666), 2.0) / sqrt(((double) M_PI))));
    }
    double fifth = (x * x) * (x * (x * x));
    return fifth * (0.2 / sqrt(((double) M_PI)));
}
function code(x) tmp = 0.0 if (abs(x) <= 0.04) tmp = Float64(abs(x) * abs(Float64(fma(x, Float64(x * 0.6666666666666666), 2.0) / sqrt(pi)))); else tmp = Float64(Float64(Float64(x * x) * Float64(x * Float64(x * x))) * Float64(0.2 / sqrt(pi))); end return tmp end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 0.04], N[(N[Abs[x], $MachinePrecision] * N[Abs[N[(N[(x * N[(x * 0.6666666666666666), $MachinePrecision] + 2.0), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(0.2 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 0.04:\\
\;\;\;\;\left|x\right| \cdot \left|\frac{\mathsf{fma}\left(x, x \cdot 0.6666666666666666, 2\right)}{\sqrt{\pi}}\right|\\
\mathbf{else}:\\
\;\;\;\;\left(\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot x\right)\right)\right) \cdot \frac{0.2}{\sqrt{\pi}}\\
\end{array}
\end{array}
if (fabs.f64 x) < 0.0400000000000000008Initial program 99.9%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.8%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.9%
Taylor expanded in x around 0
+-commutativeN/A
associate-*r*N/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6499.6
Simplified99.6%
*-commutativeN/A
associate-*l*N/A
fabs-mulN/A
fabs-fabsN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
un-div-invN/A
fabs-lowering-fabs.f64N/A
/-lowering-/.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
PI-lowering-PI.f6499.6
Applied egg-rr99.6%
if 0.0400000000000000008 < (fabs.f64 x) Initial program 99.8%
Applied egg-rr99.8%
Taylor expanded in x around 0
+-commutativeN/A
Simplified86.5%
Taylor expanded in x around inf
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-multN/A
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
Simplified86.5%
associate-*r*N/A
sqrt-divN/A
metadata-evalN/A
associate-*r*N/A
un-div-invN/A
fabs-divN/A
Applied egg-rr0.1%
Final simplification65.0%
(FPCore (x) :precision binary64 (* (fabs x) (/ (fabs (fma x (* x (fma (* x x) 0.2 0.6666666666666666)) 2.0)) (sqrt PI))))
/* |x| * |2 + x^2*(2/3 + x^2/5)| / sqrt(pi) via nested fma. */
double code(double x) {
    double poly = fma(x, (x * fma((x * x), 0.2, 0.6666666666666666)), 2.0);
    return fabs(x) * (fabs(poly) / sqrt(((double) M_PI)));
}
function code(x) return Float64(abs(x) * Float64(abs(fma(x, Float64(x * fma(Float64(x * x), 0.2, 0.6666666666666666)), 2.0)) / sqrt(pi))) end
code[x_] := N[(N[Abs[x], $MachinePrecision] * N[(N[Abs[N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.2 + 0.6666666666666666), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left|x\right| \cdot \frac{\left|\mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.2, 0.6666666666666666\right), 2\right)\right|}{\sqrt{\pi}}
\end{array}
Initial program 99.8%
Applied egg-rr99.4%
Taylor expanded in x around 0
+-commutativeN/A
Simplified94.7%
associate-/l*N/A
*-commutativeN/A
fabs-mulN/A
fabs-fabsN/A
*-lowering-*.f64N/A
Applied egg-rr95.1%
Final simplification95.1%
(FPCore (x) :precision binary64 (fabs (/ (* (fabs x) (fma (* x x) (fma (* x x) 0.2 0.6666666666666666) 2.0)) (sqrt PI))))
/* ||x| * (2 + x^2*(2/3 + x^2/5)) / sqrt(pi)| via nested fma. */
double code(double x) {
    double sq = x * x;
    double poly = fma(sq, fma(sq, 0.2, 0.6666666666666666), 2.0);
    return fabs(((fabs(x) * poly) / sqrt(((double) M_PI))));
}
function code(x) return abs(Float64(Float64(abs(x) * fma(Float64(x * x), fma(Float64(x * x), 0.2, 0.6666666666666666), 2.0)) / sqrt(pi))) end
code[x_] := N[Abs[N[(N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.2 + 0.6666666666666666), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{\left|x\right| \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, 0.2, 0.6666666666666666\right), 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Applied egg-rr99.4%
Taylor expanded in x around 0
+-commutativeN/A
Simplified94.7%
(FPCore (x) :precision binary64 (fabs (/ (* (fabs x) (fma (* x x) (* (* x x) 0.2) 2.0)) (sqrt PI))))
/* ||x| * (2 + x^4/5) / sqrt(pi)| — note this variant drops the x^2 term,
 * exactly as generated. */
double code(double x) {
    double sq = x * x;
    double poly = fma(sq, (sq * 0.2), 2.0);
    return fabs(((fabs(x) * poly) / sqrt(((double) M_PI))));
}
function code(x) return abs(Float64(Float64(abs(x) * fma(Float64(x * x), Float64(Float64(x * x) * 0.2), 2.0)) / sqrt(pi))) end
code[x_] := N[Abs[N[(N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.2), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{\left|x\right| \cdot \mathsf{fma}\left(x \cdot x, \left(x \cdot x\right) \cdot 0.2, 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Applied egg-rr99.4%
Taylor expanded in x around 0
+-commutativeN/A
Simplified94.7%
Taylor expanded in x around inf
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6494.3
Simplified94.3%
(FPCore (x) :precision binary64 (* (fabs x) (fabs (/ (fma x (* x 0.6666666666666666) 2.0) (sqrt PI)))))
/* |x| * |(2 + (2/3)x^2) / sqrt(pi)| via a single fma. */
double code(double x) {
    double quad = fma(x, (x * 0.6666666666666666), 2.0);
    return fabs(x) * fabs((quad / sqrt(((double) M_PI))));
}
function code(x) return Float64(abs(x) * abs(Float64(fma(x, Float64(x * 0.6666666666666666), 2.0) / sqrt(pi)))) end
code[x_] := N[(N[Abs[x], $MachinePrecision] * N[Abs[N[(N[(x * N[(x * 0.6666666666666666), $MachinePrecision] + 2.0), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left|x\right| \cdot \left|\frac{\mathsf{fma}\left(x, x \cdot 0.6666666666666666, 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
Applied egg-rr99.9%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
Simplified99.8%
Taylor expanded in x around 0
+-commutativeN/A
associate-*r*N/A
distribute-rgt-outN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6489.2
Simplified89.2%
*-commutativeN/A
associate-*l*N/A
fabs-mulN/A
fabs-fabsN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
un-div-invN/A
fabs-lowering-fabs.f64N/A
/-lowering-/.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
PI-lowering-PI.f6489.2
Applied egg-rr89.2%
(FPCore (x) :precision binary64 (fabs (/ (* (fabs x) (fma 0.6666666666666666 (* x x) 2.0)) (sqrt PI))))
/* ||x| * (2 + (2/3)x^2) / sqrt(pi)| via a single fma. */
double code(double x) {
    double poly = fma(0.6666666666666666, (x * x), 2.0);
    return fabs(((fabs(x) * poly) / sqrt(((double) M_PI))));
}
function code(x) return abs(Float64(Float64(abs(x) * fma(0.6666666666666666, Float64(x * x), 2.0)) / sqrt(pi))) end
code[x_] := N[Abs[N[(N[(N[Abs[x], $MachinePrecision] * N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{\left|x\right| \cdot \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Applied egg-rr99.4%
Taylor expanded in x around 0
+-commutativeN/A
associate-*r*N/A
distribute-rgt-inN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6488.8
Simplified88.8%
(FPCore (x) :precision binary64 (* (fabs x) (/ 2.0 (sqrt PI))))
/* Leading-order term only: |x| * 2/sqrt(pi). */
double code(double x) {
    double scale = 2.0 / sqrt(((double) M_PI));
    return fabs(x) * scale;
}
// Leading-order term only: |x| * 2/sqrt(pi).
public static double code(double x) {
    final double scale = 2.0 / Math.sqrt(Math.PI);
    return Math.abs(x) * scale;
}
def code(x):
    """Leading-order term only: |x| * 2/sqrt(pi)."""
    scale = 2.0 / math.sqrt(math.pi)
    return math.fabs(x) * scale
# Leading-order term only: |x| * 2/sqrt(pi).
# Fix: signature and body were on one line without a separator (invalid
# Julia); only a line break was introduced.
function code(x)
    return Float64(abs(x) * Float64(2.0 / sqrt(pi)))
end
% Leading-order term only: |x| * 2/sqrt(pi).
% Fix: declaration and body were jammed on one line (invalid MATLAB function
% definition); only line breaks were introduced.
function tmp = code(x)
    tmp = abs(x) * (2.0 / sqrt(pi));
end
code[x_] := N[(N[Abs[x], $MachinePrecision] * N[(2.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left|x\right| \cdot \frac{2}{\sqrt{\pi}}
\end{array}
Initial program 99.8%
Applied egg-rr99.4%
Taylor expanded in x around 0
*-lowering-*.f64N/A
fabs-lowering-fabs.f6466.3
Simplified66.3%
fabs-divN/A
neg-fabsN/A
neg-fabsN/A
fabs-mulN/A
metadata-evalN/A
fabs-fabsN/A
*-commutativeN/A
rem-sqrt-squareN/A
add-sqr-sqrtN/A
associate-/l*N/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
PI-lowering-PI.f6466.7
Applied egg-rr66.7%
herbie shell --seed 2024194
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))