
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
double code(double x) {
    // Reciprocal odd powers of |x|: r1 = 1/|x|, r3 = 1/|x|^3, r5 = 1/|x|^5.
    // The multiplications keep the exact association order of the generated
    // original so the double-precision results are bit-identical.
    double r1 = 1.0 / fabs(x);
    double r3 = (r1 * r1) * r1;
    double r5 = (r3 * r1) * r1;
    // exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7))
    return ((1.0 / sqrt(((double) M_PI))) * exp((fabs(x) * fabs(x)))) * (((r1 + ((1.0 / 2.0) * r3)) + ((3.0 / 4.0) * r5)) + ((15.0 / 8.0) * ((r5 * r1) * r1)));
}
public static double code(double x) {
    // r1 = 1/|x|, r3 = 1/|x|^3, r5 = 1/|x|^5; association order is preserved
    // exactly so results match the generated original bit-for-bit.
    double r1 = 1.0 / Math.abs(x);
    double r3 = (r1 * r1) * r1;
    double r5 = (r3 * r1) * r1;
    // exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7))
    return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((Math.abs(x) * Math.abs(x)))) * (((r1 + ((1.0 / 2.0) * r3)) + ((3.0 / 4.0) * r5)) + ((15.0 / 8.0) * ((r5 * r1) * r1)));
}
def code(x):
    """exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).

    The generated report fused all statements onto one line, which is a
    Python syntax error; this is the same computation with the exact
    floating-point association order preserved.
    """
    t_0 = 1.0 / math.fabs(x)
    t_1 = (t_0 * t_0) * t_0
    t_2 = (t_1 * t_0) * t_0
    return ((1.0 / math.sqrt(math.pi)) * math.exp((math.fabs(x) * math.fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)))
function code(x)
    # exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
    # The generated report juxtaposed the assignments on one line, which does
    # not parse in Julia; arithmetic and association order are unchanged.
    t_0 = Float64(1.0 / abs(x))
    t_1 = Float64(Float64(t_0 * t_0) * t_0)
    t_2 = Float64(Float64(t_1 * t_0) * t_0)
    return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0))))
end
function tmp = code(x)
    % exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
    % The generated report fused the body onto the declaration line, which
    % does not parse in MATLAB; arithmetic order is unchanged.
    t_0 = 1.0 / abs(x);
    t_1 = (t_0 * t_0) * t_0;
    t_2 = (t_1 * t_0) * t_0;
    tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
end
(* Machine-precision evaluation of exp(x^2)/Sqrt[Pi] * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)); generated code — every N[...] rounding and the association order are significant, do not simplify. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t\_0 := \frac{1}{\left|x\right|}\\
t\_1 := \left(t\_0 \cdot t\_0\right) \cdot t\_0\\
t\_2 := \left(t\_1 \cdot t\_0\right) \cdot t\_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t\_0 + \frac{1}{2} \cdot t\_1\right) + \frac{3}{4} \cdot t\_2\right) + \frac{15}{8} \cdot \left(\left(t\_2 \cdot t\_0\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
double code(double x) {
    // Reciprocal odd powers of |x|: r1 = 1/|x|, r3 = 1/|x|^3, r5 = 1/|x|^5.
    // Association order matches the generated original exactly, so the
    // double-precision results are bit-identical.
    double r1 = 1.0 / fabs(x);
    double r3 = (r1 * r1) * r1;
    double r5 = (r3 * r1) * r1;
    // exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7))
    return ((1.0 / sqrt(((double) M_PI))) * exp((fabs(x) * fabs(x)))) * (((r1 + ((1.0 / 2.0) * r3)) + ((3.0 / 4.0) * r5)) + ((15.0 / 8.0) * ((r5 * r1) * r1)));
}
public static double code(double x) {
    // r1 = 1/|x|, r3 = 1/|x|^3, r5 = 1/|x|^5; the exact association order of
    // the generated original is kept so results are bit-for-bit identical.
    double r1 = 1.0 / Math.abs(x);
    double r3 = (r1 * r1) * r1;
    double r5 = (r3 * r1) * r1;
    // exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7))
    return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((Math.abs(x) * Math.abs(x)))) * (((r1 + ((1.0 / 2.0) * r3)) + ((3.0 / 4.0) * r5)) + ((15.0 / 8.0) * ((r5 * r1) * r1)));
}
def code(x):
    """exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).

    Reformatted: the generated report fused all statements onto one line,
    which is a Python syntax error. Floating-point association order is
    preserved exactly.
    """
    t_0 = 1.0 / math.fabs(x)
    t_1 = (t_0 * t_0) * t_0
    t_2 = (t_1 * t_0) * t_0
    return ((1.0 / math.sqrt(math.pi)) * math.exp((math.fabs(x) * math.fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)))
function code(x)
    # exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
    # Reformatted from a single fused line (a Julia parse error); arithmetic
    # and association order are unchanged.
    t_0 = Float64(1.0 / abs(x))
    t_1 = Float64(Float64(t_0 * t_0) * t_0)
    t_2 = Float64(Float64(t_1 * t_0) * t_0)
    return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0))))
end
function tmp = code(x)
    % exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
    % Reformatted from a single fused line (a MATLAB parse error); arithmetic
    % order is unchanged.
    t_0 = 1.0 / abs(x);
    t_1 = (t_0 * t_0) * t_0;
    t_2 = (t_1 * t_0) * t_0;
    tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
end
(* Machine-precision evaluation of exp(x^2)/Sqrt[Pi] * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)); generated code — every N[...] rounding and the association order are significant, do not simplify. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t\_0 := \frac{1}{\left|x\right|}\\
t\_1 := \left(t\_0 \cdot t\_0\right) \cdot t\_0\\
t\_2 := \left(t\_1 \cdot t\_0\right) \cdot t\_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t\_0 + \frac{1}{2} \cdot t\_1\right) + \frac{3}{4} \cdot t\_2\right) + \frac{15}{8} \cdot \left(\left(t\_2 \cdot t\_0\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* x x) (* x x))))
(*
(* (/ 1.0 (sqrt PI)) (exp (* x x)))
(+
(/
1.0
(/
1.0
(+ (/ 0.75 (* (fabs x) t_0)) (/ (+ 1.0 (/ 0.5 (* x x))) (fabs x)))))
(* 1.875 (/ 1.0 (* x (* (* x x) t_0))))))))
double code(double x) {
    // x4 = x^4, built as (x*x)*(x*x) exactly as in the generated original.
    double x4 = (x * x) * (x * x);
    // exp(x^2)/sqrt(pi) * ( 1/(1/(0.75/(|x|*x^4) + (1 + 0.5/x^2)/|x|)) + 1.875/x^7 );
    // the double reciprocal is deliberate in the generated code — keep it.
    return ((1.0 / sqrt(((double) M_PI))) * exp((x * x))) * ((1.0 / (1.0 / ((0.75 / (fabs(x) * x4)) + ((1.0 + (0.5 / (x * x))) / fabs(x))))) + (1.875 * (1.0 / (x * ((x * x) * x4)))));
}
public static double code(double x) {
    // x4 = x^4, built as (x*x)*(x*x) exactly as in the generated original.
    double x4 = (x * x) * (x * x);
    // exp(x^2)/sqrt(pi) * ( 1/(1/(0.75/(|x|*x^4) + (1 + 0.5/x^2)/|x|)) + 1.875/x^7 );
    // the double reciprocal is deliberate in the generated code — keep it.
    return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((x * x))) * ((1.0 / (1.0 / ((0.75 / (Math.abs(x) * x4)) + ((1.0 + (0.5 / (x * x))) / Math.abs(x))))) + (1.875 * (1.0 / (x * ((x * x) * x4)))));
}
def code(x):
    """exp(x^2)/sqrt(pi) * (1/(1/(0.75/(|x|*x^4) + (1 + 0.5/x^2)/|x|)) + 1.875/x^7).

    Reformatted: the generated report fused both statements onto one line,
    which is a Python syntax error. The double reciprocal is deliberate in
    the generated code; association order is preserved exactly.
    """
    t_0 = (x * x) * (x * x)
    return ((1.0 / math.sqrt(math.pi)) * math.exp((x * x))) * ((1.0 / (1.0 / ((0.75 / (math.fabs(x) * t_0)) + ((1.0 + (0.5 / (x * x))) / math.fabs(x))))) + (1.875 * (1.0 / (x * ((x * x) * t_0)))))
function code(x) t_0 = Float64(Float64(x * x) * Float64(x * x)) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(x * x))) * Float64(Float64(1.0 / Float64(1.0 / Float64(Float64(0.75 / Float64(abs(x) * t_0)) + Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) / abs(x))))) + Float64(1.875 * Float64(1.0 / Float64(x * Float64(Float64(x * x) * t_0)))))) end
function tmp = code(x) t_0 = (x * x) * (x * x); tmp = ((1.0 / sqrt(pi)) * exp((x * x))) * ((1.0 / (1.0 / ((0.75 / (abs(x) * t_0)) + ((1.0 + (0.5 / (x * x))) / abs(x))))) + (1.875 * (1.0 / (x * ((x * x) * t_0))))); end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 / N[(1.0 / N[(N[(0.75 / N[(N[Abs[x], $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.875 * N[(1.0 / N[(x * N[(N[(x * x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t\_0 := \left(x \cdot x\right) \cdot \left(x \cdot x\right)\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{x \cdot x}\right) \cdot \left(\frac{1}{\frac{1}{\frac{0.75}{\left|x\right| \cdot t\_0} + \frac{1 + \frac{0.5}{x \cdot x}}{\left|x\right|}}} + 1.875 \cdot \frac{1}{x \cdot \left(\left(x \cdot x\right) \cdot t\_0\right)}\right)
\end{array}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
associate-*l*N/A
associate-*l*N/A
pow3N/A
frac-timesN/A
metadata-evalN/A
sqr-absN/A
cube-divN/A
metadata-evalN/A
cube-unmultN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64100.0
Applied egg-rr100.0%
un-div-invN/A
associate-/r*N/A
/-lowering-/.f64N/A
associate-*r*N/A
associate-*l*N/A
associate-*l*N/A
Applied egg-rr100.0%
metadata-eval100.0
Applied egg-rr100.0%
Final simplification100.0%
(FPCore (x)
:precision binary64
(*
(*
(+
(/ (+ 1.0 (/ 0.5 (* x x))) (fabs x))
(/ (+ (/ 0.75 (fabs x)) (/ 1.875 (* x (* x x)))) (* (* x x) (* x x))))
(sqrt (/ 1.0 PI)))
(exp (* x x))))
// Taylor-expanded (x -> inf) alternative from the Herbie report:
//   ((1 + 0.5/x^2)/|x| + (0.75/|x| + 1.875/(x*x^2)) / x^4) * sqrt(1/pi) * exp(x^2)
// NOTE(review): the 1.875/(x*(x*x)) term is signed, unlike 15/(8*|x|^7) in the
// original series — the sign differs for x < 0; confirm intended.
double code(double x) {
return ((((1.0 + (0.5 / (x * x))) / fabs(x)) + (((0.75 / fabs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * sqrt((1.0 / ((double) M_PI)))) * exp((x * x));
}
// Taylor-expanded (x -> inf) alternative:
//   ((1 + 0.5/x^2)/|x| + (0.75/|x| + 1.875/(x*x^2)) / x^4) * sqrt(1/pi) * exp(x^2)
// NOTE(review): 1.875/(x*(x*x)) is signed, unlike 15/(8*|x|^7) in the original
// series — the sign differs for x < 0; confirm intended.
public static double code(double x) {
return ((((1.0 + (0.5 / (x * x))) / Math.abs(x)) + (((0.75 / Math.abs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * Math.sqrt((1.0 / Math.PI))) * Math.exp((x * x));
}
def code(x):
    """((1 + 0.5/x^2)/|x| + (0.75/|x| + 1.875/(x*x^2))/x^4) * sqrt(1/pi) * exp(x^2).

    Generated code: association order matters, do not simplify.
    NOTE(review): 1.875/(x*(x*x)) is signed, unlike the original's 15/(8*|x|^7).
    """
    return ((((1.0 + (0.5 / (x * x))) / math.fabs(x)) + (((0.75 / math.fabs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * math.sqrt((1.0 / math.pi))) * math.exp((x * x))
function code(x) return Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) / abs(x)) + Float64(Float64(Float64(0.75 / abs(x)) + Float64(1.875 / Float64(x * Float64(x * x)))) / Float64(Float64(x * x) * Float64(x * x)))) * sqrt(Float64(1.0 / pi))) * exp(Float64(x * x))) end
function tmp = code(x) tmp = ((((1.0 + (0.5 / (x * x))) / abs(x)) + (((0.75 / abs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * sqrt((1.0 / pi))) * exp((x * x)); end
code[x_] := N[(N[(N[(N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.75 / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(1.875 / N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(\frac{1 + \frac{0.5}{x \cdot x}}{\left|x\right|} + \frac{\frac{0.75}{\left|x\right|} + \frac{1.875}{x \cdot \left(x \cdot x\right)}}{\left(x \cdot x\right) \cdot \left(x \cdot x\right)}\right) \cdot \sqrt{\frac{1}{\pi}}\right) \cdot e^{x \cdot x}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
+-lowering-+.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
fabs-lowering-fabs.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-unmultN/A
metadata-evalN/A
Simplified100.0%
Applied egg-rr100.0%
(FPCore (x)
:precision binary64
(/
(*
(+
(/ (+ 1.0 (/ 0.5 (* x x))) (fabs x))
(/ (+ (/ 0.75 (fabs x)) (/ 1.875 (* x (* x x)))) (* (* x x) (* x x))))
(exp (* x x)))
(sqrt PI)))
// Same series as the previous alternative, but multiplied by exp(x^2) first
// and divided by sqrt(pi) at the end:
//   ((1 + 0.5/x^2)/|x| + (0.75/|x| + 1.875/(x*x^2)) / x^4) * exp(x^2) / sqrt(pi)
double code(double x) {
return ((((1.0 + (0.5 / (x * x))) / fabs(x)) + (((0.75 / fabs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * exp((x * x))) / sqrt(((double) M_PI));
}
// ((1 + 0.5/x^2)/|x| + (0.75/|x| + 1.875/(x*x^2)) / x^4) * exp(x^2) / sqrt(pi).
// Generated code: association order matters, do not simplify.
public static double code(double x) {
return ((((1.0 + (0.5 / (x * x))) / Math.abs(x)) + (((0.75 / Math.abs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * Math.exp((x * x))) / Math.sqrt(Math.PI);
}
def code(x):
    """((1 + 0.5/x^2)/|x| + (0.75/|x| + 1.875/(x*x^2))/x^4) * exp(x^2) / sqrt(pi).

    Generated code: association order matters, do not simplify.
    """
    return ((((1.0 + (0.5 / (x * x))) / math.fabs(x)) + (((0.75 / math.fabs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * math.exp((x * x))) / math.sqrt(math.pi)
function code(x) return Float64(Float64(Float64(Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) / abs(x)) + Float64(Float64(Float64(0.75 / abs(x)) + Float64(1.875 / Float64(x * Float64(x * x)))) / Float64(Float64(x * x) * Float64(x * x)))) * exp(Float64(x * x))) / sqrt(pi)) end
function tmp = code(x) tmp = ((((1.0 + (0.5 / (x * x))) / abs(x)) + (((0.75 / abs(x)) + (1.875 / (x * (x * x)))) / ((x * x) * (x * x)))) * exp((x * x))) / sqrt(pi); end
code[x_] := N[(N[(N[(N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.75 / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(1.875 / N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(\frac{1 + \frac{0.5}{x \cdot x}}{\left|x\right|} + \frac{\frac{0.75}{\left|x\right|} + \frac{1.875}{x \cdot \left(x \cdot x\right)}}{\left(x \cdot x\right) \cdot \left(x \cdot x\right)}\right) \cdot e^{x \cdot x}}{\sqrt{\pi}}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
+-lowering-+.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
fabs-lowering-fabs.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-unmultN/A
metadata-evalN/A
Simplified100.0%
associate-*l/N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr100.0%
(FPCore (x) :precision binary64 (/ (* (exp (* x x)) (+ (/ (+ 1.0 (/ 0.5 (* x x))) (fabs x)) (/ 0.75 (* x (* (* x x) (* x x)))))) (sqrt PI)))
// Truncated alternative: exp(x^2) * ((1 + 0.5/x^2)/|x| + 0.75/x^5) / sqrt(pi).
// NOTE(review): 0.75/(x*x^4) is signed, unlike 3/(4*|x|^5) in the original
// series — sign differs for x < 0; accuracy claim (99.8%) comes from the
// surrounding Herbie report, not verified here.
double code(double x) {
return (exp((x * x)) * (((1.0 + (0.5 / (x * x))) / fabs(x)) + (0.75 / (x * ((x * x) * (x * x)))))) / sqrt(((double) M_PI));
}
// Truncated alternative: exp(x^2) * ((1 + 0.5/x^2)/|x| + 0.75/x^5) / sqrt(pi).
// NOTE(review): 0.75/(x*x^4) is signed, unlike 3/(4*|x|^5) in the original
// series — sign differs for x < 0; confirm intended.
public static double code(double x) {
return (Math.exp((x * x)) * (((1.0 + (0.5 / (x * x))) / Math.abs(x)) + (0.75 / (x * ((x * x) * (x * x)))))) / Math.sqrt(Math.PI);
}
def code(x):
    """exp(x^2) * ((1 + 0.5/x^2)/|x| + 0.75/x^5) / sqrt(pi).

    NOTE(review): 0.75/(x*x^4) is signed, unlike 3/(4*|x|^5) in the
    original series — sign differs for x < 0; confirm intended.
    """
    return (math.exp((x * x)) * (((1.0 + (0.5 / (x * x))) / math.fabs(x)) + (0.75 / (x * ((x * x) * (x * x)))))) / math.sqrt(math.pi)
function code(x) return Float64(Float64(exp(Float64(x * x)) * Float64(Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) / abs(x)) + Float64(0.75 / Float64(x * Float64(Float64(x * x) * Float64(x * x)))))) / sqrt(pi)) end
function tmp = code(x) tmp = (exp((x * x)) * (((1.0 + (0.5 / (x * x))) / abs(x)) + (0.75 / (x * ((x * x) * (x * x)))))) / sqrt(pi); end
code[x_] := N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] * N[(N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(0.75 / N[(x * N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x} \cdot \left(\frac{1 + \frac{0.5}{x \cdot x}}{\left|x\right|} + \frac{0.75}{x \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right)}\right)}{\sqrt{\pi}}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
/-lowering-/.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
unpow2N/A
associate-*l*N/A
unpow2N/A
cube-unmultN/A
metadata-evalN/A
pow-plusN/A
*-lowering-*.f64N/A
pow-plusN/A
metadata-evalN/A
cube-unmultN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f6499.8
Simplified99.8%
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (x) :precision binary64 (/ (* (exp (* x x)) (+ (/ 1.0 (fabs x)) (/ 0.5 (* x (* x x))))) (sqrt PI)))
// Two-term alternative: exp(x^2) * (1/|x| + 0.5/x^3) / sqrt(pi).
// NOTE(review): 0.5/(x*(x*x)) is signed, unlike 1/(2*|x|^3) in the original
// series — sign differs for x < 0; confirm intended.
double code(double x) {
return (exp((x * x)) * ((1.0 / fabs(x)) + (0.5 / (x * (x * x))))) / sqrt(((double) M_PI));
}
// Two-term alternative: exp(x^2) * (1/|x| + 0.5/x^3) / sqrt(pi).
// NOTE(review): 0.5/(x*(x*x)) is signed, unlike 1/(2*|x|^3) in the original
// series — sign differs for x < 0; confirm intended.
public static double code(double x) {
return (Math.exp((x * x)) * ((1.0 / Math.abs(x)) + (0.5 / (x * (x * x))))) / Math.sqrt(Math.PI);
}
def code(x):
    """exp(x^2) * (1/|x| + 0.5/x^3) / sqrt(pi).

    NOTE(review): 0.5/(x*(x*x)) is signed, unlike 1/(2*|x|^3) in the
    original series — sign differs for x < 0; confirm intended.
    """
    return (math.exp((x * x)) * ((1.0 / math.fabs(x)) + (0.5 / (x * (x * x))))) / math.sqrt(math.pi)
function code(x) return Float64(Float64(exp(Float64(x * x)) * Float64(Float64(1.0 / abs(x)) + Float64(0.5 / Float64(x * Float64(x * x))))) / sqrt(pi)) end
function tmp = code(x) tmp = (exp((x * x)) * ((1.0 / abs(x)) + (0.5 / (x * (x * x))))) / sqrt(pi); end
code[x_] := N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] * N[(N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(0.5 / N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x} \cdot \left(\frac{1}{\left|x\right|} + \frac{0.5}{x \cdot \left(x \cdot x\right)}\right)}{\sqrt{\pi}}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
associate-*l*N/A
associate-*l*N/A
pow3N/A
frac-timesN/A
metadata-evalN/A
sqr-absN/A
cube-divN/A
metadata-evalN/A
cube-unmultN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64100.0
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-/l/N/A
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-inversesN/A
associate-/r*N/A
sqr-absN/A
unpow2N/A
times-fracN/A
pow-sqrN/A
metadata-evalN/A
*-rgt-identityN/A
times-fracN/A
metadata-evalN/A
metadata-evalN/A
distribute-rgt-outN/A
+-lowering-+.f64N/A
Simplified99.8%
associate-*l/N/A
associate-*l/N/A
/-lowering-/.f64N/A
Applied egg-rr99.8%
Final simplification99.8%
(FPCore (x) :precision binary64 (/ (exp (* x x)) (* (sqrt PI) (fabs x))))
// Leading-term-only alternative: exp(x^2) / (sqrt(pi) * |x|).
double code(double x) {
return exp((x * x)) / (sqrt(((double) M_PI)) * fabs(x));
}
// Leading-term-only alternative: exp(x^2) / (sqrt(pi) * |x|).
public static double code(double x) {
return Math.exp((x * x)) / (Math.sqrt(Math.PI) * Math.abs(x));
}
def code(x):
    """Leading-term-only alternative: exp(x^2) / (sqrt(pi) * |x|)."""
    return math.exp((x * x)) / (math.sqrt(math.pi) * math.fabs(x))
function code(x) return Float64(exp(Float64(x * x)) / Float64(sqrt(pi) * abs(x))) end
function tmp = code(x) tmp = exp((x * x)) / (sqrt(pi) * abs(x)); end
code[x_] := N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[(N[Sqrt[Pi], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x}}{\sqrt{\pi} \cdot \left|x\right|}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f6499.7
Simplified99.7%
sqr-absN/A
clear-numN/A
un-div-invN/A
*-lft-identityN/A
/-lowering-/.f64N/A
*-lft-identityN/A
sqr-absN/A
exp-lowering-exp.f64N/A
*-lowering-*.f64N/A
sqrt-divN/A
metadata-evalN/A
associate-/r/N/A
/-rgt-identityN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
sqrt-lowering-sqrt.f64N/A
PI-lowering-PI.f6499.7
Applied egg-rr99.7%
Final simplification99.7%
(FPCore (x)
:precision binary64
(let* ((t_0 (fma 0.5 (* x x) 1.0)))
(if (<= (fabs x) 1e+61)
(/
(fma (* x x) (* t_0 (* (* x x) t_0)) -1.0)
(* (* (sqrt PI) (fabs x)) (fma (* x x) t_0 -1.0)))
(*
(* (* x x) (* x x))
(* (sqrt (/ 1.0 PI)) (* (fabs x) 0.16666666666666666))))))
// Piecewise alternative chosen by Herbie, split on |x| <= 1e61:
//   small branch: fma(x^2, t0*(x^2*t0), -1) / (sqrt(pi)*|x| * fma(x^2, t0, -1))
//                 with t0 = fma(0.5, x^2, 1)   (fma-based rational form)
//   large branch: x^4 * sqrt(1/pi) * |x| * (1/6)  (polynomial surrogate)
// NOTE(review): for |x| > 1e61, exp(x^2) in the source program overflows, so
// the large branch is a Taylor-derived replacement; accuracy figures come
// from the surrounding Herbie report, not verified here.
double code(double x) {
double t_0 = fma(0.5, (x * x), 1.0);
double tmp;
if (fabs(x) <= 1e+61) {
tmp = fma((x * x), (t_0 * ((x * x) * t_0)), -1.0) / ((sqrt(((double) M_PI)) * fabs(x)) * fma((x * x), t_0, -1.0));
} else {
tmp = ((x * x) * (x * x)) * (sqrt((1.0 / ((double) M_PI))) * (fabs(x) * 0.16666666666666666));
}
return tmp;
}
function code(x) t_0 = fma(0.5, Float64(x * x), 1.0) tmp = 0.0 if (abs(x) <= 1e+61) tmp = Float64(fma(Float64(x * x), Float64(t_0 * Float64(Float64(x * x) * t_0)), -1.0) / Float64(Float64(sqrt(pi) * abs(x)) * fma(Float64(x * x), t_0, -1.0))); else tmp = Float64(Float64(Float64(x * x) * Float64(x * x)) * Float64(sqrt(Float64(1.0 / pi)) * Float64(abs(x) * 0.16666666666666666))); end return tmp end
code[x_] := Block[{t$95$0 = N[(0.5 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 1e+61], N[(N[(N[(x * x), $MachinePrecision] * N[(t$95$0 * N[(N[(x * x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / N[(N[(N[Sqrt[Pi], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * t$95$0 + -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t\_0 := \mathsf{fma}\left(0.5, x \cdot x, 1\right)\\
\mathbf{if}\;\left|x\right| \leq 10^{+61}:\\
\;\;\;\;\frac{\mathsf{fma}\left(x \cdot x, t\_0 \cdot \left(\left(x \cdot x\right) \cdot t\_0\right), -1\right)}{\left(\sqrt{\pi} \cdot \left|x\right|\right) \cdot \mathsf{fma}\left(x \cdot x, t\_0, -1\right)}\\
\mathbf{else}:\\
\;\;\;\;\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(\sqrt{\frac{1}{\pi}} \cdot \left(\left|x\right| \cdot 0.16666666666666666\right)\right)\\
\end{array}
\end{array}
if (fabs.f64 x) < 9.99999999999999949e60Initial program 99.9%
Applied egg-rr99.9%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f6498.7
Simplified98.7%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f644.1
Simplified4.1%
flip-+N/A
clear-numN/A
frac-timesN/A
/-lowering-/.f64N/A
Applied egg-rr43.7%
if 9.99999999999999949e60 < (fabs.f64 x) Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f64100.0
Simplified100.0%
Taylor expanded in x around 0
Simplified100.0%
Taylor expanded in x around inf
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
/-lowering-/.f64N/A
PI-lowering-PI.f64N/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64100.0
Simplified100.0%
Final simplification88.3%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma x (* x (fma (* x x) 0.16666666666666666 0.5)) 1.0) 1.0) (/ (sqrt (/ 1.0 PI)) (fabs x))))
// Nested-fma polynomial alternative (Taylor around 0):
//   fma(x^2, fma(x, x*fma(x^2, 1/6, 0.5), 1), 1) * sqrt(1/pi) / |x|
// 0.16666666666666666 is the double nearest 1/6.
double code(double x) {
return fma((x * x), fma(x, (x * fma((x * x), 0.16666666666666666, 0.5)), 1.0), 1.0) * (sqrt((1.0 / ((double) M_PI))) / fabs(x));
}
function code(x) return Float64(fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), 0.16666666666666666, 0.5)), 1.0), 1.0) * Float64(sqrt(Float64(1.0 / pi)) / abs(x))) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * 0.16666666666666666 + 0.5), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision] * N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, 0.16666666666666666, 0.5\right), 1\right), 1\right) \cdot \frac{\sqrt{\frac{1}{\pi}}}{\left|x\right|}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f6499.7
Simplified99.7%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
+-commutativeN/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f6484.3
Simplified84.3%
(FPCore (x) :precision binary64 (* (* (* x x) (* x x)) (* (sqrt (/ 1.0 PI)) (fma (fabs x) 0.16666666666666666 (/ 0.5 (fabs x))))))
// Large-|x| surrogate: x^4 * sqrt(1/pi) * fma(|x|, 1/6, 0.5/|x|).
// 0.16666666666666666 is the double nearest 1/6.
double code(double x) {
return ((x * x) * (x * x)) * (sqrt((1.0 / ((double) M_PI))) * fma(fabs(x), 0.16666666666666666, (0.5 / fabs(x))));
}
function code(x) return Float64(Float64(Float64(x * x) * Float64(x * x)) * Float64(sqrt(Float64(1.0 / pi)) * fma(abs(x), 0.16666666666666666, Float64(0.5 / abs(x))))) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * 0.16666666666666666 + N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(\sqrt{\frac{1}{\pi}} \cdot \mathsf{fma}\left(\left|x\right|, 0.16666666666666666, \frac{0.5}{\left|x\right|}\right)\right)
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f6499.7
Simplified99.7%
Taylor expanded in x around 0
Simplified80.5%
Taylor expanded in x around inf
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*r*N/A
associate-*r*N/A
distribute-rgt-outN/A
*-commutativeN/A
*-lowering-*.f64N/A
Simplified80.5%
(FPCore (x) :precision binary64 (* (* (* x x) (* x x)) (* (sqrt (/ 1.0 PI)) (* (fabs x) 0.16666666666666666))))
// Large-|x| surrogate (single term): x^4 * sqrt(1/pi) * |x| * (1/6).
// 0.16666666666666666 is the double nearest 1/6.
double code(double x) {
return ((x * x) * (x * x)) * (sqrt((1.0 / ((double) M_PI))) * (fabs(x) * 0.16666666666666666));
}
// Large-|x| surrogate (single term): x^4 * sqrt(1/pi) * |x| * (1/6).
// 0.16666666666666666 is the double nearest 1/6.
public static double code(double x) {
return ((x * x) * (x * x)) * (Math.sqrt((1.0 / Math.PI)) * (Math.abs(x) * 0.16666666666666666));
}
def code(x):
    """Large-|x| surrogate: x^4 * sqrt(1/pi) * |x| * (1/6).

    0.16666666666666666 is the float nearest 1/6.
    """
    return ((x * x) * (x * x)) * (math.sqrt((1.0 / math.pi)) * (math.fabs(x) * 0.16666666666666666))
function code(x) return Float64(Float64(Float64(x * x) * Float64(x * x)) * Float64(sqrt(Float64(1.0 / pi)) * Float64(abs(x) * 0.16666666666666666))) end
function tmp = code(x) tmp = ((x * x) * (x * x)) * (sqrt((1.0 / pi)) * (abs(x) * 0.16666666666666666)); end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot \left(\sqrt{\frac{1}{\pi}} \cdot \left(\left|x\right| \cdot 0.16666666666666666\right)\right)
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f6499.7
Simplified99.7%
Taylor expanded in x around 0
Simplified80.5%
Taylor expanded in x around inf
*-commutativeN/A
associate-*l*N/A
*-commutativeN/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
metadata-evalN/A
pow-sqrN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
*-commutativeN/A
associate-*l*N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
/-lowering-/.f64N/A
PI-lowering-PI.f64N/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f6480.5
Simplified80.5%
(FPCore (x) :precision binary64 (/ (fma (* x x) (fma 0.5 (* x x) 1.0) 1.0) (* (sqrt PI) (fabs x))))
// fma-based rational alternative:
//   fma(x^2, fma(0.5, x^2, 1), 1) / (sqrt(pi) * |x|)
double code(double x) {
return fma((x * x), fma(0.5, (x * x), 1.0), 1.0) / (sqrt(((double) M_PI)) * fabs(x));
}
function code(x) return Float64(fma(Float64(x * x), fma(0.5, Float64(x * x), 1.0), 1.0) / Float64(sqrt(pi) * abs(x))) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(0.5 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision] / N[(N[Sqrt[Pi], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(0.5, x \cdot x, 1\right), 1\right)}{\sqrt{\pi} \cdot \left|x\right|}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f6499.7
Simplified99.7%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f6474.6
Simplified74.6%
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
sqrt-divN/A
metadata-evalN/A
associate-/r/N/A
/-rgt-identityN/A
*-lowering-*.f64N/A
fabs-lowering-fabs.f64N/A
sqrt-lowering-sqrt.f64N/A
PI-lowering-PI.f6474.6
Applied egg-rr74.6%
Final simplification74.6%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (* (fabs x) (fma 0.5 (* x x) 1.0))))
// Small-|x| Taylor alternative: sqrt(1/pi) * |x| * fma(0.5, x^2, 1).
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (fabs(x) * fma(0.5, (x * x), 1.0));
}
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(abs(x) * fma(0.5, Float64(x * x), 1.0))) end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * N[(0.5 * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \left(\left|x\right| \cdot \mathsf{fma}\left(0.5, x \cdot x, 1\right)\right)
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f64 99.7
Simplified 99.7%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64 74.6
Simplified 74.6%
Taylor expanded in x around inf
Simplified 68.1%
Final simplification 68.1%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (* (fabs x) (* 0.5 (* x x)))))
/* Evaluates sqrt(1/pi) * |x| * (0.5 * x^2) in binary64; identical
 * operation order to the expression form, with named intermediates. */
double code(double x) {
    double inv_sqrt_pi = sqrt(1.0 / (double) M_PI); /* sqrt(1/pi) */
    double half_sq = 0.5 * (x * x);                 /* 0.5 * x^2 */
    return inv_sqrt_pi * (fabs(x) * half_sq);
}
/** Evaluates sqrt(1/pi) * |x| * (0.5 * x^2) in double precision,
 *  using the same operation order as the expression form. */
public static double code(double x) {
    double invSqrtPi = Math.sqrt(1.0 / Math.PI); // sqrt(1/pi)
    double halfSquare = 0.5 * (x * x);           // 0.5 * x^2
    return invSqrtPi * (Math.abs(x) * halfSquare);
}
def code(x): return math.sqrt((1.0 / math.pi)) * (math.fabs(x) * (0.5 * (x * x)))
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(abs(x) * Float64(0.5 * Float64(x * x)))) end
function tmp = code(x) tmp = sqrt((1.0 / pi)) * (abs(x) * (0.5 * (x * x))); end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * N[(0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \left(\left|x\right| \cdot \left(0.5 \cdot \left(x \cdot x\right)\right)\right)
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f64 99.7
Simplified 99.7%
Taylor expanded in x around 0
+-commutativeN/A
accelerator-lowering-fma.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*r*N/A
*-commutativeN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
*-lowering-*.f64 74.6
Simplified 74.6%
Taylor expanded in x around inf
associate-*r*N/A
*-commutativeN/A
associate-*r/N/A
metadata-evalN/A
pow-sqrN/A
associate-*l*N/A
associate-*l/N/A
associate-*r/N/A
*-commutativeN/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
/-lowering-/.f64N/A
PI-lowering-PI.f64N/A
*-commutativeN/A
associate-*r/ N/A
Simplified 68.1%
(FPCore (x) :precision binary64 (/ 1.0 (* (sqrt PI) (fabs x))))
/* Evaluates 1 / (sqrt(pi) * |x|) in binary64; identical operation
 * order to the expression form, with a named denominator. */
double code(double x) {
    double denom = sqrt((double) M_PI) * fabs(x); /* sqrt(pi) * |x| */
    return 1.0 / denom;
}
/** Evaluates 1 / (sqrt(pi) * |x|) in double precision,
 *  same operation order as the expression form. */
public static double code(double x) {
    double denom = Math.sqrt(Math.PI) * Math.abs(x); // sqrt(pi) * |x|
    return 1.0 / denom;
}
def code(x): return 1.0 / (math.sqrt(math.pi) * math.fabs(x))
function code(x) return Float64(1.0 / Float64(sqrt(pi) * abs(x))) end
function tmp = code(x) tmp = 1.0 / (sqrt(pi) * abs(x)); end
code[x_] := N[(1.0 / N[(N[Sqrt[Pi], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\sqrt{\pi} \cdot \left|x\right|}
\end{array}
Initial program 100.0%
Applied egg-rr100.0%
Taylor expanded in x around inf
associate-*r/N/A
*-commutativeN/A
associate-/l*N/A
*-lowering-*.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
exp-lowering-exp.f64N/A
unpow2N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
*-inversesN/A
/-lowering-/.f64N/A
*-inversesN/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f64 99.7
Simplified 99.7%
Taylor expanded in x around 0
associate-*r/N/A
*-rgt-identityN/A
/-lowering-/.f64N/A
sqrt-lowering-sqrt.f64N/A
/-lowering-/.f64N/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f64 2.3
Simplified 2.3%
div-invN/A
sqrt-divN/A
metadata-evalN/A
frac-timesN/A
metadata-evalN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
sqrt-lowering-sqrt.f64N/A
PI-lowering-PI.f64N/A
fabs-lowering-fabs.f64 2.3
Applied egg-rr 2.3%
herbie shell --seed 2024194
(FPCore (x)
:name "Jmat.Real.erfi, branch x greater than or equal to 5"
:precision binary64
:pre (>= x 0.5)
(* (* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x)))) (+ (+ (+ (/ 1.0 (fabs x)) (* (/ 1.0 2.0) (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 3.0 4.0) (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 15.0 8.0) (* (* (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x)))))))