
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
/* Scaled asymptotic series for erfi(x) (see the FPCore name: "Jmat.Real.erfi,
 * branch x greater than or equal to 5"): exp(x^2)/sqrt(pi) multiplied by a
 * truncated series in 1/|x| with terms 1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5
 * + (15/8)/|x|^7. Symmetric in x (only |x| appears); diverges as x -> 0. */
double code(double x) {
double inv = 1.0 / fabs(x);            /* 1/|x|   */
double inv3 = inv * inv * inv;         /* 1/|x|^3 */
double inv5 = inv3 * inv * inv;        /* 1/|x|^5 */
double inv7 = inv5 * inv * inv;        /* 1/|x|^7 */
/* accumulate the series in the same left-to-right order as the original */
double series = inv + (1.0 / 2.0) * inv3;
series = series + (3.0 / 4.0) * inv5;
series = series + (15.0 / 8.0) * inv7;
double prefactor = (1.0 / sqrt((double) M_PI)) * exp(fabs(x) * fabs(x));
return prefactor * series;
}
/**
 * Scaled asymptotic series for erfi(x) (FPCore name: "Jmat.Real.erfi,
 * branch x greater than or equal to 5"): exp(x^2)/sqrt(pi) times the
 * truncated series 1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7.
 * Even in x (only Math.abs(x) is used); diverges as x approaches 0.
 */
public static double code(double x) {
// Inverse powers of |x|: t_0 = 1/|x|, t_1 = 1/|x|^3, t_2 = 1/|x|^5.
double t_0 = 1.0 / Math.abs(x);
double t_1 = (t_0 * t_0) * t_0;
double t_2 = (t_1 * t_0) * t_0;
// Prefactor exp(x^2)/sqrt(pi) times the series; the final product
// (t_2 * t_0) * t_0 supplies the 1/|x|^7 term.
return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((Math.abs(x) * Math.abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
}
def code(x):
    """Scaled asymptotic series for erfi(x).

    Computes exp(x^2)/sqrt(pi) times the truncated series
    1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7.
    Even in x (only abs(x) is used); diverges as x -> 0.

    Note: the original report line jammed all statements onto one line,
    which is a SyntaxError in Python; this is the same computation
    restored to valid form.
    """
    t_0 = 1.0 / math.fabs(x)        # 1/|x|
    t_1 = (t_0 * t_0) * t_0         # 1/|x|^3
    t_2 = (t_1 * t_0) * t_0         # 1/|x|^5
    return ((1.0 / math.sqrt(math.pi)) * math.exp(math.fabs(x) * math.fabs(x))) * (
        ((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2))
        + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))
    )
# Scaled asymptotic series for erfi(x): exp(x^2)/sqrt(pi) times
# 1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7.
# The report line had all statements flattened onto one line with no
# separators, which Julia does not parse; restored to multi-line form.
function code(x)
    t_0 = Float64(1.0 / abs(x))                  # 1/|x|
    t_1 = Float64(Float64(t_0 * t_0) * t_0)      # 1/|x|^3
    t_2 = Float64(Float64(t_1 * t_0) * t_0)      # 1/|x|^5
    return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0))))
end
% Scaled asymptotic series for erfi(x): exp(x^2)/sqrt(pi) times
% 1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7 (flattened
% one-line form as emitted by the report generator).
function tmp = code(x) t_0 = 1.0 / abs(x); t_1 = (t_0 * t_0) * t_0; t_2 = (t_1 * t_0) * t_0; tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); end
(* Scaled asymptotic series for erfi(x): exp(x^2)/sqrt(pi) times
   1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7; every operation
   is wrapped in N[..., $MachinePrecision] to mirror binary64 rounding. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t\_0 \cdot t\_0\right) \cdot t\_0\\
t_2 := \left(t\_1 \cdot t\_0\right) \cdot t\_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t\_0 + \frac{1}{2} \cdot t\_1\right) + \frac{3}{4} \cdot t\_2\right) + \frac{15}{8} \cdot \left(\left(t\_2 \cdot t\_0\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
double code(double x) {
double t_0 = 1.0 / fabs(x);
double t_1 = (t_0 * t_0) * t_0;
double t_2 = (t_1 * t_0) * t_0;
return ((1.0 / sqrt(((double) M_PI))) * exp((fabs(x) * fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
}
public static double code(double x) {
double t_0 = 1.0 / Math.abs(x);
double t_1 = (t_0 * t_0) * t_0;
double t_2 = (t_1 * t_0) * t_0;
return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((Math.abs(x) * Math.abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
}
def code(x): t_0 = 1.0 / math.fabs(x) t_1 = (t_0 * t_0) * t_0 t_2 = (t_1 * t_0) * t_0 return ((1.0 / math.sqrt(math.pi)) * math.exp((math.fabs(x) * math.fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)))
function code(x) t_0 = Float64(1.0 / abs(x)) t_1 = Float64(Float64(t_0 * t_0) * t_0) t_2 = Float64(Float64(t_1 * t_0) * t_0) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0)))) end
function tmp = code(x) t_0 = 1.0 / abs(x); t_1 = (t_0 * t_0) * t_0; t_2 = (t_1 * t_0) * t_0; tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); end
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t\_0 \cdot t\_0\right) \cdot t\_0\\
t_2 := \left(t\_1 \cdot t\_0\right) \cdot t\_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t\_0 + \frac{1}{2} \cdot t\_1\right) + \frac{3}{4} \cdot t\_2\right) + \frac{15}{8} \cdot \left(\left(t\_2 \cdot t\_0\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x x))))
(*
(* (sqrt (/ 1.0 PI)) (/ (pow (exp x) x) (fabs x)))
(+
(/ (+ (/ 0.5 (fabs x)) (/ 0.75 t_0)) (fabs x))
(+ 1.0 (/ 1.875 (* (* x x) (* x t_0))))))))
/* Herbie alternative: same asymptotic series with the prefactor rewritten
 * as sqrt(1/pi) * pow(exp(x), x) / |x| and the series regrouped.
 * NOTE(review): pow(exp(x), x) equals exp(x*x) mathematically, but it
 * evaluates exp(x) first, whose overflow/underflow behavior differs from
 * exp(x*x) for large |x| -- confirm against the intended domain (the
 * source FPCore restricts inputs with :pre (>= x 0.5)). */
double code(double x) {
/* t_0 = x^3 (signed, not |x|^3) */
double t_0 = x * (x * x);
return (sqrt((1.0 / ((double) M_PI))) * (pow(exp(x), x) / fabs(x))) * ((((0.5 / fabs(x)) + (0.75 / t_0)) / fabs(x)) + (1.0 + (1.875 / ((x * x) * (x * t_0)))));
}
public static double code(double x) {
double t_0 = x * (x * x);
return (Math.sqrt((1.0 / Math.PI)) * (Math.pow(Math.exp(x), x) / Math.abs(x))) * ((((0.5 / Math.abs(x)) + (0.75 / t_0)) / Math.abs(x)) + (1.0 + (1.875 / ((x * x) * (x * t_0)))));
}
def code(x): t_0 = x * (x * x) return (math.sqrt((1.0 / math.pi)) * (math.pow(math.exp(x), x) / math.fabs(x))) * ((((0.5 / math.fabs(x)) + (0.75 / t_0)) / math.fabs(x)) + (1.0 + (1.875 / ((x * x) * (x * t_0)))))
function code(x) t_0 = Float64(x * Float64(x * x)) return Float64(Float64(sqrt(Float64(1.0 / pi)) * Float64((exp(x) ^ x) / abs(x))) * Float64(Float64(Float64(Float64(0.5 / abs(x)) + Float64(0.75 / t_0)) / abs(x)) + Float64(1.0 + Float64(1.875 / Float64(Float64(x * x) * Float64(x * t_0)))))) end
function tmp = code(x) t_0 = x * (x * x); tmp = (sqrt((1.0 / pi)) * ((exp(x) ^ x) / abs(x))) * ((((0.5 / abs(x)) + (0.75 / t_0)) / abs(x)) + (1.0 + (1.875 / ((x * x) * (x * t_0))))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Power[N[Exp[x], $MachinePrecision], x], $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(0.75 / t$95$0), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(1.0 + N[(1.875 / N[(N[(x * x), $MachinePrecision] * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot x\right)\\
\left(\sqrt{\frac{1}{\pi}} \cdot \frac{{\left(e^{x}\right)}^{x}}{\left|x\right|}\right) \cdot \left(\frac{\frac{0.5}{\left|x\right|} + \frac{0.75}{t\_0}}{\left|x\right|} + \left(1 + \frac{1.875}{\left(x \cdot x\right) \cdot \left(x \cdot t\_0\right)}\right)\right)
\end{array}
\end{array}
Initial program 100.0%
Applied rewrites 100.0%
Taylor expanded in x around inf
Applied rewrites 100.0%
exp-prod N/A
lower-pow.f64 N/A
lower-exp.f64 100.0
Applied rewrites 100.0%
cube-unmult N/A
sqr-pow N/A
fabs-sqr N/A
sqr-pow N/A
cube-unmult N/A
lift-*.f64 N/A
lift-*.f64 N/A
lower-/.f64 100.0
Applied rewrites 100.0%
Final simplification 100.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x x))))
(/
(*
(/ (exp (* x x)) (sqrt PI))
(+
1.0
(+
(/ (+ (/ 0.5 (fabs x)) (/ 0.75 t_0)) (fabs x))
(/ 1.875 (* (* x x) (* x t_0))))))
(fabs x))))
double code(double x) {
double t_0 = x * (x * x);
return ((exp((x * x)) / sqrt(((double) M_PI))) * (1.0 + ((((0.5 / fabs(x)) + (0.75 / t_0)) / fabs(x)) + (1.875 / ((x * x) * (x * t_0)))))) / fabs(x);
}
public static double code(double x) {
double t_0 = x * (x * x);
return ((Math.exp((x * x)) / Math.sqrt(Math.PI)) * (1.0 + ((((0.5 / Math.abs(x)) + (0.75 / t_0)) / Math.abs(x)) + (1.875 / ((x * x) * (x * t_0)))))) / Math.abs(x);
}
def code(x): t_0 = x * (x * x) return ((math.exp((x * x)) / math.sqrt(math.pi)) * (1.0 + ((((0.5 / math.fabs(x)) + (0.75 / t_0)) / math.fabs(x)) + (1.875 / ((x * x) * (x * t_0)))))) / math.fabs(x)
function code(x) t_0 = Float64(x * Float64(x * x)) return Float64(Float64(Float64(exp(Float64(x * x)) / sqrt(pi)) * Float64(1.0 + Float64(Float64(Float64(Float64(0.5 / abs(x)) + Float64(0.75 / t_0)) / abs(x)) + Float64(1.875 / Float64(Float64(x * x) * Float64(x * t_0)))))) / abs(x)) end
function tmp = code(x) t_0 = x * (x * x); tmp = ((exp((x * x)) / sqrt(pi)) * (1.0 + ((((0.5 / abs(x)) + (0.75 / t_0)) / abs(x)) + (1.875 / ((x * x) * (x * t_0)))))) / abs(x); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(N[(N[(N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(0.75 / t$95$0), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(1.875 / N[(N[(x * x), $MachinePrecision] * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot x\right)\\
\frac{\frac{e^{x \cdot x}}{\sqrt{\pi}} \cdot \left(1 + \left(\frac{\frac{0.5}{\left|x\right|} + \frac{0.75}{t\_0}}{\left|x\right|} + \frac{1.875}{\left(x \cdot x\right) \cdot \left(x \cdot t\_0\right)}\right)\right)}{\left|x\right|}
\end{array}
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites100.0%
Applied rewrites100.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x x))))
(*
(/ (exp (* x x)) (fabs (* x (sqrt PI))))
(+
(+ 1.0 (/ 1.875 (* (* x x) (* x t_0))))
(/ (+ (/ 0.5 (fabs x)) (/ 0.75 (fabs t_0))) (fabs x))))))
double code(double x) {
double t_0 = x * (x * x);
return (exp((x * x)) / fabs((x * sqrt(((double) M_PI))))) * ((1.0 + (1.875 / ((x * x) * (x * t_0)))) + (((0.5 / fabs(x)) + (0.75 / fabs(t_0))) / fabs(x)));
}
public static double code(double x) {
double t_0 = x * (x * x);
return (Math.exp((x * x)) / Math.abs((x * Math.sqrt(Math.PI)))) * ((1.0 + (1.875 / ((x * x) * (x * t_0)))) + (((0.5 / Math.abs(x)) + (0.75 / Math.abs(t_0))) / Math.abs(x)));
}
def code(x): t_0 = x * (x * x) return (math.exp((x * x)) / math.fabs((x * math.sqrt(math.pi)))) * ((1.0 + (1.875 / ((x * x) * (x * t_0)))) + (((0.5 / math.fabs(x)) + (0.75 / math.fabs(t_0))) / math.fabs(x)))
function code(x) t_0 = Float64(x * Float64(x * x)) return Float64(Float64(exp(Float64(x * x)) / abs(Float64(x * sqrt(pi)))) * Float64(Float64(1.0 + Float64(1.875 / Float64(Float64(x * x) * Float64(x * t_0)))) + Float64(Float64(Float64(0.5 / abs(x)) + Float64(0.75 / abs(t_0))) / abs(x)))) end
function tmp = code(x) t_0 = x * (x * x); tmp = (exp((x * x)) / abs((x * sqrt(pi)))) * ((1.0 + (1.875 / ((x * x) * (x * t_0)))) + (((0.5 / abs(x)) + (0.75 / abs(t_0))) / abs(x))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Abs[N[(x * N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[(1.875 / N[(N[(x * x), $MachinePrecision] * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.5 / N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(0.75 / N[Abs[t$95$0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot x\right)\\
\frac{e^{x \cdot x}}{\left|x \cdot \sqrt{\pi}\right|} \cdot \left(\left(1 + \frac{1.875}{\left(x \cdot x\right) \cdot \left(x \cdot t\_0\right)}\right) + \frac{\frac{0.5}{\left|x\right|} + \frac{0.75}{\left|t\_0\right|}}{\left|x\right|}\right)
\end{array}
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites100.0%
lift-PI.f64N/A
sqrt-divN/A
metadata-evalN/A
lift-sqrt.f64N/A
sqr-absN/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-exp.f64N/A
lift-fabs.f64N/A
frac-timesN/A
*-lft-identityN/A
lower-/.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-absN/A
lift-*.f64N/A
Applied rewrites100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (* (* (/ 1.0 (sqrt PI)) (exp (fabs (* x x)))) (fma (/ 1.0 (fabs (* x (* x x)))) (+ 0.5 (/ 0.75 (* x x))) (/ 1.0 (fabs x)))))
/* Herbie alternative (98.6% accuracy per report): exp(|x^2|)/sqrt(pi)
 * times the series folded into one fma:
 * fma(1/|x^3|, 0.5 + 0.75/x^2, 1/|x|), i.e. the 1/|x|, 1/|x|^3 and
 * 1/|x|^5 terms; the 15/8 * 1/|x|^7 term of the original is dropped. */
double code(double x) {
return ((1.0 / sqrt(((double) M_PI))) * exp(fabs((x * x)))) * fma((1.0 / fabs((x * (x * x)))), (0.5 + (0.75 / (x * x))), (1.0 / fabs(x)));
}
function code(x) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(abs(Float64(x * x)))) * fma(Float64(1.0 / abs(Float64(x * Float64(x * x)))), Float64(0.5 + Float64(0.75 / Float64(x * x))), Float64(1.0 / abs(x)))) end
code[x_] := N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[Abs[N[(x * x), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 / N[Abs[N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(0.5 + N[(0.75 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x \cdot x\right|}\right) \cdot \mathsf{fma}\left(\frac{1}{\left|x \cdot \left(x \cdot x\right)\right|}, 0.5 + \frac{0.75}{x \cdot x}, \frac{1}{\left|x\right|}\right)
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites98.6%
Final simplification98.6%
(FPCore (x) :precision binary64 (* (/ (exp (* x x)) (fabs (* x (sqrt PI)))) (fma (/ 1.0 (* x x)) (+ 0.5 (/ 0.75 (* x x))) 1.0)))
/* Herbie alternative (98.6% accuracy per report): exp(x^2)/|x*sqrt(pi)|
 * times fma(1/x^2, 0.5 + 0.75/x^2, 1.0) -- the 1/|x| factor is merged
 * into the prefactor denominator, and the 1/|x|^7 term is dropped. */
double code(double x) {
return (exp((x * x)) / fabs((x * sqrt(((double) M_PI))))) * fma((1.0 / (x * x)), (0.5 + (0.75 / (x * x))), 1.0);
}
function code(x) return Float64(Float64(exp(Float64(x * x)) / abs(Float64(x * sqrt(pi)))) * fma(Float64(1.0 / Float64(x * x)), Float64(0.5 + Float64(0.75 / Float64(x * x))), 1.0)) end
code[x_] := N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Abs[N[(x * N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(0.5 + N[(0.75 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x}}{\left|x \cdot \sqrt{\pi}\right|} \cdot \mathsf{fma}\left(\frac{1}{x \cdot x}, 0.5 + \frac{0.75}{x \cdot x}, 1\right)
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites98.6%
lift-PI.f64N/A
sqrt-divN/A
metadata-evalN/A
lift-sqrt.f64N/A
sqr-absN/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-exp.f64N/A
lift-fabs.f64N/A
frac-timesN/A
*-lft-identityN/A
lower-/.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-absN/A
lift-*.f64N/A
Applied rewrites98.6%
Taylor expanded in x around inf
Applied rewrites98.6%
Final simplification98.6%
(FPCore (x) :precision binary64 (* (/ (exp (* x x)) (fabs (* x (sqrt PI)))) (+ 1.0 (/ 0.5 (* x x)))))
/* Herbie alternative (98.5% accuracy per report): two-term form
 * exp(x^2)/|x*sqrt(pi)| * (1 + 1/(2 x^2)); drops the 3/4 and 15/8
 * series terms relative to the original program. */
double code(double x) {
return (exp((x * x)) / fabs((x * sqrt(((double) M_PI))))) * (1.0 + (0.5 / (x * x)));
}
public static double code(double x) {
return (Math.exp((x * x)) / Math.abs((x * Math.sqrt(Math.PI)))) * (1.0 + (0.5 / (x * x)));
}
def code(x): return (math.exp((x * x)) / math.fabs((x * math.sqrt(math.pi)))) * (1.0 + (0.5 / (x * x)))
function code(x) return Float64(Float64(exp(Float64(x * x)) / abs(Float64(x * sqrt(pi)))) * Float64(1.0 + Float64(0.5 / Float64(x * x)))) end
function tmp = code(x) tmp = (exp((x * x)) / abs((x * sqrt(pi)))) * (1.0 + (0.5 / (x * x))); end
code[x_] := N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Abs[N[(x * N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x}}{\left|x \cdot \sqrt{\pi}\right|} \cdot \left(1 + \frac{0.5}{x \cdot x}\right)
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
associate-/r*N/A
associate-*r/N/A
*-rgt-identityN/A
associate-*r/N/A
distribute-rgt1-inN/A
+-commutativeN/A
lower-*.f64N/A
lower-+.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-/.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
Applied rewrites98.5%
Final simplification98.5%
(FPCore (x) :precision binary64 (/ (exp (* x x)) (fabs (* x (sqrt PI)))))
/* Herbie alternative (98.5% accuracy per report): leading term only,
 * exp(x^2) / |x * sqrt(pi)| -- all 1/|x|^k correction terms dropped. */
double code(double x) {
return exp((x * x)) / fabs((x * sqrt(((double) M_PI))));
}
public static double code(double x) {
return Math.exp((x * x)) / Math.abs((x * Math.sqrt(Math.PI)));
}
def code(x): return math.exp((x * x)) / math.fabs((x * math.sqrt(math.pi)))
function code(x) return Float64(exp(Float64(x * x)) / abs(Float64(x * sqrt(pi)))) end
function tmp = code(x) tmp = exp((x * x)) / abs((x * sqrt(pi))); end
code[x_] := N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Abs[N[(x * N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x}}{\left|x \cdot \sqrt{\pi}\right|}
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-/.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
lower-exp.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
lift-PI.f64N/A
sqrt-divN/A
metadata-evalN/A
lift-sqrt.f64N/A
sqr-absN/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-exp.f64N/A
lift-fabs.f64N/A
frac-timesN/A
*-lft-identityN/A
lower-/.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-absN/A
lift-*.f64N/A
Applied rewrites98.5%
Final simplification98.5%
(FPCore (x) :precision binary64 (* (/ (fma x (fma x (* x (* x (fma x (* x 0.16666666666666666) 0.5))) x) 1.0) (fabs (* x (sqrt PI)))) (+ (+ 1.0 (/ 0.5 (* x x))) (/ 0.75 (* x (* x (* x x)))))))
double code(double x) {
return (fma(x, fma(x, (x * (x * fma(x, (x * 0.16666666666666666), 0.5))), x), 1.0) / fabs((x * sqrt(((double) M_PI))))) * ((1.0 + (0.5 / (x * x))) + (0.75 / (x * (x * (x * x)))));
}
function code(x) return Float64(Float64(fma(x, fma(x, Float64(x * Float64(x * fma(x, Float64(x * 0.16666666666666666), 0.5))), x), 1.0) / abs(Float64(x * sqrt(pi)))) * Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) + Float64(0.75 / Float64(x * Float64(x * Float64(x * x)))))) end
code[x_] := N[(N[(N[(x * N[(x * N[(x * N[(x * N[(x * N[(x * 0.16666666666666666), $MachinePrecision] + 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision] + 1.0), $MachinePrecision] / N[Abs[N[(x * N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.75 / N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(x, \mathsf{fma}\left(x, x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot 0.16666666666666666, 0.5\right)\right), x\right), 1\right)}{\left|x \cdot \sqrt{\pi}\right|} \cdot \left(\left(1 + \frac{0.5}{x \cdot x}\right) + \frac{0.75}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right)
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites98.6%
lift-PI.f64N/A
sqrt-divN/A
metadata-evalN/A
lift-sqrt.f64N/A
sqr-absN/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-exp.f64N/A
lift-fabs.f64N/A
frac-timesN/A
*-lft-identityN/A
lower-/.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-absN/A
lift-*.f64N/A
Applied rewrites98.6%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites83.8%
Final simplification83.8%
(FPCore (x) :precision binary64 (* (fma (* x x) (fma (* x x) (fma x (* x 0.16666666666666666) 0.5) 1.0) 1.0) (/ 1.0 (* (fabs x) (sqrt PI)))))
/* Herbie alternative (83.8% accuracy per report): exp(x^2) replaced by
 * its Taylor polynomial around 0 in nested fma form
 * (1 + x^2*(1 + x^2*(0.5 + x^3/6))), scaled by 1/(|x|*sqrt(pi)).
 * NOTE(review): derived via "Taylor expanded in x around 0" -- accuracy
 * is traded for speed per the report's table. */
double code(double x) {
return fma((x * x), fma((x * x), fma(x, (x * 0.16666666666666666), 0.5), 1.0), 1.0) * (1.0 / (fabs(x) * sqrt(((double) M_PI))));
}
function code(x) return Float64(fma(Float64(x * x), fma(Float64(x * x), fma(x, Float64(x * 0.16666666666666666), 0.5), 1.0), 1.0) * Float64(1.0 / Float64(abs(x) * sqrt(pi)))) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * 0.16666666666666666), $MachinePrecision] + 0.5), $MachinePrecision] + 1.0), $MachinePrecision] + 1.0), $MachinePrecision] * N[(1.0 / N[(N[Abs[x], $MachinePrecision] * N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot 0.16666666666666666, 0.5\right), 1\right), 1\right) \cdot \frac{1}{\left|x\right| \cdot \sqrt{\pi}}
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-/.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
lower-exp.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f64N/A
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
unpow2N/A
lower-*.f6483.8
Applied rewrites83.8%
Applied rewrites83.8%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (/ (fma x (fma x (* 0.5 (* x x)) x) 1.0) (fabs x))))
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (fma(x, fma(x, (0.5 * (x * x)), x), 1.0) / fabs(x));
}
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(fma(x, fma(x, Float64(0.5 * Float64(x * x)), x), 1.0) / abs(x))) end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * N[(x * N[(0.5 * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision] + 1.0), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \frac{\mathsf{fma}\left(x, \mathsf{fma}\left(x, 0.5 \cdot \left(x \cdot x\right), x\right), 1\right)}{\left|x\right|}
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-/.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
lower-exp.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
associate-*l*N/A
lower-fma.f64N/A
+-commutativeN/A
distribute-lft-inN/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6477.0
Applied rewrites77.0%
Final simplification77.0%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (fma (fabs x) (fma (* x x) 0.5 1.0) (/ 1.0 (fabs x)))))
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * fma(fabs(x), fma((x * x), 0.5, 1.0), (1.0 / fabs(x)));
}
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * fma(abs(x), fma(Float64(x * x), 0.5, 1.0), Float64(1.0 / abs(x)))) end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.5 + 1.0), $MachinePrecision] + N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \mathsf{fma}\left(\left|x\right|, \mathsf{fma}\left(x \cdot x, 0.5, 1\right), \frac{1}{\left|x\right|}\right)
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-/.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
lower-exp.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
Applied rewrites70.9%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (/ (fma x x 1.0) (fabs x))))
/* Herbie alternative (54.4% accuracy per report): sqrt(1/pi) *
 * (x^2 + 1)/|x|, i.e. only the first two Taylor-around-0 terms kept
 * and fused into a single fma(x, x, 1). */
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (fma(x, x, 1.0) / fabs(x));
}
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(fma(x, x, 1.0) / abs(x))) end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x + 1.0), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \frac{\mathsf{fma}\left(x, x, 1\right)}{\left|x\right|}
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-/.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
lower-exp.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
+-commutativeN/A
unpow2N/A
lower-fma.f6454.4
Applied rewrites54.4%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (+ (fabs x) (/ 1.0 (fabs x)))))
/* Herbie alternative (5.6% accuracy per report): sqrt(1/pi) *
 * (|x| + 1/|x|) -- an algebraic regrouping of the two-term form;
 * the report ranks it as a low-accuracy, high-speed option. */
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (fabs(x) + (1.0 / fabs(x)));
}
public static double code(double x) {
return Math.sqrt((1.0 / Math.PI)) * (Math.abs(x) + (1.0 / Math.abs(x)));
}
def code(x): return math.sqrt((1.0 / math.pi)) * (math.fabs(x) + (1.0 / math.fabs(x)))
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(abs(x) + Float64(1.0 / abs(x)))) end
function tmp = code(x) tmp = sqrt((1.0 / pi)) * (abs(x) + (1.0 / abs(x))); end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Abs[x], $MachinePrecision] + N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \left(\left|x\right| + \frac{1}{\left|x\right|}\right)
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-/.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
lower-exp.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
*-commutativeN/A
distribute-lft-outN/A
+-commutativeN/A
*-lft-identityN/A
associate-*l/N/A
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
associate-*l/N/A
*-lft-identityN/A
Applied rewrites5.6%
(FPCore (x) :precision binary64 (/ 1.0 (* (fabs x) (sqrt PI))))
/* Herbie alternative (2.4% accuracy per report): constant-factor form
 * 1/(|x| * sqrt(pi)) -- the exp(x^2) factor is dropped entirely, so
 * this is only the small-|x| leading behavior. */
double code(double x) {
return 1.0 / (fabs(x) * sqrt(((double) M_PI)));
}
public static double code(double x) {
return 1.0 / (Math.abs(x) * Math.sqrt(Math.PI));
}
def code(x): return 1.0 / (math.fabs(x) * math.sqrt(math.pi))
function code(x) return Float64(1.0 / Float64(abs(x) * sqrt(pi))) end
function tmp = code(x) tmp = 1.0 / (abs(x) * sqrt(pi)); end
code[x_] := N[(1.0 / N[(N[Abs[x], $MachinePrecision] * N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\left|x\right| \cdot \sqrt{\pi}}
\end{array}
Initial program 100.0%
Applied rewrites100.0%
Taylor expanded in x around inf
lower-*.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-/.f64N/A
unpow2N/A
sqr-absN/A
unpow2N/A
lower-exp.f64N/A
unpow2N/A
lower-*.f64N/A
lower-fabs.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
associate-*r/N/A
*-rgt-identityN/A
lower-/.f64N/A
lower-sqrt.f64N/A
lower-/.f64N/A
lower-PI.f64N/A
lower-fabs.f642.4
Applied rewrites2.4%
lift-PI.f64N/A
sqrt-divN/A
metadata-evalN/A
lift-sqrt.f64N/A
lift-fabs.f64N/A
associate-/l/N/A
lift-fabs.f64N/A
rem-square-sqrtN/A
sqrt-prodN/A
rem-sqrt-squareN/A
fabs-mulN/A
*-commutativeN/A
lift-*.f64N/A
lift-fabs.f64N/A
lower-/.f642.4
lift-fabs.f64N/A
lift-*.f64N/A
*-commutativeN/A
fabs-mulN/A
lift-fabs.f64N/A
rem-sqrt-squareN/A
sqrt-prodN/A
Applied rewrites2.4%
herbie shell --seed 2024214
(FPCore (x)
:name "Jmat.Real.erfi, branch x greater than or equal to 5"
:precision binary64
:pre (>= x 0.5)
(* (* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x)))) (+ (+ (+ (/ 1.0 (fabs x)) (* (/ 1.0 2.0) (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 3.0 4.0) (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 15.0 8.0) (* (* (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x)))))))