
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
/* Leading terms of the large-|x| asymptotic series
 *   exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
 * NOTE(review): the FPCore :name at the end of this report labels this the
 * erfi tail branch -- confirm against the caller.  Returns inf once x*x
 * exceeds the exp() overflow threshold (~709). */
double code(double x) {
    double inv = 1.0 / fabs(x);        /* |x|^-1 */
    double inv3 = (inv * inv) * inv;   /* |x|^-3 */
    double inv5 = (inv3 * inv) * inv;  /* |x|^-5 */
    double inv7 = (inv5 * inv) * inv;  /* |x|^-7 */
    double prefactor = (1.0 / sqrt((double) M_PI)) * exp(fabs(x) * fabs(x));
    double series = ((inv + (1.0 / 2.0) * inv3) + (3.0 / 4.0) * inv5)
                  + (15.0 / 8.0) * inv7;
    return prefactor * series;
}
/**
 * Leading terms of the large-|x| asymptotic series
 * exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
 * NOTE(review): the FPCore :name at the end of this report labels this the
 * erfi tail branch -- confirm against the caller.
 */
public static double code(double x) {
    double inv = 1.0 / Math.abs(x);    // |x|^-1
    double inv3 = (inv * inv) * inv;   // |x|^-3
    double inv5 = (inv3 * inv) * inv;  // |x|^-5
    double inv7 = (inv5 * inv) * inv;  // |x|^-7
    double prefactor = (1.0 / Math.sqrt(Math.PI)) * Math.exp(Math.abs(x) * Math.abs(x));
    double series = ((inv + (1.0 / 2.0) * inv3) + (3.0 / 4.0) * inv5)
                  + (15.0 / 8.0) * inv7;
    return prefactor * series;
}
def code(x): t_0 = 1.0 / math.fabs(x) t_1 = (t_0 * t_0) * t_0 t_2 = (t_1 * t_0) * t_0 return ((1.0 / math.sqrt(math.pi)) * math.exp((math.fabs(x) * math.fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)))
function code(x) t_0 = Float64(1.0 / abs(x)) t_1 = Float64(Float64(t_0 * t_0) * t_0) t_2 = Float64(Float64(t_1 * t_0) * t_0) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0)))) end
function tmp = code(x) t_0 = 1.0 / abs(x); t_1 = (t_0 * t_0) * t_0; t_2 = (t_1 * t_0) * t_0; tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); end
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t_0 \cdot t_0\right) \cdot t_0\\
t_2 := \left(t_1 \cdot t_0\right) \cdot t_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t_0 + \frac{1}{2} \cdot t_1\right) + \frac{3}{4} \cdot t_2\right) + \frac{15}{8} \cdot \left(\left(t_2 \cdot t_0\right) \cdot t_0\right)\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
/* Duplicate of the initial program above: large-|x| asymptotic series
   exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
   NOTE(review): the FPCore :name at the end of this report labels this the
   erfi tail branch -- confirm against the caller. */
double code(double x) {
double t_0 = 1.0 / fabs(x); /* |x|^-1 */
double t_1 = (t_0 * t_0) * t_0; /* |x|^-3 */
double t_2 = (t_1 * t_0) * t_0; /* |x|^-5 */
return ((1.0 / sqrt(((double) M_PI))) * exp((fabs(x) * fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); /* prefactor * truncated series */
}
// Duplicate of the initial program above: large-|x| asymptotic series
// exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
// NOTE(review): the FPCore :name at the end of this report labels this the
// erfi tail branch -- confirm against the caller.
public static double code(double x) {
double t_0 = 1.0 / Math.abs(x); // |x|^-1
double t_1 = (t_0 * t_0) * t_0; // |x|^-3
double t_2 = (t_1 * t_0) * t_0; // |x|^-5
return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((Math.abs(x) * Math.abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); // prefactor * truncated series
}
def code(x): t_0 = 1.0 / math.fabs(x) t_1 = (t_0 * t_0) * t_0 t_2 = (t_1 * t_0) * t_0 return ((1.0 / math.sqrt(math.pi)) * math.exp((math.fabs(x) * math.fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)))
function code(x) t_0 = Float64(1.0 / abs(x)) t_1 = Float64(Float64(t_0 * t_0) * t_0) t_2 = Float64(Float64(t_1 * t_0) * t_0) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0)))) end
function tmp = code(x) t_0 = 1.0 / abs(x); t_1 = (t_0 * t_0) * t_0; t_2 = (t_1 * t_0) * t_0; tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); end
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t\_0 \cdot t\_0\right) \cdot t\_0\\
t_2 := \left(t\_1 \cdot t\_0\right) \cdot t\_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t\_0 + \frac{1}{2} \cdot t\_1\right) + \frac{3}{4} \cdot t\_2\right) + \frac{15}{8} \cdot \left(\left(t\_2 \cdot t\_0\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (exp (pow x -3.0))))
(*
(/ (pow (exp x) x) (sqrt PI))
(fma
0.5
(+ (log (cbrt (pow t_0 2.0))) (log (cbrt t_0)))
(*
(/ 1.0 (fabs x))
(+ 1.0 (+ (* 1.875 (pow x -6.0)) (* 0.75 (pow x -4.0)))))))))
/* Herbie alternative: the same tail series with the 1/(2|x|^3) term rebuilt
   through log/cbrt/exp identities and the remaining terms folded into fma.
   pow(exp(x), x) stands in for exp(x^2).  See the derivation log below for
   the rewrite steps and reported accuracy. */
double code(double x) {
double t_0 = exp(pow(x, -3.0)); /* e^(x^-3); log(cbrt(...)) below recovers multiples of x^-3 */
return (pow(exp(x), x) / sqrt(((double) M_PI))) * fma(0.5, (log(cbrt(pow(t_0, 2.0))) + log(cbrt(t_0))), ((1.0 / fabs(x)) * (1.0 + ((1.875 * pow(x, -6.0)) + (0.75 * pow(x, -4.0))))));
}
function code(x) t_0 = exp((x ^ -3.0)) return Float64(Float64((exp(x) ^ x) / sqrt(pi)) * fma(0.5, Float64(log(cbrt((t_0 ^ 2.0))) + log(cbrt(t_0))), Float64(Float64(1.0 / abs(x)) * Float64(1.0 + Float64(Float64(1.875 * (x ^ -6.0)) + Float64(0.75 * (x ^ -4.0))))))) end
code[x_] := Block[{t$95$0 = N[Exp[N[Power[x, -3.0], $MachinePrecision]], $MachinePrecision]}, N[(N[(N[Power[N[Exp[x], $MachinePrecision], x], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(0.5 * N[(N[Log[N[Power[N[Power[t$95$0, 2.0], $MachinePrecision], 1/3], $MachinePrecision]], $MachinePrecision] + N[Log[N[Power[t$95$0, 1/3], $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(N[(1.875 * N[Power[x, -6.0], $MachinePrecision]), $MachinePrecision] + N[(0.75 * N[Power[x, -4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := e^{{x}^{-3}}\\
\frac{{\left(e^{x}\right)}^{x}}{\sqrt{\pi}} \cdot \mathsf{fma}\left(0.5, \log \left(\sqrt[3]{{t\_0}^{2}}\right) + \log \left(\sqrt[3]{t\_0}\right), \frac{1}{\left|x\right|} \cdot \left(1 + \left(1.875 \cdot {x}^{-6} + 0.75 \cdot {x}^{-4}\right)\right)\right)
\end{array}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
add-cube-cbrt100.0%
log-prod100.0%
Applied egg-rr100.0%
fma-undefine100.0%
+-commutative100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0 100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
pow-flip100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Final simplification100.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (log (cbrt (exp (pow x -3.0))))))
(*
(/ (pow (exp x) x) (sqrt PI))
(fma
0.5
(+ t_0 (* 2.0 t_0))
(*
(/ 1.0 (fabs x))
(+ 1.0 (+ (* 1.875 (pow x -6.0)) (* 0.75 (pow x -4.0)))))))))
/* Herbie alternative: like the previous variant but the two cbrt/log terms
   are shared through t_0, so 0.5*(t_0 + 2*t_0) reconstructs 1/(2 x^3).
   pow(exp(x), x) stands in for exp(x^2). */
double code(double x) {
double t_0 = log(cbrt(exp(pow(x, -3.0)))); /* == x^-3 / 3 up to rounding */
return (pow(exp(x), x) / sqrt(((double) M_PI))) * fma(0.5, (t_0 + (2.0 * t_0)), ((1.0 / fabs(x)) * (1.0 + ((1.875 * pow(x, -6.0)) + (0.75 * pow(x, -4.0))))));
}
function code(x) t_0 = log(cbrt(exp((x ^ -3.0)))) return Float64(Float64((exp(x) ^ x) / sqrt(pi)) * fma(0.5, Float64(t_0 + Float64(2.0 * t_0)), Float64(Float64(1.0 / abs(x)) * Float64(1.0 + Float64(Float64(1.875 * (x ^ -6.0)) + Float64(0.75 * (x ^ -4.0))))))) end
code[x_] := Block[{t$95$0 = N[Log[N[Power[N[Exp[N[Power[x, -3.0], $MachinePrecision]], $MachinePrecision], 1/3], $MachinePrecision]], $MachinePrecision]}, N[(N[(N[Power[N[Exp[x], $MachinePrecision], x], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(0.5 * N[(t$95$0 + N[(2.0 * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[(N[(1.875 * N[Power[x, -6.0], $MachinePrecision]), $MachinePrecision] + N[(0.75 * N[Power[x, -4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \log \left(\sqrt[3]{e^{{x}^{-3}}}\right)\\
\frac{{\left(e^{x}\right)}^{x}}{\sqrt{\pi}} \cdot \mathsf{fma}\left(0.5, t\_0 + 2 \cdot t\_0, \frac{1}{\left|x\right|} \cdot \left(1 + \left(1.875 \cdot {x}^{-6} + 0.75 \cdot {x}^{-4}\right)\right)\right)
\end{array}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
add-cube-cbrt100.0%
log-prod100.0%
Applied egg-rr100.0%
fma-undefine100.0%
+-commutative100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
Taylor expanded in x around 0 100.0%
add-sqr-sqrt100.0%
log-prod100.0%
Applied egg-rr100.0%
count-2100.0%
Simplified100.0%
Final simplification100.0%
(FPCore (x)
:precision binary64
(*
(/ (pow (exp x) x) (cbrt (pow PI 1.5)))
(fma
0.5
(pow x -3.0)
(*
(/ 1.0 (fabs x))
(+ 1.0 (log (exp (fma 0.75 (pow x -4.0) (* 1.875 (pow x -6.0))))))))))
/* Herbie alternative: sqrt(pi) spelled as cbrt(pi^1.5); the |x|^-4 and
   |x|^-6 terms pass through a log(exp(...)) round-trip, and the x^-3 term
   is folded into the outer fma. */
double code(double x) {
return (pow(exp(x), x) / cbrt(pow(((double) M_PI), 1.5))) * fma(0.5, pow(x, -3.0), ((1.0 / fabs(x)) * (1.0 + log(exp(fma(0.75, pow(x, -4.0), (1.875 * pow(x, -6.0))))))));
}
function code(x) return Float64(Float64((exp(x) ^ x) / cbrt((pi ^ 1.5))) * fma(0.5, (x ^ -3.0), Float64(Float64(1.0 / abs(x)) * Float64(1.0 + log(exp(fma(0.75, (x ^ -4.0), Float64(1.875 * (x ^ -6.0))))))))) end
code[x_] := N[(N[(N[Power[N[Exp[x], $MachinePrecision], x], $MachinePrecision] / N[Power[N[Power[Pi, 1.5], $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision] * N[(0.5 * N[Power[x, -3.0], $MachinePrecision] + N[(N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[(1.0 + N[Log[N[Exp[N[(0.75 * N[Power[x, -4.0], $MachinePrecision] + N[(1.875 * N[Power[x, -6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\left(e^{x}\right)}^{x}}{\sqrt[3]{{\pi}^{1.5}}} \cdot \mathsf{fma}\left(0.5, {x}^{-3}, \frac{1}{\left|x\right|} \cdot \left(1 + \log \left(e^{\mathsf{fma}\left(0.75, {x}^{-4}, 1.875 \cdot {x}^{-6}\right)}\right)\right)\right)
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
pow-flip100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
add-cbrt-cube100.0%
pow1/3100.0%
add-sqr-sqrt100.0%
pow1100.0%
pow1/2100.0%
pow-prod-up100.0%
metadata-eval100.0%
Applied egg-rr100.0%
unpow1/3100.0%
Simplified100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (* (/ (pow (exp x) x) (cbrt (pow PI 1.5))) (fma 0.75 (pow x -5.0) (fma 1.875 (pow x -7.0) (/ (+ 1.0 (/ 0.5 (* x x))) (fabs x))))))
/* Herbie alternative: series regrouped into nested fma terms
   0.75*x^-5 + 1.875*x^-7 + (1 + 0.5/x^2)/|x|; sqrt(pi) spelled as
   cbrt(pi^1.5), and pow(exp(x), x) stands in for exp(x^2). */
double code(double x) {
return (pow(exp(x), x) / cbrt(pow(((double) M_PI), 1.5))) * fma(0.75, pow(x, -5.0), fma(1.875, pow(x, -7.0), ((1.0 + (0.5 / (x * x))) / fabs(x))));
}
function code(x) return Float64(Float64((exp(x) ^ x) / cbrt((pi ^ 1.5))) * fma(0.75, (x ^ -5.0), fma(1.875, (x ^ -7.0), Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) / abs(x))))) end
code[x_] := N[(N[(N[Power[N[Exp[x], $MachinePrecision], x], $MachinePrecision] / N[Power[N[Power[Pi, 1.5], $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision] * N[(0.75 * N[Power[x, -5.0], $MachinePrecision] + N[(1.875 * N[Power[x, -7.0], $MachinePrecision] + N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\left(e^{x}\right)}^{x}}{\sqrt[3]{{\pi}^{1.5}}} \cdot \mathsf{fma}\left(0.75, {x}^{-5}, \mathsf{fma}\left(1.875, {x}^{-7}, \frac{1 + \frac{0.5}{x \cdot x}}{\left|x\right|}\right)\right)
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
exp-prod100.0%
Applied egg-rr100.0%
add-cbrt-cube100.0%
pow1/3100.0%
add-sqr-sqrt100.0%
pow1100.0%
pow1/2100.0%
pow-prod-up100.0%
metadata-eval100.0%
Applied egg-rr100.0%
unpow1/3100.0%
Simplified100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (* (/ (pow (exp x) x) (sqrt PI)) (fma 0.75 (pow x -5.0) (fma 1.875 (pow x -7.0) (/ (+ 1.0 (/ 0.5 (* x x))) (fabs x))))))
/* Herbie alternative: same nested-fma regrouping as the previous variant
   but with sqrt(pi) written directly instead of cbrt(pi^1.5). */
double code(double x) {
return (pow(exp(x), x) / sqrt(((double) M_PI))) * fma(0.75, pow(x, -5.0), fma(1.875, pow(x, -7.0), ((1.0 + (0.5 / (x * x))) / fabs(x))));
}
function code(x) return Float64(Float64((exp(x) ^ x) / sqrt(pi)) * fma(0.75, (x ^ -5.0), fma(1.875, (x ^ -7.0), Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) / abs(x))))) end
code[x_] := N[(N[(N[Power[N[Exp[x], $MachinePrecision], x], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(0.75 * N[Power[x, -5.0], $MachinePrecision] + N[(1.875 * N[Power[x, -7.0], $MachinePrecision] + N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{{\left(e^{x}\right)}^{x}}{\sqrt{\pi}} \cdot \mathsf{fma}\left(0.75, {x}^{-5}, \mathsf{fma}\left(1.875, {x}^{-7}, \frac{1 + \frac{0.5}{x \cdot x}}{\left|x\right|}\right)\right)
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
exp-prod100.0%
Applied egg-rr100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (* (fma 0.75 (pow x -5.0) (fma 1.875 (pow x -7.0) (/ (+ 1.0 (/ 0.5 (* x x))) (fabs x)))) (/ (exp (* x x)) (sqrt PI))))
/* Herbie alternative: same nested-fma series, with the exponential factor
   written as exp(x*x) directly and multiplied on the right. */
double code(double x) {
return fma(0.75, pow(x, -5.0), fma(1.875, pow(x, -7.0), ((1.0 + (0.5 / (x * x))) / fabs(x)))) * (exp((x * x)) / sqrt(((double) M_PI)));
}
function code(x) return Float64(fma(0.75, (x ^ -5.0), fma(1.875, (x ^ -7.0), Float64(Float64(1.0 + Float64(0.5 / Float64(x * x))) / abs(x)))) * Float64(exp(Float64(x * x)) / sqrt(pi))) end
code[x_] := N[(N[(0.75 * N[Power[x, -5.0], $MachinePrecision] + N[(1.875 * N[Power[x, -7.0], $MachinePrecision] + N[(N[(1.0 + N[(0.5 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(0.75, {x}^{-5}, \mathsf{fma}\left(1.875, {x}^{-7}, \frac{1 + \frac{0.5}{x \cdot x}}{\left|x\right|}\right)\right) \cdot \frac{e^{x \cdot x}}{\sqrt{\pi}}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (/ (pow (pow (exp x) (sqrt x)) (sqrt x)) (fabs x))))
/* Herbie alternative (Taylor expansion around inf, per the log below):
   only the leading exp(x^2)/(sqrt(pi)|x|) factor survives, written as
   ((e^x)^sqrt(x))^sqrt(x).  NOTE(review): sqrt(x) is NaN for x < 0, unlike
   the fabs-based variants above -- confirm the intended domain. */
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (pow(pow(exp(x), sqrt(x)), sqrt(x)) / fabs(x));
}
public static double code(double x) {
return Math.sqrt((1.0 / Math.PI)) * (Math.pow(Math.pow(Math.exp(x), Math.sqrt(x)), Math.sqrt(x)) / Math.abs(x));
}
def code(x): return math.sqrt((1.0 / math.pi)) * (math.pow(math.pow(math.exp(x), math.sqrt(x)), math.sqrt(x)) / math.fabs(x))
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(((exp(x) ^ sqrt(x)) ^ sqrt(x)) / abs(x))) end
function tmp = code(x) tmp = sqrt((1.0 / pi)) * (((exp(x) ^ sqrt(x)) ^ sqrt(x)) / abs(x)); end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Power[N[Power[N[Exp[x], $MachinePrecision], N[Sqrt[x], $MachinePrecision]], $MachinePrecision], N[Sqrt[x], $MachinePrecision]], $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \frac{{\left({\left(e^{x}\right)}^{\left(\sqrt{x}\right)}\right)}^{\left(\sqrt{x}\right)}}{\left|x\right|}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 99.4%
pow299.4%
pow-exp99.4%
add-sqr-sqrt99.4%
pow-unpow99.4%
Applied egg-rr99.4%
Final simplification99.4%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (/ (pow (sqrt (exp x)) (* x 2.0)) (fabs x))))
/* Herbie alternative: leading factor written as sqrt(e^x)^(2x); see the
   derivation log below.  NOTE(review): sqrt(exp(x)) underflows to 0 for
   very negative x, after which pow(0, 2x) diverges -- confirm domain. */
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (pow(sqrt(exp(x)), (x * 2.0)) / fabs(x));
}
public static double code(double x) {
return Math.sqrt((1.0 / Math.PI)) * (Math.pow(Math.sqrt(Math.exp(x)), (x * 2.0)) / Math.abs(x));
}
def code(x): return math.sqrt((1.0 / math.pi)) * (math.pow(math.sqrt(math.exp(x)), (x * 2.0)) / math.fabs(x))
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64((sqrt(exp(x)) ^ Float64(x * 2.0)) / abs(x))) end
function tmp = code(x) tmp = sqrt((1.0 / pi)) * ((sqrt(exp(x)) ^ (x * 2.0)) / abs(x)); end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Power[N[Sqrt[N[Exp[x], $MachinePrecision]], $MachinePrecision], N[(x * 2.0), $MachinePrecision]], $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \frac{{\left(\sqrt{e^{x}}\right)}^{\left(x \cdot 2\right)}}{\left|x\right|}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 99.4%
pow299.4%
pow-exp99.4%
add-sqr-sqrt99.4%
unpow-prod-down99.4%
Applied egg-rr99.4%
pow-sqr99.4%
*-commutative99.4%
Simplified99.4%
Final simplification99.4%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (/ (exp (pow x 2.0)) (fabs x))))
/* Herbie alternative: leading asymptotic factor exp(x^2)/(sqrt(pi)|x|)
   written directly with pow(x, 2.0). */
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (exp(pow(x, 2.0)) / fabs(x));
}
public static double code(double x) {
return Math.sqrt((1.0 / Math.PI)) * (Math.exp(Math.pow(x, 2.0)) / Math.abs(x));
}
def code(x): return math.sqrt((1.0 / math.pi)) * (math.exp(math.pow(x, 2.0)) / math.fabs(x))
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(exp((x ^ 2.0)) / abs(x))) end
function tmp = code(x) tmp = sqrt((1.0 / pi)) * (exp((x ^ 2.0)) / abs(x)); end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Exp[N[Power[x, 2.0], $MachinePrecision]], $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \frac{e^{{x}^{2}}}{\left|x\right|}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 99.4%
Final simplification99.4%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (/ (pow (exp x) x) (fabs x))))
/* Herbie alternative: leading asymptotic factor with exp(x^2) written as
   pow(exp(x), x). */
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (pow(exp(x), x) / fabs(x));
}
public static double code(double x) {
return Math.sqrt((1.0 / Math.PI)) * (Math.pow(Math.exp(x), x) / Math.abs(x));
}
def code(x): return math.sqrt((1.0 / math.pi)) * (math.pow(math.exp(x), x) / math.fabs(x))
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64((exp(x) ^ x) / abs(x))) end
function tmp = code(x) tmp = sqrt((1.0 / pi)) * ((exp(x) ^ x) / abs(x)); end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[Power[N[Exp[x], $MachinePrecision], x], $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \frac{{\left(e^{x}\right)}^{x}}{\left|x\right|}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 99.4%
pow299.4%
pow-exp99.4%
Applied egg-rr99.4%
Final simplification99.4%
(FPCore (x) :precision binary64 (* (sqrt (/ 1.0 PI)) (/ (fma x x 1.0) (fabs x))))
/* Herbie alternative: exp(x^2) replaced by its 2-term Taylor polynomial
   1 + x^2 via fma(x, x, 1.0).  The log below reports sharply lower
   accuracy (51.3%) for this variant -- it is only useful near x = 0. */
double code(double x) {
return sqrt((1.0 / ((double) M_PI))) * (fma(x, x, 1.0) / fabs(x));
}
function code(x) return Float64(sqrt(Float64(1.0 / pi)) * Float64(fma(x, x, 1.0) / abs(x))) end
code[x_] := N[(N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision] * N[(N[(x * x + 1.0), $MachinePrecision] / N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{1}{\pi}} \cdot \frac{\mathsf{fma}\left(x, x, 1\right)}{\left|x\right|}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 99.4%
Taylor expanded in x around 0 51.3%
+-commutative51.3%
unpow251.3%
fma-define51.3%
Simplified51.3%
Final simplification51.3%
(FPCore (x) :precision binary64 (* (/ 1.0 (fabs x)) (sqrt (/ 1.0 PI))))
/* Herbie alternative: constant-order Taylor limit 1/(|x| sqrt(pi)); the
   exponential factor is dropped entirely.  The log below reports 2.3%
   accuracy -- kept only as a report artifact. */
double code(double x) {
return (1.0 / fabs(x)) * sqrt((1.0 / ((double) M_PI)));
}
public static double code(double x) {
return (1.0 / Math.abs(x)) * Math.sqrt((1.0 / Math.PI));
}
def code(x): return (1.0 / math.fabs(x)) * math.sqrt((1.0 / math.pi))
function code(x) return Float64(Float64(1.0 / abs(x)) * sqrt(Float64(1.0 / pi))) end
function tmp = code(x) tmp = (1.0 / abs(x)) * sqrt((1.0 / pi)); end
code[x_] := N[(N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\left|x\right|} \cdot \sqrt{\frac{1}{\pi}}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around inf 99.4%
Taylor expanded in x around 0 2.3%
Final simplification2.3%
(FPCore (x) :precision binary64 (sqrt (* (/ (pow x -14.0) PI) 3.515625)))
/* Herbie alternative: sqrt(3.515625 * x^-14 / pi) == 1.875 * x^-7 / sqrt(pi)
   (3.515625 = 1.875^2).  The log below reports 1.7% accuracy -- kept only
   as a report artifact. */
double code(double x) {
return sqrt(((pow(x, -14.0) / ((double) M_PI)) * 3.515625));
}
public static double code(double x) {
return Math.sqrt(((Math.pow(x, -14.0) / Math.PI) * 3.515625));
}
def code(x): return math.sqrt(((math.pow(x, -14.0) / math.pi) * 3.515625))
function code(x) return sqrt(Float64(Float64((x ^ -14.0) / pi) * 3.515625)) end
function tmp = code(x) tmp = sqrt((((x ^ -14.0) / pi) * 3.515625)); end
code[x_] := N[Sqrt[N[(N[(N[Power[x, -14.0], $MachinePrecision] / Pi), $MachinePrecision] * 3.515625), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{{x}^{-14}}{\pi} \cdot 3.515625}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 1.7%
*-commutative1.7%
associate-*r/1.7%
*-rgt-identity1.7%
Simplified1.7%
add-sqr-sqrt1.7%
sqrt-unprod1.7%
*-commutative1.7%
*-commutative1.7%
swap-sqr1.7%
Applied egg-rr1.7%
associate-*l*1.7%
associate-*l/1.7%
*-lft-identity1.7%
associate-*l/1.7%
Simplified1.7%
Final simplification1.7%
(FPCore (x) :precision binary64 (* 1.875 (/ (pow x -7.0) (sqrt PI))))
/* Herbie alternative: highest-order series term 1.875 * x^-7 / sqrt(pi)
   alone.  The log below reports 1.7% accuracy -- kept only as a report
   artifact. */
double code(double x) {
return 1.875 * (pow(x, -7.0) / sqrt(((double) M_PI)));
}
public static double code(double x) {
return 1.875 * (Math.pow(x, -7.0) / Math.sqrt(Math.PI));
}
def code(x): return 1.875 * (math.pow(x, -7.0) / math.sqrt(math.pi))
function code(x) return Float64(1.875 * Float64((x ^ -7.0) / sqrt(pi))) end
function tmp = code(x) tmp = 1.875 * ((x ^ -7.0) / sqrt(pi)); end
code[x_] := N[(1.875 * N[(N[Power[x, -7.0], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1.875 \cdot \frac{{x}^{-7}}{\sqrt{\pi}}
\end{array}
Initial program 100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
add-log-exp100.0%
*-un-lft-identity100.0%
log-prod100.0%
metadata-eval100.0%
add-log-exp100.0%
inv-pow100.0%
pow-pow100.0%
add-sqr-sqrt100.0%
fabs-sqr100.0%
add-sqr-sqrt100.0%
metadata-eval100.0%
Applied egg-rr100.0%
+-lft-identity100.0%
Simplified100.0%
Taylor expanded in x around 0 1.7%
*-commutative1.7%
associate-*r/1.7%
*-rgt-identity1.7%
Simplified1.7%
*-un-lft-identity1.7%
*-un-lft-identity1.7%
associate-*l/1.7%
pow-flip1.7%
metadata-eval1.7%
sqrt-div1.7%
metadata-eval1.7%
un-div-inv1.7%
Applied egg-rr1.7%
*-lft-identity1.7%
Simplified1.7%
Final simplification1.7%
herbie shell --seed 2024096
;; Original input program for the Herbie run above (seed 2024096).
;; NOTE(review): the :name says "x greater than or equal to 5" but the
;; precondition below is (>= x 0.5) -- one of the two is likely a typo;
;; confirm against the Jmat.Real.erfi source.
(FPCore (x)
:name "Jmat.Real.erfi, branch x greater than or equal to 5"
:precision binary64
:pre (>= x 0.5)
;; exp(x^2)/sqrt(pi) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7))
(* (* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x)))) (+ (+ (+ (/ 1.0 (fabs x)) (* (/ 1.0 2.0) (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 3.0 4.0) (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 15.0 8.0) (* (* (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x)))))))