
; Initial program: (1/sqrt(pi)) * exp(|x|^2) times the first four terms of the
; asymptotic tail 1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7.
; t_0 = 1/|x|, t_1 = 1/|x|^3, t_2 = 1/|x|^5.
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
/* Four-term asymptotic series evaluated at |x|:
 * (1/sqrt(pi)) * exp(x^2) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
 * NOTE(review): exp(fabs(x)*fabs(x)) overflows to +inf for |x| > ~26.6 in binary64. */
double code(double x) {
    double inv = 1.0 / fabs(x);        /* 1/|x|   (was t_0) */
    double inv3 = (inv * inv) * inv;   /* 1/|x|^3 (was t_1) */
    double inv5 = (inv3 * inv) * inv;  /* 1/|x|^5 (was t_2) */
    double inv7 = (inv5 * inv) * inv;  /* 1/|x|^7 */
    /* Growth factor, identical operation order to the original expression. */
    double prefactor = (1.0 / sqrt((double) M_PI)) * exp(fabs(x) * fabs(x));
    double series = ((inv + (1.0 / 2.0) * inv3) + (3.0 / 4.0) * inv5)
                    + (15.0 / 8.0) * inv7;
    return prefactor * series;
}
/**
 * Four-term asymptotic series evaluated at |x|:
 * (1/sqrt(pi)) * exp(x^2) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
 * Operation order matches the original binary64 expression exactly.
 */
public static double code(double x) {
    double r = 1.0 / Math.abs(x);   // 1/|x|   (was t_0)
    double r3 = (r * r) * r;        // 1/|x|^3 (was t_1)
    double r5 = (r3 * r) * r;       // 1/|x|^5 (was t_2)
    double r7 = (r5 * r) * r;       // 1/|x|^7
    double scale = (1.0 / Math.sqrt(Math.PI)) * Math.exp(Math.abs(x) * Math.abs(x));
    double tail = ((r + (1.0 / 2.0) * r3) + (3.0 / 4.0) * r5) + (15.0 / 8.0) * r7;
    return scale * tail;
}
def code(x):
    """Four-term asymptotic series: (1/sqrt(pi)) * exp(|x|^2) *
    (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).

    Fix: the original was collapsed onto a single line (multiple statements
    with no separators), which is a Python syntax error. Reformatted only;
    the arithmetic and its evaluation order are unchanged.
    """
    t_0 = 1.0 / math.fabs(x)
    t_1 = (t_0 * t_0) * t_0
    t_2 = (t_1 * t_0) * t_0
    return ((1.0 / math.sqrt(math.pi)) * math.exp(math.fabs(x) * math.fabs(x))) * (
        ((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2))
        + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))
    )
# Four-term asymptotic series; Float64(...) pins every intermediate to binary64.
# Fix: the original juxtaposed assignments on one line with no newline/';'
# separators, which Julia rejects. Reformatted only; arithmetic unchanged.
function code(x)
    t_0 = Float64(1.0 / abs(x))
    t_1 = Float64(Float64(t_0 * t_0) * t_0)
    t_2 = Float64(Float64(t_1 * t_0) * t_0)
    return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0))))
end
function tmp = code(x)
% CODE Four-term asymptotic series:
%   (1/sqrt(pi)) * exp(|x|^2) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
% Fix: the original fused all statements onto the function declaration line,
% which MATLAB does not accept. Reformatted only; arithmetic unchanged.
t_0 = 1.0 / abs(x);
t_1 = (t_0 * t_0) * t_0;
t_2 = (t_1 * t_0) * t_0;
tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
end
(* Four-term asymptotic series; every N[..., $MachinePrecision] wrapper rounds the
   intermediate result, mirroring per-operation binary64 rounding. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t_0 \cdot t_0\right) \cdot t_0\\
t_2 := \left(t_1 \cdot t_0\right) \cdot t_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t_0 + \frac{1}{2} \cdot t_1\right) + \frac{3}{4} \cdot t_2\right) + \frac{15}{8} \cdot \left(\left(t_2 \cdot t_0\right) \cdot t_0\right)\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (same expression as the initial program):
; (1/sqrt(pi)) * exp(|x|^2) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
/* Four-term asymptotic series: (1/sqrt(pi)) * exp(|x|^2) *
 * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
 * NOTE(review): exp(fabs(x)*fabs(x)) overflows to +inf for |x| > ~26.6 in binary64. */
double code(double x) {
double t_0 = 1.0 / fabs(x);      /* 1/|x| */
double t_1 = (t_0 * t_0) * t_0;  /* 1/|x|^3 */
double t_2 = (t_1 * t_0) * t_0;  /* 1/|x|^5 */
return ((1.0 / sqrt(((double) M_PI))) * exp((fabs(x) * fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
}
// Four-term asymptotic series: (1/sqrt(pi)) * exp(|x|^2) *
// (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
public static double code(double x) {
double t_0 = 1.0 / Math.abs(x);  // 1/|x|
double t_1 = (t_0 * t_0) * t_0;  // 1/|x|^3
double t_2 = (t_1 * t_0) * t_0;  // 1/|x|^5
return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((Math.abs(x) * Math.abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
}
def code(x):
    """Four-term asymptotic series: (1/sqrt(pi)) * exp(|x|^2) *
    (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).

    Fix: the original collapsed several statements onto one line with no
    separators — a Python syntax error. Reformatted only; the arithmetic
    and its evaluation order are unchanged.
    """
    t_0 = 1.0 / math.fabs(x)
    t_1 = (t_0 * t_0) * t_0
    t_2 = (t_1 * t_0) * t_0
    return ((1.0 / math.sqrt(math.pi)) * math.exp(math.fabs(x) * math.fabs(x))) * (
        ((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2))
        + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))
    )
# Four-term asymptotic series; Float64(...) pins every intermediate to binary64.
# Fix: the original juxtaposed assignments on one line with no newline/';'
# separators, which Julia rejects. Reformatted only; arithmetic unchanged.
function code(x)
    t_0 = Float64(1.0 / abs(x))
    t_1 = Float64(Float64(t_0 * t_0) * t_0)
    t_2 = Float64(Float64(t_1 * t_0) * t_0)
    return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0))))
end
function tmp = code(x)
% CODE Four-term asymptotic series:
%   (1/sqrt(pi)) * exp(|x|^2) * (1/|x| + 1/(2|x|^3) + 3/(4|x|^5) + 15/(8|x|^7)).
% Fix: the original fused all statements onto the function declaration line,
% which MATLAB does not accept. Reformatted only; arithmetic unchanged.
t_0 = 1.0 / abs(x);
t_1 = (t_0 * t_0) * t_0;
t_2 = (t_1 * t_0) * t_0;
tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
end
(* Four-term asymptotic series; every N[..., $MachinePrecision] wrapper rounds the
   intermediate result, mirroring per-operation binary64 rounding. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t_0 \cdot t_0\right) \cdot t_0\\
t_2 := \left(t_1 \cdot t_0\right) \cdot t_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t_0 + \frac{1}{2} \cdot t_1\right) + \frac{3}{4} \cdot t_2\right) + \frac{15}{8} \cdot \left(\left(t_2 \cdot t_0\right) \cdot t_0\right)\right)
\end{array}
\end{array}
; Alternative 2: exp(x^2) * pi^(-1/2) / x — leading term of the asymptotic series.
(FPCore (x) :precision binary64 (* (exp (* x x)) (/ (pow PI -0.5) x)))
/* Leading-order term of the asymptotic series: exp(x^2) * pi^(-1/2) / x. */
double code(double x) {
    double growth = exp(x * x);                 /* Gaussian growth factor */
    double scale = pow((double) M_PI, -0.5) / x; /* pi^(-1/2) / x */
    return growth * scale;
}
/** Leading-order term of the asymptotic series: exp(x^2) * pi^(-1/2) / x. */
public static double code(double x) {
    double growth = Math.exp(x * x);               // Gaussian growth factor
    double scale = Math.pow(Math.PI, -0.5) / x;    // pi^(-1/2) / x
    return growth * scale;
}
def code(x):
    """Leading-order term of the asymptotic series: exp(x^2) * pi^(-1/2) / x."""
    growth = math.exp(x * x)             # Gaussian growth factor
    scale = math.pow(math.pi, -0.5) / x  # pi^(-1/2) / x
    return growth * scale
# Leading-order term exp(x^2) * pi^(-1/2) / x; Float64(...) pins intermediates to binary64.
function code(x) return Float64(exp(Float64(x * x)) * Float64((pi ^ -0.5) / x)) end
function tmp = code(x)
% CODE Leading-order term of the asymptotic series: exp(x^2) * pi^(-1/2) / x.
% Fix: the original fused the body onto the function declaration line,
% which MATLAB does not accept. Reformatted only; arithmetic unchanged.
tmp = exp((x * x)) * ((pi ^ -0.5) / x);
end
(* Leading-order term exp(x^2) * Pi^(-1/2) / x at machine precision. *)
code[x_] := N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] * N[(N[Power[Pi, -0.5], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
e^{x \cdot x} \cdot \frac{{\pi}^{-0.5}}{x}
\end{array}
Initial program 100.0%
Simplified 100.0%
Taylor expanded in x around inf 100.0%
associate-*r/ 100.0%
metadata-eval 100.0%
unpow1 100.0%
sqr-pow 100.0%
fabs-sqr 100.0%
sqr-pow 100.0%
unpow1 100.0%
+-commutative 100.0%
associate-*r/ 100.0%
metadata-eval 100.0%
unpow1 100.0%
sqr-pow 100.0%
fabs-sqr 100.0%
sqr-pow 100.0%
unpow1 100.0%
unpow1 100.0%
sqr-pow 100.0%
fabs-sqr 100.0%
sqr-pow 100.0%
unpow1 100.0%
Simplified 100.0%
Taylor expanded in x around inf 100.0%
associate-*l/ 100.0%
*-lft-identity 100.0%
Simplified 100.0%
add-log-exp 3.5%
*-un-lft-identity 3.5%
log-prod 3.5%
metadata-eval 3.5%
add-log-exp 100.0%
inv-pow 100.0%
sqrt-pow1 100.0%
metadata-eval 100.0%
Applied egg-rr 100.0%
+-lft-identity 100.0%
Simplified 100.0%
Final simplification 100.0%
; Alternative 3: sqrt((0.25/pi) * x^-6).
; NOTE(review): the sqrt form is nonnegative for all x, while 0.5/(sqrt(pi)*x^3)
; is negative for x < 0 — relies on the precondition (>= x 0.5).
(FPCore (x) :precision binary64 (sqrt (* (/ 0.25 PI) (pow x -6.0))))
/* sqrt((0.25/pi) * x^-6): nonnegative for all x; matches 0.5/(sqrt(pi)*x^3)
 * only when x > 0 (the branch precondition is x >= 0.5). */
double code(double x) {
    double radicand = (0.25 / (double) M_PI) * pow(x, -6.0);
    return sqrt(radicand);
}
// sqrt((0.25/pi) * x^-6): nonnegative for all x; matches 0.5/(sqrt(pi)*x^3)
// only when x > 0 (the branch precondition is x >= 0.5).
public static double code(double x) {
return Math.sqrt(((0.25 / Math.PI) * Math.pow(x, -6.0)));
}
def code(x):
    """sqrt((0.25/pi) * x**-6): nonnegative for all x; matches
    0.5/(sqrt(pi)*x**3) only when x > 0 (branch precondition: x >= 0.5)."""
    radicand = (0.25 / math.pi) * math.pow(x, -6.0)
    return math.sqrt(radicand)
# sqrt((0.25/pi) * x^-6); nonnegative for all x (branch precondition: x >= 0.5).
function code(x) return sqrt(Float64(Float64(0.25 / pi) * (x ^ -6.0))) end
function tmp = code(x)
% CODE sqrt((0.25/pi) * x^-6); nonnegative for all x (precondition: x >= 0.5).
% Fix: the original fused the body onto the function declaration line,
% which MATLAB does not accept. Reformatted only; arithmetic unchanged.
tmp = sqrt(((0.25 / pi) * (x ^ -6.0)));
end
(* sqrt((0.25/Pi) * x^-6) at machine precision; nonnegative for all x. *)
code[x_] := N[Sqrt[N[(N[(0.25 / Pi), $MachinePrecision] * N[Power[x, -6.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\sqrt{\frac{0.25}{\pi} \cdot {x}^{-6}}
\end{array}
Initial program 100.0%
Simplified 100.0%
Taylor expanded in x around 0 37.5%
associate-*r* 37.5%
*-commutative 37.5%
associate-/r* 38.7%
associate-*r/ 38.7%
unpow1 38.7%
sqr-pow 38.7%
fabs-sqr 38.7%
sqr-pow 38.7%
unpow1 38.7%
associate-*r/ 38.7%
metadata-eval 38.7%
associate-/r* 37.5%
pow-plus 37.5%
metadata-eval 37.5%
Simplified 37.5%
Taylor expanded in x around 0 1.8%
associate-*r* 1.8%
associate-*r/ 1.8%
metadata-eval 1.8%
*-commutative 1.8%
Simplified 1.8%
*-commutative 1.8%
sqrt-div 1.8%
metadata-eval 1.8%
un-div-inv 1.8%
div-inv 1.8%
pow-flip 1.8%
metadata-eval 1.8%
Applied egg-rr 1.8%
add-sqr-sqrt 1.8%
sqrt-unprod 1.8%
frac-times 1.8%
*-commutative 1.8%
*-commutative 1.8%
swap-sqr 1.8%
pow-prod-up 1.8%
metadata-eval 1.8%
metadata-eval 1.8%
add-sqr-sqrt 1.8%
Applied egg-rr 1.8%
*-commutative 1.8%
associate-/l* 1.7%
associate-/r/ 1.8%
Simplified 1.8%
Final simplification 1.8%
; Alternative 4: 0.5 / (sqrt(pi) * x^3) — leading-order term without the exp factor.
(FPCore (x) :precision binary64 (/ 0.5 (* (sqrt PI) (pow x 3.0))))
/* 0.5 / (sqrt(pi) * x^3) — leading-order term without the exp factor. */
double code(double x) {
    double denom = sqrt((double) M_PI) * pow(x, 3.0);
    return 0.5 / denom;
}
// 0.5 / (sqrt(pi) * x^3) — leading-order term without the exp factor.
public static double code(double x) {
return 0.5 / (Math.sqrt(Math.PI) * Math.pow(x, 3.0));
}
def code(x):
    """0.5 / (sqrt(pi) * x**3) — leading-order term without the exp factor."""
    denominator = math.sqrt(math.pi) * math.pow(x, 3.0)
    return 0.5 / denominator
# 0.5 / (sqrt(pi) * x^3) — leading-order term without the exp factor.
function code(x) return Float64(0.5 / Float64(sqrt(pi) * (x ^ 3.0))) end
function tmp = code(x)
% CODE 0.5 / (sqrt(pi) * x^3) — leading-order term without the exp factor.
% Fix: the original fused the body onto the function declaration line,
% which MATLAB does not accept. Reformatted only; arithmetic unchanged.
tmp = 0.5 / (sqrt(pi) * (x ^ 3.0));
end
(* 0.5 / (Sqrt[Pi] * x^3) at machine precision. *)
code[x_] := N[(0.5 / N[(N[Sqrt[Pi], $MachinePrecision] * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{\sqrt{\pi} \cdot {x}^{3}}
\end{array}
Initial program 100.0%
Simplified 100.0%
Taylor expanded in x around 0 37.5%
associate-*r* 37.5%
*-commutative 37.5%
associate-/r* 38.7%
associate-*r/ 38.7%
unpow1 38.7%
sqr-pow 38.7%
fabs-sqr 38.7%
sqr-pow 38.7%
unpow1 38.7%
associate-*r/ 38.7%
metadata-eval 38.7%
associate-/r* 37.5%
pow-plus 37.5%
metadata-eval 37.5%
Simplified 37.5%
Taylor expanded in x around 0 1.8%
associate-*r* 1.8%
associate-*r/ 1.8%
metadata-eval 1.8%
*-commutative 1.8%
Simplified 1.8%
expm1-log1p-u 1.8%
expm1-udef 1.6%
sqrt-div 1.6%
metadata-eval 1.6%
frac-times 1.6%
metadata-eval 1.6%
Applied egg-rr 1.6%
expm1-def 1.8%
expm1-log1p 1.8%
*-commutative 1.8%
Simplified 1.8%
Final simplification 1.8%
; Alternative 5: (0.5 * x^-3) / sqrt(pi) — algebraic rearrangement of 0.5/(sqrt(pi)*x^3).
(FPCore (x) :precision binary64 (/ (* 0.5 (pow x -3.0)) (sqrt PI)))
/* (0.5 * x^-3) / sqrt(pi) — rearranged leading-order term. */
double code(double x) {
    double numer = 0.5 * pow(x, -3.0);
    return numer / sqrt((double) M_PI);
}
// (0.5 * x^-3) / sqrt(pi) — rearranged leading-order term.
public static double code(double x) {
return (0.5 * Math.pow(x, -3.0)) / Math.sqrt(Math.PI);
}
def code(x):
    """(0.5 * x**-3) / sqrt(pi) — rearranged leading-order term."""
    numerator = 0.5 * math.pow(x, -3.0)
    return numerator / math.sqrt(math.pi)
# (0.5 * x^-3) / sqrt(pi) — rearranged leading-order term.
function code(x) return Float64(Float64(0.5 * (x ^ -3.0)) / sqrt(pi)) end
function tmp = code(x)
% CODE (0.5 * x^-3) / sqrt(pi) — rearranged leading-order term.
% Fix: the original fused the body onto the function declaration line,
% which MATLAB does not accept. Reformatted only; arithmetic unchanged.
tmp = (0.5 * (x ^ -3.0)) / sqrt(pi);
end
(* (0.5 * x^-3) / Sqrt[Pi] at machine precision. *)
code[x_] := N[(N[(0.5 * N[Power[x, -3.0], $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5 \cdot {x}^{-3}}{\sqrt{\pi}}
\end{array}
Initial program 100.0%
Simplified 100.0%
Taylor expanded in x around 0 37.5%
associate-*r* 37.5%
*-commutative 37.5%
associate-/r* 38.7%
associate-*r/ 38.7%
unpow1 38.7%
sqr-pow 38.7%
fabs-sqr 38.7%
sqr-pow 38.7%
unpow1 38.7%
associate-*r/ 38.7%
metadata-eval 38.7%
associate-/r* 37.5%
pow-plus 37.5%
metadata-eval 37.5%
Simplified 37.5%
Taylor expanded in x around 0 1.8%
associate-*r* 1.8%
associate-*r/ 1.8%
metadata-eval 1.8%
*-commutative 1.8%
Simplified 1.8%
*-commutative 1.8%
sqrt-div 1.8%
metadata-eval 1.8%
un-div-inv 1.8%
div-inv 1.8%
pow-flip 1.8%
metadata-eval 1.8%
Applied egg-rr 1.8%
Final simplification 1.8%
herbie shell --seed 2023331
; Input program as given to Herbie (fully inlined form of the let* version above).
; NOTE(review): :name says "x greater than or equal to 5" but :pre is (>= x 0.5) —
; confirm which threshold this branch actually uses.
(FPCore (x)
:name "Jmat.Real.erfi, branch x greater than or equal to 5"
:precision binary64
:pre (>= x 0.5)
(* (* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x)))) (+ (+ (+ (/ 1.0 (fabs x)) (* (/ 1.0 2.0) (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 3.0 4.0) (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 15.0 8.0) (* (* (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x)))))))