
; Herbie job input: asymptotic series for the scaled imaginary error function,
; exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7),
; with t_0 = |x|^-1, t_1 = |x|^-3, t_2 = |x|^-5.
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
/* Asymptotic series for the scaled imaginary error function at large |x|:
 * exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).
 * NOTE(review): exp(x*x) overflows double for |x| > ~26.6 — confirm callers
 * stay within range. */
double code(double x) {
    double inv1 = 1.0 / fabs(x);        /* |x|^-1 */
    double inv3 = (inv1 * inv1) * inv1; /* |x|^-3 */
    double inv5 = (inv3 * inv1) * inv1; /* |x|^-5 */
    double inv7 = (inv5 * inv1) * inv1; /* |x|^-7 */
    double prefactor = (1.0 / sqrt(((double) M_PI))) * exp(fabs(x) * fabs(x));
    double series = ((inv1 + ((1.0 / 2.0) * inv3)) + ((3.0 / 4.0) * inv5)) + ((15.0 / 8.0) * inv7);
    return prefactor * series;
}
/** Asymptotic series for scaled erfi at large |x|:
 *  exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7). */
public static double code(double x) {
    double inv1 = 1.0 / Math.abs(x);    // |x|^-1
    double inv3 = (inv1 * inv1) * inv1; // |x|^-3
    double inv5 = (inv3 * inv1) * inv1; // |x|^-5
    double inv7 = (inv5 * inv1) * inv1; // |x|^-7
    double prefactor = (1.0 / Math.sqrt(Math.PI)) * Math.exp(Math.abs(x) * Math.abs(x));
    double series = ((inv1 + ((1.0 / 2.0) * inv3)) + ((3.0 / 4.0) * inv5)) + ((15.0 / 8.0) * inv7);
    return prefactor * series;
}
def code(x):
    """Asymptotic series for scaled erfi at large |x|:
    exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).

    Fix: the original listing had all statements collapsed onto one line,
    which is not valid Python syntax; reformatted with identical operations.
    """
    t_0 = 1.0 / math.fabs(x)
    t_1 = (t_0 * t_0) * t_0
    t_2 = (t_1 * t_0) * t_0
    return ((1.0 / math.sqrt(math.pi)) * math.exp((math.fabs(x) * math.fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)))
# Scaled-erfi asymptotic series; the Float64(...) wrappers force binary64 rounding after each operation.
function code(x) t_0 = Float64(1.0 / abs(x)) t_1 = Float64(Float64(t_0 * t_0) * t_0) t_2 = Float64(Float64(t_1 * t_0) * t_0) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0)))) end
% Scaled-erfi asymptotic series: exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).
function tmp = code(x) t_0 = 1.0 / abs(x); t_1 = (t_0 * t_0) * t_0; t_2 = (t_1 * t_0) * t_0; tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); end
(* Scaled-erfi asymptotic series; N[..., $MachinePrecision] after every operation mirrors binary64 step-by-step rounding. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t\_0 \cdot t\_0\right) \cdot t\_0\\
t_2 := \left(t\_1 \cdot t\_0\right) \cdot t\_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t\_0 + \frac{1}{2} \cdot t\_1\right) + \frac{3}{4} \cdot t\_2\right) + \frac{15}{8} \cdot \left(\left(t\_2 \cdot t\_0\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Herbie-reported program (identical to the input): asymptotic series for scaled erfi,
; exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x)))
(t_1 (* (* t_0 t_0) t_0))
(t_2 (* (* t_1 t_0) t_0)))
(*
(* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x))))
(+
(+ (+ t_0 (* (/ 1.0 2.0) t_1)) (* (/ 3.0 4.0) t_2))
(* (/ 15.0 8.0) (* (* t_2 t_0) t_0))))))
/* Scaled-erfi asymptotic series in binary64:
 * exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).
 * t_0 = |x|^-1, t_1 = |x|^-3, t_2 = |x|^-5; the final product supplies |x|^-7. */
double code(double x) {
double t_0 = 1.0 / fabs(x);
double t_1 = (t_0 * t_0) * t_0;
double t_2 = (t_1 * t_0) * t_0;
return ((1.0 / sqrt(((double) M_PI))) * exp((fabs(x) * fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
}
/** Scaled-erfi asymptotic series in binary64:
 *  exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).
 *  t_0 = |x|^-1, t_1 = |x|^-3, t_2 = |x|^-5; the final product supplies |x|^-7. */
public static double code(double x) {
double t_0 = 1.0 / Math.abs(x);
double t_1 = (t_0 * t_0) * t_0;
double t_2 = (t_1 * t_0) * t_0;
return ((1.0 / Math.sqrt(Math.PI)) * Math.exp((Math.abs(x) * Math.abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)));
}
def code(x):
    """Asymptotic series for scaled erfi at large |x|:
    exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).

    Fix: the original listing had all statements collapsed onto one line,
    which is not valid Python syntax; reformatted with identical operations.
    """
    t_0 = 1.0 / math.fabs(x)
    t_1 = (t_0 * t_0) * t_0
    t_2 = (t_1 * t_0) * t_0
    return ((1.0 / math.sqrt(math.pi)) * math.exp((math.fabs(x) * math.fabs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0)))
# Scaled-erfi asymptotic series; the Float64(...) wrappers force binary64 rounding after each operation.
function code(x) t_0 = Float64(1.0 / abs(x)) t_1 = Float64(Float64(t_0 * t_0) * t_0) t_2 = Float64(Float64(t_1 * t_0) * t_0) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(abs(x) * abs(x)))) * Float64(Float64(Float64(t_0 + Float64(Float64(1.0 / 2.0) * t_1)) + Float64(Float64(3.0 / 4.0) * t_2)) + Float64(Float64(15.0 / 8.0) * Float64(Float64(t_2 * t_0) * t_0)))) end
% Scaled-erfi asymptotic series: exp(x^2)/sqrt(pi) * (1/|x| + (1/2)/|x|^3 + (3/4)/|x|^5 + (15/8)/|x|^7).
function tmp = code(x) t_0 = 1.0 / abs(x); t_1 = (t_0 * t_0) * t_0; t_2 = (t_1 * t_0) * t_0; tmp = ((1.0 / sqrt(pi)) * exp((abs(x) * abs(x)))) * (((t_0 + ((1.0 / 2.0) * t_1)) + ((3.0 / 4.0) * t_2)) + ((15.0 / 8.0) * ((t_2 * t_0) * t_0))); end
(* Scaled-erfi asymptotic series; N[..., $MachinePrecision] after every operation mirrors binary64 step-by-step rounding. *)
code[x_] := Block[{t$95$0 = N[(1.0 / N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(t$95$1 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]}, N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(t$95$0 + N[(N[(1.0 / 2.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(3.0 / 4.0), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision] + N[(N[(15.0 / 8.0), $MachinePrecision] * N[(N[(t$95$2 * t$95$0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
t_1 := \left(t\_0 \cdot t\_0\right) \cdot t\_0\\
t_2 := \left(t\_1 \cdot t\_0\right) \cdot t\_0\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{\left|x\right| \cdot \left|x\right|}\right) \cdot \left(\left(\left(t\_0 + \frac{1}{2} \cdot t\_1\right) + \frac{3}{4} \cdot t\_2\right) + \frac{15}{8} \cdot \left(\left(t\_2 \cdot t\_0\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (/ 1.0 (fabs x))))
(*
(* (/ 1.0 (sqrt PI)) (exp (* x x)))
(+
(+
(/ (+ (/ (/ 0.5 x) x) 1.0) x)
(* (/ 3.0 4.0) (* (ratio-of-squares (/ 1.0 x) x) t_0)))
(* (/ 15.0 8.0) (* (ratio-of-squares (/ -1.0 (* x x)) x) t_0))))))
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{1}{\left|x\right|}\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{x \cdot x}\right) \cdot \left(\left(\frac{\frac{\frac{0.5}{x}}{x} + 1}{x} + \frac{3}{4} \cdot \left(\mathsf{ratio\_of\_squares}\left(\left(\frac{1}{x}\right), x\right) \cdot t\_0\right)\right) + \frac{15}{8} \cdot \left(\mathsf{ratio\_of\_squares}\left(\left(\frac{-1}{x \cdot x}\right), x\right) \cdot t\_0\right)\right)
\end{array}
\end{array}
Initial program 100.0%
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
associate-*r/N/A
frac-timesN/A
associate-*r*N/A
metadata-evalN/A
*-rgt-identityN/A
Applied rewrites100.0%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
associate-*l*N/A
frac-timesN/A
metadata-evalN/A
sqr-abs-revN/A
pow2N/A
frac-timesN/A
Applied rewrites100.0%
lift-+.f64N/A
lift-fabs.f64N/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
lift-*.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
Applied rewrites100.0%
lift-*.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-ratio-of-squares.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
associate-*l*N/A
frac-timesN/A
metadata-evalN/A
pow2N/A
pow-flipN/A
pow2N/A
pow-divN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
pow-flipN/A
frac-timesN/A
metadata-evalN/A
sqr-abs-revN/A
pow2N/A
Applied rewrites100.0%
Final simplification100.0%
; Herbie alternative: series collapsed to its leading term, exp(x^2)/(sqrt(pi)*x).
(FPCore (x) :precision binary64 (* (* (/ 1.0 (sqrt PI)) (exp (* x x))) (pow x -1.0)))
/* Leading asymptotic term of scaled erfi: exp(x^2)/(sqrt(pi)*x). */
double code(double x) {
    double prefactor = (1.0 / sqrt(((double) M_PI))) * exp(x * x);
    return prefactor * pow(x, -1.0);
}
/** Leading asymptotic term of scaled erfi: exp(x^2)/(sqrt(pi)*x). */
public static double code(double x) {
    double prefactor = (1.0 / Math.sqrt(Math.PI)) * Math.exp(x * x);
    return prefactor * Math.pow(x, -1.0);
}
def code(x):
    """Leading asymptotic term of scaled erfi: exp(x^2)/(sqrt(pi)*x)."""
    prefactor = (1.0 / math.sqrt(math.pi)) * math.exp(x * x)
    return prefactor * math.pow(x, -1.0)
# Leading asymptotic term of scaled erfi: exp(x^2)/(sqrt(pi)*x); Float64(...) forces binary64 rounding per step.
function code(x) return Float64(Float64(Float64(1.0 / sqrt(pi)) * exp(Float64(x * x))) * (x ^ -1.0)) end
% Leading asymptotic term of scaled erfi: exp(x^2)/(sqrt(pi)*x).
function tmp = code(x) tmp = ((1.0 / sqrt(pi)) * exp((x * x))) * (x ^ -1.0); end
(* Leading asymptotic term of scaled erfi: exp(x^2)/(sqrt(pi)*x), with per-step machine-precision rounding. *)
code[x_] := N[(N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[Power[x, -1.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{1}{\sqrt{\pi}} \cdot e^{x \cdot x}\right) \cdot {x}^{-1}
\end{array}
Initial program 100.0%
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
associate-*r/N/A
frac-timesN/A
associate-*r*N/A
metadata-evalN/A
*-rgt-identityN/A
Applied rewrites100.0%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
associate-*l*N/A
frac-timesN/A
metadata-evalN/A
sqr-abs-revN/A
pow2N/A
frac-timesN/A
Applied rewrites100.0%
lift-+.f64N/A
lift-fabs.f64N/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
lift-*.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
lift-/.f64N/A
lift-fabs.f64N/A
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites99.7%
Final simplification99.7%
; Herbie alternative (Taylor expansion around 0): exp(x^2)/sqrt(pi) * 0.5/x^3, evaluated as three successive divisions.
(FPCore (x) :precision binary64 (* (/ (exp (* x x)) (sqrt PI)) (/ (/ (/ 0.5 x) x) x)))
/* exp(x^2)/sqrt(pi) scaled by 0.5/x^3, the latter evaluated as three successive divisions. */
double code(double x) {
    double scale = exp(x * x) / sqrt(((double) M_PI));
    double tail = ((0.5 / x) / x) / x;
    return scale * tail;
}
/** exp(x^2)/sqrt(pi) scaled by 0.5/x^3, the latter evaluated as three successive divisions. */
public static double code(double x) {
    double scale = Math.exp(x * x) / Math.sqrt(Math.PI);
    double tail = ((0.5 / x) / x) / x;
    return scale * tail;
}
def code(x):
    """exp(x^2)/sqrt(pi) scaled by 0.5/x^3 (three successive divisions)."""
    scale = math.exp(x * x) / math.sqrt(math.pi)
    tail = ((0.5 / x) / x) / x
    return scale * tail
# exp(x^2)/sqrt(pi) scaled by 0.5/x^3; Float64(...) forces binary64 rounding per step.
function code(x) return Float64(Float64(exp(Float64(x * x)) / sqrt(pi)) * Float64(Float64(Float64(0.5 / x) / x) / x)) end
% exp(x^2)/sqrt(pi) scaled by 0.5/x^3, evaluated as three successive divisions.
function tmp = code(x) tmp = (exp((x * x)) / sqrt(pi)) * (((0.5 / x) / x) / x); end
(* exp(x^2)/sqrt(pi) scaled by 0.5/x^3, with per-step machine-precision rounding. *)
code[x_] := N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(0.5 / x), $MachinePrecision] / x), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x}}{\sqrt{\pi}} \cdot \frac{\frac{\frac{0.5}{x}}{x}}{x}
\end{array}
Initial program 100.0%
lift-+.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-*r*N/A
distribute-rgt1-inN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
metadata-evalN/A
frac-timesN/A
metadata-evalN/A
associate-*r/N/A
metadata-evalN/A
pow2N/A
sqr-abs-revN/A
frac-timesN/A
associate-*r*N/A
Applied rewrites32.5%
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
metadata-evalN/A
pow-flipN/A
associate-*r/N/A
sqr-powN/A
pow-prod-downN/A
sqr-abs-revN/A
pow-prod-downN/A
sqr-powN/A
metadata-evalN/A
unpow3N/A
pow2N/A
associate-/r*N/A
pow2N/A
sqr-abs-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lower-/.f64N/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f6432.5
Applied rewrites32.5%
lift-*.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
associate-*l/N/A
lower-/.f64N/A
1-expN/A
sqr-abs-revN/A
pow2N/A
prod-expN/A
lower-exp.f64N/A
lower-+.f64N/A
pow2N/A
lower-*.f6432.5
Applied rewrites32.5%
Final simplification32.5%
; Herbie alternative: algebraically 0.5/x^3 again, written as (-1/((-x*x)*x)) * 0.5 times exp(x^2)/sqrt(pi).
(FPCore (x) :precision binary64 (* (/ (exp (* x x)) (sqrt PI)) (* (/ -1.0 (* (* (- x) x) x)) 0.5)))
/* exp(x^2)/sqrt(pi) times (-1/((-x)*x*x)) * 0.5, algebraically 0.5/x^3. */
double code(double x) {
    double scale = exp(x * x) / sqrt(((double) M_PI));
    double tail = (-1.0 / ((-x * x) * x)) * 0.5;
    return scale * tail;
}
/** exp(x^2)/sqrt(pi) times (-1/((-x)*x*x)) * 0.5, algebraically 0.5/x^3. */
public static double code(double x) {
    double scale = Math.exp(x * x) / Math.sqrt(Math.PI);
    double tail = (-1.0 / ((-x * x) * x)) * 0.5;
    return scale * tail;
}
def code(x):
    """exp(x^2)/sqrt(pi) times (-1/((-x)*x*x)) * 0.5, algebraically 0.5/x^3."""
    scale = math.exp(x * x) / math.sqrt(math.pi)
    tail = (-1.0 / ((-x * x) * x)) * 0.5
    return scale * tail
# exp(x^2)/sqrt(pi) times (-1/((-x)*x*x)) * 0.5; Float64(...) forces binary64 rounding per step.
function code(x) return Float64(Float64(exp(Float64(x * x)) / sqrt(pi)) * Float64(Float64(-1.0 / Float64(Float64(Float64(-x) * x) * x)) * 0.5)) end
% exp(x^2)/sqrt(pi) times (-1/((-x)*x*x)) * 0.5, algebraically 0.5/x^3.
function tmp = code(x) tmp = (exp((x * x)) / sqrt(pi)) * ((-1.0 / ((-x * x) * x)) * 0.5); end
(* exp(x^2)/sqrt(pi) times (-1/((-x)*x*x)) * 0.5, with per-step machine-precision rounding. *)
code[x_] := N[(N[(N[Exp[N[(x * x), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(-1.0 / N[(N[((-x) * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x \cdot x}}{\sqrt{\pi}} \cdot \left(\frac{-1}{\left(\left(-x\right) \cdot x\right) \cdot x} \cdot 0.5\right)
\end{array}
Initial program 100.0%
lift-+.f64N/A
lift-*.f64N/A
lift-/.f64N/A
lift-*.f64N/A
associate-*r*N/A
distribute-rgt1-inN/A
lower-*.f64N/A
Applied rewrites100.0%
Taylor expanded in x around 0
metadata-evalN/A
frac-timesN/A
metadata-evalN/A
associate-*r/N/A
metadata-evalN/A
pow2N/A
sqr-abs-revN/A
frac-timesN/A
associate-*r*N/A
Applied rewrites32.5%
lift-pow.f64N/A
metadata-evalN/A
pow-flipN/A
metadata-evalN/A
unpow3N/A
sqr-abs-revN/A
pow2N/A
frac-timesN/A
frac-2negN/A
metadata-evalN/A
pow2N/A
sqr-abs-revN/A
distribute-lft-neg-outN/A
lift-*.f64N/A
lift-neg.f64N/A
frac-timesN/A
metadata-evalN/A
lower-/.f64N/A
lower-*.f6430.9
Applied rewrites30.9%
lift-*.f64N/A
lift-/.f64N/A
lift-exp.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
associate-*l/N/A
lower-/.f64N/A
1-expN/A
sqr-abs-revN/A
pow2N/A
prod-expN/A
lower-exp.f64N/A
lower-+.f64N/A
pow2N/A
lower-*.f6430.9
Applied rewrites30.9%
Final simplification30.9%
herbie shell --seed 2025065
; Fully-inlined form of the scaled-erfi asymptotic series (t_0..t_2 expanded in place).
; NOTE(review): :name says "x greater than or equal to 5" but :pre is (>= x 0.5) —
; these disagree; confirm which bound is intended before relying on the precondition.
(FPCore (x)
:name "Jmat.Real.erfi, branch x greater than or equal to 5"
:precision binary64
:pre (>= x 0.5)
(* (* (/ 1.0 (sqrt PI)) (exp (* (fabs x) (fabs x)))) (+ (+ (+ (/ 1.0 (fabs x)) (* (/ 1.0 2.0) (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 3.0 4.0) (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))))) (* (/ 15.0 8.0) (* (* (* (* (* (* (/ 1.0 (fabs x)) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x))) (/ 1.0 (fabs x)))))))