
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/*
 * Four-term Maclaurin series for erfi(|x|) (the FPCore in this report is
 * named "Jmat.Real.erfi"):
 *   (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)
 * Same floating-point operations in the same order as the original;
 * |x| is hoisted into a local since fabs() is a pure function.
 */
double code(double x) {
    double ax = fabs(x);
    double p3 = (ax * ax) * ax;  /* |x|^3 */
    double p5 = (p3 * ax) * ax;  /* |x|^5 */
    double p7 = (p5 * ax) * ax;  /* |x|^7 */
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * p3)) + ((1.0 / 5.0) * p5);
    sum = sum + ((1.0 / 21.0) * p7);
    return fabs((1.0 / sqrt((double) M_PI)) * sum);
}
/**
 * Four-term Maclaurin series for erfi(|x|):
 * (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
 * Identical floating-point operation sequence to the original; Math.abs(x)
 * is hoisted into a local because it is a pure function.
 */
public static double code(double x) {
    double ax = Math.abs(x);
    double p3 = (ax * ax) * ax;  // |x|^3
    double p5 = (p3 * ax) * ax;  // |x|^5
    double p7 = (p5 * ax) * ax;  // |x|^7
    double sum = ((2.0 * ax) + ((2.0 / 3.0) * p3)) + ((1.0 / 5.0) * p5);
    sum += (1.0 / 21.0) * p7;
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * sum);
}
def code(x):
    """Four-term Maclaurin series for erfi(|x|).

    Evaluates (1/sqrt(pi)) * (2|x| + (2/3)|x|**3 + (1/5)|x|**5 + (1/21)|x|**7).
    Reformatted from the report's collapsed one-line form, which was a
    SyntaxError (three statements fused onto one line).
    """
    import math  # local import: this report snippet carries no module header
    t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x)
    t_1 = (t_0 * math.fabs(x)) * math.fabs(x)
    return math.fabs(
        (1.0 / math.sqrt(math.pi))
        * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1))
           + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))
    )
# Four-term Maclaurin series for erfi(|x|), with explicit Float64 rounding
# after every operation (as emitted by Herbie). Reformatted from the report's
# collapsed one-line form, which did not parse (no separators between the
# statements).
function code(x)
    t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x))
    t_1 = Float64(Float64(t_0 * abs(x)) * abs(x))
    return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x))))))
end
% Four-term Maclaurin series for erfi(|x|):
% (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
% Reformatted from the report's collapsed one-line form.
function tmp = code(x)
    t_0 = (abs(x) * abs(x)) * abs(x);
    t_1 = (t_0 * abs(x)) * abs(x);
    tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x))))));
end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
/* Four-term Maclaurin series for erfi(|x|) (FPCore name "Jmat.Real.erfi"):
   (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7). */
double code(double x) {
/* t_0 = |x|^3 */
double t_0 = (fabs(x) * fabs(x)) * fabs(x);
/* t_1 = |x|^5 */
double t_1 = (t_0 * fabs(x)) * fabs(x);
/* (t_1 * |x|) * |x| = |x|^7; the outer fabs keeps the result non-negative. */
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
/**
 * Four-term Maclaurin series for erfi(|x|):
 * (1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7).
 */
public static double code(double x) {
// t_0 = |x|^3
double t_0 = (Math.abs(x) * Math.abs(x)) * Math.abs(x);
// t_1 = |x|^5; (t_1 * |x|) * |x| below is |x|^7
double t_1 = (t_0 * Math.abs(x)) * Math.abs(x);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * Math.abs(x)) * Math.abs(x))))));
}
def code(x):
    """Four-term Maclaurin series for erfi(|x|).

    (1/sqrt(pi)) * (2|x| + (2/3)|x|**3 + (1/5)|x|**5 + (1/21)|x|**7).
    Reformatted from the report's collapsed one-line form (SyntaxError).
    """
    import math  # local import: this report snippet carries no module header
    t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x)  # |x|^3
    t_1 = (t_0 * math.fabs(x)) * math.fabs(x)  # |x|^5
    return math.fabs(
        (1.0 / math.sqrt(math.pi))
        * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1))
           + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))
    )
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+
(* (fma (* x_m x_m) 0.6666666666666666 2.0) x_m)
(*
(/ 1.0 5.0)
(* (* (* (* (fabs x_m) (fabs x_m)) (fabs x_m)) (fabs x_m)) (fabs x_m))))
(* (pow (fabs x_m) 7.0) 0.047619047619047616)))))x_m = fabs(x);
/* Herbie alternative: the 2x + (2/3)x^3 head of the series is fused into one
   fma, the x^5 term is built by repeated multiplication, and the x^7 term
   goes through pow(). Per the report preamble, x_m is bound to fabs(x). */
double code(double x_m) {
/* fma(x^2, 2/3, 2) * x == 2x + (2/3)x^3 with a single rounding of the inner
   sum; 0.047619047619047616 is the double nearest 1/21. */
return fabs(((1.0 / sqrt(((double) M_PI))) * (((fma((x_m * x_m), 0.6666666666666666, 2.0) * x_m) + ((1.0 / 5.0) * ((((fabs(x_m) * fabs(x_m)) * fabs(x_m)) * fabs(x_m)) * fabs(x_m)))) + (pow(fabs(x_m), 7.0) * 0.047619047619047616))));
}
x_m = abs(x) function code(x_m) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(fma(Float64(x_m * x_m), 0.6666666666666666, 2.0) * x_m) + Float64(Float64(1.0 / 5.0) * Float64(Float64(Float64(Float64(abs(x_m) * abs(x_m)) * abs(x_m)) * abs(x_m)) * abs(x_m)))) + Float64((abs(x_m) ^ 7.0) * 0.047619047619047616)))) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.6666666666666666 + 2.0), $MachinePrecision] * x$95$m), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * N[(N[(N[(N[(N[Abs[x$95$m], $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[Power[N[Abs[x$95$m], $MachinePrecision], 7.0], $MachinePrecision] * 0.047619047619047616), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.6666666666666666, 2\right) \cdot x\_m + \frac{1}{5} \cdot \left(\left(\left(\left(\left|x\_m\right| \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right)\right) + {\left(\left|x\_m\right|\right)}^{7} \cdot 0.047619047619047616\right)\right|
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
metadata-evalN/A
lift-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-fabs.f64N/A
lower-pow.f6499.9
lift-/.f64N/A
metadata-eval99.9
Applied rewrites99.9%
lift-+.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
lift-*.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-abs-revN/A
pow2N/A
associate-*r*N/A
distribute-rgt-inN/A
*-commutativeN/A
Applied rewrites99.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+
(* (fma (* x_m x_m) 0.6666666666666666 2.0) x_m)
(*
0.2
(* (* (* (* (fabs x_m) (fabs x_m)) (fabs x_m)) (fabs x_m)) (fabs x_m))))
(* (pow (fabs x_m) 5.0) (* (* x_m x_m) 0.047619047619047616))))))x_m = fabs(x);
/* Herbie alternative: like the pow(|x|,7) variant above, but the x^7 term is
   split as pow(|x|, 5) * x^2 * (1/21) and the 1/5 coefficient is the literal
   0.2. Per the report preamble, x_m is bound to fabs(x). */
double code(double x_m) {
/* 0.047619047619047616 is the double nearest 1/21. */
return fabs(((1.0 / sqrt(((double) M_PI))) * (((fma((x_m * x_m), 0.6666666666666666, 2.0) * x_m) + (0.2 * ((((fabs(x_m) * fabs(x_m)) * fabs(x_m)) * fabs(x_m)) * fabs(x_m)))) + (pow(fabs(x_m), 5.0) * ((x_m * x_m) * 0.047619047619047616)))));
}
x_m = abs(x) function code(x_m) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(fma(Float64(x_m * x_m), 0.6666666666666666, 2.0) * x_m) + Float64(0.2 * Float64(Float64(Float64(Float64(abs(x_m) * abs(x_m)) * abs(x_m)) * abs(x_m)) * abs(x_m)))) + Float64((abs(x_m) ^ 5.0) * Float64(Float64(x_m * x_m) * 0.047619047619047616))))) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.6666666666666666 + 2.0), $MachinePrecision] * x$95$m), $MachinePrecision] + N[(0.2 * N[(N[(N[(N[(N[Abs[x$95$m], $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[Power[N[Abs[x$95$m], $MachinePrecision], 5.0], $MachinePrecision] * N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.047619047619047616), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.6666666666666666, 2\right) \cdot x\_m + 0.2 \cdot \left(\left(\left(\left(\left|x\_m\right| \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right)\right) + {\left(\left|x\_m\right|\right)}^{5} \cdot \left(\left(x\_m \cdot x\_m\right) \cdot 0.047619047619047616\right)\right)\right|
\end{array}
Initial program 99.8%
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
lift-*.f64N/A
associate-*l*N/A
lower-*.f64N/A
Applied rewrites99.8%
lift-+.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
lift-*.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-abs-revN/A
pow2N/A
associate-*r*N/A
distribute-rgt-inN/A
*-commutativeN/A
Applied rewrites99.8%
lift-/.f64N/A
metadata-eval99.8
Applied rewrites99.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+
(* (fma (* x_m x_m) 0.6666666666666666 2.0) x_m)
(*
(/ 1.0 5.0)
(* (* (* (* (fabs x_m) (fabs x_m)) (fabs x_m)) (fabs x_m)) (fabs x_m))))
(*
(/ 1.0 21.0)
(*
(* (* (* (* x_m x_m) (* x_m x_m)) (fabs x_m)) (fabs x_m))
(fabs x_m)))))))x_m = fabs(x);
/* Herbie alternative: fma for the series head 2x + (2/3)x^3; the x^5 term is
   a chain of fabs(x_m) products and the x^7 term is (x^2 * x^2) * |x|^3.
   Per the report preamble, x_m is bound to fabs(x). */
double code(double x_m) {
return fabs(((1.0 / sqrt(((double) M_PI))) * (((fma((x_m * x_m), 0.6666666666666666, 2.0) * x_m) + ((1.0 / 5.0) * ((((fabs(x_m) * fabs(x_m)) * fabs(x_m)) * fabs(x_m)) * fabs(x_m)))) + ((1.0 / 21.0) * (((((x_m * x_m) * (x_m * x_m)) * fabs(x_m)) * fabs(x_m)) * fabs(x_m))))));
}
x_m = abs(x) function code(x_m) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(fma(Float64(x_m * x_m), 0.6666666666666666, 2.0) * x_m) + Float64(Float64(1.0 / 5.0) * Float64(Float64(Float64(Float64(abs(x_m) * abs(x_m)) * abs(x_m)) * abs(x_m)) * abs(x_m)))) + Float64(Float64(1.0 / 21.0) * Float64(Float64(Float64(Float64(Float64(x_m * x_m) * Float64(x_m * x_m)) * abs(x_m)) * abs(x_m)) * abs(x_m)))))) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.6666666666666666 + 2.0), $MachinePrecision] * x$95$m), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * N[(N[(N[(N[(N[Abs[x$95$m], $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.6666666666666666, 2\right) \cdot x\_m + \frac{1}{5} \cdot \left(\left(\left(\left(\left|x\_m\right| \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right)\right) + \frac{1}{21} \cdot \left(\left(\left(\left(\left(x\_m \cdot x\_m\right) \cdot \left(x\_m \cdot x\_m\right)\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right) \cdot \left|x\_m\right|\right)\right)\right|
\end{array}
Initial program 99.8%
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
lift-*.f64N/A
lower-*.f6499.8
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-absN/A
lower-*.f6499.8
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-absN/A
lower-*.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
lift-*.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-abs-revN/A
pow2N/A
associate-*r*N/A
distribute-rgt-inN/A
*-commutativeN/A
Applied rewrites99.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(let* ((t_0 (* (* x_m x_m) x_m)))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+
(* (fma (* x_m x_m) 0.6666666666666666 2.0) x_m)
(* 0.2 (* (* (* x_m x_m) (* x_m x_m)) (fabs x_m))))
(* (/ 1.0 21.0) (* (* t_0 t_0) (fabs x_m))))))))x_m = fabs(x);
/* Herbie alternative: t_0 = x_m^3, so t_0 * t_0 = x_m^6 and
   (t_0 * t_0) * |x_m| = |x_m|^7 for the 1/21 term. The x^5 term is
   (x^2 * x^2) * |x| with coefficient 0.2. x_m is bound to fabs(x). */
double code(double x_m) {
/* t_0 = x_m^3 */
double t_0 = (x_m * x_m) * x_m;
return fabs(((1.0 / sqrt(((double) M_PI))) * (((fma((x_m * x_m), 0.6666666666666666, 2.0) * x_m) + (0.2 * (((x_m * x_m) * (x_m * x_m)) * fabs(x_m)))) + ((1.0 / 21.0) * ((t_0 * t_0) * fabs(x_m))))));
}
x_m = abs(x) function code(x_m) t_0 = Float64(Float64(x_m * x_m) * x_m) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(fma(Float64(x_m * x_m), 0.6666666666666666, 2.0) * x_m) + Float64(0.2 * Float64(Float64(Float64(x_m * x_m) * Float64(x_m * x_m)) * abs(x_m)))) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_0 * t_0) * abs(x_m)))))) end
x_m = N[Abs[x], $MachinePrecision]
code[x$95$m_] := Block[{t$95$0 = N[(N[(x$95$m * x$95$m), $MachinePrecision] * x$95$m), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.6666666666666666 + 2.0), $MachinePrecision] * x$95$m), $MachinePrecision] + N[(0.2 * N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision]), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$0 * t$95$0), $MachinePrecision] * N[Abs[x$95$m], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
t_0 := \left(x\_m \cdot x\_m\right) \cdot x\_m\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.6666666666666666, 2\right) \cdot x\_m + 0.2 \cdot \left(\left(\left(x\_m \cdot x\_m\right) \cdot \left(x\_m \cdot x\_m\right)\right) \cdot \left|x\_m\right|\right)\right) + \frac{1}{21} \cdot \left(\left(t\_0 \cdot t\_0\right) \cdot \left|x\_m\right|\right)\right)\right|
\end{array}
\end{array}
Initial program 99.8%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
lift-*.f64N/A
associate-*r*N/A
lift-*.f64N/A
swap-sqrN/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-absN/A
unswap-sqrN/A
Applied rewrites99.8%
lift-+.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
lift-*.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-abs-revN/A
pow2N/A
associate-*r*N/A
distribute-rgt-inN/A
*-commutativeN/A
Applied rewrites99.8%
lift-/.f64N/A
metadata-eval99.8
Applied rewrites99.8%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
lift-fabs.f64N/A
sqr-abs-revN/A
pow2N/A
lift-fabs.f64N/A
associate-*l*N/A
sqr-abs-revN/A
pow2N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6499.8
Applied rewrites99.8%
x_m = (fabs.f64 x)
(FPCore (x_m)
:precision binary64
(/
(fabs
(*
(fma
(fma
(fma (* x_m x_m) 0.047619047619047616 0.2)
(* x_m x_m)
0.6666666666666666)
(* x_m x_m)
2.0)
x_m))
(sqrt PI)))x_m = fabs(x);
/* Herbie alternative: the full degree-7 polynomial in Horner form via nested
   fma calls — (((x^2 * (1/21) + 1/5) * x^2 + 2/3) * x^2 + 2) * x — then
   scaled by 1/sqrt(pi). 0.047619047619047616 is the double nearest 1/21,
   0.2 is 1/5. x_m is bound to fabs(x) in the report preamble. */
double code(double x_m) {
return fabs((fma(fma(fma((x_m * x_m), 0.047619047619047616, 0.2), (x_m * x_m), 0.6666666666666666), (x_m * x_m), 2.0) * x_m)) / sqrt(((double) M_PI));
}
x_m = abs(x) function code(x_m) return Float64(abs(Float64(fma(fma(fma(Float64(x_m * x_m), 0.047619047619047616, 0.2), Float64(x_m * x_m), 0.6666666666666666), Float64(x_m * x_m), 2.0) * x_m)) / sqrt(pi)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Abs[N[(N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.047619047619047616 + 0.2), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 2.0), $MachinePrecision] * x$95$m), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\left|\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.047619047619047616, 0.2\right), x\_m \cdot x\_m, 0.6666666666666666\right), x\_m \cdot x\_m, 2\right) \cdot x\_m\right|}{\sqrt{\pi}}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
metadata-evalN/A
lift-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-fabs.f64N/A
lower-pow.f6499.9
lift-/.f64N/A
metadata-eval99.9
Applied rewrites99.9%
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites99.4%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.4%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (fabs (* (fma (fma (* x_m x_m) 0.2 0.6666666666666666) (* x_m x_m) 2.0) x_m)) (sqrt PI)))
x_m = fabs(x);
/* Herbie alternative: degree-5 Horner form via nested fma —
   ((x^2 * 0.2 + 2/3) * x^2 + 2) * x, scaled by 1/sqrt(pi). Compared with the
   degree-7 variant above, the 1/21 * x^7 term is dropped here.
   x_m is bound to fabs(x) in the report preamble. */
double code(double x_m) {
return fabs((fma(fma((x_m * x_m), 0.2, 0.6666666666666666), (x_m * x_m), 2.0) * x_m)) / sqrt(((double) M_PI));
}
x_m = abs(x) function code(x_m) return Float64(abs(Float64(fma(fma(Float64(x_m * x_m), 0.2, 0.6666666666666666), Float64(x_m * x_m), 2.0) * x_m)) / sqrt(pi)) end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Abs[N[(N[(N[(N[(x$95$m * x$95$m), $MachinePrecision] * 0.2 + 0.6666666666666666), $MachinePrecision] * N[(x$95$m * x$95$m), $MachinePrecision] + 2.0), $MachinePrecision] * x$95$m), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\left|\mathsf{fma}\left(\mathsf{fma}\left(x\_m \cdot x\_m, 0.2, 0.6666666666666666\right), x\_m \cdot x\_m, 2\right) \cdot x\_m\right|}{\sqrt{\pi}}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
metadata-evalN/A
lift-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-fabs.f64N/A
lower-pow.f6499.9
lift-/.f64N/A
metadata-eval99.9
Applied rewrites99.9%
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites99.4%
Taylor expanded in x around 0
*-commutativeN/A
*-commutativeN/A
*-commutativeN/A
pow2N/A
lift-*.f64N/A
+-commutativeN/A
lift-*.f64N/A
pow2N/A
+-commutativeN/A
lower-*.f64N/A
Applied rewrites93.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (if (<= x_m 1.7) (/ (fabs (* 2.0 x_m)) (sqrt PI)) (fabs (* (/ 1.0 (sqrt PI)) (* (* x_m x_m) (* 0.6666666666666666 x_m))))))
x_m = fabs(x);
/*
 * Piecewise Herbie alternative: for x_m <= 1.7 only the leading series term
 * 2*x_m/sqrt(pi) is used; above the split point a single cubic term
 * (2/3)*x_m^3/sqrt(pi) is used instead. Same floating-point operations as
 * the original; the tmp variable is replaced by early returns.
 */
double code(double x_m) {
    if (x_m <= 1.7) {
        return fabs((2.0 * x_m)) / sqrt(((double) M_PI));
    }
    return fabs(((1.0 / sqrt(((double) M_PI))) * ((x_m * x_m) * (0.6666666666666666 * x_m))));
}
x_m = Math.abs(x);
/**
 * Piecewise Herbie alternative: leading term 2*x_m/sqrt(pi) for x_m <= 1.7,
 * cubic term (2/3)*x_m^3/sqrt(pi) above it. Same operations as the original,
 * restructured with early returns instead of a tmp variable.
 */
public static double code(double x_m) {
    if (x_m <= 1.7) {
        return Math.abs((2.0 * x_m)) / Math.sqrt(Math.PI);
    }
    return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((x_m * x_m) * (0.6666666666666666 * x_m))));
}
# Report preamble (argument binding, restored as a comment because the
# original line fused it with the def, a SyntaxError): x_m = math.fabs(x)
def code(x_m):
    """Piecewise approximation: 2*x_m/sqrt(pi) for x_m <= 1.7, else
    (2/3)*x_m**3/sqrt(pi) (both wrapped in fabs)."""
    import math  # local import: this report snippet carries no module header
    if x_m <= 1.7:
        tmp = math.fabs((2.0 * x_m)) / math.sqrt(math.pi)
    else:
        tmp = math.fabs((1.0 / math.sqrt(math.pi)) * ((x_m * x_m) * (0.6666666666666666 * x_m)))
    return tmp
x_m = abs(x) function code(x_m) tmp = 0.0 if (x_m <= 1.7) tmp = Float64(abs(Float64(2.0 * x_m)) / sqrt(pi)); else tmp = abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(x_m * x_m) * Float64(0.6666666666666666 * x_m)))); end return tmp end
x_m = abs(x); function tmp_2 = code(x_m) tmp = 0.0; if (x_m <= 1.7) tmp = abs((2.0 * x_m)) / sqrt(pi); else tmp = abs(((1.0 / sqrt(pi)) * ((x_m * x_m) * (0.6666666666666666 * x_m)))); end tmp_2 = tmp; end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := If[LessEqual[x$95$m, 1.7], N[(N[Abs[N[(2.0 * x$95$m), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision], N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(x$95$m * x$95$m), $MachinePrecision] * N[(0.6666666666666666 * x$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
x_m = \left|x\right|
\\
\begin{array}{l}
\mathbf{if}\;x\_m \leq 1.7:\\
\;\;\;\;\frac{\left|2 \cdot x\_m\right|}{\sqrt{\pi}}\\
\mathbf{else}:\\
\;\;\;\;\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(x\_m \cdot x\_m\right) \cdot \left(0.6666666666666666 \cdot x\_m\right)\right)\right|\\
\end{array}
\end{array}
if x < 1.69999999999999996
Initial program 99.8%
Taylor expanded in x around 0
metadata-evalN/A
lift-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-fabs.f64N/A
lower-pow.f6499.8
lift-/.f64N/A
metadata-eval99.8
Applied rewrites99.8%
Applied rewrites99.2%
Taylor expanded in x around 0
Applied rewrites98.3%
Taylor expanded in x around 0
Applied rewrites98.3%
if 1.69999999999999996 < x
Initial program 99.8%
lift-+.f64N/A
lift-*.f64N/A
lift-fabs.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*r*N/A
lift-fabs.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
*-commutativeN/A
lower-fma.f6499.8
Applied rewrites99.8%
Taylor expanded in x around inf
pow2N/A
sqr-abs-revN/A
pow3N/A
*-commutativeN/A
lower-*.f64N/A
lower-pow.f64N/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow170.2
Applied rewrites70.2%
lift-*.f64N/A
lift-pow.f64N/A
*-commutativeN/A
pow3N/A
pow2N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
associate-*r*N/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
pow2N/A
lift-*.f6470.2
Applied rewrites70.2%
lift-*.f64N/A
lift-*.f64N/A
lift-*.f64N/A
pow2N/A
associate-*l*N/A
lower-*.f64N/A
pow2N/A
lift-*.f64N/A
lower-*.f6470.2
Applied rewrites70.2%
x_m = (fabs.f64 x) (FPCore (x_m) :precision binary64 (/ (fabs (* 2.0 x_m)) (sqrt PI)))
x_m = fabs(x);
/* Leading series term only: |2*x_m| / sqrt(pi). Same operations as the
   original, with the denominator hoisted into a named constant. */
double code(double x_m) {
    const double sqrt_pi = sqrt(((double) M_PI));
    return fabs((2.0 * x_m)) / sqrt_pi;
}
x_m = Math.abs(x);
/** Leading series term only: |2*x_m| / sqrt(pi); denominator hoisted. */
public static double code(double x_m) {
    double sqrtPi = Math.sqrt(Math.PI);
    return Math.abs((2.0 * x_m)) / sqrtPi;
}
# Report preamble (argument binding, restored as a comment because the
# original line fused it with the def, a SyntaxError): x_m = math.fabs(x)
def code(x_m):
    """Leading series term only: |2*x_m| / sqrt(pi)."""
    import math  # local import: this report snippet carries no module header
    return math.fabs((2.0 * x_m)) / math.sqrt(math.pi)
x_m = abs(x) function code(x_m) return Float64(abs(Float64(2.0 * x_m)) / sqrt(pi)) end
x_m = abs(x); function tmp = code(x_m) tmp = abs((2.0 * x_m)) / sqrt(pi); end
x_m = N[Abs[x], $MachinePrecision] code[x$95$m_] := N[(N[Abs[N[(2.0 * x$95$m), $MachinePrecision]], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x_m = \left|x\right|
\\
\frac{\left|2 \cdot x\_m\right|}{\sqrt{\pi}}
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
metadata-evalN/A
lift-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lift-fabs.f64N/A
lower-pow.f6499.9
lift-/.f64N/A
metadata-eval99.9
Applied rewrites99.9%
Applied rewrites99.4%
Taylor expanded in x around 0
Applied rewrites98.5%
Taylor expanded in x around 0
Applied rewrites67.8%
herbie shell --seed 2025092
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))