
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
double code(double x) {
    /* Truncated odd-power series through |x|^7, scaled by 1/sqrt(pi).
       Coefficients match the erfi Maclaurin series:
       (2/sqrt(pi)) * (u + u^3/3 + u^5/10 + u^7/42) with u = |x|. */
    double a = fabs(x);
    double cube = (a * a) * a;        /* |x|^3 */
    double fifth = (cube * a) * a;    /* |x|^5 */
    double seventh = (fifth * a) * a; /* |x|^7 */
    double series = ((2.0 * a + (2.0 / 3.0) * cube) + (1.0 / 5.0) * fifth)
                  + (1.0 / 21.0) * seventh;
    return fabs((1.0 / sqrt(M_PI)) * series);
}
public static double code(double x) {
    // Truncated odd-power series through |x|^7, scaled by 1/sqrt(pi);
    // result is always non-negative because of the outer Math.abs.
    double a = Math.abs(x);
    double cube = (a * a) * a;        // |x|^3
    double fifth = (cube * a) * a;    // |x|^5
    double seventh = (fifth * a) * a; // |x|^7
    double series = ((2.0 * a + (2.0 / 3.0) * cube) + (1.0 / 5.0) * fifth)
            + (1.0 / 21.0) * seventh;
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * series);
}
def code(x): t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x) t_1 = (t_0 * math.fabs(x)) * math.fabs(x) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))
// Computes |(1/sqrt(pi)) * (2|x| + (2/3)|x|^3 + (1/5)|x|^5 + (1/21)|x|^7)|:
// the truncated Maclaurin series of erfi evaluated at |x| (see the report's
// FPCore name). t_0 and t_1 hold |x|^3 and |x|^5 respectively.
double code(double x) {
double t_0 = (fabs(x) * fabs(x)) * fabs(x);
double t_1 = (t_0 * fabs(x)) * fabs(x);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
// Java export of the initial program: series through |x|^7 over sqrt(pi).
// Always non-negative (outer Math.abs); t_0 = |x|^3, t_1 = |x|^5.
public static double code(double x) {
double t_0 = (Math.abs(x) * Math.abs(x)) * Math.abs(x);
double t_1 = (t_0 * Math.abs(x)) * Math.abs(x);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * Math.abs(x)) * Math.abs(x))))));
}
def code(x): t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x) t_1 = (t_0 * math.fabs(x)) * math.fabs(x) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t_0\right) + \frac{1}{5} \cdot t_1\right) + \frac{1}{21} \cdot \left(\left(t_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
\end{array}
(FPCore (x)
:precision binary64
(*
(fabs x)
(fabs
(/
(+
(fma 0.2 (pow x 4.0) (* 0.047619047619047616 (pow x 6.0)))
(fma 0.6666666666666666 (* x x) 2.0))
(sqrt PI)))))
double code(double x) {
    /* Even-power polynomial split into two fused multiply-add halves,
       then scaled by 1/sqrt(pi) and multiplied by |x|. */
    double high_terms = fma(0.2, pow(x, 4.0), 0.047619047619047616 * pow(x, 6.0));
    double low_terms = fma(0.6666666666666666, x * x, 2.0);
    return fabs(x) * fabs((high_terms + low_terms) / sqrt(M_PI));
}
function code(x) return Float64(abs(x) * abs(Float64(Float64(fma(0.2, (x ^ 4.0), Float64(0.047619047619047616 * (x ^ 6.0))) + fma(0.6666666666666666, Float64(x * x), 2.0)) / sqrt(pi)))) end
code[x_] := N[(N[Abs[x], $MachinePrecision] * N[Abs[N[(N[(N[(0.2 * N[Power[x, 4.0], $MachinePrecision] + N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left|x\right| \cdot \left|\frac{\mathsf{fma}\left(0.2, {x}^{4}, 0.047619047619047616 \cdot {x}^{6}\right) + \mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Simplified 99.9%
(FPCore (x)
:precision binary64
(*
x
(fabs
(/
(+
(fma 0.6666666666666666 (* x x) 2.0)
(* (pow x 4.0) (+ 0.2 (* 0.047619047619047616 (* x x)))))
(sqrt PI)))))
double code(double x) {
    /* Sign-preserving variant: x times |even polynomial| / sqrt(pi). */
    double sq = x * x;
    double tail = pow(x, 4.0) * (0.2 + 0.047619047619047616 * sq);
    double numer = fma(0.6666666666666666, sq, 2.0) + tail;
    return x * fabs(numer / sqrt(M_PI));
}
function code(x) return Float64(x * abs(Float64(Float64(fma(0.6666666666666666, Float64(x * x), 2.0) + Float64((x ^ 4.0) * Float64(0.2 + Float64(0.047619047619047616 * Float64(x * x))))) / sqrt(pi)))) end
code[x_] := N[(x * N[Abs[N[(N[(N[(0.6666666666666666 * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision] + N[(N[Power[x, 4.0], $MachinePrecision] * N[(0.2 + N[(0.047619047619047616 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left|\frac{\mathsf{fma}\left(0.6666666666666666, x \cdot x, 2\right) + {x}^{4} \cdot \left(0.2 + 0.047619047619047616 \cdot \left(x \cdot x\right)\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Simplified99.9%
Taylor expanded in x around 0 99.9%
add-sqr-sqrt34.5%
fabs-sqr34.5%
add-sqr-sqrt36.2%
*-un-lft-identity36.2%
Applied egg-rr36.2%
*-lft-identity36.2%
Simplified36.2%
pow236.2%
Applied egg-rr36.2%
Final simplification36.2%
(FPCore (x)
:precision binary64
(fabs
(*
x
(*
(+ 2.0 (* (pow x 4.0) (fma (* x x) 0.047619047619047616 0.2)))
(sqrt (/ 1.0 PI))))))
double code(double x) {
    /* sqrt(1/pi) factored into a named scale; fma fuses the degree-6
       coefficient into the degree-4 correction term. */
    double scale = sqrt(1.0 / M_PI);
    double correction = pow(x, 4.0) * fma(x * x, 0.047619047619047616, 0.2);
    return fabs(x * ((2.0 + correction) * scale));
}
function code(x) return abs(Float64(x * Float64(Float64(2.0 + Float64((x ^ 4.0) * fma(Float64(x * x), 0.047619047619047616, 0.2))) * sqrt(Float64(1.0 / pi))))) end
code[x_] := N[Abs[N[(x * N[(N[(2.0 + N[(N[Power[x, 4.0], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.047619047619047616 + 0.2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Sqrt[N[(1.0 / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|x \cdot \left(\left(2 + {x}^{4} \cdot \mathsf{fma}\left(x \cdot x, 0.047619047619047616, 0.2\right)\right) \cdot \sqrt{\frac{1}{\pi}}\right)\right|
\end{array}
Initial program 99.8%
Simplified99.4%
Taylor expanded in x around 0 99.0%
*-commutative99.0%
*-commutative99.0%
rem-square-sqrt34.3%
fabs-sqr34.3%
rem-square-sqrt99.0%
*-commutative99.0%
associate-*l*99.0%
Simplified99.0%
pow236.2%
Applied egg-rr99.0%
(FPCore (x)
:precision binary64
(*
x
(fabs
(/
(+ 2.0 (* (pow x 4.0) (+ 0.2 (* 0.047619047619047616 (* x x)))))
(sqrt PI)))))
double code(double x) {
    /* x times |2 + x^4 * (0.2 + x^2/21)| / sqrt(pi); keeps the sign of x. */
    double even_poly = 2.0 + pow(x, 4.0) * (0.2 + 0.047619047619047616 * (x * x));
    return x * fabs(even_poly / sqrt(M_PI));
}
// Sign-preserving variant: x times |even polynomial / sqrt(pi)|;
// odd-symmetric in x since everything inside Math.abs is even.
public static double code(double x) {
return x * Math.abs(((2.0 + (Math.pow(x, 4.0) * (0.2 + (0.047619047619047616 * (x * x))))) / Math.sqrt(Math.PI)));
}
def code(x): return x * math.fabs(((2.0 + (math.pow(x, 4.0) * (0.2 + (0.047619047619047616 * (x * x))))) / math.sqrt(math.pi)))
function code(x) return Float64(x * abs(Float64(Float64(2.0 + Float64((x ^ 4.0) * Float64(0.2 + Float64(0.047619047619047616 * Float64(x * x))))) / sqrt(pi)))) end
function tmp = code(x) tmp = x * abs(((2.0 + ((x ^ 4.0) * (0.2 + (0.047619047619047616 * (x * x))))) / sqrt(pi))); end
code[x_] := N[(x * N[Abs[N[(N[(2.0 + N[(N[Power[x, 4.0], $MachinePrecision] * N[(0.2 + N[(0.047619047619047616 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left|\frac{2 + {x}^{4} \cdot \left(0.2 + 0.047619047619047616 \cdot \left(x \cdot x\right)\right)}{\sqrt{\pi}}\right|
\end{array}
Initial program 99.8%
Simplified99.9%
Taylor expanded in x around 0 99.9%
add-sqr-sqrt34.5%
fabs-sqr34.5%
add-sqr-sqrt36.2%
*-un-lft-identity36.2%
Applied egg-rr36.2%
*-lft-identity36.2%
Simplified36.2%
pow236.2%
Applied egg-rr36.2%
Taylor expanded in x around 0 36.0%
Final simplification36.0%
(FPCore (x) :precision binary64 (fabs (* x (* (+ (* 0.047619047619047616 (pow x 6.0)) 2.0) (pow PI -0.5)))))
double code(double x) {
    /* Only the linear and x^7 contributions survive in this alternative:
       |x * (0.0476...*x^6 + 2) * pi^-0.5|. */
    double poly = 0.047619047619047616 * pow(x, 6.0) + 2.0;
    return fabs(x * (poly * pow(M_PI, -0.5)));
}
// Only the linear and x^7 terms remain; the outer Math.abs makes the result even in x.
public static double code(double x) {
return Math.abs((x * (((0.047619047619047616 * Math.pow(x, 6.0)) + 2.0) * Math.pow(Math.PI, -0.5))));
}
def code(x): return math.fabs((x * (((0.047619047619047616 * math.pow(x, 6.0)) + 2.0) * math.pow(math.pi, -0.5))))
function code(x) return abs(Float64(x * Float64(Float64(Float64(0.047619047619047616 * (x ^ 6.0)) + 2.0) * (pi ^ -0.5)))) end
function tmp = code(x) tmp = abs((x * (((0.047619047619047616 * (x ^ 6.0)) + 2.0) * (pi ^ -0.5)))); end
code[x_] := N[Abs[N[(x * N[(N[(N[(0.047619047619047616 * N[Power[x, 6.0], $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision] * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|x \cdot \left(\left(0.047619047619047616 \cdot {x}^{6} + 2\right) \cdot {\pi}^{-0.5}\right)\right|
\end{array}
Initial program 99.8%
Simplified99.4%
Taylor expanded in x around 0 99.0%
*-commutative99.0%
*-commutative99.0%
rem-square-sqrt34.3%
fabs-sqr34.3%
rem-square-sqrt99.0%
*-commutative99.0%
associate-*l*99.0%
Simplified99.0%
Taylor expanded in x around inf 98.8%
*-un-lft-identity98.8%
inv-pow98.8%
sqrt-pow198.8%
metadata-eval98.8%
Applied egg-rr98.8%
*-lft-identity98.8%
Simplified98.8%
Final simplification98.8%
(FPCore (x)
:precision binary64
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(* x 2.0)
(* 0.047619047619047616 (* (* x x) (* (* x x) (* x (* x x)))))))))
// Expanded-product form of (1/sqrt(pi)) * (2x + x^7/21); fabs makes it even in x.
// 0.047619047619047616 is the double nearest 1/21.
double code(double x) {
return fabs(((1.0 / sqrt(((double) M_PI))) * ((x * 2.0) + (0.047619047619047616 * ((x * x) * ((x * x) * (x * (x * x))))))));
}
// Expanded-product form of (1/sqrt(pi)) * (2x + x^7/21); Math.abs makes it even in x.
public static double code(double x) {
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((x * 2.0) + (0.047619047619047616 * ((x * x) * ((x * x) * (x * (x * x))))))));
}
def code(x): return math.fabs(((1.0 / math.sqrt(math.pi)) * ((x * 2.0) + (0.047619047619047616 * ((x * x) * ((x * x) * (x * (x * x))))))))
function code(x) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(x * 2.0) + Float64(0.047619047619047616 * Float64(Float64(x * x) * Float64(Float64(x * x) * Float64(x * Float64(x * x)))))))) end
function tmp = code(x) tmp = abs(((1.0 / sqrt(pi)) * ((x * 2.0) + (0.047619047619047616 * ((x * x) * ((x * x) * (x * (x * x)))))))); end
code[x_] := N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(x * 2.0), $MachinePrecision] + N[(0.047619047619047616 * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(x \cdot 2 + 0.047619047619047616 \cdot \left(\left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right)\right)\right|
\end{array}
Initial program 99.8%
Simplified99.8%
Taylor expanded in x around 0 98.8%
rem-square-sqrt34.3%
fabs-sqr34.3%
rem-square-sqrt98.8%
Simplified98.8%
add-sqr-sqrt34.5%
fabs-sqr34.5%
add-sqr-sqrt36.2%
*-un-lft-identity36.2%
Applied egg-rr98.8%
*-lft-identity36.2%
Simplified98.8%
Final simplification98.8%
(FPCore (x) :precision binary64 (if (<= x 1.85) (* x (* 2.0 (pow PI -0.5))) (* (pow PI -0.5) (* 0.047619047619047616 (pow x 7.0)))))
double code(double x) {
    /* Two-regime approximation split at x = 1.85:
       linear 2x/sqrt(pi) below the split, (1/21)x^7/sqrt(pi) above. */
    if (x <= 1.85) {
        return x * (2.0 * pow(M_PI, -0.5));
    }
    return pow(M_PI, -0.5) * (0.047619047619047616 * pow(x, 7.0));
}
// Piecewise approximation split at x = 1.85:
// linear 2x/sqrt(pi) below the split, (1/21)x^7/sqrt(pi) above.
public static double code(double x) {
double tmp;
if (x <= 1.85) {
tmp = x * (2.0 * Math.pow(Math.PI, -0.5));
} else {
tmp = Math.pow(Math.PI, -0.5) * (0.047619047619047616 * Math.pow(x, 7.0));
}
return tmp;
}
def code(x): tmp = 0 if x <= 1.85: tmp = x * (2.0 * math.pow(math.pi, -0.5)) else: tmp = math.pow(math.pi, -0.5) * (0.047619047619047616 * math.pow(x, 7.0)) return tmp
function code(x) tmp = 0.0 if (x <= 1.85) tmp = Float64(x * Float64(2.0 * (pi ^ -0.5))); else tmp = Float64((pi ^ -0.5) * Float64(0.047619047619047616 * (x ^ 7.0))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= 1.85) tmp = x * (2.0 * (pi ^ -0.5)); else tmp = (pi ^ -0.5) * (0.047619047619047616 * (x ^ 7.0)); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, 1.85], N[(x * N[(2.0 * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Power[Pi, -0.5], $MachinePrecision] * N[(0.047619047619047616 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.85:\\
\;\;\;\;x \cdot \left(2 \cdot {\pi}^{-0.5}\right)\\
\mathbf{else}:\\
\;\;\;\;{\pi}^{-0.5} \cdot \left(0.047619047619047616 \cdot {x}^{7}\right)\\
\end{array}
\end{array}
if x < 1.8500000000000001: Initial program 99.8%
Simplified99.8%
Taylor expanded in x around 0 98.8%
rem-square-sqrt34.3%
fabs-sqr34.3%
rem-square-sqrt98.8%
Simplified98.8%
Taylor expanded in x around 0 70.6%
*-commutative70.6%
associate-*r*70.6%
rem-exp-log70.6%
exp-neg70.6%
unpow1/270.6%
exp-prod70.6%
distribute-lft-neg-out70.6%
distribute-rgt-neg-in70.6%
metadata-eval70.6%
exp-to-pow70.6%
Simplified70.6%
Taylor expanded in x around 0 70.6%
*-commutative70.6%
fabs-mul70.6%
unpow-170.6%
metadata-eval70.6%
pow-sqr70.6%
rem-sqrt-square70.6%
metadata-eval70.6%
pow-sqr70.6%
fabs-sqr70.6%
pow-sqr70.6%
metadata-eval70.6%
fabs-mul70.6%
associate-*r*70.6%
rem-square-sqrt34.2%
fabs-sqr34.2%
Simplified36.1%
if 1.8500000000000001 < x Initial program 99.8%
Simplified99.4%
Taylor expanded in x around 0 99.0%
*-commutative99.0%
*-commutative99.0%
rem-square-sqrt34.3%
fabs-sqr34.3%
rem-square-sqrt99.0%
*-commutative99.0%
associate-*l*99.0%
Simplified99.0%
Taylor expanded in x around inf 34.1%
associate-*r*34.1%
*-commutative34.1%
rem-exp-log34.1%
exp-neg34.1%
unpow1/234.1%
exp-prod34.1%
distribute-lft-neg-out34.1%
distribute-rgt-neg-in34.1%
metadata-eval34.1%
exp-to-pow34.1%
Simplified34.1%
add-sqr-sqrt3.9%
fabs-sqr3.9%
add-sqr-sqrt4.1%
expm1-log1p-u4.0%
expm1-undefine3.9%
associate-*r*3.9%
Applied egg-rr3.9%
log1p-undefine3.9%
rem-exp-log4.0%
+-commutative4.0%
associate--l+4.1%
metadata-eval4.1%
+-commutative4.1%
+-lft-identity4.1%
*-commutative4.1%
associate-*r*4.1%
*-commutative4.1%
associate-*l*4.1%
pow-plus4.1%
metadata-eval4.1%
Simplified4.1%
(FPCore (x) :precision binary64 (if (<= x 1.75) (* x (* 2.0 (pow PI -0.5))) (* 0.6666666666666666 (* (pow PI -0.5) (pow x 3.0)))))
double code(double x) {
    /* Split at x = 1.75: linear 2x/sqrt(pi) below, (2/3)x^3/sqrt(pi) above. */
    if (x <= 1.75) {
        return x * (2.0 * pow(M_PI, -0.5));
    }
    return 0.6666666666666666 * (pow(M_PI, -0.5) * pow(x, 3.0));
}
// Piecewise approximation split at x = 1.75:
// linear 2x/sqrt(pi) below the split, (2/3)x^3/sqrt(pi) above.
public static double code(double x) {
double tmp;
if (x <= 1.75) {
tmp = x * (2.0 * Math.pow(Math.PI, -0.5));
} else {
tmp = 0.6666666666666666 * (Math.pow(Math.PI, -0.5) * Math.pow(x, 3.0));
}
return tmp;
}
def code(x): tmp = 0 if x <= 1.75: tmp = x * (2.0 * math.pow(math.pi, -0.5)) else: tmp = 0.6666666666666666 * (math.pow(math.pi, -0.5) * math.pow(x, 3.0)) return tmp
function code(x) tmp = 0.0 if (x <= 1.75) tmp = Float64(x * Float64(2.0 * (pi ^ -0.5))); else tmp = Float64(0.6666666666666666 * Float64((pi ^ -0.5) * (x ^ 3.0))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= 1.75) tmp = x * (2.0 * (pi ^ -0.5)); else tmp = 0.6666666666666666 * ((pi ^ -0.5) * (x ^ 3.0)); end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, 1.75], N[(x * N[(2.0 * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(0.6666666666666666 * N[(N[Power[Pi, -0.5], $MachinePrecision] * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.75:\\
\;\;\;\;x \cdot \left(2 \cdot {\pi}^{-0.5}\right)\\
\mathbf{else}:\\
\;\;\;\;0.6666666666666666 \cdot \left({\pi}^{-0.5} \cdot {x}^{3}\right)\\
\end{array}
\end{array}
if x < 1.75Initial program 99.8%
Simplified99.8%
Taylor expanded in x around 0 98.8%
rem-square-sqrt34.3%
fabs-sqr34.3%
rem-square-sqrt98.8%
Simplified98.8%
Taylor expanded in x around 0 70.6%
*-commutative70.6%
associate-*r*70.6%
rem-exp-log70.6%
exp-neg70.6%
unpow1/270.6%
exp-prod70.6%
distribute-lft-neg-out70.6%
distribute-rgt-neg-in70.6%
metadata-eval70.6%
exp-to-pow70.6%
Simplified70.6%
Taylor expanded in x around 0 70.6%
*-commutative70.6%
fabs-mul70.6%
unpow-170.6%
metadata-eval70.6%
pow-sqr70.6%
rem-sqrt-square70.6%
metadata-eval70.6%
pow-sqr70.6%
fabs-sqr70.6%
pow-sqr70.6%
metadata-eval70.6%
fabs-mul70.6%
associate-*r*70.6%
rem-square-sqrt34.2%
fabs-sqr34.2%
Simplified36.1%
if 1.75 < x Initial program 99.8%
Simplified99.4%
Taylor expanded in x around inf 26.3%
*-commutative26.3%
unpow226.3%
rem-square-sqrt2.3%
fabs-sqr2.3%
rem-square-sqrt26.3%
unpow326.3%
Simplified26.3%
add-sqr-sqrt3.9%
fabs-sqr3.9%
add-sqr-sqrt4.2%
*-commutative4.2%
inv-pow4.2%
sqrt-pow14.2%
metadata-eval4.2%
Applied egg-rr4.2%
Final simplification36.1%
(FPCore (x) :precision binary64 (* x (* 2.0 (pow PI -0.5))))
double code(double x) {
    /* Leading Taylor term only: x * 2/sqrt(pi). */
    double two_over_sqrt_pi = 2.0 * pow(M_PI, -0.5);
    return x * two_over_sqrt_pi;
}
// Leading Taylor term only: x * 2/sqrt(pi).
public static double code(double x) {
return x * (2.0 * Math.pow(Math.PI, -0.5));
}
def code(x): return x * (2.0 * math.pow(math.pi, -0.5))
function code(x) return Float64(x * Float64(2.0 * (pi ^ -0.5))) end
function tmp = code(x) tmp = x * (2.0 * (pi ^ -0.5)); end
code[x_] := N[(x * N[(2.0 * N[Power[Pi, -0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(2 \cdot {\pi}^{-0.5}\right)
\end{array}
Initial program 99.8%
Simplified99.8%
Taylor expanded in x around 0 98.8%
rem-square-sqrt34.3%
fabs-sqr34.3%
rem-square-sqrt98.8%
Simplified98.8%
Taylor expanded in x around 0 70.6%
*-commutative70.6%
associate-*r*70.6%
rem-exp-log70.6%
exp-neg70.6%
unpow1/270.6%
exp-prod70.6%
distribute-lft-neg-out70.6%
distribute-rgt-neg-in70.6%
metadata-eval70.6%
exp-to-pow70.6%
Simplified70.6%
Taylor expanded in x around 0 70.6%
*-commutative70.6%
fabs-mul70.6%
unpow-170.6%
metadata-eval70.6%
pow-sqr70.6%
rem-sqrt-square70.6%
metadata-eval70.6%
pow-sqr70.6%
fabs-sqr70.6%
pow-sqr70.6%
metadata-eval70.6%
fabs-mul70.6%
associate-*r*70.6%
rem-square-sqrt34.2%
fabs-sqr34.2%
Simplified36.1%
herbie shell --seed 2024125
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))