
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))double code(double x) {
double t_0 = (fabs(x) * fabs(x)) * fabs(x);
double t_1 = (t_0 * fabs(x)) * fabs(x);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
public static double code(double x) {
    // Odd degree-7 polynomial in |x|, scaled by 1/sqrt(pi).  Same operation
    // order as the generated original; only restructured for readability.
    final double a = Math.abs(x);
    final double cube = (a * a) * a;        // |x|^3
    final double fifth = (cube * a) * a;    // |x|^5
    final double seventh = (fifth * a) * a; // |x|^7
    double sum = (2.0 * a) + ((2.0 / 3.0) * cube);
    sum = sum + ((1.0 / 5.0) * fifth);
    sum = sum + ((1.0 / 21.0) * seventh);
    return Math.abs((1.0 / Math.sqrt(Math.PI)) * sum);
}
def code(x): t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x) t_1 = (t_0 * math.fabs(x)) * math.fabs(x) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (fabs x) (fabs x)) (fabs x)))
(t_1 (* (* t_0 (fabs x)) (fabs x))))
(fabs
(*
(/ 1.0 (sqrt PI))
(+
(+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) t_0)) (* (/ 1.0 5.0) t_1))
(* (/ 1.0 21.0) (* (* t_1 (fabs x)) (fabs x))))))))double code(double x) {
double t_0 = (fabs(x) * fabs(x)) * fabs(x);
double t_1 = (t_0 * fabs(x)) * fabs(x);
return fabs(((1.0 / sqrt(((double) M_PI))) * ((((2.0 * fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * fabs(x)) * fabs(x))))));
}
public static double code(double x) {
double t_0 = (Math.abs(x) * Math.abs(x)) * Math.abs(x);
double t_1 = (t_0 * Math.abs(x)) * Math.abs(x);
return Math.abs(((1.0 / Math.sqrt(Math.PI)) * ((((2.0 * Math.abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * Math.abs(x)) * Math.abs(x))))));
}
def code(x): t_0 = (math.fabs(x) * math.fabs(x)) * math.fabs(x) t_1 = (t_0 * math.fabs(x)) * math.fabs(x) return math.fabs(((1.0 / math.sqrt(math.pi)) * ((((2.0 * math.fabs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * math.fabs(x)) * math.fabs(x))))))
function code(x) t_0 = Float64(Float64(abs(x) * abs(x)) * abs(x)) t_1 = Float64(Float64(t_0 * abs(x)) * abs(x)) return abs(Float64(Float64(1.0 / sqrt(pi)) * Float64(Float64(Float64(Float64(2.0 * abs(x)) + Float64(Float64(2.0 / 3.0) * t_0)) + Float64(Float64(1.0 / 5.0) * t_1)) + Float64(Float64(1.0 / 21.0) * Float64(Float64(t_1 * abs(x)) * abs(x)))))) end
function tmp = code(x) t_0 = (abs(x) * abs(x)) * abs(x); t_1 = (t_0 * abs(x)) * abs(x); tmp = abs(((1.0 / sqrt(pi)) * ((((2.0 * abs(x)) + ((2.0 / 3.0) * t_0)) + ((1.0 / 5.0) * t_1)) + ((1.0 / 21.0) * ((t_1 * abs(x)) * abs(x)))))); end
code[x_] := Block[{t$95$0 = N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(N[(N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] + N[(N[(2.0 / 3.0), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 5.0), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision] + N[(N[(1.0 / 21.0), $MachinePrecision] * N[(N[(t$95$1 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
t_0 := \left(\left|x\right| \cdot \left|x\right|\right) \cdot \left|x\right|\\
t_1 := \left(t\_0 \cdot \left|x\right|\right) \cdot \left|x\right|\\
\left|\frac{1}{\sqrt{\pi}} \cdot \left(\left(\left(2 \cdot \left|x\right| + \frac{2}{3} \cdot t\_0\right) + \frac{1}{5} \cdot t\_1\right) + \frac{1}{21} \cdot \left(\left(t\_1 \cdot \left|x\right|\right) \cdot \left|x\right|\right)\right)\right|
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* (* x x) x) x)))
(fabs
(*
(/ 1.0 (sqrt PI))
(fma
(* t_0 (fabs x))
(* (* x x) 0.047619047619047616)
(fma
(* 0.2 (fabs x))
t_0
(* (fabs x) (fma (* x x) 0.6666666666666666 2.0))))))))double code(double x) {
double t_0 = ((x * x) * x) * x;
return fabs(((1.0 / sqrt(((double) M_PI))) * fma((t_0 * fabs(x)), ((x * x) * 0.047619047619047616), fma((0.2 * fabs(x)), t_0, (fabs(x) * fma((x * x), 0.6666666666666666, 2.0))))));
}
function code(x) t_0 = Float64(Float64(Float64(x * x) * x) * x) return abs(Float64(Float64(1.0 / sqrt(pi)) * fma(Float64(t_0 * abs(x)), Float64(Float64(x * x) * 0.047619047619047616), fma(Float64(0.2 * abs(x)), t_0, Float64(abs(x) * fma(Float64(x * x), 0.6666666666666666, 2.0)))))) end
code[x_] := Block[{t$95$0 = N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]}, N[Abs[N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[(N[(t$95$0 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.047619047619047616), $MachinePrecision] + N[(N[(0.2 * N[Abs[x], $MachinePrecision]), $MachinePrecision] * t$95$0 + N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.6666666666666666 + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
t_0 := \left(\left(x \cdot x\right) \cdot x\right) \cdot x\\
\left|\frac{1}{\sqrt{\pi}} \cdot \mathsf{fma}\left(t\_0 \cdot \left|x\right|, \left(x \cdot x\right) \cdot 0.047619047619047616, \mathsf{fma}\left(0.2 \cdot \left|x\right|, t\_0, \left|x\right| \cdot \mathsf{fma}\left(x \cdot x, 0.6666666666666666, 2\right)\right)\right)\right|
\end{array}
Initial program 99.8%
Applied rewrites 99.8%
(FPCore (x)
:precision binary64
(let* ((t_0 (* (* x x) x)))
(*
(/ 1.0 (sqrt PI))
(fabs
(fma
(fabs x)
(fma (* 0.2 (* x x)) (* x x) (* (* t_0 t_0) 0.047619047619047616))
(* (fabs x) (fma (* x x) 0.6666666666666666 2.0)))))))double code(double x) {
double t_0 = (x * x) * x;
return (1.0 / sqrt(((double) M_PI))) * fabs(fma(fabs(x), fma((0.2 * (x * x)), (x * x), ((t_0 * t_0) * 0.047619047619047616)), (fabs(x) * fma((x * x), 0.6666666666666666, 2.0))));
}
function code(x) t_0 = Float64(Float64(x * x) * x) return Float64(Float64(1.0 / sqrt(pi)) * abs(fma(abs(x), fma(Float64(0.2 * Float64(x * x)), Float64(x * x), Float64(Float64(t_0 * t_0) * 0.047619047619047616)), Float64(abs(x) * fma(Float64(x * x), 0.6666666666666666, 2.0))))) end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]}, N[(N[(1.0 / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * N[Abs[N[(N[Abs[x], $MachinePrecision] * N[(N[(0.2 * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(x * x), $MachinePrecision] + N[(N[(t$95$0 * t$95$0), $MachinePrecision] * 0.047619047619047616), $MachinePrecision]), $MachinePrecision] + N[(N[Abs[x], $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * 0.6666666666666666 + 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot x\\
\frac{1}{\sqrt{\pi}} \cdot \left|\mathsf{fma}\left(\left|x\right|, \mathsf{fma}\left(0.2 \cdot \left(x \cdot x\right), x \cdot x, \left(t\_0 \cdot t\_0\right) \cdot 0.047619047619047616\right), \left|x\right| \cdot \mathsf{fma}\left(x \cdot x, 0.6666666666666666, 2\right)\right)\right|
\end{array}
Initial program 99.8%
Applied rewrites 99.8%
(FPCore (x)
:precision binary64
(*
(fabs
(fma
(* (fma (* 0.047619047619047616 x) x 0.2) (* (* (* (fabs x) x) x) x))
x
(* (fma (* 0.6666666666666666 x) x 2.0) (fabs x))))
0.5641895835477563))double code(double x) {
return fabs(fma((fma((0.047619047619047616 * x), x, 0.2) * (((fabs(x) * x) * x) * x)), x, (fma((0.6666666666666666 * x), x, 2.0) * fabs(x)))) * 0.5641895835477563;
}
function code(x) return Float64(abs(fma(Float64(fma(Float64(0.047619047619047616 * x), x, 0.2) * Float64(Float64(Float64(abs(x) * x) * x) * x)), x, Float64(fma(Float64(0.6666666666666666 * x), x, 2.0) * abs(x)))) * 0.5641895835477563) end
code[x_] := N[(N[Abs[N[(N[(N[(N[(0.047619047619047616 * x), $MachinePrecision] * x + 0.2), $MachinePrecision] * N[(N[(N[(N[Abs[x], $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * x + N[(N[(N[(0.6666666666666666 * x), $MachinePrecision] * x + 2.0), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * 0.5641895835477563), $MachinePrecision]
\left|\mathsf{fma}\left(\mathsf{fma}\left(0.047619047619047616 \cdot x, x, 0.2\right) \cdot \left(\left(\left(\left|x\right| \cdot x\right) \cdot x\right) \cdot x\right), x, \mathsf{fma}\left(0.6666666666666666 \cdot x, x, 2\right) \cdot \left|x\right|\right)\right| \cdot 0.5641895835477563
Initial program 99.8%
Applied rewrites99.8%
Applied rewrites99.8%
Evaluated real constant99.8%
lift-*.f64N/A
*-commutativeN/A
lower-*.f6499.8
Applied rewrites99.8%
(FPCore (x)
:precision binary64
(*
0.5641895835477563
(fabs
(fma
(fma (* 0.047619047619047616 x) x 0.2)
(* (* (* (fabs x) x) x) (* x x))
(* (fma (* 0.6666666666666666 x) x 2.0) (fabs x))))))double code(double x) {
return 0.5641895835477563 * fabs(fma(fma((0.047619047619047616 * x), x, 0.2), (((fabs(x) * x) * x) * (x * x)), (fma((0.6666666666666666 * x), x, 2.0) * fabs(x))));
}
function code(x) return Float64(0.5641895835477563 * abs(fma(fma(Float64(0.047619047619047616 * x), x, 0.2), Float64(Float64(Float64(abs(x) * x) * x) * Float64(x * x)), Float64(fma(Float64(0.6666666666666666 * x), x, 2.0) * abs(x))))) end
code[x_] := N[(0.5641895835477563 * N[Abs[N[(N[(N[(0.047619047619047616 * x), $MachinePrecision] * x + 0.2), $MachinePrecision] * N[(N[(N[(N[Abs[x], $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(0.6666666666666666 * x), $MachinePrecision] * x + 2.0), $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
0.5641895835477563 \cdot \left|\mathsf{fma}\left(\mathsf{fma}\left(0.047619047619047616 \cdot x, x, 0.2\right), \left(\left(\left|x\right| \cdot x\right) \cdot x\right) \cdot \left(x \cdot x\right), \mathsf{fma}\left(0.6666666666666666 \cdot x, x, 2\right) \cdot \left|x\right|\right)\right|
Initial program 99.8%
Applied rewrites99.8%
Applied rewrites99.8%
Evaluated real constant99.8%
lift-fma.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift-fma.f64N/A
*-commutativeN/A
*-commutativeN/A
lift-fabs.f64N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites99.8%
(FPCore (x)
:precision binary64
(*
0.5641895835477563
(fabs
(fma
(* (* (fabs x) x) (* (* x x) x))
(fma (* 0.047619047619047616 x) x 0.2)
(* 2.0 (fabs x))))))double code(double x) {
return 0.5641895835477563 * fabs(fma(((fabs(x) * x) * ((x * x) * x)), fma((0.047619047619047616 * x), x, 0.2), (2.0 * fabs(x))));
}
function code(x) return Float64(0.5641895835477563 * abs(fma(Float64(Float64(abs(x) * x) * Float64(Float64(x * x) * x)), fma(Float64(0.047619047619047616 * x), x, 0.2), Float64(2.0 * abs(x))))) end
code[x_] := N[(0.5641895835477563 * N[Abs[N[(N[(N[(N[Abs[x], $MachinePrecision] * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * N[(N[(0.047619047619047616 * x), $MachinePrecision] * x + 0.2), $MachinePrecision] + N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
0.5641895835477563 \cdot \left|\mathsf{fma}\left(\left(\left|x\right| \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot x\right), \mathsf{fma}\left(0.047619047619047616 \cdot x, x, 0.2\right), 2 \cdot \left|x\right|\right)\right|
Initial program 99.8%
Applied rewrites99.8%
Applied rewrites99.8%
Evaluated real constant99.8%
Taylor expanded in x around 0
Applied rewrites99.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (fabs (fabs x))))
(if (<= (fabs x) 0.32)
(fabs (* t_0 1.1283791670955126))
(fabs (/ (* (pow t_0 7.0) 0.047619047619047616) (sqrt PI))))))double code(double x) {
double t_0 = fabs(fabs(x));
double tmp;
if (fabs(x) <= 0.32) {
tmp = fabs((t_0 * 1.1283791670955126));
} else {
tmp = fabs(((pow(t_0, 7.0) * 0.047619047619047616) / sqrt(((double) M_PI))));
}
return tmp;
}
public static double code(double x) {
    // Two-regime approximation: linear term near zero, |x|^7 tail otherwise.
    final double mag = Math.abs(Math.abs(x));
    if (Math.abs(x) <= 0.32) {
        return Math.abs(mag * 1.1283791670955126);
    }
    return Math.abs((Math.pow(mag, 7.0) * 0.047619047619047616) / Math.sqrt(Math.PI));
}
def code(x): t_0 = math.fabs(math.fabs(x)) tmp = 0 if math.fabs(x) <= 0.32: tmp = math.fabs((t_0 * 1.1283791670955126)) else: tmp = math.fabs(((math.pow(t_0, 7.0) * 0.047619047619047616) / math.sqrt(math.pi))) return tmp
function code(x) t_0 = abs(abs(x)) tmp = 0.0 if (abs(x) <= 0.32) tmp = abs(Float64(t_0 * 1.1283791670955126)); else tmp = abs(Float64(Float64((t_0 ^ 7.0) * 0.047619047619047616) / sqrt(pi))); end return tmp end
function tmp_2 = code(x) t_0 = abs(abs(x)); tmp = 0.0; if (abs(x) <= 0.32) tmp = abs((t_0 * 1.1283791670955126)); else tmp = abs((((t_0 ^ 7.0) * 0.047619047619047616) / sqrt(pi))); end tmp_2 = tmp; end
code[x_] := Block[{t$95$0 = N[Abs[N[Abs[x], $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.32], N[Abs[N[(t$95$0 * 1.1283791670955126), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[(N[Power[t$95$0, 7.0], $MachinePrecision] * 0.047619047619047616), $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
t_0 := \left|\left|x\right|\right|\\
\mathbf{if}\;\left|x\right| \leq 0.32:\\
\;\;\;\;\left|t\_0 \cdot 1.1283791670955126\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\frac{{t\_0}^{7} \cdot 0.047619047619047616}{\sqrt{\pi}}\right|\\
\end{array}
if x < 0.320000000000000007: Initial program 99.8%
Applied rewrites99.8%
Taylor expanded in x around 0
lower-*.f64N/A
lower-/.f64N/A
lower-fabs.f64N/A
lower-sqrt.f64N/A
lower-PI.f6467.2
Applied rewrites67.2%
Evaluated real constant67.5%
lift-*.f64N/A
count-2-revN/A
lift-/.f64N/A
mult-flipN/A
lift-/.f64N/A
mult-flipN/A
distribute-lft-outN/A
lower-*.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-eval67.7
Applied rewrites67.7%
if 0.320000000000000007 < x Initial program 99.8%
Applied rewrites99.8%
Taylor expanded in x around inf
lower-*.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-fabs.f64N/A
lower-sqrt.f64N/A
lower-PI.f6436.9
Applied rewrites36.9%
Applied rewrites36.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (fabs (fabs x))))
(if (<= (fabs x) 0.32)
(fabs (* t_0 1.1283791670955126))
(fabs (* (/ (pow t_0 7.0) (sqrt PI)) 0.047619047619047616)))))double code(double x) {
double t_0 = fabs(fabs(x));
double tmp;
if (fabs(x) <= 0.32) {
tmp = fabs((t_0 * 1.1283791670955126));
} else {
tmp = fabs(((pow(t_0, 7.0) / sqrt(((double) M_PI))) * 0.047619047619047616));
}
return tmp;
}
public static double code(double x) {
    // Same two-regime form as the sibling variant, but the tail multiplies by
    // the 1/21 constant after dividing by sqrt(pi).
    final double mag = Math.abs(Math.abs(x));
    if (Math.abs(x) <= 0.32) {
        return Math.abs(mag * 1.1283791670955126);
    }
    return Math.abs((Math.pow(mag, 7.0) / Math.sqrt(Math.PI)) * 0.047619047619047616);
}
def code(x): t_0 = math.fabs(math.fabs(x)) tmp = 0 if math.fabs(x) <= 0.32: tmp = math.fabs((t_0 * 1.1283791670955126)) else: tmp = math.fabs(((math.pow(t_0, 7.0) / math.sqrt(math.pi)) * 0.047619047619047616)) return tmp
function code(x) t_0 = abs(abs(x)) tmp = 0.0 if (abs(x) <= 0.32) tmp = abs(Float64(t_0 * 1.1283791670955126)); else tmp = abs(Float64(Float64((t_0 ^ 7.0) / sqrt(pi)) * 0.047619047619047616)); end return tmp end
function tmp_2 = code(x) t_0 = abs(abs(x)); tmp = 0.0; if (abs(x) <= 0.32) tmp = abs((t_0 * 1.1283791670955126)); else tmp = abs((((t_0 ^ 7.0) / sqrt(pi)) * 0.047619047619047616)); end tmp_2 = tmp; end
code[x_] := Block[{t$95$0 = N[Abs[N[Abs[x], $MachinePrecision]], $MachinePrecision]}, If[LessEqual[N[Abs[x], $MachinePrecision], 0.32], N[Abs[N[(t$95$0 * 1.1283791670955126), $MachinePrecision]], $MachinePrecision], N[Abs[N[(N[(N[Power[t$95$0, 7.0], $MachinePrecision] / N[Sqrt[Pi], $MachinePrecision]), $MachinePrecision] * 0.047619047619047616), $MachinePrecision]], $MachinePrecision]]]
\begin{array}{l}
t_0 := \left|\left|x\right|\right|\\
\mathbf{if}\;\left|x\right| \leq 0.32:\\
\;\;\;\;\left|t\_0 \cdot 1.1283791670955126\right|\\
\mathbf{else}:\\
\;\;\;\;\left|\frac{{t\_0}^{7}}{\sqrt{\pi}} \cdot 0.047619047619047616\right|\\
\end{array}
if x < 0.320000000000000007Initial program 99.8%
Applied rewrites99.8%
Taylor expanded in x around 0
lower-*.f64N/A
lower-/.f64N/A
lower-fabs.f64N/A
lower-sqrt.f64N/A
lower-PI.f6467.2
Applied rewrites67.2%
Evaluated real constant67.5%
lift-*.f64N/A
count-2-revN/A
lift-/.f64N/A
mult-flipN/A
lift-/.f64N/A
mult-flipN/A
distribute-lft-outN/A
lower-*.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-eval67.7
Applied rewrites67.7%
if 0.320000000000000007 < x Initial program 99.8%
Applied rewrites99.8%
Taylor expanded in x around inf
lower-*.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-fabs.f64N/A
lower-sqrt.f64N/A
lower-PI.f6436.9
Applied rewrites36.9%
lift-*.f64N/A
*-commutativeN/A
lower-*.f6436.9
lift-*.f64N/A
lift-pow.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow-prod-downN/A
lift-fabs.f64N/A
rem-sqrt-square-revN/A
pow1/2N/A
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
sqrt-pow2N/A
rem-sqrt-square-revN/A
lift-fabs.f64N/A
lift-pow.f6436.9
Applied rewrites36.9%
(FPCore (x) :precision binary64 (fabs (/ (fma (pow (fabs x) 7.0) -0.047619047619047616 (* -2.0 (fabs x))) -1.772453850905516)))
double code(double x) {
return fabs((fma(pow(fabs(x), 7.0), -0.047619047619047616, (-2.0 * fabs(x))) / -1.772453850905516));
}
function code(x) return abs(Float64(fma((abs(x) ^ 7.0), -0.047619047619047616, Float64(-2.0 * abs(x))) / -1.772453850905516)) end
code[x_] := N[Abs[N[(N[(N[Power[N[Abs[x], $MachinePrecision], 7.0], $MachinePrecision] * -0.047619047619047616 + N[(-2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / -1.772453850905516), $MachinePrecision]], $MachinePrecision]
\left|\frac{\mathsf{fma}\left({\left(\left|x\right|\right)}^{7}, -0.047619047619047616, -2 \cdot \left|x\right|\right)}{-1.772453850905516}\right|
Initial program 99.8%
Applied rewrites99.4%
Taylor expanded in x around 0
lower--.f64N/A
lower-*.f64N/A
lower-pow.f64N/A
lower-fabs.f64N/A
lower-*.f64N/A
lower-fabs.f6498.4
Applied rewrites98.4%
lift--.f64N/A
sub-flipN/A
lift-*.f64N/A
*-commutativeN/A
lift-pow.f64N/A
lift-fabs.f64N/A
rem-sqrt-square-revN/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
pow-prod-upN/A
pow-prod-downN/A
pow-prod-upN/A
metadata-evalN/A
lift-pow.f64N/A
pow1/2N/A
rem-sqrt-square-revN/A
lift-fabs.f64N/A
lift-*.f64N/A
lower-fma.f64N/A
Applied rewrites98.4%
Evaluated real constant98.6%
(FPCore (x) :precision binary64 (if (<= (fabs x) 5e-25) (fabs (* (fabs (fabs x)) 1.1283791670955126)) (fabs (* 2.0 (sqrt (/ (* (fabs x) (fabs x)) PI))))))
double code(double x) {
    /* Tiny-|x| regime uses the linear term 2/sqrt(pi)*|x|;
     * otherwise 2*sqrt(x^2/pi).  Early returns replace the tmp variable. */
    const double mag = fabs(x);
    if (mag <= 5e-25) {
        return fabs(fabs(mag) * 1.1283791670955126);
    }
    return fabs(2.0 * sqrt((mag * mag) / (double) M_PI));
}
public static double code(double x) {
    // Below 5e-25 use the linear term 2/sqrt(pi)*|x|; otherwise 2*sqrt(x^2/pi).
    final double mag = Math.abs(x);
    if (mag <= 5e-25) {
        return Math.abs(Math.abs(mag) * 1.1283791670955126);
    }
    return Math.abs(2.0 * Math.sqrt((mag * mag) / Math.PI));
}
def code(x): tmp = 0 if math.fabs(x) <= 5e-25: tmp = math.fabs((math.fabs(math.fabs(x)) * 1.1283791670955126)) else: tmp = math.fabs((2.0 * math.sqrt(((math.fabs(x) * math.fabs(x)) / math.pi)))) return tmp
function code(x) tmp = 0.0 if (abs(x) <= 5e-25) tmp = abs(Float64(abs(abs(x)) * 1.1283791670955126)); else tmp = abs(Float64(2.0 * sqrt(Float64(Float64(abs(x) * abs(x)) / pi)))); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (abs(x) <= 5e-25) tmp = abs((abs(abs(x)) * 1.1283791670955126)); else tmp = abs((2.0 * sqrt(((abs(x) * abs(x)) / pi)))); end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[Abs[x], $MachinePrecision], 5e-25], N[Abs[N[(N[Abs[N[Abs[x], $MachinePrecision]], $MachinePrecision] * 1.1283791670955126), $MachinePrecision]], $MachinePrecision], N[Abs[N[(2.0 * N[Sqrt[N[(N[(N[Abs[x], $MachinePrecision] * N[Abs[x], $MachinePrecision]), $MachinePrecision] / Pi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\left|x\right| \leq 5 \cdot 10^{-25}:\\
\;\;\;\;\left|\left|\left|x\right|\right| \cdot 1.1283791670955126\right|\\
\mathbf{else}:\\
\;\;\;\;\left|2 \cdot \sqrt{\frac{\left|x\right| \cdot \left|x\right|}{\pi}}\right|\\
\end{array}
if x < 4.99999999999999962e-25Initial program 99.8%
Applied rewrites99.8%
Taylor expanded in x around 0
lower-*.f64N/A
lower-/.f64N/A
lower-fabs.f64N/A
lower-sqrt.f64N/A
lower-PI.f6467.2
Applied rewrites67.2%
Evaluated real constant67.5%
lift-*.f64N/A
count-2-revN/A
lift-/.f64N/A
mult-flipN/A
lift-/.f64N/A
mult-flipN/A
distribute-lft-outN/A
lower-*.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-eval67.7
Applied rewrites67.7%
if 4.99999999999999962e-25 < x Initial program 99.8%
Applied rewrites99.8%
Taylor expanded in x around 0
lower-*.f64N/A
lower-/.f64N/A
lower-fabs.f64N/A
lower-sqrt.f64N/A
lower-PI.f6467.2
Applied rewrites67.2%
lift-/.f64N/A
lift-fabs.f64N/A
rem-sqrt-square-revN/A
lift-sqrt.f64N/A
sqrt-undivN/A
lower-sqrt.f64N/A
lower-/.f64N/A
lift-*.f6453.0
Applied rewrites53.0%
(FPCore (x) :precision binary64 (* 0.5641895835477563 (fabs (* 2.0 (fabs x)))))
double code(double x) {
    /* 0.5641895835477563 == 1/sqrt(pi); the value is |2x| / sqrt(pi). */
    const double doubled = 2.0 * fabs(x);
    return 0.5641895835477563 * fabs(doubled);
}
! NaN-aware fmax/fmin helpers for mixed real(4)/real(8) arguments, mirroring
! C99 fmax/fmin semantics: when one argument is NaN, the other argument is
! returned (the Fortran intrinsics max/min do not guarantee this).  The NaN
! test used throughout is the standard x /= x idiom.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on the kind combination of the two arguments
! (88 = both real(8), 44 = both real(4), 84/48 = mixed kinds).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! max of two real(8); yields the non-NaN operand when the other is NaN.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of two real(4); yields the non-NaN operand when the other is NaN.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of real(8) and real(4); promotes y to double, result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! max of real(4) and real(8); promotes x to double, result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! min of two real(8); yields the non-NaN operand when the other is NaN.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! min of two real(4); yields the non-NaN operand when the other is NaN.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! min of real(8) and real(4); promotes y to double, result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! min of real(4) and real(8); promotes x to double, result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Scaled absolute value: |2x| / sqrt(pi), where 0.5641895835477563 = 1/sqrt(pi).
! (fmin_fmax_functions is pulled in for report consistency; this body only
! uses abs.)
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = 0.5641895835477563d0 * abs((2.0d0 * abs(x)))
end function
public static double code(double x) {
    // 0.5641895835477563 == 1/sqrt(pi); the value is |2x| / sqrt(pi).
    final double scaled = Math.abs(2.0 * Math.abs(x));
    return 0.5641895835477563 * scaled;
}
def code(x): return 0.5641895835477563 * math.fabs((2.0 * math.fabs(x)))
function code(x) return Float64(0.5641895835477563 * abs(Float64(2.0 * abs(x)))) end
function tmp = code(x) tmp = 0.5641895835477563 * abs((2.0 * abs(x))); end
code[x_] := N[(0.5641895835477563 * N[Abs[N[(2.0 * N[Abs[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
0.5641895835477563 \cdot \left|2 \cdot \left|x\right|\right|
Initial program 99.8%
Applied rewrites99.8%
Applied rewrites99.8%
Evaluated real constant99.8%
Taylor expanded in x around 0
lower-*.f64N/A
lower-fabs.f6467.7
Applied rewrites67.7%
(FPCore (x) :precision binary64 (fabs (* (fabs x) 1.1283791670955126)))
double code(double x) {
    /* 1.1283791670955126 == 2/sqrt(pi); linear small-|x| approximation. */
    return fabs(1.1283791670955126 * fabs(x));
}
! NaN-aware fmax/fmin helpers (C99 fmax/fmin semantics): when one argument is
! NaN, the other argument is returned.  x /= x is the NaN test used throughout.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on the argument kinds (88/44 = same kind,
! 84/48 = mixed real(8)/real(4), promoted to real(8)).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! max of two real(8), NaN-aware.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of two real(4), NaN-aware.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of real(8) and real(4); y promoted to double.
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! max of real(4) and real(8); x promoted to double.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! min of two real(8), NaN-aware.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! min of two real(4), NaN-aware.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! min of real(8) and real(4); y promoted to double.
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! min of real(4) and real(8); x promoted to double.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Linear small-|x| approximation: |x| * 2/sqrt(pi),
! where 1.1283791670955126 = 2/sqrt(pi).  (The used module's helpers are not
! needed by this body; the use statement matches the report template.)
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = abs((abs(x) * 1.1283791670955126d0))
end function
public static double code(double x) {
    // 1.1283791670955126 == 2/sqrt(pi); linear small-|x| approximation.
    final double mag = Math.abs(x);
    return Math.abs(mag * 1.1283791670955126);
}
def code(x): return math.fabs((math.fabs(x) * 1.1283791670955126))
function code(x) return abs(Float64(abs(x) * 1.1283791670955126)) end
function tmp = code(x) tmp = abs((abs(x) * 1.1283791670955126)); end
code[x_] := N[Abs[N[(N[Abs[x], $MachinePrecision] * 1.1283791670955126), $MachinePrecision]], $MachinePrecision]
\left|\left|x\right| \cdot 1.1283791670955126\right|
Initial program 99.8%
Applied rewrites99.8%
Taylor expanded in x around 0
lower-*.f64N/A
lower-/.f64N/A
lower-fabs.f64N/A
lower-sqrt.f64N/A
lower-PI.f6467.2
Applied rewrites67.2%
Evaluated real constant67.5%
lift-*.f64N/A
count-2-revN/A
lift-/.f64N/A
mult-flipN/A
lift-/.f64N/A
mult-flipN/A
distribute-lft-outN/A
lower-*.f64N/A
metadata-evalN/A
metadata-evalN/A
metadata-eval67.7
Applied rewrites67.7%
herbie shell --seed 2025173
(FPCore (x)
:name "Jmat.Real.erfi, branch x less than or equal to 0.5"
:precision binary64
:pre (<= x 0.5)
(fabs (* (/ 1.0 (sqrt PI)) (+ (+ (+ (* 2.0 (fabs x)) (* (/ 2.0 3.0) (* (* (fabs x) (fabs x)) (fabs x)))) (* (/ 1.0 5.0) (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)))) (* (/ 1.0 21.0) (* (* (* (* (* (* (fabs x) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)) (fabs x)))))))