
(FPCore (x.re x.im) :precision binary64 (+ (* (- (* x.re x.re) (* x.im x.im)) x.im) (* (+ (* x.re x.im) (* x.im x.re)) x.re)))
double code(double x_46_re, double x_46_im) {
// Imaginary part of (x.re + i*x.im)^3, evaluated in the same order as the
// FPCore expression above: (re^2 - im^2)*im + (re*im + im*re)*re.
double diff_of_squares = (x_46_re * x_46_re) - (x_46_im * x_46_im);
double twice_cross = (x_46_re * x_46_im) + (x_46_im * x_46_re);
return diff_of_squares * x_46_im + twice_cross * x_46_re;
}
real(8) function code(x_46re, x_46im)
! Imaginary part of the complex cube (x.re + i*x.im)**3, evaluated as
! (re**2 - im**2)*im + (re*im + im*re)*re (= 3*re**2*im - im**3).
real(8), intent (in) :: x_46re
real(8), intent (in) :: x_46im
code = (((x_46re * x_46re) - (x_46im * x_46im)) * x_46im) + (((x_46re * x_46im) + (x_46im * x_46re)) * x_46re)
end function
public static double code(double x_46_re, double x_46_im) {
// Imaginary part of (x.re + i*x.im)^3, same operation order as the
// FPCore expression: (re^2 - im^2)*im + (re*im + im*re)*re.
double diffOfSquares = (x_46_re * x_46_re) - (x_46_im * x_46_im);
double twiceCross = (x_46_re * x_46_im) + (x_46_im * x_46_re);
return diffOfSquares * x_46_im + twiceCross * x_46_re;
}
def code(x_46_re, x_46_im):
    # Imaginary part of (re + i*im)**3, preserving the original operation
    # order: (re^2 - im^2)*im + (re*im + im*re)*re.
    diff_of_squares = (x_46_re * x_46_re) - (x_46_im * x_46_im)
    twice_cross = (x_46_re * x_46_im) + (x_46_im * x_46_re)
    return diff_of_squares * x_46_im + twice_cross * x_46_re
# Imaginary part of (x.re + i*x.im)^3 in binary64, matching the FPCore above.
function code(x_46_re, x_46_im) return Float64(Float64(Float64(Float64(x_46_re * x_46_re) - Float64(x_46_im * x_46_im)) * x_46_im) + Float64(Float64(Float64(x_46_re * x_46_im) + Float64(x_46_im * x_46_re)) * x_46_re)) end
% Imaginary part of (x.re + i*x.im)^3, matching the FPCore above.
function tmp = code(x_46_re, x_46_im) tmp = (((x_46_re * x_46_re) - (x_46_im * x_46_im)) * x_46_im) + (((x_46_re * x_46_im) + (x_46_im * x_46_re)) * x_46_re); end
(* Imaginary part of (x.re + i*x.im)^3; each N[..., $MachinePrecision] models one binary64 rounding. *)
code[x$46$re_, x$46$im_] := N[(N[(N[(N[(x$46$re * x$46$re), $MachinePrecision] - N[(x$46$im * x$46$im), $MachinePrecision]), $MachinePrecision] * x$46$im), $MachinePrecision] + N[(N[(N[(x$46$re * x$46$im), $MachinePrecision] + N[(x$46$im * x$46$re), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x.re \cdot x.re - x.im \cdot x.im\right) \cdot x.im + \left(x.re \cdot x.im + x.im \cdot x.re\right) \cdot x.re
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x.re x.im) :precision binary64 (+ (* (- (* x.re x.re) (* x.im x.im)) x.im) (* (+ (* x.re x.im) (* x.im x.re)) x.re)))
// Imaginary part of the complex cube (x.re + i*x.im)^3, computed as
// (re^2 - im^2)*im + (re*im + im*re)*re.
double code(double x_46_re, double x_46_im) {
return (((x_46_re * x_46_re) - (x_46_im * x_46_im)) * x_46_im) + (((x_46_re * x_46_im) + (x_46_im * x_46_re)) * x_46_re);
}
real(8) function code(x_46re, x_46im)
! Imaginary part of the complex cube (x.re + i*x.im)**3:
! (re**2 - im**2)*im + (re*im + im*re)*re.
real(8), intent (in) :: x_46re
real(8), intent (in) :: x_46im
code = (((x_46re * x_46re) - (x_46im * x_46im)) * x_46im) + (((x_46re * x_46im) + (x_46im * x_46re)) * x_46re)
end function
// Imaginary part of the complex cube (x.re + i*x.im)^3, computed as
// (re^2 - im^2)*im + (re*im + im*re)*re.
public static double code(double x_46_re, double x_46_im) {
return (((x_46_re * x_46_re) - (x_46_im * x_46_im)) * x_46_im) + (((x_46_re * x_46_im) + (x_46_im * x_46_re)) * x_46_re);
}
# Imaginary part of the complex cube (x.re + i*x.im)**3.
def code(x_46_re, x_46_im): return (((x_46_re * x_46_re) - (x_46_im * x_46_im)) * x_46_im) + (((x_46_re * x_46_im) + (x_46_im * x_46_re)) * x_46_re)
# Imaginary part of (x.re + i*x.im)^3 in binary64, matching the FPCore above.
function code(x_46_re, x_46_im) return Float64(Float64(Float64(Float64(x_46_re * x_46_re) - Float64(x_46_im * x_46_im)) * x_46_im) + Float64(Float64(Float64(x_46_re * x_46_im) + Float64(x_46_im * x_46_re)) * x_46_re)) end
% Imaginary part of (x.re + i*x.im)^3, matching the FPCore above.
function tmp = code(x_46_re, x_46_im) tmp = (((x_46_re * x_46_re) - (x_46_im * x_46_im)) * x_46_im) + (((x_46_re * x_46_im) + (x_46_im * x_46_re)) * x_46_re); end
(* Imaginary part of (x.re + i*x.im)^3; each N[..., $MachinePrecision] models one binary64 rounding. *)
code[x$46$re_, x$46$im_] := N[(N[(N[(N[(x$46$re * x$46$re), $MachinePrecision] - N[(x$46$im * x$46$im), $MachinePrecision]), $MachinePrecision] * x$46$im), $MachinePrecision] + N[(N[(N[(x$46$re * x$46$im), $MachinePrecision] + N[(x$46$im * x$46$re), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x.re \cdot x.re - x.im \cdot x.im\right) \cdot x.im + \left(x.re \cdot x.im + x.im \cdot x.re\right) \cdot x.re
\end{array}
x.im\_m = (fabs.f64 x.im)
x.im\_s = (copysign.f64 #s(literal 1 binary64) x.im)
(FPCore (x.im_s x.re x.im_m)
:precision binary64
(let* ((t_0
(+
(* (- (* x.re x.re) (* x.im_m x.im_m)) x.im_m)
(* (+ (* x.re x.im_m) (* x.im_m x.re)) x.re))))
(*
x.im_s
(if (<= t_0 1e-307)
(* (- x.im_m) (fma x.im_m x.im_m (* -3.0 (* x.re x.re))))
(if (<= t_0 INFINITY)
(* (* x.re x.im_m) (* 3.0 x.re))
(fma (- x.re x.im_m) (* x.im_m (+ x.re x.im_m)) (* 2.0 x.im_m)))))))x.im\_m = fabs(x_46_im);
x.im\_s = copysign(1.0, x_46_im);
// Branch-specialized evaluation of the cube's imaginary part. Inputs are
// pre-split by the caller: x_46_im_m = |x.im|, x_46_im_s = copysign(1, x.im);
// the sign is reapplied at the end so all branches work with x.im >= 0.
double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// t_0 re-evaluates the original expression only to select a regime.
double t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re);
double tmp;
if (t_0 <= 1e-307) {
// Tiny/negative regime: Taylor expansion around x.re = 0 (per the trace below).
tmp = -x_46_im_m * fma(x_46_im_m, x_46_im_m, (-3.0 * (x_46_re * x_46_re)));
} else if (t_0 <= ((double) INFINITY)) {
// Finite regime (expansion around x.re = inf). Note t_0 <= INFINITY holds
// for every non-NaN t_0, so the final arm runs only when t_0 is NaN.
tmp = (x_46_re * x_46_im_m) * (3.0 * x_46_re);
} else {
// t_0 was NaN (e.g. inf - inf inside t_0); fma-based rearrangement.
tmp = fma((x_46_re - x_46_im_m), (x_46_im_m * (x_46_re + x_46_im_m)), (2.0 * x_46_im_m));
}
return x_46_im_s * tmp;
}
x.im\_m = abs(x_46_im) x.im\_s = copysign(1.0, x_46_im) function code(x_46_im_s, x_46_re, x_46_im_m) t_0 = Float64(Float64(Float64(Float64(x_46_re * x_46_re) - Float64(x_46_im_m * x_46_im_m)) * x_46_im_m) + Float64(Float64(Float64(x_46_re * x_46_im_m) + Float64(x_46_im_m * x_46_re)) * x_46_re)) tmp = 0.0 if (t_0 <= 1e-307) tmp = Float64(Float64(-x_46_im_m) * fma(x_46_im_m, x_46_im_m, Float64(-3.0 * Float64(x_46_re * x_46_re)))); elseif (t_0 <= Inf) tmp = Float64(Float64(x_46_re * x_46_im_m) * Float64(3.0 * x_46_re)); else tmp = fma(Float64(x_46_re - x_46_im_m), Float64(x_46_im_m * Float64(x_46_re + x_46_im_m)), Float64(2.0 * x_46_im_m)); end return Float64(x_46_im_s * tmp) end
x.im\_m = N[Abs[x$46$im], $MachinePrecision]
x.im\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x$46$im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
code[x$46$im$95$s_, x$46$re_, x$46$im$95$m_] := Block[{t$95$0 = N[(N[(N[(N[(x$46$re * x$46$re), $MachinePrecision] - N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision] * x$46$im$95$m), $MachinePrecision] + N[(N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] + N[(x$46$im$95$m * x$46$re), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision]}, N[(x$46$im$95$s * If[LessEqual[t$95$0, 1e-307], N[((-x$46$im$95$m) * N[(x$46$im$95$m * x$46$im$95$m + N[(-3.0 * N[(x$46$re * x$46$re), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$0, Infinity], N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] * N[(3.0 * x$46$re), $MachinePrecision]), $MachinePrecision], N[(N[(x$46$re - x$46$im$95$m), $MachinePrecision] * N[(x$46$im$95$m * N[(x$46$re + x$46$im$95$m), $MachinePrecision]), $MachinePrecision] + N[(2.0 * x$46$im$95$m), $MachinePrecision]), $MachinePrecision]]]), $MachinePrecision]]
\begin{array}{l}
x.im\_m = \left|x.im\right|
\\
x.im\_s = \mathsf{copysign}\left(1, x.im\right)
\\
\begin{array}{l}
t_0 := \left(x.re \cdot x.re - x.im\_m \cdot x.im\_m\right) \cdot x.im\_m + \left(x.re \cdot x.im\_m + x.im\_m \cdot x.re\right) \cdot x.re\\
x.im\_s \cdot \begin{array}{l}
\mathbf{if}\;t\_0 \leq 10^{-307}:\\
\;\;\;\;\left(-x.im\_m\right) \cdot \mathsf{fma}\left(x.im\_m, x.im\_m, -3 \cdot \left(x.re \cdot x.re\right)\right)\\
\mathbf{elif}\;t\_0 \leq \infty:\\
\;\;\;\;\left(x.re \cdot x.im\_m\right) \cdot \left(3 \cdot x.re\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x.re - x.im\_m, x.im\_m \cdot \left(x.re + x.im\_m\right), 2 \cdot x.im\_m\right)\\
\end{array}
\end{array}
\end{array}
if (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < 9.99999999999999909e-308Initial program 92.9%
Taylor expanded in x.re around 0
+-commutativeN/A
distribute-rgt-inN/A
associate-*r*N/A
count-2-revN/A
distribute-lft-inN/A
count-2-revN/A
distribute-lft-inN/A
fp-cancel-sign-sub-invN/A
mul-1-negN/A
cube-multN/A
unpow2N/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-*.f64N/A
lower-neg.f64N/A
distribute-lft1-inN/A
Applied rewrites92.9%
if 9.99999999999999909e-308 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < +inf.0Initial program 92.8%
Taylor expanded in x.re around inf
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
distribute-rgt1-inN/A
metadata-evalN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f6443.4
Applied rewrites43.4%
Applied rewrites43.5%
if +inf.0 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) Initial program 0.0%
lift-*.f64N/A
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
difference-of-squaresN/A
associate-*l*N/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-*.f64N/A
lower--.f6425.0
Applied rewrites25.0%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-+.f6425.0
Applied rewrites25.0%
Applied rewrites100.0%
x.im\_m = (fabs.f64 x.im)
x.im\_s = (copysign.f64 #s(literal 1 binary64) x.im)
(FPCore (x.im_s x.re x.im_m)
:precision binary64
(let* ((t_0
(+
(* (- (* x.re x.re) (* x.im_m x.im_m)) x.im_m)
(* (+ (* x.re x.im_m) (* x.im_m x.re)) x.re))))
(*
x.im_s
(if (<= t_0 -1e-289)
(* (- x.im_m) (* x.im_m x.im_m))
(if (<= t_0 INFINITY)
(* (* x.re x.im_m) (* 3.0 x.re))
(fma (- x.re x.im_m) (* x.im_m (+ x.re x.im_m)) (* 2.0 x.im_m)))))))x.im\_m = fabs(x_46_im);
x.im\_s = copysign(1.0, x_46_im);
// Branch-specialized cube imaginary part; inputs pre-split as
// x_46_im_m = |x.im|, x_46_im_s = copysign(1, x.im), sign reapplied at return.
double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// t_0 re-evaluates the original expression only to select a regime.
double t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re);
double tmp;
if (t_0 <= -1e-289) {
// Negative regime: Taylor expansion around x.re = 0 reduces to -|im|^3.
tmp = -x_46_im_m * (x_46_im_m * x_46_im_m);
} else if (t_0 <= ((double) INFINITY)) {
// Finite regime (expansion around x.re = inf: 3*re^2*|im|). This test is
// true for every non-NaN t_0, so the final arm runs only when t_0 is NaN.
tmp = (x_46_re * x_46_im_m) * (3.0 * x_46_re);
} else {
// t_0 was NaN; fma-based rearrangement.
tmp = fma((x_46_re - x_46_im_m), (x_46_im_m * (x_46_re + x_46_im_m)), (2.0 * x_46_im_m));
}
return x_46_im_s * tmp;
}
x.im\_m = abs(x_46_im) x.im\_s = copysign(1.0, x_46_im) function code(x_46_im_s, x_46_re, x_46_im_m) t_0 = Float64(Float64(Float64(Float64(x_46_re * x_46_re) - Float64(x_46_im_m * x_46_im_m)) * x_46_im_m) + Float64(Float64(Float64(x_46_re * x_46_im_m) + Float64(x_46_im_m * x_46_re)) * x_46_re)) tmp = 0.0 if (t_0 <= -1e-289) tmp = Float64(Float64(-x_46_im_m) * Float64(x_46_im_m * x_46_im_m)); elseif (t_0 <= Inf) tmp = Float64(Float64(x_46_re * x_46_im_m) * Float64(3.0 * x_46_re)); else tmp = fma(Float64(x_46_re - x_46_im_m), Float64(x_46_im_m * Float64(x_46_re + x_46_im_m)), Float64(2.0 * x_46_im_m)); end return Float64(x_46_im_s * tmp) end
x.im\_m = N[Abs[x$46$im], $MachinePrecision]
x.im\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x$46$im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
code[x$46$im$95$s_, x$46$re_, x$46$im$95$m_] := Block[{t$95$0 = N[(N[(N[(N[(x$46$re * x$46$re), $MachinePrecision] - N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision] * x$46$im$95$m), $MachinePrecision] + N[(N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] + N[(x$46$im$95$m * x$46$re), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision]}, N[(x$46$im$95$s * If[LessEqual[t$95$0, -1e-289], N[((-x$46$im$95$m) * N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision], If[LessEqual[t$95$0, Infinity], N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] * N[(3.0 * x$46$re), $MachinePrecision]), $MachinePrecision], N[(N[(x$46$re - x$46$im$95$m), $MachinePrecision] * N[(x$46$im$95$m * N[(x$46$re + x$46$im$95$m), $MachinePrecision]), $MachinePrecision] + N[(2.0 * x$46$im$95$m), $MachinePrecision]), $MachinePrecision]]]), $MachinePrecision]]
\begin{array}{l}
x.im\_m = \left|x.im\right|
\\
x.im\_s = \mathsf{copysign}\left(1, x.im\right)
\\
\begin{array}{l}
t_0 := \left(x.re \cdot x.re - x.im\_m \cdot x.im\_m\right) \cdot x.im\_m + \left(x.re \cdot x.im\_m + x.im\_m \cdot x.re\right) \cdot x.re\\
x.im\_s \cdot \begin{array}{l}
\mathbf{if}\;t\_0 \leq -1 \cdot 10^{-289}:\\
\;\;\;\;\left(-x.im\_m\right) \cdot \left(x.im\_m \cdot x.im\_m\right)\\
\mathbf{elif}\;t\_0 \leq \infty:\\
\;\;\;\;\left(x.re \cdot x.im\_m\right) \cdot \left(3 \cdot x.re\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x.re - x.im\_m, x.im\_m \cdot \left(x.re + x.im\_m\right), 2 \cdot x.im\_m\right)\\
\end{array}
\end{array}
\end{array}
if (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < -1e-289Initial program 89.3%
Taylor expanded in x.re around 0
+-commutativeN/A
distribute-rgt-inN/A
associate-*r*N/A
count-2-revN/A
distribute-lft-inN/A
count-2-revN/A
distribute-lft-inN/A
fp-cancel-sign-sub-invN/A
mul-1-negN/A
cube-multN/A
unpow2N/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-*.f64N/A
lower-neg.f64N/A
distribute-lft1-inN/A
Applied rewrites89.2%
Taylor expanded in x.re around 0
Applied rewrites46.9%
if -1e-289 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < +inf.0Initial program 95.5%
Taylor expanded in x.re around inf
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
distribute-rgt1-inN/A
metadata-evalN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f6465.2
Applied rewrites65.2%
Applied rewrites65.2%
if +inf.0 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) Initial program 0.0%
lift-*.f64N/A
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
difference-of-squaresN/A
associate-*l*N/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-*.f64N/A
lower--.f6425.0
Applied rewrites25.0%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-+.f6425.0
Applied rewrites25.0%
Applied rewrites100.0%
x.im\_m = (fabs.f64 x.im)
x.im\_s = (copysign.f64 #s(literal 1 binary64) x.im)
(FPCore (x.im_s x.re x.im_m)
:precision binary64
(let* ((t_0
(+
(* (- (* x.re x.re) (* x.im_m x.im_m)) x.im_m)
(* (+ (* x.re x.im_m) (* x.im_m x.re)) x.re))))
(*
x.im_s
(if (or (<= t_0 -1e-289) (not (<= t_0 INFINITY)))
(* (- x.im_m) (* x.im_m x.im_m))
(* (* x.re x.im_m) (* 3.0 x.re))))))x.im\_m = fabs(x_46_im);
x.im\_s = copysign(1.0, x_46_im);
// Two-branch variant: the very-negative and NaN regimes share the -|im|^3 form.
// Inputs pre-split: x_46_im_m = |x.im|, x_46_im_s = copysign(1, x.im).
double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// t_0 re-evaluates the original expression only to select a regime.
double t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re);
double tmp;
if ((t_0 <= -1e-289) || !(t_0 <= ((double) INFINITY))) {
// t_0 very negative, or NaN (the only way t_0 <= INFINITY is false).
tmp = -x_46_im_m * (x_46_im_m * x_46_im_m);
} else {
// Otherwise: expansion around x.re = inf, 3*re^2*|im|.
tmp = (x_46_re * x_46_im_m) * (3.0 * x_46_re);
}
return x_46_im_s * tmp;
}
x.im\_m = Math.abs(x_46_im);
x.im\_s = Math.copySign(1.0, x_46_im);
// Two-branch variant: the very-negative and NaN regimes share the -|im|^3 form.
// Inputs pre-split: x_46_im_m = |x.im|, x_46_im_s = copySign(1, x.im).
public static double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// t_0 re-evaluates the original expression only to select a regime.
double t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re);
double tmp;
if ((t_0 <= -1e-289) || !(t_0 <= Double.POSITIVE_INFINITY)) {
// t_0 very negative, or NaN (the only way t_0 <= +inf is false).
tmp = -x_46_im_m * (x_46_im_m * x_46_im_m);
} else {
// Otherwise: expansion around x.re = inf, 3*re^2*|im|.
tmp = (x_46_re * x_46_im_m) * (3.0 * x_46_re);
}
return x_46_im_s * tmp;
}
x.im\_m = math.fabs(x_46_im) x.im\_s = math.copysign(1.0, x_46_im) def code(x_46_im_s, x_46_re, x_46_im_m): t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re) tmp = 0 if (t_0 <= -1e-289) or not (t_0 <= math.inf): tmp = -x_46_im_m * (x_46_im_m * x_46_im_m) else: tmp = (x_46_re * x_46_im_m) * (3.0 * x_46_re) return x_46_im_s * tmp
x.im\_m = abs(x_46_im) x.im\_s = copysign(1.0, x_46_im) function code(x_46_im_s, x_46_re, x_46_im_m) t_0 = Float64(Float64(Float64(Float64(x_46_re * x_46_re) - Float64(x_46_im_m * x_46_im_m)) * x_46_im_m) + Float64(Float64(Float64(x_46_re * x_46_im_m) + Float64(x_46_im_m * x_46_re)) * x_46_re)) tmp = 0.0 if ((t_0 <= -1e-289) || !(t_0 <= Inf)) tmp = Float64(Float64(-x_46_im_m) * Float64(x_46_im_m * x_46_im_m)); else tmp = Float64(Float64(x_46_re * x_46_im_m) * Float64(3.0 * x_46_re)); end return Float64(x_46_im_s * tmp) end
x.im\_m = abs(x_46_im); x.im\_s = sign(x_46_im) * abs(1.0); function tmp_2 = code(x_46_im_s, x_46_re, x_46_im_m) t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re); tmp = 0.0; if ((t_0 <= -1e-289) || ~((t_0 <= Inf))) tmp = -x_46_im_m * (x_46_im_m * x_46_im_m); else tmp = (x_46_re * x_46_im_m) * (3.0 * x_46_re); end tmp_2 = x_46_im_s * tmp; end
x.im\_m = N[Abs[x$46$im], $MachinePrecision]
x.im\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x$46$im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
code[x$46$im$95$s_, x$46$re_, x$46$im$95$m_] := Block[{t$95$0 = N[(N[(N[(N[(x$46$re * x$46$re), $MachinePrecision] - N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision] * x$46$im$95$m), $MachinePrecision] + N[(N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] + N[(x$46$im$95$m * x$46$re), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision]}, N[(x$46$im$95$s * If[Or[LessEqual[t$95$0, -1e-289], N[Not[LessEqual[t$95$0, Infinity]], $MachinePrecision]], N[((-x$46$im$95$m) * N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision], N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] * N[(3.0 * x$46$re), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]]
\begin{array}{l}
x.im\_m = \left|x.im\right|
\\
x.im\_s = \mathsf{copysign}\left(1, x.im\right)
\\
\begin{array}{l}
t_0 := \left(x.re \cdot x.re - x.im\_m \cdot x.im\_m\right) \cdot x.im\_m + \left(x.re \cdot x.im\_m + x.im\_m \cdot x.re\right) \cdot x.re\\
x.im\_s \cdot \begin{array}{l}
\mathbf{if}\;t\_0 \leq -1 \cdot 10^{-289} \lor \neg \left(t\_0 \leq \infty\right):\\
\;\;\;\;\left(-x.im\_m\right) \cdot \left(x.im\_m \cdot x.im\_m\right)\\
\mathbf{else}:\\
\;\;\;\;\left(x.re \cdot x.im\_m\right) \cdot \left(3 \cdot x.re\right)\\
\end{array}
\end{array}
\end{array}
if (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < -1e-289 or +inf.0 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) Initial program 69.5%
Taylor expanded in x.re around 0
+-commutativeN/A
distribute-rgt-inN/A
associate-*r*N/A
count-2-revN/A
distribute-lft-inN/A
count-2-revN/A
distribute-lft-inN/A
fp-cancel-sign-sub-invN/A
mul-1-negN/A
cube-multN/A
unpow2N/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-*.f64N/A
lower-neg.f64N/A
distribute-lft1-inN/A
Applied rewrites82.9%
Taylor expanded in x.re around 0
Applied rewrites53.2%
if -1e-289 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < +inf.0Initial program 95.5%
Taylor expanded in x.re around inf
*-commutativeN/A
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
*-commutativeN/A
distribute-rgt1-inN/A
metadata-evalN/A
associate-*r*N/A
*-commutativeN/A
lower-*.f64N/A
lower-*.f6465.2
Applied rewrites65.2%
Applied rewrites65.2%
Final simplification59.3%
x.im\_m = (fabs.f64 x.im)
x.im\_s = (copysign.f64 #s(literal 1 binary64) x.im)
(FPCore (x.im_s x.re x.im_m)
:precision binary64
(let* ((t_0
(+
(* (- (* x.re x.re) (* x.im_m x.im_m)) x.im_m)
(* (+ (* x.re x.im_m) (* x.im_m x.re)) x.re))))
(*
x.im_s
(if (or (<= t_0 -1e-289) (not (<= t_0 INFINITY)))
(* (- x.im_m) (* x.im_m x.im_m))
(* (* x.im_m x.re) x.re)))))x.im\_m = fabs(x_46_im);
x.im\_s = copysign(1.0, x_46_im);
// Final-simplification variant: very-negative/NaN regimes give -|im|^3,
// otherwise |im|*re^2 (the factor of 3 was dropped by the final simplification).
// Inputs pre-split: x_46_im_m = |x.im|, x_46_im_s = copysign(1, x.im).
double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// t_0 re-evaluates the original expression only to select a regime.
double t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re);
double tmp;
if ((t_0 <= -1e-289) || !(t_0 <= ((double) INFINITY))) {
// t_0 very negative, or NaN (the only way t_0 <= INFINITY is false).
tmp = -x_46_im_m * (x_46_im_m * x_46_im_m);
} else {
tmp = (x_46_im_m * x_46_re) * x_46_re;
}
return x_46_im_s * tmp;
}
x.im\_m = Math.abs(x_46_im);
x.im\_s = Math.copySign(1.0, x_46_im);
// Final-simplification variant: very-negative/NaN regimes give -|im|^3,
// otherwise |im|*re^2. Inputs pre-split: x_46_im_m = |x.im|,
// x_46_im_s = copySign(1, x.im); sign reapplied at return.
public static double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// t_0 re-evaluates the original expression only to select a regime.
double t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re);
double tmp;
if ((t_0 <= -1e-289) || !(t_0 <= Double.POSITIVE_INFINITY)) {
// t_0 very negative, or NaN (the only way t_0 <= +inf is false).
tmp = -x_46_im_m * (x_46_im_m * x_46_im_m);
} else {
tmp = (x_46_im_m * x_46_re) * x_46_re;
}
return x_46_im_s * tmp;
}
x.im\_m = math.fabs(x_46_im) x.im\_s = math.copysign(1.0, x_46_im) def code(x_46_im_s, x_46_re, x_46_im_m): t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re) tmp = 0 if (t_0 <= -1e-289) or not (t_0 <= math.inf): tmp = -x_46_im_m * (x_46_im_m * x_46_im_m) else: tmp = (x_46_im_m * x_46_re) * x_46_re return x_46_im_s * tmp
x.im\_m = abs(x_46_im) x.im\_s = copysign(1.0, x_46_im) function code(x_46_im_s, x_46_re, x_46_im_m) t_0 = Float64(Float64(Float64(Float64(x_46_re * x_46_re) - Float64(x_46_im_m * x_46_im_m)) * x_46_im_m) + Float64(Float64(Float64(x_46_re * x_46_im_m) + Float64(x_46_im_m * x_46_re)) * x_46_re)) tmp = 0.0 if ((t_0 <= -1e-289) || !(t_0 <= Inf)) tmp = Float64(Float64(-x_46_im_m) * Float64(x_46_im_m * x_46_im_m)); else tmp = Float64(Float64(x_46_im_m * x_46_re) * x_46_re); end return Float64(x_46_im_s * tmp) end
x.im\_m = abs(x_46_im); x.im\_s = sign(x_46_im) * abs(1.0); function tmp_2 = code(x_46_im_s, x_46_re, x_46_im_m) t_0 = (((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re); tmp = 0.0; if ((t_0 <= -1e-289) || ~((t_0 <= Inf))) tmp = -x_46_im_m * (x_46_im_m * x_46_im_m); else tmp = (x_46_im_m * x_46_re) * x_46_re; end tmp_2 = x_46_im_s * tmp; end
x.im\_m = N[Abs[x$46$im], $MachinePrecision]
x.im\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x$46$im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
code[x$46$im$95$s_, x$46$re_, x$46$im$95$m_] := Block[{t$95$0 = N[(N[(N[(N[(x$46$re * x$46$re), $MachinePrecision] - N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision] * x$46$im$95$m), $MachinePrecision] + N[(N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] + N[(x$46$im$95$m * x$46$re), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision]}, N[(x$46$im$95$s * If[Or[LessEqual[t$95$0, -1e-289], N[Not[LessEqual[t$95$0, Infinity]], $MachinePrecision]], N[((-x$46$im$95$m) * N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision], N[(N[(x$46$im$95$m * x$46$re), $MachinePrecision] * x$46$re), $MachinePrecision]]), $MachinePrecision]]
\begin{array}{l}
x.im\_m = \left|x.im\right|
\\
x.im\_s = \mathsf{copysign}\left(1, x.im\right)
\\
\begin{array}{l}
t_0 := \left(x.re \cdot x.re - x.im\_m \cdot x.im\_m\right) \cdot x.im\_m + \left(x.re \cdot x.im\_m + x.im\_m \cdot x.re\right) \cdot x.re\\
x.im\_s \cdot \begin{array}{l}
\mathbf{if}\;t\_0 \leq -1 \cdot 10^{-289} \lor \neg \left(t\_0 \leq \infty\right):\\
\;\;\;\;\left(-x.im\_m\right) \cdot \left(x.im\_m \cdot x.im\_m\right)\\
\mathbf{else}:\\
\;\;\;\;\left(x.im\_m \cdot x.re\right) \cdot x.re\\
\end{array}
\end{array}
\end{array}
if (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < -1e-289 or +inf.0 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) Initial program 69.5%
Taylor expanded in x.re around 0
+-commutativeN/A
distribute-rgt-inN/A
associate-*r*N/A
count-2-revN/A
distribute-lft-inN/A
count-2-revN/A
distribute-lft-inN/A
fp-cancel-sign-sub-invN/A
mul-1-negN/A
cube-multN/A
unpow2N/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-*.f64N/A
lower-neg.f64N/A
distribute-lft1-inN/A
Applied rewrites82.9%
Taylor expanded in x.re around 0
Applied rewrites53.2%
if -1e-289 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < +inf.0Initial program 95.5%
Taylor expanded in x.re around inf
unpow2N/A
associate-*r*N/A
lower-*.f64N/A
lower-*.f6465.2
Applied rewrites65.2%
lift-+.f64N/A
flip3-+N/A
lower-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
count-2N/A
lower-*.f64N/A
lower-pow.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
Applied rewrites13.7%
Applied rewrites49.6%
Final simplification51.4%
x.im\_m = (fabs.f64 x.im)
x.im\_s = (copysign.f64 #s(literal 1 binary64) x.im)
(FPCore (x.im_s x.re x.im_m)
:precision binary64
(*
x.im_s
(if (<=
(+
(* (- (* x.re x.re) (* x.im_m x.im_m)) x.im_m)
(* (+ (* x.re x.im_m) (* x.im_m x.re)) x.re))
INFINITY)
(+
(* (+ x.im_m x.re) (* (- x.re x.im_m) x.im_m))
(* (* x.re (+ x.im_m x.im_m)) x.re))
(fma (- x.re x.im_m) (* x.im_m (+ x.re x.im_m)) (* 2.0 x.im_m)))))x.im\_m = fabs(x_46_im);
x.im\_s = copysign(1.0, x_46_im);
// Variant branching only on whether the original expression is NaN:
// non-NaN uses a regrouped polynomial, NaN falls back to the fma form.
// Inputs pre-split: x_46_im_m = |x.im|, x_46_im_s = copysign(1, x.im).
double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
double tmp;
if (((((x_46_re * x_46_re) - (x_46_im_m * x_46_im_m)) * x_46_im_m) + (((x_46_re * x_46_im_m) + (x_46_im_m * x_46_re)) * x_46_re)) <= ((double) INFINITY)) {
// Regrouped form (im+re)*((re-im)*im) + (re*(im+im))*re chosen by Herbie.
tmp = ((x_46_im_m + x_46_re) * ((x_46_re - x_46_im_m) * x_46_im_m)) + ((x_46_re * (x_46_im_m + x_46_im_m)) * x_46_re);
} else {
// Original expression was NaN; fma-based rearrangement.
tmp = fma((x_46_re - x_46_im_m), (x_46_im_m * (x_46_re + x_46_im_m)), (2.0 * x_46_im_m));
}
return x_46_im_s * tmp;
}
x.im\_m = abs(x_46_im) x.im\_s = copysign(1.0, x_46_im) function code(x_46_im_s, x_46_re, x_46_im_m) tmp = 0.0 if (Float64(Float64(Float64(Float64(x_46_re * x_46_re) - Float64(x_46_im_m * x_46_im_m)) * x_46_im_m) + Float64(Float64(Float64(x_46_re * x_46_im_m) + Float64(x_46_im_m * x_46_re)) * x_46_re)) <= Inf) tmp = Float64(Float64(Float64(x_46_im_m + x_46_re) * Float64(Float64(x_46_re - x_46_im_m) * x_46_im_m)) + Float64(Float64(x_46_re * Float64(x_46_im_m + x_46_im_m)) * x_46_re)); else tmp = fma(Float64(x_46_re - x_46_im_m), Float64(x_46_im_m * Float64(x_46_re + x_46_im_m)), Float64(2.0 * x_46_im_m)); end return Float64(x_46_im_s * tmp) end
x.im\_m = N[Abs[x$46$im], $MachinePrecision]
x.im\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x$46$im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
code[x$46$im$95$s_, x$46$re_, x$46$im$95$m_] := N[(x$46$im$95$s * If[LessEqual[N[(N[(N[(N[(x$46$re * x$46$re), $MachinePrecision] - N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision] * x$46$im$95$m), $MachinePrecision] + N[(N[(N[(x$46$re * x$46$im$95$m), $MachinePrecision] + N[(x$46$im$95$m * x$46$re), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision], Infinity], N[(N[(N[(x$46$im$95$m + x$46$re), $MachinePrecision] * N[(N[(x$46$re - x$46$im$95$m), $MachinePrecision] * x$46$im$95$m), $MachinePrecision]), $MachinePrecision] + N[(N[(x$46$re * N[(x$46$im$95$m + x$46$im$95$m), $MachinePrecision]), $MachinePrecision] * x$46$re), $MachinePrecision]), $MachinePrecision], N[(N[(x$46$re - x$46$im$95$m), $MachinePrecision] * N[(x$46$im$95$m * N[(x$46$re + x$46$im$95$m), $MachinePrecision]), $MachinePrecision] + N[(2.0 * x$46$im$95$m), $MachinePrecision]), $MachinePrecision]]), $MachinePrecision]
\begin{array}{l}
x.im\_m = \left|x.im\right|
\\
x.im\_s = \mathsf{copysign}\left(1, x.im\right)
\\
x.im\_s \cdot \begin{array}{l}
\mathbf{if}\;\left(x.re \cdot x.re - x.im\_m \cdot x.im\_m\right) \cdot x.im\_m + \left(x.re \cdot x.im\_m + x.im\_m \cdot x.re\right) \cdot x.re \leq \infty:\\
\;\;\;\;\left(x.im\_m + x.re\right) \cdot \left(\left(x.re - x.im\_m\right) \cdot x.im\_m\right) + \left(x.re \cdot \left(x.im\_m + x.im\_m\right)\right) \cdot x.re\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x.re - x.im\_m, x.im\_m \cdot \left(x.re + x.im\_m\right), 2 \cdot x.im\_m\right)\\
\end{array}
\end{array}
if (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) < +inf.0Initial program 92.9%
lift-*.f64N/A
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
difference-of-squaresN/A
associate-*l*N/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-*.f64N/A
lower--.f6499.8
Applied rewrites99.8%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-+.f6499.8
Applied rewrites99.8%
if +inf.0 < (+.f64 (*.f64 (-.f64 (*.f64 x.re x.re) (*.f64 x.im x.im)) x.im) (*.f64 (+.f64 (*.f64 x.re x.im) (*.f64 x.im x.re)) x.re)) Initial program 0.0%
lift-*.f64N/A
lift--.f64N/A
lift-*.f64N/A
lift-*.f64N/A
difference-of-squaresN/A
associate-*l*N/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-*.f64N/A
lower--.f6425.0
Applied rewrites25.0%
lift-+.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-+.f6425.0
Applied rewrites25.0%
Applied rewrites100.0%
x.im\_m = (fabs.f64 x.im) x.im\_s = (copysign.f64 #s(literal 1 binary64) x.im) (FPCore (x.im_s x.re x.im_m) :precision binary64 (* x.im_s (* (- x.im_m) (* x.im_m x.im_m))))
x.im\_m = fabs(x_46_im);
x.im\_s = copysign(1.0, x_46_im);
double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// -sign(x.im) * |x.im|^3, with x_46_im_m = |x.im| and x_46_im_s =
// copysign(1, x.im). x_46_re is unused in this alternative but kept so the
// signature matches the other variants.
double mag_squared = x_46_im_m * x_46_im_m;
return x_46_im_s * (-x_46_im_m * mag_squared);
}
x.im\_m = abs(x_46im)
x.im\_s = copysign(1.0d0, x_46im)
real(8) function code(x_46im_s, x_46re, x_46im_m)
! Returns x_46im_s * (-x_46im_m**3), i.e. -sign(x.im)*|x.im|**3.
! x_46re is unused in this alternative but kept for a uniform signature.
real(8), intent (in) :: x_46im_s
real(8), intent (in) :: x_46re
real(8), intent (in) :: x_46im_m
code = x_46im_s * (-x_46im_m * (x_46im_m * x_46im_m))
end function
x.im\_m = Math.abs(x_46_im);
x.im\_s = Math.copySign(1.0, x_46_im);
public static double code(double x_46_im_s, double x_46_re, double x_46_im_m) {
// -sign(x.im) * |x.im|^3; x_46_re is unused in this alternative but kept
// so the signature matches the other variants.
double magSquared = x_46_im_m * x_46_im_m;
return x_46_im_s * (-x_46_im_m * magSquared);
}
x.im\_m = math.fabs(x_46_im) x.im\_s = math.copysign(1.0, x_46_im) def code(x_46_im_s, x_46_re, x_46_im_m): return x_46_im_s * (-x_46_im_m * (x_46_im_m * x_46_im_m))
x.im\_m = abs(x_46_im) x.im\_s = copysign(1.0, x_46_im) function code(x_46_im_s, x_46_re, x_46_im_m) return Float64(x_46_im_s * Float64(Float64(-x_46_im_m) * Float64(x_46_im_m * x_46_im_m))) end
x.im\_m = abs(x_46_im); x.im\_s = sign(x_46_im) * abs(1.0); function tmp = code(x_46_im_s, x_46_re, x_46_im_m) tmp = x_46_im_s * (-x_46_im_m * (x_46_im_m * x_46_im_m)); end
x.im\_m = N[Abs[x$46$im], $MachinePrecision]
x.im\_s = N[With[{TMP1 = Abs[1.0], TMP2 = Sign[x$46$im]}, TMP1 * If[TMP2 == 0, 1, TMP2]], $MachinePrecision]
code[x$46$im$95$s_, x$46$re_, x$46$im$95$m_] := N[(x$46$im$95$s * N[((-x$46$im$95$m) * N[(x$46$im$95$m * x$46$im$95$m), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
x.im\_m = \left|x.im\right|
\\
x.im\_s = \mathsf{copysign}\left(1, x.im\right)
\\
x.im\_s \cdot \left(\left(-x.im\_m\right) \cdot \left(x.im\_m \cdot x.im\_m\right)\right)
\end{array}
Initial program 82.7%
Taylor expanded in x.re around 0
+-commutativeN/A
distribute-rgt-inN/A
associate-*r*N/A
count-2-revN/A
distribute-lft-inN/A
count-2-revN/A
distribute-lft-inN/A
fp-cancel-sign-sub-invN/A
mul-1-negN/A
cube-multN/A
unpow2N/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-*.f64N/A
lower-neg.f64N/A
distribute-lft1-inN/A
Applied rewrites89.3%
Taylor expanded in x.re around 0
Applied rewrites63.3%
(FPCore (x.re x.im) :precision binary64 (+ (* (* x.re x.im) (* 2.0 x.re)) (* (* x.im (- x.re x.im)) (+ x.re x.im))))
double code(double x_46_re, double x_46_im) {
// Rewritten imaginary part of (x.re + i*x.im)^3:
// (re*im)*(2*re) + (im*(re-im))*(re+im), operation order preserved.
double doubled_term = (x_46_re * x_46_im) * (2.0 * x_46_re);
double product_term = (x_46_im * (x_46_re - x_46_im)) * (x_46_re + x_46_im);
return doubled_term + product_term;
}
real(8) function code(x_46re, x_46im)
! Rewritten imaginary part of (x.re + i*x.im)**3:
! (re*im)*(2*re) + (im*(re-im))*(re+im).
real(8), intent (in) :: x_46re
real(8), intent (in) :: x_46im
code = ((x_46re * x_46im) * (2.0d0 * x_46re)) + ((x_46im * (x_46re - x_46im)) * (x_46re + x_46im))
end function
public static double code(double x_46_re, double x_46_im) {
// Rewritten imaginary part of (x.re + i*x.im)^3:
// (re*im)*(2*re) + (im*(re-im))*(re+im), operation order preserved.
double doubledTerm = (x_46_re * x_46_im) * (2.0 * x_46_re);
double productTerm = (x_46_im * (x_46_re - x_46_im)) * (x_46_re + x_46_im);
return doubledTerm + productTerm;
}
def code(x_46_re, x_46_im):
    # Rewritten imaginary part of (re + i*im)**3:
    # (re*im)*(2*re) + (im*(re-im))*(re+im), operation order preserved.
    doubled_term = (x_46_re * x_46_im) * (2.0 * x_46_re)
    product_term = (x_46_im * (x_46_re - x_46_im)) * (x_46_re + x_46_im)
    return doubled_term + product_term
# Rewritten imaginary part of (x.re + i*x.im)^3 in binary64.
function code(x_46_re, x_46_im) return Float64(Float64(Float64(x_46_re * x_46_im) * Float64(2.0 * x_46_re)) + Float64(Float64(x_46_im * Float64(x_46_re - x_46_im)) * Float64(x_46_re + x_46_im))) end
% Rewritten imaginary part of (x.re + i*x.im)^3.
function tmp = code(x_46_re, x_46_im) tmp = ((x_46_re * x_46_im) * (2.0 * x_46_re)) + ((x_46_im * (x_46_re - x_46_im)) * (x_46_re + x_46_im)); end
(* Rewritten imaginary part of (x.re + i*x.im)^3; each N[..., $MachinePrecision] models one binary64 rounding. *)
code[x$46$re_, x$46$im_] := N[(N[(N[(x$46$re * x$46$im), $MachinePrecision] * N[(2.0 * x$46$re), $MachinePrecision]), $MachinePrecision] + N[(N[(x$46$im * N[(x$46$re - x$46$im), $MachinePrecision]), $MachinePrecision] * N[(x$46$re + x$46$im), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x.re \cdot x.im\right) \cdot \left(2 \cdot x.re\right) + \left(x.im \cdot \left(x.re - x.im\right)\right) \cdot \left(x.re + x.im\right)
\end{array}
herbie shell --seed 2024329
(FPCore (x.re x.im)
:name "math.cube on complex, imaginary part"
:precision binary64
:alt
(! :herbie-platform default (+ (* (* x.re x.im) (* 2 x.re)) (* (* x.im (- x.re x.im)) (+ x.re x.im))))
(+ (* (- (* x.re x.re) (* x.im x.im)) x.im) (* (+ (* x.re x.im) (* x.im x.re)) x.re)))