Example #1
/* Like tensor_compress, but also compress into one dimension any
   group of dimensions that form a contiguous block of indices with
   some stride.  (This can safely be done for transform vector sizes.) */
tensor *X(tensor_compress_contiguous)(const tensor *sz)
{
     int i, rnk;
     tensor *sz2, *x;

     if (X(tensor_sz)(sz) == 0)
          return X(mktensor)(RNK_MINFTY);

     sz2 = X(tensor_compress)(sz);
     A(FINITE_RNK(sz2->rnk));

     if (sz2->rnk < 2)		/* nothing to compress */
          return sz2;

     /* compute what the rank will be after compression */
     for (i = rnk = 1; i < sz2->rnk; ++i)
          if (!strides_contig(sz2->dims + i - 1, sz2->dims + i))
               ++rnk;

     /* merge adjacent dimensions whenever possible */
     x = X(mktensor)(rnk);
     x->dims[0] = sz2->dims[0];
     for (i = rnk = 1; i < sz2->rnk; ++i) {
          if (strides_contig(sz2->dims + i - 1, sz2->dims + i)) {
               x->dims[rnk - 1].n *= sz2->dims[i].n;
               x->dims[rnk - 1].is = sz2->dims[i].is;
               x->dims[rnk - 1].os = sz2->dims[i].os;
          } else {
               A(rnk < x->rnk);
               x->dims[rnk++] = sz2->dims[i];
          }
     }

     X(tensor_destroy)(sz2);
     return x;
}
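
A minimal, self-contained sketch of the merge test the loop above relies on, assuming strides_contig means "the outer stride equals the inner stride times the inner length, in both input and output". The INT and iodim definitions here are hypothetical stand-ins for FFTW's internal types, not the kernel's own headers:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's INT and iodim types. */
typedef long INT;
typedef struct { INT n, is, os; } iodim;

/* A dimension pair is mergeable when the outer stride equals the
   inner stride times the inner length, for input and output alike. */
static int strides_contig(const iodim *a, const iodim *b)
{
     return a->is == b->is * b->n && a->os == b->os * b->n;
}

int main(void)
{
     /* A row-major 4 x 8 block: the outer dimension strides over
        whole rows of length 8. */
     iodim dims[2] = { { 4, 8, 8 }, { 8, 1, 1 } };

     if (strides_contig(&dims[0], &dims[1])) {
          /* Collapse into one dimension of length 32 with the inner
             strides, exactly as the merging loop above does. */
          iodim merged = { dims[0].n * dims[1].n, dims[1].is, dims[1].os };
          printf("merged: n=%ld is=%ld os=%ld\n",
                 merged.n, merged.is, merged.os);
     }
     return 0;
}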
Example #2
/* Like tensor_compress, but also compress into one dimension any
   group of dimensions that form a contiguous block of indices with
   some stride.  (This can safely be done for transform vector sizes.) */
tensor *X(tensor_compress_contiguous)(const tensor *sz)
{
     int i, rnk;
     tensor *sz2, *x;

     if (X(tensor_sz)(sz) == 0)
          return X(mktensor)(RNK_MINFTY);

     sz2 = really_compress(sz);
     A(FINITE_RNK(sz2->rnk));

     if (sz2->rnk <= 1) { /* nothing to compress. */
          if (0) {
               /* this call is redundant, because "sz->rnk <= 1" implies
                  that the tensor is already canonical, but I am writing
                  it explicitly because "logically" we need to canonicalize
                  the tensor before returning. */
               canonicalize(sz2);
          }
          return sz2;
     }

     /* sort in descending order of |istride|, so that compressible
        dimensions appear contiguously */
     qsort(sz2->dims, (unsigned)sz2->rnk, sizeof(iodim),
           (int (*)(const void *, const void *))compare_by_istride);

     /* compute what the rank will be after compression */
     for (i = rnk = 1; i < sz2->rnk; ++i)
          if (!strides_contig(sz2->dims + i - 1, sz2->dims + i))
               ++rnk;

     /* merge adjacent dimensions whenever possible */
     x = X(mktensor)(rnk);
     x->dims[0] = sz2->dims[0];
     for (i = rnk = 1; i < sz2->rnk; ++i) {
          if (strides_contig(sz2->dims + i - 1, sz2->dims + i)) {
               x->dims[rnk - 1].n *= sz2->dims[i].n;
               x->dims[rnk - 1].is = sz2->dims[i].is;
               x->dims[rnk - 1].os = sz2->dims[i].os;
          } else {
               A(rnk < x->rnk);
               x->dims[rnk++] = sz2->dims[i];
          }
     }

     X(tensor_destroy)(sz2);

     /* reduce to canonical form */
     canonicalize(x);
     return x;
}
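
The comparator passed to qsort is not part of this listing. A plausible sketch, assuming it simply orders dimensions by decreasing |is| so that each dimension ends up adjacent to the one it strides over; the types and the iabs helper are stand-ins, not the kernel's own:

#include <stdlib.h>

typedef long INT;                         /* hypothetical stand-in */
typedef struct { INT n, is, os; } iodim;  /* hypothetical stand-in */

static INT iabs(INT x) { return x < 0 ? -x : x; }

/* Descending order of |istride|: after sorting, a dimension that can
   absorb another sits immediately before it, so the strides_contig
   pass above sees them as an adjacent, mergeable pair. */
static int compare_by_istride(const iodim *a, const iodim *b)
{
     INT sa = iabs(a->is), sb = iabs(b->is);
     return sa > sb ? -1 : (sa < sb ? 1 : 0);
}

int main(void)
{
     iodim d[3] = { { 8, 1, 1 }, { 4, 32, 32 }, { 4, 8, 8 } };
     qsort(d, 3, sizeof(iodim),
           (int (*)(const void *, const void *))compare_by_istride);
     /* d is now ordered by is = 32, 8, 1; both adjacent pairs are
        stride-contiguous, so the merge pass would collapse all three
        into a single dimension of length 4 * 4 * 8 = 128. */
     return 0;
}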