Example #1
0
  template <class ostr, class allocator> ostr& stats(ostr &o, allocator &a) {
    o << "TRMemoryAllocator stats:\n";
    for (int i=minbits; i < maxbits; i++) {
      if (statsArray[i-minbits]) {
        int32_t sz=0;
        void *p = freelist[i-minbits];
        while (p) {
          sz += bucketsize(i);
          p = *(void **)p;
        }
        o <<  "bucket[" << i-minbits << "] (" << bucketsize(i) << ") : allocated = "
          << statsArray[i-minbits] <<  " bytes, freelist size = " << sz << "\n" ;
      }
    }

    return o;
  }
Example #2
0
  // Allocate at least `size` bytes.
  //
  // Small requests are rounded up to a power-of-two bucket and served from the
  // per-bucket freelist when possible; requests larger than the biggest bucket
  // go straight to the underlying TR_Memory allocator.  Free chunks store the
  // link to the next free chunk in their first sizeof(void*) bytes.
  //
  // `name` and `ignore` are unused; they are kept for interface compatibility.
  void *allocate(size_t size, const char *name=NULL, int ignore=0) {
    uint32_t b = bucket(size);

    // Oversized request: bypass the bucket machinery entirely.
    if (b>=maxbits) {
      stats_largeAlloc += size;
      return  memory->allocateMemory(size, kind, TR_Memory::CS2);
    }

    // Fast path: pop the head of this bucket's freelist (the memcpy reads the
    // embedded next-link out of the chunk being returned).
    if (freelist[b-minbits]) {
      void *ret = freelist[b-minbits];
      memcpy(&freelist[b-minbits],freelist[b-minbits], sizeof(void *));

      return ret;
    }

    // See if we have segments in the freelists of the larger segments.
    // Get a free segment from the first available larger size and
    // add it to the freelist of the current bucket.

    if (scavenge) {
      for (uint32_t i=b+1; i < maxbits; i++) {
        if (freelist[i-minbits]) {
          // A size-2^i chunk splits into exactly 2^(i-b) size-2^b pieces.
          uint32_t elements = (1 << (i-b)); // (2^i / 2^b)

          // remove this segment from its freelist
          char *freechunk = (char *) freelist[i-minbits];
          memcpy(&freelist[i-minbits],freelist[i-minbits], sizeof(void *));

          // set the link on the last element to NULL
          memset(freechunk+bucketsize(b)*(elements-1), 0, sizeof(void *));

          // Set the head of the new freelist (piece 0 is returned to the
          // caller, so the list starts at piece 1)
          freelist[b-minbits] = freechunk+bucketsize(b);

          // Now link up remaining elements to form the freelist.
          // (loop index renamed from `i`, which shadowed the outer loop's `i`)
          for (uint32_t e=elements-2; e > 0; e--) {
            void *target = freechunk+bucketsize(b)*e;
            void *source = freechunk+(bucketsize(b)*(e+1));
            memcpy(target, &source, sizeof(void *));
          }

          return ((void *)freechunk);
        }
      }
    }

    // No reusable chunk: grab fresh memory and account for it.  Freelist and
    // scavenge hits are not re-counted -- those bytes were recorded when the
    // memory was first allocated.
    statsArray[b-minbits] += bucketsize(b);
    return memory->allocateMemory(bucketsize(b), kind, TR_Memory::CS2);
  }
Example #3
0
  // Map a requested byte count to the smallest bucket index whose chunk size
  // can hold it.  Sizes too large for any bucket saturate at maxbits, which
  // callers treat as "no bucket".
  static uint32_t bucket(size_t size) {
    for (uint32_t b = minbits; b < maxbits; b++) {
      if (bucketsize(b) >= size)
        return b;
    }
    return maxbits;
  }
Example #4
0
/**
 * Compress the successor index in place.
 *
 * The uncompressed index stores one absolute successor pointer (bpp bytes)
 * per item.  Compression replaces that with one byte per item: either
 * COMPRESSED_EOR, a delta (1..m_maxdelta) from the previous successor, or a
 * code (m_maxdelta + k) naming the k-th absolute pointer of the current
 * bucket in a shared pointer pool.  Two passes are made over the items: the
 * first only counts how many absolute pointers will be needed (m_poolsize),
 * the second emits the compressed bytes and fills the pool.  Both passes
 * must apply identical rules, so their conditions are kept in lockstep.
 *
 * Returns true if the index is (or already was) compressed; false when
 * compression would not save space or an allocation fails, in which case
 * the index is left in its uncompressed form.
 */
bool FrBWTIndex::compress()
{
   if (m_compressed)
      return true ;
   m_bucketsize = DEFAULT_BUCKET_SIZE ;
   // deltas and pointer codes share one byte, so the largest storable delta
   // leaves room for up to m_bucketsize bucket-local pointer codes above it
   m_maxdelta = 255 - m_bucketsize ;
   m_numbuckets = (numItems() + bucketsize() - 1) / bucketsize() ;

   // figure out how big the pool of absolute pointers will be
   uint32_t prev_succ = ~0 ;
   size_t abs_pointers = 0 ;
   size_t comp_EORs = 0 ;
   m_poolsize = 0 ;
   FrAdviseMemoryUse(m_items,bytesPerPointer()*numItems(),FrMADV_SEQUENTIAL) ;
   for (size_t i = 0 ; i < numItems() ; i++)
      {
      if ((i % m_bucketsize) == 0)
	 {
	 // new bucket: bucket-local counters restart
	 abs_pointers = 0 ;
	 comp_EORs = 0 ;
	 }
      uint32_t succ = getUncompSuccessor(i) ;
      if (succ == m_EOR || (succ > m_EOR && m_eor_state == FrBWT_MergeEOR))
	 comp_EORs++ ; // will be stored without using an absolute pointer
      else if (succ <= prev_succ ||
	       succ - prev_succ > m_maxdelta ||
	       ((i+1)%m_bucketsize == 0 && (abs_pointers + comp_EORs == 0)))
	 {      // above enforces at least one absolute pointer per bucket
	 abs_pointers++ ;
	 m_poolsize++ ;
	 }
      prev_succ = succ ;
      }

   size_t bpp = bytesPerPointer() ;
   // now that we know how big the pool is, check whether we will actually
   //   save any space by compressing
   // (compressed = pool + per-bucket headers + one byte per item, vs.
   //  uncompressed = bpp bytes per item)
   if ((m_poolsize + m_numbuckets) * bpp + numItems() >= numItems() * bpp)
      return false ;			// can't (usefully) compress

   // allocate the various buffers for the compressed data
   m_buckets = FrNewN(char,bpp * m_numbuckets) ;
   unsigned char *comp_items = FrNewN(unsigned char,numItems()) ;
   m_bucket_pool = FrNewN(char,bpp * m_poolsize) ;
   if (comp_items && m_buckets && m_bucket_pool)
      {
      // second pass: emit one compressed byte per item and fill in the
      // per-bucket headers (pool offsets) and the absolute-pointer pool
      size_t bucket = 0 ;
      size_t ptr_count = 0 ;
      size_t ptr_index = 0 ;
      prev_succ = ~0 ;
      for (size_t i = 0 ; i < numItems() ; i++)
	 {
	 if ((i % m_bucketsize) == 0)
	    {
	    // record where this bucket's absolute pointers start in the pool
	    FrStoreLong(ptr_count,m_buckets + bpp * bucket++) ;
	    ptr_index = 0 ;
	    comp_EORs = 0 ;
	    }
	 if ((i % CHUNK_SIZE) == 0 && i > 0)
	    {
	    // let OS know we're done with another chunk of m_items
	    FrDontNeedMemory(m_items + bpp*(i-CHUNK_SIZE), bpp*CHUNK_SIZE,
			     (i > CHUNK_SIZE)) ;
	    // and tell it to prefetch the next chunk
	    FrWillNeedMemory(m_items + bpp*i, bpp*CHUNK_SIZE) ;
	    }
	 uint32_t succ = getUncompSuccessor(i) ;
	 if (succ == m_EOR ||
	     (succ > m_EOR && m_eor_state == FrBWT_MergeEOR))
	    {
	    comp_items[i] = COMPRESSED_EOR ;
	    comp_EORs++ ;
	    }
	 else if (succ <= prev_succ ||
		  succ - prev_succ > m_maxdelta ||
		  ((i+1)%m_bucketsize == 0 && (ptr_index + comp_EORs == 0)))
	    // (above ensures at least one abs.ptr per bucket)
	    {
	    // store the absolute successor in the pool; the item byte encodes
	    // the 1-based bucket-local pointer index offset past m_maxdelta
	    FrStoreLong(succ,m_bucket_pool + bpp * ptr_count++);
	    comp_items[i] = (unsigned char)(m_maxdelta + (++ptr_index)) ;
	    }
	 else
	    comp_items[i] = (unsigned char)(succ - prev_succ) ;
	 prev_succ = succ ;
	 }
      // both passes must agree on the number of absolute pointers
      assertq(ptr_count == m_poolsize) ;
      // NOTE(review): m_fmap presumably means m_items is file-mapped, in
      // which case it must not be freed -- confirm against the mapping code
      if (!m_fmap)
	 FrFree(m_items) ;
      m_items = comp_items ;
      m_compressed = true ;
      return true ;
      }
   else	// memory alloc failed
      {
      FrWarning("out of memory while compressing index, "
		"will remain uncompressed") ;
      FrFree(comp_items)  ;
      FrFree(m_buckets) ;	m_buckets = 0 ;
      FrFree(m_bucket_pool) ;	m_bucket_pool = 0 ;
      m_numbuckets = 0 ;
      m_poolsize = 0 ;
      return false ;
      }
}