Example #1
char* MemoryPool::AllocateFallback(size_t bytes){
	if (bytes > _block_size/3){
		// Object is more than 1/3 of our block size.  Allocate it separately
		// to avoid wasting too much space in leftover bytes.
		char* result = AllocateNewBlock(bytes);
		return result;
	}

	char* result = AllocateNewBlock(_block_size);
	_memory_remain = _block_size - bytes;
	_alloc_ptr = result + bytes;
	
	return result;
}
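
The fallback above only runs once the current block cannot satisfy a request; the corresponding fast path is not shown in this example. A minimal sketch of what such a bump-pointer Allocate might look like, reusing the same _alloc_ptr and _memory_remain members (the Allocate method itself is hypothetical, not taken from the source):

char* MemoryPool::Allocate(size_t bytes) {
	// Fast path: carve the request out of the current block.
	if (bytes <= _memory_remain) {
		char* result = _alloc_ptr;
		_alloc_ptr += bytes;
		_memory_remain -= bytes;
		return result;
	}
	// Slow path: current block is exhausted (or the request is large),
	// so delegate to the fallback shown above.
	return AllocateFallback(bytes);
}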
Example #2
//
// DTrack::DTrack
//
// Constructor
//
DTrack::DTrack(const char *nameIn, U32 blockSizeIn, U32 cacheSizeIn)
{
  // Setup tracker parameters
  name = nameIn;
  blockSize = blockSizeIn;

  // Should we automatically calculate the cachesize
  if (!cacheSizeIn)
  {
    // Having cache at same size as block seems to give good performance
    cacheSize = blockSize;
  }
  else
  {
    // Accept supplied value
    cacheSize = cacheSizeIn; 
  } 

  // Initialize the block list
  blocks.SetNodeMember(&Block::node);

  // Allocate cache
  cache = new U32*[cacheSize];

  // Set initial id count past reserved space
  idCount = DTRACK_INITIALID;

  // No items in the cache
  cacheCount = 0;

  // Kick-start the system with one block
  AllocateNewBlock();
}
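
The constructor kick-starts the system with a single AllocateNewBlock call, which is not shown here. The way HitCache (Example #5) walks (*b)->data[i] and compares against DTRACK_EMPTY suggests each block carries blockSize slots of U32; a rough sketch along those lines, with the Block layout and the exact NList insertion call assumed rather than taken from the source:

void DTrack::AllocateNewBlock()
{
  // Block layout is assumed: an NList node plus a data array of blockSize U32s
  Block *block = new Block(blockSize);

  // Mark every position in the new block as free
  for (U32 i = 0; i < blockSize; i++)
  {
    block->data[i] = DTRACK_EMPTY;
  }

  // Add to the block list set up in the constructor
  // (the insertion method name is assumed)
  blocks.Append(block);
}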
Example #3
        ///////////////////////////////////////////////////////////////////////////////
        // MallocAligned
        //
        void* StackAllocator::MallocAligned(size_t nSize, size_t nAlignment, size_t nAlignmentOffset, bool bBoundsCheck)
        {
            assert((nAlignment & (nAlignment - 1)) == 0);
            assert((nAlignmentOffset & 7) == 0);
            #if PPM_INCREMENTAL_OBJECTS_ENABLED
                assert(mpCurrentObjectEnd == mpCurrentObjectBegin);
            #endif

            // Malloc() relies on the current pointer being aligned to kMinAlignment,
            // so we must ensure at least that much here.
            nAlignment = ((nAlignment - 1) | kMinAlignmentMask) + 1;
            nSize        = (nSize + kMinAlignmentMask) & kMinAlignmentMaskInverse;

            char* pAlignedObjectBegin = (char*)(((size_t)((mpCurrentObjectBegin + nAlignmentOffset) + (nAlignment - 1)) & ~(nAlignment - 1)) - nAlignmentOffset);
            assert(pAlignedObjectBegin >= mpCurrentObjectBegin); // Wraparound would be bad. Possibly do something about it.

            // In an optimized build, the bBoundsCheck test is optimized away as long as it is passed as a constant.
            if(bBoundsCheck && ((pAlignedObjectBegin + nSize) > mpCurrentBlockEnd))
            {
                if(AllocateNewBlock(nSize + nAlignment + nAlignmentOffset)) // Add extra space so that we know we can align in the new block.
                {
                    pAlignedObjectBegin = (char*)(((size_t)((mpCurrentObjectBegin + nAlignmentOffset) + (nAlignment - 1)) & ~(nAlignment - 1)) - nAlignmentOffset);
                    assert((pAlignedObjectBegin + nSize) <= mpCurrentBlockEnd);
                }
                else
                    return NULL;
            }

            mpCurrentObjectBegin = pAlignedObjectBegin + nSize;
            #if PPM_INCREMENTAL_OBJECTS_ENABLED
                mpCurrentObjectEnd = mpCurrentObjectBegin;
            #endif
            return pAlignedObjectBegin;
        }
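
The pAlignedObjectBegin computation above is the usual round-up-to-a-power-of-two trick, shifted by nAlignmentOffset so that the byte at that offset (rather than the object start) ends up aligned. A small self-contained sketch of the same expression with illustrative names, assuming power-of-two alignments as the asserts in MallocAligned require:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Round p up so that (result + offset) is a multiple of alignment.
// alignment must be a power of two, matching the assert in MallocAligned.
static inline std::uintptr_t AlignUpWithOffset(std::uintptr_t p, std::size_t alignment, std::size_t offset)
{
    assert((alignment & (alignment - 1)) == 0);
    return (((p + offset) + (alignment - 1)) & ~(std::uintptr_t)(alignment - 1)) - offset;
}

// Worked example: p = 0x1003, alignment = 16, offset = 8
//   p + offset           = 0x100B
//   rounded up to 16     = 0x1010
//   minus offset         = 0x1008   -> 0x1008 + 8 is 16-byte aligned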
Example #4
char* Arena::AllocateFallback(size_t bytes) {
  if (bytes > kBlockSize / 4) {
    // Object is more than a quarter of our block size.  Allocate it separately
    // to avoid wasting too much space in leftover bytes.
    char* result = AllocateNewBlock(bytes);
    return result;
  }

  // We waste the remaining space in the current block.
  alloc_ptr_ = AllocateNewBlock(kBlockSize);
  alloc_bytes_remaining_ = kBlockSize;

  char* result = alloc_ptr_;
  alloc_ptr_ += bytes;
  alloc_bytes_remaining_ -= bytes;
  return result;
}
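
Both fallback paths above delegate to AllocateNewBlock, whose body is not included in this example. In this arena pattern the helper typically just grabs a fresh block from the system allocator and remembers it so the whole arena can be released at once; a minimal sketch, assuming a blocks_ vector member (the member name is illustrative, not from the source):

char* Arena::AllocateNewBlock(size_t block_bytes) {
  // Grab a fresh block from the heap and remember it so the arena's
  // destructor can delete[] every block in one pass.
  char* result = new char[block_bytes];
  blocks_.push_back(result);
  return result;
}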
Example #5
//
// DTrack::HitCache
//
// Get an available position from the cache (never fails)
//
U32* DTrack::HitCache()
{
  // Is there an item in the cache
  if (cacheCount)
  {
    // This is a cache hit
    stateInfo.cacheHits++;
  }
  else
  {
    ASSERT(blocks.GetCount() * blockSize >= stateInfo.items);

    // Is the remaining number of items below the free limit
    if ((blocks.GetCount() * blockSize) - stateInfo.items < DTRACK_FREELIMIT)
    {
      // So allocate a new block
      AllocateNewBlock();
    }
    else
    {
      // This is a cache miss
      stateInfo.cacheMisses++;

      // Stop scanning when cache is full
      Bool cacheFull = FALSE;

      // Scan each block
      for (NList<Block>::Iterator b(&blocks); *b && !cacheFull; b++)
      {
        // Check each item in block
        for (U32 i = 0; i < blockSize && !cacheFull; i++)
        {
          // Is this position empty
          if ((*b)->data[i] == DTRACK_EMPTY)
          {
            // Add to cache
            cacheFull = CacheAdd(&(*b)->data[i]);
          }
        }
      }
    }
  }

  // Must be positions in cache now
  ASSERT(cacheCount);

  // Return top item
  return (cache[--cacheCount]);
}
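
The scan above stops as soon as CacheAdd reports that the cache is full. A minimal sketch of what that helper might look like, assuming it stores the free position in the cache array and returns TRUE once cacheSize entries are held (this body is inferred, not shown in the source):

Bool DTrack::CacheAdd(U32 *item)
{
  ASSERT(cacheCount < cacheSize);

  // Store the free position in the cache
  cache[cacheCount++] = item;

  // Report whether the cache is now full so the caller can stop scanning
  return (cacheCount == cacheSize);
}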