/*===========================================================================
METHOD:
   sSharedBuffer (Public Method)

DESCRIPTION:
   Constructor (assume ownership of passed in buffer)

PARAMETERS:
   dataLen     [ I ] - Length of the buffer being adopted (must satisfy
                       IsValidSize())
   pDataToOwn  [ I ] - Buffer this object takes ownership of (may be 0)
   dataType    [ I ] - Type of data (opaque to this class)

NOTE:
   The argument order is deliberately the reverse of the copying
   constructor so the two overloads cannot be confused at a call site
   (copy versus assume ownership)

RETURN VALUE:
   None
===========================================================================*/
sSharedBuffer::sSharedBuffer(
   ULONG                      dataLen,
   PBYTE                      pDataToOwn,
   ULONG                      dataType )
   :  mpData( 0 ),
      mSize( 0 ),
      mType( dataType ),
      mRefCount( 0 )
{
   if (pDataToOwn == 0)
   {
      // No buffer supplied, nothing to adopt
      return;
   }

   if (IsValidSize( dataLen ) == true)
   {
      // Length acceptable, adopt the caller's buffer as-is
      mpData = pDataToOwn;
      mSize = dataLen;
   }
   else
   {
      // We assumed ownership of this memory regardless of validity;
      // an unusable length means we must release it now to avoid a leak
      delete [] pDataToOwn;
   }
}
/*===========================================================================
METHOD:
   sSharedBuffer (Public Method)

DESCRIPTION:
   Constructor (copy passed in buffer)

PARAMETERS:
   pDataToCopy [ I ] - Buffer to duplicate (may be 0, in which case the
                       object stays empty)
   dataLen     [ I ] - Length of the above buffer (must satisfy
                       IsValidSize())
   dataType    [ I ] - Type of data (opaque to this class)

RETURN VALUE:
   None
===========================================================================*/
sSharedBuffer::sSharedBuffer(
   const BYTE *               pDataToCopy,
   ULONG                      dataLen,
   ULONG                      dataType )
   :  mpData( 0 ),
      mSize( 0 ),
      mType( dataType ),
      mRefCount( 0 )
{
   // Reject unusable lengths and null sources up front
   if (IsValidSize( dataLen ) != true || pDataToCopy == 0)
   {
      return;
   }

   // NOTE(review): this null check is only meaningful if a nothrow
   // operator new[] is in effect; with a throwing new it is dead code
   mpData = new BYTE[dataLen];
   if (mpData != 0)
   {
      // Duplicate the caller's data into our own allocation
      memcpy( (PVOID)mpData, (LPCVOID)pDataToCopy, (SIZE_T)dataLen );

      // Set the size last so that the double-deletion logic only
      // ever applies when a real allocation exists
      mSize = dataLen;
   }
}
/** * warn if any invariant doesn't hold. **/ void Validate(uintptr_t id) const { if(!performSanityChecks) return; // note: RangeList::Validate implicitly checks the prev and next // fields by iterating over the list. // note: we can't check for prev != next because we're called for // footers as well, and they don't have valid pointers. ENSURE(IsValidSize(m_size)); ENSURE(IsFreedBlock(id)); }
// Return a previously allocated region to the allocator, merging it with
// any adjacent free blocks before putting it on the freelist.
void Deallocate(u8* p, size_t size)
{
	// Both ends of the region must lie inside the pool and the pointer
	// must carry the allocator's alignment guarantee.
	u8* lastByte = p + size - 1;
	ENSURE((uintptr_t)p % allocationAlignment == 0);
	ENSURE(IsValidSize(size));
	ENSURE(pool_contains(&m_pool, p));
	ENSURE(pool_contains(&m_pool, lastByte));

	Validate();
	m_stats.OnDeallocate(size);

	// Coalesce must run before the block is listed as free, since it may
	// grow the region by absorbing its neighbors.
	Coalesce(p, size);
	AddToFreelist(p, size);

	Validate();
}
// Grab a free block large enough for `size` bytes, split off the unused
// tail, and return the block (or 0 if no suitable block exists).
void* TakeAndSplitFreeBlock(size_t size)
{
	FreedBlock* freedBlock = m_segregatedRangeLists.Find(size);
	if(!freedBlock)
		return 0;

	u8* p = (u8*)freedBlock;
	const size_t leftoverSize = freedBlock->Size() - size;
	RemoveFromFreelist(freedBlock);

	// Hand any usable remainder back to the freelist. Remainders that
	// fail IsValidSize are deliberately dropped rather than tracked.
	if(IsValidSize(leftoverSize))
		AddToFreelist(p+size, leftoverSize);

	return p;
}
void* Allocate(size_t size) throw() { ENSURE(IsValidSize(size)); Validate(); void* p = TakeAndSplitFreeBlock(size); if(!p) { p = pool_alloc(&m_pool, size); if(!p) // both failed; don't throw bad_alloc because return 0; // this often happens with the file cache. } // (NB: we must not update the statistics if allocation failed) m_stats.OnAllocate(size); Validate(); ENSURE((uintptr_t)p % allocationAlignment == 0); return p; }