Example #1
/*
	Allocate a new virtual object from virtual space, which can grow up to maxBytes (including the
	virtual allocation overhead) but which has an initial size of initialBytes (NOT including the
	virtual allocation overhead). Should the allocation request fail, then a memory exception is 
	generated.
*/
MWORD* __stdcall AllocateVirtualSpace(MWORD maxBytes, MWORD initialBytes)
{
	unsigned reserveBytes = _ROUND2(maxBytes + dwPageSize, dwAllocationGranularity);
	ASSERT(reserveBytes % dwAllocationGranularity == 0);
	void* pReservation = ::VirtualAlloc(NULL, reserveBytes, MEM_RESERVE, PAGE_NOACCESS);
	if (pReservation)
	{

#ifdef _DEBUG
		// Let's see whether we got the rounding correct!
		MEMORY_BASIC_INFORMATION mbi;
		VERIFY(::VirtualQuery(pReservation, &mbi, sizeof(mbi)) == sizeof(mbi));
		ASSERT(mbi.AllocationBase == pReservation);
		ASSERT(mbi.BaseAddress == pReservation);
		ASSERT(mbi.AllocationProtect == PAGE_NOACCESS);
		//	ASSERT(mbi.Protect == PAGE_NOACCESS);
		ASSERT(mbi.RegionSize == reserveBytes);
		ASSERT(mbi.State == MEM_RESERVE);
		ASSERT(mbi.Type == MEM_PRIVATE);
#endif

		// We expect the initial byte size to be an integral number of pages, and it must also take account
		// of the virtual allocation overhead (currently 4 bytes)
		initialBytes = _ROUND2(initialBytes + sizeof(VirtualObjectHeader), dwPageSize);
		ASSERT(initialBytes % dwPageSize == 0);

		// Note that VirtualAlloc initializes the committed memory to zeroes.
		VirtualObjectHeader* pLocation = static_cast<VirtualObjectHeader*>(::VirtualAlloc(pReservation, initialBytes, MEM_COMMIT, PAGE_READWRITE));
		if (pLocation)
		{

#ifdef _DEBUG
			// Let's see whether we got the rounding correct!
			VERIFY(::VirtualQuery(pLocation, &mbi, sizeof(mbi)) == sizeof(mbi));
			ASSERT(mbi.AllocationBase == pLocation);
			ASSERT(mbi.BaseAddress == pLocation);
			ASSERT(mbi.AllocationProtect == PAGE_NOACCESS);
			ASSERT(mbi.Protect == PAGE_READWRITE);
			ASSERT(mbi.RegionSize == initialBytes);
			ASSERT(mbi.State == MEM_COMMIT);
			ASSERT(mbi.Type == MEM_PRIVATE);
#endif

			// Use first slot to hold the maximum size for the object
			pLocation->setMaxAllocation(maxBytes);
			return reinterpret_cast<MWORD*>(pLocation + 1);
		}
	}

	return nullptr;
}
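
The rounding helper used throughout these examples is not shown; below is a minimal sketch, assuming _ROUND2 rounds a size up to the next multiple of a power-of-two boundary such as dwPageSize or dwAllocationGranularity. This is an illustrative definition, not the actual macro from the source.

// Hypothetical illustration only: round n up to the next multiple of pow2,
// where pow2 must be a power of two (e.g. dwPageSize, dwAllocationGranularity).
#define _ROUND2(n, pow2) (((size_t)(n) + (size_t)(pow2) - 1) & ~((size_t)(pow2) - 1))
// e.g. _ROUND2(4100, 4096) == 8192, _ROUND2(4096, 4096) == 4096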
Example #2
// Resize an object in VirtualSpace (commit/decommit some memory)
// N.B. Assumes that there are no ref. counted objects above shrinkTo (primarily intended for
// Process stacks)
POBJECT ObjectMemory::resizeVirtual(OTE* ote, MWORD newByteSize)
{
	ASSERT(ote->heapSpace() == OTEFlags::VirtualSpace);

	VariantObject* pObject = static_cast<VariantObject*>(ote->m_location);
	VirtualObject* pVObj = reinterpret_cast<VirtualObject*>(pObject);
	VirtualObjectHeader* pBase = pVObj->getHeader();
	unsigned maxByteSize = pBase->getMaxAllocation(); maxByteSize;
	unsigned currentTotalByteSize = pBase->getCurrentAllocation();
	ASSERT(_ROUND2(currentTotalByteSize, dwPageSize) == currentTotalByteSize);
	unsigned newTotalByteSize = _ROUND2(newByteSize + sizeof(VirtualObjectHeader), dwPageSize);
	// Minimum virtual allocation is one page (4k normally)
	ASSERT(newTotalByteSize >= dwPageSize);
	
	if (newTotalByteSize > currentTotalByteSize)
	{
		// The object is increasing in size - commit some more memory
		ASSERT(newByteSize <= maxByteSize);
		unsigned allocSize = newTotalByteSize - currentTotalByteSize;
		ASSERT(_ROUND2(allocSize, dwPageSize) == allocSize);
		if (!::VirtualAlloc(reinterpret_cast<BYTE*>(pBase) + currentTotalByteSize, allocSize, MEM_COMMIT, PAGE_READWRITE))
			return 0;	// Request to resize failed
	}
	else if (newTotalByteSize < currentTotalByteSize)
	{
		const Behavior* behavior = ote->m_oteClass->m_location; behavior;
		// The object is shrinking - decommit some memory
		ASSERT(newByteSize > (ObjectHeaderSize+behavior->fixedFields())*sizeof(MWORD));

		MWORD* pCeiling = reinterpret_cast<MWORD*>(reinterpret_cast<BYTE*>(pBase) + newTotalByteSize);

		// Determine the size of the committed region above shrinkTo
		MEMORY_BASIC_INFORMATION mbi;
		VERIFY(::VirtualQuery(pCeiling, &mbi, sizeof(mbi)) == sizeof(mbi));
		ASSERT(mbi.AllocationBase == pBase);
		if (mbi.State == MEM_COMMIT)
		{
			// Decommit memory above new ceiling
			VERIFY(::VirtualFree(pCeiling, mbi.RegionSize, MEM_DECOMMIT));
		}
	}
	
	// And resize the object as far as Smalltalk is concerned to the nearest page boundary
	// above and including shrinkTo
	//pBase->setCurrentAllocation(newTotalByteSize);
	ote->setSize(newByteSize);

	return pObject;
}
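
For context, here is a minimal standalone sketch (Win32 API only; the helper name and sizes are hypothetical) of the reserve/commit/decommit lifecycle that AllocateVirtualSpace and resizeVirtual build on.

#include <windows.h>

// Sketch: reserve a range of address space, commit its first page, then decommit it again.
void* demoVirtualLifecycle()
{
	SYSTEM_INFO si;
	::GetSystemInfo(&si);							// si.dwPageSize is the commit granularity

	// Reserve address space only - no physical storage is charged yet
	void* base = ::VirtualAlloc(NULL, 16 * si.dwPageSize, MEM_RESERVE, PAGE_NOACCESS);
	if (!base)
		return NULL;

	// Commit (and zero-fill) the first page so it can actually be touched
	if (!::VirtualAlloc(base, si.dwPageSize, MEM_COMMIT, PAGE_READWRITE))
		return NULL;

	// ... use the committed page ...

	// Shrink again: release the physical storage but keep the reservation
	::VirtualFree(base, si.dwPageSize, MEM_DECOMMIT);
	return base;									// still reserved; free with MEM_RELEASE when done
}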
Example #3
// N.B. Like the other instantiate methods in ObjectMemory, this method for instantiating
// objects in virtual space (used for allocating Processes, for example), does not adjust
// the ref. count of the class, because this is often unnecessary, and does not adjust the
// sizes to allow for fixed fields - callers must do this
VirtualOTE* ObjectMemory::newVirtualObject(BehaviorOTE* classPointer, MWORD initialSize, MWORD maxSize)
{
	#ifdef _DEBUG
	{
		ASSERT(isBehavior(Oop(classPointer)));
		Behavior& behavior = *classPointer->m_location;
		ASSERT(behavior.isIndexable());
	}
	#endif

	// Trim the sizes to acceptable bounds
	if (initialSize <= dwOopsPerPage)
		initialSize = dwOopsPerPage;
	else
		initialSize = _ROUND2(initialSize, dwOopsPerPage);

	if (maxSize < initialSize)
		maxSize = initialSize;
	else
		maxSize = _ROUND2(maxSize, dwOopsPerPage);

	// We have to allow for the virtual allocation overhead. The allocation function will add in
	// space for this. The maximum size should include this; the initial size should not
	initialSize -= sizeof(VirtualObjectHeader)/sizeof(MWORD);

	unsigned byteSize = initialSize*sizeof(MWORD);
	VariantObject* pLocation = reinterpret_cast<VariantObject*>(AllocateVirtualSpace(maxSize * sizeof(MWORD), byteSize));
	if (pLocation)
	{
		// No need to alter ref. count of process class, as it is sticky

		// Fill space with nils for initial values
		const Oop nil = Oop(Pointers.Nil);
		const unsigned loopEnd = initialSize;
		for (unsigned i = 0; i < loopEnd; i++)
			pLocation->m_fields[i] = nil;

		OTE* ote = ObjectMemory::allocateOop(static_cast<POBJECT>(pLocation));
		ote->setSize(byteSize);
		ote->m_oteClass = classPointer;
		classPointer->countUp();
		ote->m_flags = m_spaceOTEBits[OTEFlags::VirtualSpace];
		ASSERT(ote->isPointers());

		return reinterpret_cast<VirtualOTE*>(ote);
	}

	return nullptr;
}
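
The size trimming above works in oops (machine words) rather than bytes; a hedged sketch of the assumed relationship, with a worked rounding example:

// Illustration only: assumed relationship between the page size and oops per page.
typedef unsigned long MWORD32;							// 32-bit oop, as in these examples
const MWORD32 pageSizeExample = 4096;					// hypothetical page size
const MWORD32 oopsPerPageExample = pageSizeExample / sizeof(MWORD32);	// 1024 oops per page
// An initialSize of 1500 oops would round up to _ROUND2(1500, 1024) == 2048 oops,
// from which sizeof(VirtualObjectHeader)/sizeof(MWORD) (one oop for the 4-byte header
// mentioned in Example #1) is subtracted so that header plus body fill whole pages.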
Example #4
template <MWORD ImageNullTerms> HRESULT ObjectMemory::LoadPointers(ibinstream& imageFile, const ImageHeader* pHeader, size_t& cbRead)
{
	ASSERT(pHeader->nGlobalPointers == NumPointers);

	::ZeroMemory(m_pConstObjs, CONSTSPACESIZE);

	size_t cbPerm = 0;
	BYTE* pNextConst = reinterpret_cast<BYTE*>(m_pConstObjs);
	int i;
	for (i = 0; i < NumPermanent; i++)
	{
		VariantObject* pConstObj = reinterpret_cast<VariantObject*>(pNextConst);

		OTE* ote = m_pOT + i;
		MWORD bytesToRead;
		MWORD allocSize;
		if (ote->isNullTerminated())
		{
			MWORD byteSize = ote->getSize();
			allocSize = byteSize + NULLTERMSIZE;
			bytesToRead = byteSize + ImageNullTerms;
		}
		else
		{
			allocSize = bytesToRead = ote->getSize();
		}

		if (bytesToRead > 0)
		{
			// Now load the rest of the object (size includes itself)
			if (!imageFile.read(&(pConstObj->m_fields), bytesToRead))
				return ImageReadError(imageFile);
		}
		else
		{
			if (allocSize == 0) pConstObj = NULL;
		}

		cbPerm += bytesToRead;
		pNextConst += _ROUND2(allocSize, 4);

		markObject(ote);
		Oop* oldLocation = reinterpret_cast<Oop*>(ote->m_location);
		ote->m_location = pConstObj;

		ote->beSticky();
		// Repair the object
		FixupObject(ote, oldLocation, pHeader);
	}

#ifdef _DEBUG
	TRACESTREAM << i<< L" permanent objects loaded totalling " << cbPerm<< L" bytes" << std::endl;
#endif

	memcpy(const_cast<VMPointers*>(&Pointers), &_Pointers, sizeof(Pointers));

	cbRead += cbPerm;
	return S_OK;
}
Example #5
// Rarely used, so don't inline it
POBJECT ObjectMemory::allocLargeObject(MWORD objectSize, OTE*& ote)
{
#ifdef MEMSTATS
	++m_nLargeAllocated;
#endif

	POBJECT pObj = static_cast<POBJECT>(allocChunk(_ROUND2(objectSize, sizeof(DWORD))));

	// allocateOop expects crit section to be used
	ote = allocateOop(pObj);
	ote->setSize(objectSize);
	ote->m_flags.m_space = OTEFlags::NormalSpace;
	return pObj;
}
Example #6
int __cdecl _heap_grow_region (
	REG1 unsigned index,
	size_t size
	)
{
	size_t left;
	REG2 size_t growsize;
	void * base;
	struct _heap_region_ *pHeapRegions;
	struct _heap_region_ *pHRTmp;


	/*
	 * Init some variables
	 * left = space left in region
	 * base = base of next section of region to validate
	 */

	pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
	pHRTmp = pHeapRegions + index;
	left = pHRTmp->_totalsize - pHRTmp->_currsize;

	base = (char *) (pHRTmp->_regbase) + pHRTmp->_currsize;

	/*
	 * Make sure we can satisfy request
	 */
	growsize = _ROUND2(size, _GRANULARITY);

	if (left < growsize)
		{
		size_t sizeTmp;
		
		sizeTmp = growsize-left+1+ pHRTmp->_totalsize;
		sizeTmp = _ROUND2(sizeTmp, _GRANULARITY);
		SetPtrSize(pHRTmp->_regbase, sizeTmp);
		pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
		pHRTmp = pHeapRegions + index;
		if (*pMemErr != 0)
			{
			goto error;
			}
		pHRTmp->_totalsize = sizeTmp;
		left = sizeTmp - pHRTmp->_currsize;
		base = (char *) (pHRTmp->_regbase) + pHRTmp->_currsize;
		}

	/*
	 * Update the region data base
	 */

	pHRTmp->_currsize += growsize;


#ifdef DEBUG
	/*
	 * The current size should never be greater than the total size
	 */

	if ((pHeapRegions + index)->_currsize > (pHeapRegions + index)->_totalsize)
		_heap_abort();
#endif


	/*
	 * Add the memory to the heap
	 */

	if (_heap_addblock(base, growsize) != 0)
		_heap_abort();


	/*
	 * Good return
	 */

	/* done:   unreferenced label to be removed */
		return(0);

	/*
	 * Error return
	 */

	error:
		return(-1);

}
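
The CRT routines in this and the following examples index a table of heap regions whose declaration is not shown; below is a hedged sketch of the fields they rely on (illustrative only, not the CRT's actual declaration).

struct _heap_region_ {
	void *	_regbase;		/* aligned base address of the region */
	void *	_regbaseCopy;	/* original pointer returned by NewPtr (Mac build only) */
	size_t	_totalsize;		/* bytes reserved for the region */
	size_t	_currsize;		/* bytes committed/validated so far */
};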
Example #7
int __cdecl _heap_grow (
	REG1 size_t size
	)
{
	REG2 int index;
	int free_entry = -1;

	/*
	 * Bump size to include header and round to nearest page boundary.
	 */

	size += _HDRSIZE;
	size = _ROUND2(size,_PAGESIZE_);

	/*
	 * Loop through the region table looking for an existing region
	 * we can grow.  Remember the index of the first null region entry.
	 *
	 * size = size of grow request
	 */

	for (index = 0; index < _HEAP_REGIONMAX; index++) {

		if ( (_heap_regions[index]._totalsize -
		    _heap_regions[index]._currsize) >= size )

			/*
			 * Grow this region to satisfy the request.
			 */

			return( _heap_grow_region(index, size) );


		if ( (free_entry == -1) &&
		    (_heap_regions[index]._regbase == NULL) )

			/*
			 * Remember 1st free table entry for later
			 */

			free_entry = index;

	}

	/*
	 * Could not find any existing regions to grow.  Try to
	 * get a new region.
	 *
	 * size = size of grow request
	 * free_entry = index of first free entry in table
	 */

	if ( free_entry >= 0 )

		/*
		 * Get a new region to satisfy the request.
		 */

		return( _heap_new_region(free_entry, size) );

	else
		/*
		 * No free table entries: return an error.
		 */

		return(-1);

}
Example #8
static int __cdecl _heap_new_region (
	REG1 unsigned index,
	size_t size
	)
{
	void * region;
	REG2 unsigned int regsize;
	struct _heap_region_ *pHeapRegions;

#ifdef DEBUG

	int i;

	/*
	 * Make sure the size has been rounded to a page boundary
	 */

//      if (size & (_PAGESIZE_-1))
//              _heap_abort();

	/*
	 * Make sure there's a free slot in the table
	 */

	pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
	i=0; 
	while (i < _heap_region_table_cur) {
		if (pHeapRegions->_regbase == NULL)
			break;
		pHeapRegions++;
		i++;
	}

	if (i >= _heap_region_table_cur)
		_heap_abort();

#endif

	/*
	 * Round the heap region size to a page boundary (in case
	 * the user played with it).
	 */

	regsize = _ROUND2(_heap_regionsize, _PAGESIZE_);

	/*
	 * See if region is big enough for request
	 */

	if (regsize < size)
		regsize = size;

	/*
	 * Go get the new region
	 */

#ifdef _M_MPPC
	if ((region = NewPtr(regsize + 8)) == NULL)
#else
	if ((region = NewPtr(regsize + 4)) == NULL)
#endif	
		{
		goto error;
		}

	/*
	 * Put the new region in the table.
	 */

	pHeapRegions = (struct _heap_region_ *)*hHeapRegions;
	if (((unsigned long)region & 0x3) != 0)
		{
		/* we are not allocating at 4 bytes bound */
		(pHeapRegions + index)->_regbase = (void *)_ROUND2((size_t)region, _GRANULARITY);
		}
	else
		{
		(pHeapRegions + index)->_regbase = region;
		}

	(pHeapRegions + index)->_regbaseCopy = region;
	(pHeapRegions + index)->_totalsize = regsize;
	(pHeapRegions + index)->_currsize = 0;


	/*
	 * Grow the region to satisfy the size request.
	 */

	if (_heap_grow_region(index, size) != 0) {

		/*
		 * Ouch.  Allocated a region but couldn't commit
		 * any pages in it.  Free region and return error.
		 */

		_heap_free_region(index);
		goto error;
	}


	/*
	 * Good return
	 */

	/* done:   unreferenced label to be removed */
		return(0);

	/*
	 * Error return
	 */

	error:
		return(-1);

}
Example #9
int __cdecl _heap_grow (
	REG1 size_t size
	)
{
	REG2 int index;
	struct _heap_region_ *pHeapRegions;
	int free_entry = -1;
	size_t sizeTmp;
	Handle hTemp;

	/*
	 * Bump size to include header and round to nearest page boundary.
	 */

	size += _HDRSIZE;
	size = _ROUND2(size, _GRANULARITY);

	/*
	 * Loop through the region table looking for an existing region
	 * we can grow.  Remember the index of the first null region entry.
	 *
	 * size = size of grow request
	 */

	for ( index=index_start ; index < _heap_region_table_cur; index++ ) {

		pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
			/*
			 * Grow this region to satisfy the request.
			 */
		if ( (pHeapRegions+index)->_regbase != NULL)
			{
			if (_heap_grow_region(index, size) != -1)
				{
				index_start = index;
				return 0;
				}
			}

		pHeapRegions = (struct _heap_region_ *)(*hHeapRegions);
		if ( (free_entry == -1) &&
		    ((pHeapRegions+index)->_regbase == NULL) )

			/*
			 * Remember 1st free table entry for later
			 */
			{
			free_entry = index;
			break;
			}

	}

	/*
	 * Could not find any existing regions to grow.  Try to
	 * get a new region.
	 *
	 * size = size of grow request
	 * free_entry = index of first free entry in table
	 */

	if ( free_entry == -1)
		/*
		 * No free table entries: grow heap region table.
		 */
		{
		sizeTmp = sizeof(struct _heap_region_)*(_heap_region_table_cur+_HEAP_REGIONMAX);
		if (hHeapRegions)
			{
			SetHandleSize(hHeapRegions, sizeTmp);
			}
		if (hHeapRegions== NULL || *pMemErr != 0)
			{
			/*
			 * grow failed
			 */
			hTemp = NewHandle(sizeTmp);
			if (hTemp == NULL)
				{
				return (-1);
				}
			HLock(hTemp);
			if (hHeapRegions != NULL)
				{
				BlockMove(*hHeapRegions, *hTemp, sizeof(struct _heap_region_)*_heap_region_table_cur);
				DisposeHandle(hHeapRegions);
				}
			hHeapRegions = hTemp;
			}
		/*
		 * set rest of the table to zero
		 */
		memset(*hHeapRegions + sizeof(struct _heap_region_)*_heap_region_table_cur, 0, sizeof(struct _heap_region_)*_HEAP_REGIONMAX);
		free_entry = _heap_region_table_cur;
		_heap_region_table_cur += _HEAP_REGIONMAX;
		}
	/*
	 * Get a new region to satisfy the request.
	 */

	return( _heap_new_region(free_entry, size) );
}
Example #10
int __cdecl _heap_grow_region (
	REG1 unsigned index,
	size_t size
	)
{
	size_t left;
	REG2 size_t growsize;
	void * base;
	unsigned dosretval;


	/*
	 * Init some variables
	 * left = space left in region
	 * base = base of next section of region to validate
	 */

	left = _heap_regions[index]._totalsize -
		_heap_regions[index]._currsize;

	base = (char *) _heap_regions[index]._regbase +
		_heap_regions[index]._currsize;

	/*
	 * Make sure we can satisfy request
	 */

	if (left < size)
		goto error;

	/*
	 * Round size up to next _heap_growsize boundary.
	 * (Must round _heap_growsize itself to page boundary, in
	 * case user set it himself).
	 */

	growsize = _ROUND2(_heap_growsize, _PAGESIZE_);
	growsize = _ROUND(size, growsize);

	if (left < growsize)
		growsize = left;

	/*
	 * Validate the new portion of the region
	 */

	if (!VirtualAlloc(base, growsize, MEM_COMMIT, PAGE_READWRITE))
		dosretval = GetLastError();
	else
		dosretval = 0;

	if (dosretval)
		/*
		 * Error committing pages.  If out of memory, return
		 * error, else abort.
		 */

		if (dosretval == ERROR_NOT_ENOUGH_MEMORY)
			goto error;
		else
			_heap_abort();


	/*
	 * Update the region data base
	 */

	_heap_regions[index]._currsize += growsize;


#ifdef DEBUG
	/*
	 * The current size should never be greater than the total size
	 */

	if (_heap_regions[index]._currsize > _heap_regions[index]._totalsize)
		_heap_abort();
#endif


	/*
	 * Add the memory to the heap
	 */

	if (_heap_addblock(base, growsize) != 0)
		_heap_abort();


	/*
	 * Good return
	 */

	/* done:   unreferenced label to be removed */
		return(0);

	/*
	 * Error return
	 */

	error:
		return(-1);

}
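
Note that this variant also uses a second macro, _ROUND, to round up to an arbitrary multiple (growsize need not be a power of two). A hedged sketch, companion to the _ROUND2 sketch after Example #1:

/* Illustrative definition only: round n up to the next multiple of any positive unit. */
#define _ROUND(n, unit)	((((size_t)(n) + (size_t)(unit) - 1) / (size_t)(unit)) * (size_t)(unit))
/* e.g. _ROUND(5000, 12288) == 12288, _ROUND(20000, 12288) == 24576 */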
Example #11
static int __cdecl _heap_new_region (
	REG1 unsigned index,
	size_t size
	)
{
	void * region;
	REG2 unsigned int regsize;

#ifdef DEBUG

	int i;

	/*
	 * Make sure the size has been rounded to a page boundary
	 */

	if (size & (_PAGESIZE_-1))
		_heap_abort();

	/*
	 * Make sure there's a free slot in the table
	 */

	for (i=0; i < _HEAP_REGIONMAX; i++) {
		if (_heap_regions[i]._regbase == NULL)
			break;
	}

	if (i >= _HEAP_REGIONMAX)
		_heap_abort();

#endif

	/*
	 * Round the heap region size to a page boundary (in case
	 * the user played with it).
	 */

	regsize = _ROUND2(_heap_regionsize, _PAGESIZE_);

	/*
	 * To accommodate large users, request twice
	 * as big a region next time around.
	 */

	if ( _heap_regionsize < _heap_maxregsize )
		_heap_regionsize *= 2 ;

	/*
	 * See if region is big enough for request
	 */

	if (regsize < size)
		regsize = size;

	/*
	 * Go get the new region
	 */

	if (!(region = VirtualAlloc(NULL, regsize, MEM_RESERVE, PAGE_READWRITE)))
		goto error;

	/*
	 * Put the new region in the table.
	 */

	 _heap_regions[index]._regbase = region;
	 _heap_regions[index]._totalsize = regsize;
	 _heap_regions[index]._currsize = 0;


	/*
	 * Grow the region to satisfy the size request.
	 */

	if (_heap_grow_region(index, size) != 0) {

		/*
		 * Ouch.  Allocated a region but couldn't commit
		 * any pages in it.  Free region and return error.
		 */

		_heap_free_region(index);
		goto error;
	}


	/*
	 * Good return
	 */

	/* done:   unreferenced label to be removed */
		return(0);

	/*
	 * Error return
	 */

	error:
		return(-1);

}
Example #12
// Perform a compacting GC
size_t ObjectMemory::compact(Oop* const sp)
{
	TRACE("Compacting OT, size %d, free %d, ...\n", m_nOTSize, m_pOT + m_nOTSize - m_pFreePointerList);
	EmptyZct(sp);

	// First perform a normal GC
	reclaimInaccessibleObjects(GCNormal);

	Interpreter::freePools();

	// Walk the OT from the bottom to locate free entries, and from the top to locate candidates to move
	// 

	size_t moved = 0;
	OTE* last = m_pOT + m_nOTSize - 1;
	OTE* first = m_pOT;
#pragma warning(push)
#pragma warning(disable : 4127)
	while(true)
#pragma warning(pop)
	{
		// Look for a tail ender
		while (last > first && last->isFree())
			last--;
		// Look for a free slot
		while (first < last && !first->isFree())
			first++;
		if (first == last)
			break;	// Met in the middle, we're done
		
		HARDASSERT(first->isFree());
		HARDASSERT(!last->isFree());

		// Copy the tail ender over the free slot
		*first = *last;
		moved++;
		// Leave forwarding pointer in the old slot
		last->m_location = reinterpret_cast<POBJECT>(first);
		last->beFree();
		last->m_count = 0;
		// Advance last as we've moved this slot
		last--;
	}

	HARDASSERT(last == first);
	// At this point, last == first, and the first free slot will be that after last

	TRACE("%d OTEs compacted\n", moved);

	// Now we can update the objects using the forwarding pointers in the old slots

	// We must remove the const. spaces memory protect for the duration of the pointer update
	ProtectConstSpace(PAGE_READWRITE);

	// New head of free list is first OTE after the single contiguous block of used OTEs
	// Need to set this before compacting as 
	m_pFreePointerList = last+1;

	// Now run through the new OT and update the Oops in the objects
	OTE* pOTE = m_pOT;
	while (pOTE <= last)
	{
		compactObject(pOTE);
		pOTE++;
	}

	// Note that this copies VMPointers to cache area
	ProtectConstSpace(PAGE_READONLY);

	// We must inform the interpreter that it needs to update any cached Oops from the forward pointers
	// before we rebuild the free list (which will destroy those pointers to the new OTEs)
	Interpreter::OnCompact();

	// The last used slot will be the slot before the first entry in the free list
	// Using this, round up from the last used slot to the commit granularity, then decommit any later slots
	// 
	
	OTE* end = (OTE*)_ROUND2(reinterpret_cast<ULONG_PTR>(m_pFreePointerList + 1), dwAllocationGranularity);

#ifdef _DEBUG
	m_nFreeOTEs = end - m_pFreePointerList;
#endif

	SIZE_T bytesToDecommit = reinterpret_cast<ULONG_PTR>(m_pOT + m_nOTSize) - reinterpret_cast<ULONG_PTR>(end);
	::VirtualFree(end, bytesToDecommit, MEM_DECOMMIT);
	m_nOTSize = end - m_pOT;

	// Now fix up the free list
	OTE* cur = m_pFreePointerList;
	while (cur < end)
	{
		HARDASSERT(cur->isFree());
		cur->m_location = reinterpret_cast<POBJECT>(cur + 1);
		cur++;
	}

	// Could do this before or after check refs, since that can account for Zct state
	PopulateZct(sp);

	CHECKREFERENCES

	HeapCompact();

	TRACE("... OT compacted, size %d, free %d.\n", m_nOTSize, end - m_pFreePointerList);

	Interpreter::scheduleFinalization();

	return m_pFreePointerList - m_pOT;
}
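
The loop above is a two-finger compaction: one cursor scans up from the bottom for free slots, the other scans down from the top for live entries, and each moved entry leaves a forwarding pointer in its old slot. Below is a minimal sketch of the same idea on a plain array (a hypothetical Slot type, not Dolphin's OTE).

struct Slot { bool free; int forward; };	// forward = index of new home, -1 if not moved

// Compact so that all live entries end up at the front; returns the number of moves.
int twoFingerCompact(Slot* slots, int n)
{
	int moved = 0, first = 0, last = n - 1;
	for (;;)
	{
		while (last > first && slots[last].free) last--;		// find a live "tail ender"
		while (first < last && !slots[first].free) first++;	// find a free slot
		if (first >= last)
			break;												// cursors met: done
		slots[first] = slots[last];								// move the live entry down
		slots[last].free = true;								// old slot becomes free...
		slots[last].forward = first;							// ...and records the new home
		moved++;
		last--;
	}
	return moved;
}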
Example #13
	void ObjectMemory::DumpStats()
	{
		tracelock lock(TRACESTREAM);

		TRACESTREAM << std::endl<< L"Object Memory Statistics:" << std::endl
			<< L"------------------------------" << std::endl;

		CheckPoint();
		_CrtMemDumpStatistics(&CRTMemState);

#ifdef _DEBUG
		checkPools();
#endif

		TRACESTREAM << std::endl<< L"Pool Statistics:" << std::endl
			 << L"------------------" << std::endl << std::dec
			  << NumPools<< L" pools in the interval ("
			  << m_pools[0].getSize()<< L" to: "
			  << m_pools[NumPools-1].getSize()<< L" by: "
			  << PoolGranularity << L')' << std::endl << std::endl;

		int pageWaste=0;
		int totalPages=0;
		int totalFreeBytes=0;
		int totalChunks=0;
		int totalFreeChunks=0;
		for (int i=0;i<NumPools;i++)
		{
			int nSize = m_pools[i].getSize();
			int perPage = dwPageSize/nSize;
			int wastePerPage = dwPageSize - (perPage*nSize);
			int nPages = m_pools[i].getPages();
			int nChunks = perPage*nPages;
			int waste = nPages*wastePerPage;
			int nFree = m_pools[i].getFree();
			TRACE(L"%d: size %d, %d objects on %d pgs (%d per pg, %d free), waste %d (%d per page)\n",
				i, nSize, nChunks-nFree, nPages, perPage, nFree, waste, wastePerPage);
			totalChunks += nChunks;
			pageWaste += waste;
			totalPages += nPages;
			totalFreeBytes += nFree*nSize;
			totalFreeChunks += nFree;
		}

		int objectWaste = 0;
		int totalObjects = 0;
		const OTE* pEnd = m_pOT+m_nOTSize;
		for (OTE* ote=m_pOT; ote < pEnd; ote++)
		{
			if (!ote->isFree())
			{
				totalObjects++;
				if (ote->heapSpace() == OTEFlags::PoolSpace)
				{
					int size = ote->sizeOf();
					int chunkSize = _ROUND2(size, PoolGranularity);
					objectWaste += chunkSize - size;
				}
			}
		}

		int wastePercentage = (totalChunks - totalFreeChunks) == 0 
								? 0 
								: int(double(objectWaste)/
										double(totalChunks-totalFreeChunks)*100.0);

		TRACESTREAM<< L"===============================================" << std::endl;
		TRACE(L"Total objects	= %d\n"
			  "Total pool objs	= %d\n"
			  "Total chunks		= %d\n"
			  "Total Pages		= %d\n"
			  "Total Allocs		= %d\n"
			  "Total allocated	= %d\n"
			  "Page Waste		= %d bytes\n"
			  "Object Waste		= %d bytes (avg 0.%d)\n"
			  "Total Waste		= %d\n"
			  "Total free chks	= %d\n"
			  "Total Free		= %d bytes\n",
				totalObjects,
				totalChunks-totalFreeChunks,
				totalChunks,
				totalPages, 
				FixedSizePool::m_nAllocations,
				totalPages*dwPageSize, 
				pageWaste, 
				objectWaste, wastePercentage,
				pageWaste+objectWaste,
				totalFreeChunks,
				totalFreeBytes);
	}
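
A worked example of the per-pool waste arithmetic above, under assumed figures (4KB pages, a 40-byte chunk size, hypothetical occupancy):

// Illustrative numbers only - they are not taken from a real pool.
void poolWasteExample()
{
	const int pageSize = 4096, nSize = 40;			// assumed page and chunk size
	int perPage = pageSize / nSize;					// 102 chunks fit in one page
	int wastePerPage = pageSize - perPage * nSize;	// 4096 - 4080 = 16 bytes lost per page
	const int nPages = 10, nFree = 50;				// assumed pool occupancy
	int nChunks = perPage * nPages;					// 1020 chunks in total
	int waste = nPages * wastePerPage;				// 160 bytes of page-level waste
	int allocated = nChunks - nFree;				// 970 chunks in use (the TRACE's "objects")
	(void)waste; (void)allocated;
}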
Example #14
template <bool MaybeZ, bool Initialized> BytesOTE* ObjectMemory::newByteObject(BehaviorOTE* classPointer, MWORD elementCount)
{
	Behavior& byteClass = *classPointer->m_location;
	OTE* ote;

	if (!MaybeZ || !byteClass.m_instanceSpec.m_nullTerminated)
	{
		ASSERT(!classPointer->m_location->m_instanceSpec.m_nullTerminated);

		VariantByteObject* newBytes = static_cast<VariantByteObject*>(allocObject(elementCount + SizeOfPointers(0), ote));
		ASSERT((elementCount > MaxSizeOfPoolObject && ote->heapSpace() == OTEFlags::NormalSpace)
			|| ote->heapSpace() == OTEFlags::PoolSpace);

		ASSERT(ote->getSize() == elementCount + SizeOfPointers(0));

		if (Initialized)
		{
			// Byte objects are initialized to zeros (but not the header)
			// Note that we round up to initialize to the next DWORD
			// This can be useful when working on a 32-bit word machine
			ZeroMemory(newBytes->m_fields, _ROUND2(elementCount, sizeof(DWORD)));
			classPointer->countUp();
		}

		ote->m_oteClass = classPointer;
		ote->beBytes();
	}
	else
	{
		ASSERT(classPointer->m_location->m_instanceSpec.m_nullTerminated);

		MWORD objectSize;

		switch (reinterpret_cast<const StringClass&>(byteClass).Encoding)
		{
		case StringEncoding::Ansi:
		case StringEncoding::Utf8:
			objectSize = elementCount * sizeof(AnsiString::CU);
			break;
		case StringEncoding::Utf16:
			objectSize = elementCount * sizeof(Utf16String::CU);
			break;
		case StringEncoding::Utf32:
			objectSize = elementCount * sizeof(Utf32String::CU);
			break;
		default:
			__assume(false);
			break;
		}

		// TODO: Allocate the correct number of null term bytes based on the encoding
		objectSize += NULLTERMSIZE;

		VariantByteObject* newBytes = static_cast<VariantByteObject*>(allocObject(objectSize + SizeOfPointers(0), ote));
		ASSERT((objectSize > MaxSizeOfPoolObject && ote->heapSpace() == OTEFlags::NormalSpace)
			|| ote->heapSpace() == OTEFlags::PoolSpace);

		ASSERT(ote->getSize() == objectSize + SizeOfPointers(0));

		if (Initialized)
		{
			// Byte objects are initialized to zeros (but not the header)
			// Note that we round up to initialize to the next DWORD
			// This can be useful when working on a 32-bit word machine
			ZeroMemory(newBytes->m_fields, _ROUND2(objectSize, sizeof(DWORD)));
			classPointer->countUp();
		}
		else
		{
			// We still want to ensure the null terminator is set, even if not initializing the rest of the object
			*reinterpret_cast<NULLTERMTYPE*>(&newBytes->m_fields[objectSize - NULLTERMSIZE]) = 0;
		}

		ote->m_oteClass = classPointer;
		ote->beNullTerminated();
		HARDASSERT(ote->isBytes());
	}

	return reinterpret_cast<BytesOTE*>(ote);
}
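
A worked example of the size computation in the null-terminated branch, assuming the usual code-unit sizes (Ansi/Utf8 = 1 byte, Utf16 = 2 bytes, Utf32 = 4 bytes) and that NULLTERMSIZE is the size of one terminating code-unit slot:

// Illustration only: byte size of a null-terminated UTF-16 body of elementCount code units.
unsigned utf16BodySizeExample(unsigned elementCount, unsigned nullTermSize)
{
	unsigned objectSize = elementCount * 2;	// e.g. 5 UTF-16 code units -> 10 bytes
	objectSize += nullTermSize;				// add room for the terminator
	// As in newByteObject, the terminator is then written at
	// m_fields[objectSize - nullTermSize], i.e. into the object's final slot.
	return objectSize;
}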