Example #1
// There are some fixups that we can only apply after all the objects are loaded, because
// they involve references from one object to others that may not be available
// during the normal load process. Those fixups are applied here.
void ObjectMemory::PostLoadFix()
{
	// Special case handling for Contexts: in the image file the SPs are
	// stored as integers, but at run-time they are expected to be
	// direct pointers
	const OTE* pEnd = m_pOT + m_nOTSize;	// Loop invariant
	for (OTE* ote = m_pOT; ote < pEnd; ote++)
	{
		if (!ote->isFree())
		{
			if (ote->isBytes())
			{
#ifdef _DEBUG
				{
					// It's a byte object, and may be null terminated
					const Behavior* behavior = ote->m_oteClass->m_location;
					const BytesOTE* oteBytes = reinterpret_cast<const BytesOTE*>(ote);
					const VariantByteObject* object = oteBytes->m_location;
					ASSERT(behavior->m_instanceSpec.m_nullTerminated == ote->isNullTerminated());
				}
#endif
			}
			else if (ote->m_oteClass == _Pointers.ClassProcess)
			{
				ASSERT(ote->heapSpace() == OTEFlags::VirtualSpace);
				ProcessOTE* oteProcess = reinterpret_cast<ProcessOTE*>(ote);
				Process* process = oteProcess->m_location;
				process->PostLoadFix(oteProcess);
			}
		}
	}

	ProtectConstSpace(PAGE_READONLY);

#if defined(_DEBUG) && 0
	{
		// Dump out the pointers
		TRACESTREAM << NumPointers << L" VM Pointers..." << std::endl;
		for (int i = 0; i < NumPointers; i++)
		{
			VariantObject* obj = static_cast<VariantObject*>(m_pConstObjs);
			POTE pote = POTE(obj->m_fields[i]);
			TRACESTREAM << i << L": " << pote << std::endl;
		}
	}
#endif
}
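For context, here is a minimal sketch of the kind of conversion Process::PostLoadFix (called above) might perform. The real member is defined elsewhere; the m_suspendedFrame field name, the SMALLINTEGER/integerValueOf helpers, and the rebasing scheme are purely illustrative assumptions.

// Hypothetical sketch only, not the actual Process::PostLoadFix. The image
// stores the SP as a SmallInteger offset; at run-time the interpreter
// expects a direct pointer into the process stack.
void Process::PostLoadFix(ProcessOTE* oteMe)
{
	// m_suspendedFrame is an assumed field name holding the saved SP
	if (isIntegerObject(m_suspendedFrame))
	{
		intptr_t offset = integerValueOf(m_suspendedFrame);	// strip the tag
		// Rebase the integer offset into a direct pointer relative to
		// this Process object's freshly loaded location
		m_suspendedFrame = reinterpret_cast<Oop>(
			reinterpret_cast<BYTE*>(this) + offset);
	}
}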
Example #2
void ObjectMemory::EmptyZct(Oop* const sp)
{
	if (m_bIsReconcilingZct)
		__debugbreak();
#ifdef _DEBUG
	nDeleted = 0;

	if (!alwaysReconcileOnAdd || Interpreter::executionTrace)
		CHECKREFSNOFIX
	else
		checkStackRefs();
#endif

	// Bump the refs from the stack. Any objects remaining in the ZCT with zero counts
	// are truly garbage.
	Interpreter::IncStackRefs(sp);

	OTE** pZct = m_pZct;
	// This tells us that we are in the process of reconciliation
	m_bIsReconcilingZct = true;
	const int nOldZctEntries = m_nZctEntries;
	m_nZctEntries = -1;

	for (int i = 0; i < nOldZctEntries; i++)
	{
		OTE* ote = pZct[i];
		if (!ote->isFree() && ote->m_flags.m_count == 0)
		{
			// Note that deallocation cannot create new Zct entries.
			// Because we have bumped the ref. counts of all stack ref'd objects, only true
			// garbage objects can ever have a ref. count of zero. Therefore if recursively
			// counting down turns up new zero ref. counts, these should not be added to
			// the Zct, but deallocated. To achieve this we set a global flag to indicate
			// that we are reconciling; see AddToZct() above.
#ifdef _DEBUG
			nDeleted++;
#endif
			recursiveFree(ote);
		}
	}

//	CHECKREFSNOFIX
}
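The comment in the loop refers to AddToZct(); here is a minimal sketch of that counterpart under the policy described above. The m_nZctHighWater member and ReconcileZct() call are illustrative assumptions, not the actual implementation; only the m_bIsReconcilingZct test is taken from the source.

// Sketch of the AddToZct() counterpart referenced above; assumed, not the
// actual implementation.
void ObjectMemory::AddToZct(OTE* ote)
{
	if (m_bIsReconcilingZct)
	{
		// EmptyZct() has already bumped all stack refs, so an object whose
		// count drops to zero now is true garbage: free it immediately
		recursiveFree(ote);
	}
	else
	{
		m_pZct[m_nZctEntries++] = ote;
		if (m_nZctEntries >= m_nZctHighWater)	// assumed threshold member
			ReconcileZct();						// assumed: empties and repopulates
	}
}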
Example #3
// Compact an object by updating all the Oops it contains using the
// forwarding pointers in the old OT.
void ObjectMemory::compactObject(OTE* ote)
{
	// We shouldn't come in here unless the OTE has already been fixed up for this object
	HARDASSERT(ote >= m_pOT && ote < m_pFreePointerList);

	// First fix up the class (remember that the new object pointer is stored in the
	// old one's object location slot)
	compactOop(ote->m_oteClass);

	if (ote->isPointers())
	{
		VariantObject* varObj = static_cast<VariantObject*>(ote->m_location);
		const MWORD lastPointer = ote->pointersSize();
		for (MWORD i = 0; i < lastPointer; i++)
		{
			// This will get nicely optimised by the compiler
			Oop fieldPointer = varObj->m_fields[i];

			// We don't need to fix up SmallIntegers, which are immediate values, not Oops into the OT
			if (!isIntegerObject(fieldPointer))
			{
				OTE* fieldOTE = reinterpret_cast<OTE*>(fieldPointer);
				// If pointing at a freed object, then it has been moved
				if (fieldOTE->isFree())
				{
					// Should be one of the old OT entries, whose forwarding pointer targets a slot in the compacted OT
					Oop movedTo = reinterpret_cast<Oop>(fieldOTE->m_location);
					HARDASSERT(movedTo >= (Oop)m_pOT && movedTo < (Oop)m_pFreePointerList);
					// Get the new OTE from the old ...
					varObj->m_fields[i] = movedTo;
				}
			}
		}
	}
	// else, we don't even need to look at the body of byte objects any more
}
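compactOop(), used above for the class slot, is defined elsewhere; given how the field loop handles forwarding, it presumably amounts to something like this sketch (the template form is an assumption for illustration):

// Assumed sketch of compactOop(): a freed OTE's m_location slot holds the
// forwarding pointer to the moved entry, exactly as in the field loop above.
template <typename T>
inline void compactOop(T*& ote)
{
	OTE* p = reinterpret_cast<OTE*>(ote);
	if (p->isFree())
		ote = reinterpret_cast<T*>(p->m_location);
}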
Example #4
template <MWORD ImageNullTerms> HRESULT ObjectMemory::LoadObjects(ibinstream & imageFile, const ImageHeader * pHeader, size_t & cbRead)
{
	// Other free OTEs will be threaded in front of the first OTE off the end
	// of the currently committed table space. We set the free list pointer
	// to that OTE rather than NULL to distinguish attempts to access off the
	// end of the current table, which then allows us to dynamically grow it
	// on demand
	OTE* pEnd = m_pOT + pHeader->nTableSize;
	m_pFreePointerList = reinterpret_cast<OTE*>(pEnd);

#ifdef _DEBUG
	unsigned numObjects = NumPermanent;	// Allow for VM registry, etc!
	m_nFreeOTEs = m_nOTSize - pHeader->nTableSize;
#endif

	size_t nDataSize = 0;
	for (OTE* ote = m_pOT + NumPermanent; ote < pEnd; ote++)
	{
		if (!ote->isFree())
		{
			MWORD byteSize = ote->getSize();

			MWORD* oldLocation = reinterpret_cast<MWORD*>(ote->m_location);

			Object* pBody;

			// Allocate space for the object, and copy into that space
			if (ote->heapSpace() == OTEFlags::VirtualSpace)
			{
				MWORD dwMaxAlloc;
				if (!imageFile.read(&dwMaxAlloc, sizeof(MWORD)))
					return ImageReadError(imageFile);
				cbRead += sizeof(MWORD);

				pBody = reinterpret_cast<Object*>(AllocateVirtualSpace(dwMaxAlloc, byteSize));
				ote->m_location = pBody;
			}
			else
			{
				if (ote->isNullTerminated())
				{
					ASSERT(!ote->isPointers());
					pBody = AllocObj(ote, byteSize + NULLTERMSIZE);
					if (NULLTERMSIZE > ImageNullTerms)
					{
						// Ensure we have a full null-terminator
						*reinterpret_cast<NULLTERMTYPE*>(static_cast<VariantByteObject*>(pBody)->m_fields+byteSize) = 0;
					}
					byteSize += ImageNullTerms;
				}
				else
				{
					pBody = AllocObj(ote, byteSize);
				}

			}

			markObject(ote);
			if (!imageFile.read(pBody, byteSize))
				return ImageReadError(imageFile);

			cbRead += byteSize;
			FixupObject(ote, oldLocation, pHeader);

#ifdef _DEBUG
			numObjects++;
#endif
		}
		else
		{
			// Thread onto the free list
			ote->m_location = (reinterpret_cast<POBJECT>(m_pFreePointerList));
			m_pFreePointerList = ote;
#ifdef _DEBUG
			m_nFreeOTEs++;
#endif
		}
	}

	// Note that we don't terminate the free list with a null, because
	// it must point off into space in order to get a GPF when it
	// needs to be expanded (at which point we commit more pages)

#ifdef _DEBUG
	ASSERT(numObjects + m_nFreeOTEs == m_nOTSize);
	ASSERT(m_nFreeOTEs == CountFreeOTEs());
	TRACESTREAM << std::dec << numObjects << L", " << m_nFreeOTEs << L" free" << std::endl;
#endif

	cbRead += nDataSize;
	return S_OK;
}
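CountFreeOTEs(), used in the debug assertion above, is not shown in this excerpt. Given the free-list structure just built (threaded through m_location and deliberately unterminated), a plausible sketch follows; it assumes the committed-but-unused tail of the table is also threaded, so that the walk eventually runs off the end of the table.

// Assumed implementation for illustration: walk the free list, following
// the m_location links, until it runs past the end of the table. The list
// is deliberately not null-terminated, as the comment above explains.
size_t ObjectMemory::CountFreeOTEs()
{
	const OTE* pEnd = m_pOT + m_nOTSize;
	size_t count = 0;
	const OTE* ote = m_pFreePointerList;
	while (ote < pEnd)
	{
		count++;
		ote = reinterpret_cast<const OTE*>(ote->m_location);
	}
	return count;
}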
Example #5
// Perform a compacting GC
size_t ObjectMemory::compact(Oop* const sp)
{
	TRACE("Compacting OT, size %d, free %d, ...\n", m_nOTSize, m_pOT + m_nOTSize - m_pFreePointerList);
	EmptyZct(sp);

	// First perform a normal GC
	reclaimInaccessibleObjects(GCNormal);

	Interpreter::freePools();

	// Walk the OT from the bottom to locate free entries, and from the top
	// to locate candidates to move

	size_t moved = 0;
	OTE* last = m_pOT + m_nOTSize - 1;
	OTE* first = m_pOT;
#pragma warning(push)
#pragma warning(disable : 4127)
	while(true)
#pragma warning(pop)
	{
		// Look for a tail ender
		while (last > first && last->isFree())
			last--;
		// Look for a free slot
		while (first < last && !first->isFree())
			first++;
		if (first == last)
			break;	// Met in the middle, we're done
		
		HARDASSERT(first->isFree());
		HARDASSERT(!last->isFree());

		// Copy the tail ender over the free slot
		*first = *last;
		moved++;
		// Leave forwarding pointer in the old slot
		last->m_location = reinterpret_cast<POBJECT>(first);
		last->beFree();
		last->m_count = 0;
		// Advance last as we've moved this slot
		last--;
	}

	HARDASSERT(last == first);
	// At this point, last == first, and the first free slot will be that after last

	TRACE("%d OTEs compacted\n", moved);

	// Now we can update the objects using the forwarding pointers in the old slots

	// We must remove the const space's memory protection for the duration of the pointer update
	ProtectConstSpace(PAGE_READWRITE);

	// New head of free list is first OTE after the single contiguous block of used OTEs.
	// Need to set this before compacting, as compactObject() asserts that forwarding
	// pointers fall below m_pFreePointerList
	m_pFreePointerList = last+1;

	// Now run through the new OT and update the Oops in the objects
	OTE* pOTE = m_pOT;
	while (pOTE <= last)
	{
		compactObject(pOTE);
		pOTE++;
	}

	// Note that this copies VMPointers to cache area
	ProtectConstSpace(PAGE_READONLY);

	// We must inform the interpreter that it needs to update any cached Oops from the forward pointers
	// before we rebuild the free list (which will destroy those pointers to the new OTEs)
	Interpreter::OnCompact();

	// The last used slot will be the slot before the first entry in the free list.
	// Using this, round up from the last used slot to the commit granularity, then
	// decommit any later slots
	
	OTE* end = (OTE*)_ROUND2(reinterpret_cast<ULONG_PTR>(m_pFreePointerList + 1), dwAllocationGranularity);

#ifdef _DEBUG
	m_nFreeOTEs = end - m_pFreePointerList;
#endif

	SIZE_T bytesToDecommit = reinterpret_cast<ULONG_PTR>(m_pOT + m_nOTSize) - reinterpret_cast<ULONG_PTR>(end);
	::VirtualFree(end, bytesToDecommit, MEM_DECOMMIT);
	m_nOTSize = end - m_pOT;

	// Now fix up the free list
	OTE* cur = m_pFreePointerList;
	while (cur < end)
	{
		HARDASSERT(cur->isFree());
		cur->m_location = reinterpret_cast<POBJECT>(cur + 1);
		cur++;
	}

	// Could do this before or after the reference check, since CHECKREFERENCES can account for Zct state
	PopulateZct(sp);

	CHECKREFERENCES

	HeapCompact();

	TRACE("... OT compacted, size %d, free %d.\n", m_nOTSize, end - m_pFreePointerList);

	Interpreter::scheduleFinalization();

	return m_pFreePointerList - m_pOT;
}
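_ROUND2, used above to find the decommit boundary, is not defined in this excerpt; it is presumably the usual round-up-to-a-power-of-two-boundary macro, along these lines:

// Assumed definition for illustration: round n up to the next multiple of
// 'to', which must be a power of two (true of the Win32 allocation
// granularity that dwAllocationGranularity presumably holds).
#define _ROUND2(n, to) (((ULONG_PTR)(n) + ((ULONG_PTR)(to) - 1)) & ~((ULONG_PTR)(to) - 1))

With this, every whole page of OTEs above the rounded boundary can be handed back to the OS with MEM_DECOMMIT, to be recommitted on demand when the table next needs to grow.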