Example #1
// N.B. Like the other instantiate methods in ObjectMemory, this method for instantiating
// objects in virtual space (used for allocating Processes, for example) does not adjust
// the requested sizes to allow for any fixed fields - callers must do that themselves.
VirtualOTE* ObjectMemory::newVirtualObject(BehaviorOTE* classPointer, MWORD initialSize, MWORD maxSize)
{
	#ifdef _DEBUG
	{
		ASSERT(isBehavior(Oop(classPointer)));
		Behavior& behavior = *classPointer->m_location;
		ASSERT(behavior.isIndexable());
	}
	#endif

	// Round the sizes up to acceptable bounds (whole numbers of pages)
	if (initialSize <= dwOopsPerPage)
		initialSize = dwOopsPerPage;
	else
		initialSize = _ROUND2(initialSize, dwOopsPerPage);

	if (maxSize < initialSize)
		maxSize = initialSize;
	else
		maxSize = _ROUND2(maxSize, dwOopsPerPage);

	// We have to allow for the virtual allocation overhead: AllocateVirtualSpace adds in space
	// for the VirtualObjectHeader, so the initial size must exclude it, whereas the maximum
	// size passed on should include it
	initialSize -= sizeof(VirtualObjectHeader)/sizeof(MWORD);

	unsigned byteSize = initialSize*sizeof(MWORD);
	VariantObject* pLocation = reinterpret_cast<VariantObject*>(AllocateVirtualSpace(maxSize * sizeof(MWORD), byteSize));
	if (pLocation)
	{
		// Strictly there is no need to alter the ref. count of the class (e.g. Process), as it
		// is sticky, but it is counted up below anyway for consistency

		// Fill space with nils for initial values
		const Oop nil = Oop(Pointers.Nil);
		const unsigned loopEnd = initialSize;
		for (unsigned i = 0; i < loopEnd; i++)
			pLocation->m_fields[i] = nil;

		OTE* ote = ObjectMemory::allocateOop(static_cast<POBJECT>(pLocation));
		ote->setSize(byteSize);
		ote->m_oteClass = classPointer;
		classPointer->countUp();
		ote->m_flags = m_spaceOTEBits[OTEFlags::VirtualSpace];
		ASSERT(ote->isPointers());

		return reinterpret_cast<VirtualOTE*>(ote);
	}

	return nullptr;
}
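
The header note above leaves the fixed-field adjustment to callers of newVirtualObject. Below is a minimal sketch of such a caller; the wrapper name newVirtualInstance is hypothetical, and simply adding the fixed-field count to both sizes is an assumption for illustration rather than the VM's actual calling convention.

// Hypothetical caller sketch - not part of the Dolphin sources. It folds the class's
// fixed instance variables into the requested sizes, which newVirtualObject deliberately
// does not do itself.
static VirtualOTE* newVirtualInstance(BehaviorOTE* classPointer, MWORD indexableSlots, MWORD maxIndexableSlots)
{
	const Behavior& behavior = *classPointer->m_location;
	const MWORD fixedFields = behavior.m_instanceSpec.m_fixedFields;

	// Callers must allow for the fixed fields themselves (see the note above newVirtualObject)
	return ObjectMemory::newVirtualObject(classPointer,
						fixedFields + indexableSlots,
						fixedFields + maxIndexableSlots);
}
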
Example #2
PointersOTE* __fastcall ObjectMemory::newUninitializedPointerObject(BehaviorOTE* classPointer, MWORD oops)
{
	// Total size must fit in a DWORD (i.e. 32 bits)
	ASSERT(oops < ((DWORD(1) << 30) - ObjectHeaderSize));

	// Don't worry, the compiler will not really use a multiply instruction here
	MWORD objectSize = SizeOfPointers(oops);
	OTE* ote;
	allocObject(objectSize, ote);
	ASSERT((objectSize > MaxSizeOfPoolObject && ote->heapSpace() == OTEFlags::NormalSpace)
			|| ote->heapSpace() == OTEFlags::PoolSpace);

	// These are stored in the object itself
	ASSERT(ote->getSize() == objectSize);
	classPointer->countUp();
	ote->m_oteClass = classPointer;

	// Deliberately DO NOT initialise the fields to nils - that is the caller's responsibility

	ASSERT(ote->isPointers());
	
	return reinterpret_cast<PointersOTE*>(ote);
}
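
Because newUninitializedPointerObject leaves the fields as garbage, the caller must assign every slot before the object can safely be used. The sketch below illustrates that obligation; the helper shallowCopyPointers is hypothetical (not from the sources) and assumes ObjectMemory::countUp(Oop) is available to keep the ref. counts of the copied references consistent.

// Hypothetical sketch - not part of the Dolphin sources. Every field of the freshly
// allocated, uninitialized object is written before the copy is handed out.
static PointersOTE* shallowCopyPointers(PointersOTE* oteSource)
{
	const MWORD oops = oteSource->pointersSize();
	PointersOTE* oteCopy = ObjectMemory::newUninitializedPointerObject(oteSource->m_oteClass, oops);

	VariantObject* src = oteSource->m_location;
	VariantObject* dst = oteCopy->m_location;
	for (MWORD i = 0; i < oops; i++)
	{
		Oop field = src->m_fields[i];
		ObjectMemory::countUp(field);	// the copy now holds an additional reference to each field
		dst->m_fields[i] = field;
	}
	return oteCopy;
}
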
Example #3
// Uses object identity to locate the next occurrence of the argument in the receiver
// between the specified 'from' and 'to' indices (inclusive)
Oop* __fastcall Interpreter::primitiveNextIndexOfFromTo()
{
	Oop integerPointer = stackTop();
	if (!ObjectMemoryIsIntegerObject(integerPointer))
		return primitiveFailure(0);				// to not an integer
	const SMALLINTEGER to = ObjectMemoryIntegerValueOf(integerPointer);

	integerPointer = stackValue(1);
	if (!ObjectMemoryIsIntegerObject(integerPointer))
		return primitiveFailure(1);				// from not an integer
	SMALLINTEGER from = ObjectMemoryIntegerValueOf(integerPointer);

	Oop valuePointer = stackValue(2);
	OTE* receiverPointer = reinterpret_cast<OTE*>(stackValue(3));

	// Not valid for SmallIntegers
	if (ObjectMemoryIsIntegerObject(receiverPointer))
		return primitiveFailure(2);

	Oop answer = ZeroPointer;
	if (to >= from)
	{
		if (!receiverPointer->isPointers())
		{
			// Search a byte object
			BytesOTE* oteBytes = reinterpret_cast<BytesOTE*>(receiverPointer);

			if (ObjectMemoryIsIntegerObject(valuePointer))	// Arg MUST be an Integer to be a member
			{
				const MWORD byteValue = ObjectMemoryIntegerValueOf(valuePointer);
				if (byteValue < 256)	// Only worth looking for 0..255
				{
					const SMALLINTEGER length = oteBytes->bytesSize();
					// We can only be in here if to >= from, so from >= 1 implies to >= 1;
					// furthermore to <= length implies from <= length, so the single check
					// below covers all the bounds
					if (from < 1 || to > length)
						return primitiveFailure(2);

					// Search is in bounds, let's do it
			
					VariantByteObject* bytes = oteBytes->m_location;

					from--;
					while (from < to)
						if (bytes->m_fields[from++] == byteValue)
						{
							answer = ObjectMemoryIntegerObjectOf(from);
							break;
						}
				}
			}
		}
		else
		{
			// Search a pointer object - but only the indexable vars
			
			PointersOTE* oteReceiver = reinterpret_cast<PointersOTE*>(receiverPointer);
			VariantObject* receiver = oteReceiver->m_location;
			Behavior* behavior = receiverPointer->m_oteClass->m_location;
			const MWORD length = oteReceiver->pointersSize();
			const MWORD fixedFields = behavior->m_instanceSpec.m_fixedFields;

			// Similar reasoning with to/from as for byte objects, but here we need to
			// take account of the fixed fields.
			if (from < 1 || (to + fixedFields > length))
				return primitiveFailure(2);	// Out of bounds

			Oop* indexedFields = receiver->m_fields + fixedFields;
			from--;
			while (from < to)
				if (indexedFields[from++] == valuePointer)
				{
					answer = ObjectMemoryIntegerObjectOf(from);
					break;
				}
		}

	}
	else
		answer = ZeroPointer; 		// Empty range (to < from), so the value cannot be there

	stackValue(3) = answer;
	return primitiveSuccess(3);
}
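
The heart of the primitive is the identity scan over 1-based, inclusive from/to indices. The standalone helper below is purely illustrative (it is not part of the interpreter) and assumes the bounds have already been validated, as the primitive does before its loops.

// Illustrative sketch only - mirrors the scan loops above. from and to are 1-based and
// inclusive; the result is the 1-based index of the first identical element, or 0.
static SMALLINTEGER nextIdentityIndexOf(const Oop* fields, Oop value, SMALLINTEGER from, SMALLINTEGER to)
{
	from--;						// convert to a 0-based index
	while (from < to)			// 'to' is inclusive, hence the strict comparison against the 0-based index
	{
		if (fields[from++] == value)
			return from;		// after the increment, 'from' is the 1-based index of the match
	}
	return 0;					// not found
}
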
Example #4
template <MWORD ImageNullTerms> HRESULT ObjectMemory::LoadObjects(ibinstream & imageFile, const ImageHeader * pHeader, size_t & cbRead)
{
	// Other free OTEs will be threaded in front of the first OTE off the end
	// of the currently committed table space. We set the free list pointer
	// to that OTE rather than NULL so that attempts to allocate off the end
	// of the current table can be distinguished, which then allows us to grow
	// the table dynamically on demand
	OTE* pEnd = m_pOT + pHeader->nTableSize;
	m_pFreePointerList = reinterpret_cast<OTE*>(pEnd);

#ifdef _DEBUG
	unsigned numObjects = NumPermanent;	// Allow for VM registry, etc!
	m_nFreeOTEs = m_nOTSize - pHeader->nTableSize;
#endif

	size_t nDataSize = 0;
	for (OTE* ote = m_pOT + NumPermanent; ote < pEnd; ote++)
	{
		if (!ote->isFree())
		{
			MWORD byteSize = ote->getSize();

			MWORD* oldLocation = reinterpret_cast<MWORD*>(ote->m_location);

			Object* pBody;

			// Allocate space for the object, and copy into that space
			if (ote->heapSpace() == OTEFlags::VirtualSpace)
			{
				MWORD dwMaxAlloc;
				if (!imageFile.read(&dwMaxAlloc, sizeof(MWORD)))
					return ImageReadError(imageFile);
				cbRead += sizeof(MWORD);

				pBody = reinterpret_cast<Object*>(AllocateVirtualSpace(dwMaxAlloc, byteSize));
				ote->m_location = pBody;
			}
			else
			{
				if (ote->isNullTerminated())
				{
					ASSERT(!ote->isPointers());
					pBody = AllocObj(ote, byteSize + NULLTERMSIZE);
					if (NULLTERMSIZE > ImageNullTerms)
					{
						// Ensure we have a full null-terminator
						*reinterpret_cast<NULLTERMTYPE*>(static_cast<VariantByteObject*>(pBody)->m_fields+byteSize) = 0;
					}
					byteSize += ImageNullTerms;
				}
				else
				{
					pBody = AllocObj(ote, byteSize);
				}

			}

			markObject(ote);
			if (!imageFile.read(pBody, byteSize))
				return ImageReadError(imageFile);

			cbRead += byteSize;
			FixupObject(ote, oldLocation, pHeader);

#ifdef _DEBUG
			numObjects++;
#endif
		}
		else
		{
			// Thread onto the free list
			ote->m_location = (reinterpret_cast<POBJECT>(m_pFreePointerList));
			m_pFreePointerList = ote;
#ifdef _DEBUG
			m_nFreeOTEs++;
#endif
		}
	}

	// Note that we don't terminate the free list with a null, because
	// it must point off into space in order to get a GPF when it
	// needs to be expanded (at which point we commit more pages)

#ifdef _DEBUG
	ASSERT(numObjects + m_nFreeOTEs == m_nOTSize);
	ASSERT(m_nFreeOTEs == CountFreeOTEs());
	TRACESTREAM << std::dec << numObjects << L", " << m_nFreeOTEs << L" free" << std::endl;
#endif

	cbRead += nDataSize;
	return S_OK;
}
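
The free-list convention established above (each free OTE's m_location holds the next free OTE, with the final link pointing just past the committed table) can be summarised by the illustrative pop operation below. This is not VM code: CommitMoreObjectTable is a hypothetical stand-in, and the explicit bounds check stands in for the access violation the real VM relies on to trigger growth.

// Illustrative sketch only - shows why the free list is left pointing off the end of the
// committed table rather than being terminated with NULL.
void CommitMoreObjectTable();	// hypothetical helper: commits further object table pages

OTE* allocateOTEntrySketch(OTE*& freeListHead, OTE* firstUncommitted)
{
	if (freeListHead >= firstUncommitted)
	{
		// The head has reached the sentinel just past the committed space. The real VM
		// notices this via the resulting access violation; either way the response is to
		// commit more object table pages and thread the new entries onto the free list.
		CommitMoreObjectTable();
	}
	OTE* ote = freeListHead;
	freeListHead = reinterpret_cast<OTE*>(ote->m_location);	// unlink the head of the free list
	return ote;
}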