PointersOTE* __fastcall ObjectMemory::shallowCopy(PointersOTE* ote)
{
	ASSERT(!ote->isBytes());

	// A pointer object is a bit more tricky to copy (but not much)
	VariantObject* obj = ote->m_location;
	BehaviorOTE* classPointer = ote->m_oteClass;

	PointersOTE* copyPointer;
	MWORD size;

	if (ote->heapSpace() == OTEFlags::VirtualSpace)
	{
		Interpreter::resizeActiveProcess();

		//size = obj->PointerSize();
		size = ote->pointersSize();
		VirtualObject* pVObj = reinterpret_cast<VirtualObject*>(obj);
		VirtualObjectHeader* pBase = pVObj->getHeader();
		unsigned maxByteSize = pBase->getMaxAllocation();
		unsigned currentTotalByteSize = pBase->getCurrentAllocation();

		VirtualOTE* virtualCopy = ObjectMemory::newVirtualObject(classPointer,
				currentTotalByteSize / sizeof(MWORD),
				maxByteSize / sizeof(MWORD));
		if (!virtualCopy)
			return nullptr;

		pVObj = virtualCopy->m_location;
		pBase = pVObj->getHeader();
		ASSERT(pBase->getMaxAllocation() == maxByteSize);
		ASSERT(pBase->getCurrentAllocation() == currentTotalByteSize);
		virtualCopy->setSize(ote->getSize());

		copyPointer = reinterpret_cast<PointersOTE*>(virtualCopy);
	}
	else
	{
		//size = obj->PointerSize();
		size = ote->pointersSize();
		copyPointer = newPointerObject(classPointer, size);
	}

	// Now copy over all the fields
	VariantObject* copy = copyPointer->m_location;
	ASSERT(copyPointer->pointersSize() == size);
	for (unsigned i = 0; i < size; i++)
	{
		copy->m_fields[i] = obj->m_fields[i];
		countUp(obj->m_fields[i]);
	}

	return copyPointer;
}
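The field-copying loop at the end is what makes this a shallow copy: the new object shares its referenced objects with the original, so each shared reference must have its count bumped, which is what countUp() does. The following is a minimal standalone sketch of that same pattern using hypothetical toy types, not the VM's own:

// Illustrative sketch only: toy types standing in for OTEs and VariantObjects.
#include <vector>

struct Obj { unsigned refCount = 1; };

struct Pointers
{
	std::vector<Obj*> fields;		// analogous to VariantObject::m_fields
};

Pointers toyShallowCopy(const Pointers& original)
{
	Pointers copy;
	copy.fields.reserve(original.fields.size());
	for (Obj* field : original.fields)
	{
		copy.fields.push_back(field);	// copy the reference, not the object
		if (field)
			++field->refCount;			// "countUp": the object gains one referrer
	}
	return copy;
}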
/*
	Allocate a new virtual object from virtual space, which can grow up to maxBytes
	(including the virtual allocation overhead) but which has an initial size of
	initialBytes (NOT including the virtual allocation overhead). Should the allocation
	request fail, then a memory exception is generated.
*/
MWORD* __stdcall AllocateVirtualSpace(MWORD maxBytes, MWORD initialBytes)
{
	unsigned reserveBytes = _ROUND2(maxBytes + dwPageSize, dwAllocationGranularity);
	ASSERT(reserveBytes % dwAllocationGranularity == 0);
	void* pReservation = ::VirtualAlloc(NULL, reserveBytes, MEM_RESERVE, PAGE_NOACCESS);
	if (pReservation)
	{
#ifdef _DEBUG
		// Let's see whether we got the rounding correct!
		MEMORY_BASIC_INFORMATION mbi;
		VERIFY(::VirtualQuery(pReservation, &mbi, sizeof(mbi)) == sizeof(mbi));
		ASSERT(mbi.AllocationBase == pReservation);
		ASSERT(mbi.BaseAddress == pReservation);
		ASSERT(mbi.AllocationProtect == PAGE_NOACCESS);
		// ASSERT(mbi.Protect == PAGE_NOACCESS);
		ASSERT(mbi.RegionSize == reserveBytes);
		ASSERT(mbi.State == MEM_RESERVE);
		ASSERT(mbi.Type == MEM_PRIVATE);
#endif

		// We expect the initial byte size to be an integral number of pages, and it must also
		// take account of the virtual allocation overhead (currently 4 bytes)
		initialBytes = _ROUND2(initialBytes + sizeof(VirtualObjectHeader), dwPageSize);
		ASSERT(initialBytes % dwPageSize == 0);

		// Note that VirtualAlloc initializes the committed memory to zeroes.
		VirtualObjectHeader* pLocation = static_cast<VirtualObjectHeader*>(
			::VirtualAlloc(pReservation, initialBytes, MEM_COMMIT, PAGE_READWRITE));
		if (pLocation)
		{
#ifdef _DEBUG
			// Let's see whether we got the rounding correct!
			VERIFY(::VirtualQuery(pLocation, &mbi, sizeof(mbi)) == sizeof(mbi));
			ASSERT(mbi.AllocationBase == pLocation);
			ASSERT(mbi.BaseAddress == pLocation);
			ASSERT(mbi.AllocationProtect == PAGE_NOACCESS);
			ASSERT(mbi.Protect == PAGE_READWRITE);
			ASSERT(mbi.RegionSize == initialBytes);
			ASSERT(mbi.State == MEM_COMMIT);
			ASSERT(mbi.Type == MEM_PRIVATE);
#endif

			// Use first slot to hold the maximum size for the object
			pLocation->setMaxAllocation(maxBytes);
			return reinterpret_cast<MWORD*>(pLocation + 1);
		}
	}

	return nullptr;
}
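The function is an instance of the standard Win32 reserve-then-commit pattern: address space for the largest size the object may ever reach is reserved up front, but physical storage is committed only for the initial pages. Here is a minimal standalone sketch of that pattern, independent of the VM's own types; the helper name roundUp and the error handling are illustrative only:

// Sketch of the reserve-then-commit pattern, not the VM's actual allocator.
#include <windows.h>

static SIZE_T roundUp(SIZE_T n, SIZE_T multiple)	// multiple must be a power of two
{
	return (n + multiple - 1) & ~(multiple - 1);
}

void* reserveThenCommit(SIZE_T maxBytes, SIZE_T initialBytes)
{
	SYSTEM_INFO si;
	::GetSystemInfo(&si);

	// Reserve address space for the largest the region may ever grow to...
	SIZE_T reserveBytes = roundUp(maxBytes, si.dwAllocationGranularity);
	void* base = ::VirtualAlloc(NULL, reserveBytes, MEM_RESERVE, PAGE_NOACCESS);
	if (!base)
		return NULL;

	// ...but commit (and therefore pay for) only the initial pages.
	// VirtualAlloc zero-initializes newly committed pages.
	SIZE_T commitBytes = roundUp(initialBytes, si.dwPageSize);
	if (!::VirtualAlloc(base, commitBytes, MEM_COMMIT, PAGE_READWRITE))
	{
		::VirtualFree(base, 0, MEM_RELEASE);
		return NULL;
	}

	return base;
}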
// Resize an object in VirtualSpace (commit/decommit some memory)
// N.B. Assumes that there are no ref. counted objects above shrinkTo (primarily intended for
// Process stacks)
POBJECT ObjectMemory::resizeVirtual(OTE* ote, MWORD newByteSize)
{
	ASSERT(ote->heapSpace() == OTEFlags::VirtualSpace);

	VariantObject* pObject = static_cast<VariantObject*>(ote->m_location);
	VirtualObject* pVObj = reinterpret_cast<VirtualObject*>(pObject);
	VirtualObjectHeader* pBase = pVObj->getHeader();

	unsigned maxByteSize = pBase->getMaxAllocation(); maxByteSize;	// referenced only in ASSERTs
	unsigned currentTotalByteSize = pBase->getCurrentAllocation();
	ASSERT(_ROUND2(currentTotalByteSize, dwPageSize) == currentTotalByteSize);

	unsigned newTotalByteSize = _ROUND2(newByteSize + sizeof(VirtualObjectHeader), dwPageSize);
	// Minimum virtual allocation is one page (4k normally)
	ASSERT(newTotalByteSize >= dwPageSize);

	if (newTotalByteSize > currentTotalByteSize)
	{
		// The object is increasing in size - commit some more memory
		ASSERT(newByteSize <= maxByteSize);
		unsigned allocSize = newTotalByteSize - currentTotalByteSize;
		ASSERT(_ROUND2(allocSize, dwPageSize) == allocSize);
		if (!::VirtualAlloc(reinterpret_cast<BYTE*>(pBase) + currentTotalByteSize, allocSize, MEM_COMMIT, PAGE_READWRITE))
			return nullptr;		// Request to resize failed
	}
	else if (newTotalByteSize < currentTotalByteSize)
	{
		const Behavior* behavior = ote->m_oteClass->m_location; behavior;	// referenced only in ASSERTs

		// The object is shrinking - decommit some memory
		ASSERT(newByteSize > (ObjectHeaderSize + behavior->fixedFields()) * sizeof(MWORD));

		MWORD* pCeiling = reinterpret_cast<MWORD*>(reinterpret_cast<BYTE*>(pBase) + newTotalByteSize);

		// Determine the size of the committed region above shrinkTo
		MEMORY_BASIC_INFORMATION mbi;
		VERIFY(::VirtualQuery(pCeiling, &mbi, sizeof(mbi)) == sizeof(mbi));
		ASSERT(mbi.AllocationBase == pBase);
		if (mbi.State == MEM_COMMIT)
		{
			// Decommit memory above new ceiling
			VERIFY(::VirtualFree(pCeiling, mbi.RegionSize, MEM_DECOMMIT));
		}
	}

	// And resize the object as far as Smalltalk is concerned to the nearest page boundary
	// above and including shrinkTo
	//pBase->setCurrentAllocation(newTotalByteSize);
	ote->setSize(newByteSize);

	return pObject;
}
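The grow/shrink decision reduces to page-rounding arithmetic on the requested size plus the virtual allocation header. A sketch of that arithmetic follows, assuming _ROUND2(n, m) rounds n up to the next multiple of m (m a power of two), a 4KB page, and a 4-byte header as the comment in AllocateVirtualSpace suggests; the numbers are illustrative only:

// Illustrative arithmetic only; roundUp stands in for _ROUND2.
#include <cstdio>

static unsigned roundUp(unsigned n, unsigned multiple)
{
	return (n + multiple - 1) & ~(multiple - 1);
}

int main()
{
	const unsigned pageSize = 4096;		// typical dwPageSize
	const unsigned headerSize = 4;		// assumed sizeof(VirtualObjectHeader)

	unsigned current = 8192;									// currently committed bytes (two pages)
	unsigned newTotal = roundUp(9000 + headerSize, pageSize);	// 12288 (three pages)

	if (newTotal > current)
		std::printf("commit %u more bytes\n", newTotal - current);	// prints 4096: one extra page
	else if (newTotal < current)
		std::printf("decommit down to %u bytes\n", newTotal);
	return 0;
}

So growing from 8,192 committed bytes to a 9,000-byte object commits one more page, while the same request against 16,384 committed bytes would decommit everything above the 12,288-byte ceiling, which is exactly what the VirtualQuery/VirtualFree branch above does.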