/**
 * Calculate the offset of an address from the base of the heap.
 * @param address The address to calculate the offset for.
 * @return The offset from the heap base.
 */
uintptr_t
MM_HeapVirtualMemory::calculateOffsetFromHeapBase(void* address)
{
	MM_GCExtensionsBase* extensions = MM_GCExtensionsBase::getExtensions(_omrVM);
	MM_MemoryManager* memoryManager = extensions->memoryManager;
	return memoryManager->calculateOffsetFromHeapBase(&_vmemHandle, address);
}
/**
 * Allocate and initialize the receiver's internal structures.
 * @return true on success, false on failure.
 */
bool
MM_ParallelSweepChunkArray::initialize(MM_EnvironmentBase* env, bool useVmem)
{
	bool result = false;
	MM_GCExtensionsBase* extensions = env->getExtensions();

	_useVmem = useVmem;

	if (extensions->isFvtestForceSweepChunkArrayCommitFailure()) {
		Trc_MM_SweepHeapSectioning_parallelSweepChunkArrayCommitFailureForced(env->getLanguageVMThread());
	} else {
		if (useVmem) {
			MM_MemoryManager* memoryManager = extensions->memoryManager;
			if (memoryManager->createVirtualMemoryForMetadata(env, &_memoryHandle, extensions->heapAlignment, _size * sizeof(MM_ParallelSweepChunk))) {
				void* base = memoryManager->getHeapBase(&_memoryHandle);
				result = memoryManager->commitMemory(&_memoryHandle, base, _size * sizeof(MM_ParallelSweepChunk));
				if (!result) {
					Trc_MM_SweepHeapSectioning_parallelSweepChunkArrayCommitFailed(env->getLanguageVMThread(), base, _size * sizeof(MM_ParallelSweepChunk));
				}
				_array = (MM_ParallelSweepChunk*)base;
			}
		} else {
			if (0 != _size) {
				_array = (MM_ParallelSweepChunk*)env->getForge()->allocate(_size * sizeof(MM_ParallelSweepChunk), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());
				result = (NULL != _array);
			} else {
				result = true;
			}
		}
	}
	return result;
}
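/*
 * Usage sketch (illustrative, not part of the original source): callers choose the
 * backing store up front. With useVmem=true the array is reserved through
 * createVirtualMemoryForMetadata() and then committed; with useVmem=false it is
 * allocated directly from the forge. The newInstance() factory and chunkCount
 * parameter shown here are assumptions, following the newInstance() pattern used
 * elsewhere in this module.
 *
 *	MM_ParallelSweepChunkArray* array = MM_ParallelSweepChunkArray::newInstance(env, chunkCount, useVmem);
 *	if (NULL == array) {
 *		return false; // reserve, commit, or forge allocation failed
 *	}
 *	...
 *	array->kill(env); // tearDown() releases the backing store
 */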
/**
 * Commit the address range into physical memory.
 * @return true if successful, false otherwise.
 * @note This is a bit of a strange function to have as public API. Should it be removed?
 */
bool
MM_HeapVirtualMemory::commitMemory(void* address, uintptr_t size)
{
	MM_GCExtensionsBase* extensions = MM_GCExtensionsBase::getExtensions(_omrVM);
	MM_MemoryManager* memoryManager = extensions->memoryManager;
	return memoryManager->commitMemory(&_vmemHandle, address, size);
}
/**
 * Decommit the address range from physical memory.
 * @return true if successful, false otherwise.
 * @note This is a bit of a strange function to have as public API. Should it be removed?
 */
bool
MM_HeapVirtualMemory::decommitMemory(void* address, uintptr_t size, void* lowValidAddress, void* highValidAddress)
{
	MM_GCExtensionsBase* extensions = MM_GCExtensionsBase::getExtensions(_omrVM);
	MM_MemoryManager* memoryManager = extensions->memoryManager;
	return memoryManager->decommitMemory(&_vmemHandle, address, size, lowValidAddress, highValidAddress);
}
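/*
 * Usage sketch (illustrative): commitMemory() and decommitMemory() are expected to
 * bracket the active lifetime of a heap range. The variable names here are
 * assumptions; the low/high valid addresses passed to decommitMemory() describe
 * the neighbouring committed ranges that must remain mapped.
 *
 *	if (heap->commitMemory(expandBase, expandSize)) {
 *		... // range is now backed by physical memory and usable
 *		heap->decommitMemory(expandBase, expandSize, lowValidAddress, highValidAddress);
 *	}
 */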
/**
 * Attach a physical arena of the specified size to the receiver.
 * This reserves the address space within the receiver for the arena, and connects the arena to the list
 * of those associated to the receiver (in address order).
 *
 * @return true if the arena was attached successfully, false otherwise.
 * @note The memory reserved is not committed.
 */
bool
MM_HeapVirtualMemory::attachArena(MM_EnvironmentBase* env, MM_PhysicalArena* arena, uintptr_t size)
{
	/* Sanity check of the size */
	if (getMaximumMemorySize() < size) {
		return false;
	}

	MM_GCExtensionsBase* extensions = env->getExtensions();
	MM_MemoryManager* memoryManager = extensions->memoryManager;

	/* Find the insertion point for the currentArena */
	void* candidateBase = memoryManager->getHeapBase(&_vmemHandle);
	MM_PhysicalArena* insertionHead = NULL;
	MM_PhysicalArena* insertionTail = _physicalArena;
	MM_PhysicalArena* currentArena = arena;
	while (insertionTail) {
		if ((((uintptr_t)insertionTail->getLowAddress()) - ((uintptr_t)candidateBase)) >= size) {
			break;
		}

		candidateBase = insertionTail->getHighAddress();
		insertionHead = insertionTail;
		insertionTail = insertionTail->getNextArena();
	}

	/* If we have reached the end of the currentArena list, check if there is room between the candidateBase
	 * and the end of virtual memory */
	if (!insertionTail) {
		if ((memoryManager->calculateOffsetToHeapTop(&_vmemHandle, candidateBase)) < size) {
			return false;
		}
	}

	/* Connect the physical currentArena into the list at the appropriate point */
	currentArena->setPreviousArena(insertionHead);
	currentArena->setNextArena(insertionTail);
	if (insertionTail) {
		insertionTail->setPreviousArena(currentArena);
	}
	if (insertionHead) {
		insertionHead->setNextArena(currentArena);
	} else {
		_physicalArena = currentArena;
	}

	currentArena->setLowAddress(candidateBase);
	currentArena->setHighAddress((void*)(((uint8_t*)candidateBase) + size));

	/* Set the arena state to being attached */
	arena->setAttached(true);
	return true;
}
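/*
 * Worked example (illustrative): suppose the heap spans [0x1000, 0x9000) and two
 * arenas are already attached, A = [0x1000, 0x3000) and B = [0x6000, 0x9000).
 * Attaching a 0x2000-byte arena walks the list in address order:
 *   - candidateBase starts at the heap base 0x1000; the gap before A is 0 bytes, too small;
 *   - candidateBase advances to A's high address 0x3000; the gap before B is
 *     0x3000 bytes >= 0x2000, so the new arena is linked between A and B and
 *     occupies [0x3000, 0x5000).
 * Only address space is reserved by this walk; commitMemory() must still be
 * called before the range is used.
 */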
/**
 * Free the receiver's internal structures.
 */
void
MM_ParallelSweepChunkArray::tearDown(MM_EnvironmentBase* env)
{
	if (_useVmem) {
		MM_GCExtensionsBase* extensions = env->getExtensions();
		MM_MemoryManager* memoryManager = extensions->memoryManager;
		memoryManager->destroyVirtualMemory(env, &_memoryHandle);
	} else {
		env->getForge()->free((void*)_array);
	}
	_array = (MM_ParallelSweepChunk*)NULL;
}
bool
MM_HeapRegionManagerTarok::enableRegionsInTable(MM_EnvironmentBase *env, MM_MemoryHandle *handle)
{
	bool result = true;
	MM_GCExtensionsBase *extensions = env->getExtensions();
	MM_MemoryManager *memoryManager = extensions->memoryManager;
	void *lowHeapEdge = memoryManager->getHeapBase(handle);
	void *highHeapEdge = memoryManager->getHeapTop(handle);

	/* maintained for RTJ */
	setNodeAndLinkRegions(env, lowHeapEdge, highHeapEdge, 0);

	return result;
}
void
MM_HeapVirtualMemory::tearDown(MM_EnvironmentBase* env)
{
	MM_MemoryManager* memoryManager = env->getExtensions()->memoryManager;
	MM_HeapRegionManager* manager = getHeapRegionManager();

	if (NULL != manager) {
		manager->destroyRegionTable(env);
	}

	memoryManager->destroyVirtualMemory(env, &_vmemHandle);

	MM_Heap::tearDown(env);
}
MM_MemoryManager*
MM_MemoryManager::newInstance(MM_EnvironmentBase* env)
{
	MM_MemoryManager* memoryManager = (MM_MemoryManager*)env->getForge()->allocate(sizeof(MM_MemoryManager), OMR::GC::AllocationCategory::FIXED, OMR_GET_CALLSITE());

	if (NULL != memoryManager) {
		new (memoryManager) MM_MemoryManager(env);
		if (!memoryManager->initialize(env)) {
			memoryManager->kill(env);
			memoryManager = NULL;
		}
	}

	return memoryManager;
}
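/*
 * Usage sketch (illustrative): the lifecycle pairs newInstance() with kill(),
 * which is assumed (as elsewhere in OMR GC code) to run tearDown() and return
 * the storage to the forge. Note the placement new above: the forge hands back
 * raw storage, so the constructor must be invoked explicitly before initialize().
 *
 *	MM_MemoryManager* memoryManager = MM_MemoryManager::newInstance(env);
 *	if (NULL == memoryManager) {
 *		return false; // allocation or initialization failed
 *	}
 *	...
 *	memoryManager->kill(env);
 */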
bool
MM_HeapVirtualMemory::initializeHeapRegionManager(MM_EnvironmentBase* env, MM_HeapRegionManager* manager)
{
	bool result = false;
	/* since this kind of heap is backed by contiguous memory, tell the heap region manager (which was just
	 * initialized by super) that we want to enable this range of regions for later use.
	 */
	MM_MemoryManager* memoryManager = MM_GCExtensionsBase::getExtensions(_omrVM)->memoryManager;
	void* heapBase = memoryManager->getHeapBase(&_vmemHandle);
	void* heapTop = memoryManager->getHeapTop(&_vmemHandle);
	if (manager->setContiguousHeapRange(env, heapBase, heapTop)) {
		result = manager->enableRegionsInTable(env, &_vmemHandle);
	}
	return result;
}
/**
 * Find and return the base address of the backing store.
 * This routine returns the base address of the memory backing the base array.
 * @return base address of the backing store.
 */
void*
MM_SweepHeapSectioning::getBackingStoreAddress()
{
	MM_MemoryManager* memoryManager = _extensions->memoryManager;
	return (void*)memoryManager->getHeapBase(&_baseArray->_memoryHandle);
}
bool
MM_HeapVirtualMemory::initialize(MM_EnvironmentBase* env, uintptr_t size)
{
	/* call the superclass to initialize before we do any work */
	if (!MM_Heap::initialize(env)) {
		return false;
	}

	MM_GCExtensionsBase* extensions = env->getExtensions();
	uintptr_t padding = extensions->heapTailPadding;

	uintptr_t effectiveHeapAlignment = _heapAlignment;
	/* we need to ensure that we allocate the heap with region alignment since the region table requires that */
	MM_HeapRegionManager* manager = getHeapRegionManager();
	effectiveHeapAlignment = MM_Math::roundToCeiling(manager->getRegionSize(), effectiveHeapAlignment);

	MM_MemoryManager* memoryManager = extensions->memoryManager;
	bool created = false;
	bool forcedOverflowProtection = false;

	/* Under -Xaggressive ensure a full page of padding -- see JAZZ103 45254 */
	if (extensions->padToPageSize) {
#if (defined(AIXPPC) && !defined(PPC64))
		/*
		 * An attempt to allocate heap with top at 0xffffffff.
		 * In this case extra padding is not required because overflow protection padding can be used instead.
		 */
		uintptr_t effectiveSize = MM_Math::roundToCeiling(manager->getRegionSize(), size);
		void *preferredHeapBase = (void *)((uintptr_t)0 - effectiveSize);

		created = memoryManager->createVirtualMemoryForHeap(env, &_vmemHandle, effectiveHeapAlignment, size, padding, preferredHeapBase, (void *)(extensions->heapCeiling));
		if (created) {
			/* overflow protection must be there to play the role of padding even if the top is not so close to the end of the memory */
			forcedOverflowProtection = true;
		} else
#endif /* (defined(AIXPPC) && !defined(PPC64)) */
		{
			/* Ignore extra full page padding if page size is too large (hard coded here for 1G or larger) */
#define ONE_GB ((uintptr_t)1 * 1024 * 1024 * 1024)
			if (extensions->requestedPageSize < ONE_GB) {
				if (padding < extensions->requestedPageSize) {
					padding = extensions->requestedPageSize;
				}
			}
		}
	}

	if (!created && !memoryManager->createVirtualMemoryForHeap(env, &_vmemHandle, effectiveHeapAlignment, size, padding, (void*)(extensions->preferredHeapBase), (void*)(extensions->heapCeiling))) {
		return false;
	}

	/* Check we haven't overflowed the address range */
	if (forcedOverflowProtection || (HIGH_ADDRESS - ((uintptr_t)memoryManager->getHeapTop(&_vmemHandle)) < (OVERFLOW_ROUNDING)) || extensions->fvtest_alwaysApplyOverflowRounding) {
		/* Address range overflow */
		memoryManager->roundDownTop(&_vmemHandle, OVERFLOW_ROUNDING);
	}
	extensions->overflowSafeAllocSize = ((HIGH_ADDRESS - (uintptr_t)(memoryManager->getHeapTop(&_vmemHandle))) + 1);

	/* The memory returned might be less than we asked for -- get the actual size */
	_maximumMemorySize = memoryManager->getMaximumSize(&_vmemHandle);

	return true;
}
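/*
 * Worked example (illustrative, 32-bit, assumed constants): suppose the heap top
 * lands at 0xFFFFF800 and OVERFLOW_ROUNDING is 0x1000. Then
 * HIGH_ADDRESS (0xFFFFFFFF) - 0xFFFFF800 = 0x7FF < 0x1000, so the top is rounded
 * down; with a resulting top of 0xFFFFF000, overflowSafeAllocSize becomes
 * (0xFFFFFFFF - 0xFFFFF000) + 1 = 0x1000. Any allocation size below
 * overflowSafeAllocSize can then be added to an address within the heap without
 * wrapping around the end of the address space.
 */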
/**
 * Answer the largest size the heap will ever consume.
 * The value returned represents the difference between the lowest and highest possible addresses
 * the heap can ever occupy. This value includes any memory that may never be used by the heap (e.g.,
 * in a segmented heap scenario).
 * @return Maximum size that the heap will ever span.
 */
uintptr_t
MM_HeapVirtualMemory::getMaximumPhysicalRange()
{
	MM_MemoryManager* memoryManager = MM_GCExtensionsBase::getExtensions(_omrVM)->memoryManager;
	return ((uintptr_t)memoryManager->getMaximumSize(&_vmemHandle));
}
uintptr_t
MM_HeapVirtualMemory::getPageFlags()
{
	MM_MemoryManager* memoryManager = MM_GCExtensionsBase::getExtensions(_omrVM)->memoryManager;
	return memoryManager->getPageFlags(&_vmemHandle);
}
/**
 * Answer the highest address the heap can ever occupy.
 * @return Highest address possible for the heap.
 */
void*
MM_HeapVirtualMemory::getHeapTop()
{
	MM_MemoryManager* memoryManager = MM_GCExtensionsBase::getExtensions(_omrVM)->memoryManager;
	return memoryManager->getHeapTop(&_vmemHandle);
}