/**
 * Allocate and initialize the receiver's internal structures.
 * @return true on success, false on failure.
 */
bool
MM_ParallelSweepChunkArray::initialize(MM_EnvironmentBase* env, bool useVmem)
{
	bool result = false;
	MM_GCExtensionsBase* extensions = env->getExtensions();

	_useVmem = useVmem;

	if (extensions->isFvtestForceSweepChunkArrayCommitFailure()) {
		Trc_MM_SweepHeapSectioning_parallelSweepChunkArrayCommitFailureForced(env->getLanguageVMThread());
	} else {
		if (useVmem) {
			MM_MemoryManager* memoryManager = extensions->memoryManager;
			if (memoryManager->createVirtualMemoryForMetadata(env, &_memoryHandle, extensions->heapAlignment, _size * sizeof(MM_ParallelSweepChunk))) {
				void* base = memoryManager->getHeapBase(&_memoryHandle);
				result = memoryManager->commitMemory(&_memoryHandle, base, _size * sizeof(MM_ParallelSweepChunk));
				if (!result) {
					Trc_MM_SweepHeapSectioning_parallelSweepChunkArrayCommitFailed(env->getLanguageVMThread(), base, _size * sizeof(MM_ParallelSweepChunk));
				}
				_array = (MM_ParallelSweepChunk*)base;
			}
		} else {
			if (0 != _size) {
				_array = (MM_ParallelSweepChunk*)env->getForge()->allocate(_size * sizeof(MM_ParallelSweepChunk), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());
				result = (NULL != _array);
			} else {
				result = true;
			}
		}
	}
	return result;
}
bool
MM_VerboseBuffer::ensureCapacity(MM_EnvironmentBase *env, uintptr_t spaceNeeded)
{
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());
	bool result = true;

	if (freeSpace() < spaceNeeded) {
		/* Not enough space in the current buffer - try to alloc a larger one and use that */
		char *oldBuffer = _buffer;
		uintptr_t currentSize = this->currentSize();
		uintptr_t newStringLength = currentSize + spaceNeeded;
		uintptr_t newSize = newStringLength + (newStringLength / 2);

		char *newBuffer = (char *)extensions->getForge()->allocate(newSize, MM_AllocationCategory::DIAGNOSTIC, OMR_GET_CALLSITE());
		if (NULL == newBuffer) {
			result = false;
		} else {
			_buffer = newBuffer;

			/* Got a new buffer - initialize it */
			_bufferTop = _buffer + newSize;
			reset();

			/* Copy across the contents of the old buffer */
			strcpy(_buffer, oldBuffer);
			_bufferAlloc += currentSize;

			/* Delete the old buffer */
			extensions->getForge()->free(oldBuffer);
		}
	}

	return result;
}
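/*
 * Hedged caller sketch: ensureCapacity() sizes the new backing store at 1.5x the
 * required length (e.g. a 100-byte string needing 50 more bytes yields a 225-byte
 * buffer), so repeated appends are amortized. The add() call below is a
 * hypothetical append API; only ensureCapacity() is shown above.
 */
static bool
appendToBuffer(MM_EnvironmentBase *env, MM_VerboseBuffer *buffer, const char *string)
{
	uintptr_t needed = strlen(string) + 1; /* include the NUL terminator */
	if (!buffer->ensureCapacity(env, needed)) {
		return false; /* could not allocate a larger backing store */
	}
	return buffer->add(env, string); /* hypothetical append API */
}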
/**
 * Probe the file system for existing files. Determine
 * the first number which is unused, or the number of the oldest
 * file if all numbers are used.
 * @return the first file number to use (starting at 0), or -1 on failure
 */
intptr_t
MM_VerboseWriterFileLogging::findInitialFile(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());
	int64_t oldestTime = J9CONST64(0x7FFFFFFFFFFFFFFF); /* the highest possible time */
	intptr_t oldestFile = 0;

	if (_mode != rotating_files) {
		/* nothing to do */
		return 0;
	}

	for (uintptr_t currentFile = 0; currentFile < _numFiles; currentFile++) {
		char *filenameToOpen = expandFilename(env, currentFile);
		if (NULL == filenameToOpen) {
			return -1;
		}

		int64_t thisTime = omrfile_lastmod(filenameToOpen);
		extensions->getForge()->free(filenameToOpen);

		if (thisTime < 0) {
			/* file doesn't exist, or some other problem reading the file */
			oldestFile = currentFile;
			break;
		} else if (thisTime < oldestTime) {
			oldestTime = thisTime;
			oldestFile = currentFile;
		}
	}

	return oldestFile;
}
bool
MM_VerboseWriter::initialize(MM_EnvironmentBase* env)
{
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
	MM_GCExtensionsBase* ext = env->getExtensions();

	/* Initialize _header */
	const char* version = omrgc_get_version(env->getOmrVM());
	/* The length is -2 for the "%s" in VERBOSEGC_HEADER and +1 for '\0' */
	uintptr_t headerLength = strlen(version) + strlen(VERBOSEGC_HEADER) - 1;
	_header = (char*)ext->getForge()->allocate(sizeof(char) * headerLength, OMR::GC::AllocationCategory::DIAGNOSTIC, OMR_GET_CALLSITE());
	if (NULL == _header) {
		return false;
	}
	omrstr_printf(_header, headerLength, VERBOSEGC_HEADER, version);

	/* Initialize _footer */
	uintptr_t footerLength = strlen(VERBOSEGC_FOOTER) + 1;
	_footer = (char*)ext->getForge()->allocate(sizeof(char) * footerLength, OMR::GC::AllocationCategory::DIAGNOSTIC, OMR_GET_CALLSITE());
	if (NULL == _footer) {
		ext->getForge()->free(_header);
		_header = NULL; /* avoid a dangling pointer if tearDown() runs after this failure */
		return false;
	}
	omrstr_printf(_footer, footerLength, VERBOSEGC_FOOTER);

	return true;
}
void
MM_CollectorLanguageInterfaceImpl::scavenger_fixupDestroyedSlot(MM_EnvironmentBase *env, MM_ForwardedHeader *forwardedHeader, MM_MemorySubSpaceSemiSpace *subSpaceNew)
{
	/* This method must be implemented if (and only if) the object header is stored in a compressed slot. In that
	 * case the other half of the full (omrobjectptr_t sized) slot may hold a compressed object reference that
	 * must be restored by this method.
	 */
	/* This assumes that all slots are object slots, including the slot adjacent to the header slot */
	if ((0 != forwardedHeader->getPreservedOverlap()) && !_extensions->objectModel.isIndexable(forwardedHeader)) {
		MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(_omrVM);

		/* Get the uncompressed reference from the slot */
		fomrobject_t preservedOverlap = (fomrobject_t)forwardedHeader->getPreservedOverlap();
		GC_SlotObject preservedSlotObject(_omrVM, &preservedOverlap);
		omrobjectptr_t survivingCopyAddress = preservedSlotObject.readReferenceFromSlot();

		/* Check if the address we want to read is aligned (since mis-aligned reads may still be less than a top address but extend beyond it) */
		if (0 == ((uintptr_t)survivingCopyAddress & (extensions->getObjectAlignmentInBytes() - 1))) {
			/* Ensure that the address we want to read is within part of the heap which could contain copied objects (tenure or survivor) */
			void *topOfObject = (void *)((uintptr_t *)survivingCopyAddress + 1);
			if (subSpaceNew->isObjectInNewSpace(survivingCopyAddress, topOfObject) || extensions->isOld(survivingCopyAddress, topOfObject)) {
				/* if the slot points to a reverse-forwarded object, restore the original location (in evacuate space) */
				MM_ForwardedHeader reverseForwardedHeader(survivingCopyAddress);
				if (reverseForwardedHeader.isReverseForwardedPointer()) {
					/* overlapped slot must be fixed up */
					fomrobject_t fixupSlot = 0;
					GC_SlotObject fixupSlotObject(_omrVM, &fixupSlot);
					fixupSlotObject.writeReferenceToSlot(reverseForwardedHeader.getReverseForwardedPointer());
					forwardedHeader->restoreDestroyedOverlap((uint32_t)fixupSlot);
				}
			}
		}
	}
}
/**
 * Free the buffer object.
 */
void
MM_VerboseBuffer::kill(MM_EnvironmentBase *env)
{
	tearDown(env);

	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());
	extensions->getForge()->free(this);
}
/**
 * Free the buffer itself.
 */
void
MM_VerboseBuffer::tearDown(MM_EnvironmentBase *env)
{
	if (NULL != _buffer) {
		MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());
		extensions->getForge()->free(_buffer);
	}
}
void
MM_VerboseWriter::tearDown(MM_EnvironmentBase* env)
{
	MM_GCExtensionsBase* ext = env->getExtensions();

	ext->getForge()->free(_header);
	_header = NULL;
	ext->getForge()->free(_footer);
	_footer = NULL;
}
bool
MM_ConfigurationStandard::initialize(MM_EnvironmentBase* env)
{
	MM_GCExtensionsBase* extensions = env->getExtensions();
	bool result = MM_Configuration::initialize(env);
	if (result) {
		extensions->payAllocationTax = extensions->isConcurrentMarkEnabled() || extensions->isConcurrentSweepEnabled();
		extensions->setStandardGC(true);
	}

	return result;
}
/**
 * Tear down the structures managed by the MM_VerboseWriterFileLogging.
 * Tears down the verbose buffer.
 */
void
MM_VerboseWriterFileLogging::tearDown(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());

	omrstr_free_tokens(_tokens);
	_tokens = NULL;

	extensions->getForge()->free(_filename);
	_filename = NULL;

	MM_VerboseWriter::tearDown(env);
}
int J9THREAD_PROC
MM_MasterGCThread::master_thread_proc(void *info)
{
	MM_MasterGCThread *masterGCThread = (MM_MasterGCThread*)info;
	MM_GCExtensionsBase *extensions = masterGCThread->_extensions;
	OMR_VM *omrVM = extensions->getOmrVM();
	OMRPORT_ACCESS_FROM_OMRVM(omrVM);
	uintptr_t rc = 0;
	omrsig_protect(master_thread_proc2, info,
			((MM_ParallelDispatcher *)extensions->dispatcher)->getSignalHandler(), omrVM,
			OMRPORT_SIG_FLAG_SIGALLSYNC | OMRPORT_SIG_FLAG_MAY_CONTINUE_EXECUTION,
			&rc);
	return 0;
}
omr_error_t
OMR_GC_SystemCollect(OMR_VMThread* omrVMThread, uint32_t gcCode)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(omrVMThread->_vm);

	/* Lazily create the collector, if necessary. */
	if (NULL == extensions->getGlobalCollector()) {
		if (OMR_ERROR_NONE != OMR_GC_InitializeCollector(omrVMThread)) {
			return OMR_ERROR_INTERNAL;
		}
	}

	extensions->heap->systemGarbageCollect(env, gcCode);
	return OMR_ERROR_NONE;
}
omr_error_t
OMR_GC_SystemCollect(OMR_VMThread* omrVMThread, uint32_t gcCode)
{
	omr_error_t result = OMR_ERROR_NONE;
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);
	MM_GCExtensionsBase *extensions = env->getExtensions();

	if (NULL == extensions->getGlobalCollector()) {
		result = OMR_GC_InitializeCollector(omrVMThread);
	}
	if (OMR_ERROR_NONE == result) {
		extensions->heap->systemGarbageCollect(env, gcCode);
	}
	return result;
}
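/*
 * Hedged caller sketch. Both variants above lazily initialize the global
 * collector on first use. The gcCode value is illustrative; OMR's explicit
 * system-GC code (J9MMCONSTANT_EXPLICIT_GC_SYSTEM_GC) is assumed here, but any
 * code the language glue defines works.
 */
static void
requestSystemGC(OMR_VMThread* omrVMThread)
{
	omr_error_t rc = OMR_GC_SystemCollect(omrVMThread, J9MMCONSTANT_EXPLICIT_GC_SYSTEM_GC);
	if (OMR_ERROR_NONE != rc) {
		/* lazy collector initialization failed; no collection was performed */
	}
}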
MM_VerboseHandlerOutput *
MM_VerboseHandlerOutput::newInstance(MM_EnvironmentBase *env, MM_VerboseManager *manager)
{
	MM_GCExtensionsBase* extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());

	MM_VerboseHandlerOutput *verboseHandlerOutput = (MM_VerboseHandlerOutput*)extensions->getForge()->allocate(sizeof(MM_VerboseHandlerOutput), OMR::GC::AllocationCategory::FIXED, OMR_GET_CALLSITE());
	if (NULL != verboseHandlerOutput) {
		new(verboseHandlerOutput) MM_VerboseHandlerOutput(extensions);
		if (!verboseHandlerOutput->initialize(env, manager)) {
			verboseHandlerOutput->kill(env);
			verboseHandlerOutput = NULL;
		}
	}
	return verboseHandlerOutput;
}
MM_VerboseManagerImpl *
MM_VerboseManagerImpl::newInstance(MM_EnvironmentBase *env, OMR_VM* vm)
{
	MM_GCExtensionsBase* extensions = MM_GCExtensionsBase::getExtensions(vm);

	MM_VerboseManagerImpl *verboseManager = (MM_VerboseManagerImpl *)extensions->getForge()->allocate(sizeof(MM_VerboseManagerImpl), OMR::GC::AllocationCategory::FIXED, OMR_GET_CALLSITE());
	if (NULL != verboseManager) {
		new(verboseManager) MM_VerboseManagerImpl(vm);
		if (!verboseManager->initialize(env)) {
			verboseManager->kill(env);
			verboseManager = NULL;
		}
	}
	return verboseManager;
}
static MM_GCWriteBarrierType
getWriteBarrierType(MM_EnvironmentBase* env)
{
	MM_GCWriteBarrierType writeBarrierType = gc_modron_wrtbar_none;
	MM_GCExtensionsBase* extensions = env->getExtensions();
	if (extensions->isScavengerEnabled()) {
		if (extensions->isConcurrentMarkEnabled()) {
			writeBarrierType = gc_modron_wrtbar_cardmark_and_oldcheck;
		} else {
			writeBarrierType = gc_modron_wrtbar_oldcheck;
		}
	} else if (extensions->isConcurrentMarkEnabled()) {
		writeBarrierType = gc_modron_wrtbar_cardmark;
	}
	return writeBarrierType;
}
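/*
 * Hedged example (not an OMR API): how a language frontend might consume the
 * reported barrier type when wiring up its store path. The per-case comments
 * restate the configuration each value corresponds to in the function above.
 */
static void
selectStoreBarrier(MM_EnvironmentBase* env)
{
	switch (getWriteBarrierType(env)) {
	case gc_modron_wrtbar_cardmark_and_oldcheck:
		/* scavenger + concurrent mark: card-mark and remember old->new stores */
		break;
	case gc_modron_wrtbar_oldcheck:
		/* scavenger only: remember old->new stores in the parent object */
		break;
	case gc_modron_wrtbar_cardmark:
		/* concurrent mark only: dirty the card for the stored-into object */
		break;
	case gc_modron_wrtbar_none:
	default:
		/* plain stores need no barrier */
		break;
	}
}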
omrobjectptr_t
OMR_GC_Allocate(OMR_VMThread * omrVMThread, size_t sizeInBytes, uintptr_t flags)
{
	omrobjectptr_t heapBytes = allocHelper(omrVMThread, sizeInBytes, flags, true);
	if (NULL == heapBytes) {
		MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(omrVMThread->_vm);
		if (NULL == extensions->getGlobalCollector()) {
			/* Lazily create the collector and try to allocate again. */
			if (OMR_ERROR_NONE == OMR_GC_InitializeCollector(omrVMThread)) {
				heapBytes = allocHelper(omrVMThread, sizeInBytes, flags, true);
			}
		}
	}
	return heapBytes;
}
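/*
 * Hedged usage sketch: the size and flags below are placeholders (real callers
 * derive them from the language object model), and initializing the returned
 * raw heap memory is the caller's responsibility.
 */
static omrobjectptr_t
allocateExample(OMR_VMThread* omrVMThread)
{
	omrobjectptr_t object = OMR_GC_Allocate(omrVMThread, 64, 0);
	if (NULL == object) {
		/* still NULL after the lazy-collector retry: treat as out of memory */
	}
	return object;
}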
/**
 * Instantiate a new buffer object.
 * @param size Buffer size
 */
MM_VerboseBuffer *
MM_VerboseBuffer::newInstance(MM_EnvironmentBase *env, uintptr_t size)
{
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());

	MM_VerboseBuffer *verboseBuffer = (MM_VerboseBuffer *)extensions->getForge()->allocate(sizeof(MM_VerboseBuffer), MM_AllocationCategory::DIAGNOSTIC, OMR_GET_CALLSITE());
	if (NULL != verboseBuffer) {
		new(verboseBuffer) MM_VerboseBuffer(env);
		if (!verboseBuffer->initialize(env, size)) {
			verboseBuffer->kill(env);
			verboseBuffer = NULL;
		}
	}
	return verboseBuffer;
}
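/*
 * Lifecycle sketch: together with initialize() and kill() in this section, this
 * is the usual OMR two-phase construction (forge-allocate plus placement-new,
 * then initialize(), with kill() as the only teardown path). The 4KB size is
 * arbitrary.
 */
static void
verboseBufferLifecycleExample(MM_EnvironmentBase *env)
{
	MM_VerboseBuffer *buffer = MM_VerboseBuffer::newInstance(env, 4096);
	if (NULL != buffer) {
		/* ... format verbose GC output into the buffer ... */
		buffer->kill(env); /* never delete/free directly; the object lives in forge memory */
	}
}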
void
MM_CollectorLanguageInterfaceImpl::generationalWriteBarrierStore(OMR_VMThread *omrThread, omrobjectptr_t parentObject, fomrobject_t *parentSlot, omrobjectptr_t childObject)
{
	GC_SlotObject slotObject(omrThread->_vm, parentSlot);
	slotObject.writeReferenceToSlot(childObject);

	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(omrThread->_vm);
	if (extensions->scavengerEnabled) {
		if (extensions->isOld(parentObject) && !extensions->isOld(childObject)) {
			if (extensions->objectModel.atomicSetRemembered(parentObject)) {
				/* The object has been successfully marked as REMEMBERED - allocate an entry in the remembered set */
				MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(omrThread);
				extensions->scavenger->addToRememberedSetFragment(env, parentObject);
			}
		}
	}
}
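/*
 * Hedged picture of the glue-side call ("cli" is an assumed
 * MM_CollectorLanguageInterfaceImpl pointer; the other names come from the
 * snippet above). The barrier performs the store first, then sets the parent's
 * REMEMBERED bit at most once, so an old-space parent pointing at a new-space
 * child lands in the remembered set. A direct GC_SlotObject store would skip
 * this bookkeeping and could let the scavenger miss the old->new reference.
 */
static void
storeWithBarrier(MM_CollectorLanguageInterfaceImpl *cli, OMR_VMThread *omrThread, omrobjectptr_t parentObject, fomrobject_t *parentSlot, omrobjectptr_t childObject)
{
	cli->generationalWriteBarrierStore(omrThread, parentObject, parentSlot, childObject);
}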
MM_VerboseHandlerOutput *
MM_VerboseManager::createVerboseHandlerOutputObject(MM_EnvironmentBase *env)
{
	MM_VerboseHandlerOutput *handler = NULL;
	MM_GCExtensionsBase *extensions = env->getExtensions();

	if (extensions->isStandardGC()) {
#if defined(OMR_GC_MODRON_STANDARD)
		handler = MM_VerboseHandlerOutputStandard::newInstance(env, this);
#endif /* defined(OMR_GC_MODRON_STANDARD) */
	} else {
		handler = MM_VerboseHandlerOutput::newInstance(env, this);
	}

	return handler;
}
bool
MM_ConfigurationStandard::initialize(MM_EnvironmentBase* env)
{
	MM_GCExtensionsBase* extensions = env->getExtensions();
	bool result = MM_Configuration::initialize(env);
	if (result) {
		extensions->payAllocationTax = false;
#if defined(OMR_GC_MODRON_CONCURRENT_MARK)
		extensions->payAllocationTax = extensions->payAllocationTax || extensions->concurrentMark;
#endif /* OMR_GC_MODRON_CONCURRENT_MARK */
#if defined(OMR_GC_CONCURRENT_SWEEP)
		extensions->payAllocationTax = extensions->payAllocationTax || extensions->concurrentSweep;
#endif /* OMR_GC_CONCURRENT_SWEEP */
		extensions->setStandardGC(true);
	}

	return result;
}
/**
 * Generate an expanded filename based on currentFile.
 * The caller is responsible for freeing the returned memory.
 *
 * @param env the current thread
 * @param currentFile the current file number to substitute into the filename template
 *
 * @return NULL on failure, allocated memory on success
 */
char*
MM_VerboseWriterFileLogging::expandFilename(MM_EnvironmentBase *env, uintptr_t currentFile)
{
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());

	if (_mode == rotating_files) {
		omrstr_set_token(_tokens, "seq", "%03zu", currentFile + 1); /* plus one so the filenames start from .001 instead of .000 */
	}

	uintptr_t len = omrstr_subst_tokens(NULL, 0, _filename, _tokens);
	char *filenameToOpen = (char*)extensions->getForge()->allocate(len, OMR::GC::AllocationCategory::DIAGNOSTIC, OMR_GET_CALLSITE());
	if (NULL != filenameToOpen) {
		omrstr_subst_tokens(filenameToOpen, len, _filename, _tokens);
	}

	return filenameToOpen;
}
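/*
 * Worked example with a hypothetical template: if _filename is
 * "verbosegc.%seq.log" and the writer is in rotating_files mode, a currentFile
 * of 0 sets the "seq" token to "001", so the returned name is
 * "verbosegc.001.log". Ownership of the returned string passes to the caller,
 * which must free it through the forge, as findInitialFile() above does.
 */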
/**
 * Create and initialize a new collector language interface instance.
 */
MM_CollectorLanguageInterfaceImpl *
MM_CollectorLanguageInterfaceImpl::newInstance(MM_EnvironmentBase *env)
{
	MM_CollectorLanguageInterfaceImpl *cli = NULL;
	OMR_VM *omrVM = env->getOmrVM();
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(omrVM);

	cli = (MM_CollectorLanguageInterfaceImpl *)extensions->getForge()->allocate(sizeof(MM_CollectorLanguageInterfaceImpl), OMR::GC::AllocationCategory::FIXED, OMR_GET_CALLSITE());
	if (NULL != cli) {
		new(cli) MM_CollectorLanguageInterfaceImpl(omrVM);
		if (!cli->initialize(omrVM)) {
			cli->kill(env);
			cli = NULL;
		}
	}

	return cli;
}
/**
 * Initialize the buffer object.
 * @param size Buffer size
 */
bool
MM_VerboseBuffer::initialize(MM_EnvironmentBase *env, uintptr_t size)
{
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());

	if (0 == size) {
		return false;
	}

	if (NULL == (_buffer = (char *)extensions->getForge()->allocate(size, MM_AllocationCategory::DIAGNOSTIC, OMR_GET_CALLSITE()))) {
		return false;
	}

	_bufferTop = _buffer + size;
	reset();

	return true;
}
bool
MM_ConfigurationSegregated::initialize(MM_EnvironmentBase *env)
{
	bool success = false;

	/* OMRTODO investigate why these must be equal or it segfaults. */
	MM_GCExtensionsBase *extensions = env->getExtensions();
	extensions->splitAvailableListSplitAmount = extensions->gcThreadCount;

	if (MM_Configuration::initialize(env)) {
		env->getOmrVM()->_sizeClasses = _delegate.getSegregatedSizeClasses(env);
		if (NULL != env->getOmrVM()->_sizeClasses) {
			extensions->setSegregatedHeap(true);
			extensions->setStandardGC(true);
			extensions->arrayletsPerRegion = extensions->regionSize / env->getOmrVM()->_arrayletLeafSize;
			success = true;
		}
	}

	return success;
}
/**
 * Opens the file to log output to and prints the header.
 * @return true on success, false otherwise
 */
bool
MM_VerboseWriterFileLoggingSynchronous::openFile(MM_EnvironmentBase *env)
{
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
	MM_GCExtensionsBase* extensions = env->getExtensions();
	const char* version = omrgc_get_version(env->getOmrVM());

	char *filenameToOpen = expandFilename(env, _currentFile);
	if (NULL == filenameToOpen) {
		return false;
	}

	_logFileDescriptor = omrfile_open(filenameToOpen, EsOpenRead | EsOpenWrite | EsOpenCreate | EsOpenTruncate, 0666);
	if (-1 == _logFileDescriptor) {
		char *cursor = filenameToOpen;
		/*
		 * This may have failed due to directories in the path not being available.
		 * Try to create these directories and attempt to open again before failing.
		 */
		while (NULL != (cursor = strchr(++cursor, DIR_SEPARATOR))) {
			*cursor = '\0';
			omrfile_mkdir(filenameToOpen);
			*cursor = DIR_SEPARATOR;
		}

		/* Try again */
		_logFileDescriptor = omrfile_open(filenameToOpen, EsOpenRead | EsOpenWrite | EsOpenCreate | EsOpenTruncate, 0666);
		if (-1 == _logFileDescriptor) {
			_manager->handleFileOpenError(env, filenameToOpen);
			extensions->getForge()->free(filenameToOpen);
			return false;
		}
	}

	extensions->getForge()->free(filenameToOpen);

	omrfile_printf(_logFileDescriptor, getHeader(env), version);

	return true;
}
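/*
 * Note: the mkdir loop above walks the path left to right. For a hypothetical
 * "logs/gc/verbose.log" it truncates at each DIR_SEPARATOR to create "logs"
 * and then "logs/gc", restores the separator each time, and only then retries
 * the open on the full path.
 */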
void
MM_MemoryPoolSplitAddressOrderedListBase::tearDown(MM_EnvironmentBase* env)
{
	MM_MemoryPool::tearDown(env);

	if (NULL != _sweepPoolState) {
		MM_Collector* globalCollector = _extensions->getGlobalCollector();
		Assert_MM_true(NULL != globalCollector);
		globalCollector->deleteSweepPoolState(env, _sweepPoolState);
	}

	if (NULL != _heapFreeLists) {
		for (uintptr_t i = 0; i < _heapFreeListCountExtended; ++i) {
			_heapFreeLists[i].tearDown();
		}
	}

	MM_GCExtensionsBase* extensions = env->getExtensions();
	extensions->getForge()->free(_heapFreeLists);
	extensions->getForge()->free(_currentThreadFreeList);

	if (NULL != _largeObjectAllocateStats) {
		_largeObjectAllocateStats->kill(env);
		_largeObjectAllocateStats = NULL;
	}

	if (NULL != _largeObjectAllocateStatsForFreeList) {
		for (uintptr_t i = 0; i < _heapFreeListCountExtended; ++i) {
			_largeObjectAllocateStatsForFreeList[i].tearDown(env);
		}
		extensions->getForge()->free(_largeObjectAllocateStatsForFreeList);
		_largeObjectAllocateStatsForFreeList = NULL;
	}

	_largeObjectCollectorAllocateStatsForFreeList = NULL;

	_resetLock.tearDown();
}
/**
 * Create a new MM_VerboseWriterFileLoggingSynchronous instance.
 * @return Pointer to the new MM_VerboseWriterFileLoggingSynchronous.
 */
MM_VerboseWriterFileLoggingSynchronous *
MM_VerboseWriterFileLoggingSynchronous::newInstance(MM_EnvironmentBase *env, MM_VerboseManager *manager, char *filename, uintptr_t numFiles, uintptr_t numCycles)
{
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());

	MM_VerboseWriterFileLoggingSynchronous *agent = (MM_VerboseWriterFileLoggingSynchronous *)extensions->getForge()->allocate(sizeof(MM_VerboseWriterFileLoggingSynchronous), OMR::GC::AllocationCategory::DIAGNOSTIC, OMR_GET_CALLSITE());
	if (NULL != agent) {
		new(agent) MM_VerboseWriterFileLoggingSynchronous(env, manager);
		if (!agent->initialize(env, filename, numFiles, numCycles)) {
			agent->kill(env);
			agent = NULL;
		}
	}
	return agent;
}
bool
MM_MemoryManager::createVirtualMemoryForHeap(MM_EnvironmentBase* env, MM_MemoryHandle* handle, uintptr_t heapAlignment, uintptr_t size, uintptr_t tailPadding, void* preferredAddress, void* ceiling)
{
	Assert_MM_true(NULL != handle);
	MM_GCExtensionsBase* extensions = env->getExtensions();

	MM_VirtualMemory* instance = NULL;
	uintptr_t mode = (OMRPORT_VMEM_MEMORY_MODE_READ | OMRPORT_VMEM_MEMORY_MODE_WRITE);
	uintptr_t options = 0;
	uint32_t memoryCategory = OMRMEM_CATEGORY_MM_RUNTIME_HEAP;

	uintptr_t pageSize = extensions->requestedPageSize;
	uintptr_t pageFlags = extensions->requestedPageFlags;
	Assert_MM_true(0 != pageSize);

	uintptr_t allocateSize = size;

	uintptr_t concurrentScavengerPageSize = 0;
	if (extensions->isConcurrentScavengerEnabled()) {
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * Allocate extra memory to guarantee proper alignment regardless of start address location.
		 * The minimum over-allocation should be (Concurrent Scavenger page size - section size); however,
		 * Virtual Memory can return a heap shorter by a region (and here region size == section size),
		 * so to guarantee the desired heap size, over-allocate by a full Concurrent Scavenger page.
		 */
		concurrentScavengerPageSize = extensions->getConcurrentScavengerPageSectionSize() * CONCURRENT_SCAVENGER_PAGE_SECTIONS;
		allocateSize += concurrentScavengerPageSize;
		if (extensions->isDebugConcurrentScavengerPageAlignment()) {
			omrtty_printf("Requested heap size 0x%zx has been extended to 0x%zx for guaranteed alignment\n", size, allocateSize);
		}
	} else {
		if (heapAlignment > pageSize) {
			allocateSize += (heapAlignment - pageSize);
		}
	}

#if defined(OMR_GC_MODRON_SCAVENGER)
	if (extensions->enableSplitHeap) {
		/* currently (ceiling != NULL) is used to recognize CompressedRefs, so it must be NULL for 32-bit platforms */
		Assert_MM_true(NULL == ceiling);

		switch (extensions->splitHeapSection) {
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_TENURE:
			/* trying to get Tenure at the bottom of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_NURSERY:
			/* trying to get Nursery at the top of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_UNKNOWN:
		default:
			Assert_MM_unreachable();
			break;
		}
	}
#endif /* defined(OMR_GC_MODRON_SCAVENGER) */

	if (NULL == ceiling) {
		instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceiling, mode, options, memoryCategory);
	} else {
#if defined(OMR_GC_COMPRESSED_POINTERS)
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * This path is used for Compressed References platforms only.
		 * The ceiling for such platforms is set to the maximum supported memory value (32G for a 3-bit shift);
		 * it is 0 for all other platforms.
		 */
		/* NON_SCALING_LOW_MEMORY_HEAP_CEILING is set to 4G for 64-bit platforms only, 0 for 32-bit platforms */
		Assert_MM_true(NON_SCALING_LOW_MEMORY_HEAP_CEILING > 0);

		/*
		 * Usually the suballocator memory should be allocated first (before the heap); however,
		 * when a preferred address is specified we try to allocate the heap first
		 * to avoid possible interference with the requested heap location.
		 */
		bool shouldHeapBeAllocatedFirst = (NULL != preferredAddress);
		void* startAllocationAddress = preferredAddress;

		/* Set the commit size for the suballocator. This needs to be completed before the call to omrmem_ensure_capacity32 */
		omrport_control(OMRPORT_CTLDATA_ALLOCATE32_COMMIT_SIZE, extensions->suballocatorCommitSize);

		if (!shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				return false;
			}
		}

		options |= OMRPORT_VMEM_STRICT_ADDRESS | OMRPORT_VMEM_ALLOC_QUICK;

#if defined(J9ZOS39064)
		/* 2TO32G area is extended to 64G */
		options |= OMRPORT_VMEM_ZOS_USE2TO32G_AREA;

		/* On z/OS an address space below 2G can not be taken for virtual memory */
#define TWO_GB_ADDRESS ((void*)((uintptr_t)2 * 1024 * 1024 * 1024))
		if (NULL == preferredAddress) {
			startAllocationAddress = TWO_GB_ADDRESS;
		}
#endif /* defined(J9ZOS39064) */

		void* requestedTopAddress = (void*)((uintptr_t)startAllocationAddress + allocateSize + tailPadding);

		if (extensions->isConcurrentScavengerEnabled()) {
			void* ceilingToRequest = ceiling;
			/* The requested top address might be higher than the ceiling because of the added chunk */
			if ((requestedTopAddress > ceiling) && ((void*)((uintptr_t)requestedTopAddress - concurrentScavengerPageSize) <= ceiling)) {
				/* z/OS 2_TO_64/2_TO_32 options do not allow memory requests larger than 64G/32G, so the total requested size including tail padding should not exceed it */
				allocateSize = (uintptr_t)ceiling - (uintptr_t)startAllocationAddress - tailPadding;

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Total allocate size exceeds ceiling %p, reduce allocate size to 0x%zx\n", ceiling, allocateSize);
				}

				/*
				 * There is no way the Nursery will be pushed above the ceiling for valid memory options; however, we have
				 * no idea about the start address, so to guarantee an allocation up to the ceiling we need to request an extended chunk of memory.
				 * Set ceiling to NULL to disable ceiling control. This requires bottom-up direction for the allocation.
				 */
				ceilingToRequest = NULL;
			}

			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;

			/* An attempt to allocate a memory chunk for the heap for Concurrent Scavenger */
			instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceilingToRequest, mode, options, memoryCategory);
		} else {
			if (requestedTopAddress <= ceiling) {
				bool allocationTopDown = true;

				/* define the scan direction when reserving the GC heap in the range of (4G, 32G) */
#if defined(S390) || defined(J9ZOS390)
				/* s390 benefits from smaller shift values, so the allocation direction is bottom-up */
				options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
				allocationTopDown = false;
#else
				options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
#endif /* defined(S390) || defined(J9ZOS390) */

				if (allocationTopDown && extensions->shouldForceSpecifiedShiftingCompression) {
					/* force the heap to be allocated top-down from the address corresponding to the forced shift */
					void* maxAddress = (void*)(((uintptr_t)1 << 32) << extensions->forcedShiftingCompressionAmount);

					instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, maxAddress, mode, options, memoryCategory);
				} else {
					if (requestedTopAddress < (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING) {
						/* Attempt to allocate the heap below 4G */
						instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, (void*)OMR_MIN(NON_SCALING_LOW_MEMORY_HEAP_CEILING, (uintptr_t)ceiling), mode, options, memoryCategory);
					}

					if ((NULL == instance) && (ceiling > (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING)) {
#define THIRTY_TWO_GB_ADDRESS ((uintptr_t)32 * 1024 * 1024 * 1024)
						if (requestedTopAddress <= (void*)THIRTY_TWO_GB_ADDRESS) {
							/*
							 * If the requested object heap size is in the 28G-32G range, allocating it with a 3-bit shift might compromise the amount of low memory below 4G.
							 * To prevent this, go straight to a 4-bit shift if possible.
							 * The allocation attempt below 32G is skipped when all of the following hold:
							 * - a 4-bit shift is an available option
							 * - the requested size is larger than 28G (32 minus 4)
							 * - the allocation direction is top-down, otherwise it does not make sense
							 */
							bool skipAllocationBelow32G = (ceiling > (void*)THIRTY_TWO_GB_ADDRESS)
								&& (requestedTopAddress > (void*)(THIRTY_TWO_GB_ADDRESS - NON_SCALING_LOW_MEMORY_HEAP_CEILING))
								&& allocationTopDown;

							if (!skipAllocationBelow32G) {
								/* Attempt to allocate the heap below 32G */
								instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, (void*)OMR_MIN((uintptr_t)THIRTY_TWO_GB_ADDRESS, (uintptr_t)ceiling), mode, options, memoryCategory);
							}
						}

						/* Attempt to allocate above 32G */
						if ((NULL == instance) && (ceiling > (void*)THIRTY_TWO_GB_ADDRESS)) {
							instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceiling, mode, options, memoryCategory);
						}
					}
				}
			}
		}

		/*
		 * If a preferredAddress was requested, check whether it was actually honoured: if not, release the memory.
		 * For backward compatibility this check is done for compressedrefs platforms only.
		 */
		if ((NULL != preferredAddress) && (NULL != instance) && (instance->getHeapBase() != preferredAddress)) {
			extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;
			instance->kill(env);
			instance = NULL;
			return false;
		}

		if ((NULL != instance) && shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				instance->kill(env);
				instance = NULL;
				return false;
			}
		}
#else /* defined(OMR_GC_COMPRESSED_POINTERS) */
		/*
		 * The code above might be used for non-compressedrefs platforms, but it needs a few adjustments first:
		 * - NON_SCALING_LOW_MEMORY_HEAP_CEILING should be set
		 * - the OMRPORT_VMEM_ZOS_USE2TO32G_AREA flag for z/OS is expected to be used for compressedrefs heap allocation only
		 */
		Assert_MM_unimplemented();
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
	}

	if ((NULL != instance) && extensions->largePageFailOnError && (instance->getPageSize() != extensions->requestedPageSize)) {
		extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_SATISFY_REQUESTED_PAGE_SIZE;
		instance->kill(env);
		instance = NULL;
		return false;
	}

	handle->setVirtualMemory(instance);
	if (NULL != instance) {
		instance->incrementConsumerCount();
		handle->setMemoryBase(instance->getHeapBase());
		handle->setMemoryTop(instance->getHeapTop());

		/*
		 * Align the Nursery location to the Concurrent Scavenger page and calculate the page's start address.
		 * There are two possible cases here:
		 * - the Nursery already fits a Concurrent Scavenger page = no extra alignment required
		 * - the current Nursery location has crossed a Concurrent Scavenger page boundary, so it needs to be pushed higher
		 *   to have the Nursery low address aligned to the Concurrent Scavenger page
		 */
		if (extensions->isConcurrentScavengerEnabled()) {
			OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
			/* projected Nursery base and top */
			/* the Nursery is assumed to be located in the high addresses of the heap */
			uintptr_t heapBase = (uintptr_t)handle->getMemoryBase();
			uintptr_t nurseryTop = heapBase + size;
			uintptr_t nurseryBase = nurseryTop - extensions->maxNewSpaceSize;

			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Allocated memory for heap: [%p,%p]\n", handle->getMemoryBase(), handle->getMemoryTop());
			}

			uintptr_t baseAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryBase + 1);
			uintptr_t topAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryTop);

			if (baseAligned == topAligned) {
				/* the Nursery already fits a Concurrent Scavenger page */
				extensions->setConcurrentScavengerPageStartAddress((void*)(baseAligned - concurrentScavengerPageSize));

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address 0x%zx\n", nurseryBase);
				}
			} else {
				/* the Nursery location should be adjusted */
				extensions->setConcurrentScavengerPageStartAddress((void*)baseAligned);

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address adjusted to 0x%zx\n", baseAligned);
				}

				/* Move up the entire heap for proper Nursery adjustment */
				heapBase += (baseAligned - nurseryBase);
				handle->setMemoryBase((void*)heapBase);

				/* the top of the adjusted Nursery should fit the reserved memory */
				Assert_GC_true_with_message3(env, ((heapBase + size) <= (uintptr_t)handle->getMemoryTop()),
					"End of projected heap (base 0x%zx + size 0x%zx) is larger than Top allocated %p\n", heapBase, size, handle->getMemoryTop());
			}

			/* adjust the heap top to the lowest possible address */
			handle->setMemoryTop((void*)(heapBase + size));

			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Adjusted heap location: [%p,%p], Concurrent Scavenger Page start address %p, Concurrent Scavenger Page size 0x%zx\n",
					handle->getMemoryBase(), handle->getMemoryTop(), extensions->getConcurrentScavengerPageStartAddress(), concurrentScavengerPageSize);
			}

			/*
			 * The Concurrent Scavenger page location might be aligned out of the memory range supported by Compressed References.
			 * Fail to initialize in this case.
			 */
			if ((NULL != ceiling) && (handle->getMemoryTop() > ceiling)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;
				destroyVirtualMemory(env, handle);
				instance = NULL;
			}
		}
	}

#if defined(OMR_VALGRIND_MEMCHECK)
	/* Use the handle's memory base to refer to the valgrind memory pool */
	valgrindCreateMempool(extensions, env, (uintptr_t)handle->getMemoryBase());
#endif /* defined(OMR_VALGRIND_MEMCHECK) */

	return NULL != instance;
}
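/*
 * Hedged caller sketch; parameter values are illustrative. The NULL ceiling
 * selects the simple (non-compressedrefs) branch at the top of the function.
 */
static bool
reserveHeapExample(MM_EnvironmentBase* env, MM_MemoryManager* memoryManager, MM_MemoryHandle* handle)
{
	MM_GCExtensionsBase* extensions = env->getExtensions();
	uintptr_t size = (uintptr_t)512 * 1024 * 1024; /* 512MB, arbitrary */
	if (!memoryManager->createVirtualMemoryForHeap(env, handle, extensions->heapAlignment, size, 0, NULL, NULL)) {
		return false;
	}
	void* base = memoryManager->getHeapBase(handle);
	/* ... commit memory against the handle and build the heap starting at base ... */
	return NULL != base;
}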
MMINLINE void
clearRememberedSetOverflowState()
{
	_extensions->clearRememberedSetOverflowState();
}