void
MM_VerboseHandlerOutputStandard::handleConcurrentHalted(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ConcurrentHaltedEvent* event = (MM_ConcurrentHaltedEvent*)eventData;
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	char tagTemplate[200];

	enterAtomicReportingBlock();
	getTagTemplate(tagTemplate, sizeof(tagTemplate), manager->getIdAndIncrement(), omrtime_current_time_millis());
	writer->formatAndOutput(env, 0, "<concurrent-halted %s>", tagTemplate);

	handleConcurrentHaltedInternal(env, eventData);

	writer->formatAndOutput(env, 1,
		"<traced "
		"bytesTarget=\"%zu\" bytesTotal=\"%zu\" "
		"bytesByMutator=\"%zu\" bytesByHelper=\"%zu\" "
		"percent=\"%zu\" />",
		event->traceTarget,
		event->tracedTotal,
		event->tracedByMutators,
		event->tracedByHelpers,
		event->traceTarget == 0 ? 0 : (uintptr_t)(((uint64_t)event->tracedTotal * 100) / (uint64_t)event->traceTarget));
	writer->formatAndOutput(env, 1, "<cards cleaned=\"%zu\" thresholdBytes=\"%zu\" />", event->cardsCleaned, event->cardCleaningThreshold);
	writer->formatAndOutput(env, 0, "</concurrent-halted>");
	writer->flush(env);
	exitAtomicReportingBlock();
}
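/*
 * Illustrative shape of the stanza emitted above (values are hypothetical, and the
 * handleConcurrentHaltedInternal() hook may emit additional lines between the tags),
 * assuming a trace target of 1048576 bytes of which 786432 were traced:
 *
 *   <concurrent-halted id="13" timestamp="...">
 *     <traced bytesTarget="1048576" bytesTotal="786432" bytesByMutator="524288" bytesByHelper="262144" percent="75" />
 *     <cards cleaned="100" thresholdBytes="4096" />
 *   </concurrent-halted>
 *
 * The percent attribute is computed in 64-bit arithmetic ((tracedTotal * 100) / traceTarget)
 * so the multiplication cannot overflow uintptr_t on 32-bit platforms, and it is forced to 0
 * when traceTarget is 0 to avoid a division by zero.
 */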
void
MM_VerboseHandlerOutput::handleInitialized(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_InitializedEvent* event = (MM_InitializedEvent*)eventData;
	MM_VerboseWriterChain* writer = _manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	char tagTemplate[200];

	_manager->setInitializedTime(event->timestamp);

	getTagTemplate(tagTemplate, sizeof(tagTemplate), _manager->getIdAndIncrement(), omrtime_current_time_millis());
	enterAtomicReportingBlock();
	writer->formatAndOutput(env, 0, "<initialized %s>", tagTemplate);
	writer->formatAndOutput(env, 1, "<attribute name=\"gcPolicy\" value=\"%s\" />", event->gcPolicy);
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
	if (_extensions->isConcurrentScavengerEnabled()) {
		writer->formatAndOutput(env, 1, "<attribute name=\"concurrentScavenger\" value=\"%s\" />",
#if defined(S390)
			_extensions->concurrentScavengerHWSupport ? "enabled, with H/W assistance" : "enabled, without H/W assistance");
#else /* defined(S390) */
			"enabled");
#endif /* defined(S390) */
	}
void
MM_VerboseHandlerOutputStandard::handleConcurrentKickoff(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ConcurrentKickoffEvent* event = (MM_ConcurrentKickoffEvent*)eventData;
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);
	MM_GCExtensionsBase* extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	char tagTemplate[200];

	enterAtomicReportingBlock();
	getTagTemplate(tagTemplate, sizeof(tagTemplate), manager->getIdAndIncrement(), omrtime_current_time_millis());
	writer->formatAndOutput(env, 0, "<concurrent-kickoff %s>", tagTemplate);

	const char* reasonString = getConcurrentKickoffReason(eventData);

	if (extensions->scavengerEnabled) {
		writer->formatAndOutput(env, 1,
			"<kickoff reason=\"%s\" targetBytes=\"%zu\" thresholdFreeBytes=\"%zu\" remainingFree=\"%zu\" tenureFreeBytes=\"%zu\" nurseryFreeBytes=\"%zu\" />",
			reasonString, event->traceTarget, event->kickOffThreshold, event->remainingFree, event->commonData->tenureFreeBytes, event->commonData->nurseryFreeBytes);
	} else {
		writer->formatAndOutput(env, 1,
			"<kickoff reason=\"%s\" targetBytes=\"%zu\" thresholdFreeBytes=\"%zu\" remainingFree=\"%zu\" tenureFreeBytes=\"%zu\" />",
			reasonString, event->traceTarget, event->kickOffThreshold, event->remainingFree, event->commonData->tenureFreeBytes);
	}
	writer->formatAndOutput(env, 0, "</concurrent-kickoff>");
	writer->flush(env);
	handleConcurrentKickoffInternal(env, eventData);
	exitAtomicReportingBlock();
}
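/*
 * The two formatAndOutput() calls above differ only in the trailing nurseryFreeBytes
 * attribute, which is meaningful only when a generational (scavenger) policy is active.
 * An illustrative <kickoff> line with the scavenger enabled (hypothetical values; the
 * reason string comes from getConcurrentKickoffReason()):
 *
 *   <kickoff reason="..." targetBytes="8388608" thresholdFreeBytes="1048576" remainingFree="524288" tenureFreeBytes="2097152" nurseryFreeBytes="4194304" />
 */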
void
MM_VerboseHandlerOutputStandard::handleConcurrentCollectionEnd(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ConcurrentCollectionEndEvent* event = (MM_ConcurrentCollectionEndEvent*)eventData;
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	char tagTemplate[200];

	enterAtomicReportingBlock();
	getTagTemplate(tagTemplate, sizeof(tagTemplate), manager->getIdAndIncrement(), omrtime_current_time_millis());
	writer->formatAndOutput(env, 0, "<concurrent-collection-end %s />", tagTemplate);
	writer->flush(env);
	handleConcurrentCollectionEndInternal(env, eventData);
	exitAtomicReportingBlock();
}
void
MM_VerboseHandlerOutputStandard::handleConcurrentCollectionStart(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ConcurrentCollectionStartEvent* event = (MM_ConcurrentCollectionStartEvent*)eventData;
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

	uint64_t currentTime = event->timestamp;
	uint64_t previousTime = manager->getLastConcurrentGCTime();
	manager->setLastConcurrentGCTime(currentTime);
	if (0 == previousTime) {
		previousTime = manager->getInitializedTime();
	}
	uint64_t deltaTime = omrtime_hires_delta(previousTime, currentTime, OMRPORT_TIME_DELTA_IN_MICROSECONDS);

	const char* cardCleaningReasonString = "unknown";
	switch (event->cardCleaningReason) {
	case TRACING_COMPLETED:
		cardCleaningReasonString = "tracing completed";
		break;
	case CARD_CLEANING_THRESHOLD_REACHED:
		cardCleaningReasonString = "card cleaning threshold reached";
		break;
	}

	char tagTemplate[200];
	enterAtomicReportingBlock();
	getTagTemplate(tagTemplate, sizeof(tagTemplate), manager->getIdAndIncrement(), omrtime_current_time_millis());
	writer->formatAndOutput(env, 0, "<concurrent-collection-start %s intervalms=\"%llu.%03llu\" >", tagTemplate, deltaTime / 1000, deltaTime % 1000);
	writer->formatAndOutput(env, 1,
		"<concurrent-trace-info reason=\"%s\" tracedByMutators=\"%zu\" tracedByHelpers=\"%zu\" cardsCleaned=\"%zu\" workStackOverflowCount=\"%zu\" />",
		cardCleaningReasonString, event->tracedByMutators, event->tracedByHelpers, event->cardsCleaned, event->workStackOverflowCount);
	writer->formatAndOutput(env, 0, "</concurrent-collection-start>");
	writer->flush(env);
	handleConcurrentCollectionStartInternal(env, eventData);
	exitAtomicReportingBlock();
}
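/*
 * intervalms formatting above: deltaTime is in microseconds, so deltaTime / 1000 yields
 * whole milliseconds and deltaTime % 1000 the fractional part, zero-padded to three digits.
 * For example (hypothetical), deltaTime == 1234567 microseconds prints as intervalms="1234.567".
 * The first concurrent cycle has no previous cycle, so its interval is measured from the
 * time recorded at initialization instead.
 */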
/*
 * Debug routine to dump details of the current state of the freelist.
 */
void
MM_MemoryPoolSplitAddressOrderedListBase::printCurrentFreeList(MM_EnvironmentBase* env, const char* area)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	omrtty_printf("Analysis of %s freelist: \n", area);

	for (uintptr_t i = 0; i < _heapFreeListCountExtended; ++i) {
		MM_HeapLinkedFreeHeader* currentFreeEntry = _heapFreeLists[i]._freeList;
		while (currentFreeEntry) {
			/* use %zu: getSize() returns uintptr_t, which %i would truncate on 64-bit platforms */
			const char* msg = "Free chunk %p -> %p (%zu) \n";
			if (_heapFreeListCount == i) {
				msg = "Reserved chunk %p -> %p (%zu) \n";
			}
			omrtty_printf(msg, currentFreeEntry, currentFreeEntry->afterEnd(), currentFreeEntry->getSize());
			currentFreeEntry = currentFreeEntry->getNext();
		}
	}
}
void
MM_VerboseHandlerOutputStandard::handleConcurrentTracingEnd(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ConcurrentCompleteTracingEndEvent* event = (MM_ConcurrentCompleteTracingEndEvent*)eventData;
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	uint64_t durationUs = omrtime_hires_delta(0, event->duration, OMRPORT_TIME_DELTA_IN_MICROSECONDS);

	enterAtomicReportingBlock();
	handleGCOPOuterStanzaStart(env, "tracing", env->_cycleState->_verboseContextID, durationUs, true);

	writer->formatAndOutput(env, 1, "<trace bytesTraced=\"%zu\" workStackOverflowCount=\"%zu\" />", event->bytesTraced, event->workStackOverflowCount);
	handleConcurrentTracingEndInternal(env, eventData);

	handleGCOPOuterStanzaEnd(env);
	writer->flush(env);
	exitAtomicReportingBlock();
}
void
MM_VerboseHandlerOutputStandard::handleConcurrentAborted(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ConcurrentAbortedEvent* event = (MM_ConcurrentAbortedEvent*)eventData;
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);

	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	char tagTemplate[100];

	enterAtomicReportingBlock();
	getTagTemplate(tagTemplate, sizeof(tagTemplate), manager->getIdAndIncrement(), omrtime_current_time_millis());
	writer->formatAndOutput(env, 0, "<concurrent-aborted %s>", tagTemplate);

	const char* reason;
	switch ((CollectionAbortReason)event->reason) {
	case ABORT_COLLECTION_INSUFFICENT_PROGRESS:
		reason = "insufficient progress made";
		break;
	case ABORT_COLLECTION_REMEMBERSET_OVERFLOW:
		reason = "remembered set overflow";
		break;
	case ABORT_COLLECTION_SCAVENGE_REMEMBEREDSET_OVERFLOW:
		reason = "scavenge remembered set overflow";
		break;
	case ABORT_COLLECTION_PREPARE_HEAP_FOR_WALK:
		reason = "prepare heap for walk";
		break;
	default:
		reason = "unknown";
		break;
	}

	writer->formatAndOutput(env, 1, "<reason value=\"%s\" />", reason);
	writer->formatAndOutput(env, 0, "</concurrent-aborted>");
	writer->flush(env);
	handleConcurrentAbortedInternal(env, eventData);
	exitAtomicReportingBlock();
}
bool
MM_MemoryPoolSplitAddressOrderedListBase::printFreeListValidity(MM_EnvironmentBase* env)
{
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
	bool result = true;

	omrtty_printf("----- START SPLIT FREE LIST VALIDITY FOR 0x%p -----\n", this);
	for (uintptr_t i = 0; i < _heapFreeListCountExtended; ++i) {
		if (_heapFreeListCount == i) {
			omrtty_printf("--- Reserved Free List ---\n");
		}
		bool listIsSane = true;
		uintptr_t calculatedSize = 0;
		uintptr_t calculatedHoles = 0;
		MM_HeapLinkedFreeHeader* currentFreeEntry = _heapFreeLists[i]._freeList;
		MM_HeapLinkedFreeHeader* previousFreeEntry = currentFreeEntry;
		while (NULL != currentFreeEntry) {
			listIsSane = listIsSane && ((NULL == currentFreeEntry->getNext()) || (currentFreeEntry < currentFreeEntry->getNext()));
			calculatedSize += currentFreeEntry->getSize();
			++calculatedHoles;
			previousFreeEntry = currentFreeEntry;
			currentFreeEntry = currentFreeEntry->getNext();
		}
		omrtty_printf(" -- Free List %4zu (head: 0x%p, tail: 0x%p, expected size: %16zu, expected holes: %16zu): ",
			i, _heapFreeLists[i]._freeList, previousFreeEntry, _heapFreeLists[i]._freeSize, _heapFreeLists[i]._freeCount);
		listIsSane = listIsSane && (calculatedSize == _heapFreeLists[i]._freeSize) && (calculatedHoles == _heapFreeLists[i]._freeCount);
		if (listIsSane) {
			omrtty_printf("VALID\n");
		} else {
			omrtty_printf("INVALID (calculated size: %16zu, calculated holes: %16zu)\n", calculatedSize, calculatedHoles);
		}
		result = result && listIsSane;
	}
	omrtty_printf("----- END SPLIT FREE LIST VALIDITY FOR 0x%p: %s -----\n", this, (result ? "VALID" : "INVALID"));

	return result;
}
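/*
 * A list is reported VALID above only if all three invariants hold:
 *  1. entries are in strictly ascending address order (each entry is below its successor),
 *  2. the sum of the entry sizes matches the cached _freeSize for that list,
 *  3. the number of entries matches the cached _freeCount for that list.
 * The overall result is the conjunction across all lists, including the reserved list
 * at index _heapFreeListCount.
 */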
bool
MM_MemoryManager::createVirtualMemoryForHeap(MM_EnvironmentBase* env, MM_MemoryHandle* handle, uintptr_t heapAlignment, uintptr_t size, uintptr_t tailPadding, void* preferredAddress, void* ceiling)
{
	Assert_MM_true(NULL != handle);
	MM_GCExtensionsBase* extensions = env->getExtensions();

	MM_VirtualMemory* instance = NULL;
	uintptr_t mode = (OMRPORT_VMEM_MEMORY_MODE_READ | OMRPORT_VMEM_MEMORY_MODE_WRITE);
	uintptr_t options = 0;
	uint32_t memoryCategory = OMRMEM_CATEGORY_MM_RUNTIME_HEAP;

	uintptr_t pageSize = extensions->requestedPageSize;
	uintptr_t pageFlags = extensions->requestedPageFlags;
	Assert_MM_true(0 != pageSize);

	uintptr_t allocateSize = size;

	uintptr_t concurrentScavengerPageSize = 0;
	if (extensions->isConcurrentScavengerEnabled()) {
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * Allocate extra memory to guarantee proper alignment regardless of the start address location.
		 * The minimum over-allocation would be (Concurrent Scavenger page size - section size); however,
		 * Virtual Memory can return a heap shorter by a region (and here region size == section size),
		 * so to guarantee the desired heap size, over-allocate by a full Concurrent Scavenger page.
		 */
		concurrentScavengerPageSize = extensions->getConcurrentScavengerPageSectionSize() * CONCURRENT_SCAVENGER_PAGE_SECTIONS;
		allocateSize += concurrentScavengerPageSize;
		if (extensions->isDebugConcurrentScavengerPageAlignment()) {
			omrtty_printf("Requested heap size 0x%zx has been extended to 0x%zx for guaranteed alignment\n", size, allocateSize);
		}
	} else {
		if (heapAlignment > pageSize) {
			allocateSize += (heapAlignment - pageSize);
		}
	}

#if defined(OMR_GC_MODRON_SCAVENGER)
	if (extensions->enableSplitHeap) {
		/* currently (ceiling != NULL) is used to recognize CompressedRefs, so it must be NULL for 32-bit platforms */
		Assert_MM_true(NULL == ceiling);

		switch (extensions->splitHeapSection) {
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_TENURE:
			/* trying to get Tenure at the bottom of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_NURSERY:
			/* trying to get Nursery at the top of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_UNKNOWN:
		default:
			Assert_MM_unreachable();
			break;
		}
	}
#endif /* defined(OMR_GC_MODRON_SCAVENGER) */

	if (NULL == ceiling) {
		instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceiling, mode, options, memoryCategory);
	} else {
#if defined(OMR_GC_COMPRESSED_POINTERS)
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * This path is used for Compressed References platforms only.
		 * On such platforms the ceiling is set to the maximum supported memory value (32G for a 3-bit shift);
		 * on all other platforms the ceiling is 0.
		 */
		/* NON_SCALING_LOW_MEMORY_HEAP_CEILING is set to 4G for 64-bit platforms only, 0 for 32-bit platforms */
		Assert_MM_true(NON_SCALING_LOW_MEMORY_HEAP_CEILING > 0);

		/*
		 * Usually the suballocator memory should be allocated first (before the heap); however,
		 * when a preferred address is specified we try to allocate the heap first
		 * to avoid possible interference with the requested heap location.
		 */
		bool shouldHeapBeAllocatedFirst = (NULL != preferredAddress);
		void* startAllocationAddress = preferredAddress;

		/* Set the commit size for the suballocator. This needs to be done before the call to omrmem_ensure_capacity32. */
		omrport_control(OMRPORT_CTLDATA_ALLOCATE32_COMMIT_SIZE, extensions->suballocatorCommitSize);

		if (!shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				return false;
			}
		}

		options |= OMRPORT_VMEM_STRICT_ADDRESS | OMRPORT_VMEM_ALLOC_QUICK;

#if defined(J9ZOS39064)
		/* the 2TO32G area is extended to 64G */
		options |= OMRPORT_VMEM_ZOS_USE2TO32G_AREA;

		/* on z/OS, address space below 2G can not be taken for virtual memory */
#define TWO_GB_ADDRESS ((void*)((uintptr_t)2 * 1024 * 1024 * 1024))
		if (NULL == preferredAddress) {
			startAllocationAddress = TWO_GB_ADDRESS;
		}
#endif /* defined(J9ZOS39064) */

		void* requestedTopAddress = (void*)((uintptr_t)startAllocationAddress + allocateSize + tailPadding);

		if (extensions->isConcurrentScavengerEnabled()) {
			void* ceilingToRequest = ceiling;
			/* the requested top address might be higher than the ceiling because of the added chunk */
			if ((requestedTopAddress > ceiling) && ((void*)((uintptr_t)requestedTopAddress - concurrentScavengerPageSize) <= ceiling)) {
				/* the z/OS 2_TO_64/2_TO_32 options do not allow a memory request larger than 64G/32G, so the total requested size including tail padding must not exceed it */
				allocateSize = (uintptr_t)ceiling - (uintptr_t)startAllocationAddress - tailPadding;

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Total allocate size exceeds ceiling %p, reduce allocate size to 0x%zx\n", ceiling, allocateSize);
				}

				/*
				 * There is no way the Nursery would be pushed above the ceiling for valid memory options; however, we have
				 * no idea about the start address, so to guarantee an allocation up to the ceiling we need to request an
				 * extended chunk of memory. Set the ceiling to NULL to disable ceiling control. This requires a bottom-up
				 * direction for the allocation.
				 */
				ceilingToRequest = NULL;
			}

			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;

			/* an attempt to allocate a memory chunk for the heap for Concurrent Scavenger */
			instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceilingToRequest, mode, options, memoryCategory);
		} else {
			if (requestedTopAddress <= ceiling) {
				bool allocationTopDown = true;
				/* define the scan direction when reserving the GC heap in the range of (4G, 32G) */
#if defined(S390) || defined(J9ZOS390)
				/* s390 benefits from smaller shift values, so the allocation direction is bottom-up */
				options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
				allocationTopDown = false;
#else
				options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
#endif /* defined(S390) || defined(J9ZOS390) */

				if (allocationTopDown && extensions->shouldForceSpecifiedShiftingCompression) {
					/* force the heap to be allocated top-down from the address corresponding to the shift */
					void* maxAddress = (void*)(((uintptr_t)1 << 32) << extensions->forcedShiftingCompressionAmount);

					instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, maxAddress, mode, options, memoryCategory);
				} else {
					if (requestedTopAddress < (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING) {
						/* attempt to allocate the heap below 4G */
						instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, (void*)OMR_MIN(NON_SCALING_LOW_MEMORY_HEAP_CEILING, (uintptr_t)ceiling), mode, options, memoryCategory);
					}

					if ((NULL == instance) && (ceiling > (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING)) {
#define THIRTY_TWO_GB_ADDRESS ((uintptr_t)32 * 1024 * 1024 * 1024)
						if (requestedTopAddress <= (void*)THIRTY_TWO_GB_ADDRESS) {
							/*
							 * If the requested object heap size is in the 28G-32G range, allocating it with a 3-bit shift
							 * might compromise the amount of low memory below 4G. To prevent this, go straight to a 4-bit
							 * shift if possible. Logical conditions to skip the allocation attempt below 32G:
							 *  - a 4-bit shift is an available option
							 *  - the requested size is larger than 28G (32 minus 4)
							 *  - the allocation direction is top-down; otherwise it does not make sense
							 */
							bool skipAllocationBelow32G = (ceiling > (void*)THIRTY_TWO_GB_ADDRESS)
								&& (requestedTopAddress > (void*)(THIRTY_TWO_GB_ADDRESS - NON_SCALING_LOW_MEMORY_HEAP_CEILING))
								&& allocationTopDown;

							if (!skipAllocationBelow32G) {
								/* attempt to allocate the heap below 32G */
								instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, (void*)OMR_MIN((uintptr_t)THIRTY_TWO_GB_ADDRESS, (uintptr_t)ceiling), mode, options, memoryCategory);
							}
						}

						/* attempt to allocate above 32G */
						if ((NULL == instance) && (ceiling > (void*)THIRTY_TWO_GB_ADDRESS)) {
							instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceiling, mode, options, memoryCategory);
						}
					}
				}
			}
		}

		/*
		 * If a preferredAddress was requested, check whether it was actually obtained; if not, release the memory.
		 * For backward compatibility this check is done for compressedrefs platforms only.
		 */
		if ((NULL != preferredAddress) && (NULL != instance) && (instance->getHeapBase() != preferredAddress)) {
			extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;
			instance->kill(env);
			instance = NULL;
			return false;
		}

		if ((NULL != instance) && shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				instance->kill(env);
				instance = NULL;
				return false;
			}
		}
#else /* defined(OMR_GC_COMPRESSED_POINTERS) */
		/*
		 * The code above could be used for non-compressedrefs platforms, but it needs a few adjustments first:
		 *  - NON_SCALING_LOW_MEMORY_HEAP_CEILING should be set
		 *  - the OMRPORT_VMEM_ZOS_USE2TO32G_AREA flag on z/OS is expected to be used for compressedrefs heap allocation only
		 */
		Assert_MM_unimplemented();
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
	}

	if ((NULL != instance) && extensions->largePageFailOnError && (instance->getPageSize() != extensions->requestedPageSize)) {
		extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_SATISFY_REQUESTED_PAGE_SIZE;
		instance->kill(env);
		instance = NULL;
		return false;
	}

	handle->setVirtualMemory(instance);
	if (NULL != instance) {
		instance->incrementConsumerCount();
		handle->setMemoryBase(instance->getHeapBase());
		handle->setMemoryTop(instance->getHeapTop());

		/*
		 * Align the Nursery location to the Concurrent Scavenger Page and calculate the page start address.
		 * There are two possible cases here:
		 *  - the Nursery fits a Concurrent Scavenger Page already: no extra alignment required
		 *  - the current Nursery location has crossed a Concurrent Scavenger Page boundary, so it needs
		 *    to be pushed higher to have the Nursery low address aligned to the Concurrent Scavenger Page
		 */
		if (extensions->isConcurrentScavengerEnabled()) {
			OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
			/* projected Nursery base and top, assuming the Nursery is located at the high addresses of the heap */
			uintptr_t heapBase = (uintptr_t)handle->getMemoryBase();
			uintptr_t nurseryTop = heapBase + size;
			uintptr_t nurseryBase = nurseryTop - extensions->maxNewSpaceSize;

			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Allocated memory for heap: [%p,%p]\n", handle->getMemoryBase(), handle->getMemoryTop());
			}

			uintptr_t baseAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryBase + 1);
			uintptr_t topAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryTop);

			if (baseAligned == topAligned) {
				/* the Nursery fits a Concurrent Scavenger Page already */
				extensions->setConcurrentScavengerPageStartAddress((void*)(baseAligned - concurrentScavengerPageSize));

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address 0x%zx\n", nurseryBase);
				}
			} else {
				/* the Nursery location should be adjusted */
				extensions->setConcurrentScavengerPageStartAddress((void*)baseAligned);

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address adjusted to 0x%zx\n", baseAligned);
				}

				/* move the entire heap up for the proper Nursery adjustment */
				heapBase += (baseAligned - nurseryBase);
				handle->setMemoryBase((void*)heapBase);

				/* the top of the adjusted Nursery should fit the reserved memory */
				Assert_GC_true_with_message3(env, ((heapBase + size) <= (uintptr_t)handle->getMemoryTop()),
					"End of projected heap (base 0x%zx + size 0x%zx) is larger than Top allocated %p\n", heapBase, size, handle->getMemoryTop());
			}

			/* adjust the heap top to the lowest possible address */
			handle->setMemoryTop((void*)(heapBase + size));

			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Adjusted heap location: [%p,%p], Concurrent Scavenger Page start address %p, Concurrent Scavenger Page size 0x%zx\n",
					handle->getMemoryBase(), handle->getMemoryTop(), extensions->getConcurrentScavengerPageStartAddress(), concurrentScavengerPageSize);
			}

			/*
			 * The Concurrent Scavenger Page location might be aligned out of the memory range supported
			 * by Compressed References. Fail the initialization in this case.
			 */
			if ((NULL != ceiling) && (handle->getMemoryTop() > ceiling)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;
				destroyVirtualMemory(env, handle);
				instance = NULL;
			}
		}
	}

#if defined(OMR_VALGRIND_MEMCHECK)
	// Use the handle's memory base to refer to the valgrind memory pool
	valgrindCreateMempool(extensions, env, (uintptr_t)handle->getMemoryBase());
#endif /* defined(OMR_VALGRIND_MEMCHECK) */

	return NULL != instance;
}
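/*
 * A worked example of the Nursery alignment logic above, with hypothetical values
 * (the real Concurrent Scavenger page size is getConcurrentScavengerPageSectionSize()
 * multiplied by CONCURRENT_SCAVENGER_PAGE_SECTIONS):
 *
 *   concurrentScavengerPageSize = 0x1000000 (16M)
 *   nurseryBase = 0x10800000, nurseryTop = 0x11800000 (a 16M Nursery)
 *
 *   baseAligned = roundToCeiling(16M, nurseryBase + 1) = 0x11000000
 *   topAligned  = roundToCeiling(16M, nurseryTop)      = 0x12000000
 *
 * Since baseAligned != topAligned, the Nursery straddles a page boundary, so the whole heap
 * is shifted up by (baseAligned - nurseryBase) = 0x800000 and the page start address becomes
 * baseAligned. Had the Nursery already fit within one page (baseAligned == topAligned), the
 * page start would simply be baseAligned - concurrentScavengerPageSize and no shift would occur;
 * the (nurseryBase + 1) in the baseAligned calculation is what makes an already-aligned base
 * land in that case. The over-allocation by a full Concurrent Scavenger page at the top of the
 * function is what guarantees the shifted heap still fits in the reserved range.
 */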