void
MM_MemoryManager::destroyVirtualMemory(MM_EnvironmentBase* env, MM_MemoryHandle* handle)
{
	Assert_MM_true(NULL != handle);
	MM_VirtualMemory* memory = handle->getVirtualMemory();
	if (NULL != memory) {
		Assert_MM_true(memory->getConsumerCount() > 0);
		memory->decrementConsumerCount();
		if (0 == memory->getConsumerCount()) {
			/* this is the last consumer attached to this Virtual Memory instance - delete it */
			memory->kill(env);

			/*
			 * If this instance has been used as preallocated (but not taken) memory it should be cleared as well
			 */
			if (memory == _preAllocated.getVirtualMemory()) {
				_preAllocated.setVirtualMemory(NULL);
			}
		}
	}

	handle->setVirtualMemory(NULL);
	handle->setMemoryBase(NULL);
	handle->setMemoryTop(NULL);

#if defined(OMR_VALGRIND_MEMCHECK)
	valgrindDestroyMempool(env->getExtensions());
#endif /* defined(OMR_VALGRIND_MEMCHECK) */
}
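
/*
 * A minimal usage sketch of the handle lifecycle above (hypothetical caller code, not
 * part of this file; "memoryManager" is an assumed variable name). MM_VirtualMemory
 * instances are reference counted via a consumer count, so destroyVirtualMemory() is
 * safe to call on every handle and only the last consumer releases the backing memory.
 *
 *   MM_MemoryHandle handle;
 *   if (memoryManager->createVirtualMemoryForHeap(env, &handle, heapAlignment, size,
 *           tailPadding, NULL, NULL)) {
 *       // ... use handle.getMemoryBase() / handle.getMemoryTop() ...
 *       memoryManager->destroyVirtualMemory(env, &handle);
 *   }
 */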

MM_VirtualMemory*
MM_VirtualMemory::newInstance(MM_EnvironmentBase* env, uintptr_t heapAlignment, uintptr_t size, uintptr_t pageSize, uintptr_t pageFlags, uintptr_t tailPadding, void* preferredAddress, void* ceiling, uintptr_t mode, uintptr_t options, uint32_t memoryCategory)
{
	MM_VirtualMemory* vmem = (MM_VirtualMemory*)env->getForge()->allocate(sizeof(MM_VirtualMemory), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());

	if (vmem) {
		new (vmem) MM_VirtualMemory(env, heapAlignment, pageSize, pageFlags, tailPadding, mode);
		if (!vmem->initialize(env, size, preferredAddress, ceiling, options, memoryCategory)) {
			vmem->kill(env);
			vmem = NULL;
		}
	}
	return vmem;
}
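
/*
 * An illustrative sketch of the two-phase construction idiom used above (MM_Example is
 * hypothetical; only the pattern matches this file). OMR avoids C++ exceptions, so
 * objects are carved out of the forge, constructed with placement new, and then
 * initialized by a member function that reports failure via its return value; kill()
 * is the matching teardown.
 *
 *   MM_Example* example = (MM_Example*)env->getForge()->allocate(sizeof(MM_Example),
 *       MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());
 *   if (NULL != example) {
 *       new (example) MM_Example(env);      // placement new: construct in place, cannot throw
 *       if (!example->initialize(env)) {    // second phase may fail without exceptions
 *           example->kill(env);             // tears down and returns memory to the forge
 *           example = NULL;
 *       }
 *   }
 */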

bool
MM_MemoryManager::createVirtualMemoryForHeap(MM_EnvironmentBase* env, MM_MemoryHandle* handle, uintptr_t heapAlignment, uintptr_t size, uintptr_t tailPadding, void* preferredAddress, void* ceiling)
{
	Assert_MM_true(NULL != handle);
	MM_GCExtensionsBase* extensions = env->getExtensions();

	MM_VirtualMemory* instance = NULL;
	uintptr_t mode = (OMRPORT_VMEM_MEMORY_MODE_READ | OMRPORT_VMEM_MEMORY_MODE_WRITE);
	uintptr_t options = 0;
	uint32_t memoryCategory = OMRMEM_CATEGORY_MM_RUNTIME_HEAP;

	uintptr_t pageSize = extensions->requestedPageSize;
	uintptr_t pageFlags = extensions->requestedPageFlags;
	Assert_MM_true(0 != pageSize);

	uintptr_t allocateSize = size;

	uintptr_t concurrentScavengerPageSize = 0;
	if (extensions->isConcurrentScavengerEnabled()) {
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * Allocate extra memory to guarantee proper alignment regardless of the start address location.
		 * The minimum over-allocation would be (Concurrent Scavenger page size - section size), however
		 * Virtual Memory can return a heap shorter by one region (and here region size == section size),
		 * so to guarantee the desired heap size, over-allocate by a full Concurrent Scavenger page.
		 */
		concurrentScavengerPageSize = extensions->getConcurrentScavengerPageSectionSize() * CONCURRENT_SCAVENGER_PAGE_SECTIONS;
		allocateSize += concurrentScavengerPageSize;
		if (extensions->isDebugConcurrentScavengerPageAlignment()) {
			omrtty_printf("Requested heap size 0x%zx has been extended to 0x%zx for guaranteed alignment\n", size, allocateSize);
		}
	} else {
		if (heapAlignment > pageSize) {
			allocateSize += (heapAlignment - pageSize);
		}
	}

#if defined(OMR_GC_MODRON_SCAVENGER)
	if (extensions->enableSplitHeap) {
		/* currently (ceiling != NULL) is used to recognize Compressed Refs, so it must be NULL for 32-bit platforms */
		Assert_MM_true(NULL == ceiling);

		switch (extensions->splitHeapSection) {
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_TENURE:
			/* trying to get Tenure at the bottom of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_NURSERY:
			/* trying to get Nursery at the top of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_UNKNOWN:
		default:
			Assert_MM_unreachable();
			break;
		}
	}
#endif /* defined(OMR_GC_MODRON_SCAVENGER) */

	if (NULL == ceiling) {
		instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceiling, mode, options, memoryCategory);
	} else {
#if defined(OMR_GC_COMPRESSED_POINTERS)
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * This path is used for Compressed References platforms only.
		 * The ceiling for such platforms is set to the maximum supported memory value (32G for a 3-bit shift);
		 * the ceiling is 0 for all other platforms.
		 */

		/* NON_SCALING_LOW_MEMORY_HEAP_CEILING is set to 4G for 64-bit platforms only, 0 for 32-bit platforms */
		Assert_MM_true(NON_SCALING_LOW_MEMORY_HEAP_CEILING > 0);

		/*
		 * Usually the suballocator memory should be allocated first (before the heap), however
		 * when a preferred address is specified we try to allocate the heap first
		 * to avoid possible interference with the requested heap location.
		 */
		bool shouldHeapBeAllocatedFirst = (NULL != preferredAddress);
		void* startAllocationAddress = preferredAddress;

		/* Set the commit size for the suballocator. This needs to be completed before the call to omrmem_ensure_capacity32 */
		omrport_control(OMRPORT_CTLDATA_ALLOCATE32_COMMIT_SIZE, extensions->suballocatorCommitSize);

		if (!shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				return false;
			}
		}

		options |= OMRPORT_VMEM_STRICT_ADDRESS | OMRPORT_VMEM_ALLOC_QUICK;

#if defined(J9ZOS39064)
		/* the 2TO32G area is extended to 64G */
		options |= OMRPORT_VMEM_ZOS_USE2TO32G_AREA;

		/*
		 * On z/OS an address space below 2G can not be taken for virtual memory
		 */
#define TWO_GB_ADDRESS ((void*)((uintptr_t)2 * 1024 * 1024 * 1024))
		if (NULL == preferredAddress) {
			startAllocationAddress = TWO_GB_ADDRESS;
		}
#endif /* defined(J9ZOS39064) */

		void* requestedTopAddress = (void*)((uintptr_t)startAllocationAddress + allocateSize + tailPadding);

		if (extensions->isConcurrentScavengerEnabled()) {
			void* ceilingToRequest = ceiling;
			/* The requested top address might be higher than the ceiling because of the added chunk */
			if ((requestedTopAddress > ceiling) && ((void*)((uintptr_t)requestedTopAddress - concurrentScavengerPageSize) <= ceiling)) {
				/* The z/OS 2_TO_64/2_TO_32 options do not allow memory requests larger than 64G/32G, so the total requested size including tail padding must not exceed it */
				allocateSize = (uintptr_t)ceiling - (uintptr_t)startAllocationAddress - tailPadding;

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Total allocate size exceeds ceiling %p, reduce allocate size to 0x%zx\n", ceiling, allocateSize);
				}

				/*
				 * There is no way the Nursery can be pushed above the ceiling for valid memory options, however we have
				 * no idea about the start address, so to guarantee an allocation up to the ceiling we need to request an extended chunk of memory.
				 * Set the ceiling to NULL to disable ceiling control. This requires a bottom-up allocation direction.
				 */
				ceilingToRequest = NULL;
			}

			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;

			/* An attempt to allocate a chunk of memory for the Concurrent Scavenger heap */
			instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceilingToRequest, mode, options, memoryCategory);
		} else {
			if (requestedTopAddress <= ceiling) {
				bool allocationTopDown = true;

				/* define the scan direction when reserving the GC heap in the range of (4G, 32G) */
#if defined(S390) || defined(J9ZOS390)
				/* s390 benefits from smaller shift values, so the allocation direction is bottom-up */
				options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
				allocationTopDown = false;
#else
				options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
#endif /* defined(S390) || defined(J9ZOS390) */

				if (allocationTopDown && extensions->shouldForceSpecifiedShiftingCompression) {
					/* force the heap to be allocated top-down from the address corresponding to the forced shift */
					void* maxAddress = (void*)(((uintptr_t)1 << 32) << extensions->forcedShiftingCompressionAmount);

					instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, maxAddress, mode, options, memoryCategory);
				} else {
					if (requestedTopAddress < (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING) {
						/*
						 * Attempt to allocate the heap below 4G
						 */
						instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, (void*)OMR_MIN(NON_SCALING_LOW_MEMORY_HEAP_CEILING, (uintptr_t)ceiling), mode, options, memoryCategory);
					}

					if ((NULL == instance) && (ceiling > (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING)) {

#define THIRTY_TWO_GB_ADDRESS ((uintptr_t)32 * 1024 * 1024 * 1024)
						if (requestedTopAddress <= (void*)THIRTY_TWO_GB_ADDRESS) {
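							/*
							 * [Illustrative arithmetic, not from the original source] With compressed references,
							 * a 32-bit compressed pointer scaled by a shift of S can address (4G << S) bytes:
							 * shift 0 -> 4G, shift 1 -> 8G, shift 2 -> 16G, shift 3 -> 32G, shift 4 -> 64G.
							 * That is why 4G (shift 0, "non-scaling") and 32G (shift 3) are the ceilings tried here,
							 * and why the forced-shift path above computes maxAddress as ((uintptr_t)1 << 32) << shift.
							 */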
							/*
							 * If the requested object heap size is in the 28G-32G range, allocating it with a 3-bit shift
							 * might compromise the amount of low memory below 4G.
							 * To prevent this, go straight to a 4-bit shift if possible.
							 * Conditions to skip the allocation attempt below 32G:
							 *  - a 4-bit shift is an available option
							 *  - the requested size is larger than 28G (32 minus 4)
							 *  - the allocation direction is top-down, otherwise it does not make sense
							 */
							bool skipAllocationBelow32G = (ceiling > (void*)THIRTY_TWO_GB_ADDRESS)
								&& (requestedTopAddress > (void*)(THIRTY_TWO_GB_ADDRESS - NON_SCALING_LOW_MEMORY_HEAP_CEILING))
								&& allocationTopDown;

							if (!skipAllocationBelow32G) {
								/*
								 * Attempt to allocate the heap below 32G
								 */
								instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, (void*)OMR_MIN((uintptr_t)THIRTY_TWO_GB_ADDRESS, (uintptr_t)ceiling), mode, options, memoryCategory);
							}
						}

						/*
						 * Attempt to allocate above 32G
						 */
						if ((NULL == instance) && (ceiling > (void*)THIRTY_TWO_GB_ADDRESS)) {
							instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceiling, mode, options, memoryCategory);
						}
					}
				}
			}
		}

		/*
		 * If a preferredAddress was requested, check whether it was really obtained; if not, release the memory.
		 * For backward compatibility this check should be done for compressed refs platforms only.
		 */
		if ((NULL != preferredAddress) && (NULL != instance) && (instance->getHeapBase() != preferredAddress)) {
			extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;
			instance->kill(env);
			instance = NULL;
			return false;
		}

		if ((NULL != instance) && shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				instance->kill(env);
				instance = NULL;
				return false;
			}
		}

#else /* defined(OMR_GC_COMPRESSED_POINTERS) */
		/*
		 * The code above could be used for non-compressed refs platforms, but it would need a few adjustments:
		 * - NON_SCALING_LOW_MEMORY_HEAP_CEILING should be set
		 * - the OMRPORT_VMEM_ZOS_USE2TO32G_AREA flag for z/OS is expected to be used for compressed refs heap allocation only
		 */
		Assert_MM_unimplemented();
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
	}

	if ((NULL != instance) && extensions->largePageFailOnError && (instance->getPageSize() != extensions->requestedPageSize)) {
		extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_SATISFY_REQUESTED_PAGE_SIZE;
		instance->kill(env);
		instance = NULL;
		return false;
	}

	handle->setVirtualMemory(instance);
	if (NULL != instance) {
		instance->incrementConsumerCount();
		handle->setMemoryBase(instance->getHeapBase());
		handle->setMemoryTop(instance->getHeapTop());

		/*
		 * Align the Nursery location to the Concurrent Scavenger Page and calculate the Concurrent Scavenger Page start address.
		 * There are two possible cases here:
		 * - the Nursery already fits the Concurrent Scavenger Page = no extra alignment required
		 * - the current Nursery location crosses a Concurrent Scavenger Page boundary, so it needs to be pushed higher
		 *   to have the Nursery low address aligned to the Concurrent Scavenger Page
		 */
		if (extensions->isConcurrentScavengerEnabled()) {
			OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
			/* projected Nursery base and top */
			/* the Nursery is assumed to be in the high addresses of the heap */
			uintptr_t heapBase = (uintptr_t)handle->getMemoryBase();
			uintptr_t nurseryTop = heapBase + size;
			uintptr_t nurseryBase = nurseryTop - extensions->maxNewSpaceSize;
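
			/*
			 * [Worked example with illustrative numbers, not from the original source]
			 * Suppose concurrentScavengerPageSize is 0x10000000 (256M), nurseryBase is 0x2f000000
			 * and nurseryTop is 0x3c000000:
			 *   baseAligned = roundToCeiling(0x10000000, 0x2f000001) = 0x30000000
			 *   topAligned  = roundToCeiling(0x10000000, 0x3c000000) = 0x40000000
			 * baseAligned != topAligned, so the Nursery straddles a page boundary and the whole heap
			 * is shifted up by (baseAligned - nurseryBase) = 0x01000000 bytes below.
			 * The "+ 1" on nurseryBase makes an already-aligned base round up to the next boundary,
			 * so a Nursery that exactly fills a page still yields baseAligned == topAligned.
			 */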
			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Allocated memory for heap: [%p,%p]\n", handle->getMemoryBase(), handle->getMemoryTop());
			}

			uintptr_t baseAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryBase + 1);
			uintptr_t topAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryTop);

			if (baseAligned == topAligned) {
				/* the Nursery already fits the Concurrent Scavenger Page */
				extensions->setConcurrentScavengerPageStartAddress((void*)(baseAligned - concurrentScavengerPageSize));

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address 0x%zx\n", nurseryBase);
				}
			} else {
				/* the Nursery location should be adjusted */
				extensions->setConcurrentScavengerPageStartAddress((void*)baseAligned);

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address adjusted to 0x%zx\n", baseAligned);
				}

				/* move the entire heap up for the proper Nursery adjustment */
				heapBase += (baseAligned - nurseryBase);
				handle->setMemoryBase((void*)heapBase);

				/* the top of the adjusted Nursery should fit the reserved memory */
				Assert_GC_true_with_message3(env, ((heapBase + size) <= (uintptr_t)handle->getMemoryTop()),
					"End of projected heap (base 0x%zx + size 0x%zx) is larger than the allocated top %p\n", heapBase, size, handle->getMemoryTop());
			}

			/* adjust the heap top to the lowest possible address */
			handle->setMemoryTop((void*)(heapBase + size));

			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Adjusted heap location: [%p,%p], Concurrent Scavenger Page start address %p, Concurrent Scavenger Page size 0x%zx\n",
					handle->getMemoryBase(), handle->getMemoryTop(), extensions->getConcurrentScavengerPageStartAddress(), concurrentScavengerPageSize);
			}

			/*
			 * The Concurrent Scavenger Page location might be aligned out of the memory range supported by Compressed References.
			 * Fail the initialization in this case.
			 */
			if ((NULL != ceiling) && (handle->getMemoryTop() > ceiling)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;

				destroyVirtualMemory(env, handle);
				instance = NULL;
			}
		}
	}

#if defined(OMR_VALGRIND_MEMCHECK)
	// Use the handle's memory base to refer to the valgrind memory pool
	valgrindCreateMempool(extensions, env, (uintptr_t)handle->getMemoryBase());
#endif /* defined(OMR_VALGRIND_MEMCHECK) */

	return NULL != instance;
}