Example No. 1
	/**
	 * Save the spine to this thread's "saved object" slot so that it will be used as a root and will be updated if the spine moves.
	 * NOTE:  This call must be balanced by a following restoreObjects call
	 */
	MMINLINE void
	saveObjects(MM_EnvironmentBase* env)
	{
		if ((NULL != _spine) && !(env->saveObjects((omrobjectptr_t)_spine))) {
			Assert_MM_unreachable();
		}
	}
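
For reference, here is a minimal usage sketch of the save/restore pairing described above. It assumes a hypothetical caller that holds a raw pointer to a heap object across an operation that may move it; the helper name is illustrative and not part of the OMR code base.

void
exampleKeepObjectRooted(MM_EnvironmentBase *env, omrobjectptr_t &object)
{
	/* Root the object in the current thread's saved-object slot (MM_EnvironmentBase::saveObjects returns false if both slots are already taken) */
	if (!env->saveObjects(object)) {
		Assert_MM_unreachable();
	}
	/* ... perform work that may trigger a GC and move the object ... */
	/* Retrieve the (possibly updated) pointer and clear the slot */
	env->restoreObjects(&object);
}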
Example No. 2
void
MM_SweepSchemeSegregated::sweepRegion(MM_EnvironmentBase *env, MM_HeapRegionDescriptorSegregated *region)
{
	region->getMemoryPoolACL()->resetCounts();

	switch (region->getRegionType()) {

	case MM_HeapRegionDescriptor::SEGREGATED_SMALL:
		sweepSmallRegion(env, region);
		if (isClearMarkMapAfterSweep()) {
			unmarkRegion(env, region);
		}
		addBytesFreedAfterSweep(env, region);
		break;

#if defined(OMR_GC_ARRAYLETS)
	case MM_HeapRegionDescriptor::ARRAYLET_LEAF:
		sweepArrayletRegion(env, region);
		addBytesFreedAfterSweep(env, region);
		break;
#endif /* defined(OMR_GC_ARRAYLETS) */

	case MM_HeapRegionDescriptor::SEGREGATED_LARGE:
		sweepLargeRegion(env, region);
		break;

	default:
		Assert_MM_unreachable();
	}
}
Example No. 3
uintptr_t
MM_CollectorLanguageInterfaceImpl::concurrentGC_collectRoots(MM_EnvironmentStandard *env, ConcurrentStatus concurrentStatus, MM_ScanClassesMode *scanClassesMode, bool &collectedRoots, bool &paidTax)
{
	uintptr_t bytesScanned = 0;
	collectedRoots = true;
	paidTax = true;

	switch (concurrentStatus) {
	case CONCURRENT_ROOT_TRACING1:
		break;
	case CONCURRENT_ROOT_TRACING2:
		markingScheme_scanRoots(env);
		break;
	case CONCURRENT_ROOT_TRACING3:
		break;
	case CONCURRENT_ROOT_TRACING4:
		break;
	case CONCURRENT_ROOT_TRACING5:
		break;
#if defined(OMR_GC_DYNAMIC_CLASS_UNLOADING)
	case CONCURRENT_TRACE_ONLY:
		break;
#endif /* OMR_GC_DYNAMIC_CLASS_UNLOADING */
	default:
		Assert_MM_unreachable();
	}

	return bytesScanned;
}
Example No. 4
bool
MM_EnvironmentBase::saveObjects(omrobjectptr_t objectPtr)
{
	void *heapBase = getExtensions()->heap->getHeapBase();
	void *heapTop = getExtensions()->heap->getHeapTop();

	Assert_MM_true((heapBase <= objectPtr) && (heapTop > objectPtr));
	Assert_MM_true(_omrVMThread->_savedObject1 != objectPtr);
	Assert_MM_true(_omrVMThread->_savedObject2 != objectPtr);

	if (NULL == _omrVMThread->_savedObject1) {
		_omrVMThread->_savedObject1 = objectPtr;
		return true;
	} else {
		Assert_MM_true((heapBase <= _omrVMThread->_savedObject1) && (heapTop > _omrVMThread->_savedObject1));
	}

	if (NULL == _omrVMThread->_savedObject2) {
		_omrVMThread->_savedObject2 = objectPtr;
		return true;
	} else {
		Assert_MM_true((heapBase <= _omrVMThread->_savedObject2) && (heapTop > _omrVMThread->_savedObject2));
	}

	Assert_MM_unreachable();
	return false;
}
Example No. 5
uintptr_t
MM_MasterGCThread::master_thread_proc2(OMRPortLibrary* portLib, void *info)
{
	MM_MasterGCThread *masterGCThread = (MM_MasterGCThread*)info;
	/* jump into the master thread procedure and wait for work.  This method will NOT return */
	masterGCThread->masterThreadEntryPoint();
	Assert_MM_unreachable();
	return 0;
}
Example No. 6
uintptr_t
MM_ConcurrentMarkingDelegate::collectRoots(
	MM_EnvironmentBase* env, uintptr_t concurrentStatus, bool* collectedRoots, bool* paidTax)
{
	uintptr_t bytesScanned = 0;
	*collectedRoots        = true;
	*paidTax               = true;

	switch (concurrentStatus) {
	case CONCURRENT_ROOT_TRACING1: _markingScheme->markLiveObjectsRoots(env); break;
	default: Assert_MM_unreachable();
	}

	return bytesScanned;
}
uintptr_t
MM_CollectorLanguageInterfaceImpl::concurrentGC_collectRoots(MM_EnvironmentStandard *env, uintptr_t concurrentStatus, bool *collectedRoots, bool *paidTax)
{
	uintptr_t bytesScanned = 0;
	*collectedRoots = true;
	*paidTax = true;

	switch (concurrentStatus) {
	case CONCURRENT_ROOT_TRACING1:
		break;
	default:
		Assert_MM_unreachable();
	}

	return bytesScanned;
}
uintptr_t
MM_CollectorLanguageInterfaceImpl::concurrentGC_getNextTracingMode(uintptr_t executionMode)
{
	uintptr_t nextExecutionMode = CONCURRENT_TRACE_ONLY;
	switch (executionMode) {
	case CONCURRENT_ROOT_TRACING:
		nextExecutionMode = CONCURRENT_ROOT_TRACING1;
		break;
	case CONCURRENT_ROOT_TRACING1:
		nextExecutionMode = CONCURRENT_TRACE_ONLY;
		break;
	default:
		Assert_MM_unreachable();
	}

	return nextExecutionMode;
}
Example No. 9
void *
MM_MemorySubSpaceGenerational::allocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription, MM_ObjectAllocationInterface *objectAllocationInterface, MM_MemorySubSpace *baseSubSpace, MM_MemorySubSpace *previousSubSpace, bool shouldCollectOnFailure)
{
	if (shouldCollectOnFailure) {
		/* Should never receive this call */
		Assert_MM_unreachable();
		return NULL;
	} else {
		if (previousSubSpace == _memorySubSpaceNew) {
			/* The allocate request is coming from new space - forward on to the old area */
			return _memorySubSpaceOld->allocateTLH(env, allocDescription, objectAllocationInterface, baseSubSpace, this, false);
		}
	
		/* The allocate comes from the old area - failure */
		return NULL;
	}
}
Example No. 10
	/**
	 * Determine the next unit of tracing work that must be performed during root collection. Each distinct
	 * value returned represents a discrete unit of language-dependent root collection work. The executionMode
	 * parameter represents the current tracing mode; the returned value will be the next tracing mode. The
	 * first call during a concurrent collection cycle will receive CONCURRENT_ROOT_TRACING as current tracing
	 * mode. When all language defined values have been returned, this method must return CONCURRENT_TRACE_ONLY
	 * to indicate that all root objects have been traced.
	 *
	 * @param executionMode the current (most recently completed) tracing mode
	 * @return the next language-defined tracing mode, or CONCURRENT_TRACE_ONLY if all language-defined roots have been traced
	 * @see MM_ConcurrentMarkingDelegate::collectRoots(MM_EnvironmentBase *, uintptr_t, bool *, bool *)
	 */
	MMINLINE uintptr_t
	getNextTracingMode(uintptr_t executionMode)
	{
		uintptr_t nextExecutionMode = CONCURRENT_TRACE_ONLY;
		switch (executionMode) {
		case CONCURRENT_ROOT_TRACING:
			nextExecutionMode = CONCURRENT_ROOT_TRACING1;
			break;
		case CONCURRENT_ROOT_TRACING1:
			nextExecutionMode = CONCURRENT_TRACE_ONLY;
			break;
		default:
			Assert_MM_unreachable();
		}

		return nextExecutionMode;
	}
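
The contract above can be exercised with a small driver loop. The following sketch is hypothetical (the real loop belongs to the concurrent collector, which is not shown here); the function name and the bare delegate pointer are illustrative only.

uintptr_t
exampleTraceAllRoots(MM_EnvironmentBase *env, MM_ConcurrentMarkingDelegate *delegate)
{
	uintptr_t bytesScanned = 0;
	/* start from the initial mode handed out for a concurrent collection cycle */
	uintptr_t mode = delegate->getNextTracingMode(CONCURRENT_ROOT_TRACING);
	while (CONCURRENT_TRACE_ONLY != mode) {
		bool collectedRoots = false;
		bool paidTax = false;
		/* collect the language-defined roots associated with the current tracing mode */
		bytesScanned += delegate->collectRoots(env, mode, &collectedRoots, &paidTax);
		mode = delegate->getNextTracingMode(mode);
	}
	return bytesScanned;
}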
Example No. 11
void
MM_EnvironmentBase::restoreObjects(omrobjectptr_t *objectPtrIndirect)
{
	void *heapBase = getExtensions()->heap->getHeapBase();
	void *heapTop = getExtensions()->heap->getHeapTop();

	if (NULL != _omrVMThread->_savedObject2) {
		Assert_MM_true((heapBase <= _omrVMThread->_savedObject2) && (heapTop > _omrVMThread->_savedObject2));
		*objectPtrIndirect = (omrobjectptr_t)_omrVMThread->_savedObject2;
		_omrVMThread->_savedObject2 = NULL;
	} else if (NULL != _omrVMThread->_savedObject1) {
		Assert_MM_true((heapBase <= _omrVMThread->_savedObject1) && (heapTop > _omrVMThread->_savedObject1));
		*objectPtrIndirect = (omrobjectptr_t)_omrVMThread->_savedObject1;
		_omrVMThread->_savedObject1 = NULL;
	} else {
		Assert_MM_unreachable();
	}
}
Example No. 12
void
MM_SweepSchemeSegregated::addBytesFreedAfterSweep(MM_EnvironmentBase *env, MM_HeapRegionDescriptorSegregated *region)
{
	/* Bytes freed by large regions are counted when the regions are returned to the free region list,
	 * see emptyRegionReturned
	 */
	/* Notify the allocation tracker of the bytes that have been freed */
	MM_MemoryPoolAggregatedCellList *memoryPoolACL = region->getMemoryPoolACL();
	uintptr_t currentFreeBytes = memoryPoolACL->getFreeCount();
	if (region->isSmall()) {
		currentFreeBytes *= region->getCellSize();
#if defined(OMR_GC_ARRAYLETS)
	} else if (region->isArraylet()) {
		currentFreeBytes *= env->getOmrVM()->_arrayletLeafSize;
#endif /* defined(OMR_GC_ARRAYLETS) */
	} else {
		Assert_MM_unreachable();
	}
	env->_allocationTracker->addBytesFreed(env, (currentFreeBytes - memoryPoolACL->getPreSweepFreeBytes()));
	memoryPoolACL->setPreSweepFreeBytes(currentFreeBytes);
}
Example No. 13
void
MM_ConcurrentScavengeTask::run(MM_EnvironmentBase *envBase)
{
	MM_EnvironmentStandard *env = MM_EnvironmentStandard::getEnvironment(envBase);

	switch (_action) {
	case SCAVENGE_ALL:
		_collector->workThreadProcessRoots(env);
		_collector->workThreadScan(env);
		_collector->workThreadComplete(env);
		break;
	case SCAVENGE_ROOTS:
		_collector->workThreadProcessRoots(env);
		break;
	case SCAVENGE_SCAN:
		_collector->workThreadScan(env);
		break;
	case SCAVENGE_COMPLETE:
		_collector->workThreadComplete(env);
		break;
	default:
		Assert_MM_unreachable();
	}
}
Example No. 14
bool
MM_MemoryManager::createVirtualMemoryForHeap(MM_EnvironmentBase* env, MM_MemoryHandle* handle, uintptr_t heapAlignment, uintptr_t size, uintptr_t tailPadding, void* preferredAddress, void* ceiling)
{
	Assert_MM_true(NULL != handle);
	MM_GCExtensionsBase* extensions = env->getExtensions();

	MM_VirtualMemory* instance = NULL;
	uintptr_t mode = (OMRPORT_VMEM_MEMORY_MODE_READ | OMRPORT_VMEM_MEMORY_MODE_WRITE);
	uintptr_t options = 0;
	uint32_t memoryCategory = OMRMEM_CATEGORY_MM_RUNTIME_HEAP;

	uintptr_t pageSize = extensions->requestedPageSize;
	uintptr_t pageFlags = extensions->requestedPageFlags;
	Assert_MM_true(0 != pageSize);

	uintptr_t allocateSize = size;

	uintptr_t concurrentScavengerPageSize = 0;
	if (extensions->isConcurrentScavengerEnabled()) {
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * Allocate extra memory to guarantee proper alignment regardless of the start address location.
		 * The minimum over-allocation would be (Concurrent_Scavenger_page_size - section size); however,
		 * Virtual Memory can return a heap shorter by one region (and here region size == section size),
		 * so to guarantee the desired heap size, over-allocate by a full Concurrent_Scavenger_page_size.
		 */
		concurrentScavengerPageSize = extensions->getConcurrentScavengerPageSectionSize() * CONCURRENT_SCAVENGER_PAGE_SECTIONS;
		allocateSize += concurrentScavengerPageSize;
		if (extensions->isDebugConcurrentScavengerPageAlignment()) {
			omrtty_printf("Requested heap size 0x%zx has been extended to 0x%zx for guaranteed alignment\n", size, allocateSize);
		}
	} else {
		if (heapAlignment > pageSize) {
			allocateSize += (heapAlignment - pageSize);
		}
	}

#if defined(OMR_GC_MODRON_SCAVENGER)
	if (extensions->enableSplitHeap) {
		/* currently (ceiling != NULL) is used to recognize CompressedRefs, so it must be NULL for 32-bit platforms */
		Assert_MM_true(NULL == ceiling);

		switch (extensions->splitHeapSection) {
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_TENURE:
			/* trying to get Tenure at the bottom of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_NURSERY:
			/* trying to get Nursery at the top of virtual memory */
			options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
			break;
		case MM_GCExtensionsBase::HEAP_INITIALIZATION_SPLIT_HEAP_UNKNOWN:
		default:
			Assert_MM_unreachable();
			break;
		}
	}
#endif /* defined(OMR_GC_MODRON_SCAVENGER) */

	if (NULL == ceiling) {
		instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress,
												 ceiling, mode, options, memoryCategory);
	} else {
#if defined(OMR_GC_COMPRESSED_POINTERS)
		OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
		/*
		 * This function is used for Compressed References platforms only
		 * The ceiling for such platforms is set to the maximum supported memory value (32G for a 3-bit shift).
		 * The ceiling is 0 for all other platforms.
		 */
		/* NON_SCALING_LOW_MEMORY_HEAP_CEILING is set to 4G for 64-bit platforms only, 0 for 32-bit platforms */
		Assert_MM_true(NON_SCALING_LOW_MEMORY_HEAP_CEILING > 0);
		
		/*
		 * Usually the suballocator memory should be allocated first (before the heap); however,
		 * when a preferred address is specified we try to allocate the heap first
		 * to avoid possible interference with the requested heap location.
		 */
		bool shouldHeapBeAllocatedFirst = (NULL != preferredAddress);
		void* startAllocationAddress = preferredAddress;

		/* Set the commit size for the sub allocator. This needs to be completed before the call to omrmem_ensure_capacity32 */
		omrport_control(OMRPORT_CTLDATA_ALLOCATE32_COMMIT_SIZE, extensions->suballocatorCommitSize);

		if (!shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				return false;
			}
		}

		options |= OMRPORT_VMEM_STRICT_ADDRESS | OMRPORT_VMEM_ALLOC_QUICK;

#if defined(J9ZOS39064)
		/* 2TO32G area is extended to 64G */
		options |= OMRPORT_VMEM_ZOS_USE2TO32G_AREA;

		/*
		 * On ZOS an address space below 2G can not be taken for virtual memory
		 */
#define TWO_GB_ADDRESS ((void*)((uintptr_t)2 * 1024 * 1024 * 1024))
		if (NULL == preferredAddress) {
			startAllocationAddress = TWO_GB_ADDRESS;
		}
#endif /* defined(J9ZOS39064) */

		void* requestedTopAddress = (void*)((uintptr_t)startAllocationAddress + allocateSize + tailPadding);

		if (extensions->isConcurrentScavengerEnabled()) {
			void * ceilingToRequest = ceiling;
			/* Requested top address might be higher than the ceiling because of the added chunk */
			if ((requestedTopAddress > ceiling) && ((void *)((uintptr_t)requestedTopAddress - concurrentScavengerPageSize) <= ceiling)) {
				/* ZOS 2_TO_64/2_TO_32 options would not allow a memory request larger than 64G/32G, so the total requested size including tail padding must not exceed it */
				allocateSize = (uintptr_t)ceiling - (uintptr_t)startAllocationAddress - tailPadding;

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Total allocate size exceeds ceiling %p, reduce allocate size to 0x%zx\n", ceiling, allocateSize);
				}
				/*
				 * There is no way the Nursery will be pushed above the ceiling for valid memory options; however, we have
				 * no idea about the start address. So, to guarantee an allocation up to the ceiling, we need to request an extended chunk of memory.
				 * Set ceiling to NULL to disable ceiling control. This requires a bottom-up direction for allocation.
				 */
				ceilingToRequest = NULL;
			}

			options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;

			/* An attempt to allocate memory chunk for heap for Concurrent Scavenger */
			instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress, ceilingToRequest, mode, options, memoryCategory);
		} else {
			if (requestedTopAddress <= ceiling) {
				bool allocationTopDown = true;
				/* define the scan direction when reserving the GC heap in the range of (4G, 32G) */
#if defined(S390) || defined(J9ZOS390)
				/* s390 benefits from smaller shift values so allocate direction is bottom up */
				options |= OMRPORT_VMEM_ALLOC_DIR_BOTTOM_UP;
				allocationTopDown = false;
#else
				options |= OMRPORT_VMEM_ALLOC_DIR_TOP_DOWN;
#endif /* defined(S390) || defined(J9ZOS390) */

				if (allocationTopDown && extensions->shouldForceSpecifiedShiftingCompression) {
					/* force top-down heap allocation from the address corresponding to the forced shift */
					void* maxAddress = (void *)(((uintptr_t)1 << 32) << extensions->forcedShiftingCompressionAmount);

					instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress,
							maxAddress, mode, options, memoryCategory);
				} else {
					if (requestedTopAddress < (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING) {
						/*
						 * Attempt to allocate heap below 4G
						 */
						instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress,
																 (void*)OMR_MIN(NON_SCALING_LOW_MEMORY_HEAP_CEILING, (uintptr_t)ceiling), mode, options, memoryCategory);
					}

					if ((NULL == instance) && (ceiling > (void*)NON_SCALING_LOW_MEMORY_HEAP_CEILING)) {

#define THIRTY_TWO_GB_ADDRESS ((uintptr_t)32 * 1024 * 1024 * 1024)
						if (requestedTopAddress <= (void *)THIRTY_TWO_GB_ADDRESS) {
							/*
							 * If the requested object heap size is in the 28G-32G range, allocating it with a 3-bit shift might compromise the amount of low memory below 4G.
							 * To prevent this, go straight to a 4-bit shift if possible.
							 * Logical conditions to skip the allocation attempt below 32G:
							 *  - the 4-bit shift is an available option
							 *  - the requested size is larger than 28G (32 minus 4)
							 *  - the allocation direction is top-down, otherwise it does not make sense
							 */
							bool skipAllocationBelow32G = (ceiling > (void*)THIRTY_TWO_GB_ADDRESS)
								 && (requestedTopAddress > (void*)(THIRTY_TWO_GB_ADDRESS - NON_SCALING_LOW_MEMORY_HEAP_CEILING))
								 && allocationTopDown;

							if (!skipAllocationBelow32G) {
								/*
								 * Attempt to allocate heap below 32G
								 */
								instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress,
																		 (void*)OMR_MIN((uintptr_t)THIRTY_TWO_GB_ADDRESS, (uintptr_t)ceiling), mode, options, memoryCategory);
							}
						}

						/*
						 * Attempt to allocate above 32G
						 */
						if ((NULL == instance) && (ceiling > (void *)THIRTY_TWO_GB_ADDRESS)) {
							instance = MM_VirtualMemory::newInstance(env, heapAlignment, allocateSize, pageSize, pageFlags, tailPadding, preferredAddress,
																	 ceiling, mode, options, memoryCategory);
						}
					}
				}
			}
		}

		/*
		 * If a preferredAddress was requested, check whether it was really taken: if not, release the memory.
		 * For backward compatibility this check should be done for compressedrefs platforms only.
		 */
		if ((NULL != preferredAddress) && (NULL != instance) && (instance->getHeapBase() != preferredAddress)) {
			extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;
			instance->kill(env);
			instance = NULL;
			return false;
		}

		if ((NULL != instance) && shouldHeapBeAllocatedFirst) {
			if (OMRPORT_ENSURE_CAPACITY_FAILED == omrmem_ensure_capacity32(extensions->suballocatorInitialSize)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE;
				instance->kill(env);
				instance = NULL;
				return false;
			}
		}
#else /* defined(OMR_GC_COMPRESSED_POINTERS) */

		/*
		 * The code above might be used for non-compressedrefs platforms, but it would need a few adjustments for this:
		 *  - NON_SCALING_LOW_MEMORY_HEAP_CEILING should be set
		 *  - OMRPORT_VMEM_ZOS_USE2TO32G_AREA flag for ZOS is expected to be used for compressedrefs heap allocation only
		 */
		Assert_MM_unimplemented();

#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
	}

	if ((NULL != instance) && extensions->largePageFailOnError && (instance->getPageSize() != extensions->requestedPageSize)) {
		extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_SATISFY_REQUESTED_PAGE_SIZE;
		instance->kill(env);
		instance = NULL;
		return false;
	}

	handle->setVirtualMemory(instance);
	if (NULL != instance) {
		instance->incrementConsumerCount();
		handle->setMemoryBase(instance->getHeapBase());
		handle->setMemoryTop(instance->getHeapTop());

		/*
		 * Align the Nursery location to the Concurrent Scavenger Page and calculate the Concurrent Scavenger Page start address.
		 * There are two possible cases here:
		 * - the Nursery already fits within a Concurrent Scavenger Page = no extra alignment required
		 * - the current Nursery location has crossed a Concurrent Scavenger Page boundary, so it needs to be pushed higher
		 *   to have the Nursery low address aligned to the Concurrent Scavenger Page
		 */
		if (extensions->isConcurrentScavengerEnabled()) {
			OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
			/* projected Nursery base and top */
			/* assumed Nursery location in high addresses of the heap */
			uintptr_t heapBase = (uintptr_t)handle->getMemoryBase();
			uintptr_t nurseryTop = heapBase + size;
			uintptr_t nurseryBase = nurseryTop - extensions->maxNewSpaceSize;

			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Allocated memory for heap: [%p,%p]\n", handle->getMemoryBase(), handle->getMemoryTop());
			}

			uintptr_t baseAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryBase + 1);
			uintptr_t topAligned = MM_Math::roundToCeiling(concurrentScavengerPageSize, nurseryTop);
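			/* Worked example with purely illustrative numbers (not actual defaults): assume a 256M
			 * Concurrent Scavenger Page and a projected Nursery of [0x13C000000, 0x144000000).
			 * Then baseAligned = 0x140000000 and topAligned = 0x150000000; they differ, so the heap
			 * below is shifted up by (baseAligned - nurseryBase) = 64M to start the Nursery on a page
			 * boundary. Had the Nursery been [0x130000000, 0x138000000), both values round to
			 * 0x140000000 and no adjustment would be needed. */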

			if (baseAligned == topAligned) {
				/* Nursery fits Concurrent Scavenger Page already */
				extensions->setConcurrentScavengerPageStartAddress((void *)(baseAligned - concurrentScavengerPageSize));

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address 0x%zx\n", nurseryBase);
				}
			} else {
				/* Nursery location should be adjusted */
				extensions->setConcurrentScavengerPageStartAddress((void *)baseAligned);

				if (extensions->isDebugConcurrentScavengerPageAlignment()) {
					omrtty_printf("Expected Nursery start address adjusted to 0x%zx\n", baseAligned);
				}

				/* Move up entire heap for proper Nursery adjustment */
				heapBase += (baseAligned - nurseryBase);
				handle->setMemoryBase((void *)heapBase);

				/* top of adjusted Nursery should fit reserved memory */
				Assert_GC_true_with_message3(env, ((heapBase + size) <= (uintptr_t)handle->getMemoryTop()),
						"End of projected heap (base 0x%zx + size 0x%zx) is larger then Top allocated %p\n",
						heapBase, size, handle->getMemoryTop());
			}

			/* adjust heap top to lowest possible address */
			handle->setMemoryTop((void *)(heapBase + size));

			if (extensions->isDebugConcurrentScavengerPageAlignment()) {
				omrtty_printf("Adjusted heap location: [%p,%p], Concurrent Scavenger Page start address %p, Concurrent Scavenger Page size 0x%zx\n",
						handle->getMemoryBase(), handle->getMemoryTop(), extensions->getConcurrentScavengerPageStartAddress(), concurrentScavengerPageSize);
			}

			/*
			 * Concurrent Scavenger Page location might be aligned out of Compressed References supported memory range
			 * Fail to initialize in this case
			 */
			if ((NULL != ceiling) && (handle->getMemoryTop() > ceiling)) {
				extensions->heapInitializationFailureReason = MM_GCExtensionsBase::HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP;
				destroyVirtualMemory(env, handle);
				instance = NULL;
			}
		}
	}

#if defined(OMR_VALGRIND_MEMCHECK)
	// Use the handle's memory base to refer to the valgrind memory pool
	valgrindCreateMempool(extensions, env, (uintptr_t)handle->getMemoryBase());
#endif /* defined(OMR_VALGRIND_MEMCHECK) */

	return NULL != instance;
}
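
A minimal caller sketch for the routine above, assuming the simplest configuration: no preferred address, no ceiling (the non-compressed-references path), and zero tail padding. `memoryManager`, `env`, `heapAlignment` and `size` are assumed to be set up elsewhere; only the calls made on `handle` are taken from the code above.

MM_MemoryHandle handle;
if (memoryManager->createVirtualMemoryForHeap(env, &handle, heapAlignment, size, 0, NULL, NULL)) {
	void *base = handle.getMemoryBase(); /* start of the reserved heap */
	void *top = handle.getMemoryTop();   /* end of the reserved heap */
	/* ... carve heap regions out of [base, top) ... */
}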
Example No. 15
bool
MM_MasterGCThread::garbageCollect(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription)
{
	Assert_MM_mustHaveExclusiveVMAccess(env->getOmrVMThread());
	bool didAttemptCollect = false;
	
	if (NULL != _collector) {
		/* the collector has started up so try to run */
		/* once the master thread has stored itself in the _masterGCThread, it should never need to collect - this would hang */
		Assert_MM_true(omrthread_self() != _masterGCThread);
		if (_runAsImplicit || (NULL == _masterGCThread)) {
			/* We might not have _masterGCThread in the startup phase or late in the shutdown phase.
			 * For example, there may be a native out-of-memory during startup or RAS may 
			 * trigger a GC after we've shut down the master thread.
			 */
			Assert_MM_true(0 == env->getSlaveID());
			_collector->preMasterGCThreadInitialize(env);
			_collector->masterThreadGarbageCollect(env, allocDescription);

			if (_runAsImplicit && _collector->isConcurrentWorkAvailable(env)) {
				omrthread_monitor_enter(_collectorControlMutex);

				if (STATE_WAITING == _masterThreadState) {
					_masterThreadState = STATE_GC_REQUESTED;
					omrthread_monitor_notify(_collectorControlMutex);
				}

				omrthread_monitor_exit(_collectorControlMutex);
			}
		} else {
			/* this is the general case, when the master thread is running internally */
			omrthread_monitor_enter(_collectorControlMutex);
			/* The variable assignments below are safe because we hold Xaccess.  Otherwise, it is possible (based on the wait/notify mechanism here)
			 * that another thread could come in under this mutex and stomp on the "parameters" while another thread is waiting.
			 */
			_allocDesc = allocDescription;
			_incomingCycleState = env->_cycleState;
			MasterGCThreadState previousState = _masterThreadState;
			_masterThreadState = STATE_GC_REQUESTED;
			if (STATE_WAITING == previousState) {
				omrthread_monitor_notify(_collectorControlMutex);
			} else if (STATE_RUNNING_CONCURRENT == previousState) {
				_collector->forceConcurrentFinish();
			} else {
				Assert_MM_unreachable();
			}
			
			/* The master thread will claim exclusive VM access. Artificially give it up in this thread so that tools like -Xcheck:vm continue to work. */
			uintptr_t savedExclusiveCount = env->relinquishExclusiveVMAccess();
			while (STATE_GC_REQUESTED == _masterThreadState) {
				omrthread_monitor_wait(_collectorControlMutex);
			}
			env->assumeExclusiveVMAccess(savedExclusiveCount);

			Assert_MM_true(NULL == _incomingCycleState);
			omrthread_monitor_exit(_collectorControlMutex);
		}
		
		didAttemptCollect = true;
	}
	return didAttemptCollect;
}
Example No. 16
void
MM_OverflowStandard::fillFromOverflow(MM_EnvironmentBase *env, MM_Packet *packet)
{
	Assert_MM_unreachable();
}
Example No. 17
	/**
	 * Allocate an arraylet leaf.
	 */
	virtual void *
	allocateArrayletLeaf(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, MM_MemorySpace *memorySpace, bool shouldCollectOnFailure)
	{
		Assert_MM_unreachable();
		return NULL;
	}
Example No. 18
	virtual uintptr_t concurrentClassMark(MM_EnvironmentStandard *env, bool &completedClassMark) { Assert_MM_unreachable(); return 0; }
Example No. 19
	virtual void *allocateTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription, MM_ObjectAllocationInterface *objectAllocationInterface, MM_MemorySubSpace *baseSubSpace, MM_MemorySubSpace *previousSubSpace, bool shouldCollectOnFailure)
	{
		Assert_MM_unreachable();
		return NULL;
	}
Example No. 20
	/**
	 * Called under exclusive VM access to request that the subspace attempt to replenish the given context and satisfy the given allocateDescription of the given allocationType.
	 * 
	 * @param[in] env The current thread
	 * @param[in] context The allocation context which the sender failed to replenish
	 * @param[in] objectAllocationInterface The allocation interface through which the original allocation call was initiated (only used by TLH allocations, can be NULL in other cases)
	 * @param[in] allocateDescription The allocation request which initiated the allocation failure
	 * @param[in] allocationType The type of allocation request we eventually must satisfy
	 * 
	 * @return The result of the allocation attempted under exclusive VM access
	 */
	virtual void *lockedReplenishAndAllocate(MM_EnvironmentBase *env, MM_AllocationContext *context, MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription, AllocationType allocationType)
	{
		Assert_MM_unreachable();
		return NULL;
	}
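
A caller-side sketch of the contract documented above: with exclusive VM access already held, ask the subspace to replenish the failing context and retry the allocation. The wrapper name is illustrative, and it is assumed here that the declaring class (and its AllocationType enum) is MM_MemorySubSpace.

void *
exampleReplenishUnderExclusive(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, MM_AllocationContext *context,
	MM_ObjectAllocationInterface *objectAllocationInterface, MM_AllocateDescription *allocateDescription,
	MM_MemorySubSpace::AllocationType allocationType)
{
	/* precondition from the contract: the caller already holds exclusive VM access */
	Assert_MM_mustHaveExclusiveVMAccess(env->getOmrVMThread());
	return subSpace->lockedReplenishAndAllocate(env, context, objectAllocationInterface, allocateDescription, allocationType);
}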
Example No. 21
void
MM_ConcurrentOverflow::fillFromOverflow(MM_EnvironmentBase *env, MM_Packet *packet)
{
	Assert_MM_unreachable();
}