Example #1
void
MM_MarkMap::initializeMarkMap(MM_EnvironmentBase *env)
{
	/* TODO: The multiplier should really be some constant defined globally */
	const uintptr_t MODRON_PARALLEL_MULTIPLIER = 32;
	uintptr_t heapAlignment = _extensions->heapAlignment;

	/* Determine the size of heap that a work unit of mark map clearing corresponds to */
	uintptr_t heapClearUnitFactor = env->_currentTask->getThreadCount();
	heapClearUnitFactor = ((heapClearUnitFactor == 1) ? 1 : heapClearUnitFactor * MODRON_PARALLEL_MULTIPLIER);
	uintptr_t heapClearUnitSize = _extensions->heap->getMemorySize() / heapClearUnitFactor;
	heapClearUnitSize = MM_Math::roundToCeiling(heapAlignment, heapClearUnitSize);

	/* Walk all object segments to determine what ranges of the mark map should be cleared */
	MM_HeapRegionDescriptor *region;
	MM_Heap *heap = _extensions->getHeap();
	MM_HeapRegionManager *regionManager = heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	while(NULL != (region = regionIterator.nextRegion())) {
		if (region->isCommitted()) {
			/* Walk the segment in chunks of heapClearUnitSize, checking whether the corresponding mark map
			 * range should be cleared.
			 */
			uint8_t* heapClearAddress = (uint8_t*)region->getLowAddress();
			uintptr_t heapClearSizeRemaining = region->getSize();

			while(0 != heapClearSizeRemaining) {
				/* Calculate the size of heap that is to be processed */
				uintptr_t heapCurrentClearSize = (heapClearUnitSize > heapClearSizeRemaining) ? heapClearSizeRemaining : heapClearUnitSize;
				Assert_MM_true(heapCurrentClearSize > 0);

				/* Check if the thread should clear the corresponding mark map range for the current heap range */
				if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
					/* Convert the heap address/size to its corresponding mark map address/size */
					/* NOTE: We calculate the low and high heap offsets, and build the mark map index and size values
					 * from these to avoid rounding errors (if we use the size, the conversion routine could get a different
					 * rounding result than the actual end address)
					 */
					uintptr_t heapClearOffset = ((uintptr_t)heapClearAddress) - _heapMapBaseDelta;
					uintptr_t heapMapClearIndex = convertHeapIndexToHeapMapIndex(env, heapClearOffset, sizeof(uintptr_t));
					uintptr_t heapMapClearSize =
						convertHeapIndexToHeapMapIndex(env, heapClearOffset + heapCurrentClearSize, sizeof(uintptr_t))
						- heapMapClearIndex;

					/* And clear the mark map */
					OMRZeroMemory((void *) (((uintptr_t)_heapMapBits) + heapMapClearIndex), heapMapClearSize);
				}

				/* Move to the next address range in the segment */
				heapClearAddress += heapCurrentClearSize;
				heapClearSizeRemaining -= heapCurrentClearSize;
			}
		}
	}
}
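In this example OMRZeroMemory clears each thread's share of the mark map. The heap range covered by one clearing work unit is derived from the heap size, the participating thread count, and a parallelism multiplier, then rounded up to the heap alignment. Below is a minimal standalone sketch of that sizing arithmetic; roundToCeiling is a stand-in written under the assumption that MM_Math::roundToCeiling rounds a value up to a multiple of the given granularity.

#include <cstdint>

/* Stand-in for MM_Math::roundToCeiling(granularity, value) (assumed behaviour:
 * round value up to the next multiple of granularity).
 */
static uintptr_t roundToCeiling(uintptr_t granularity, uintptr_t value)
{
	return ((value + granularity - 1) / granularity) * granularity;
}

/* Sketch: how much heap one mark-map clearing work unit covers. */
static uintptr_t computeClearUnitSize(uintptr_t heapSize, uintptr_t threadCount, uintptr_t heapAlignment)
{
	const uintptr_t MODRON_PARALLEL_MULTIPLIER = 32;
	uintptr_t factor = (1 == threadCount) ? 1 : threadCount * MODRON_PARALLEL_MULTIPLIER;
	return roundToCeiling(heapAlignment, heapSize / factor);
}
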
Example #2
uintptr_t *
MM_AllocationContextSegregated::allocateArraylet(MM_EnvironmentBase *env, omrarrayptr_t parent)
{
	arrayletAllocationLock();

retry:
	uintptr_t *arraylet = (_arrayletRegion == NULL) ? NULL : _arrayletRegion->allocateArraylet(env, parent);

	if (arraylet != NULL) {
		arrayletAllocationUnlock();

		OMRZeroMemory(arraylet, env->getOmrVM()->_arrayletLeafSize);
		return arraylet;
	}

	flushArraylet(env);

	MM_HeapRegionDescriptorSegregated *region = _regionPool->allocateRegionFromArrayletSizeClass(env);
	if (region != NULL) {
		/* cache the arraylet full region in AC */
		_perContextArrayletFullRegions->enqueue(region);
		_arrayletRegion = region;
		goto retry;
	}

	region = _regionPool->allocateFromRegionPool(env, 1, OMR_SIZECLASSES_ARRAYLET, MAX_UINT);
	if (region != NULL) {
		/* cache the arraylet full region in AC */
		_perContextArrayletFullRegions->enqueue(region);
		_arrayletRegion = region;
		goto retry;
	}

	arrayletAllocationUnlock();

	return NULL;
}
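Here OMRZeroMemory zeroes the freshly allocated arraylet leaf (env->getOmrVM()->_arrayletLeafSize bytes) only after arrayletAllocationUnlock(), so the zeroing work does not hold up other allocating threads. A simplified sketch of the same lock/retry/zero shape written as a loop; lock, unlock, tryAllocateLeaf and refillArrayletRegion are hypothetical helpers standing in for the region-pool calls above, and the two refill strategies of the real code are collapsed into one.

#include <cstring>
#include <cstdint>

/* Hypothetical helpers (assumptions, not OMR APIs). */
void lock();
void unlock();
uintptr_t *tryAllocateLeaf();
bool refillArrayletRegion();

uintptr_t *allocateArrayletSketch(uintptr_t leafSize)
{
	lock();
	uintptr_t *leaf = tryAllocateLeaf();
	while ((NULL == leaf) && refillArrayletRegion()) {
		leaf = tryAllocateLeaf();
	}
	unlock();

	if (NULL != leaf) {
		/* Zero the whole leaf outside the lock, as the real code does with OMRZeroMemory. */
		memset(leaf, 0, leafSize);
	}
	return leaf;
}
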
Example #3
/**
 * Refresh the TLH.
 */
bool
MM_TLHAllocationSupport::refresh(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription, bool shouldCollectOnFailure)
{
	MM_GCExtensionsBase* extensions = env->getExtensions();

	/* Refresh the TLH only if the allocation request fits within half the refresh size
	 * or within the TLH minimum size, whichever is larger.
	 */
	uintptr_t sizeInBytesRequired = allocDescription->getContiguousBytes();
	uintptr_t tlhMinimumSize = extensions->tlhMinimumSize;
	uintptr_t tlhMaximumSize = extensions->tlhMaximumSize;
	uintptr_t halfRefreshSize = getRefreshSize() >> 1;
	uintptr_t abandonSize = (tlhMinimumSize > halfRefreshSize ? tlhMinimumSize : halfRefreshSize);
	if (sizeInBytesRequired > abandonSize) {
		/* increase thread hungriness if we did not refresh */
		if (getRefreshSize() < tlhMaximumSize && sizeInBytesRequired < tlhMaximumSize) {
			setRefreshSize(getRefreshSize() + extensions->tlhIncrementSize);
		}
		return false;
	}

	MM_AllocationStats *stats = _objectAllocationInterface->getAllocationStats();

	stats->_tlhDiscardedBytes += getSize();

	/* Try to cache the current TLH */
	if (NULL != getRealAlloc() && getSize() >= tlhMinimumSize) {
		/* Cache the current TLH because it is bigger than the minimum size */
		MM_HeapLinkedFreeHeaderTLH* newCache = (MM_HeapLinkedFreeHeaderTLH*)getRealAlloc();
		newCache->setSize(getSize());
		newCache->_memoryPool = getMemoryPool();
		newCache->_memorySubSpace = getMemorySubSpace();
		newCache->setNext(_abandonedList);
		_abandonedList = newCache;
		++_abandonedListSize;
		if (_abandonedListSize > stats->_tlhMaxAbandonedListSize) {
			stats->_tlhMaxAbandonedListSize = _abandonedListSize;
		}
		wipeTLH(env);
	} else {
		clear(env);
	}

	bool didRefresh = false;
	/* Try allocating a TLH */
	if ((NULL != _abandonedList) && (sizeInBytesRequired <= tlhMinimumSize)) {
		/* Try to get a cached TLH */
		setupTLH(env, (void *)_abandonedList, (void *)_abandonedList->afterEnd(),
				_abandonedList->_memorySubSpace, _abandonedList->_memoryPool);

		_abandonedList = (MM_HeapLinkedFreeHeaderTLH *)_abandonedList->getNext();
		--_abandonedListSize;

#if defined(OMR_GC_BATCH_CLEAR_TLH)
		if (_zeroTLH) {
			if (0 != extensions->batchClearTLH) {
				memset(getBase(), 0, sizeof(MM_HeapLinkedFreeHeaderTLH));
			}
		}
#endif /* OMR_GC_BATCH_CLEAR_TLH */

		allocDescription->setTLHAllocation(true);
		allocDescription->setNurseryAllocation(getMemorySubSpace()->getTypeFlags() == MEMORY_TYPE_NEW);
		allocDescription->setMemoryPool(getMemoryPool());

		stats->_tlhRefreshCountReused += 1;
		stats->_tlhAllocatedReused += getSize();
		stats->_tlhDiscardedBytes -= getSize();

		didRefresh = true;
	} else {
		/* Try allocating a fresh TLH */
		MM_AllocationContext *ac = env->getAllocationContext();
		MM_MemorySpace *memorySpace = _objectAllocationInterface->getOwningEnv()->getMemorySpace();

		if (NULL != ac) {
			/* ensure that we are allowed to use the AI in this configuration in the Tarok case */
			/* allocation contexts currently aren't supported with generational schemes */
			Assert_MM_true(memorySpace->getTenureMemorySubSpace() == memorySpace->getDefaultMemorySubSpace());
			didRefresh = (NULL != ac->allocateTLH(env, allocDescription, _objectAllocationInterface, shouldCollectOnFailure));
		} else {
			MM_MemorySubSpace *subspace = memorySpace->getDefaultMemorySubSpace();
			didRefresh = (NULL != subspace->allocateTLH(env, allocDescription, _objectAllocationInterface, NULL, NULL, shouldCollectOnFailure));
		}

		if (didRefresh) {
#if defined(OMR_GC_BATCH_CLEAR_TLH)
			if (_zeroTLH) {
				if (0 != extensions->batchClearTLH) {
					void *base = getBase();
					void *top = getTop();
					OMRZeroMemory(base, (uintptr_t)top - (uintptr_t)base);
				}
			}
#endif /* defined(OMR_GC_BATCH_CLEAR_TLH) */

			/*
			 * The TLH was refreshed, but it might already have been flushed by the GC.
			 * Some special operations (like Prepare Heap For Walk, called by GC check)
			 * may request a flush of all TLHs.
			 * A flushed TLH has Base=Top=Allocated=0, so getSize() returns 0.
			 * Do not update the stats here if the TLH has already been flushed.
			 */
			if (0 < getSize()) {
				stats->_tlhRefreshCountFresh += 1;
				stats->_tlhAllocatedFresh += getSize();
			}
		}
	}

	if (didRefresh) {
		/*
		 * The TLH was refreshed, but it might already have been flushed by the GC.
		 * Some special operations (like Prepare Heap For Walk, called by GC check)
		 * may request a flush of all TLHs.
		 * A flushed TLH has Base=Top=Allocated=0, so getSize() returns 0.
		 * Do not update the stats here if the TLH has already been flushed.
		 */
		if (0 < getSize()) {
			reportRefreshCache(env);
			stats->_tlhRequestedBytes += getRefreshSize();
			/* TODO VMDESIGN 1322: adjust the amount consumed by the TLH refresh since a TLH refresh
			 * may not give you the size requested */
			/* Increase thread hungriness */
			/* TODO: TLH values (max/min/inc) should be per tlh, or somewhere else? */
			if (getRefreshSize() < tlhMaximumSize) {
				setRefreshSize(getRefreshSize() + extensions->tlhIncrementSize);
			}
		}
	}

	return didRefresh;
}
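The decision at the top of this example reduces to: refresh only when the request fits within max(tlhMinimumSize, refreshSize / 2); otherwise grow the refresh size (thread "hungriness") for the next attempt and fail. A small standalone sketch of that heuristic, using illustrative parameter names rather than the OMR field names:

#include <cstdint>
#include <algorithm>

/* Sketch: returns true if the TLH should be refreshed for a request of
 * requestBytes; when the request is too large it instead bumps refreshSize
 * (capped by tlhMaximumSize) and returns false, mirroring the logic above.
 */
bool shouldRefresh(uintptr_t requestBytes, uintptr_t &refreshSize,
		uintptr_t tlhMinimumSize, uintptr_t tlhMaximumSize, uintptr_t tlhIncrementSize)
{
	uintptr_t abandonSize = std::max(tlhMinimumSize, refreshSize >> 1);
	if (requestBytes > abandonSize) {
		if ((refreshSize < tlhMaximumSize) && (requestBytes < tlhMaximumSize)) {
			refreshSize += tlhIncrementSize;
		}
		return false;
	}
	return true;
}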