Example #1
/* This method is moved here so that the AC (allocation context) can see the large full region and cache it */
uintptr_t *
MM_AllocationContextSegregated::allocateLarge(MM_EnvironmentBase *env, uintptr_t sizeInBytesRequired)
{
	uintptr_t neededRegions = _regionPool->divideUpRegion(sizeInBytesRequired);
	MM_HeapRegionDescriptorSegregated *region = NULL;
	uintptr_t excess = 0;

	while (region == NULL && excess < MAX_UINT) {
		region = _regionPool->allocateFromRegionPool(env, neededRegions, OMR_SIZECLASSES_LARGE, excess);
		excess = (2 * excess) + 1;
	}

	uintptr_t *result = (region == NULL) ? NULL : (uintptr_t *)region->getLowAddress();

	/* Flush the large region right away. */
	if (region != NULL) {
		/* cache the large full region in AC */
		_perContextLargeFullRegions->enqueue(region);

		/* reset ACL counts */
		region->getMemoryPoolACL()->resetCounts();
	}

	return result;
}
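
The retry loop above widens the acceptable allocation excess geometrically (0, 1, 3, 7, ...) each time the region pool refuses the request, until a region is obtained or the bound is hit. Below is a minimal standalone sketch of that back-off; the pool call is replaced by a stub and the success threshold is an arbitrary value chosen for illustration, not taken from the collector.

#include <cstdint>
#include <cstdio>
#include <limits>

/* Stand-in for _regionPool->allocateFromRegionPool(): pretend the pool only
 * succeeds once the caller tolerates an excess of at least 3 (hypothetical). */
static bool allocateFromRegionPoolStub(uintptr_t excess) { return excess >= 3; }

int main()
{
	uintptr_t excess = 0;
	bool found = false;
	while (!found && (excess < std::numeric_limits<uintptr_t>::max())) {
		found = allocateFromRegionPoolStub(excess);
		printf("excess=%llu -> %s\n", (unsigned long long)excess, found ? "hit" : "miss");
		excess = (2 * excess) + 1; /* widen the tolerance: 0, 1, 3, 7, 15, ... */
	}
	return 0;
}
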
Example #2
void
MM_SweepSchemeSegregated::incrementalSweepLarge(MM_EnvironmentBase *env)
{
	/* Sweep through large objects. */
	MM_RegionPoolSegregated *regionPool = _memoryPool->getRegionPool();
	MM_HeapRegionQueue *largeSweepRegions = regionPool->getLargeSweepRegions();
	MM_HeapRegionQueue *largeFullRegions = regionPool->getLargeFullRegions();
	MM_HeapRegionDescriptorSegregated *currentRegion;
	while ((currentRegion = largeSweepRegions->dequeue()) != NULL) {
		sweepRegion(env, currentRegion);
		
		if (currentRegion->getMemoryPoolACL()->getFreeCount() == 0) {
			largeFullRegions->enqueue(currentRegion);
		} else {
			currentRegion->emptyRegionReturned(env);
			regionPool->addFreeRegion(env, currentRegion);
		}
		yieldFromSweep(env);
	}
}
Example #3
void
MM_SweepSchemeSegregated::incrementalSweepArraylet(MM_EnvironmentBase *env)
{
	uintptr_t arrayletsPerRegion = env->getExtensions()->arrayletsPerRegion;

	MM_RegionPoolSegregated *regionPool = _memoryPool->getRegionPool();
	MM_HeapRegionQueue *arrayletSweepRegions = regionPool->getArrayletSweepRegions();
	MM_HeapRegionQueue *arrayletAvailableRegions = regionPool->getArrayletAvailableRegions();
	MM_HeapRegionDescriptorSegregated *currentRegion;
	
	while ((currentRegion = arrayletSweepRegions->dequeue()) != NULL) {
		sweepRegion(env, currentRegion);
		
		if (currentRegion->getMemoryPoolACL()->getFreeCount() != arrayletsPerRegion) {
			arrayletAvailableRegions->enqueue(currentRegion);
		} else {
			currentRegion->emptyRegionReturned(env);
			regionPool->addFreeRegion(env, currentRegion);
		}
		
		yieldFromSweep(env);
	}
}
Example #4
void
MM_SweepSchemeSegregated::incrementalSweepSmall(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase *ext = env->getExtensions();
	bool shouldUpdateOccupancy = ext->nonDeterministicSweep;
	MM_RegionPoolSegregated *regionPool = _memoryPool->getRegionPool();
	uintptr_t splitIndex = env->getSlaveID() % (regionPool->getSplitAvailableListSplitCount());

	/*
	 * Iterate through the regions so that each region is processed exactly once.
	 * Interleaved sweeping: free up some number of regions of all size classes right away
	 * so that the application has somewhere to allocate from and the use of
	 * non-deterministic sweeps is minimized.
	 * Any marked objects are unmarked.
	 * If a region holds a marked object, the region is kept active;
	 * if a region contains no marked objects, it can be returned to a free list.
	 */
	MM_SizeClasses *sizeClasses = ext->defaultSizeClasses;
	while (regionPool->getCurrentTotalCountOfSweepRegions()) {
		for (uintptr_t sizeClass = OMR_SIZECLASSES_MIN_SMALL; sizeClass <= OMR_SIZECLASSES_MAX_SMALL; sizeClass++) {
			while (regionPool->getCurrentCountOfSweepRegions(sizeClass)) {
				float yetToComplete = (float)regionPool->getCurrentCountOfSweepRegions(sizeClass) / regionPool->getInitialCountOfSweepRegions(sizeClass);
				float totalYetToComplete = (float)regionPool->getCurrentTotalCountOfSweepRegions() / regionPool->getInitialTotalCountOfSweepRegions();
				
				if (yetToComplete < totalYetToComplete) {
					break;
				}
				
				MM_HeapRegionQueue *sweepList = regionPool->getSmallSweepRegions(sizeClass);
				MM_HeapRegionDescriptorSegregated *currentRegion;
				uintptr_t numCells = sizeClasses->getNumCells(sizeClass);
				uintptr_t sweepSmallRegionsPerIteration = calcSweepSmallRegionsPerIteration(numCells);
				uintptr_t yieldSlackTime = resetSweepSmallRegionCount(env, sweepSmallRegionsPerIteration);
				uintptr_t actualSweepRegions;
				if ((actualSweepRegions = sweepList->dequeue(env->getRegionWorkList(), sweepSmallRegionsPerIteration)) > 0) {
					regionPool->decrementCurrentCountOfSweepRegions(sizeClass, actualSweepRegions);
					regionPool->decrementCurrentTotalCountOfSweepRegions(actualSweepRegions);
					uintptr_t freedRegions = 0, processedRegions = 0;
					MM_HeapRegionQueue *fullList = env->getRegionLocalFull();
					while ((currentRegion = env->getRegionWorkList()->dequeue()) != NULL) {
						sweepRegion(env, currentRegion);
						if (currentRegion->getMemoryPoolACL()->getFreeCount() < numCells) {
							uintptr_t occupancy = (currentRegion->getMemoryPoolACL()->getMarkCount() * 100) / numCells;
							/* Maintain average occupancy needed for nondeterministic sweep heuristic */
							if (shouldUpdateOccupancy) {
								regionPool->updateOccupancy(sizeClass, occupancy);
							}
							if (currentRegion->getMemoryPoolACL()->getMarkCount() == numCells) {
								/* Return full regions to full list */
								fullList->enqueue(currentRegion);
							} else {
								regionPool->enqueueAvailable(currentRegion, sizeClass, occupancy, splitIndex);
							}
						} else {
							currentRegion->emptyRegionReturned(env);
							currentRegion->setFree(1);
							env->getRegionLocalFree()->enqueue(currentRegion);
							freedRegions++;
						}
						processedRegions++;
						
						if (updateSweepSmallRegionCount()) {
							yieldFromSweep(env, yieldSlackTime);
						}
					}
					regionPool->addSingleFree(env, env->getRegionLocalFree());				
					regionPool->getSmallFullRegions(sizeClass)->enqueue(fullList);
					yieldFromSweep(env, yieldSlackTime);
				}
			} /* end of while (getCurrentCountOfSweepRegions(sizeClass)) */
		}
	}
}
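
The yetToComplete / totalYetToComplete comparison above is what keeps the sweep interleaved: a size class whose remaining fraction of sweep regions has fallen below the heap-wide remaining fraction breaks out and gives the next size class a turn, so every size class frees up some regions early. A small self-contained illustration of that check, using made-up counts:

#include <cstdio>

/* Standalone illustration of the fairness check in incrementalSweepSmall().
 * The counts are invented figures, not taken from a real heap. */
int main()
{
	unsigned currentForClass = 20, initialForClass = 100; /* this size class  */
	unsigned currentTotal = 300, initialTotal = 1000;     /* all size classes */

	float yetToComplete = (float)currentForClass / initialForClass;   /* 0.20 */
	float totalYetToComplete = (float)currentTotal / initialTotal;    /* 0.30 */

	if (yetToComplete < totalYetToComplete) {
		/* This size class is ahead of the overall sweep; the real code breaks
		 * out of the inner loop so the next size class can be swept. */
		printf("ahead (%.2f < %.2f): move on to the next size class\n",
			yetToComplete, totalYetToComplete);
	} else {
		printf("behind (%.2f >= %.2f): keep sweeping this size class\n",
			yetToComplete, totalYetToComplete);
	}
	return 0;
}
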
Example #5
/*
 * Pre-allocate a list of cells; the amount to pre-allocate is retrieved from the env's allocation interface.
 * @return the first cell carved off the list
 */
uintptr_t *
MM_AllocationContextSegregated::preAllocateSmall(MM_EnvironmentBase *env, uintptr_t sizeInBytesRequired)
{
	MM_SizeClasses *sizeClasses = env->getExtensions()->defaultSizeClasses;
	uintptr_t sizeClass = sizeClasses->getSizeClassSmall(sizeInBytesRequired);
	uintptr_t sweepCount = 0;
	uint64_t sweepStartTime = 0;
	bool done = false;
	uintptr_t *result = NULL;

	/* BEN TODO 1429: The object allocation interface base class should define all API used by this method such that casting would be unnecessary. */
	MM_SegregatedAllocationInterface* segregatedAllocationInterface = (MM_SegregatedAllocationInterface*)env->_objectAllocationInterface;
	uintptr_t replenishSize = segregatedAllocationInterface->getReplenishSize(env, sizeInBytesRequired);
	uintptr_t preAllocatedBytes = 0;

	while (!done) {

		/* If we have a region, attempt to replenish the ACL's cache */
		MM_HeapRegionDescriptorSegregated *region = _smallRegions[sizeClass];
		if (NULL != region) {
			MM_MemoryPoolAggregatedCellList *memoryPoolACL = region->getMemoryPoolACL();
			uintptr_t* cellList = memoryPoolACL->preAllocateCells(env, sizeClasses->getCellSize(sizeClass), replenishSize, &preAllocatedBytes);
			if (NULL != cellList) {
				Assert_MM_true(preAllocatedBytes > 0);
				if (shouldPreMarkSmallCells(env)) {
					_markingScheme->preMarkSmallCells(env, region, cellList, preAllocatedBytes);
				}
				segregatedAllocationInterface->replenishCache(env, sizeInBytesRequired, cellList, preAllocatedBytes);
				result = (uintptr_t *) segregatedAllocationInterface->allocateFromCache(env, sizeInBytesRequired);
				done = true;
			}
		}

		smallAllocationLock();

		/* Either we did not have a region or we failed to pre-allocate from the ACL. Re-check under
		 * the lock: if that is still the case, replenish; otherwise loop around and retry the fast path. */
		region = _smallRegions[sizeClass];
		if ((NULL == region) || !region->getMemoryPoolACL()->hasCell()) {

			/* This may cause the start of a GC */
			signalSmallRegionDepleted(env, sizeClass);

			flushSmall(env, sizeClass);

			/* Attempt to get a region of this size class which may already have some allocated cells */
			if (!tryAllocateRegionFromSmallSizeClass(env, sizeClass)) {
				/* Attempt to get a region by sweeping */
				if (!trySweepAndAllocateRegionFromSmallSizeClass(env, sizeClass, &sweepCount, &sweepStartTime)) {
					/* Attempt to get an unused region */
					if (!tryAllocateFromRegionPool(env, sizeClass)) {
						/* Really out of regions */
						done = true;
					}
				}
			}
		}

		smallAllocationUnlock();
	}
	return result;
}
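
For context on how preAllocateSmall() is reached: the per-thread allocation interface serves most requests from a cached run of cells and only falls back to this routine, and its small-allocation lock, when the cache is exhausted. The sketch below shows that fast-path / slow-path shape only; the types and function names are hypothetical stand-ins, not the OMR allocation-interface API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Hypothetical per-thread cache holding a run of pre-allocated cells. */
struct ThreadCellCache {
	uintptr_t *next;  /* next free cell in the cached run */
	size_t cellsLeft; /* cells remaining before a replenish is needed */
};

/* Stand-in for the slow path (preAllocateSmall()): in the collector this takes
 * the small-allocation lock, replenishes the cache from the current region and
 * returns the first carved-off cell. Here it simply reports failure. */
static uintptr_t *replenishFromRegion(size_t /* sizeInBytes */)
{
	return NULL;
}

static uintptr_t *allocateSmall(ThreadCellCache *cache, size_t sizeInBytes, size_t cellSize)
{
	if (cache->cellsLeft > 0) {
		uintptr_t *cell = cache->next; /* fast path: no lock taken */
		cache->next = (uintptr_t *)((uint8_t *)cache->next + cellSize);
		cache->cellsLeft -= 1;
		return cell;
	}
	return replenishFromRegion(sizeInBytes); /* slow path */
}

int main()
{
	ThreadCellCache cache = { NULL, 0 };             /* empty cache: forces the slow path */
	uintptr_t *cell = allocateSmall(&cache, 24, 32); /* 24-byte request, 32-byte cells */
	printf("allocation %s\n", (cell != NULL) ? "served from cache" : "fell through to the region slow path");
	return 0;
}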