/* enqueue src at the _end_ of the receiver's queue */
	virtual void enqueue(MM_HeapRegionQueue *srcAsPQ)
	{
		MM_LockingHeapRegionQueue* src = MM_LockingHeapRegionQueue::asLockingHeapRegionQueue(srcAsPQ);
		if (NULL == src->_head) { /* Nothing to move - single read needs no lock */
			return;
		}
		lock();
		src->lock();
		/* Remove from src */
		MM_HeapRegionDescriptorSegregated *front = src->_head;
		MM_HeapRegionDescriptorSegregated *back = src->_tail;
		uintptr_t srcLength = src->_length;
		src->_head = NULL;
		src->_tail = NULL;
		src->_length = 0;
		
		/* Add to back of self */
		front->setPrev(_tail); /* OK even if _tail is NULL */
		if (_tail == NULL) {
			_head = front;
		} else {
			_tail->setNext(front);
		}
		_tail = back;
		_length += srcLength;
		
		src->unlock();
		unlock();
	}
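	/*
	 * Illustrative usage (a sketch; the queue names are hypothetical): a worker
	 * drains its local queue into a shared one with a single O(1) splice of the
	 * head/tail pointers, taken under both locks, leaving the source empty:
	 *
	 *   sharedRegionQueue->enqueue(workerLocalRegionQueue);
	 */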
/* This method was moved here so that the AC can see the large full region and cache it */
uintptr_t *
MM_AllocationContextSegregated::allocateLarge(MM_EnvironmentBase *env, uintptr_t sizeInBytesRequired)
{
	uintptr_t neededRegions = _regionPool->divideUpRegion(sizeInBytesRequired);
	MM_HeapRegionDescriptorSegregated *region = NULL;
	uintptr_t excess = 0;

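	/* Retry with a widening tolerance for excess regions: after each failure,
	 * excess follows 0, 1, 3, 7, ... (2^k - 1) until the pool can satisfy the
	 * request or the tolerance saturates at MAX_UINT. */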
	while (region == NULL && excess < MAX_UINT) {
		region = _regionPool->allocateFromRegionPool(env, neededRegions, OMR_SIZECLASSES_LARGE, excess);
		excess = (2 * excess) + 1;
	}

	uintptr_t *result = (region == NULL) ? NULL : (uintptr_t *)region->getLowAddress();

	/* Flush the large region right away. */
	if (region != NULL) {
		/* cache the large full region in AC */
		_perContextLargeFullRegions->enqueue(region);

		/* reset ACL counts */
		region->getMemoryPoolACL()->resetCounts();
	}

	return result;
}
	virtual void 
	push(MM_FreeHeapRegionList *srcAsFPL) 
	{ 
		MM_LockingFreeHeapRegionList* src = MM_LockingFreeHeapRegionList::asLockingFreeHeapRegionList(srcAsFPL);
		if (src->_head == NULL) { /* Nothing to move - single read needs no lock */
			return;
		}
		lock();
		src->lock();
		
		/* Remove from src */
		MM_HeapRegionDescriptorSegregated *front = src->_head;
		MM_HeapRegionDescriptorSegregated *back = src->_tail;
		uintptr_t srcLength = src->_length;
		src->_head = NULL;
		src->_tail = NULL;
		src->_length = 0;
		
		/* Add to front of self */
		back->setNext(_head); /* OK even if _head is NULL */
		if (_head == NULL) {
			_tail = back;
		} else {
			_head->setPrev(back);
		}
		_head = front;
		_length += srcLength;
		
		src->unlock();
		unlock();
	}
	MM_HeapRegionDescriptorSegregated *dequeueInternal()
	{
		MM_HeapRegionDescriptorSegregated *result = _head;
		if (_head != NULL) {
			_length--;
			_head = result->getNext();
			result->setNext(NULL);
			if (NULL == _head) {
				_tail = NULL;
			} else {
				_head->setPrev(NULL);
			}
		}
		return result;
	}	
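	/*
	 * A debug-only sketch (hypothetical; not part of the original class) that
	 * checks the invariants enqueueInternal()/dequeueInternal() maintain:
	 * prev/next links are mutually consistent, both ends are NULL-terminated,
	 * and _length matches the node count.
	 */
	void verifyInvariantsInternal()
	{
		uintptr_t count = 0;
		MM_HeapRegionDescriptorSegregated *prev = NULL;
		for (MM_HeapRegionDescriptorSegregated *cur = _head; NULL != cur; cur = cur->getNext()) {
			Assert_MM_true(cur->getPrev() == prev); /* back-link must mirror the forward walk */
			prev = cur;
			count += 1;
		}
		Assert_MM_true(prev == _tail); /* the last node reached must be the tail */
		Assert_MM_true(count == _length);
	}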
void
MM_SweepSchemeSegregated::incrementalSweepLarge(MM_EnvironmentBase *env)
{
	/* Sweep through large objects. */
	MM_RegionPoolSegregated *regionPool = _memoryPool->getRegionPool();
	MM_HeapRegionQueue *largeSweepRegions = regionPool->getLargeSweepRegions();
	MM_HeapRegionQueue *largeFullRegions = regionPool->getLargeFullRegions();
	MM_HeapRegionDescriptorSegregated *currentRegion;
	while ((currentRegion = largeSweepRegions->dequeue()) != NULL) {
		sweepRegion(env, currentRegion);
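		/* After the sweep, a large region is all-or-nothing: either its large
		 * object survived (the ACL reports no free cells, so the region stays
		 * on the full list) or nothing survived and the whole region can be
		 * returned to the free pool. */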
		
		if (currentRegion->getMemoryPoolACL()->getFreeCount() == 0) {
			largeFullRegions->enqueue(currentRegion);
		} else {
			currentRegion->emptyRegionReturned(env);
			regionPool->addFreeRegion(env, currentRegion);
		}
		yieldFromSweep(env);
	}
}
bool
MM_AllocationContextSegregated::tryAllocateFromRegionPool(MM_EnvironmentBase *env, uintptr_t sizeClass)
{
	MM_HeapRegionDescriptorSegregated *region = _regionPool->allocateFromRegionPool(env, 1, sizeClass, MAX_UINT);
	bool result = false;
	if (NULL != region) {
		/* cache the small full region in AC */
		_perContextSmallFullRegions[sizeClass]->enqueue(region);

		region->formatFresh(env, sizeClass, region->getLowAddress());

		/* A store barrier is required here since the initialization of the new region
		 * must be written back before we make it reachable via _smallRegions
		 * (_smallRegions is read from outside the lock which covers this write).
		 */
		MM_AtomicOperations::storeSync();
		_smallRegions[sizeClass] = region;
		result = true;
	}
	return result;
}
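/*
 * Editorial note: the storeSync() above is the release half of a publish/consume
 * pairing; readers of _smallRegions outside the lock need the matching acquire
 * ordering. As an analogy only (standard C++ atomics, not the OMR atomics API),
 * the same publication pattern looks like:
 *
 *   std::atomic<Region *> slot;
 *
 *   // publisher (inside the lock)
 *   initialize(region);
 *   slot.store(region, std::memory_order_release);
 *
 *   // consumer (no lock)
 *   Region *r = slot.load(std::memory_order_acquire);
 *   if (NULL != r) { ... r is fully initialized ... }
 */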
	void enqueueInternal(MM_HeapRegionDescriptorSegregated *region)
	{ 
		Assert_MM_true(NULL == region->getNext() && NULL == region->getPrev());
		if (NULL == _head) {
			_head = _tail = region;
		} else {
			_tail->setNext(region);
			region->setPrev(_tail);
			_tail = region;
		}
		_length++;
	}
	/*
	 * This method should be used with care.  In particular, it is wrong to detach from a freelist
	 * while iterating over it unless the detach stops further iteration.
	 */
	void
	detachInternal(MM_HeapRegionDescriptorSegregated *cur)
	{
		_length--;
		MM_HeapRegionDescriptorSegregated *prev = cur->getPrev();
		MM_HeapRegionDescriptorSegregated *next = cur->getNext();
		if (prev != NULL) {
			Assert_MM_true(prev->getNext() == cur);
			prev->setNext(next);
		} else {
			Assert_MM_true(cur == _head);
		}
		if (next != NULL) {
			Assert_MM_true(next->getPrev() == cur);
			next->setPrev(prev);
		} else {
			Assert_MM_true(cur == _tail);
		}
		cur->setPrev(NULL);
		cur->setNext(NULL);
		if (_head == cur) {
			_head = next;
		}
		if (_tail == cur) {
			_tail = prev;
		}
	}
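	/*
	 * A minimal sketch (not part of the original class) of the one safe way to
	 * combine detaching with iteration, per the warning above: capture the
	 * successor before unlinking. The predicate shouldDetach() is hypothetical.
	 */
	void
	detachMatchingInternal()
	{
		MM_HeapRegionDescriptorSegregated *cur = _head;
		while (NULL != cur) {
			MM_HeapRegionDescriptorSegregated *next = cur->getNext(); /* capture before detach clears the links */
			if (shouldDetach(cur)) { /* hypothetical predicate */
				detachInternal(cur);
			}
			cur = next;
		}
	}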
void
MM_SweepSchemeSegregated::incrementalSweepArraylet(MM_EnvironmentBase *env)
{
	uintptr_t arrayletsPerRegion = env->getExtensions()->arrayletsPerRegion;

	MM_RegionPoolSegregated *regionPool = _memoryPool->getRegionPool();
	MM_HeapRegionQueue *arrayletSweepRegions = regionPool->getArrayletSweepRegions();
	MM_HeapRegionQueue *arrayletAvailableRegions = regionPool->getArrayletAvailableRegions();
	MM_HeapRegionDescriptorSegregated *currentRegion;
	
	while ((currentRegion = arrayletSweepRegions->dequeue()) != NULL) {
		sweepRegion(env, currentRegion);
		
		if (currentRegion->getMemoryPoolACL()->getFreeCount() != arrayletsPerRegion) {
			arrayletAvailableRegions->enqueue(currentRegion);
		} else {
			currentRegion->emptyRegionReturned(env);
			regionPool->addFreeRegion(env, currentRegion);
		}
		
		yieldFromSweep(env);
	}
}
	void
	pushInternal(MM_HeapRegionDescriptorSegregated *region)
	{
		Assert_MM_true(NULL == region->getNext() && NULL == region->getPrev());
		_length++;
		if (NULL == _head) {
			_head = region;
			_tail = region;
		} else {
			_head->setPrev(region);
			region->setNext(_head);
			_head = region;
		}
	}
void
MM_SweepSchemeSegregated::incrementalSweepSmall(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase *ext = env->getExtensions();
	bool shouldUpdateOccupancy = ext->nonDeterministicSweep;
	MM_RegionPoolSegregated *regionPool = _memoryPool->getRegionPool();
	uintptr_t splitIndex = env->getSlaveID() % (regionPool->getSplitAvailableListSplitCount());

	/* 
	 * Iterate through the regions so that each region is processed exactly once.
	 * Interleaved sweeping: free up some regions of every size class right away
	 * so that the application has somewhere to allocate from, minimizing the use
	 * of non-deterministic sweeps.
	 * Any marked objects are unmarked.
	 * If a region holds a marked object, the region is kept active;
	 * if a region contains no marked objects, it can be returned to a free list.
	 */
	MM_SizeClasses *sizeClasses = ext->defaultSizeClasses;
	while (regionPool->getCurrentTotalCountOfSweepRegions()) {
		for (uintptr_t sizeClass = OMR_SIZECLASSES_MIN_SMALL; sizeClass <= OMR_SIZECLASSES_MAX_SMALL; sizeClass++) {
			while (regionPool->getCurrentCountOfSweepRegions(sizeClass)) {
				float yetToComplete = (float)regionPool->getCurrentCountOfSweepRegions(sizeClass) / regionPool->getInitialCountOfSweepRegions(sizeClass);
				float totalYetToComplete = (float)regionPool->getCurrentTotalCountOfSweepRegions() / regionPool->getInitialTotalCountOfSweepRegions();
				
				if (yetToComplete < totalYetToComplete) {
					break;
				}
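				/* Worked example: if this size class still has 10 of its initial 40
				 * sweep regions (0.25 yet to complete) while 120 of 300 remain
				 * overall (0.40), this class is ahead of the global pace, so we
				 * break and give the next size class a turn. */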
				
				MM_HeapRegionQueue *sweepList = regionPool->getSmallSweepRegions(sizeClass);
				MM_HeapRegionDescriptorSegregated *currentRegion;
				uintptr_t numCells = sizeClasses->getNumCells(sizeClass);
				uintptr_t sweepSmallRegionsPerIteration = calcSweepSmallRegionsPerIteration(numCells);
				uintptr_t yieldSlackTime = resetSweepSmallRegionCount(env, sweepSmallRegionsPerIteration);
				uintptr_t actualSweepRegions;
				if ((actualSweepRegions = sweepList->dequeue(env->getRegionWorkList(), sweepSmallRegionsPerIteration)) > 0) {
					regionPool->decrementCurrentCountOfSweepRegions(sizeClass, actualSweepRegions);
					regionPool->decrementCurrentTotalCountOfSweepRegions(actualSweepRegions);
					uintptr_t freedRegions = 0, processedRegions = 0;
					MM_HeapRegionQueue *fullList = env->getRegionLocalFull();
					while ((currentRegion = env->getRegionWorkList()->dequeue()) != NULL) {
						sweepRegion(env, currentRegion);
						if (currentRegion->getMemoryPoolACL()->getFreeCount() < numCells) {
							uintptr_t occupancy = (currentRegion->getMemoryPoolACL()->getMarkCount() * 100) / numCells;
							/* Maintain average occupancy needed for nondeterministic sweep heuristic */
							if (shouldUpdateOccupancy) {
								regionPool->updateOccupancy(sizeClass, occupancy);
							}
							if (currentRegion->getMemoryPoolACL()->getMarkCount() == numCells) {
								/* Return full regions to full list */
								fullList->enqueue(currentRegion);
							} else {
								regionPool->enqueueAvailable(currentRegion, sizeClass, occupancy, splitIndex);
							}
						} else {
							currentRegion->emptyRegionReturned(env);
							currentRegion->setFree(1);
							env->getRegionLocalFree()->enqueue(currentRegion);
							freedRegions++;
						}
						processedRegions++;
						
						if (updateSweepSmallRegionCount()) {
							yieldFromSweep(env, yieldSlackTime);
						}
					}
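					/* Flush the thread-local lists built above: the local free list
					 * goes back to the pool, and the local full list is spliced onto
					 * the shared full queue in one O(1) enqueue (see enqueue() above). */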
					regionPool->addSingleFree(env, env->getRegionLocalFree());				
					regionPool->getSmallFullRegions(sizeClass)->enqueue(fullList);
					yieldFromSweep(env, yieldSlackTime);
				}
			} /* end of while (getCurrentCountOfSweepRegions(sizeClass)) */
		}
	}
}
void
MM_SweepSchemeSegregated::incrementalCoalesceFreeRegions(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase *ext = env->getExtensions();
	MM_HeapRegionManager *regionManager = ext->heap->getHeapRegionManager();
	uintptr_t regionCount = regionManager->getTableRegionCount();
	MM_RegionPoolSegregated *regionPool = _memoryPool->getRegionPool();
	MM_FreeHeapRegionList *coalesceFreeList = regionPool->getCoalesceFreeList();
	
	uintptr_t yieldSlackTime = resetCoalesceFreeRegionCount(env);
	yieldFromSweep(env, yieldSlackTime);

	coalesceFreeList->push(regionPool->getSingleFreeList());
	coalesceFreeList->push(regionPool->getMultiFreeList());
	
	MM_HeapRegionDescriptorSegregated *coalescing = NULL;
	MM_HeapRegionDescriptorSegregated *currentRegion = NULL;
	
	for (uintptr_t i = 0; i < regionCount;) {
		
		currentRegion = (MM_HeapRegionDescriptorSegregated *)regionManager->mapRegionTableIndexToDescriptor(i);
		uintptr_t range = currentRegion->getRange();
		i += range;
		
		bool shouldYield = updateCoalesceFreeRegionCount(range);
		bool shouldClose = shouldYield || (i >= regionCount);
		
		if (currentRegion->isFree()) {
			coalesceFreeList->detach(currentRegion);
			bool joined = (range < MAX_REGION_COALESCE) && (coalescing != NULL && coalescing->joinFreeRangeInit(currentRegion));
			if (joined) {
				currentRegion = NULL;
			} else {
				shouldClose = true;
			}
		} else {
			currentRegion = NULL;
		}
		if (shouldClose && coalescing != NULL) {
			coalescing->joinFreeRangeComplete();
			regionPool->addFreeRegion(env, coalescing, true);
			coalescing = NULL;
		}
		if (shouldYield) {
			if (currentRegion != NULL) {
				regionPool->addFreeRegion(env, currentRegion, true);
				currentRegion = NULL;
			}
			yieldFromSweep(env, yieldSlackTime);
		} else {
			if (coalescing == NULL) {
				coalescing = currentRegion;
			}
		}
	}
	if (currentRegion != NULL) {
		regionPool->addFreeRegion(env, currentRegion, true);		
		currentRegion = NULL;
	}

	yieldFromSweep(env);
}
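/*
 * Illustrative walk (hypothetical layout, and assuming joinFreeRangeInit()
 * only accepts contiguous ranges): for a table F(2) F(1) U(3) F(4) — F free,
 * U in use, parenthesized range lengths — the loop detaches F(2) and starts a
 * coalescing run, joins the adjacent F(1) into it, leaves the run open across
 * U(3), then closes it when F(4) fails to join; F(4) itself is flushed to the
 * free pool after the loop ends.
 */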
/*
 * Pre-allocate a list of cells; the amount to pre-allocate is retrieved from the
 * env's allocation interface.
 * @return the first cell, carved off the pre-allocated list, or NULL if no region
 * could be obtained
 */
uintptr_t *
MM_AllocationContextSegregated::preAllocateSmall(MM_EnvironmentBase *env, uintptr_t sizeInBytesRequired)
{
	MM_SizeClasses *sizeClasses = env->getExtensions()->defaultSizeClasses;
	uintptr_t sizeClass = sizeClasses->getSizeClassSmall(sizeInBytesRequired);
	uintptr_t sweepCount = 0;
	uint64_t sweepStartTime = 0;
	bool done = false;
	uintptr_t *result = NULL;

	/* BEN TODO 1429: The object allocation interface base class should define all API used by this method such that casting would be unnecessary. */
	MM_SegregatedAllocationInterface* segregatedAllocationInterface = (MM_SegregatedAllocationInterface*)env->_objectAllocationInterface;
	uintptr_t replenishSize = segregatedAllocationInterface->getReplenishSize(env, sizeInBytesRequired);
	uintptr_t preAllocatedBytes = 0;

	while (!done) {

		/* If we have a region, attempt to replenish the ACL's cache */
		MM_HeapRegionDescriptorSegregated *region = _smallRegions[sizeClass];
		if (NULL != region) {
			MM_MemoryPoolAggregatedCellList *memoryPoolACL = region->getMemoryPoolACL();
			uintptr_t* cellList = memoryPoolACL->preAllocateCells(env, sizeClasses->getCellSize(sizeClass), replenishSize, &preAllocatedBytes);
			if (NULL != cellList) {
				Assert_MM_true(preAllocatedBytes > 0);
				if (shouldPreMarkSmallCells(env)) {
					_markingScheme->preMarkSmallCells(env, region, cellList, preAllocatedBytes);
				}
				segregatedAllocationInterface->replenishCache(env, sizeInBytesRequired, cellList, preAllocatedBytes);
				result = (uintptr_t *) segregatedAllocationInterface->allocateFromCache(env, sizeInBytesRequired);
				done = true;
			}
		}

		smallAllocationLock();

		/* Either we did not have a region, or we failed to pre-allocate from its ACL.
		 * Re-check under the lock: another thread may have replenished the region,
		 * in which case we simply retry. */
		region = _smallRegions[sizeClass];
		if ((NULL == region) || !region->getMemoryPoolACL()->hasCell()) {

			/* This may cause the start of a GC */
			signalSmallRegionDepleted(env, sizeClass);

			flushSmall(env, sizeClass);

			/* Attempt to get a region of this size class which may already have some allocated cells */
			if (!tryAllocateRegionFromSmallSizeClass(env, sizeClass)) {
				/* Attempt to get a region by sweeping */
				if (!trySweepAndAllocateRegionFromSmallSizeClass(env, sizeClass, &sweepCount, &sweepStartTime)) {
					/* Attempt to get an unused region */
					if (!tryAllocateFromRegionPool(env, sizeClass)) {
						/* Really out of regions */
						done = true;
					}
				}
			}
		}

		smallAllocationUnlock();
	}
	return result;
}
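/*
 * Flow sketch (editorial summary): each pass of the loop first tries the
 * lock-free fast path — pre-allocate cells from the current region's ACL,
 * refill the thread-local cache, and allocate from it — then takes
 * smallAllocationLock() to re-check; if the current region is still missing
 * or depleted it is flushed and replaced, in order of preference, by a
 * partially-full region of the same size class, a freshly swept one, or an
 * unused region from the pool. The loop exits on a successful cache refill
 * or when the pool is truly out of regions.
 */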