Example No. 1
/**
 * Get exclusive control of the card table to prepare it for card cleaning.
 * 
 * @param env the current environment
 * @param currentPhase the current card clean phase at the point of call
 * @param threadAtSafePoint whether the calling thread is at a safe point or not
 * @return TRUE if exclusive access acquired; FALSE otherwise
 */
MMINLINE bool
MM_ConcurrentCardTableForWC::getExclusiveCardTableAccess(MM_EnvironmentBase *env, CardCleanPhase currentPhase, bool threadAtSafePoint)
{
	/* Because the WC card table requires exclusive access to prepare the cards for cleaning, we cannot gain
	 * exclusive access here unless the calling thread is at a safe point. If it is not, request an async
	 * callback on this thread so the card table can be prepared later.
	 */
	if(!threadAtSafePoint) {
		_callback->requestCallback(env);
		return false;
	}

	/* Get the current global gc count */
	uintptr_t gcCount = _extensions->globalGCStats.gcCount;
	bool phaseChangeCompleted = false;

	env->acquireExclusiveVMAccess();
	if ((gcCount != _extensions->globalGCStats.gcCount) || (currentPhase != _cardCleanPhase)) {
		/* Nothing to do so get out  */
		phaseChangeCompleted = true;
	}

	if (phaseChangeCompleted) {
		env->releaseExclusiveVMAccess();
		return false;
	}

	MM_AtomicOperations::lockCompareExchangeU32((volatile uint32_t*)&_cardCleanPhase,
											(uint32_t) currentPhase,
											(uint32_t) currentPhase + 1);
	assume0(cardTableBeingPrepared(_cardCleanPhase));
	return true;
}
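The function above advances _cardCleanPhase with a compare-and-swap so that only the thread that observed the expected phase performs the transition. Below is a minimal standalone sketch of that pattern, using std::atomic in place of MM_AtomicOperations and a plain counter in place of the CardCleanPhase enum (both are stand-ins, not the OMR types).

#include <atomic>
#include <cstdint>
#include <cstdio>

/* Hypothetical phase counter standing in for _cardCleanPhase; not the OMR field. */
static std::atomic<uint32_t> phase{0};

/* Advance the phase from `expected` to `expected + 1` only if no other thread
 * has moved it already; returns true when this thread performed the transition. */
static bool advancePhase(uint32_t expected)
{
	uint32_t desired = expected + 1;
	return phase.compare_exchange_strong(expected, desired);
}

int main()
{
	bool first = advancePhase(0);  /* succeeds: 0 -> 1 */
	bool second = advancePhase(0); /* fails: the phase is already 1 */
	std::printf("first=%d second=%d phase=%u\n", first, second, phase.load());
	return 0;
}

compare_exchange_strong only stores the new value if the current value still equals the expected one, which is why a stale phase observation simply fails rather than corrupting the phase.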
Example No. 2
/**
 * Move a chunk of heap from one location to another within the receiver's owned regions.
 * This involves fixing up any free list information that may change as a result of an address change.
 *
 * @param srcBase Start address to move.
 * @param srcTop End address to move.
 * @param dstBase Start of destination address to move into.
 *
 */
void
MM_MemoryPoolSplitAddressOrderedListBase::moveHeap(MM_EnvironmentBase* env, void* srcBase, void* srcTop, void* dstBase)
{
	for (uintptr_t i = 0; i < _heapFreeListCount; ++i) {
		MM_HeapLinkedFreeHeader* currentFreeEntry, *previousFreeEntry;

		previousFreeEntry = NULL;
		currentFreeEntry = _heapFreeLists[i]._freeList;
		while (NULL != currentFreeEntry) {
			if (((void*)currentFreeEntry >= srcBase) && ((void*)currentFreeEntry < srcTop)) {
				MM_HeapLinkedFreeHeader* newFreeEntry;
				newFreeEntry = (MM_HeapLinkedFreeHeader*)((((uintptr_t)currentFreeEntry) - ((uintptr_t)srcBase)) + ((uintptr_t)dstBase));

				if (previousFreeEntry) {
					assume0(previousFreeEntry < newFreeEntry);
					previousFreeEntry->setNext(newFreeEntry);
				} else {
					_heapFreeLists[i]._freeList = newFreeEntry;
				}
			}
			previousFreeEntry = currentFreeEntry;
			currentFreeEntry = currentFreeEntry->getNext();
		}
	}
}
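The relocation above comes down to one piece of pointer arithmetic: a free entry inside [srcBase, srcTop) keeps its offset from srcBase and is re-anchored at dstBase. Here is a small self-contained sketch of that rebase step; the buffers and the rebase helper are illustrative, not part of the pool class.

#include <cstdint>
#include <cstdio>

/* Rebase a pointer that lies inside the moved range [srcBase, srcTop) into the
 * destination range starting at dstBase, preserving its offset from the base. */
static void *rebase(void *addr, void *srcBase, void *dstBase)
{
	uintptr_t offset = (uintptr_t)addr - (uintptr_t)srcBase;
	return (void *)((uintptr_t)dstBase + offset);
}

int main()
{
	char src[64];
	char dst[64];
	void *entry = &src[24]; /* pretend free-list entry inside the moved range */
	void *moved = rebase(entry, src, dst);
	std::printf("offset preserved: %s\n", (moved == &dst[24]) ? "yes" : "no");
	return 0;
}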
Example No. 3
/**
 * Counter balance a contract.
 * React to a pending contract of the given subspace by possibly adjusting (expanding) other subspaces to fill
 * minimum quotas, etc.
 * The generational receiver determines which direction to push the request so that the opposing sibling has a chance
 * to expand.
 * @return the adjusted contract size that is allowed to the receiver.
 */
uintptr_t
MM_MemorySubSpaceGenerational::counterBalanceContract(
	MM_EnvironmentBase *env,
	MM_MemorySubSpace *previousSubSpace,
	MM_MemorySubSpace *contractSubSpace,
	uintptr_t contractSize,
	uintptr_t contractAlignment)
{
	uintptr_t expandSize;
	/* Determine if a counter balancing expand is required */
	assume0(contractSize <= _currentSize);
	if((_currentSize - contractSize) >= _minimumSize) {
		return contractSize;
	}
	expandSize = _minimumSize - (_currentSize - contractSize);
	assume0(expandSize == MM_Math::roundToFloor(MM_GCExtensions::getExtensions(env)->heapAlignment, expandSize)); /* contract delta should be the same alignment as expand delta */
	
	/* Find the space that needs to expand, and do it */
	if(previousSubSpace == _memorySubSpaceNew) {
		return _memorySubSpaceOld->counterBalanceContractWithExpand(env, this, contractSubSpace, contractSize, contractAlignment, expandSize);
	}
	
	return _memorySubSpaceNew->counterBalanceContractWithExpand(env, this, contractSubSpace, contractSize, contractAlignment, expandSize);
}
Example No. 4
/**
 * Release exclusive control of the card table
 */
MMINLINE void
MM_ConcurrentCardTableForWC::releaseExclusiveCardTableAccess(MM_EnvironmentBase *env)
{
	/* Cache the current value */
	CardCleanPhase currentPhase = _cardCleanPhase;
	
	/* Finished initializing. Only one thread can be here, but for
	 * consistency use an atomic operation to update the card cleaning phase.
	 */
	assume0(cardCleaningInProgress((CardCleanPhase((uint32_t)currentPhase + 1))));
	MM_AtomicOperations::lockCompareExchangeU32((volatile uint32_t*)&_cardCleanPhase,
											(uint32_t) currentPhase,
											(uint32_t) currentPhase + 1);
											
	/* Cancel any outstanding events on other threads */
	_callback->cancelCallback(env);
											
	/* We are done so release exclusive now so mutators can restart */
	env->releaseExclusiveVMAccess(); 										
}
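Both card table access routines above bracket their work between acquiring and releasing exclusive VM access, and every early-exit path has to remember the release; the OMR code does this explicitly, and places the release after cancelCallback() so outstanding callbacks are cancelled before mutators restart. As a design aside, the same pairing can be expressed with an RAII guard so the release cannot be skipped. A minimal sketch, with ExclusiveAccess as a hypothetical stand-in rather than the OMR API:

#include <cstdio>

/* Stand-in guard: acquires in the constructor, releases in the destructor. */
class ExclusiveAccess {
public:
	ExclusiveAccess()  { std::puts("acquire exclusive access"); }
	~ExclusiveAccess() { std::puts("release exclusive access"); }
};

static void prepareCardTable()
{
	ExclusiveAccess guard; /* released automatically on every exit path */
	std::puts("prepare cards for cleaning");
}

int main()
{
	prepareCardTable();
	return 0;
}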
Example No. 5
	/**
	 * @note This should not be called by anyone but Dispatcher or ParallelDispatcher 
	 **/
	MMINLINE virtual void setThreadCount(uintptr_t threadCount) { assume0(1 == threadCount); }
Example No. 6
/**
 * Prepare a chunk of the card table for card cleaning.
 * 
 * This function is passed the start and end of a chunk of the card table which needs preparing
 * for card cleaning, be it concurrent or final card cleaning. As the chunk may contain
 * holes (non-contiguous heap) we use the _cleaningRanges array to determine which cards within
 * the chunk actually need processing, i.e. a chunk can span part of one or more cleaning ranges.
 * The "action" passed to this function defines what we do to each unclean card within the chunk. 
 * If action is MARK_DIRTY_CARD_SAFE we modify all cards with value CARD_DIRTY to CARD_CLEAN_SAFE; 
 * for action MARK_SAFE_CARD_DIRTY we reset any cards marked as CARD_CLEAN_SAFE to CARD_DIRTY.
 *  
 * @param chunkStart - first card to be processed 
 * @param chunkEnd   - last card to be processed
 * @param action - defines what to do to each unclean card; the value is either MARK_DIRTY_CARD_SAFE or
 * MARK_SAFE_CARD_DIRTY.
 */ 
void
MM_ConcurrentCardTableForWC::prepareCardTableChunk(MM_EnvironmentBase *env, Card *chunkStart, Card *chunkEnd, CardAction action)
{
	uintptr_t prepareUnitFactor, prepareUnitSize;

	/* Determine the size of the card table work unit */
	prepareUnitFactor = env->_currentTask->getThreadCount();
	prepareUnitFactor = ((prepareUnitFactor == 1) ? 1 : prepareUnitFactor * PREPARE_PARALLEL_MULTIPLIER);
	prepareUnitSize = countCardsInRange(env, chunkStart, chunkEnd);
	prepareUnitSize = prepareUnitSize / prepareUnitFactor;
	prepareUnitSize = (prepareUnitSize > 0) ? MM_Math::roundToCeiling(PREPARE_UNIT_SIZE_ALIGNMENT, prepareUnitSize) : PREPARE_UNIT_SIZE_ALIGNMENT;
	
	/* Walk all card cleaning ranges to determine which cards should be prepared */
	for (CleaningRange *range=_cleaningRanges; range < _lastCleaningRange; range++) {
		
		Card *prepareAddress;
		uintptr_t prepareSizeRemaining;
		uintptr_t currentPrepareSize;
		/* Is this range strictly before the chunk we are looking for? */
		if (chunkStart >= range->topCard){
			/* Yes, so just skip to the next */
			continue;
		}
		/* Is this range strictly after the chunk we are looking for? */
		if (chunkEnd <= range->baseCard){
			/* Yes, so our work is done */
			break;
		}
		
		/* Walk the range in work units of the prepare unit size */
		prepareAddress = (chunkStart > range->baseCard) ? chunkStart : range->baseCard;
		prepareSizeRemaining = (chunkEnd > range->topCard) ? (range->topCard - prepareAddress) : (chunkEnd - prepareAddress);

		while (prepareSizeRemaining > 0) {
			/* Calculate the size of card table chunk to be processed next */
			currentPrepareSize = (prepareUnitSize > prepareSizeRemaining) ? prepareSizeRemaining : prepareUnitSize;
				
			/* Check if the thread should clear the corresponding mark map range for the current heap range */ 
			if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
				Card *firstCard,*endCard;
				firstCard = prepareAddress;
				endCard = prepareAddress + currentPrepareSize;
				
				for (Card *currentCard = firstCard; currentCard < endCard; currentCard++) {
					/* Are we on a uintptr_t boundary? If so, scan the card table a uintptr_t
					 * at a time until we find a slot which is non-zero or the end of the card
					 * table is reached. This is based on the premise that the card table will be
					 * mostly empty, so scanning a uintptr_t at a time reduces the time taken to
					 * scan it.
					 */
					if (((Card)CARD_CLEAN == *currentCard) &&
						((uintptr_t)currentCard % sizeof(uintptr_t) == 0)) {
						uintptr_t *nextSlot = (uintptr_t *)currentCard;
						while (((Card *)nextSlot < endCard) && (*nextSlot == SLOT_ALL_CLEAN)) {
							nextSlot++;
						}

						/* Either the end of the scan was reached or a slot containing a dirty card was found.
						 * Reset the scan pointer.
						 */
						currentCard = (Card *)nextSlot;

						/* End of card table reached? */
						if (currentCard >= endCard) {
							break;
						}
					}

					if (MARK_DIRTY_CARD_SAFE == action) {
						/* Is the next card dirty? If so, flag it as safe to clean */
						if ((Card)CARD_DIRTY == *currentCard) {
							/* If card has marked objects we need to clean it */
							if (cardHasMarkedObjects(env, currentCard)) {
								*currentCard = (Card)CARD_CLEAN_SAFE;
							} else {	
								*currentCard = (Card)CARD_CLEAN;
							}	
						}
					} else {
						assume0(action == MARK_SAFE_CARD_DIRTY);
						if ((Card)CARD_CLEAN_SAFE == *currentCard) {
							*currentCard = (Card)CARD_DIRTY;
						}
					}
				}
			} /* of J9MODRON_HANDLE_NEXT_WORK_UNIT */	
			
			/* Move to the next address range in the segment */ 
			prepareAddress += currentPrepareSize; 
			prepareSizeRemaining -= currentPrepareSize; 
		}	
	}	
} 
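The inner loop above relies on one optimization worth isolating: when the scan pointer is word aligned and the current card is clean, it reads a whole uintptr_t of cards at once and skips ahead while the word is entirely clean. Below is a self-contained sketch of that skipping scan, counting dirty cards in a toy table; the Card values and SLOT_ALL_CLEAN are illustrative constants, not the OMR definitions.

#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uint8_t Card;
const Card CARD_CLEAN = 0x00;       /* illustrative values, not the OMR constants */
const Card CARD_DIRTY = 0x01;
const uintptr_t SLOT_ALL_CLEAN = 0; /* a whole word of clean cards */

/* Count dirty cards in [start, end), skipping a word at a time over runs of
 * clean cards whenever the scan pointer is word aligned, as in the loop above. */
static uintptr_t countDirtyCards(Card *start, Card *end)
{
	uintptr_t dirty = 0;
	for (Card *card = start; card < end; card++) {
		if ((CARD_CLEAN == *card) && (0 == ((uintptr_t)card % sizeof(uintptr_t)))) {
			uintptr_t *slot = (uintptr_t *)card;
			while (((Card *)slot < end) && (SLOT_ALL_CLEAN == *slot)) {
				slot++;
			}
			card = (Card *)slot; /* resume the byte-wise scan at the first non-clean word */
			if (card >= end) {
				break;
			}
		}
		if (CARD_DIRTY == *card) {
			dirty++;
		}
	}
	return dirty;
}

int main()
{
	alignas(sizeof(uintptr_t)) Card table[64];
	std::memset(table, CARD_CLEAN, sizeof(table));
	table[3] = CARD_DIRTY;
	table[40] = CARD_DIRTY;
	std::printf("dirty cards: %lu\n", (unsigned long)countDirtyCards(table, table + 64));
	return 0;
}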
Example No. 7
	/**
	 * Switch the concurrent sweep mode from one state to another.
	 */
	MMINLINE void switchMode(uintptr_t oldMode, uintptr_t newMode) {
		assume0(_mode == oldMode);
		_mode = newMode;
	}
/**
 * Reset and reassign each chunk to a range of heap memory.
 * Given the current updated list of chunks and the corresponding heap memory, walk the chunk
 * list reassigning each chunk to an appropriate range of memory.  This will clear each chunk
 * structure and then assign its basic values that connect it to a range of memory (base/top,
 * pool, segment, etc).
 * @return the total number of chunks in the system.
 */
uintptr_t
MM_SweepHeapSectioningSegmented::reassignChunks(MM_EnvironmentBase *env)
{
	MM_ParallelSweepChunk *chunk; /* Sweep table chunk (global) */
	MM_ParallelSweepChunk *previousChunk;
	uintptr_t totalChunkCount;  /* Total chunks in system */

	MM_SweepHeapSectioningIterator sectioningIterator(this);

	totalChunkCount = 0;
	previousChunk = NULL;

	MM_HeapRegionManager *regionManager = _extensions->getHeap()->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	MM_HeapRegionDescriptor *region = NULL;

	while (NULL != (region = regionIterator.nextRegion())) {
		if (region->isCommitted()) {
			/* TODO:  this must be rethought for Tarok since it treats all regions identically but some might require different sweep logic */
			uintptr_t *heapChunkBase = (uintptr_t *)region->getLowAddress();  /* Heap chunk base pointer */
			uintptr_t *regionHighAddress = (uintptr_t *)region->getHighAddress();

			while (heapChunkBase < regionHighAddress) {
				void *poolHighAddr;
				uintptr_t *heapChunkTop;
				MM_MemoryPool *pool;

				chunk = sectioningIterator.nextChunk();
				Assert_MM_true(chunk != NULL);  /* Should never return NULL */
				totalChunkCount += 1;

				/* Clear all data in the chunk (including sweep implementation specific information) */
				chunk->clear();

				if(((uintptr_t)regionHighAddress - (uintptr_t)heapChunkBase) < _extensions->parSweepChunkSize) {
					/* corner case - we will wrap our address range */
					heapChunkTop = regionHighAddress;
				} else {
					/* normal case - just increment by the chunk size */
					heapChunkTop = (uintptr_t *)((uintptr_t)heapChunkBase + _extensions->parSweepChunkSize);
				}

				/* Find out if the range of memory we are considering spans 2 different pools.  If it does,
				 * the current chunk can only be attributed to one, so we limit the upper range of the chunk
				 * to the first pool and will continue the assignment at the upper address range.
				 */
				pool = region->getSubSpace()->getMemoryPool(env, heapChunkBase, heapChunkTop, poolHighAddr);
				if (NULL == poolHighAddr) {
					heapChunkTop = (heapChunkTop > regionHighAddress ? regionHighAddress : heapChunkTop);
				} else {
					/* Yes, so adjust the chunk boundaries */
					assume0(poolHighAddr > heapChunkBase && poolHighAddr < heapChunkTop);
					heapChunkTop = (uintptr_t *) poolHighAddr;
				}

				/* All values for the chunk have been calculated - assign them */
				chunk->chunkBase = (void *)heapChunkBase;
				chunk->chunkTop = (void *)heapChunkTop;
				chunk->memoryPool = pool;
				chunk->_coalesceCandidate = (heapChunkBase != region->getLowAddress());
				chunk->_previous = previousChunk;
				if(NULL != previousChunk) {
					previousChunk->_next = chunk;
				}

				/* Move to the next chunk */
				heapChunkBase = heapChunkTop;

				/* and remember address of previous chunk */
				previousChunk = chunk;

				assume0((uintptr_t)heapChunkBase == MM_Math::roundToCeiling(_extensions->heapAlignment,(uintptr_t)heapChunkBase));
			}
		}
	}

	if(NULL != previousChunk) {
		previousChunk->_next = NULL;
	}

	return totalChunkCount;
}
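The chunking walk above clamps the last chunk of a region to the region's high address instead of letting it spill over. Here is the stepping logic in isolation, with an illustrative region size and chunk size standing in for the _extensions->parSweepChunkSize configuration.

#include <cstdint>
#include <cstdio>

/* Walk an address range in fixed-size chunks, clamping the final chunk to the
 * end of the range, mirroring the heapChunkBase/heapChunkTop stepping above. */
static uintptr_t countChunks(uintptr_t low, uintptr_t high, uintptr_t chunkSize)
{
	uintptr_t count = 0;
	uintptr_t base = low;
	while (base < high) {
		/* corner case: less than a full chunk remains, so clamp to the range end */
		uintptr_t top = ((high - base) < chunkSize) ? high : (base + chunkSize);
		count += 1;
		base = top;
	}
	return count;
}

int main()
{
	/* 10 MiB region swept in 4 MiB chunks: 2 full chunks plus a 2 MiB remainder */
	uintptr_t chunks = countChunks(0, 10u << 20, 4u << 20);
	std::printf("chunks: %lu\n", (unsigned long)chunks);
	return 0;
}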