/**
 * Clear the mark map bits corresponding to every committed region of the heap.
 *
 * The heap is carved into clearing units (sized from the total heap divided by
 * threadCount * multiplier, rounded up to heap alignment). Each unit is claimed
 * through J9MODRON_HANDLE_NEXT_WORK_UNIT so the clearing work is distributed
 * across all threads of the current task.
 *
 * @param env the environment of the calling GC thread
 */
void MM_MarkMap::initializeMarkMap(MM_EnvironmentBase *env)
{
	/* TODO: The multiplier should really be some constant defined globally */
	const uintptr_t MODRON_PARALLEL_MULTIPLIER = 32;

	/* Determine the size of heap that one work unit of mark map clearing covers */
	uintptr_t clearUnitFactor = env->_currentTask->getThreadCount();
	if (1 != clearUnitFactor) {
		clearUnitFactor *= MODRON_PARALLEL_MULTIPLIER;
	}
	uintptr_t clearUnitSize = _extensions->heap->getMemorySize() / clearUnitFactor;
	clearUnitSize = MM_Math::roundToCeiling(_extensions->heapAlignment, clearUnitSize);

	/* Walk all regions; only committed ones have mark map backing to clear */
	MM_Heap *heap = _extensions->getHeap();
	MM_HeapRegionManager *regionManager = heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	MM_HeapRegionDescriptor *region = NULL;
	while (NULL != (region = regionIterator.nextRegion())) {
		if (!region->isCommitted()) {
			continue;
		}

		/* Walk the region in clearUnitSize steps, claiming each step as a work unit */
		uint8_t *clearAddress = (uint8_t *)region->getLowAddress();
		uintptr_t sizeRemaining = region->getSize();
		while (0 != sizeRemaining) {
			/* Size of heap processed in this step (last step may be short) */
			uintptr_t currentClearSize = (clearUnitSize > sizeRemaining) ? sizeRemaining : clearUnitSize;
			Assert_MM_true(currentClearSize > 0);

			/* Should this thread clear the mark map range for this heap range? */
			if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
				/* Convert the heap address/size into a mark map index/size.
				 * NOTE: derive both the low and high mark map indices from heap
				 * offsets (rather than converting the size directly) so both ends
				 * round the same way and no bits are missed or doubled.
				 */
				uintptr_t heapClearOffset = ((uintptr_t)clearAddress) - _heapMapBaseDelta;
				uintptr_t heapMapClearIndex = convertHeapIndexToHeapMapIndex(env, heapClearOffset, sizeof(uintptr_t));
				uintptr_t heapMapClearSize =
					convertHeapIndexToHeapMapIndex(env, heapClearOffset + currentClearSize, sizeof(uintptr_t)) - heapMapClearIndex;

				/* And clear the mark map */
				OMRZeroMemory((void *)(((uintptr_t)_heapMapBits) + heapMapClearIndex), heapMapClearSize);
			}

			/* Advance to the next heap range in the region */
			clearAddress += currentClearSize;
			sizeRemaining -= currentClearSize;
		}
	}
}
/** * Sweep all chunks. * * @param totalChunkCount total number of chunks to be swept */ void MM_ParallelSweepScheme::sweepAllChunks(MM_EnvironmentBase *env, uintptr_t totalChunkCount) { #if defined(J9MODRON_TGC_PARALLEL_STATISTICS) uintptr_t chunksProcessed = 0; /* Chunks processed by this thread */ #endif /* J9MODRON_TGC_PARALLEL_STATISTICS */ MM_ParallelSweepChunk *chunk = NULL; MM_ParallelSweepChunk *prevChunk = NULL; MM_SweepHeapSectioningIterator sectioningIterator(_sweepHeapSectioning); for (uintptr_t chunkNum = 0; chunkNum < totalChunkCount; chunkNum++) { chunk = sectioningIterator.nextChunk(); Assert_MM_true (chunk != NULL); /* Should never return NULL */ if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) { #if defined(J9MODRON_TGC_PARALLEL_STATISTICS) chunksProcessed += 1; #endif /* J9MODRON_TGC_PARALLEL_STATISTICS */ /* if we are changing memory pool, flush the thread local stats to appropriate (previous) pool */ if ((NULL != prevChunk) && (prevChunk->memoryPool != chunk->memoryPool)) { prevChunk->memoryPool->getLargeObjectAllocateStats()->getFreeEntrySizeClassStats()->mergeLocked(&env->_freeEntrySizeClassStats); } /* if we are starting or changing memory pool, setup frequent allocation sizes in free entry stats for the pool we are about to sweep */ if ((NULL == prevChunk) || (prevChunk->memoryPool != chunk->memoryPool)) { MM_MemoryPool *topLevelMemoryPool = chunk->memoryPool->getParent(); if (NULL == topLevelMemoryPool) { topLevelMemoryPool = chunk->memoryPool; } env->_freeEntrySizeClassStats.initializeFrequentAllocation(topLevelMemoryPool->getLargeObjectAllocateStats()); } /* Sweep the chunk */ sweepChunk(env, chunk); prevChunk = chunk; } } #if defined(J9MODRON_TGC_PARALLEL_STATISTICS) env->_sweepStats.sweepChunksProcessed = chunksProcessed; env->_sweepStats.sweepChunksTotal = totalChunkCount; #endif /* J9MODRON_TGC_PARALLEL_STATISTICS */ /* flush the remaining stats (since the the last pool switch) */ if (NULL != prevChunk) { 
prevChunk->memoryPool->getLargeObjectAllocateStats()->getFreeEntrySizeClassStats()->mergeLocked(&env->_freeEntrySizeClassStats); } }
void MM_HeapWalker::rememberedObjectSlotsDo(MM_EnvironmentBase *env, MM_HeapWalkerSlotFunc function, void *userData, uintptr_t walkFlags, bool parallel) { SlotObjectDoUserData slotObjectDoUserData = { function, userData, walkFlags }; omrobjectptr_t* slotPtr = NULL; MM_SublistPuddle *puddle = NULL; OMR_VMThread *omrVMThread = env->getOmrVMThread(); GC_SublistIterator remSetIterator(&(env->getExtensions()->rememberedSet)); while ((puddle = remSetIterator.nextList()) != NULL) { if (!parallel || J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) { GC_SublistSlotIterator remSetSlotIterator(puddle); while ((slotPtr = (omrobjectptr_t*)remSetSlotIterator.nextSlot()) != NULL) { if (*slotPtr != NULL) { heapWalkerObjectSlotDo(omrVMThread, NULL, *slotPtr, &slotObjectDoUserData); } } } } }
/** * Prepare a chunk of the card table for card cleaning. * * This function is passed the start and end of a chunk of the card table which needs preparing * for card cleaning; be it concurrent or final card cleaning. As the chunk may contain * holes (non-contiguous heap) we use the _cleaningRanges array to determine which cards within * the chunk actually need processing, ie a chunk can span part of one or more cleaning ranges. * The "action" passed to this function defines what we do to each unclean card within the chunk. * If action is MARK_DIRTY_CARD_SAFE we modify all cards with value CARD_DIRTY to CARD_CLEAN_SAFE; * for action MARK_SAFE_CARD_DIRTY we reset any cards marked as CARD_CLEAN_SAFE to CARD_DIRTY. * * @param chunkStart - first card to be processed * @param chunkEnd - last card to be processed * @param action - defines what to do to each un-clean card, value is either MARK_DIRTY_CARD_SAFE or * MARK_SAFE_CARD_DIRTY. */ void MM_ConcurrentCardTableForWC::prepareCardTableChunk(MM_EnvironmentBase *env, Card *chunkStart, Card *chunkEnd, CardAction action) { uintptr_t prepareUnitFactor, prepareUnitSize; /* Determine the size of card table work unit */ prepareUnitFactor = env->_currentTask->getThreadCount(); prepareUnitFactor = ((prepareUnitFactor == 1) ? 1 : prepareUnitFactor * PREPARE_PARALLEL_MULTIPLIER); prepareUnitSize = countCardsInRange(env, chunkStart, chunkEnd); prepareUnitSize = prepareUnitSize / prepareUnitFactor; prepareUnitSize = prepareUnitSize > 0 ? MM_Math::roundToCeiling(PREPARE_UNIT_SIZE_ALIGNMENT, prepareUnitSize) : PREPARE_UNIT_SIZE_ALIGNMENT; /* Walk all card cleaning ranges to determine which cards should be prepared */ for (CleaningRange *range=_cleaningRanges; range < _lastCleaningRange; range++) { Card *prepareAddress; uintptr_t prepareSizeRemaining; uintptr_t currentPrepareSize; /* Is this range strictly before the chunk we are looking for? 
*/ if (chunkStart >= range->topCard){ /* Yes, so just skip to the next */ continue; } /* Is this range strictly after the chunk we are looking for? */ if (chunkEnd <= range->baseCard){ /* Yes, so our work is done */ break; } /* Walk the segment in chunks the size of the heapPrepareUnit size */ prepareAddress = chunkStart > range->baseCard ? chunkStart : range->baseCard; prepareSizeRemaining = chunkEnd > range->topCard ? range->topCard - prepareAddress : chunkEnd - prepareAddress; while(prepareSizeRemaining > 0 ) { /* Calculate the size of card table chunk to be processed next */ currentPrepareSize = (prepareUnitSize > prepareSizeRemaining) ? prepareSizeRemaining : prepareUnitSize; /* Check if the thread should clear the corresponding mark map range for the current heap range */ if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) { Card *firstCard,*endCard; firstCard = prepareAddress; endCard = prepareAddress + currentPrepareSize; for (Card *currentCard = firstCard; currentCard < endCard; currentCard++) { /* Are we are on an uintptr_t boundary ?. If so scan the card table a uintptr_t * at a time until we find a slot which is non-zero or the end of card table * found. This is based on the premise that the card table will be mostly * empty and scanning an uintptr_t at a time will reduce the time taken to * scan the card table. */ if (((Card)CARD_CLEAN == *currentCard) && ((uintptr_t)currentCard % sizeof(uintptr_t) == 0)) { uintptr_t *nextSlot= (uintptr_t *)currentCard; while ( ((Card *)nextSlot < endCard) && (*nextSlot == SLOT_ALL_CLEAN) ) { nextSlot++; } /* * Either end of scan or a slot which contains a dirty card found. Reset scan ptr */ currentCard= (Card *)nextSlot; /* End of card table reached ? */ if (currentCard >= endCard) { break; } } if (MARK_DIRTY_CARD_SAFE == action) { /* Is next card dirty ? 
If so flag it as safe to clean */ if ((Card)CARD_DIRTY == *currentCard) { /* If card has marked objects we need to clean it */ if (cardHasMarkedObjects(env, currentCard)) { *currentCard = (Card)CARD_CLEAN_SAFE; } else { *currentCard = (Card)CARD_CLEAN; } } } else { assume0(action == MARK_SAFE_CARD_DIRTY); if ((Card)CARD_CLEAN_SAFE == *currentCard) { *currentCard = (Card)CARD_DIRTY; } } } } /* of J9MODRON_HANDLE_NEXT_WORK_UNIT */ /* Move to the next address range in the segment */ prepareAddress += currentPrepareSize; prepareSizeRemaining -= currentPrepareSize; } } }