/**
 * Sweep all chunks.
 *
 * Chunks are claimed cooperatively by worker threads through the
 * J9MODRON_HANDLE_NEXT_WORK_UNIT protocol, so each thread sweeps only a
 * subset of the chunk table. Thread-local free-entry size-class statistics
 * are accumulated while sweeping within one memory pool and merged (under a
 * lock) into that pool's stats whenever the sweep crosses a pool boundary,
 * with one final merge after the loop for the last pool visited.
 *
 * @param env the environment of the calling worker thread
 * @param totalChunkCount total number of chunks to be swept
 */
void MM_ParallelSweepScheme::sweepAllChunks(MM_EnvironmentBase *env, uintptr_t totalChunkCount)
{
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
	uintptr_t chunksProcessed = 0; /* Chunks processed by this thread */
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */
	MM_ParallelSweepChunk *chunk = NULL;
	MM_ParallelSweepChunk *prevChunk = NULL; /* last chunk actually swept by this thread; NULL until the first claimed chunk */
	MM_SweepHeapSectioningIterator sectioningIterator(_sweepHeapSectioning);

	for (uintptr_t chunkNum = 0; chunkNum < totalChunkCount; chunkNum++) {
		chunk = sectioningIterator.nextChunk();
		Assert_MM_true (chunk != NULL); /* Should never return NULL */

		/* Only sweep chunks this thread has claimed as its work unit */
		if(J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
			chunksProcessed += 1;
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */

			/* if we are changing memory pool, flush the thread local stats to appropriate (previous) pool */
			if ((NULL != prevChunk) && (prevChunk->memoryPool != chunk->memoryPool)) {
				prevChunk->memoryPool->getLargeObjectAllocateStats()->getFreeEntrySizeClassStats()->mergeLocked(&env->_freeEntrySizeClassStats);
			}

			/* if we are starting or changing memory pool, setup frequent allocation sizes in free entry stats for the pool we are about to sweep.
			 * The frequent-allocation sizes are taken from the top-level (parent) pool when one exists —
			 * presumably stats are maintained at the top-level pool for sub-pool configurations; confirm against pool hierarchy. */
			if ((NULL == prevChunk) || (prevChunk->memoryPool != chunk->memoryPool)) {
				MM_MemoryPool *topLevelMemoryPool = chunk->memoryPool->getParent();
				if (NULL == topLevelMemoryPool) {
					topLevelMemoryPool = chunk->memoryPool;
				}
				env->_freeEntrySizeClassStats.initializeFrequentAllocation(topLevelMemoryPool->getLargeObjectAllocateStats());
			}

			/* Sweep the chunk */
			sweepChunk(env, chunk);

			prevChunk = chunk;
		}
	}

#if defined(J9MODRON_TGC_PARALLEL_STATISTICS)
	/* Publish this thread's sweep progress for TGC reporting */
	env->_sweepStats.sweepChunksProcessed = chunksProcessed;
	env->_sweepStats.sweepChunksTotal = totalChunkCount;
#endif /* J9MODRON_TGC_PARALLEL_STATISTICS */

	/* flush the remaining stats (accumulated since the last pool switch) */
	if (NULL != prevChunk) {
		prevChunk->memoryPool->getLargeObjectAllocateStats()->getFreeEntrySizeClassStats()->mergeLocked(&env->_freeEntrySizeClassStats);
	}
}
/**
 * Connect every chunk in the sweep chunk table.
 *
 * Resets all sweep states, then walks the chunk table in order handing each
 * chunk to connectChunk() so the free lists are linked up, and finally
 * flushes the trailing free entry of every memory space.
 *
 * @param env the environment of the calling thread
 * @param totalChunkCount total number of chunks expected in the table
 */
void MM_ParallelSweepScheme::connectAllChunks(MM_EnvironmentBase *env, uintptr_t totalChunkCount)
{
	/* Put every sweep state back into its initial condition before connecting */
	initializeSweepStates(env);

	/* Visit the chunk table front to back, connecting free lists chunk by chunk */
	MM_SweepHeapSectioningIterator chunkIterator(_sweepHeapSectioning);
	for (uintptr_t index = 0; index < totalChunkCount; index++) {
		MM_ParallelSweepChunk *currentChunk = chunkIterator.nextChunk();
		Assert_MM_true(currentChunk != NULL); /* the table must supply totalChunkCount chunks */
		connectChunk(env, currentChunk);
	}

	/* Walk all memory spaces flushing the previous free entry */
	flushAllFinalChunks(env);
}
/**
 * Reset and reassign each chunk to a range of heap memory.
 * Given the current updated listed of chunks and the corresponding heap memory, walk the chunk
 * list reassigning each chunk to an appropriate range of memory. This will clear each chunk
 * structure and then assign its basic values that connect it to a range of memory (base/top,
 * pool, segment, etc). Only committed regions are sectioned; chunks are also linked into a
 * doubly-linked list (_previous/_next) in assignment order.
 * @param env the environment of the calling thread
 * @return the total number of chunks in the system.
 */
uintptr_t MM_SweepHeapSectioningSegmented::reassignChunks(MM_EnvironmentBase *env)
{
	MM_ParallelSweepChunk *chunk; /* Sweep table chunk (global) */
	MM_ParallelSweepChunk *previousChunk;
	uintptr_t totalChunkCount; /* Total chunks in system */

	MM_SweepHeapSectioningIterator sectioningIterator(this);

	totalChunkCount = 0;
	previousChunk = NULL;

	MM_HeapRegionManager *regionManager = _extensions->getHeap()->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	MM_HeapRegionDescriptor *region = NULL;
	while (NULL != (region = regionIterator.nextRegion())) {
		/* Uncommitted regions contain no sweepable memory and are skipped */
		if (region->isCommitted()) {
			/* TODO: this must be rethought for Tarok since it treats all regions identically but some might require different sweep logic */
			uintptr_t *heapChunkBase = (uintptr_t *)region->getLowAddress(); /* Heap chunk base pointer */
			uintptr_t *regionHighAddress = (uintptr_t *)region->getHighAddress();

			/* Carve the region into chunks of at most parSweepChunkSize bytes */
			while (heapChunkBase < regionHighAddress) {
				void *poolHighAddr; /* out-param: set by getMemoryPool() when the range spans two pools, else NULL */
				uintptr_t *heapChunkTop;
				MM_MemoryPool *pool;

				chunk = sectioningIterator.nextChunk();
				Assert_MM_true(chunk != NULL); /* Should never return NULL */
				totalChunkCount += 1;

				/* Clear all data in the chunk (including sweep implementation specific information) */
				chunk->clear();

				if(((uintptr_t)regionHighAddress - (uintptr_t)heapChunkBase) < _extensions->parSweepChunkSize) {
					/* corner case - we will wrap our address range */
					heapChunkTop = regionHighAddress;
				} else {
					/* normal case - just increment by the chunk size */
					heapChunkTop = (uintptr_t *)((uintptr_t)heapChunkBase + _extensions->parSweepChunkSize);
				}

				/* Find out if the range of memory we are considering spans 2 different pools.  If it does,
				 * the current chunk can only be attributed to one, so we limit the upper range of the chunk
				 * to the first pool and will continue the assignment at the upper address range.
				 */
				pool = region->getSubSpace()->getMemoryPool(env, heapChunkBase, heapChunkTop, poolHighAddr);
				if (NULL == poolHighAddr) {
					/* Single pool: clamp the chunk top to the region boundary */
					heapChunkTop = (heapChunkTop > regionHighAddress ? regionHighAddress : heapChunkTop);
				} else {
					/* Yes ..so adjust chunk boundaries */
					assume0(poolHighAddr > heapChunkBase && poolHighAddr < heapChunkTop);
					heapChunkTop = (uintptr_t *) poolHighAddr;
				}

				/* All values for the chunk have been calculated - assign them */
				chunk->chunkBase = (void *)heapChunkBase;
				chunk->chunkTop = (void *)heapChunkTop;
				chunk->memoryPool = pool;
				/* a chunk can coalesce with its predecessor only if it is not the first chunk of its region */
				chunk->_coalesceCandidate = (heapChunkBase != region->getLowAddress());
				chunk->_previous= previousChunk;
				if(NULL != previousChunk) {
					previousChunk->_next = chunk;
				}

				/* Move to the next chunk */
				heapChunkBase = heapChunkTop;

				/* and remember address of previous chunk */
				previousChunk = chunk;

				assume0((uintptr_t)heapChunkBase == MM_Math::roundToCeiling(_extensions->heapAlignment,(uintptr_t)heapChunkBase));
			}
		}
	}

	/* Terminate the chunk list at the last chunk assigned */
	if(NULL != previousChunk) {
		previousChunk->_next = NULL;
	}

	return totalChunkCount;
}