void
MM_MarkMap::initializeMarkMap(MM_EnvironmentBase *env)
{
	/* TODO: The multiplier should really be some constant defined globally */
	const uintptr_t MODRON_PARALLEL_MULTIPLIER = 32;
	uintptr_t heapAlignment = _extensions->heapAlignment;

	/* Determine the size of heap that a work unit of mark map clearing corresponds to */
	uintptr_t heapClearUnitFactor = env->_currentTask->getThreadCount();
	heapClearUnitFactor = ((heapClearUnitFactor == 1) ? 1 : heapClearUnitFactor * MODRON_PARALLEL_MULTIPLIER);
	uintptr_t heapClearUnitSize = _extensions->heap->getMemorySize() / heapClearUnitFactor;
	heapClearUnitSize = MM_Math::roundToCeiling(heapAlignment, heapClearUnitSize);

	/* Walk all object segments to determine what ranges of the mark map should be cleared */
	MM_HeapRegionDescriptor *region;
	MM_Heap *heap = _extensions->getHeap();
	MM_HeapRegionManager *regionManager = heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	while (NULL != (region = regionIterator.nextRegion())) {
		if (region->isCommitted()) {
			/* Walk the segment in chunks of heapClearUnitSize, checking whether the corresponding mark map
			 * range should be cleared.
			 */
			uint8_t *heapClearAddress = (uint8_t *)region->getLowAddress();
			uintptr_t heapClearSizeRemaining = region->getSize();

			while (0 != heapClearSizeRemaining) {
				/* Calculate the size of heap that is to be processed */
				uintptr_t heapCurrentClearSize = (heapClearUnitSize > heapClearSizeRemaining) ? heapClearSizeRemaining : heapClearUnitSize;
				Assert_MM_true(heapCurrentClearSize > 0);

				/* Check if the thread should clear the corresponding mark map range for the current heap range */
				if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
					/* Convert the heap address/size to its corresponding mark map address/size.
					 * NOTE: We calculate the low and high heap offsets, and build the mark map index and size values
					 * from these to avoid rounding errors (if we used the size, the conversion routine could get a different
					 * rounding result than the actual end address).
					 */
					uintptr_t heapClearOffset = ((uintptr_t)heapClearAddress) - _heapMapBaseDelta;
					uintptr_t heapMapClearIndex = convertHeapIndexToHeapMapIndex(env, heapClearOffset, sizeof(uintptr_t));
					uintptr_t heapMapClearSize = convertHeapIndexToHeapMapIndex(env, heapClearOffset + heapCurrentClearSize, sizeof(uintptr_t)) - heapMapClearIndex;

					/* And clear the mark map */
					OMRZeroMemory((void *)(((uintptr_t)_heapMapBits) + heapMapClearIndex), heapMapClearSize);
				}

				/* Move to the next address range in the segment */
				heapClearAddress += heapCurrentClearSize;
				heapClearSizeRemaining -= heapCurrentClearSize;
			}
		}
	}
}
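/* Illustrative sketch (not part of the sources above): the conversion performed by
 * convertHeapIndexToHeapMapIndex() is not shown here, so the geometry below - one mark
 * bit per fixed-size heap granule, packed into uintptr_t words - is an assumption, and
 * GRANULE_SIZE, heapOffsetToMapByteIndex() and clearRangeExample() are hypothetical
 * names. It demonstrates the NOTE in the code: derive the mark map range from the low
 * and high heap offsets independently, rather than converting the size, so adjacent
 * work units meet at exactly the same map index.
 */
#include <cstdint>
#include <cstring>

const uintptr_t GRANULE_SIZE = 2 * sizeof(uintptr_t); /* bytes of heap covered by one mark bit (assumed) */
const uintptr_t BITS_PER_WORD = 8 * sizeof(uintptr_t);
const uintptr_t HEAP_BYTES_PER_MAP_WORD = GRANULE_SIZE * BITS_PER_WORD;

/* Convert a heap offset (bytes from the heap base) to a byte index into the mark map,
 * rounded down to a uintptr_t word boundary. */
static uintptr_t
heapOffsetToMapByteIndex(uintptr_t heapOffset)
{
	return (heapOffset / HEAP_BYTES_PER_MAP_WORD) * sizeof(uintptr_t);
}

/* Clear the mark map words covering the heap range [lowOffset, highOffset).
 * Both endpoints are converted separately; assuming work unit boundaries are
 * heap aligned, the end of one unit and the start of the next map to the same
 * word index, so no map words are skipped or cleared twice. */
static void
clearRangeExample(uint8_t *mapBits, uintptr_t lowOffset, uintptr_t highOffset)
{
	uintptr_t mapLow = heapOffsetToMapByteIndex(lowOffset);
	uintptr_t mapSize = heapOffsetToMapByteIndex(highOffset) - mapLow;
	memset(mapBits + mapLow, 0, mapSize);
}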
/**
 * Reset and reassign each chunk to a range of heap memory.
 * Given the current updated list of chunks and the corresponding heap memory, walk the chunk
 * list reassigning each chunk to an appropriate range of memory. This will clear each chunk
 * structure and then assign its basic values that connect it to a range of memory (base/top,
 * pool, segment, etc).
 * @return the total number of chunks in the system.
 */
uintptr_t
MM_SweepHeapSectioningSegmented::reassignChunks(MM_EnvironmentBase *env)
{
	MM_ParallelSweepChunk *chunk; /* Sweep table chunk (global) */
	MM_ParallelSweepChunk *previousChunk;
	uintptr_t totalChunkCount; /* Total chunks in system */

	MM_SweepHeapSectioningIterator sectioningIterator(this);

	totalChunkCount = 0;
	previousChunk = NULL;

	MM_HeapRegionManager *regionManager = _extensions->getHeap()->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	MM_HeapRegionDescriptor *region = NULL;
	while (NULL != (region = regionIterator.nextRegion())) {
		if (region->isCommitted()) {
			/* TODO: this must be rethought for Tarok since it treats all regions identically but some might require different sweep logic */
			uintptr_t *heapChunkBase = (uintptr_t *)region->getLowAddress(); /* Heap chunk base pointer */
			uintptr_t *regionHighAddress = (uintptr_t *)region->getHighAddress();

			while (heapChunkBase < regionHighAddress) {
				void *poolHighAddr;
				uintptr_t *heapChunkTop;
				MM_MemoryPool *pool;

				chunk = sectioningIterator.nextChunk();
				Assert_MM_true(chunk != NULL); /* Should never return NULL */
				totalChunkCount += 1;

				/* Clear all data in the chunk (including sweep implementation specific information) */
				chunk->clear();

				if (((uintptr_t)regionHighAddress - (uintptr_t)heapChunkBase) < _extensions->parSweepChunkSize) {
					/* corner case - we would wrap our address range */
					heapChunkTop = regionHighAddress;
				} else {
					/* normal case - just increment by the chunk size */
					heapChunkTop = (uintptr_t *)((uintptr_t)heapChunkBase + _extensions->parSweepChunkSize);
				}

				/* Find out if the range of memory we are considering spans 2 different pools. If it does,
				 * the current chunk can only be attributed to one, so we limit the upper range of the chunk
				 * to the first pool and will continue the assignment at the upper address range.
				 */
				pool = region->getSubSpace()->getMemoryPool(env, heapChunkBase, heapChunkTop, poolHighAddr);

				if (NULL == poolHighAddr) {
					heapChunkTop = (heapChunkTop > regionHighAddress ? regionHighAddress : heapChunkTop);
				} else {
					/* The range spans two pools, so adjust the chunk boundaries */
					assume0(poolHighAddr > heapChunkBase && poolHighAddr < heapChunkTop);
					heapChunkTop = (uintptr_t *)poolHighAddr;
				}

				/* All values for the chunk have been calculated - assign them */
				chunk->chunkBase = (void *)heapChunkBase;
				chunk->chunkTop = (void *)heapChunkTop;
				chunk->memoryPool = pool;
				chunk->_coalesceCandidate = (heapChunkBase != region->getLowAddress());
				chunk->_previous = previousChunk;
				if (NULL != previousChunk) {
					previousChunk->_next = chunk;
				}

				/* Move to the next chunk */
				heapChunkBase = heapChunkTop;

				/* and remember the address of the previous chunk */
				previousChunk = chunk;

				assume0((uintptr_t)heapChunkBase == MM_Math::roundToCeiling(_extensions->heapAlignment, (uintptr_t)heapChunkBase));
			}
		}
	}

	if (NULL != previousChunk) {
		previousChunk->_next = NULL;
	}

	return totalChunkCount;
}
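/* Illustrative sketch (not part of the sources above): reassignChunks() leaves behind a
 * doubly linked list of chunks, each pinned to a [chunkBase, chunkTop) heap range and a
 * memory pool. SweepChunkSketch and walkChunkList() are hypothetical names; the struct
 * keeps only the linkage and range fields populated above, while the real
 * MM_ParallelSweepChunk also carries the memory pool and per-sweep state. A sweep worker
 * would consume chunks from this list as parallel work units rather than print them.
 */
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct SweepChunkSketch {
	void *chunkBase;
	void *chunkTop;
	SweepChunkSketch *_next;
	SweepChunkSketch *_previous;
};

/* Walk the list from its head, reporting each chunk's heap range, and return the number
 * of chunks visited; this count should match the total returned by reassignChunks() if
 * the list was built over the same regions. */
static uintptr_t
walkChunkList(SweepChunkSketch *head)
{
	uintptr_t count = 0;
	for (SweepChunkSketch *chunk = head; NULL != chunk; chunk = chunk->_next) {
		printf("chunk %lu: [%p, %p)\n", (unsigned long)count, chunk->chunkBase, chunk->chunkTop);
		count += 1;
	}
	return count;
}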