/**
 * Walk all objects in the heap in a single threaded linear fashion.
 */
void
MM_HeapWalker::allObjectsDo(MM_EnvironmentBase *env, MM_HeapWalkerObjectFunc function, void *userData, uintptr_t walkFlags, bool parallel, bool prepareHeapForWalk)
{
	uintptr_t typeFlags = 0;

	GC_OMRVMInterface::flushCachesForWalk(env->getOmrVM());

	if (walkFlags & J9_MU_WALK_NEW_AND_REMEMBERED_ONLY) {
		typeFlags |= MEMORY_TYPE_NEW;
	}

	MM_GCExtensionsBase *extensions = env->getExtensions();
	MM_HeapRegionManager *regionManager = extensions->heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	MM_HeapRegionDescriptor *region = NULL;
	OMR_VMThread *omrVMThread = env->getOmrVMThread();
	while (NULL != (region = regionIterator.nextRegion())) {
		if (typeFlags == (region->getTypeFlags() & typeFlags)) {
			/* Optimization to avoid virtual dispatch for every slot in the system */
			omrobjectptr_t object = NULL;
			GC_ObjectHeapIteratorAddressOrderedList liveObjectIterator(extensions, region, false);
			while (NULL != (object = liveObjectIterator.nextObject())) {
				function(omrVMThread, region, object, userData);
			}
		}
	}
}
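/*
 * A minimal usage sketch for the walker above, assuming the
 * MM_HeapWalkerObjectFunc signature matches the call site
 * (omrVMThread, region, object, userData). ObjectCount, countLiveObjects and
 * the zero walk-flags argument are hypothetical, not OMR API.
 */
struct ObjectCount {
	uintptr_t liveObjects;
};

static void
countLiveObjects(OMR_VMThread *omrVMThread, MM_HeapRegionDescriptor *region, omrobjectptr_t object, void *userData)
{
	((ObjectCount *)userData)->liveObjects += 1;
}

/* ... then, during a stop-the-world pause, with a valid env and heapWalker: */
ObjectCount count = { 0 };
heapWalker->allObjectsDo(env, countLiveObjects, &count, 0, false, true);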
/**
 * Walk all segments and calculate the maximum number of chunks needed to represent the current heap.
 * The chunk calculation is done on a per segment basis (no chunk can represent memory from more than 1 segment),
 * and partial sized chunks (ie: less than the chunk size) are reserved for any remaining space at the end of a
 * segment.
 * @return number of chunks required to represent the current heap memory.
 */
uintptr_t
MM_SweepHeapSectioningSegmented::calculateActualChunkNumbers() const
{
	uintptr_t totalChunkCount = 0;

	MM_HeapRegionDescriptor *region = NULL;
	MM_Heap *heap = _extensions->heap;
	MM_HeapRegionManager *regionManager = heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);

	while (NULL != (region = regionIterator.nextRegion())) {
		if (region->isCommitted()) {
			/* TODO: this must be rethought for Tarok since it treats all regions identically but some might require different sweep logic */
			MM_MemorySubSpace *subspace = region->getSubSpace();
			/* if this is a committed region, it requires a non-NULL subspace */
			Assert_MM_true(NULL != subspace);
			uintptr_t poolCount = subspace->getMemoryPoolCount();

			totalChunkCount += MM_Math::roundToCeiling(_extensions->parSweepChunkSize, region->getSize()) / _extensions->parSweepChunkSize;

			/* Add extra chunks if more than one memory pool */
			totalChunkCount += (poolCount - 1);
		}
	}

	return totalChunkCount;
}
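/*
 * A worked sketch of the per-region arithmetic above, assuming
 * MM_Math::roundToCeiling(alignment, value) rounds value up to the nearest
 * multiple of alignment. chunksForRegion() is a hypothetical standalone
 * equivalent, not an OMR API.
 */
#include <cstdint>

static uintptr_t
chunksForRegion(uintptr_t regionSize, uintptr_t chunkSize, uintptr_t poolCount)
{
	uintptr_t fullAndPartial = (regionSize + chunkSize - 1) / chunkSize; /* ceiling division */
	return fullAndPartial + (poolCount - 1); /* one extra chunk per additional pool */
}

/* e.g. a 1,000,000-byte committed region with 262,144-byte chunks and 2 pools:
 * ceil(1000000 / 262144) = 4 chunks (3 full + 1 partial), + 1 extra = 5 */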
void
MM_MarkMap::initializeMarkMap(MM_EnvironmentBase *env)
{
	/* TODO: The multiplier should really be some constant defined globally */
	const uintptr_t MODRON_PARALLEL_MULTIPLIER = 32;
	uintptr_t heapAlignment = _extensions->heapAlignment;

	/* Determine the size of heap that a work unit of mark map clearing corresponds to */
	uintptr_t heapClearUnitFactor = env->_currentTask->getThreadCount();
	heapClearUnitFactor = ((heapClearUnitFactor == 1) ? 1 : heapClearUnitFactor * MODRON_PARALLEL_MULTIPLIER);
	uintptr_t heapClearUnitSize = _extensions->heap->getMemorySize() / heapClearUnitFactor;
	heapClearUnitSize = MM_Math::roundToCeiling(heapAlignment, heapClearUnitSize);

	/* Walk all object segments to determine what ranges of the mark map should be cleared */
	MM_HeapRegionDescriptor *region = NULL;
	MM_Heap *heap = _extensions->getHeap();
	MM_HeapRegionManager *regionManager = heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	while (NULL != (region = regionIterator.nextRegion())) {
		if (region->isCommitted()) {
			/* Walk the segment in heapClearUnitSize-sized chunks, checking if the corresponding mark map
			 * range should be cleared.
			 */
			uint8_t *heapClearAddress = (uint8_t *)region->getLowAddress();
			uintptr_t heapClearSizeRemaining = region->getSize();

			while (0 != heapClearSizeRemaining) {
				/* Calculate the size of heap that is to be processed */
				uintptr_t heapCurrentClearSize = (heapClearUnitSize > heapClearSizeRemaining) ? heapClearSizeRemaining : heapClearUnitSize;
				Assert_MM_true(heapCurrentClearSize > 0);

				/* Check if the thread should clear the corresponding mark map range for the current heap range */
				if (J9MODRON_HANDLE_NEXT_WORK_UNIT(env)) {
					/* Convert the heap address/size to its corresponding mark map address/size */
					/* NOTE: We calculate the low and high heap offsets, and build the mark map index and size values
					 * from these to avoid rounding errors (if we use the size, the conversion routine could get a different
					 * rounding result than the actual end address)
					 */
					uintptr_t heapClearOffset = ((uintptr_t)heapClearAddress) - _heapMapBaseDelta;
					uintptr_t heapMapClearIndex = convertHeapIndexToHeapMapIndex(env, heapClearOffset, sizeof(uintptr_t));
					uintptr_t heapMapClearSize = convertHeapIndexToHeapMapIndex(env, heapClearOffset + heapCurrentClearSize, sizeof(uintptr_t)) - heapMapClearIndex;

					/* And clear the mark map */
					OMRZeroMemory((void *)(((uintptr_t)_heapMapBits) + heapMapClearIndex), heapMapClearSize);
				}

				/* Move to the next address range in the segment */
				heapClearAddress += heapCurrentClearSize;
				heapClearSizeRemaining -= heapCurrentClearSize;
			}
		}
	}
}
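/*
 * An illustrative sketch of the work-unit splitting idiom behind
 * J9MODRON_HANDLE_NEXT_WORK_UNIT, not the OMR implementation: every thread
 * walks the same deterministic sequence of units, and a shared counter lets
 * exactly one thread claim each unit. A thread that gets false simply skips
 * the heap range, exactly as the if-block above does. nextUnit, myUnit and
 * handleNextWorkUnit are hypothetical.
 */
#include <atomic>
#include <cstdint>

static std::atomic<uintptr_t> nextUnit{0}; /* next unclaimed unit, shared by all threads */
static thread_local uintptr_t myUnit = 0;  /* this thread's position in the walk */

static bool
handleNextWorkUnit()
{
	uintptr_t expected = myUnit++;
	/* exactly one thread advances the counter from expected to expected + 1,
	 * and that thread owns the unit; every other thread fails the CAS and skips it */
	return nextUnit.compare_exchange_strong(expected, expected + 1);
}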
void *
MM_HeapRegionManagerTarok::findHighestValidAddressBelow(MM_HeapRegionDescriptor *targetRegion)
{
	void *lowValidAddress = _lowTableEdge;
	uintptr_t targetIndex = mapDescriptorToRegionTableIndex(targetRegion);
	uintptr_t cursorIndex = 0;
	while (cursorIndex < targetIndex) {
		MM_HeapRegionDescriptor *cursorRegion = mapRegionTableIndexToDescriptor(cursorIndex);
		if (cursorRegion->_isAllocated) {
			lowValidAddress = cursorRegion->getHighAddress();
		}
		cursorIndex++;
	}
	return lowValidAddress;
}
void *
MM_HeapRegionManagerTarok::findLowestValidAddressAbove(MM_HeapRegionDescriptor *targetRegion)
{
	void *highValidAddress = _highTableEdge;
	uintptr_t targetIndex = mapDescriptorToRegionTableIndex(targetRegion);
	uintptr_t cursorIndex = targetIndex + 1;
	while (cursorIndex < _tableRegionCount) {
		MM_HeapRegionDescriptor *cursorRegion = mapRegionTableIndexToDescriptor(cursorIndex);
		if (cursorRegion->_isAllocated) {
			highValidAddress = cursorRegion->getLowAddress();
			break;
		}
		cursorIndex++;
	}
	return highValidAddress;
}
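/*
 * The two lookups above are symmetric scans over the region table: the
 * "below" search must keep scanning so the last hit is the highest allocated
 * region, while the "above" search can stop at the first hit. A toy sketch of
 * the same idea over a plain bool array (a hypothetical stand-in for the
 * descriptor table; the sentinel returns play the role of _lowTableEdge and
 * _highTableEdge):
 */
#include <cstddef>

/* highest allocated index below target, or -1 to mean "fall back to the low table edge" */
static ptrdiff_t
highestAllocatedBelow(const bool *allocated, size_t target)
{
	ptrdiff_t result = -1;
	for (size_t i = 0; i < target; i++) {
		if (allocated[i]) {
			result = (ptrdiff_t)i; /* keep going: a later hit is higher */
		}
	}
	return result;
}

/* lowest allocated index above target, or count to mean "fall back to the high table edge" */
static size_t
lowestAllocatedAbove(const bool *allocated, size_t target, size_t count)
{
	for (size_t i = target + 1; i < count; i++) {
		if (allocated[i]) {
			return i; /* first hit is already the lowest */
		}
	}
	return count;
}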
MM_HeapRegionDescriptor *
MM_HeapRegionManagerTarok::internalAcquireSingleTableRegion(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, uintptr_t freeListIndex)
{
	Assert_MM_true(NULL != _freeRegionTable[freeListIndex]);
	/* since we only need one region, always return the first free region */
	MM_HeapRegionDescriptor *toReturn = _freeRegionTable[freeListIndex];
	_freeRegionTable[freeListIndex] = toReturn->_nextInSet;
	toReturn->_nextInSet = NULL;
	toReturn->_isAllocated = true;
	toReturn->associateWithSubSpace(subSpace);
	_totalHeapSize += toReturn->getSize();
	return toReturn;
}
void
MM_HeapRegionManagerTarok::setNodeAndLinkRegions(MM_EnvironmentBase *env, void *lowHeapEdge, void *highHeapEdge, uintptr_t numaNode)
{
	uintptr_t regionCount = 0;
	MM_HeapRegionDescriptor *firstRegion = NULL;
	Trc_MM_HeapRegionManager_enableRegionsInTable_Entry(env->getLanguageVMThread(), lowHeapEdge, highHeapEdge, numaNode);
	if (highHeapEdge > lowHeapEdge) {
		for (uint8_t *address = (uint8_t *)lowHeapEdge; address < highHeapEdge; address += getRegionSize()) {
			MM_HeapRegionDescriptor *region = tableDescriptorForAddress(address);
			region->setNumaNode(numaNode);
			regionCount += 1;
		}
		firstRegion = tableDescriptorForAddress(lowHeapEdge);
		firstRegion->_nextInSet = _freeRegionTable[numaNode];
		_freeRegionTable[numaNode] = firstRegion;
		internalLinkRegions(env, firstRegion, regionCount);
	}
	Trc_MM_HeapRegionManager_enableRegionsInTable_Exit(env->getLanguageVMThread(), regionCount, firstRegion, numaNode);
}
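/*
 * _freeRegionTable[i] is managed as an intrusive singly-linked list threaded
 * through _nextInSet: setNodeAndLinkRegions() pushes a freshly enabled run of
 * regions onto the per-node list, and internalAcquireSingleTableRegion() pops
 * the head. A minimal standalone sketch of that push/pop pattern (Node, push
 * and pop are hypothetical stand-ins, not OMR types):
 */
#include <cassert>
#include <cstddef>

struct Node {
	Node *next; /* plays the role of _nextInSet */
};

static void
push(Node **head, Node *node) /* cf. setNodeAndLinkRegions */
{
	node->next = *head;
	*head = node;
}

static Node *
pop(Node **head) /* cf. internalAcquireSingleTableRegion */
{
	assert(NULL != *head); /* cf. the Assert_MM_true on the free list */
	Node *top = *head;
	*head = top->next;
	top->next = NULL;
	return top;
}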
void
MM_ConcurrentOverflow::handleOverflow(MM_EnvironmentBase *env)
{
	MM_EnvironmentStandard *envStandard = MM_EnvironmentStandard::getEnvironment(env);

	if (envStandard->_currentTask->synchronizeGCThreadsAndReleaseMaster(env, UNIQUE_ID)) {
		_overflow = false;
		envStandard->_currentTask->releaseSynchronizedGCThreads(envStandard);
	}

	MM_Heap *heap = _extensions->heap;
	MM_HeapRegionManager *regionManager = heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	MM_HeapRegionDescriptor *region = NULL;

	MM_ConcurrentGC *collector = (MM_ConcurrentGC *)_extensions->getGlobalCollector();
	MM_CardCleanerForMarking cardCleanerForMarking(collector->getMarkingScheme());
	MM_ConcurrentCardTable *cardTable = collector->getCardTable();
	while (NULL != (region = regionIterator.nextRegion())) {
		cardTable->cleanCardTableForRange(envStandard, &cardCleanerForMarking, region->getLowAddress(), region->getHighAddress());
	}

	envStandard->_currentTask->synchronizeGCThreads(env, UNIQUE_ID);
}
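/*
 * The synchronize-and-release-master idiom above is a common parallel-GC
 * shape: all threads rendezvous, exactly one performs the shared reset, the
 * rest wait for it, then every thread does its share of the work and meets at
 * a final barrier. A minimal C++20 sketch of that shape (THREAD_COUNT,
 * rendezvous and handleOverflowShape are hypothetical; the OMR task
 * primitives are not reproduced here):
 */
#include <atomic>
#include <barrier>
#include <cstddef>

constexpr std::ptrdiff_t THREAD_COUNT = 4; /* assumed fixed team size */
static std::barrier<> rendezvous(THREAD_COUNT);
static std::atomic<bool> overflow{true};

static void
handleOverflowShape(unsigned threadId)
{
	rendezvous.arrive_and_wait(); /* cf. synchronizeGCThreadsAndReleaseMaster */
	if (0 == threadId) {
		overflow.store(false); /* shared reset performed by exactly one thread */
	}
	rendezvous.arrive_and_wait(); /* cf. releaseSynchronizedGCThreads */

	/* ... each thread cleans its card-table ranges here ... */

	rendezvous.arrive_and_wait(); /* cf. the trailing synchronizeGCThreads */
}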
/**
 * Reset and reassign each chunk to a range of heap memory.
 * Given the current updated list of chunks and the corresponding heap memory, walk the chunk
 * list reassigning each chunk to an appropriate range of memory. This will clear each chunk
 * structure and then assign its basic values that connect it to a range of memory (base/top,
 * pool, segment, etc).
 * @return the total number of chunks in the system.
 */
uintptr_t
MM_SweepHeapSectioningSegmented::reassignChunks(MM_EnvironmentBase *env)
{
	MM_ParallelSweepChunk *chunk; /* Sweep table chunk (global) */
	MM_ParallelSweepChunk *previousChunk;
	uintptr_t totalChunkCount; /* Total chunks in system */

	MM_SweepHeapSectioningIterator sectioningIterator(this);

	totalChunkCount = 0;
	previousChunk = NULL;

	MM_HeapRegionManager *regionManager = _extensions->getHeap()->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);
	MM_HeapRegionDescriptor *region = NULL;

	while (NULL != (region = regionIterator.nextRegion())) {
		if (region->isCommitted()) {
			/* TODO: this must be rethought for Tarok since it treats all regions identically but some might require different sweep logic */
			uintptr_t *heapChunkBase = (uintptr_t *)region->getLowAddress(); /* Heap chunk base pointer */
			uintptr_t *regionHighAddress = (uintptr_t *)region->getHighAddress();

			while (heapChunkBase < regionHighAddress) {
				void *poolHighAddr;
				uintptr_t *heapChunkTop;
				MM_MemoryPool *pool;

				chunk = sectioningIterator.nextChunk();
				Assert_MM_true(chunk != NULL); /* Should never return NULL */
				totalChunkCount += 1;

				/* Clear all data in the chunk (including sweep implementation specific information) */
				chunk->clear();

				if (((uintptr_t)regionHighAddress - (uintptr_t)heapChunkBase) < _extensions->parSweepChunkSize) {
					/* corner case - we will wrap our address range */
					heapChunkTop = regionHighAddress;
				} else {
					/* normal case - just increment by the chunk size */
					heapChunkTop = (uintptr_t *)((uintptr_t)heapChunkBase + _extensions->parSweepChunkSize);
				}

				/* Find out if the range of memory we are considering spans 2 different pools. If it does,
				 * the current chunk can only be attributed to one, so we limit the upper range of the chunk
				 * to the first pool and will continue the assignment at the upper address range.
				 */
				pool = region->getSubSpace()->getMemoryPool(env, heapChunkBase, heapChunkTop, poolHighAddr);
				if (NULL == poolHighAddr) {
					heapChunkTop = (heapChunkTop > regionHighAddress ? regionHighAddress : heapChunkTop);
				} else {
					/* Yes ..so adjust chunk boundaries */
					assume0(poolHighAddr > heapChunkBase && poolHighAddr < heapChunkTop);
					heapChunkTop = (uintptr_t *)poolHighAddr;
				}

				/* All values for the chunk have been calculated - assign them */
				chunk->chunkBase = (void *)heapChunkBase;
				chunk->chunkTop = (void *)heapChunkTop;
				chunk->memoryPool = pool;
				chunk->_coalesceCandidate = (heapChunkBase != region->getLowAddress());
				chunk->_previous = previousChunk;
				if (NULL != previousChunk) {
					previousChunk->_next = chunk;
				}

				/* Move to the next chunk */
				heapChunkBase = heapChunkTop;

				/* and remember address of previous chunk */
				previousChunk = chunk;

				assume0((uintptr_t)heapChunkBase == MM_Math::roundToCeiling(_extensions->heapAlignment, (uintptr_t)heapChunkBase));
			}
		}
	}

	if (NULL != previousChunk) {
		previousChunk->_next = NULL;
	}

	return totalChunkCount;
}
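/*
 * A worked sketch of the single-pool case of the loop above (poolHighAddr
 * stays NULL, so each chunk is either full-sized or the clamped tail).
 * printChunks is a hypothetical standalone walk, not an OMR API.
 */
#include <cstdint>
#include <cstdio>

static void
printChunks(uintptr_t base, uintptr_t top, uintptr_t chunkSize)
{
	for (uintptr_t chunkBase = base; chunkBase < top; ) {
		/* mirror the corner-case / normal-case split above */
		uintptr_t chunkTop = ((top - chunkBase) < chunkSize) ? top : (chunkBase + chunkSize);
		std::printf("chunk [%p, %p) size %zu\n", (void *)chunkBase, (void *)chunkTop, (size_t)(chunkTop - chunkBase));
		chunkBase = chunkTop; /* next chunk starts where this one ended */
	}
}

/* e.g. printChunks(0x100000, 0x210000, 0x40000) covers 1MB + 64KB with
 * 256KB chunks: four full chunks followed by one 64KB tail chunk */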