// Marks every GC root: conservative roots from the machine threads and the
// register file, protected objects, temp sort vectors, argument buffers, the
// pending exception, strong handles, the handle stack, and finally weak
// handles. Must only run while no other heap operation is in progress.
// NOTE(review): this file contains several historical revisions of this
// function; they cannot coexist in one translation unit — confirm intent.
void Heap::markRoots()
{
    ASSERT(isValidThreadState(m_globalData));
    // Guard against re-entrant collection: marking while another heap
    // operation is underway would corrupt mark state.
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    // &dummy is passed to the conservative scan below — presumably to mark
    // the current stack extent; TODO confirm against gatherConservativeRoots.
    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_blocks);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(&m_blocks);
    registerFile().gatherConservativeRoots(registerFileRoots);

    clearMarks();

    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    // Each root category is appended and then drained before moving on, so
    // the visitor's work queue is empty between categories.
    visitor.append(machineThreadRoots);
    visitor.drain();

    visitor.append(registerFileRoots);
    visitor.drain();

    markProtectedObjects(heapRootVisitor);
    visitor.drain();

    markTempSortVectors(heapRootVisitor);
    visitor.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
    // Keep any in-flight exception object alive across the collection.
    if (m_globalData->exception)
        heapRootVisitor.visit(&m_globalData->exception);
    visitor.drain();

    m_handleHeap.visitStrongHandles(heapRootVisitor);
    visitor.drain();

    m_handleStack.visit(heapRootVisitor);
    visitor.drain();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability. Iterate to a fixed point: each
    // pass may add opaque roots that make further weak handles reachable.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = visitor.opaqueRootCount();
        m_handleHeap.visitWeakHandles(heapRootVisitor);
        visitor.drain();
        // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != visitor.opaqueRootCount());

    visitor.reset();

    m_operationInProgress = NoOperation;
}
// Marks all GC roots, instrumented with GCPHASE timing blocks. When GGC
// (generational GC) is enabled and this is not a full collection, only dirty
// cells are re-scanned instead of clearing all marks; otherwise marks are
// cleared and everything is re-marked. Also marks code blocks that are
// actively being compiled and traces jettisoned code blocks.
void Heap::markRoots(bool fullGC)
{
    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    // fullGC is only read under ENABLE(GGC); suppress the warning otherwise.
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));
    // Guard against re-entrant collection.
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    // &dummy is passed to the machine-thread conservative scan — presumably
    // marks the current stack extent; TODO confirm.
    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks());
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks());
    // Clear jettisoned-code-block marks before the register-file scan can
    // re-mark the ones still referenced from the stack.
    m_jettisonedCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_jettisonedCodeBlocks);
    }
    // Any jettisoned code block not marked by the register-file scan is dead.
    m_jettisonedCodeBlocks.deleteUnmarkedCodeBlocks();

#if ENABLE(GGC)
    // Young (non-full) collection: gather dirty cells instead of clearing
    // all marks, so only mutated objects are revisited.
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        clearMarks();
    }

    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

#if ENABLE(GGC)
    {
        size_t dirtyCellCount = dirtyCells.size();
        GCPHASE(VisitDirtyCells);
        GCCOUNTER(DirtyCellCount, dirtyCellCount);
        for (size_t i = 0; i < dirtyCellCount; i++) {
            heapRootVisitor.visitChildren(dirtyCells[i]);
            visitor.drain();
        }
    }
#endif

    // Code blocks under active compilation must stay alive even though they
    // are not yet reachable through the usual root sets.
    if (m_globalData->codeBlocksBeingCompiled.size()) {
        GCPHASE(VisitActiveCodeBlock);
        for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
            m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
    }

    // Visit each root category, draining the visitor between categories.
    {
        GCPHASE(VisitMachineRoots);
        visitor.append(machineThreadRoots);
        visitor.drain();
    }
    {
        GCPHASE(VisitRegisterFileRoots);
        visitor.append(registerFileRoots);
        visitor.drain();
    }
    {
        GCPHASE(VisitProtectedObjects);
        markProtectedObjects(heapRootVisitor);
        visitor.drain();
    }
    {
        GCPHASE(VisitTempSortVectors);
        markTempSortVectors(heapRootVisitor);
        visitor.drain();
    }
    {
        GCPHASE(MarkingArgumentBuffers);
        if (m_markListSet && m_markListSet->size()) {
            MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
            visitor.drain();
        }
    }
    // Keep any in-flight exception object alive.
    if (m_globalData->exception) {
        GCPHASE(MarkingException);
        heapRootVisitor.visit(&m_globalData->exception);
        visitor.drain();
    }
    {
        GCPHASE(VisitStrongHandles);
        m_handleHeap.visitStrongHandles(heapRootVisitor);
        visitor.drain();
    }
    {
        GCPHASE(HandleStack);
        m_handleStack.visit(heapRootVisitor);
        visitor.drain();
    }
    {
        GCPHASE(TraceCodeBlocks);
        m_jettisonedCodeBlocks.traceCodeBlocks(visitor);
        visitor.drain();
    }

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability. Iterate to a fixed point.
    {
        GCPHASE(VisitingWeakHandles);
        int lastOpaqueRootCount;
        do {
            lastOpaqueRootCount = visitor.opaqueRootCount();
            m_handleHeap.visitWeakHandles(heapRootVisitor);
            visitor.drain();
            // If the set of opaque roots has grown, more weak handles may have become reachable.
        } while (lastOpaqueRootCount != visitor.opaqueRootCount());
    }
    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    visitor.reset();

    m_operationInProgress = NoOperation;
}
// Marks all GC roots (VM-era revision). Supports parallel marking: root
// visiting happens inside a ParallelModeEnabler scope where the visitor
// donates work to shared queues (donateAndDrain), with a final Convergence
// phase when PARALLEL_GC is enabled. Also records bytes visited/copied for
// heap accounting and optional object-mark logging.
void Heap::markRoots()
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::monotonicallyIncreasingTime();
#endif

    // &dummy is passed to the machine-thread conservative scan — presumably
    // marks the current stack extent; TODO confirm.
    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    // Clear code-block marks before the stack scan can re-mark live ones.
    m_codeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(stackRoots, m_jitStubRoutines, m_codeBlocks);
        stack().sanitizeStack();
    }

#if ENABLE(DFG_JIT)
    // DFG scratch buffers may hold the only reference to live objects.
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_vm->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

    {
        GCPHASE(ClearLivenessData);
        m_objectSpace.clearNewlyAllocated();
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        // Inside this scope the visitor may donate work to parallel markers.
        ParallelModeEnabler enabler(visitor);

        m_vm->smallStrings.visitStrongReferences(visitor);

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        // Keep any in-flight exception object alive.
        if (m_vm->exception()) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(m_vm->addressOfException());
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_codeBlocks.traceMarked(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }
#if ENABLE(PARALLEL_GC)
        {
            // Wait for all parallel markers to finish their shared work.
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph. Iterate until visiting
    // weak sets produces no new work.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::monotonicallyIncreasingTime() - gcStartTime);
#endif

    // Record marking totals, including work done by parallel child visitors.
    m_totalBytesVisited = visitor.bytesVisited();
    m_totalBytesCopied = visitor.bytesCopied();
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
// Marks all GC roots (DFG/copied-space revision). Marking doubles as the
// copy phase for the storage space: it is bracketed by
// m_storageSpace.startedCopying() / doneCopying(). Supports GGC young
// collections (dirty cells) and parallel marking via ParallelModeEnabler.
void Heap::markRoots(bool fullGC)
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    // fullGC is only read under ENABLE(GGC); suppress the warning otherwise.
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));
    // Guard against re-entrant collection.
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    // &dummy is passed to the machine-thread conservative scan — presumably
    // marks the current stack extent; TODO confirm.
    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    // Clear DFG code-block marks before the register-file scan can re-mark
    // the ones still referenced from the stack.
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks);
    }

#if ENABLE(GGC)
    // Young (non-full) collection: revisit only dirty cells.
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        clearMarks();
    }

    // Marking also copies live backing stores; open the copy phase here.
    m_storageSpace.startedCopying();
    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    {
        // Inside this scope the visitor may donate work to parallel markers.
        ParallelModeEnabler enabler(visitor);

#if ENABLE(GGC)
        {
            size_t dirtyCellCount = dirtyCells.size();
            GCPHASE(VisitDirtyCells);
            GCCOUNTER(DirtyCellCount, dirtyCellCount);
            for (size_t i = 0; i < dirtyCellCount; i++) {
                heapRootVisitor.visitChildren(dirtyCells[i]);
                visitor.donateAndDrain();
            }
        }
#endif

        // Code blocks under active compilation must stay alive even though
        // they are not yet reachable through the usual root sets.
        if (m_globalData->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
                m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        {
            GCPHASE(VisitMachineRoots);
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitRegisterFileRoots);
            visitor.append(registerFileRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitProtectedObjects);
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        // Keep any in-flight exception object alive.
        if (m_globalData->exception) {
            GCPHASE(MarkingException);
            heapRootVisitor.visit(&m_globalData->exception);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStrongHandles);
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(HandleStack);
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(TraceCodeBlocks);
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            visitor.donateAndDrain();
        }
#if ENABLE(PARALLEL_GC)
        {
            // Wait for all parallel markers to finish their shared work.
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph. Iterate until visiting
    // live weak impls produces no new work.
    {
        GCPHASE(VisitingLiveWeakHandles);
        while (true) {
            m_weakSet.visitLiveWeakImpls(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    {
        GCPHASE(VisitingDeadWeakHandles);
        m_weakSet.visitDeadWeakImpls(heapRootVisitor);
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    // Close the copy phase opened by startedCopying() above.
    visitor.doneCopying();
    visitor.reset();
    m_sharedData.reset();
    m_storageSpace.doneCopying();

    m_operationInProgress = NoOperation;
}
// Marks all GC roots (JIT-stub-routine revision). Combines GGC young/full
// collection, DFG scratch-buffer roots, parallel marking, and optional
// object-mark logging (timed with WTF::currentTime()).
void Heap::markRoots(bool fullGC)
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    // fullGC is only read under ENABLE(GGC); suppress the warning otherwise.
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::currentTime();
#endif

    // &dummy is passed to the machine-thread conservative scan — presumably
    // marks the current stack extent; TODO confirm.
    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    // Clear DFG code-block marks before the stack scan can re-mark live ones.
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(
            stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    // DFG scratch buffers may hold the only reference to live objects.
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_globalData->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

#if ENABLE(GGC)
    // Young (non-full) collection: revisit only dirty cells.
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        // Inside this scope the visitor may donate work to parallel markers.
        ParallelModeEnabler enabler(visitor);

#if ENABLE(GGC)
        {
            size_t dirtyCellCount = dirtyCells.size();
            GCPHASE(VisitDirtyCells);
            GCCOUNTER(DirtyCellCount, dirtyCellCount);
            for (size_t i = 0; i < dirtyCellCount; i++) {
                heapRootVisitor.visitChildren(dirtyCells[i]);
                visitor.donateAndDrain();
            }
        }
#endif

        // Code blocks under active compilation must stay alive even though
        // they are not yet reachable through the usual root sets.
        if (m_globalData->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
                m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        // Keep any in-flight exception object alive.
        if (m_globalData->exception) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(&m_globalData->exception);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }
#if ENABLE(PARALLEL_GC)
        {
            // Wait for all parallel markers to finish their shared work.
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph. Iterate until visiting
    // weak sets produces no new work.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
// Marks all GC roots (oldest revision, MarkStack-based, single-threaded).
// Also marks the small-strings cache late so it can self-clear if otherwise
// unreferenced, and iterates weak-handle marking to an opaque-root fixed
// point. Must only run while no other heap operation is in progress.
void Heap::markRoots()
{
#ifndef NDEBUG
    // In the shared-VM configuration, marking requires the JSLock to be held
    // by this thread.
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    // &dummy is passed to the machine-thread conservative scan — presumably
    // marks the current stack extent; TODO confirm.
    void* dummy;

    // Guard against re-entrant collection.
    ASSERT(m_operationInProgress == NoOperation);
    if (m_operationInProgress != NoOperation)
        CRASH();

    m_operationInProgress = Collection;

    MarkStack& markStack = m_markStack;
    HeapRootMarker heapRootMarker(markStack);

    // We gather conservative roots before clearing mark bits because
    // conservative gathering uses the mark bits from our last mark pass to
    // determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(this);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(this);
    registerFile().gatherConservativeRoots(registerFileRoots);

    m_markedSpace.clearMarks();

    // Visit each root category, draining the mark stack between categories.
    markStack.append(machineThreadRoots);
    markStack.drain();

    markStack.append(registerFileRoots);
    markStack.drain();

    markProtectedObjects(heapRootMarker);
    markStack.drain();

    markTempSortVectors(heapRootMarker);
    markStack.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootMarker, *m_markListSet);
    // Keep any in-flight exception object alive.
    if (m_globalData->exception)
        heapRootMarker.mark(&m_globalData->exception);
    markStack.drain();

    m_handleHeap.markStrongHandles(heapRootMarker);
    markStack.drain();

    m_handleStack.mark(heapRootMarker);
    markStack.drain();

    // Mark the small strings cache as late as possible, since it will clear
    // itself if nothing else has marked it.
    // FIXME: Change the small strings cache to use Weak<T>.
    m_globalData->smallStrings.markChildren(heapRootMarker);
    markStack.drain();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability. Iterate to a fixed point: each
    // pass may add opaque roots that make further weak handles reachable.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = markStack.opaqueRootCount();
        m_handleHeap.markWeakHandles(heapRootMarker);
        markStack.drain();
        // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != markStack.opaqueRootCount());

    markStack.reset();

    m_operationInProgress = NoOperation;
}