// Runs a full garbage-collection cycle: marks live cells from the roots,
// finalizes dead ones, optionally sweeps, and recomputes the allocation
// limit that triggers the next collection.
// sweepToggle: DoSweep forces an eager sweep (and shrink) after marking;
// otherwise sweeping is left to happen lazily elsewhere — presumably via an
// incremental sweeper, but that is not visible from this function (verify).
// NOTE(review): the exact phase order below is load-bearing; do not reorder.
void Heap::collect(SweepToggle sweepToggle) {
    SamplingRegion samplingRegion("Garbage Collection");
    GCPHASE(Collect);
    // Collection must happen on a thread whose identifier table matches the
    // VM's — i.e. the thread that owns this VM.
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    m_activityCallback->willCollect();
    double lastGCStartTime = WTF::currentTime();
    // Throttle compiled-code discard: only drop all compiled code if more
    // than `minute` has elapsed since the last discard.
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        discardAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }
#if ENABLE(GGC)
    // Generational GC build: a sweep request forces a full collection, as
    // does the heap having grown past 4x its size after the last collection.
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_sizeAfterLastCollect);
#else
    // Without generational GC every collection is a full one.
    bool fullGC = true;
#endif
    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }
    // Trace the object graph; fullGC controls whether marking covers the
    // whole heap or (in GGC builds) only the young generation — TODO confirm
    // against markRoots' definition.
    markRoots(fullGC);
    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }
    {
        GCPHASE(FinalizeWeakHandles);
        // Sweep dead weak references, then finalize the small-strings cache.
        m_weakSet.sweep();
        m_globalData->smallStrings.finalizeSmallStrings();
    }
    JAVASCRIPTCORE_GC_MARKED();
    {
        GCPHASE(ResetAllocator);
        resetAllocators();
    }
    {
        GCPHASE(DeleteCodeBlocks);
        m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    }
    if (sweepToggle == DoSweep) {
        // Eager sweep: reclaim dead cells now and return unused capacity.
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        sweep();
        m_objectSpace.shrink();
        m_weakSet.shrink();
    }
    // To avoid pathological GC churn in large heaps, we set the new allocation
    // limit to be the current size of the heap. This heuristic
    // is a bit arbitrary. Using the current size of the heap after this
    // collection gives us a 2X multiplier, which is a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t newSize = size();
    if (fullGC) {
        // Only full collections reset the size baseline and the allocation
        // budget; m_minBytesPerCycle keeps the budget from collapsing when
        // the heap is tiny.
        m_sizeAfterLastCollect = newSize;
        m_bytesAllocatedLimit = max(newSize, m_minBytesPerCycle);
    }
    m_bytesAllocated = 0;
    // Record the wall-clock duration of this cycle for heuristics/telemetry.
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;
    JAVASCRIPTCORE_GC_END();
}
// Core collection routine (later-generation variant): suspends compiler
// threads, marks from the roots (including the conservative machine stack
// between stackOrigin and stackTop plus the captured callee-saved
// registers), then runs the finalize/sweep/reset phase sequence.
// collectionType: the kind of collection requested (e.g. eden vs. full —
//   interpreted by willStartCollection, not here).
// stackOrigin/stackTop: bounds of the current thread's stack for
//   conservative scanning.
// calleeSavedRegisters: register snapshot captured by the caller so live
//   pointers held only in registers are seen by the scan.
// NEVER_INLINE presumably keeps the caller's frame layout (and thus the
// captured stack bounds) well-defined — TODO confirm against the caller.
// NOTE(review): the phase order below is load-bearing; do not reorder.
NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters) {
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif
    // Optional timing log: bracket the whole collection with "[GC: ... ms]".
    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }
    SamplingRegion samplingRegion("Garbage Collection");
    // Drain the type-profiler log first, with GC deferred, so profiling data
    // isn't lost or processed mid-collection.
    if (vm()->typeProfiler()) {
        DeferGCForAWhile awhile(*this);
        vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
    }
    // Preconditions: GC must not be deferred, the current thread must hold
    // the API lock and own the VM's atomic-string table, and no other heap
    // operation may be in progress.
    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    // Stop concurrent compilation so it cannot observe the heap mid-cycle.
    suspendCompilerThreads();
    willStartCollection(collectionType);
    GCPHASE(Collect);
    double gcStartTime = WTF::monotonicallyIncreasingTime();
    if (m_verifier) {
        // Verify that live objects from the last GC cycle haven't been corrupted by
        // mutators before we begin this new GC cycle.
        m_verifier->verify(HeapVerifier::Phase::BeforeGC);
        m_verifier->initializeGCCycle();
        m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
    }
    // Pre-marking bookkeeping: retire stale structure-ID tables, halt
    // mutator allocation, and flush pending write-barrier entries so the
    // marker sees a consistent heap.
    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();
    // Trace the live object graph, conservatively scanning the given stack
    // range and register snapshot.
    markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);
    if (m_verifier) {
        // Record and check the post-marking live set.
        m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
        m_verifier->verify(HeapVerifier::Phase::AfterMarking);
    }
    JAVASCRIPTCORE_GC_MARKED();
    // Type-set caches may reference now-dead objects; invalidate them.
    if (vm()->typeProfiler())
        vm()->typeProfiler()->invalidateTypeSetCache();
    // Post-marking phases, in order: clear dead weak handles and stale weak
    // map entries, sweep array buffers, snapshot/copy backing stores, run
    // finalizers, drop dead compiler work and unmarked code, then hand the
    // rest to the incremental sweeper and reset allocators for the mutator.
    reapWeakHandles();
    pruneStaleEntriesFromWeakGCMaps();
    sweepArrayBuffers();
    snapshotMarkedSpace();
    copyBackingStores();
    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();
    resetAllocators();
    // Recompute the allocation budget for the next cycle, close out this
    // collection, and let compiler threads run again.
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();
    if (m_verifier) {
        // Drop dead entries from the verifier's records and check the
        // post-collection heap.
        m_verifier->trimDeadObjects();
        m_verifier->verify(HeapVerifier::Phase::AfterGC);
    }
    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}