Example #1
void Heap::collect(SweepToggle sweepToggle)
{
    GCPHASE(Collect);
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
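    // Even without an explicit sweep request, promote to a full collection when
    // the heap has grown past 4x its size at the end of the last full collection.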
    if (!fullGC)
        fullGC = (capacity() > 4 * m_lastFullGCSize);  
#else
    bool fullGC = true;
#endif
    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);

    {
        GCPHASE(HarvestWeakReferences);
        harvestWeakReferences();
        m_handleHeap.finalizeWeakHandles();
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocator();
    }

    if (sweepToggle == DoSweep) {
        GCPHASE(Sweeping);
        sweep();
        shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
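    // Worked example with hypothetical numbers: if the live heap is 8MB after
    // this collection, the high water mark becomes 16MB, so another 8MB of new
    // allocation (1:1 with heap size) can occur before the next collection.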
    size_t newSize = size();
    size_t proportionalBytes = 2 * newSize;
    if (fullGC) {
        m_lastFullGCSize = newSize;
        m_objectSpace.setHighWaterMark(max(proportionalBytes, m_minBytesPerCycle));
    }
    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}
Example #2
void Heap::willStartCollection(HeapOperation collectionType)
{
    GCPHASE(StartingCollection);
    if (shouldDoFullCollection(collectionType)) {
        m_operationInProgress = FullCollection;
        m_slotVisitor.clearMarkStack();
        m_shouldDoFullCollection = false;
        if (Options::logGC())
            dataLog("FullCollection, ");
    } else {
        m_operationInProgress = EdenCollection;
        if (Options::logGC())
            dataLog("EdenCollection, ");
    }
    if (m_operationInProgress == FullCollection) {
        m_sizeBeforeLastFullCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
        m_extraMemorySize = 0;
        m_deprecatedExtraMemorySize = 0;

        if (m_fullActivityCallback)
            m_fullActivityCallback->willCollect();
    } else {
        ASSERT(m_operationInProgress == EdenCollection);
        m_sizeBeforeLastEdenCollect = m_sizeAfterLastCollect + m_bytesAllocatedThisCycle;
    }

    if (m_edenActivityCallback)
        m_edenActivityCallback->willCollect();
}
Example #3
void Heap::didFinishCollection(double gcStartTime)
{
    GCPHASE(FinishingCollection);
    double gcEndTime = WTF::monotonicallyIncreasingTime();
    if (m_operationInProgress == FullCollection)
        m_lastFullGCLength = gcEndTime - gcStartTime;
    else
        m_lastEdenGCLength = gcEndTime - gcStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(gcStartTime, gcEndTime);

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC() == GCLogging::Verbose)
        GCLogging::dumpObjectGraph(this);

    RELEASE_ASSERT(m_operationInProgress == EdenCollection || m_operationInProgress == FullCollection);
    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();
}
Example #4
void Heap::converge()
{
#if ENABLE(PARALLEL_GC)
    GCPHASE(Convergence);
    m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}
Example #5
void Heap::deleteUnmarkedCompiledCode()
{
    GCPHASE(DeleteCodeBlocks);
    clearUnmarkedExecutables();
    m_codeBlocks.deleteUnmarkedAndUnreferenced(m_operationInProgress);
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}
Example #6
void Heap::stopAllocation()
{
    GCPHASE(StopAllocation);
    m_objectSpace.stopAllocating();
    if (m_operationInProgress == FullCollection)
        m_storageSpace.didStartFullCollection();
}
Example #7
void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

#if ENABLE(DFG_JIT)
    DFG::clearCodeBlockMarks(*m_vm);
#endif
    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
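    // ("Conservative" here means any word on the stack or in registers that
    // looks like a pointer into the heap is treated as a potential root; false
    // positives may keep dead objects alive, but live objects are never missed.)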
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}
Example #8
void Heap::removeDeadCompilerWorklistEntries()
{
#if ENABLE(DFG_JIT)
    GCPHASE(FinalizeDFGWorklists);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->removeDeadPlans(*m_vm);
#endif
}
Example #9
void Heap::pruneStaleEntriesFromWeakGCMaps()
{
    GCPHASE(PruningStaleEntriesFromWeakGCMaps);
    if (m_operationInProgress != FullCollection)
        return;
    for (auto& pruneCallback : m_weakGCMaps.values())
        pruneCallback();
}
Example #10
void Heap::gatherJSStackRoots(ConservativeRoots& roots)
{
#if !ENABLE(JIT)
    GCPHASE(GatherJSStackRoots);
    stack().gatherConservativeRoots(roots, m_jitStubRoutines, m_codeBlocks);
#else
    UNUSED_PARAM(roots);
#endif
}
Example #11
void Heap::resumeCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(ResumeCompilerThreads);
    for (auto worklist : m_suspendedCompilerWorklists)
        worklist->resumeAllThreads();
    m_suspendedCompilerWorklists.clear();
#endif
}
Example #12
void Heap::flushWriteBarrierBuffer()
{
    GCPHASE(FlushWriteBarrierBuffer);
    if (m_operationInProgress == EdenCollection) {
        m_writeBarrierBuffer.flush(*this);
        return;
    }
    m_writeBarrierBuffer.reset();
}
Example #13
void Heap::gatherScratchBufferRoots(ConservativeRoots& roots)
{
#if ENABLE(DFG_JIT)
    GCPHASE(GatherScratchBufferRoots);
    m_vm->gatherConservativeRoots(roots);
#else
    UNUSED_PARAM(roots);
#endif
}
Example #14
void Heap::visitConservativeRoots(ConservativeRoots& roots)
{
    GCPHASE(VisitConservativeRoots);
    m_slotVisitor.append(roots);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Conservative Roots:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
Example #15
void Heap::clearRememberedSet(Vector<const JSCell*>& rememberedSet)
{
#if ENABLE(GGC)
    GCPHASE(ClearRememberedSet);
    for (auto* cell : rememberedSet)
        const_cast<JSCell*>(cell)->setRemembered(false);
#else
    UNUSED_PARAM(rememberedSet);
#endif
}
Example #16
void Heap::visitHandleStack(HeapRootVisitor& visitor)
{
    GCPHASE(VisitHandleStack);
    m_handleStack.visit(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Handle Stack:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
Example #17
void Heap::visitStrongHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitStrongHandles);
    m_handleSet.visitStrongHandles(visitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Strong Handles:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
Example #18
void Heap::notifyIncrementalSweeper()
{
    GCPHASE(NotifyIncrementalSweeper);

    if (m_operationInProgress == FullCollection) {
        if (!m_logicallyEmptyWeakBlocks.isEmpty())
            m_indexOfNextLogicallyEmptyWeakBlockToSweep = 0;
    }

    m_sweeper->startSweeping();
}
Example #19
void Heap::traceCodeBlocksAndJITStubRoutines()
{
    GCPHASE(TraceCodeBlocksAndJITStubRoutines);
    m_codeBlocks.traceMarked(m_slotVisitor);
    m_jitStubRoutines.traceMarkedStubRoutines(m_slotVisitor);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Code Blocks and JIT Stub Routines:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
Example #20
void Heap::visitSmallStrings()
{
    GCPHASE(VisitSmallStrings);
    if (!m_vm->smallStrings.needsToBeVisited(m_operationInProgress))
        return;

    m_vm->smallStrings.visitStrongReferences(m_slotVisitor);
    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Small strings:\n", m_slotVisitor);
    m_slotVisitor.donateAndDrain();
}
Example #21
void Heap::visitProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    GCPHASE(VisitProtectedObjects);

    for (auto& pair : m_protectedValues)
        heapRootVisitor.visit(&pair.key);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Protected Objects:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
Example #22
void Heap::suspendCompilerThreads()
{
#if ENABLE(DFG_JIT)
    GCPHASE(SuspendCompilerThreads);
    ASSERT(m_suspendedCompilerWorklists.isEmpty());
    for (unsigned i = DFG::numberOfWorklists(); i--;) {
        if (DFG::Worklist* worklist = DFG::worklistForIndexOrNull(i)) {
            m_suspendedCompilerWorklists.append(worklist);
            worklist->suspendAllThreads();
        }
    }
#endif
}
Example #23
void Heap::visitArgumentBuffers(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingArgumentBuffers);
    if (!m_markListSet || !m_markListSet->size())
        return;

    MarkedArgumentBuffer::markLists(visitor, *m_markListSet);

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Argument Buffers:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
Example #24
void Heap::visitException(HeapRootVisitor& visitor)
{
    GCPHASE(MarkingException);
    if (!m_vm->exception() && !m_vm->lastException())
        return;

    visitor.visit(m_vm->addressOfException());
    visitor.visit(m_vm->addressOfLastException());

    if (Options::logGC() == GCLogging::Verbose)
        dataLog("Exceptions:\n", m_slotVisitor);

    m_slotVisitor.donateAndDrain();
}
Example #25
void Heap::snapshotMarkedSpace()
{
    GCPHASE(SnapshotMarkedSpace);

    if (m_operationInProgress == EdenCollection) {
        m_blockSnapshot.appendVector(m_objectSpace.blocksWithNewObjects());
        // Sort and deduplicate the block snapshot since we might be appending to an unfinished work list.
        std::sort(m_blockSnapshot.begin(), m_blockSnapshot.end());
        m_blockSnapshot.shrink(std::unique(m_blockSnapshot.begin(), m_blockSnapshot.end()) - m_blockSnapshot.begin());
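        // std::unique moves the unique elements to the front and returns an
        // iterator one past them; shrinking to that point drops the duplicates.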
    } else {
        m_blockSnapshot.resizeToFit(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }
}
Example #26
void Heap::clearUnmarkedExecutables()
{
    GCPHASE(ClearUnmarkedExecutables);
    for (unsigned i = m_executables.size(); i--;) {
        ExecutableBase* current = m_executables[i];
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        std::swap(m_executables[i], m_executables.last());
        m_executables.removeLast();
    }
}
Example #27
void Heap::visitWeakHandles(HeapRootVisitor& visitor)
{
    GCPHASE(VisitingLiveWeakHandles);
    while (true) {
        m_objectSpace.visitWeakSets(visitor);
        harvestWeakReferences();
        visitCompilerWorklistWeakReferences();
        if (m_slotVisitor.isEmpty())
            break;

        if (Options::logGC() == GCLogging::Verbose)
            dataLog("Live Weak Handles:\n", m_slotVisitor);

        {
            ParallelModeEnabler enabler(m_slotVisitor);
            m_slotVisitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
            m_slotVisitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
        }
    }
}
Example #28
void Heap::copyBackingStores()
{
    GCPHASE(CopyBackingStores);
    if (m_operationInProgress == EdenCollection)
        m_storageSpace.startedCopying<EdenCollection>();
    else {
        ASSERT(m_operationInProgress == FullCollection);
        m_storageSpace.startedCopying<FullCollection>();
    }

    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks 
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}
Example #29
void Heap::updateAllocationLimits()
{
    GCPHASE(UpdateAllocationLimits);
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    if (m_operationInProgress == FullCollection) {
        // To avoid pathological GC churn in very small and very large heaps, we set
        // the new allocation limit based on the current size of the heap, with a
        // fixed minimum.
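        // Illustration with hypothetical numbers: if the heap is 100MB after a
        // full collection and proportionalHeapSize() returned 150MB, the new
        // limit is max(minimum, 150MB), leaving a 50MB eden budget below.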
        m_maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastFullCollect = currentHeapSize;
        m_bytesAbandonedSinceLastFullCollect = 0;
    } else {
        ASSERT(currentHeapSize >= m_sizeAfterLastCollect);
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        m_sizeAfterLastEdenCollect = currentHeapSize;
        double edenToOldGenerationRatio = (double)m_maxEdenSize / (double)m_maxHeapSize;
        double minEdenToOldGenerationRatio = 1.0 / 3.0;
        if (edenToOldGenerationRatio < minEdenToOldGenerationRatio)
            m_shouldDoFullCollection = true;
        m_maxHeapSize += currentHeapSize - m_sizeAfterLastCollect;
        m_maxEdenSize = m_maxHeapSize - currentHeapSize;
        if (m_fullActivityCallback) {
            ASSERT(currentHeapSize >= m_sizeAfterLastFullCollect);
            m_fullActivityCallback->didAllocate(currentHeapSize - m_sizeAfterLastFullCollect);
        }
    }

    m_sizeAfterLastCollect = currentHeapSize;
    m_bytesAllocatedThisCycle = 0;

    if (Options::logGC())
        dataLog(currentHeapSize / 1024, " kb, ");
}
Example #30
void Heap::markRoots(bool fullGC)
{
    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

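    // Local variable whose address approximates the current top of the stack
    // for the conservative scan below.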
    void* dummy;
    
    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks());
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks());
    m_jettisonedCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_jettisonedCodeBlocks);
    }
    m_jettisonedCodeBlocks.deleteUnmarkedCodeBlocks();
#if ENABLE(GGC)
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        clearMarks();
    }

    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

#if ENABLE(GGC)
    {
        size_t dirtyCellCount = dirtyCells.size();
        GCPHASE(VisitDirtyCells);
        GCCOUNTER(DirtyCellCount, dirtyCellCount);
        for (size_t i = 0; i < dirtyCellCount; i++) {
            heapRootVisitor.visitChildren(dirtyCells[i]);
            visitor.drain();
        }
    }
#endif
    
    if (m_globalData->codeBlocksBeingCompiled.size()) {
        GCPHASE(VisitActiveCodeBlock);
        for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
            m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
    }
    
    {
        GCPHASE(VisitMachineRoots);
        visitor.append(machineThreadRoots);
        visitor.drain();
    }
    {
        GCPHASE(VisitRegisterFileRoots);
        visitor.append(registerFileRoots);
        visitor.drain();
    }
    {
        GCPHASE(VisitProtectedObjects);
        markProtectedObjects(heapRootVisitor);
        visitor.drain();
    }
    {
        GCPHASE(VisitTempSortVectors);
        markTempSortVectors(heapRootVisitor);
        visitor.drain();
    }

    {
        GCPHASE(MarkingArgumentBuffers);
        if (m_markListSet && m_markListSet->size()) {
            MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
            visitor.drain();
        }
    }
    if (m_globalData->exception) {
        GCPHASE(MarkingException);
        heapRootVisitor.visit(&m_globalData->exception);
        visitor.drain();
    }
    
    {
        GCPHASE(VisitStrongHandles);
        m_handleHeap.visitStrongHandles(heapRootVisitor);
        visitor.drain();
    }
    
    {
        GCPHASE(HandleStack);
        m_handleStack.visit(heapRootVisitor);
        visitor.drain();
    }
    
    {
        GCPHASE(TraceCodeBlocks);
        m_jettisonedCodeBlocks.traceCodeBlocks(visitor);
        visitor.drain();
    }

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
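    // Marking weak handles can add new opaque roots, which can in turn make
    // more weak handles report their targets reachable, so the loop below
    // iterates to a fixed point: it stops once a pass adds no new opaque roots.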
    {
        GCPHASE(VisitingWeakHandles);
        int lastOpaqueRootCount;
        do {
            lastOpaqueRootCount = visitor.opaqueRootCount();
            m_handleHeap.visitWeakHandles(heapRootVisitor);
            visitor.drain();
            // If the set of opaque roots has grown, more weak handles may have become reachable.
        } while (lastOpaqueRootCount != visitor.opaqueRootCount());
    }
    GCCOUNTER(VisitedValueCount, visitor.visitCount());
    visitor.reset();

    m_operationInProgress = NoOperation;
}