Example #1
void Heap::collect(SweepToggle sweepToggle)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();
    m_handleHeap.finalizeWeakHandles();
    m_globalData->smallStrings.finalizeSmallStrings();

    JAVASCRIPTCORE_GC_MARKED();
    
    resetAllocator();

#if ENABLE(JSC_ZOMBIES)
    sweepToggle = DoSweep;
#endif

    if (sweepToggle == DoSweep) {
        sweep();
        shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t proportionalBytes = 2 * size();
    m_newSpace.setHighWaterMark(max(proportionalBytes, minBytesPerCycle));

    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}
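
The comment above spells out the sizing heuristic: after a collection, the high water mark is set to twice the live heap size, so the mutator gets roughly one byte of new allocation for every live byte before the next collection. A minimal standalone sketch of that arithmetic, with an illustrative minBytesPerCycle value (the real constant lives elsewhere in JavaScriptCore):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative floor; the real minBytesPerCycle is defined in JavaScriptCore.
static const size_t minBytesPerCycle = 512 * 1024;

static size_t highWaterMark(size_t heapSizeAfterCollect)
{
    // 2X multiplier: live bytes plus an equal budget of new allocation.
    size_t proportionalBytes = 2 * heapSizeAfterCollect;
    return std::max(proportionalBytes, minBytesPerCycle);
}

int main()
{
    // An 8 MB heap after collection yields a 16 MB high water mark,
    // i.e. another 8 MB of allocation before the next collection (1:1).
    size_t heapSize = 8 * 1024 * 1024;
    size_t mark = highWaterMark(heapSize);
    std::printf("heap %zu -> high water mark %zu (new-allocation budget %zu)\n",
                heapSize, mark, mark - heapSize);
    return 0;
}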
Example #2
void Heap::collect(SweepToggle sweepToggle)
{
    GCPHASE(Collect);
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_lastFullGCSize);  
#else
    bool fullGC = true;
#endif
    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);

    {
        GCPHASE(HarvestWeakReferences);
        harvestWeakReferences();
        m_handleHeap.finalizeWeakHandles();
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocator();
    }

    if (sweepToggle == DoSweep) {
        GCPHASE(Sweeping);
        sweep();
        shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t newSize = size();
    size_t proportionalBytes = 2 * newSize;
    if (fullGC) {
        m_lastFullGCSize = newSize;
        m_objectSpace.setHighWaterMark(max(proportionalBytes, m_minBytesPerCycle));
    }
    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}
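
Example #2 adds a generational (GGC) decision on top of example #1: an eager sweep always forces a full collection, and otherwise a full collection is only triggered once the heap's capacity grows past four times its size at the last full collection. A minimal sketch of that decision, with a hypothetical HeapState struct standing in for the fields the excerpt reads:

#include <cstddef>
#include <cstdio>

enum SweepToggle { DoNotSweep, DoSweep };

// Hypothetical stand-in for the heap state consulted by the excerpt.
struct HeapState {
    size_t capacity;        // total bytes reserved by the heap
    size_t lastFullGCSize;  // heap size recorded at the last full GC
};

static bool shouldDoFullGC(const HeapState& heap, SweepToggle sweepToggle)
{
    bool fullGC = sweepToggle == DoSweep;                  // eager sweep always goes full
    if (!fullGC)
        fullGC = heap.capacity > 4 * heap.lastFullGCSize;  // 4x growth since last full GC
    return fullGC;
}

int main()
{
    HeapState heap = { 10 * 1024 * 1024, 2 * 1024 * 1024 };
    // 10 MB capacity against a 2 MB last-full-GC size exceeds the 4x threshold.
    std::printf("full GC? %d\n", shouldDoFullGC(heap, DoNotSweep));
    return 0;
}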
Example #3
File: Heap.cpp  Project: ewmailing/webkit
void Heap::collect(SweepToggle sweepToggle)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif
    
    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC", sweepToggle == DoSweep ? " (eager sweep)" : "", ": ");
        before = currentTimeMS();
    }
    
    SamplingRegion samplingRegion("Garbage Collection");
    
    RELEASE_ASSERT(!m_deferralDepth);
    GCPHASE(Collect);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    
    m_deferralDepth++; // Make sure that we don't GC in this call.
    m_vm->prepareToDiscardCode();
    m_deferralDepth--; // Decrement deferral manually, so we don't GC when we do so, since we are already GCing!
    
    m_operationInProgress = Collection;
    m_extraMemoryUsage = 0;

    if (m_activityCallback)
        m_activityCallback->willCollect();

    double lastGCStartTime = WTF::monotonicallyIncreasingTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }

    {
        GCPHASE(StopAllocation);
        m_objectSpace.stopAllocating();
    }

    markRoots();
    
    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();
    
    {
        GCPHASE(SweepingArrayBuffers);
        m_arrayBuffers.sweep();
    }

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_vm->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }
    
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::monotonicallyIncreasingTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
    
    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms, ", currentHeapSize / 1024, " kb]\n");
    }

#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC finishing collection.\n");
#endif
}
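
Example #3 replaces the flat 2X rule with a limit derived from both the post-collection heap size and the machine's RAM: the next allocation budget is the headroom between the current heap size and max(minHeapSize(...), proportionalHeapSize(...)). The excerpt does not show those helpers, so the sketch below uses simplified placeholder policies purely to illustrate the shape of the computation:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Placeholder policies; the real minHeapSize()/proportionalHeapSize() in
// Heap.cpp weigh heap type and RAM size with tuned constants.
static size_t minHeapSizePlaceholder(size_t ramSize)
{
    return std::min<size_t>(ramSize / 4, 16 * 1024 * 1024); // assumed fixed minimum
}

static size_t proportionalHeapSizePlaceholder(size_t heapSize, size_t ramSize)
{
    (void)ramSize;
    return 2 * heapSize; // assumed growth factor
}

int main()
{
    size_t ramSize = 512 * 1024 * 1024;
    size_t currentHeapSize = 4 * 1024 * 1024; // size right after collection

    size_t maxHeapSize = std::max(minHeapSizePlaceholder(ramSize),
                                  proportionalHeapSizePlaceholder(currentHeapSize, ramSize));
    size_t bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    // A 4 MB heap under a 16 MB fixed minimum gets a 12 MB allocation budget,
    // so very small heaps are not re-collected after every few allocations.
    std::printf("allocation budget: %zu bytes\n", bytesAllocatedLimit);
    return 0;
}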
Example #4
void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");
    
    GCPHASE(Collect);
    ASSERT(globalData()->apiLock().currentThreadIsHoldingLock());
    RELEASE_ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    m_operationInProgress = Collection;

    m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

    {
        GCPHASE(Canonicalize);
        m_objectSpace.canonicalizeCellLivenessData();
    }

    markRoots();
    
    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(finalizeSmallStrings);
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_globalData->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }
    
    size_t currentHeapSize = size();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
}
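
Examples #3 through #5 also throttle compiled-code discarding: all compiled code is dropped only if at least a minute has elapsed since the last discard, so back-to-back collections do not keep throwing away warm JIT code. A minimal sketch of that throttling check; the clock and the minuteInSeconds constant here are illustrative stand-ins for WTF::currentTime() and JavaScriptCore's minute constant:

#include <chrono>
#include <cstdio>

static const double minuteInSeconds = 60.0; // stand-in for the 'minute' constant

static double nowInSeconds()
{
    using namespace std::chrono;
    return duration<double>(steady_clock::now().time_since_epoch()).count();
}

struct CodeDiscardThrottle {
    double lastCodeDiscardTime = 0;

    // Returns true (and records the time) when enough time has passed that
    // discarding all compiled code is worthwhile again.
    bool shouldDiscard(double gcStartTime)
    {
        if (gcStartTime - lastCodeDiscardTime <= minuteInSeconds)
            return false;
        lastCodeDiscardTime = nowInSeconds();
        return true;
    }
};

int main()
{
    CodeDiscardThrottle throttle;
    std::printf("first GC discards code? %d\n", throttle.shouldDiscard(nowInSeconds()));
    std::printf("immediate second GC discards code? %d\n", throttle.shouldDiscard(nowInSeconds()));
    return 0;
}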
Example #5
File: Heap.cpp  Project: Moondee/Artemis
void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");
    
    GCPHASE(Collect);
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();

    m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        discardAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_sizeAfterLastCollect);  
#else
    bool fullGC = true;
#endif
    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);
    
    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }
        
    {
        GCPHASE(FinalizeWeakHandles);
        m_weakSet.sweep();
        m_globalData->smallStrings.finalizeSmallStrings();
    }
    
    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocators();
    }
    
    {
        GCPHASE(DeleteCodeBlocks);
        m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        sweep();
        m_objectSpace.shrink();
        m_weakSet.shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the new allocation 
    // limit to be the current size of the heap. This heuristic 
    // is a bit arbitrary. Using the current size of the heap after this 
    // collection gives us a 2X multiplier, which is a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t newSize = size();
    if (fullGC) {
        m_sizeAfterLastCollect = newSize;
        m_bytesAllocatedLimit = max(newSize, m_minBytesPerCycle);
    }
    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;
    JAVASCRIPTCORE_GC_END();
}
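
Example #5 states the same 1:1 heuristic as example #1, but as a byte-allocation budget instead of a high water mark: m_bytesAllocated is reset to zero and collection is due once new allocation exceeds max(size(), m_minBytesPerCycle), rather than once total heap size exceeds twice its post-collection value. A small sketch comparing the two formulations, with illustrative sizes:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main()
{
    const size_t minBytesPerCycle = 512 * 1024; // illustrative floor
    size_t sizeAfterCollect = 8 * 1024 * 1024;

    // Example #1: collect when total heap size exceeds the high water mark.
    size_t highWaterMark = std::max(2 * sizeAfterCollect, minBytesPerCycle);

    // Example #5: collect when bytes allocated since the last GC exceed the limit.
    size_t bytesAllocatedLimit = std::max(sizeAfterCollect, minBytesPerCycle);

    // For heaps well above the minimum, both fire after the same amount of
    // new allocation.
    std::printf("new-allocation budget: %zu vs %zu bytes\n",
                highWaterMark - sizeAfterCollect, bytesAllocatedLimit);
    return 0;
}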
Example #6
File: Heap.cpp  Project: allsmy/webkit
NEVER_INLINE void Heap::collectImpl(HeapOperation collectionType, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif
    
    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC: ");
        before = currentTimeMS();
    }
    
    SamplingRegion samplingRegion("Garbage Collection");
    
    if (vm()->typeProfiler()) {
        DeferGCForAWhile awhile(*this);
        vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
    }
    
    RELEASE_ASSERT(!m_deferralDepth);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    suspendCompilerThreads();
    willStartCollection(collectionType);
    GCPHASE(Collect);

    double gcStartTime = WTF::monotonicallyIncreasingTime();
    if (m_verifier) {
        // Verify that live objects from the last GC cycle haven't been corrupted by
        // mutators before we begin this new GC cycle.
        m_verifier->verify(HeapVerifier::Phase::BeforeGC);

        m_verifier->initializeGCCycle();
        m_verifier->gatherLiveObjects(HeapVerifier::Phase::BeforeMarking);
    }

    flushOldStructureIDTables();
    stopAllocation();
    flushWriteBarrierBuffer();

    markRoots(gcStartTime, stackOrigin, stackTop, calleeSavedRegisters);

    if (m_verifier) {
        m_verifier->gatherLiveObjects(HeapVerifier::Phase::AfterMarking);
        m_verifier->verify(HeapVerifier::Phase::AfterMarking);
    }
    JAVASCRIPTCORE_GC_MARKED();

    if (vm()->typeProfiler())
        vm()->typeProfiler()->invalidateTypeSetCache();

    reapWeakHandles();
    pruneStaleEntriesFromWeakGCMaps();
    sweepArrayBuffers();
    snapshotMarkedSpace();

    copyBackingStores();

    finalizeUnconditionalFinalizers();
    removeDeadCompilerWorklistEntries();
    deleteUnmarkedCompiledCode();
    deleteSourceProviderCaches();
    notifyIncrementalSweeper();
    rememberCurrentlyExecutingCodeBlocks();

    resetAllocators();
    updateAllocationLimits();
    didFinishCollection(gcStartTime);
    resumeCompilerThreads();

    if (m_verifier) {
        m_verifier->trimDeadObjects();
        m_verifier->verify(HeapVerifier::Phase::AfterGC);
    }

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms]\n");
    }
}
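
Example #6 shows the later refactoring in which each collection phase became its own named helper and the manual deferral-counter juggling of example #3 (m_deferralDepth++/--) gave way to RAII guards such as DeferGCForAWhile. A minimal sketch of the RAII deferral pattern, using a hypothetical guard class rather than JavaScriptCore's actual one:

#include <cstdio>

// Hypothetical heap with a deferral counter; collection is skipped while it is non-zero.
struct ToyHeap {
    int deferralDepth = 0;

    void collectIfNecessary()
    {
        if (deferralDepth) {
            std::printf("GC deferred (depth %d)\n", deferralDepth);
            return;
        }
        std::printf("GC runs\n");
    }
};

// RAII guard: increments the counter on entry and decrements it on every exit
// path, so a scope can never forget to re-enable collection.
class DeferGCScope {
public:
    explicit DeferGCScope(ToyHeap& heap) : m_heap(heap) { ++m_heap.deferralDepth; }
    ~DeferGCScope() { --m_heap.deferralDepth; }
private:
    ToyHeap& m_heap;
};

int main()
{
    ToyHeap heap;
    {
        DeferGCScope guard(heap);
        heap.collectIfNecessary(); // deferred inside the guarded scope
    }
    heap.collectIfNecessary();     // runs once the guard is destroyed
    return 0;
}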