Example #1
void Heap::markRoots()
{
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    void* dummy;
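    // The address of 'dummy' approximates the current top of the C++ stack; it is
    // passed below so conservative scanning knows where the calling thread's stack ends.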

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_blocks);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(&m_blocks);
    registerFile().gatherConservativeRoots(registerFileRoots);

    clearMarks();

    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    visitor.append(machineThreadRoots);
    visitor.drain();

    visitor.append(registerFileRoots);
    visitor.drain();

    markProtectedObjects(heapRootVisitor);
    visitor.drain();
    
    markTempSortVectors(heapRootVisitor);
    visitor.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
    if (m_globalData->exception)
        heapRootVisitor.visit(&m_globalData->exception);
    visitor.drain();

    m_handleHeap.visitStrongHandles(heapRootVisitor);
    visitor.drain();

    m_handleStack.visit(heapRootVisitor);
    visitor.drain();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = visitor.opaqueRootCount();
        m_handleHeap.visitWeakHandles(heapRootVisitor);
        visitor.drain();
    // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != visitor.opaqueRootCount());

    visitor.reset();

    m_operationInProgress = NoOperation;
}
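
The loop over weak handles at the end of Example #1 iterates to a fixed point: visiting weak handles can register new opaque roots, which in turn can make further weak handles reachable. A minimal, self-contained sketch of that pattern follows (not WebKit code; the type and function names here are hypothetical stand-ins):

#include <cstddef>
#include <set>

// Hypothetical stand-in for the parts of SlotVisitor this loop relies on.
struct WeakVisitor {
    std::set<const void*> opaqueRoots;   // opaque roots discovered so far
    size_t opaqueRootCount() const { return opaqueRoots.size(); }
    void visitWeakHandles() { /* may insert into opaqueRoots and enqueue marking work */ }
    void drain() { /* process any work enqueued by visitWeakHandles() */ }
};

// Re-visit weak handles until the set of opaque roots stops growing:
// every newly discovered root may make additional weak handles reachable.
void markWeakHandlesToFixedPoint(WeakVisitor& visitor)
{
    size_t lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = visitor.opaqueRootCount();
        visitor.visitWeakHandles();
        visitor.drain();
    } while (lastOpaqueRootCount != visitor.opaqueRootCount());
}
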
Example #2
void Heap::markRoots(bool fullGC)
{
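    // GCPHASE / COND_GCPHASE scope a named phase for the GC's timing instrumentation;
    // COND_GCPHASE picks the phase name based on whether this is a full collection.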
    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    void* dummy;
    
    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks());
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks());
    m_jettisonedCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_jettisonedCodeBlocks);
    }
    m_jettisonedCodeBlocks.deleteUnmarkedCodeBlocks();
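    // Under generational GC (GGC), a young collection only re-scans cells dirtied
    // since the last collection; a full collection clears every mark bit instead.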
#if ENABLE(GGC)
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        clearMarks();
    }

    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

#if ENABLE(GGC)
    {
        size_t dirtyCellCount = dirtyCells.size();
        GCPHASE(VisitDirtyCells);
        GCCOUNTER(DirtyCellCount, dirtyCellCount);
        for (size_t i = 0; i < dirtyCellCount; i++) {
            heapRootVisitor.visitChildren(dirtyCells[i]);
            visitor.drain();
        }
    }
#endif
    
    if (m_globalData->codeBlocksBeingCompiled.size()) {
        GCPHASE(VisitActiveCodeBlock);
        for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
            m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
    }
    
    {
        GCPHASE(VisitMachineRoots);
        visitor.append(machineThreadRoots);
        visitor.drain();
    }
    {
        GCPHASE(VisitRegisterFileRoots);
        visitor.append(registerFileRoots);
        visitor.drain();
    }
    {
        GCPHASE(VisitProtectedObjects);
        markProtectedObjects(heapRootVisitor);
        visitor.drain();
    }
    {
        GCPHASE(VisitTempSortVectors);
        markTempSortVectors(heapRootVisitor);
        visitor.drain();
    }

    {
        GCPHASE(MarkingArgumentBuffers);
        if (m_markListSet && m_markListSet->size()) {
            MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
            visitor.drain();
        }
    }
    if (m_globalData->exception) {
        GCPHASE(MarkingException);
        heapRootVisitor.visit(&m_globalData->exception);
        visitor.drain();
    }
    
    {
        GCPHASE(VisitStrongHandles);
        m_handleHeap.visitStrongHandles(heapRootVisitor);
        visitor.drain();
    }
    
    {
        GCPHASE(HandleStack);
        m_handleStack.visit(heapRootVisitor);
        visitor.drain();
    }
    
    {
        GCPHASE(TraceCodeBlocks);
        m_jettisonedCodeBlocks.traceCodeBlocks(visitor);
        visitor.drain();
    }

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    {
        GCPHASE(VisitingWeakHandles);
        int lastOpaqueRootCount;
        do {
            lastOpaqueRootCount = visitor.opaqueRootCount();
            m_handleHeap.visitWeakHandles(heapRootVisitor);
            visitor.drain();
            // If the set of opaque roots has grown, more weak handles may have become reachable.
        } while (lastOpaqueRootCount != visitor.opaqueRootCount());
    }
    GCCOUNTER(VisitedValueCount, visitor.visitCount());
    visitor.reset();

    m_operationInProgress = NoOperation;
}
Example #3
void Heap::markRoots()
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::monotonicallyIncreasingTime();
#endif

    void* dummy;
    
    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_codeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(stackRoots, m_jitStubRoutines, m_codeBlocks);
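        // Zero the unused portion of the stack so stale values left there cannot
        // be mistaken for live references by later conservative scans.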
        stack().sanitizeStack();
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_vm->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

    {
        GCPHASE(ClearLivenessData);
        m_objectSpace.clearNewlyAllocated();
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
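        // Within this scope the visitor runs in parallel mode: donateAndDrain()
        // can hand part of its pending work to helper marking threads before draining.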
        ParallelModeEnabler enabler(visitor);

        m_vm->smallStrings.visitStrongReferences(visitor);

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_vm->exception()) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(m_vm->addressOfException());
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_codeBlocks.traceMarked(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }
    
#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
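            // The main thread keeps draining the shared mark stack until every
            // parallel marking thread has finished its work.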
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::monotonicallyIncreasingTime() - gcStartTime);
#endif

    m_totalBytesVisited = visitor.bytesVisited();
    m_totalBytesCopied = visitor.bytesCopied();
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
Example #4
bool Collector::collect()
{
  assert(Interpreter::lockCount() > 0);

  bool deleted = false;

#if TEST_CONSERVATIVE_GC
  // CONSERVATIVE MARK: mark the root set using conservative GC bit (will compare later)
  ValueImp::useConservativeMark(true);
#endif

#if USE_CONSERVATIVE_GC || TEST_CONSERVATIVE_GC
  if (InterpreterImp::s_hook) {
    InterpreterImp *scr = InterpreterImp::s_hook;
    do {
      //fprintf( stderr, "Collector marking interpreter %p\n",(void*)scr);
      scr->mark();
      scr = scr->next;
    } while (scr != InterpreterImp::s_hook);
  }

  markStackObjectsConservatively();
  markProtectedObjects();
#endif

#if TEST_CONSERVATIVE_GC
  ValueImp::useConservativeMark(false);
#endif

#if !USE_CONSERVATIVE_GC
  // MARK: first mark all referenced objects recursively
  // starting out from the set of root objects
  if (InterpreterImp::s_hook) {
    InterpreterImp *scr = InterpreterImp::s_hook;
    do {
      //fprintf( stderr, "Collector marking interpreter %p\n",(void*)scr);
      scr->mark();
      scr = scr->next;
    } while (scr != InterpreterImp::s_hook);
  }
  
  // mark any other objects that we wouldn't delete anyway
  for (int block = 0; block < heap.usedBlocks; block++) {

    int minimumCellsToProcess = heap.blocks[block]->usedCells;
    CollectorBlock *curBlock = heap.blocks[block];
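    // Stop scanning this block once all of its used cells have been seen;
    // each free cell encountered along the way extends the bound by one.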

    for (int cell = 0; cell < CELLS_PER_BLOCK; cell++) {
      if (minimumCellsToProcess < cell) {
	goto skip_block_mark;
      }
	
      ValueImp *imp = (ValueImp *)(curBlock->cells + cell);

      if (((CollectorCell *)imp)->u.freeCell.zeroIfFree != 0) {
	
	if ((imp->_flags & (ValueImp::VI_CREATED|ValueImp::VI_MARKED)) == ValueImp::VI_CREATED &&
	    ((imp->_flags & ValueImp::VI_GCALLOWED) == 0 || imp->refcount != 0)) {
	  imp->mark();
	}
      } else {
	minimumCellsToProcess++;
      }
    }
  skip_block_mark: ;
  }
  
  for (int cell = 0; cell < heap.usedOversizeCells; cell++) {
    ValueImp *imp = (ValueImp *)heap.oversizeCells[cell];
    if ((imp->_flags & (ValueImp::VI_CREATED|ValueImp::VI_MARKED)) == ValueImp::VI_CREATED &&
	((imp->_flags & ValueImp::VI_GCALLOWED) == 0 || imp->refcount != 0)) {
      imp->mark();
    }
  }
#endif

  // SWEEP: delete everything with a zero refcount (garbage) and unmark everything else
  
  int emptyBlocks = 0;

  for (int block = 0; block < heap.usedBlocks; block++) {
    CollectorBlock *curBlock = heap.blocks[block];

    int minimumCellsToProcess = curBlock->usedCells;

    for (int cell = 0; cell < CELLS_PER_BLOCK; cell++) {
      if (minimumCellsToProcess < cell) {
	goto skip_block_sweep;
      }

      ValueImp *imp = (ValueImp *)(curBlock->cells + cell);

      if (((CollectorCell *)imp)->u.freeCell.zeroIfFree != 0) {
#if USE_CONSERVATIVE_GC
	if (!imp->_marked)
#else
	if (!imp->refcount && imp->_flags == (ValueImp::VI_GCALLOWED | ValueImp::VI_CREATED))
#endif
	{
	  //fprintf( stderr, "Collector::deleting ValueImp %p (%s)\n", (void*)imp, typeid(*imp).name());
	  // emulate destructing part of 'operator delete()'
	  imp->~ValueImp();
	  curBlock->usedCells--;
	  heap.numLiveObjects--;
	  deleted = true;

	  // put it on the free list
	  ((CollectorCell *)imp)->u.freeCell.zeroIfFree = 0;
	  ((CollectorCell *)imp)->u.freeCell.next = curBlock->freeList;
	  curBlock->freeList = (CollectorCell *)imp;

	} else {
#if USE_CONSERVATIVE_GC
	  imp->_marked = 0;
#elif TEST_CONSERVATIVE_GC
	  imp->_flags &= ~(ValueImp::VI_MARKED | ValueImp::VI_CONSERVATIVE_MARKED);
#else
	  imp->_flags &= ~ValueImp::VI_MARKED;
#endif
	}
      } else {
	minimumCellsToProcess++;
      }
    }

  skip_block_sweep:

    if (heap.blocks[block]->usedCells == 0) {
      emptyBlocks++;
      if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
	free(heap.blocks[block]);
#endif
	// swap with the last block so we compact as we go
	heap.blocks[block] = heap.blocks[heap.usedBlocks - 1];
	heap.usedBlocks--;
	block--; // Don't move forward a step in this case

	if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
	  heap.numBlocks = heap.numBlocks / GROWTH_FACTOR; 
	  heap.blocks = (CollectorBlock **)realloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock *));
	}
      } 
    }
  }

  if (deleted) {
    heap.firstBlockWithPossibleSpace = 0;
  }
  
  int cell = 0;
  while (cell < heap.usedOversizeCells) {
    ValueImp *imp = (ValueImp *)heap.oversizeCells[cell];
    
#if USE_CONSERVATIVE_GC
    if (!imp->_marked) {
#else
    if (!imp->refcount && 
	imp->_flags == (ValueImp::VI_GCALLOWED | ValueImp::VI_CREATED)) {
#endif

      imp->~ValueImp();
#if DEBUG_COLLECTOR
      heap.oversizeCells[cell]->u.freeCell.zeroIfFree = 0;
#else
      free((void *)imp);
#endif

      // swap with the last oversize cell so we compact as we go
      heap.oversizeCells[cell] = heap.oversizeCells[heap.usedOversizeCells - 1];

      heap.usedOversizeCells--;
      deleted = true;
      heap.numLiveObjects--;

      if (heap.numOversizeCells > MIN_ARRAY_SIZE && heap.usedOversizeCells < heap.numOversizeCells / LOW_WATER_FACTOR) {
	heap.numOversizeCells = heap.numOversizeCells / GROWTH_FACTOR; 
	heap.oversizeCells = (CollectorCell **)realloc(heap.oversizeCells, heap.numOversizeCells * sizeof(CollectorCell *));
      }

    } else {
#if USE_CONSERVATIVE_GC
      imp->_marked = 0;
#elif TEST_CONSERVATIVE_GC
      imp->_flags &= ~(ValueImp::VI_MARKED | ValueImp::VI_CONSERVATIVE_MARKED);
#else
      imp->_flags &= ~ValueImp::VI_MARKED;
#endif
      cell++;
    }
  }
  
  heap.numAllocationsSinceLastCollect = 0;
  
  memoryFull = (heap.numLiveObjects >= KJS_MEM_LIMIT);

  return deleted;
}

int Collector::size() 
{
  return heap.numLiveObjects; 
}

#ifdef KJS_DEBUG_MEM
void Collector::finalCheck()
{
}
#endif

#if APPLE_CHANGES

int Collector::numInterpreters()
{
  int count = 0;
  if (InterpreterImp::s_hook) {
    InterpreterImp *scr = InterpreterImp::s_hook;
    do {
      ++count;
      scr = scr->next;
    } while (scr != InterpreterImp::s_hook);
  }
  return count;
}
#endif
Example #5
void Heap::markRoots(bool fullGC)
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    void* dummy;
    
    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots registerFileRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherRegisterFileRoots);
        registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks);
    }
#if ENABLE(GGC)
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        clearMarks();
    }

    m_storageSpace.startedCopying();
    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);
#if ENABLE(GGC)
        {
            size_t dirtyCellCount = dirtyCells.size();
            GCPHASE(VisitDirtyCells);
            GCCOUNTER(DirtyCellCount, dirtyCellCount);
            for (size_t i = 0; i < dirtyCellCount; i++) {
                heapRootVisitor.visitChildren(dirtyCells[i]);
                visitor.donateAndDrain();
            }
        }
#endif
    
        if (m_globalData->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
                m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }
    
        {
            GCPHASE(VisitMachineRoots);
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitRegisterFileRoots);
            visitor.append(registerFileRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitProtectedObjects);
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_globalData->exception) {
            GCPHASE(MarkingException);
            heapRootVisitor.visit(&m_globalData->exception);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(VisitStrongHandles);
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(HandleStack);
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(TraceCodeBlocks);
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            visitor.donateAndDrain();
        }
    
#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        while (true) {
            m_weakSet.visitLiveWeakImpls(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    {
        GCPHASE(VisitingDeadWeakHandles);
        m_weakSet.visitDeadWeakImpls(heapRootVisitor);
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    visitor.doneCopying();
    visitor.reset();
    m_sharedData.reset();
    m_storageSpace.doneCopying();

    m_operationInProgress = NoOperation;
}
Example #6
void Heap::markRoots(bool fullGC)
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    COND_GCPHASE(fullGC, MarkFullRoots, MarkYoungRoots);
    UNUSED_PARAM(fullGC);
    ASSERT(isValidThreadState(m_globalData));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::currentTime();
#endif

    void* dummy;
    
    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(
            stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_globalData->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

#if ENABLE(GGC)
    MarkedBlock::DirtyCellVector dirtyCells;
    if (!fullGC) {
        GCPHASE(GatheringDirtyCells);
        m_objectSpace.gatherDirtyCells(dirtyCells);
    } else
#endif
    {
        GCPHASE(clearMarks);
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);
#if ENABLE(GGC)
        {
            size_t dirtyCellCount = dirtyCells.size();
            GCPHASE(VisitDirtyCells);
            GCCOUNTER(DirtyCellCount, dirtyCellCount);
            for (size_t i = 0; i < dirtyCellCount; i++) {
                heapRootVisitor.visitChildren(dirtyCells[i]);
                visitor.donateAndDrain();
            }
        }
#endif
    
        if (m_globalData->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_globalData->codeBlocksBeingCompiled.size(); i++)
                m_globalData->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }
    
        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }

        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_globalData->exception) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(&m_globalData->exception);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
    
        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }
    
#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    void* dummy;

    ASSERT(m_operationInProgress == NoOperation);
    if (m_operationInProgress != NoOperation)
        CRASH();

    m_operationInProgress = Collection;

    MarkStack& markStack = m_markStack;
    HeapRootMarker heapRootMarker(markStack);
    
    // We gather conservative roots before clearing mark bits because
    // conservative gathering uses the mark bits from our last mark pass to
    // determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(this);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(this);
    registerFile().gatherConservativeRoots(registerFileRoots);

    m_markedSpace.clearMarks();

    markStack.append(machineThreadRoots);
    markStack.drain();

    markStack.append(registerFileRoots);
    markStack.drain();

    markProtectedObjects(heapRootMarker);
    markStack.drain();
    
    markTempSortVectors(heapRootMarker);
    markStack.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootMarker, *m_markListSet);
    if (m_globalData->exception)
        heapRootMarker.mark(&m_globalData->exception);
    markStack.drain();

    m_handleHeap.markStrongHandles(heapRootMarker);
    markStack.drain();

    m_handleStack.mark(heapRootMarker);
    markStack.drain();

    // Mark the small strings cache as late as possible, since it will clear
    // itself if nothing else has marked it.
    // FIXME: Change the small strings cache to use Weak<T>.
    m_globalData->smallStrings.markChildren(heapRootMarker);
    markStack.drain();
    
    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = markStack.opaqueRootCount();
        m_handleHeap.markWeakHandles(heapRootMarker);
        markStack.drain();
    // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != markStack.opaqueRootCount());

    markStack.reset();

    m_operationInProgress = NoOperation;
}