void CopiedSpace::didStartFullCollection() { ASSERT(heap()->operationInProgress() == FullCollection); ASSERT(m_fromSpace->isEmpty()); for (CopiedBlock* block = m_toSpace->head(); block; block = block->next()) block->didSurviveGC(); for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next()) block->didSurviveGC(); }
size_t CopiedSpace::capacity()
{
    // Sum the capacity of every block in to-space, from-space, and the
    // oversize list.
    size_t total = 0;

    CopiedBlock* cursor = m_toSpace->head();
    while (cursor) {
        total += cursor->capacity();
        cursor = cursor->next();
    }

    cursor = m_fromSpace->head();
    while (cursor) {
        total += cursor->capacity();
        cursor = cursor->next();
    }

    cursor = m_oversizeBlocks.head();
    while (cursor) {
        total += cursor->capacity();
        cursor = cursor->next();
    }

    return total;
}
size_t CopiedSpace::size()
{
    // Sum the size of every block in to-space, from-space, and the oversize
    // list.
    size_t total = 0;

    CopiedBlock* cursor = m_toSpace->head();
    while (cursor) {
        total += cursor->size();
        cursor = cursor->next();
    }

    cursor = m_fromSpace->head();
    while (cursor) {
        total += cursor->size();
        cursor = cursor->next();
    }

    cursor = m_oversizeBlocks.head();
    while (cursor) {
        total += cursor->size();
        cursor = cursor->next();
    }

    return total;
}
void CopiedSpace::startedCopying()
{
    // Flip the semispaces: the old to-space becomes from-space, and the copy
    // phase (if we decide to run one below) will evacuate survivors out of it.
    std::swap(m_fromSpace, m_toSpace);
    m_blockFilter.reset();
    m_allocator.resetCurrentBlock();

    CopiedBlock* next = 0;
    size_t totalLiveBytes = 0;
    size_t totalUsableBytes = 0;
    // Walk from-space. Unpinned blocks that can be recycled are evacuated
    // immediately; the remaining blocks feed the utilization estimate below.
    for (CopiedBlock* block = m_fromSpace->head(); block; block = next) {
        next = block->next();
        if (!block->isPinned() && block->canBeRecycled()) {
            recycleEvacuatedBlock(block);
            continue;
        }
        totalLiveBytes += block->liveBytes();
        totalUsableBytes += block->payloadCapacity();
    }

    // Oversize blocks are never copied: pinned ones survive in place (and are
    // re-added to the block filter), unpinned ones are freed right away.
    CopiedBlock* block = m_oversizeBlocks.head();
    while (block) {
        // NOTE(review): this 'next' shadows the outer 'next' declared above.
        CopiedBlock* next = block->next();
        if (block->isPinned()) {
            m_blockFilter.add(reinterpret_cast<Bits>(block));
            // A surviving oversize block is counted as fully live: its whole
            // payload capacity goes into both numerator and denominator.
            totalLiveBytes += block->payloadCapacity();
            totalUsableBytes += block->payloadCapacity();
            block->didSurviveGC();
        } else {
            m_oversizeBlocks.remove(block);
            m_blockSet.remove(block);
            m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
        }
        block = next;
    }

    // Estimate overall heap utilization, folding marked-space capacity into
    // both sides so a small copied space cannot trigger a copy phase alone.
    // We only copy when utilization is at or below minHeapUtilization().
    double markedSpaceBytes = m_heap->objectSpace().capacity();
    double totalFragmentation = ((double)totalLiveBytes + markedSpaceBytes) / ((double)totalUsableBytes + markedSpaceBytes);
    m_shouldDoCopyPhase = totalFragmentation <= Options::minHeapUtilization();
    if (!m_shouldDoCopyPhase)
        return;

    ASSERT(m_shouldDoCopyPhase);
    ASSERT(!m_inCopyingPhase);
    ASSERT(!m_numberOfLoanedBlocks);
    m_inCopyingPhase = true;
}
void CopiedSpace::didStartFullCollection() { ASSERT(heap()->operationInProgress() == FullCollection); ASSERT(m_oldGen.fromSpace->isEmpty()); ASSERT(m_newGen.fromSpace->isEmpty()); #ifndef NDEBUG for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next()) ASSERT(!block->liveBytes()); for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next()) ASSERT(!block->liveBytes()); #endif for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next()) block->didSurviveGC(); for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next()) block->didSurviveGC(); }
static bool isBlockListPagedOut(double deadline, DoublyLinkedList<CopiedBlock>* list) { unsigned itersSinceLastTimeCheck = 0; CopiedBlock* current = list->head(); while (current) { current = current->next(); ++itersSinceLastTimeCheck; if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) { double currentTime = WTF::monotonicallyIncreasingTime(); if (currentTime > deadline) return true; itersSinceLastTimeCheck = 0; } } return false; }
void CopiedSpace::doneCopying()
{
    {
        // Wait until every block loaned out during the copy phase has been
        // returned; from-space must not be torn down while copies are in flight.
        MutexLocker locker(m_loanedBlocksLock);
        while (m_numberOfLoanedBlocks > 0)
            m_loanedBlocksCondition.wait(m_loanedBlocksLock);
    }

    ASSERT(m_inCopyingPhase);
    m_inCopyingPhase = false;

    // Drain from-space: pinned blocks survive in place and rejoin to-space;
    // anything else was evacuated, so its block goes back to the allocator.
    while (!m_fromSpace->isEmpty()) {
        CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
        if (block->m_isPinned) {
            block->m_isPinned = false;
            // We don't add the block to the toSpaceSet because it was never removed.
            ASSERT(m_toSpaceSet.contains(block));
            m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
            m_toSpace->push(block);
            continue;
        }

        m_toSpaceSet.remove(block);
        m_heap->blockAllocator().deallocate(block);
    }

    // Oversize blocks are never copied: unpinned ones are freed outright,
    // pinned ones stay on the list with the pin cleared for the next cycle.
    CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
    while (curr) {
        CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
        if (!curr->m_isPinned) {
            m_oversizeBlocks.remove(curr);
            curr->m_allocation.deallocate();
        } else
            curr->m_isPinned = false;
        curr = next;
    }

    // Ensure the allocator has a current block: create one if to-space ended
    // up empty, otherwise point it at the head of to-space.
    if (!m_toSpace->head()) {
        if (!addNewBlock())
            CRASH();
    } else
        m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
void GCThreadSharedData::didStartCopying()
{
    {
        // Build the work list of blocks to copy while holding the copy lock.
        SpinLockHolder locker(&m_copyLock);
        if (m_vm->heap.operationInProgress() == EdenCollection) {
            // An Eden collection only copies the new generation's from-space.
            // Reset the vector to be empty, but don't throw away the backing store.
            m_blocksToCopy.shrink(0);
            for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next())
                m_blocksToCopy.append(block);
        } else {
            ASSERT(m_vm->heap.operationInProgress() == FullCollection);
            // A full collection considers every block in the copied space.
            WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
        }
        m_copyIndex = 0;
    }

    // We do this here so that we avoid a race condition where the main thread can
    // blow through all of the copying work before the GCThreads fully wake up.
    // The GCThreads then request a block from the CopiedSpace when the copying phase
    // has completed, which isn't allowed.
    for (size_t i = 0; i < m_gcThreads.size(); i++)
        m_gcThreads[i]->copyVisitor()->startCopying();

    startNextPhase(Copy);
}