void GCThreadSharedData::didStartCopying() { { SpinLockHolder locker(&m_copyLock); if (m_vm->heap.operationInProgress() == EdenCollection) { // Reset the vector to be empty, but don't throw away the backing store. m_blocksToCopy.shrink(0); for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next()) m_blocksToCopy.append(block); } else { ASSERT(m_vm->heap.operationInProgress() == FullCollection); WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy); } m_copyIndex = 0; } // We do this here so that we avoid a race condition where the main thread can // blow through all of the copying work before the GCThreads fully wake up. // The GCThreads then request a block from the CopiedSpace when the copying phase // has completed, which isn't allowed. for (size_t i = 0; i < m_gcThreads.size(); i++) m_gcThreads[i]->copyVisitor()->startCopying(); startNextPhase(Copy); }
void GCThreadSharedData::didStartMarking() { if (m_vm->heap.operationInProgress() == FullCollection) { #if ENABLE(PARALLEL_GC) m_opaqueRoots.clear(); #else ASSERT(m_opaqueRoots.isEmpty()); #endif } std::lock_guard<std::mutex> lock(m_markingMutex); m_parallelMarkersShouldExit = false; startNextPhase(Mark); }
void GCThreadSharedData::didStartCopying() { { SpinLockHolder locker(&m_copyLock); WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy); m_copyIndex = 0; } // We do this here so that we avoid a race condition where the main thread can // blow through all of the copying work before the GCThreads fully wake up. // The GCThreads then request a block from the CopiedSpace when the copying phase // has completed, which isn't allowed. for (size_t i = 0; i < m_gcThreads.size(); i++) m_gcThreads[i]->copyVisitor()->startCopying(); startNextPhase(Copy); }
void GCThreadSharedData::didStartMarking()
{
    // Under the marking lock, re-arm the parallel markers and announce the
    // Mark phase.
    MutexLocker markingLockHolder(m_markingLock);
    m_parallelMarkersShouldExit = false;
    startNextPhase(Mark);
}