// Resume every thread previously parked by parkOthers().
//
// |barrierLocked| is true only on the abandon path out of parkOthers(),
// where m_mutex is already held by the caller; in that case we must not
// re-acquire it before broadcasting.
//
// NOTE(review): assumes the caller (directly or via parkOthers()) holds
// threadAttachMutex(); it is explicitly unlocked at the end here.
void resumeOthers(bool barrierLocked = false)
{
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    // Rebalance the unparked-thread count that parkOthers() added for the
    // full thread set.
    atomicSubtract(&m_unparkedThreadCount, threads.size());
    // Publish the resume flag before waking the parked threads.
    releaseStore(&m_canResume, 1);
    if (UNLIKELY(barrierLocked)) {
        m_resume.broadcast();
    } else {
        // FIXME: Resumed threads will all contend for m_mutex just to
        // unlock it later which is a waste of resources.
        MutexLocker locker(m_mutex);
        m_resume.broadcast();
    }
    ThreadState* current = ThreadState::current();
    // Clear the interrupts that parkOthers() requested on every other thread.
    for (ThreadState* state : threads) {
        if (state == current)
            continue;
        for (ThreadState::Interruptor* interruptor : state->interruptors())
            interruptor->clearInterrupt();
    }
    // Allow thread attach/detach again (locked by parkOthers()).
    threadAttachMutex().unlock();
    ASSERT(ThreadState::current()->isAtSafePoint());
}
void ThreadState::attach() { RELEASE_ASSERT(!Heap::s_shutdownCalled); MutexLocker locker(threadAttachMutex()); ThreadState* state = new ThreadState(); attachedThreads().add(state); }
// Request other attached threads that are not at safe points to park themselves on safepoints. bool parkOthers() { ASSERT(ThreadState::current()->isAtSafePoint()); // Lock threadAttachMutex() to prevent threads from attaching. threadAttachMutex().lock(); ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); MutexLocker locker(m_mutex); atomicAdd(&m_unparkedThreadCount, threads.size()); releaseStore(&m_canResume, 0); ThreadState* current = ThreadState::current(); for (ThreadState* state : threads) { if (state == current) continue; for (ThreadState::Interruptor* interruptor : state->interruptors()) interruptor->requestInterrupt(); } while (acquireLoad(&m_unparkedThreadCount) > 0) { double expirationTime = currentTime() + lockingTimeout(); if (!m_parked.timedWait(m_mutex, expirationTime)) { // One of the other threads did not return to a safepoint within the maximum // time we allow for threads to be parked. Abandon the GC and resume the // currently parked threads. resumeOthers(true); return false; } } return true; }
void ThreadState::detach() { ThreadState* state = current(); MutexLocker locker(threadAttachMutex()); attachedThreads().remove(state); delete state; }
// Register an interruptor for this thread. Entering a safepoint while
// blocking on threadAttachMutex() keeps a concurrent GC from dead-locking
// on this thread.
void ThreadState::addInterruptor(Interruptor* interruptor)
{
    checkThread();
    SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
    // Declared after |scope|, so the lock is released before the safepoint
    // scope ends — same teardown order as an explicit inner block.
    MutexLocker locker(threadAttachMutex());
    m_interruptors.append(interruptor);
}
void ThreadState::shutdownHeapIfNecessary() { // We don't need to enter a safe point before acquiring threadAttachMutex // because this thread is already detached. MutexLocker locker(threadAttachMutex()); // We start shutting down the heap if there is no running thread // and Heap::shutdown() is already called. if (!attachedThreads().size() && Heap::s_shutdownCalled) Heap::doShutdown(); }
// Unregister a previously added interruptor. It is a fatal error to remove
// an interruptor that was never registered.
void ThreadState::removeInterruptor(Interruptor* interruptor)
{
    checkThread();
    // Enter a safepoint while blocking on threadAttachMutex() so a
    // concurrent GC is not dead-locked by this thread.
    SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
    {
        MutexLocker locker(threadAttachMutex());
        size_t index = m_interruptors.find(interruptor);
        // Vector::find() returns kNotFound on failure. The previous check,
        // |index >= 0|, was a tautology because size_t is unsigned, so a
        // missing interruptor was never caught before remove().
        RELEASE_ASSERT(index != kNotFound);
        m_interruptors.remove(index);
    }
}
// Tears down this thread's heap before the thread exits: finishes sweeping,
// runs thread-local termination GCs until no thread-local Persistents
// remain, orphans this thread's pages, and removes |this| from the attached
// thread set. Registered cleanup tasks get their postCleanup() hook after
// the mutex is released.
void ThreadState::cleanup()
{
    checkThread();
    // Finish sweeping.
    completeSweep();
    {
        // Grab the threadAttachMutex to ensure only one thread can shutdown at
        // a time and that no other thread can do a global GC. It also allows
        // safe iteration of the attachedThreads set which happens as part of
        // thread local GC asserts. We enter a safepoint while waiting for the
        // lock to avoid a dead-lock where another thread has already requested
        // GC.
        SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
        // From here on ignore all conservatively discovered
        // pointers into the heap owned by this thread.
        m_isTerminating = true;
        // Set the terminate flag on all heap pages of this thread. This is used to
        // ensure we don't trace pages on other threads that are not part of the
        // thread local GC.
        prepareHeapForTermination();
        // Do thread local GC's as long as the count of thread local Persistents
        // changes and is above zero. Each GC may release Persistents, which in
        // turn may make further objects collectible, hence the fixed point loop.
        PersistentAnchor* anchor = static_cast<PersistentAnchor*>(m_persistents.get());
        int oldCount = -1;
        int currentCount = anchor->numberOfPersistents();
        ASSERT(currentCount >= 0);
        while (currentCount != oldCount) {
            Heap::collectGarbageForTerminatingThread(this);
            oldCount = currentCount;
            currentCount = anchor->numberOfPersistents();
        }
        // We should not have any persistents left when getting to this point,
        // if we have it is probably a bug so adding a debug ASSERT to catch this.
        ASSERT(!currentCount);
        // All of pre-finalizers should be consumed.
        ASSERT(m_preFinalizers.isEmpty());
        RELEASE_ASSERT(gcState() == NoGCScheduled);
        // Add pages to the orphaned page pool to ensure any global GCs from this point
        // on will not trace objects on this thread's heaps.
        cleanupPages();
        ASSERT(attachedThreads().contains(this));
        attachedThreads().remove(this);
    }
    // Run the registered cleanup hooks only after the mutex has been released.
    for (auto& task : m_cleanupTasks)
        task->postCleanup();
    m_cleanupTasks.clear();
}
// Detaches the main thread's ThreadState at shutdown. The state is torn
// down with an explicit destructor call rather than |delete| — presumably
// because mainThreadState() refers to statically reserved storage; confirm
// against mainThreadState()'s definition.
void ThreadState::detachMainThread()
{
    // Enter a safe point before trying to acquire threadAttachMutex
    // to avoid dead lock if another thread is preparing for GC, has acquired
    // threadAttachMutex and waiting for other threads to pause or reach a
    // safepoint.
    ThreadState* state = mainThreadState();
    // 1. Finish sweeping.
    state->completeSweep();
    {
        SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
        // 2. Add the main thread's heap pages to the orphaned pool.
        state->cleanupPages();
        // 3. Detach the main thread.
        ASSERT(attachedThreads().contains(state));
        attachedThreads().remove(state);
        state->~ThreadState();
    }
    // If the main thread was the last one and shutdown was requested,
    // tear the heap down now.
    shutdownHeapIfNecessary();
}
// Create a ThreadState (with an explicit stack start) for the calling
// thread and register it in the attached-thread set.
void ThreadState::attach(intptr_t* startOfStack)
{
    // Match the parameterless attach() overload: attaching after
    // Heap::shutdown() would register a thread against a torn-down heap.
    RELEASE_ASSERT(!Heap::s_shutdownCalled);
    MutexLocker locker(threadAttachMutex());
    ThreadState* state = new ThreadState(startOfStack);
    attachedThreads().add(state);
}