// Request other attached threads that are not at safe points to park themselves on safepoints. bool parkOthers() { ASSERT(ThreadState::current()->isAtSafePoint()); // Lock threadAttachMutex() to prevent threads from attaching. threadAttachMutex().lock(); ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); MutexLocker locker(m_mutex); atomicAdd(&m_unparkedThreadCount, threads.size()); releaseStore(&m_canResume, 0); ThreadState* current = ThreadState::current(); for (ThreadState* state : threads) { if (state == current) continue; for (ThreadState::Interruptor* interruptor : state->interruptors()) interruptor->requestInterrupt(); } while (acquireLoad(&m_unparkedThreadCount) > 0) { double expirationTime = currentTime() + lockingTimeout(); if (!m_parked.timedWait(m_mutex, expirationTime)) { // One of the other threads did not return to a safepoint within the maximum // time we allow for threads to be parked. Abandon the GC and resume the // currently parked threads. resumeOthers(true); return false; } } return true; }
// Park the calling thread at a safe point until the barrier allows it to
// resume. Runs on a thread that was asked to stop; pairs with parkOthers()
// executing on the thread that requested the parking.
//
// state    - the ThreadState of the calling thread.
// stackEnd - current stack extent, recorded before parking (presumably so the
//            GC can conservatively scan this thread's stack -- confirm).
void doPark(ThreadState* state, intptr_t* stackEnd)
{
    state->recordStackEnd(stackEnd);
    MutexLocker locker(m_mutex);
    // If this was the last unparked thread, wake the thread blocked in
    // parkOthers() waiting on m_parked.
    if (!atomicDecrement(&m_unparkedThreadCount))
        m_parked.signal();
    // Sleep until resume is requested (m_canResume set and m_resume signaled
    // elsewhere, presumably by resumeOthers()).
    while (!acquireLoad(&m_canResume))
        m_resume.wait(m_mutex);
    // Re-register ourselves as unparked before returning to normal execution.
    atomicIncrement(&m_unparkedThreadCount);
}
// Park this thread at a safepoint if parking has been requested (i.e.
// m_canResume was cleared by parkOthers()); otherwise return immediately.
//
// state  - the ThreadState of the calling thread (must not be mid-sweep).
// locker - optional SafePointAwareMutexLocker this call originates from.
void checkAndPark(ThreadState* state, SafePointAwareMutexLocker* locker = nullptr)
{
    ASSERT(!state->sweepForbidden());

    // Fast path: no park request pending.
    if (acquireLoad(&m_canResume))
        return;

    // When invoked from within a SafePointAwareMutexLocker, drop the lock
    // before going to sleep so it can be acquired during the sweep phase
    // (e.g. weak processing or finalization). The locker will reenter the
    // safepoint and reacquire the lock after we leave this safepoint.
    if (locker)
        locker->reset();
    pushAllRegisters(this, state, parkAfterPushRegisters);
}
// Reports whether cancellation has been flagged. m_cancelled is read with
// acquire semantics so a cancellation published by another thread is
// observed here.
bool CryptoResultImpl::ResultCancel::cancelled() const
{
    return !!acquireLoad(&m_cancelled);
}