/**
 * Pop the next fast-resumable wait handle off this context's fast-runnable
 * queue, or return nullptr if none is available.
 *
 * Must only be called on the current context (asserted below). Returns an
 * owned reference: the queue's ref-count on the returned handle is
 * transferred to the caller.
 *
 * Bug fix: the previous version pushed a stale (blocked/finished) handle
 * back onto the queue and returned nullptr immediately. That re-inserted
 * the dead entry at the back — every later call would pop it, give up, and
 * re-insert it again, permanently disabling the fast path — and it leaked
 * the queue's reference on finished handles. The loop could also never run
 * a second iteration. We now drop the reference and keep scanning.
 */
c_AsyncFunctionWaitHandle* AsioContext::maybePopFast() {
  assertx(this == AsioSession::Get()->getCurrentContext());
  while (!m_fastRunnableQueue.empty()) {
    auto wh = m_fastRunnableQueue.back();
    m_fastRunnableQueue.pop_back();
    if (wh->getState() == c_ResumableWaitHandle::STATE_READY &&
        wh->isFastResumable()) {
      // We only call maybePopFast() on the current context. Since `wh' was
      // scheduled in this context at some point, it must still be scheduled
      // here now, since the only way it could leave the context is if the
      // context was destroyed. (Being scheduled here supercedes it having
      // been scheduled in earlier contexts.)
      assertx(wh->getContextIdx() ==
              AsioSession::Get()->getCurrentContextIdx());
      return wh;
    } else {
      // `wh' is blocked or finished in some other context; drop the queue's
      // reference and keep scanning for a genuinely runnable handle.
      decRefObj(wh);
    }
  }
  return nullptr;
}
/**
 * Transition this wait handle to READY once the handle it was blocked on
 * has completed, and hand it back to its scheduling context.
 *
 * If the handle is no longer inside any context, the reference held by the
 * blocking relationship is released instead of rescheduling.
 */
void c_AsyncFunctionWaitHandle::onUnblocked() {
  setState(STATE_READY);

  // Not scheduled in any context: nothing will resume us, so just drop
  // the reference that the (now-resolved) dependency held on us.
  if (!isInContext()) {
    decRefObj(this);
    return;
  }

  // Re-enqueue in our context, preferring the fast-resume path when the
  // handle qualifies for it.
  auto ctx = getContext();
  if (isFastResumable()) {
    ctx->scheduleFast(this);
  } else {
    ctx->schedule(this);
  }
}
/**
 * Remove this wait handle from the context identified by `ctx_idx' as that
 * context is being torn down, migrating it (and, recursively, everything
 * blocked on it) to the parent context.
 *
 * Ref-counting contract (as visible in this body): each exit path that does
 * not hand the handle to another scheduler releases one reference via
 * decRefObj(this) — presumably the reference held by the context's queue;
 * confirm against the scheduling call sites. NOTE(review): statement order
 * here is load-bearing (parent chain must exit before the index is bumped,
 * and rescheduling must happen after the index points at the parent).
 *
 * @param ctx_idx  Index of the context being exited.
 */
void c_AsyncFunctionWaitHandle::exitContext(context_idx_t ctx_idx) {
  assert(AsioSession::Get()->getContext(ctx_idx));

  // stop before corrupting unioned data
  if (isFinished()) {
    decRefObj(this);
    return;
  }

  // not in a context being exited
  assert(getContextIdx() <= ctx_idx);
  if (getContextIdx() != ctx_idx) {
    // Lives in an older (outer) context; untouched by this teardown.
    decRefObj(this);
    return;
  }

  switch (getState()) {
    case STATE_BLOCKED:
      // we were already ran due to duplicit scheduling; the context will be
      // updated thru exitContext() call on the non-blocked wait handle we
      // recursively depend on
      decRefObj(this);
      break;

    case STATE_READY:
      // Recursively move all wait handles blocked by us.
      getParentChain().exitContext(ctx_idx);

      // Move us to the parent context.
      setContextIdx(getContextIdx() - 1);

      // Reschedule if still in a context.
      if (isInContext()) {
        if (isFastResumable()) {
          getContext()->scheduleFast(this);
        } else {
          getContext()->schedule(this);
        }
      } else {
        // Fell out of the last context; drop the scheduling reference.
        decRefObj(this);
      }
      break;

    default:
      // READY and BLOCKED are the only live states expected here.
      assert(false);
  }
}