    /// <summary>
    ///     Wait for new work
    /// </summary>
    UMSThreadProxy * TransmogrifiedPrimary::WaitForWork()
    {
        // 
        // There are 3 possibilities here
        // 1. A proxy needs to be polled for execution
        // 2. A proxy needs to be transmogrified/retired/run to thread main
        // 3. This background thread needs to be retired
        //
        const int maxCount = 3;
        HANDLE hObjects[maxCount];
        int count = 0;
        hObjects[count++] = m_poller.GetEvent();
        hObjects[count++] = m_hBlock;
        hObjects[count++] = m_hRetire;

        CONCRT_COREASSERT(count == maxCount);       

        DWORD timeout = INFINITE;

        for(;;)
        {
            DWORD result = WaitForMultipleObjectsEx(count, hObjects, FALSE, timeout, FALSE);
            DWORD index = (result == WAIT_TIMEOUT) ? 0 : (result - WAIT_OBJECT_0);

            if (index == 0)
            {
                bool done = m_poller.DoPolling();
                
                //
                // Poll every interval
                //
                timeout = done ? INFINITE : UMSBackgroundPoller::PollInterval();
            }
            else if (index == 1)
            {
                //
                // Dequeue new work and bind it to the primary. It is possible
                // that we already picked up the entry that signalled the event.
                //
                m_pBoundProxy = m_queuedExecutions.Dequeue();
                if (m_pBoundProxy != NULL)
                {
                    return m_pBoundProxy;
                }
            }
            else
            {
                // 
                // Canceled
                //
                CONCRT_COREASSERT(index == 2);

                CONCRT_COREASSERT(m_queueCount == 0);
                CONCRT_COREASSERT(timeout == INFINITE);
                return NULL;
            }
        }
    }
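    //
    // Illustrative sketch (not part of the runtime): the wait pattern used by WaitForWork above
    // folds WAIT_TIMEOUT into index 0, so an expired poll interval is serviced exactly like an
    // explicit wake of the poller event.  The helper below is a hypothetical stand-in showing
    // only that index computation; it assumes <windows.h> and that slot 0 is the poller event.
    //
    inline DWORD WaitOrPollSketch(const HANDLE *pHandles, DWORD count, DWORD timeout)
    {
        // A timeout is reported as slot 0 so the caller runs a polling pass either way.
        DWORD result = WaitForMultipleObjectsEx(count, pHandles, FALSE, timeout, FALSE);
        return (result == WAIT_TIMEOUT) ? 0 : (result - WAIT_OBJECT_0);
    }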
    /// <summary>
    ///     Wait for a proxy to appear on the completion list
    /// </summary>
    UMSThreadProxy * TransmogrifiedPrimary::WaitForBlockedThread(UMSThreadProxy * pProxy)
    {
        //
        // While waiting on the completion list we need to poll proxies for execution, if any.
        // This is required because the current proxy could be blocked for a resource that is
        // held by a UT that is suspended (and needs to be polled for subsequent execution).
        //
        const int maxCount = 2;
        HANDLE hObjects[maxCount];
        int count = 0;
        hObjects[count++] = m_poller.GetEvent();
        hObjects[count++] = m_hCompletionListEvent;
        
        CONCRT_COREASSERT(count == maxCount);

        DWORD timeout = INFINITE;

        for(;;)
        {
            DWORD result = WaitForMultipleObjectsEx(count, hObjects, FALSE, timeout, FALSE);
            DWORD index = (result == WAIT_TIMEOUT) ? 0 : (result - WAIT_OBJECT_0);

            if (index == 0)
            {
                bool done = m_poller.DoPolling();

                //
                // Poll every interval
                //
                timeout = done ? INFINITE : UMSBackgroundPoller::PollInterval();
            }
            else
            {
                CONCRT_COREASSERT(index == 1);

                // Proxy came back on the completion list
                PUMS_CONTEXT pUMSContext = NULL;
                if (!UMS::DequeueUmsCompletionListItems(m_pCompletionList, 0, &pUMSContext))
                    throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError()));

                //
                // The completed thread should be the one we are running
                //
                UMSThreadProxy *pCompletedProxy = UMSThreadProxy::FromUMSContext(pUMSContext);
                CONCRT_COREASSERT(pCompletedProxy == pProxy && UMS::GetNextUmsListItem(pUMSContext) == NULL);
                return pCompletedProxy;
            }
        }
    }
    /// <summary>
    ///     Handle blocking for a UT on this primary
    /// </summary>
    UMSThreadProxy * TransmogrifiedPrimary::HandleBlocking()
    {
        UMSThreadProxy *pProxy = m_pBoundProxy;
        CONCRT_COREASSERT(pProxy != NULL);

        //
        // Wait for the blocked thread to complete
        //
        WaitForBlockedThread(pProxy);      

        //
        // If the thread terminated, either someone called ExitThread or the thread we meant to run to completion finished.  Either way, the proxy is done.
        //
        if (pProxy->IsTerminated())
        {
            //
            // This is the **FIRST** place it's safe to delete the proxy and move on. 
            //
            delete pProxy;
            m_pBoundProxy = NULL;

            // 
            // Search for new work
            //
            return SearchForWork();
        } 
        else
        {
            //
            // proxy has not run to completion yet.
            //
            return pProxy;
        }
    }
    /// <summary>
    ///     Handle yielding for a UT on this primary
    /// </summary>
    UMSThreadProxy * TransmogrifiedPrimary::HandleYielding()
    {
        UMSThreadProxy *pProxy = m_pBoundProxy;
        CONCRT_COREASSERT(pProxy != NULL);

        switch(pProxy->m_yieldAction)
        {
            case UMSThreadProxy::ActionStartup:
            {
                //
                // UT startup
                //
                UMSFreeThreadProxy * pStartedProxy = static_cast<UMSFreeThreadProxy *>(pProxy);
                pStartedProxy->m_yieldAction = UMSThreadProxy::ActionNone;
                SetEvent(pStartedProxy->m_hBlock);
                break;
            }

            default:
            {
                //
                // When the thread explicitly yields, it's blocked as far as we're concerned and someone else can run it.  This would be the case
                // on an exit from nesting.
                //
                pProxy->NotifyBlocked(false);
                break;
            }
        };

        m_pBoundProxy = NULL;
        return SearchForWork();
    }
    /// <summary>
    ///     Sweeps the completion list looking for critically blocked items or the sought item before moving everything to
    ///     the second stage transfer list.  If the sought item is found, true is returned and it is NOT enqueued to the
    ///     transfer list.  Any critically blocked item signals a critical notification event of the appropriate primary
    ///     and is NOT enqueued to the transfer list.
    /// </summary>
    /// <param name="pSought">
    ///     The thread proxy to sweep for.  If NULL, everything but critically blocked items are moved to the transfer list.
    /// </param>
    /// <param name="fWait">
    ///     An indication as to whether or not to wait for something to come onto the completion list.
    /// </param>
    /// <returns>
    ///     An indication as to whether the swept item was found.  The caller owns it if true was returned.  It is NOT moved
    ///     to the transfer list.
    /// </returns>
    bool UMSSchedulerProxy::SweepCompletionList(UMSThreadProxy *pSought, bool fWait)
    {
        PUMS_CONTEXT pFirstContext;
        PUMS_CONTEXT pContext;

        bool fFound = false;

        if (!UMS::DequeueUmsCompletionListItems(m_pCompletionList, fWait ? INFINITE : 0, &pFirstContext))
            throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError()));

        pContext = pFirstContext;
        while (pContext != NULL)
        {
            UMSThreadProxy *pProxy = UMSThreadProxy::FromUMSContext(pContext);
            PUMS_CONTEXT pNext = UMS::GetNextUmsListItem(pContext);

#if defined(_DEBUG)
            CONCRT_COREASSERT((pProxy->m_UMSDebugBits & (UMS_DEBUGBIT_HANDEDTOPOLLER | UMS_DEBUGBIT_POLLERFOUNDCOMPLETION)) != UMS_DEBUGBIT_HANDEDTOPOLLER);
            pProxy->m_UMSDebugBits |= UMS_DEBUGBIT_COMPLETION;
#endif // _DEBUG

            if (pProxy == pSought)
                fFound = true;
            else
                HandleCompletion(pProxy);

            pContext = pNext;

        }

        return fFound;
    }
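    //
    // Illustrative sketch (not part of the runtime): SweepCompletionList above relies on the
    // standard way of draining a UMS completion list -- DequeueUmsCompletionListItems hands back
    // a singly linked chain of UMS contexts, and each link is peeled off with GetNextUmsListItem
    // before the item is handed to any per-proxy handling (which may invalidate the link).  The
    // visitor below is a hypothetical stand-in; assumes <windows.h> with UMS support and <stdexcept>.
    //
    template <typename Visitor>
    void DrainCompletionListSketch(PUMS_COMPLETION_LIST pList, DWORD waitTimeout, Visitor visit)
    {
        PUMS_CONTEXT pContext = NULL;
        if (!DequeueUmsCompletionListItems(pList, waitTimeout, &pContext))
            throw std::runtime_error("DequeueUmsCompletionListItems failed");

        while (pContext != NULL)
        {
            // Capture the next link first: visit() may requeue or free the current item.
            PUMS_CONTEXT pNext = GetNextUmsListItem(pContext);
            visit(pContext);
            pContext = pNext;
        }
    }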
    /// <summary>
    ///     Returns an **unstarted** thread proxy attached to pContext, to the thread proxy factory.
    ///     Such a thread proxy **must** be unstarted.
    ///     This API should *NOT* be called in the vast majority of circumstances.
    /// </summary>
    /// <param name="pContext">
    ///     The context to unbind.
    /// </param>
    void UMSSchedulerProxy::UnbindContext(IExecutionContext *pContext)
    {
        if (pContext == NULL)
            throw std::invalid_argument("pContext");

        UMSFreeThreadProxy * pProxy = static_cast<UMSFreeThreadProxy *> (pContext->GetProxy());

        CONCRT_COREASSERT(pProxy != NULL);
        RPMTRACE(MTRACE_EVT_CONTEXTUNBOUND, pProxy, NULL, pContext);
        pProxy->ReturnIdleProxy();
    }
    /// <summary>
    ///     Execute the given proxy on this primary
    /// </summary>
    /// <param name="pProxy">
    ///     The proxy to execute
    /// </param>
    void TransmogrifiedPrimary::Execute(UMSThreadProxy *pProxy)
    {
        CONCRT_COREASSERT(pProxy != NULL);       

        m_pBoundProxy = pProxy;

        int retryCount = 0;
        for(;;)
        {
            UMS::ExecuteUmsThread(pProxy->GetUMSContext());
            CONCRT_COREASSERT(!pProxy->IsTerminated());
            Sleep(0);

            // Poll at regular intervals
            if (++retryCount == 100)
            {
                m_poller.DoPolling();
                retryCount = 0;
            }
        }
    }
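    //
    // Illustrative sketch (not part of the runtime): ExecuteUmsThread does not return when it
    // succeeds -- the primary's stack is simply switched to the UT -- so control only comes back
    // to the caller when the UT could not be run (e.g. it is suspended or has a kernel APC
    // pending).  That is why Execute above retries unconditionally and why its callers assert
    // that control never comes back.  A stripped form of the retry, assuming <windows.h> UMS support:
    //
    inline void ExecuteUmsThreadWithRetrySketch(PUMS_CONTEXT pUmsContext)
    {
        for (;;)
        {
            ExecuteUmsThread(pUmsContext);  // returns only on failure
            Sleep(0);                       // yield and retry until the UT becomes runnable
        }
    }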
    /// <summary>
    ///     Sets all blocked status on a given context.
    /// </summary>
    /// <param name="pPreviousContext">
    ///     The previously running context.
    /// </param>
    /// <param name="fAsynchronous">
    ///     Is previously running context asynchronously blocked.
    /// </param>
    void UMSSchedulingContext::SetUMSBlocked(UMSThreadInternalContext *pPreviousContext, bool fAsynchronous)
    {
#if defined(_DEBUG)
        //
        // If this assertion fires, someone has called a blocking API between a ReleaseInternalContext and the time we switch off it.  Doing this
        // will corrupt state within the scheduler.
        //
        CONCRT_COREASSERT((pPreviousContext->GetDebugBits() & CTX_DEBUGBIT_RELEASED) == 0);
        pPreviousContext->ClearDebugBits(CTX_DEBUGBIT_AFFINITIZED);
        pPreviousContext->SetDebugBits(CTX_DEBUGBIT_UMSBLOCKED);
#endif // _DEBUG

        CONCRT_COREASSERT(pPreviousContext->m_pThreadProxy != NULL);

        pPreviousContext->NotifyBlocked(fAsynchronous);

        //
        // After this point, it might be running atop another vproc.  Remember that it may have come back on the completion list and been affinitized
        // prior to even getting into this code!
        //
    }
    /// <summary>
    ///     Destroys a scheduler proxy for a UMS scheduler.
    /// </summary>
    UMSSchedulerProxy::~UMSSchedulerProxy()
    {
        UMSThreadProxy *pProxy = GetCompletionListItems();
        CONCRT_COREASSERT(pProxy == NULL);

        if (m_hTransferListEvent != NULL)
            CloseHandle(m_hTransferListEvent);

        if (m_hCompletionListEvent != NULL)
            CloseHandle(m_hCompletionListEvent);

        if (m_pCompletionList != NULL)
            UMS::DeleteUmsCompletionList(m_pCompletionList);
    }
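    //
    // Illustrative sketch (not part of the runtime): the handles released above come from the
    // usual UMS setup sequence -- create a completion list, ask the kernel for the event it
    // signals, plus any auxiliary event -- so the destructor simply unwinds those resources.
    // A minimal creation counterpart, assuming <windows.h> UMS support (error handling shortened):
    //
    inline bool CreateCompletionResourcesSketch(PUMS_COMPLETION_LIST *ppList, HANDLE *phListEvent)
    {
        *ppList = NULL;
        *phListEvent = NULL;

        if (!CreateUmsCompletionList(ppList))
            return false;

        if (!GetUmsCompletionListEvent(*ppList, phListEvent))
        {
            DeleteUmsCompletionList(*ppList);
            *ppList = NULL;
            return false;
        }

        return true;
    }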
    /// <summary>
    ///     Search for work queued in the case of multiple binding
    /// </summary>
    UMSThreadProxy * TransmogrifiedPrimary::SearchForWork()
    {
        CONCRT_COREASSERT(m_pBoundProxy == NULL);

        //
        // This decrement is for the PREVIOUSLY executed work item.
        //
        _InterlockedDecrement(&m_queueCount);

        m_pBoundProxy = m_queuedExecutions.Dequeue();

        if (m_pBoundProxy != NULL)
        {           
            return m_pBoundProxy;
        }              
        
        CompletedTransmogrification();

        //
        // Dequeue new work and bind it to the primary
        //
        return WaitForWork();
    }
    /// <summary>
    ///     The UMS primary function.  This is invoked when the primary switches into UMS scheduling mode or whenever a given
    ///     context blocks or yields.
    /// </summary>
    /// <param name="reason">
    ///     The reason for the UMS invocation.
    /// </param>
    /// <param name="activationPayload">
    ///     The activation payload (depends on reason)
    /// </param>
    /// <param name="pData">
    ///     The context (the primary pointer)
    /// </param>
    void NTAPI TransmogrifiedPrimary::PrimaryInvocation(UMS_SCHEDULER_REASON reason, ULONG_PTR activationPayload, PVOID pData)
    {
        TransmogrifiedPrimary *pRoot = NULL;
        PUMS_CONTEXT pPrimaryContext = UMS::GetCurrentUmsThread();

        if (reason != UmsSchedulerStartup)
        {
            //
            // activationPayload and pData might be NULL (blocking), so we're left with storing the TransmogrifiedPrimary in either
            // TLS or the UMS context (the primary does have one).  At present, it's in the UMS context.
            //
            if (!UMS::QueryUmsThreadInformation(pPrimaryContext, UmsThreadUserContext, &pRoot, sizeof(pRoot), NULL))
                throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError()));
        }
        else
        {
            pRoot = reinterpret_cast<TransmogrifiedPrimary *>(pData);

            if (!UMS::SetUmsThreadInformation(pPrimaryContext, UmsThreadUserContext, &pRoot, sizeof(pRoot)))
                throw scheduler_resource_allocation_error(HRESULT_FROM_WIN32(GetLastError()));
        }

        UMSThreadProxy *pProxy = NULL;

        switch(reason)
        {
            case UmsSchedulerStartup:
            {
                pProxy = pRoot->WaitForWork();

                if (pProxy == NULL)
                {
                    //
                    // No work was found. We are done
                    //
                    return;
                }

                pRoot->Execute(pProxy);

                CONCRT_COREASSERT(false);
                break;
            }

            case UmsSchedulerThreadBlocked:
            {
                pProxy = pRoot->HandleBlocking();

                if (pProxy == NULL)
                {
                    //
                    // No work was found. We are done
                    //
                    return;
                }

                pRoot->Execute(pProxy);

                CONCRT_COREASSERT(false);
                break;
            }
            case UmsSchedulerThreadYield:
            {
                pProxy = pRoot->HandleYielding();

                if (pProxy == NULL)
                {
                    //
                    // No work was found.  We are done.
                    //
                    return;
                }

                pRoot->Execute(pProxy);

                CONCRT_COREASSERT(false);
                break;
            }
            default:
                CONCRT_COREASSERT(false);
                break;
        }
    }
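    //
    // Illustrative sketch (not part of the runtime): on later invocations (in particular when a
    // UT blocks) neither parameter is guaranteed to carry the per-primary pointer, so
    // PrimaryInvocation above parks it in the primary's own UMS context at startup and reads it
    // back on every subsequent entry.  A stripped form of that handshake (MyPrimaryState is a
    // hypothetical stand-in; assumes <windows.h> UMS support, error handling elided):
    //
    struct MyPrimaryState;      // hypothetical per-primary bookkeeping

    static void NTAPI PrimaryEntrySketch(UMS_SCHEDULER_REASON reason, ULONG_PTR activationPayload, PVOID pData)
    {
        UNREFERENCED_PARAMETER(activationPayload);

        PUMS_CONTEXT pPrimaryContext = GetCurrentUmsThread();
        MyPrimaryState *pState = NULL;

        if (reason == UmsSchedulerStartup)
        {
            // First entry: the parameter passed to EnterUmsSchedulingMode is available, stash it.
            pState = reinterpret_cast<MyPrimaryState *>(pData);
            SetUmsThreadInformation(pPrimaryContext, UmsThreadUserContext, &pState, sizeof(pState));
        }
        else
        {
            // Blocked/yield entries: recover the pointer stashed at startup.
            QueryUmsThreadInformation(pPrimaryContext, UmsThreadUserContext, &pState, sizeof(pState), NULL);
        }

        // ... dispatch on reason (startup / blocked / yield) using pState ...
    }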
    /// <summary>
    ///     Handles the completion of a UMS thread.
    /// </summary>
    /// <param name="pCompletion">
    ///     The thread which was noticed on the completion list
    /// </param>
    /// <returns>
    ///     An indication as to whether the thread moved to the transfer list (true).  If false is returned, the thread was special
    ///     (e.g.: a critically blocked thread) and was handled through a different path.
    /// </returns>
    bool UMSSchedulerProxy::HandleCompletion(UMSThreadProxy *pCompletion)
    {
        //
        // We need to make absolutely certain that we know *WHY* the context blocked so we can tell what to do when it comes off the completion list.
        // This is not known until the primary which was invoked sets appropriate flags and then notifies the proxy that it is blocked.  In order to
        // read those bits, we must spin until the proxy has set those flags.
        //
        UMSThreadProxy::BlockingType blockingType = pCompletion->SpinOnAndReturnBlockingType();

        //
        // We are allowing thread termination on the way out of the RM's main loop in order to retire virtual processors and threads simultaneously
        // (a necessary condition in order to work around a Win7 issue).  This means that terminated threads can come back on the completion list.  We
        // do not want to hand such a thread back to the scheduler -- the scheduler should already know (this is *NOT* TerminateThread friendly).
        //
        // Termination will take the same path as critical blocking.  We must ensure elsewhere in the scheduler that threads we allow to terminate in
        // this manner are in hyper-critical regions.
        //
        CONCRT_COREASSERT(!pCompletion->IsTerminated() || blockingType == UMSThreadProxy::BlockingCritical);

#if defined(_DEBUG)
        if (pCompletion->IsTerminated())
        {
            pCompletion->m_UMSDebugBits |= UMS_DEBUGBIT_COMPLETIONTERMINATED;
        }
#endif // _DEBUG

        RPMTRACE(MTRACE_EVT_PULLEDFROMCOMPLETION, pCompletion, NULL, blockingType);

#if defined(_DEBUG)
        CONCRT_COREASSERT(pCompletion->m_UMSDebugBits != UMS_DEBUGBIT_YIELDED);
#endif // _DEBUG

        if (blockingType == UMSThreadProxy::BlockingCritical)
        {
#if defined(_DEBUG)
            pCompletion->m_UMSDebugBits |= UMS_DEBUGBIT_CRITICALNOTIFY;
#endif // _DEBUG
            pCompletion->m_pLastRoot->CriticalNotify();
        }
        else if (!pCompletion->MessagedYield())
        {
#if defined(_DEBUG)
            pCompletion->m_UMSDebugBits |= UMS_DEBUGBIT_TRANSFERLIST;
#endif // _DEBUG

            //
            // Right now, just move the entry to the transfer list.
            //
            InterlockedPushEntrySList(&m_transferList, &(pCompletion->m_listEntry));

            //
            // Set the transferlist event that should wake up vprocs that are deactivated
            //
            if (InterlockedIncrement(&m_pushedBackCount) == 1)
            {
                SetEvent(m_hTransferListEvent);
            }

            return true;
        }
        else
        {
#if defined(_DEBUG)
            pCompletion->m_UMSDebugBits |= UMS_DEBUGBIT_SKIPPEDCOMPLETION;
#endif // _DEBUG
        }

        return false;
    }
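    //
    // Illustrative sketch (not part of the runtime): the transfer-list handoff above pairs a
    // lock-free SLIST push with an event that only the pusher who moves the count from 0 to 1
    // signals, so deactivated virtual processors get a single wake per batch instead of one per
    // completion.  The type below is a hypothetical stand-in showing just that pairing; assumes
    // <windows.h> and that the consumer resets the count and event when it drains the list.
    //
    struct TransferListSketch
    {
        SLIST_HEADER m_list;        // initialized elsewhere with InitializeSListHead
        volatile LONG m_count;      // number of items pushed since the last drain
        HANDLE m_hEvent;            // event the consumer waits on

        void Push(PSLIST_ENTRY pEntry)
        {
            InterlockedPushEntrySList(&m_list, pEntry);

            // Only the 0 -> 1 transition signals; later pushes ride on the same wake.
            if (InterlockedIncrement(&m_count) == 1)
                SetEvent(m_hEvent);
        }
    };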
    /// <summary>
    ///     The method that is called when a thread proxy starts executing a particular context.  The thread proxy which executes
    ///     the context is set in SetProxy before entering the dispatch loop and must be saved and returned on a call to the GetProxy method.
    /// </summary>
    /// <param name="pDispatchState">
    ///     The state under which this IExecutionContext is being dispatched.
    /// </param>
    void UMSSchedulingContext::Dispatch(DispatchState * pDispatchState)
    {
        const int PASS_COUNT_BEFORE_SLEEP_NORMAL = 1;
        const int PASS_COUNT_BEFORE_SLEEP_ORIGINALLY_ACTIVATED = 5;

        CONCRT_COREASSERT(m_pThreadProxy != NULL); 
        SetAsCurrentTls();

#if defined(_DEBUG)
        DWORD fromBits = 0;
#endif // _DEBUG
	
        for(;;)
        {
            int pass = 0;
            UMSThreadInternalContext *pPreviousContext = static_cast<UMSThreadInternalContext *> (m_pBoundVProc->GetExecutingContext());
            ScheduleGroupSegmentBase *pSegment = (pPreviousContext == NULL ? m_pBoundVProc->m_pStartingSegment : pPreviousContext->m_pSegment);

            // **************************************************
            // READ THIS:
            //
            // Yet another incredibly subtle point about where we get suspended.  There are times in the scheduling context's
            // dispatch loop where we can't find work (the critical context is blocked, etc...) and we want to run through a
            // Deactivate pass in order to put the vproc to sleep, much as we do with an ordinary search for work in the dispatch loop.  The unfortunate thing
            // is that there's another context which thinks this is its exclusive purview.  We aren't going to try to maintain a complex state machine to
            // be able to restore its expected state, so we spin if that's the case.
            //
            // Ordinarily, you might think that we can simply check m_pBoundVProc->IsAvailable, however, there might be a race on that such as what follows:
            //
            // - Context 1 on vproc A makes the vproc available and then blocks
            // - Context 2 on vproc B claims exclusive ownership of the virtual processor (it suspends, takes a while, take your pick)
            // - We get in here and see the virtual processor as not available so we think we're safe to make it available
            // - We make the virtual processor available
            // - Context 3 on vproc C claims exclusive ownership of the virtual processor (now 2 contexts think they have exclusive ownership)
            //
            // There are other potential races as well.  What we really need to know is if there IS a context in the dispatch loop that has made the virtual
            // processor available.  It doesn't necessarily need to be pPreviousContext because the original context might have critically blocked in that region
            // and we might be running someone else.  Hence the rule -- you **MUST** stay in a critical region between the call to MakeAvailable and the call to Deactivate
            // without exception.  No other MakeAvailable is permitted.  Once we know what the critical context is, we can check it to see if IT thinks IT has flagged
            // the virtual processor.  That check must come **BEFORE** the call to MakeAvailable and must be fenced by the time m_fAvailable is set to true.
            // **************************************************

            bool fOriginallyAvailable = false;
            bool fMadeAvailable = false;

            int passes = fOriginallyAvailable ? PASS_COUNT_BEFORE_SLEEP_ORIGINALLY_ACTIVATED : PASS_COUNT_BEFORE_SLEEP_NORMAL;

            UMSThreadInternalContext::BlockingType blockingType = UMSThreadInternalContext::BlockingNormal;
            CriticalRegionType type = OutsideCriticalRegion;

            //
            // If someone explicitly switched back to the primary, don't do the UMS blocked bit.  Instead, just conduct the search from
            // the primary for runnables or invoke the reserved context as appropriate.  We detect this case because Affinitize clears
            // the executing proxy, leaving pPreviousContext NULL here.
            //
            if (pPreviousContext != NULL)
            {
                VCMTRACE(MTRACE_EVT_UMSBLOCKED, pPreviousContext, m_pBoundVProc, NULL);

                CONCRT_COREASSERT(pPreviousContext->UNSAFE_CurrentVirtualProcessor() == m_pBoundVProc);
                CONCRT_COREASSERT(!pPreviousContext->IsBlocked());
                CONCRT_COREASSERT(pPreviousContext->m_pThreadProxy != NULL);
#if defined(_DEBUG)
                //
                // If the context UMS blocks while it's holding a UMS blocked context prior to the switch, we can deadlock in a variety of ways.
                // Assert this instead of relying on stress to ferret this out.
                //
                CONCRT_COREASSERT((pPreviousContext->GetDebugBits() & CTX_DEBUGBIT_HOLDINGUMSBLOCKEDCONTEXT) == 0);
#endif // _DEBUG
                type = pPreviousContext->GetCriticalRegionType();

            }

            CONCRT_COREASSERT(type != InsideHyperCriticalRegion);

            if (m_pBoundVProc->m_pCriticalContext != NULL)
            {
                //
                // Only 1 context can be inside the critical region at a time
                //
                CONCRT_COREASSERT(pPreviousContext->GetCriticalRegionType() == OutsideCriticalRegion);
            }
            else if (type != OutsideCriticalRegion)
            {
                //
                // A thread/context inside a critical region blocked
                //
                CONCRT_COREASSERT(m_pBoundVProc->m_pCriticalContext == NULL);
                VCMTRACE(MTRACE_EVT_CRITICALBLOCK, pPreviousContext, m_pBoundVProc, NULL);
                m_pBoundVProc->m_pCriticalContext = pPreviousContext;
                blockingType = UMSThreadInternalContext::BlockingCritical;
            }

            bool fCritical = (m_pBoundVProc->m_pCriticalContext != NULL);

            //
            // Any context which made a virtual processor available darn well better be in a critical region until they claim it again.
            //
            UMSThreadInternalContext *pCriticalContext = m_pBoundVProc->m_pCriticalContext;
            CONCRT_COREASSERT(!fOriginallyAvailable || pCriticalContext != NULL);

            if (pCriticalContext != NULL && pCriticalContext->m_fIsVisibleVirtualProcessor)
            {
                fOriginallyAvailable = true;
            }

            //
            // pSegment might be NULL because we've looped around, or because someone blocked during a context recycling
            // after we've already NULL'd the group out.  In either case, we go to the anonymous schedule group to start the search.
            //
            if (pSegment == NULL)
            {
                pSegment = m_pBoundVProc->GetOwningRing()->GetAnonymousScheduleGroupSegment();
            }

            if (pPreviousContext != NULL)
            {
                pPreviousContext->SetBlockingType(blockingType);
            }

            //
            // The push context comes first (it is what the vproc was started with).  We do *NOT* push to idle vprocs -- only to inactive ones.
            //
            InternalContextBase *pContext = m_pBoundVProc->m_pPushContext;
            m_pBoundVProc->m_pPushContext = NULL;

            while (pContext == NULL)
            {
                if (m_pBoundVProc->m_pCriticalContext != NULL)
                {
                    //
                    // Sweep the completion list if we are waiting for a critical context.
                    // Otherwise the search for runnable would do the sweep.
                    //
                    m_pScheduler->MoveCompletionListToRunnables();

                    //
                    // The critical context is **ALWAYS** first priority -- no matter what!  Since we are the only thread that picks up critical contexts
                    // due to SFW happening in a critical region, there's no CAS.  We can simply clear the flag when appropriate.
                    //
                    if (m_pBoundVProc->m_fCriticalIsReady)
                    {
                        pContext = m_pBoundVProc->m_pCriticalContext;
                        m_pBoundVProc->m_fCriticalIsReady = FALSE;
                        m_pBoundVProc->m_pCriticalContext = NULL;

#if defined(_DEBUG)
                        fromBits = CTX_DEBUGBIT_PRIMARYAFFINITIZEFROMCRITICAL;
#endif // _DEBUG
                        CONCRT_COREASSERT(pContext != NULL);
                    }
                }
                else
                {
                    CONCRT_COREASSERT(!m_pBoundVProc->m_fCriticalIsReady);
                }

                //
                // Next priority is searching for contexts to run.  
                //
                if (pContext == NULL)
                {
                    //
                    // We need to do a full search for runnables.  This means all scheduling rings, nodes, LRCs, etc...  The reason for this is subtle.  Normally,
                    // if we can't quickly find something to run, we switch to the reserved context which is a real search context and everyone is happy (we keep the virtual
                    // processor active).  The only time we'll put the virtual processor to sleep HERE is when there's a critical context blocked or there are no reserved
                    // contexts.
                    // You might think we're okay to do that because the wakings there explicitly notify us.  Unfortunately, those special contexts might be blocked
                    // on a lock held by an ARBITRARY context.  That ARBITRARY context might have been moved to a runnables list in a different scheduling ring/node by
                    // the MoveCompletionListToRunnables above.  Therefore, we must do a FULL search for runnables here across all rings.
                    //
                    WorkItem work;
                    if (m_pBoundVProc->SearchForWork(&work, pSegment, false, WorkItem::WorkItemTypeContext))
                    {
                        pContext = work.GetContext();
#if defined(_DEBUG)
                        CMTRACE(MTRACE_EVT_SFW_FOUNDBY, pContext, m_pBoundVProc, NULL);

                        fromBits = CTX_DEBUGBIT_PRIMARYAFFINITIZEFROMSEARCH;
#endif // _DEBUG
                    }

                }

                //
                // If we could not find anyone to run by this point, we're stuck having to create a new SFW context.  This should only happen
                // if we're **NOT** critically blocked.
                //
                if (!fCritical && pContext == NULL)
                {
                    pContext = m_pScheduler->GetReservedContext();
                    if (pContext == NULL)
                        m_pScheduler->DeferredGetInternalContext();
                    else
                        pContext->PrepareForUse(m_pScheduler->GetAnonymousScheduleGroupSegment(), NULL, false);

#if defined(_DEBUG)
                    fromBits = CTX_DEBUGBIT_PRIMARYRESERVEDCONTEXT;
#endif // _DEBUG
                }

                if (pPreviousContext != NULL)
                {
                    //
                    // After one time through the search loop from the source, let go of the previous context.  This means we can no longer originate
                    // a search from the source group.  We cannot place a reference here because removing it might entail a deletion from the ListArray
                    // which cannot happen on the primary.  Just search outward from the anonymous schedule group if we cannot find anything the first time
                    // through.
                    //
                    if (pContext == NULL)
                    {
                        pSegment = m_pBoundVProc->GetOwningRing()->GetAnonymousScheduleGroupSegment();
                    }

                    SetUMSBlocked(pPreviousContext, pDispatchState->m_fIsPreviousContextAsynchronouslyBlocked);
                    pPreviousContext = NULL;
                }

                if (pContext == NULL)
                {
                    //
                    // Make a series of passes through the "special SFW" above and then put the virtual processor to sleep.
                    //
                    pass++;
                    if (pass == passes)
                    {
                        //
                        // Make the virtual processor available and perform a flush.  We need to make one more loop to "search for work"
                        // as it's entirely possible we raced with a wake notification on the critical context or reserved context list event.
                        //
                        // It's also entirely possible that a context, in its last SFW loop after making the virtual processor available, UMS-blocked and got us
                        // back here.  In that case, we need to remember this because special handling is required.  Instead of having a horribly complex state
                        // machine to manage this particular race, we simply don't Deactivate here and instead, we poll.  Much safer.
                        //
                        if (!fOriginallyAvailable)
                        {
                            fMadeAvailable = true;
                            m_pBoundVProc->MakeAvailableFromSchedulingContext();
                        }

                        //
                        // Currently safe because this is simply a flush that doesn't restore any state or wait on any events.
                        //
                        m_pBoundVProc->EnsureAllTasksVisible(this);
                    }
                    else if (pass > passes)
                    {
                        //
                        // Because we're not running on a context, we cannot participate in finalization and yet we are putting this virtual processor
                        // to sleep.  In order to do that safely, we must have a guarantee that something will wake *US* up.  That basically means that
                        // we have a special context blocked -- either a critically blocked context or one waiting on the reserved context event.

                        //
                        // Put the virtual processor to sleep for real.  If we wake up for *ANY* reason (doesn't matter if it's the completion notification
                        // or not), loop back up and perform another SFW.
                        //
                        if (!fOriginallyAvailable)
                        {
                            if (!m_pBoundVProc->Deactivate(this))
                            {
                                //
                                // This indicates that something came back on the completion list.  We really do want to do a FULL SFW here.  We need to claim
                                // ownership of the VProc.
                                //
                                ClaimBoundProcessorAndSwallowActivation();
                            }

                            CONCRT_COREASSERT(!m_pBoundVProc->IsAvailable());

                            fMadeAvailable = false;
                        }
                        else
                        {
                            //
                            // In order to avoid horrible race conditions with the context which made this virtual processor available, we simply sleep, loop back
                            // up and check again.
                            //
                            // MINIMIZE blocking between MakeAvailable and Deactivate within the dispatch loop.  This path has a big performance penalty.
                            // Also -- NEVER release the critical region between those two calls (see above).
                            //
                            Sleep(100);
                        }

                        pass = 0;
                    }
                }
            }

            //
            // If we made the virtual processor available, we need to make it not so right now -- we're going to execute a context.
            //
            if (fMadeAvailable)
            {
                ClaimBoundProcessorAndSwallowActivation();
            }

            CONCRT_COREASSERT(!m_pBoundVProc->IsAvailable());

            m_pBoundVProc->Affinitize(pContext);
#if defined(_DEBUG)
            pContext->SetDebugBits(fromBits);
#endif // _DEBUG
            m_pThreadProxy->SwitchTo(pContext, Blocking);

            //
            // If we get here, it indicates that the SwitchTo failed as a result of the underlying thread blocking asynchronously (e.g.: it was suspended or
            // had a kernel APC running atop it when we tried to SwitchTo it).  In this case, just go back up and pick another runnable.  There's one absolutely
            // critical thing here.  We affinitized the vproc to pContext.  It isn't executing pContext and never was.  The execute failed because of a thread
            // suspension, kernel APC, etc...  After looping back, we *CANNOT* rely on vproc relative fields.  We simply pick another context on the basis of 
            // information we already know and switch.
            //
            // On success, SwitchTo will snap out our stack (such is the way of the world on the UMS primary).
            //
#if defined(_DEBUG)
            pContext->SetDebugBits(CTX_DEBUGBIT_PRIMARYSWITCHTOFAILED);
#endif // _DEBUG

        }

        return;
        
    }