Example #1
RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);

    int rcBSD = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);

    if (rcBSD == AIO_CANCELED)
    {
        /*
         * Decrement request count because the request will never arrive at the
         * completion port.
         */
        AssertMsg(VALID_PTR(pReqInt->pCtxInt),
                  ("Invalid state. Request was canceled but wasn't submitted\n"));

        ASMAtomicDecS32(&pReqInt->pCtxInt->cRequests);
        pReqInt->Rc = VERR_FILE_AIO_CANCELED;
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
        return VINF_SUCCESS;
    }
    else if (rcBSD == AIO_ALLDONE)
        return VERR_FILE_AIO_COMPLETED;
    else if (rcBSD == AIO_NOTCANCELED)
        return VERR_FILE_AIO_IN_PROGRESS;
    else
        return RTErrConvertFromErrno(errno);
}
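For context, a caller-side sketch of how the three outcomes above map back to the caller. This is a hypothetical helper, assuming the request was created with RTFileAioReqCreate and submitted through an RTFILEAIOCTX; it is not part of the source above.

#include <iprt/file.h>
#include <iprt/err.h>

/* Hypothetical helper illustrating the RTFileAioReqCancel outcomes. */
static void tryCancelRequest(RTFILEAIOREQ hReq)
{
    int rc = RTFileAioReqCancel(hReq);
    if (RT_SUCCESS(rc))
    {
        /* Canceled: the request completes with VERR_FILE_AIO_CANCELED
           and will never arrive at the completion port. */
    }
    else if (rc == VERR_FILE_AIO_COMPLETED)
    {
        /* Too late, it already finished; reap it normally. */
    }
    else if (rc == VERR_FILE_AIO_IN_PROGRESS)
    {
        /* Cannot be canceled any longer; wait for it to complete. */
    }
}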
Example #2
RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    uint32_t                cLockers;
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Get the saved state and try to release the semaphore.
     */
    State = pThis->SavedState;
    ASMCompilerBarrier();
    ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc);
    AssertMsgReturn(fRc,
                    ("hOwner=%p hSelf=%p cLockers=%d\n", pThis->hOwner, hSelf, pThis->cLockers),
                    VERR_NOT_OWNER);

    cLockers = ASMAtomicDecS32(&pThis->cLockers);
    rtSemSpinMutexLeave(&State);
    if (cLockers > 0)
    {
        int rc = RTSemEventSignal(pThis->hEventSem);
        AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
    }
    return VINF_SUCCESS;
}
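For context, a minimal request/release pairing for this semaphore type; a sketch only, assuming the handle was created with RTSemSpinMutexCreate (ring-0 code).

#include <iprt/semaphore.h>

/* Sketch: the usual request/release bracket around shared state. */
static int updateUnderSpinMutex(RTSEMSPINMUTEX hSpinMtx)
{
    int rc = RTSemSpinMutexRequest(hSpinMtx);
    if (RT_FAILURE(rc))
        return rc;
    /* ... touch the protected state ... */
    return RTSemSpinMutexRelease(hSpinMtx);
}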
Example #3
/**
 * The slave DPC callback for an omni timer.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
Example #4
/**
 * Releases a VBoxFUSE node reference and unlocks it.
 *
 * @returns true if deleted, false if not.
 * @param   pNode   The node.
 */
static bool vboxfuseNodeReleaseAndUnlock(PVBOXFUSENODE pNode)
{
    if (ASMAtomicDecS32(&pNode->cRefs) == 0)
        return vboxfuseNodeDestroy(pNode, true /* fLocked */);
    vboxfuseNodeUnlock(pNode);
    return false;
}
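The retain side of this pattern is a single atomic increment; a hypothetical counterpart mirroring the release above (not taken from the source):

/* Hypothetical retain; the caller must already hold a reference. */
static void vboxfuseNodeRetain(PVBOXFUSENODE pNode)
{
    int32_t cRefs = ASMAtomicIncS32(&pNode->cRefs);
    Assert(cRefs > 1); NOREF(cRefs);
}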
Example #5
RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);

    LNXKAIOIOEVENT AioEvent;
    int rc = rtFileAsyncIoLinuxCancel(pReqInt->AioContext, &pReqInt->AioCB, &AioEvent);
    if (RT_SUCCESS(rc))
    {
        /*
         * Decrement request count because the request will never arrive at the
         * completion port.
         */
        AssertMsg(VALID_PTR(pReqInt->pCtxInt),
                  ("Invalid state. Request was canceled but wasn't submitted\n"));

        ASMAtomicDecS32(&pReqInt->pCtxInt->cRequests);
        pReqInt->Rc = VERR_FILE_AIO_CANCELED;
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
        return VINF_SUCCESS;
    }
    if (rc == VERR_TRY_AGAIN)
        return VERR_FILE_AIO_IN_PROGRESS;
    return rc;
}
Example #6
/**
 * Undoing rtstrFormatTypeReadLock.
 */
DECLINLINE(void) rtstrFormatTypeReadUnlock(void)
{
#if defined(RTSTRFORMATTYPE_WITH_LOCKING)
    Assert(g_i32Spinlock > 0);
    ASMAtomicDecS32(&g_i32Spinlock);
#endif
}
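For reference, a sketch of what the matching read-lock could look like, inferred from the unlock above under the assumption that writers park g_i32Spinlock at a negative value; this is an illustration, not the verbatim IPRT code.

DECLINLINE(void) rtstrFormatTypeReadLock(void)
{
#if defined(RTSTRFORMATTYPE_WITH_LOCKING)
    /* Assumed encoding: >= 0 means readers only, negative means a writer
       holds the lock. Back out and retry while a writer is active. */
    while (RT_UNLIKELY(ASMAtomicIncS32(&g_i32Spinlock) < 0))
        ASMAtomicDecS32(&g_i32Spinlock);
#endif
}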
Example #7
/**
 * @copydoc RAWPCIFACTORY::pfnRelease
 */
static DECLCALLBACK(void) vboxPciFactoryRelease(PRAWPCIFACTORY pFactory)
{
    PVBOXRAWPCIGLOBALS pGlobals = (PVBOXRAWPCIGLOBALS)((uint8_t *)pFactory - RT_OFFSETOF(VBOXRAWPCIGLOBALS, RawPciFactory));

    int32_t cRefs = ASMAtomicDecS32(&pGlobals->cFactoryRefs);
    Assert(cRefs >= 0); NOREF(cRefs);
    LogFlow(("vboxPciFactoryRelease: cRefs=%d (new)\n", cRefs));
}
Example #8
/**
 * @copydoc INTNETTRUNKFACTORY::pfnRelease
 */
static DECLCALLBACK(void) vboxNetAdpFactoryRelease(PINTNETTRUNKFACTORY pIfFactory)
{
    PVBOXNETADPGLOBALS pGlobals = (PVBOXNETADPGLOBALS)((uint8_t *)pIfFactory - RT_OFFSETOF(VBOXNETADPGLOBALS, TrunkFactory));

    int32_t cRefs = ASMAtomicDecS32(&pGlobals->cFactoryRefs);
    Assert(cRefs >= 0); NOREF(cRefs);
    LogFlow(("vboxNetAdpFactoryRelease: cRefs=%d (new)\n", cRefs));
}
Example #9
RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert sanity and check for NOP.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

#ifdef RTCRITSECT_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(pCritSect->pValidatorRec, pCritSect->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Decrement the nesting count; the critsect is only released when it reaches zero.
     */
    pCritSect->cNestings--;
    if (pCritSect->cNestings > 0)
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Clear the owner.
         * Decrement the lockers count; if the result is >= 0 somebody is
         * waiting and we have to wake one of them up.
         */
        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}
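A minimal enter/leave pairing for completeness; a sketch with a hypothetical section instance that is initialized elsewhere via RTCritSectInit.

#include <iprt/critsect.h>

static RTCRITSECT g_MyCritSect; /* hypothetical; RTCritSectInit(&g_MyCritSect) at startup */

static int updateGuardedState(void)
{
    int rc = RTCritSectEnter(&g_MyCritSect);
    if (RT_FAILURE(rc))
        return rc;
    /* ... modify the guarded state; recursive enters just bump cNestings ... */
    return RTCritSectLeave(&g_MyCritSect);
}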
Example #10
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMMUTEX_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, pThis->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Check if nested.
     */
    pthread_t Self = pthread_self();
    if (RT_UNLIKELY(    pThis->Owner != Self
                        ||  pThis->cNestings == 0))
    {
        AssertMsgFailed(("Not owner of mutex %p!! Self=%08x Owner=%08x cNestings=%d\n",
                         pThis, Self, pThis->Owner, pThis->cNestings));
        return VERR_NOT_OWNER;
    }

    /*
     * If nested we'll just pop a nesting.
     */
    if (pThis->cNestings > 1)
    {
        ASMAtomicDecU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

    /*
     * Clear the state. (cNestings == 1)
     */
    pThis->Owner = (pthread_t)~0;
    ASMAtomicWriteU32(&pThis->cNestings, 0);

    /*
     * Release the mutex.
     */
    int32_t iNew = ASMAtomicDecS32(&pThis->iState);
    if (RT_UNLIKELY(iNew != 0))
    {
        /* somebody is waiting, try wake up one of them. */
        ASMAtomicXchgS32(&pThis->iState, 0);
        (void)sys_futex(&pThis->iState, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
    return VINF_SUCCESS;
}
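The public pairing around this release looks roughly like the sketch below; RTSemMutexRequest is the call that blocks on the futex this backend wakes.

#include <iprt/semaphore.h>

/* Sketch: create/request/release/destroy round trip. */
static int doSerializedWork(void)
{
    RTSEMMUTEX hMtx;
    int rc = RTSemMutexCreate(&hMtx);
    if (RT_FAILURE(rc))
        return rc;
    rc = RTSemMutexRequest(hMtx, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        /* ... owned section; nested requests just bump cNestings ... */
        RTSemMutexRelease(hMtx);
    }
    RTSemMutexDestroy(hMtx);
    return rc;
}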
Example #11
/**
 * Initializes the ring-0 driver runtime library.
 *
 * @returns iprt status code.
 * @param   fReserved       Flags reserved for the future.
 */
RTR0DECL(int) RTR0Init(unsigned fReserved)
{
    int rc;
    uint32_t cNewUsers;
    Assert(fReserved == 0);
#ifndef RT_OS_SOLARIS       /* On Solaris our thread preemption information is only obtained in rtR0InitNative().*/
    RT_ASSERT_PREEMPTIBLE();
#endif

    /*
     * The first user initializes it.
     * We rely on the module loader to ensure that there are no
     * initialization races should two modules share the IPRT.
     */
    cNewUsers = ASMAtomicIncS32(&g_crtR0Users);
    if (cNewUsers != 1)
    {
        if (cNewUsers > 1)
            return VINF_SUCCESS;
        ASMAtomicDecS32(&g_crtR0Users);
        return VERR_INTERNAL_ERROR_3;
    }

    rc = rtR0InitNative();
    if (RT_SUCCESS(rc))
    {
#ifdef RTR0MEM_WITH_EF_APIS
        rtR0MemEfInit();
#endif
        rc = rtThreadInit();
        if (RT_SUCCESS(rc))
        {
#ifndef IN_GUEST /* play safe for now */
            rc = rtR0MpNotificationInit();
            if (RT_SUCCESS(rc))
            {
                rc = rtR0PowerNotificationInit();
                if (RT_SUCCESS(rc))
                    return rc;
                rtR0MpNotificationTerm();
            }
#else
            if (RT_SUCCESS(rc))
                return rc;
#endif
            rtThreadTerm();
        }
#ifdef RTR0MEM_WITH_EF_APIS
        rtR0MemEfTerm();
#endif
        rtR0TermNative();
    }
    return rc;
}
Example #12
/**
 * Terminates the ring-0 driver runtime library.
 */
RTR0DECL(void) RTR0Term(void)
{
    int32_t cNewUsers;
    RT_ASSERT_PREEMPTIBLE();

    cNewUsers = ASMAtomicDecS32(&g_crtR0Users);
    Assert(cNewUsers >= 0);
    if (cNewUsers == 0)
        rtR0Term();
    else if (cNewUsers < 0)
        ASMAtomicIncS32(&g_crtR0Users);
}
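Typical pairing in a ring-0 module; the load/unload hooks below are hypothetical glue, only the RTR0Init/RTR0Term calls are taken from the source.

/* Hypothetical module entry points showing the init/term pairing. */
static int myModuleLoad(void)
{
    int rc = RTR0Init(0 /* fReserved */);
    if (RT_FAILURE(rc))
        return rc;
    /* ... driver initialization that may use IPRT ... */
    return VINF_SUCCESS;
}

static void myModuleUnload(void)
{
    /* ... driver cleanup ... */
    RTR0Term();
}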
Example #13
/**
 * Wrapper between the native NT per-cpu DPC callbacks and PFNRTMPWORKER.
 *
 * @param   Dpc                 DPC object
 * @param   DeferredContext     Context argument specified by KeInitializeDpc
 * @param   SystemArgument1     Argument specified by KeInsertQueueDpc
 * @param   SystemArgument2     Argument specified by KeInsertQueueDpc
 */
static VOID __stdcall rtmpNtDPCWrapper(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTMPARGS pArgs = (PRTMPARGS)DeferredContext;

    ASMAtomicIncU32(&pArgs->cHits);
    pArgs->pfnWorker(KeGetCurrentProcessorNumber(), pArgs->pvUser1, pArgs->pvUser2);

    /* Dereference the argument structure. */
    int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
    Assert(cRefs >= 0);
    if (cRefs == 0)
        ExFreePool(pArgs);
}
Example #14
HRESULT ListenerRecord::dequeue(IEvent **aEvent,
                                LONG aTimeout,
                                AutoLockBase &aAlock)
{
    if (mActive)
        return VBOX_E_INVALID_OBJECT_STATE;

    // retain listener record
    RecordHolder<ListenerRecord> holder(this);

    ::RTCritSectEnter(&mcsQLock);

    mLastRead = RTTimeMilliTS();

    /*
     * If waiting is both desired and necessary, try to grab the event
     * semaphore and mark it busy.  If it's NIL we've been shut down already.
     */
    if (aTimeout != 0 && mQueue.empty())
    {
        RTSEMEVENT hEvt = mQEvent;
        if (hEvt != NIL_RTSEMEVENT)
        {
            ASMAtomicIncS32(&mQEventBusyCnt);
            ::RTCritSectLeave(&mcsQLock);

            // release lock while waiting, listener will not go away due to above holder
            aAlock.release();

            ::RTSemEventWait(hEvt, aTimeout);
            ASMAtomicDecS32(&mQEventBusyCnt);

            // reacquire lock
            aAlock.acquire();
            ::RTCritSectEnter(&mcsQLock);
        }
    }

    if (mQueue.empty())
        *aEvent = NULL;
    else
    {
        mQueue.front().queryInterfaceTo(aEvent);
        mQueue.pop_front();
    }

    ::RTCritSectLeave(&mcsQLock);
    return S_OK;
}
Example #15
/**
 * Internal initialization worker.
 *
 * @returns IPRT status code.
 * @param   fFlags          Flags, see RTR3INIT_XXX.
 * @param   cArgs           The argument count.
 * @param   papszArgs       Pointer to the argument vector pointer.  NULL
 *                          allowed if @a cArgs is 0.
 * @param   pszProgramPath  The program path.  Pass NULL if we're to figure it
 *                          out ourselves.
 */
static int rtR3Init(uint32_t fFlags, int cArgs, char ***papszArgs, const char *pszProgramPath)
{
    /* no entry log flow, because prefixes and thread may freak out. */
    Assert(!(fFlags & ~(RTR3INIT_FLAGS_DLL | RTR3INIT_FLAGS_SUPLIB)));
    Assert(!(fFlags & RTR3INIT_FLAGS_DLL) || cArgs == 0);

    /*
     * Do reference counting, only initialize the first time around.
     *
     * We are ASSUMING that nobody will be able to race the RTR3Init* calls
     * while the first one, the real init, is running (see the second assertion).
     */
    int32_t cUsers = ASMAtomicIncS32(&g_cUsers);
    if (cUsers != 1)
    {
        AssertMsg(cUsers > 1, ("%d\n", cUsers));
        Assert(!g_fInitializing);
#if !defined(IN_GUEST) && !defined(RT_NO_GIP)
        if (fFlags & RTR3INIT_FLAGS_SUPLIB)
            SUPR3Init(NULL);
#endif
        if (!pszProgramPath)
            return VINF_SUCCESS;

        int rc = rtR3InitProgramPath(pszProgramPath);
        if (RT_SUCCESS(rc))
            rc = rtR3InitArgv(fFlags, cArgs, papszArgs);
        return rc;
    }
    ASMAtomicWriteBool(&g_fInitializing, true);

    /*
     * Do the initialization.
     */
    int rc = rtR3InitBody(fFlags, cArgs, papszArgs, pszProgramPath);
    if (RT_FAILURE(rc))
    {
        /* failure */
        ASMAtomicWriteBool(&g_fInitializing, false);
        ASMAtomicDecS32(&g_cUsers);
        return rc;
    }

    /* success */
    LogFlow(("rtR3Init: returns VINF_SUCCESS\n"));
    ASMAtomicWriteBool(&g_fInitializing, false);
    return VINF_SUCCESS;
}
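Callers normally reach this worker through the public init wrappers; a minimal executable entry point might look like this (a sketch using RTR3InitExe and RTMsgInitFailure).

#include <iprt/initterm.h>
#include <iprt/message.h>

int main(int argc, char **argv)
{
    int rc = RTR3InitExe(argc, &argv, 0 /* fFlags */);
    if (RT_FAILURE(rc))
        return RTMsgInitFailure(rc);
    /* ... program body ... */
    return 0;
}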
Example #16
HRESULT ListenerRecord::enqueue(IEvent *aEvent)
{
    AssertMsg(!mActive, ("must be passive\n"));

    // put an event into the queue
    ::RTCritSectEnter(&mcsQLock);

    // If nobody has been reading events from this listener for a long time
    // while events keep coming, or if the queue is oversized, we unregister it.
    uint64_t sinceRead = RTTimeMilliTS() - mLastRead;
    size_t queueSize = mQueue.size();
    if (queueSize > 1000 || (queueSize > 500 && sinceRead > 60 * 1000))
    {
        ::RTCritSectLeave(&mcsQLock);
        return E_ABORT;
    }

    RTSEMEVENT hEvt = mQEvent;
    if (queueSize != 0 && mQueue.back() == aEvent)
        /* If the same event is being pushed multiple times, it's a reusable
           event and we don't need multiple instances of it in the queue. */
        hEvt = NIL_RTSEMEVENT;
    else if (hEvt != NIL_RTSEMEVENT) /* don't bother queuing after shutdown */
    {
        mQueue.push_back(aEvent);
        ASMAtomicIncS32(&mQEventBusyCnt);
    }

    ::RTCritSectLeave(&mcsQLock);

    // notify waiters unless we've been shut down.
    if (hEvt != NIL_RTSEMEVENT)
    {
        ::RTSemEventSignal(hEvt);
        ASMAtomicDecS32(&mQEventBusyCnt);
    }

    return S_OK;
}
Example #17
/**
 * Common worker for clientClose and VBoxDrvDarwinClose.
 */
/* static */ void org_virtualbox_SupDrvClient::sessionClose(RTPROCESS Process)
{
    /*
     * Find the session and remove it from the hash table.
     *
     * Note! Only one session per process. (Both start() and
     * VBoxDrvDarwinOpen make sure this is so.)
     */
    const unsigned  iHash = SESSION_HASH(Process);
    RTSpinlockAcquire(g_Spinlock);
    PSUPDRVSESSION  pSession = g_apSessionHashTab[iHash];
    if (pSession)
    {
        if (pSession->Process == Process)
        {
            g_apSessionHashTab[iHash] = pSession->pNextHash;
            pSession->pNextHash = NULL;
            ASMAtomicDecS32(&g_cSessions);
        }
        else
        {
            PSUPDRVSESSION pPrev = pSession;
            pSession = pSession->pNextHash;
            while (pSession)
            {
                if (pSession->Process == Process)
                {
                    pPrev->pNextHash = pSession->pNextHash;
                    pSession->pNextHash = NULL;
                    ASMAtomicDecS32(&g_cSessions);
                    break;
                }

                /* next */
                pPrev = pSession;
                pSession = pSession->pNextHash;
            }
        }
    }
    RTSpinlockReleaseNoInts(g_Spinlock);
    if (!pSession)
    {
        Log(("SupDrvClient::sessionClose: pSession == NULL, pid=%d; freed already?\n", (int)Process));
        return;
    }

    /*
     * Remove it from the client object.
     */
    org_virtualbox_SupDrvClient *pThis = (org_virtualbox_SupDrvClient *)pSession->pvSupDrvClient;
    pSession->pvSupDrvClient = NULL;
    if (pThis)
    {
        Assert(pThis->m_pSession == pSession);
        pThis->m_pSession = NULL;
    }

    /*
     * Close the session.
     */
    supdrvCloseSession(&g_DevExt, pSession);
}
Example #18
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
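A typical caller-side bracket for this section; a sketch, with the rcBusy status used in ring-0/RC where the enter call may refuse to block.

/* Sketch: guarded access to shared device state. */
static int accessDeviceState(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY /* rcBusy */);
    if (rc == VINF_SUCCESS)
    {
        /* ... access the state owned by pCritSect ... */
        PDMCritSectLeave(pCritSect);
    }
    /* else rc == VERR_SEM_BUSY: contended in ring-0/RC, defer to ring-3. */
    return rc;
}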
Example #19
static int rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume)
{
    PCRTLOCKVALSRCPOS pSrcPos = NULL;

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Quickly check whether it's signaled.
     */
    /** @todo this isn't fair if someone is already waiting on it.  They should
     *        have the first go at it!
     *  (ASMAtomicReadS32(&pThis->cWaiters) == 0 || !cMillies) && ... */
    if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
        return VINF_SUCCESS;

    /*
     * Convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        if (!cMillies)
            return VERR_TIMEOUT;
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    ASMAtomicIncS32(&pThis->cWaiters);

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef RTSEMEVENT_STRICT
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       cMillies, RTTHREADSTATE_EVENT, true);
            if (RT_FAILURE(rc))
                break;
        }
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
        long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, pTimeout, NULL, 0);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
        if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK))
        {
            /* successful wakeup or fSignalled > 0 in the meantime */
            if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
                break;
        }
        else if (lrc == -ETIMEDOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
        else if (lrc == -EINTR)
        {
            if (!fAutoResume)
            {
                rc = VERR_INTERRUPTED;
                break;
            }
        }
        else
        {
            /* this shouldn't happen! */
            AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno));
            rc = RTErrConvertFromErrno(lrc);
            break;
        }
        /* adjust the relative timeout */
        if (pTimeout)
        {
            int64_t i64Diff = u64End - RTTimeSystemNanoTS();
            if (i64Diff < 1000)
            {
                rc = VERR_TIMEOUT;
                break;
            }
            ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
            ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
        }
    }

    ASMAtomicDecS32(&pThis->cWaiters);
    return rc;
}
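The corresponding producer/consumer pairing, as a sketch with hypothetical helpers:

#include <iprt/semaphore.h>

/* Sketch: one thread waits, another signals. */
static int consumerWait(RTSEMEVENT hEvt)
{
    /* VINF_SUCCESS on wakeup, VERR_TIMEOUT after 1s, VERR_SEM_DESTROYED, ... */
    return RTSemEventWait(hEvt, 1000 /* ms */);
}

static void producerPost(RTSEMEVENT hEvt)
{
    RTSemEventSignal(hEvt); /* sets fSignalled and wakes one waiter */
}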
Example #20
void BusAssignmentManager::Release()
{
    if (ASMAtomicDecS32(&pState->cRefCnt) == 0)
        delete this;
}
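The matching retain is again a single atomic increment; a hypothetical counterpart (the real class may expose a differently named method):

void BusAssignmentManager::AddRef()
{
    ASMAtomicIncS32(&pState->cRefCnt);
}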
Example #21
/**
 * Notify the XPCOM thread that we consumed an XPCOM event.
 */
void consumedXPCOMUserEvent(void)
{
    ASMAtomicDecS32(&g_s32XPCOMEventsPending);
}
Example #22
void release()
{
    if (ASMAtomicDecS32(&mRefCnt) <= 0) delete this;
}
Example #23
DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling, and is the order right?
     */
    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD        hThreadSelf = pCritSect->pValidatorRec
                                ? RTThreadSelfAutoAdopt()
                                : RTThreadSelf();
    int             rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
        rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Increment the lockers counter.
     * The result is 0 if the section was free (the counter idles at -1).
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#else
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
Example #24
/**
 * Releases a handle reference.
 * @param   pHandle             The handle to release.
 */
DECLINLINE(void) MsiHackHandleRelease(PMSIHACKHANDLE pHandle)
{
    if (ASMAtomicDecS32(&pHandle->cRefs) != 0)
        return;
    MsiHackHandleDestroy(pHandle);
}
Example #25
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * perform the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET    OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single shot timers get complicated wrt fSuspended maintenance.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
Example #26
/**
 * Internal worker for the RTMpOn* APIs.
 *
 * @returns IPRT status code.
 * @param   pfnWorker   The callback.
 * @param   pvUser1     User argument 1.
 * @param   pvUser2     User argument 2.
 * @param   enmCpuid    What to do / is idCpu valid.
 * @param   idCpu       Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
 *                      RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   idCpu2      Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   pcHits      Where to return the number of this. Optional.
 */
static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
                             RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
{
    PRTMPARGS pArgs;
    KDPC     *paExecCpuDpcs;

#if 0
    /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
     * driver verifier doesn't complain...
     */
    AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
#endif

#ifdef IPRT_TARGET_NT4
    KAFFINITY Mask;
    /* g_pfnrtNt* are not present on NT4 anyway. */
    return VERR_NOT_SUPPORTED;
#else
    KAFFINITY Mask = KeQueryActiveProcessors();
#endif

    /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
    if (!g_pfnrtNtKeFlushQueuedDpcs)
        return VERR_NOT_SUPPORTED;

    pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS*sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;

    pArgs->pfnWorker = pfnWorker;
    pArgs->pvUser1   = pvUser1;
    pArgs->pvUser2   = pvUser2;
    pArgs->idCpu     = NIL_RTCPUID;
    pArgs->idCpu2    = NIL_RTCPUID;
    pArgs->cHits     = 0;
    pArgs->cRefs     = 1;

    paExecCpuDpcs = (KDPC *)(pArgs + 1);

    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;

        KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
        pArgs->idCpu2 = idCpu2;
    }
    else
    {
        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
            KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
        }
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    /*
     * We cannot do other than assume a 1:1 relationship between the
     * affinity mask bits and the processor numbers, despite the warnings in the docs.
     * If someone knows a better way to get this done, please let bird know.
     */
    ASMCompilerBarrier(); /* paranoia */
    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);

        ASMAtomicIncS32(&pArgs->cRefs);
        ret = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
        Assert(ret);
    }
    else
    {
        unsigned iSelf = KeGetCurrentProcessorNumber();

        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            if (    (i != iSelf)
                &&  (Mask & RT_BIT_64(i)))
            {
                ASMAtomicIncS32(&pArgs->cRefs);
                BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
                Assert(ret);
            }
        }
        if (enmCpuid != RT_NT_CPUID_OTHERS)
            pfnWorker(iSelf, pvUser1, pvUser2);
    }

    KeLowerIrql(oldIrql);

    /* Flush all DPCs and wait for completion. (can take long!) */
    /** @todo Consider changing this to an active wait using some atomic inc/dec
     *  stuff (and check for the current cpu above in the specific case). */
    /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
     *        executed. Seen pArgs being freed while some CPU was using it before
     *        cRefs was added. */
    g_pfnrtNtKeFlushQueuedDpcs();

    if (pcHits)
        *pcHits = pArgs->cHits;

    /* Dereference the argument structure. */
    int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
    Assert(cRefs >= 0);
    if (cRefs == 0)
        ExFreePool(pArgs);

    return VINF_SUCCESS;
}
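This is the internal worker behind the public RTMpOn* APIs on NT; a caller-side sketch with a hypothetical worker:

#include <iprt/mp.h>

/* Hypothetical worker: runs at DISPATCH_LEVEL on each target CPU,
   so keep it short and never block. */
static DECLCALLBACK(void) myMpWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    /* ... per-CPU work ... */
}

static int runOnAllCpus(void)
{
    return RTMpOnAll(myMpWorker, NULL /* pvUser1 */, NULL /* pvUser2 */);
}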
Example #27
/**
 * Internal worker processing events and inserting new requests into the waiting list.
 */
static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    int rc = VINF_SUCCESS;

    /* Process new requests first. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
    if (fWokenUp)
    {
        for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
        {
            PRTFILEAIOREQINTERNAL pReqHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[iSlot], NULL, PRTFILEAIOREQINTERNAL);

            while (  (pCtxInt->iFirstFree < pCtxInt->cReqsWaitMax)
                   && pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
                pReqHead->iWaitingList = pCtxInt->iFirstFree;
                pReqHead = pReqHead->pNext;

                /* Clear pointer to next and previous element just for safety. */
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pPrev = NULL;
                pCtxInt->iFirstFree++;

                Assert(   (pCtxInt->iFirstFree <= pCtxInt->cMaxRequests)
                       && (pCtxInt->iFirstFree <= pCtxInt->cReqsWaitMax));
            }

            /* Append the rest to the wait list. */
            if (pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                if (!pCtxInt->pReqsWaitHead)
                {
                    Assert(!pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitHead = pReqHead;
                    pReqHead->pPrev = NULL;
                }
                else
                {
                    AssertPtr(pCtxInt->pReqsWaitTail);

                    pCtxInt->pReqsWaitTail->pNext = pReqHead;
                    pReqHead->pPrev = pCtxInt->pReqsWaitTail;
                }

                /* Update tail. */
                while (pReqHead->pNext)
                {
                    RTFIELAIOREQ_ASSERT_STATE(pReqHead->pNext, SUBMITTED);
                    pReqHead = pReqHead->pNext;
                }

                pCtxInt->pReqsWaitTail = pReqHead;
                pCtxInt->pReqsWaitTail->pNext = NULL;
            }
        }

        /* Check if a request needs to be canceled. */
        PRTFILEAIOREQINTERNAL pReqToCancel = ASMAtomicReadPtrT(&pCtxInt->pReqToCancel, PRTFILEAIOREQINTERNAL);
        if (pReqToCancel)
        {
            /* The request can be in the array waiting for completion or still on the wait list because the array is full. */
            if (pReqToCancel->iWaitingList != RTFILEAIOCTX_WAIT_ENTRY_INVALID)
            {
                /* Put it out of the waiting list. */
                pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
            }
            else
            {
                /* Unlink from the waiting list. */
                PRTFILEAIOREQINTERNAL pPrev = pReqToCancel->pPrev;
                PRTFILEAIOREQINTERNAL pNext = pReqToCancel->pNext;

                if (pNext)
                    pNext->pPrev = pPrev;
                else
                {
                    /* We canceled the tail. */
                    pCtxInt->pReqsWaitTail = pPrev;
                }

                if (pPrev)
                    pPrev->pNext = pNext;
                else
                {
                    /* We canceled the head. */
                    pCtxInt->pReqsWaitHead = pNext;
                }
            }

            ASMAtomicDecS32(&pCtxInt->cRequests);
            AssertMsg(pCtxInt->cRequests >= 0, ("Canceled a request which is not in this context\n"));
            RTSemEventSignal(pCtxInt->SemEventCancel);
        }
    }
    else
    {
        if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
            rc = VERR_INTERRUPTED;
    }

    return rc;
}