Code example #1
DECLHIDDEN(void) rtR0PowerNotificationTerm(void)
{
    PRTPOWERNOTIFYREG   pHead;
    RTSPINLOCKTMP       Tmp       = RTSPINLOCKTMP_INITIALIZER;
    RTSPINLOCK          hSpinlock = g_hRTPowerNotifySpinLock;
    AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);

    /** @todo OS specific term here */

    /* pick up the list and the spinlock. */
    RTSpinlockAcquire(hSpinlock, &Tmp);
    ASMAtomicWriteHandle(&g_hRTPowerNotifySpinLock, NIL_RTSPINLOCK);
    pHead = g_pRTPowerCallbackHead;
    g_pRTPowerCallbackHead = NULL;
    ASMAtomicIncU32(&g_iRTPowerGeneration);
    RTSpinlockRelease(hSpinlock, &Tmp);

    /* free the list. */
    while (pHead)
    {
        PRTPOWERNOTIFYREG pFree = pHead;
        pHead = pHead->pNext;

        pFree->pNext = NULL;
        pFree->pfnCallback = NULL;
        RTMemFree(pFree);
    }

    RTSpinlockDestroy(hSpinlock);
}
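Note how the teardown detaches the callback list and invalidates the global spinlock handle inside the lock, then frees the nodes and destroys the spinlock outside it. The sketch below shows the era's RTSpinlock API used above (the two-argument RTSPINLOCKTMP acquire/release variant); demoSpinlockedUpdate is a hypothetical helper, not part of IPRT.

#include <iprt/spinlock.h>
#include <iprt/err.h>

/* Hypothetical helper: create a spinlock, do a brief update under it, destroy it. */
static int demoSpinlockedUpdate(uint32_t volatile *pu32)
{
    RTSPINLOCK hSpinlock;
    int rc = RTSpinlockCreate(&hSpinlock);
    if (RT_FAILURE(rc))
        return rc;

    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquire(hSpinlock, &Tmp);
    *pu32 += 1;                             /* keep the locked region short; no blocking calls here */
    RTSpinlockRelease(hSpinlock, &Tmp);

    return RTSpinlockDestroy(hSpinlock);
}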
Code example #2
DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    /*AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);*/

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Try to take the lock (cLockers is -1 when it's free).
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody owns it (or soon will). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
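Code example #3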
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check ownership and recursions.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (RT_UNLIKELY(hNativeOwner != hNativeSelf))
    {
        AssertMsgFailed(("Not owner of mutex %p!! hNativeSelf=%RTntrd Owner=%RTntrd cRecursions=%d\n",
                         pThis, hNativeSelf, hNativeOwner, pThis->cRecursions));
        return VERR_NOT_OWNER;
    }
    if (pThis->cRecursions > 1)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorRec);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicDecU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Unlock mutex semaphore.
     */
#ifdef RTSEMMUTEX_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, false);
    if (RT_FAILURE(rc9))
        return rc9;
#endif
    ASMAtomicWriteU32(&pThis->cRecursions, 0);
    ASMAtomicWriteHandle(&pThis->hNativeOwner, NIL_RTNATIVETHREAD);

    if (ReleaseMutex(pThis->hMtx))
        return VINF_SUCCESS;

    int rc = RTErrConvertFromWin32(GetLastError());
    AssertMsgFailed(("%p/%p, rc=%Rrc lasterr=%d\n", pThis, pThis->hMtx, rc, GetLastError()));
    return rc;
}
Code example #4
/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    Assert(pCritSect->s.Core.cNestings == 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}
Code example #5
RTDECL(int) RTCritSectLeave(PRTCRITSECT pCritSect)
{
    /*
     * Assert sanity and check for NOP.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Assert ownership and so on.
     */
    Assert(pCritSect->cNestings > 0);
    Assert(pCritSect->cLockers >= 0);
    Assert(pCritSect->NativeThreadOwner == RTThreadNativeSelf());

#ifdef RTCRITSECT_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(pCritSect->pValidatorRec, pCritSect->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Decrement the nesting count; once it reaches zero we release the critsect.
     */
    pCritSect->cNestings--;
    if (pCritSect->cNestings > 0)
        ASMAtomicDecS32(&pCritSect->cLockers);
    else
    {
        /*
         * Clear the owner.
         * Decrement the lock count; if the result is >= 0, somebody is waiting and must be woken up.
         */
        ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicDecS32(&pCritSect->cLockers) >= 0)
        {
            int rc = RTSemEventSignal(pCritSect->EventSem);
            AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
        }
    }
    return VINF_SUCCESS;
}
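Code example #6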
/**
 * Internal worker for RTSemMutexRequestNoResume and its debug companion.
 *
 * @returns Same as RTSemMutexRequestNoResume.
 * @param   hMutexSem           The mutex handle.
 * @param   cMillies            The number of milliseconds to wait.
 * @param   pSrcPos             The source position of the caller.
 */
DECL_FORCE_INLINE(int) rtSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check for recursive entry.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (hNativeOwner == hNativeSelf)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Lock mutex semaphore.
     */
    RTTHREAD        hThreadSelf = NIL_RTTHREAD;
    if (cMillies > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }
    DWORD rc = WaitForSingleObjectEx(pThis->hMtx,
                                     cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies,
                                     TRUE /*fAlertable*/);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
    switch (rc)
    {
        case WAIT_OBJECT_0:
#ifdef RTSEMMUTEX_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
            ASMAtomicWriteHandle(&pThis->hNativeOwner, hNativeSelf);
            ASMAtomicWriteU32(&pThis->cRecursions, 1);
            return VINF_SUCCESS;

        case WAIT_TIMEOUT:          return VERR_TIMEOUT;
        case WAIT_IO_COMPLETION:    return VERR_INTERRUPTED;
        case WAIT_ABANDONED:        return VERR_SEM_OWNER_DIED;
        default:
            AssertMsgFailed(("%u\n",  rc));
        case WAIT_FAILED:
        {
            int rc2 = RTErrConvertFromWin32(GetLastError());
            AssertMsgFailed(("Wait on hMutexSem %p failed, rc=%d lasterr=%d\n", hMutexSem, rc, GetLastError()));
            if (rc2 != VINF_SUCCESS)
                return rc2;

            AssertMsgFailed(("WaitForSingleObject(event) -> rc=%d while converted lasterr=%d\n", rc, rc2));
            return VERR_INTERNAL_ERROR;
        }
    }
}
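Together with the RTSemMutexRelease shown in code example #3, this request worker gives a recursive mutex with owner tracking. A hedged caller sketch against the public IPRT API follows; counterBump and g_hMtx are hypothetical names, and the handle is assumed to have been created with RTSemMutexCreate().

#include <iprt/semaphore.h>
#include <iprt/err.h>

static RTSEMMUTEX g_hMtx = NIL_RTSEMMUTEX;  /* hypothetical; created elsewhere via RTSemMutexCreate */

static int counterBump(uint32_t *pcCounter)
{
    /* Blocks until the mutex is ours; re-entry by the owner thread just bumps cRecursions. */
    int rc = RTSemMutexRequest(g_hMtx, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc))
        return rc;

    *pcCounter += 1;                        /* the protected update */

    return RTSemMutexRelease(g_hMtx);       /* fails with VERR_NOT_OWNER if mispaired */
}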
Code example #7
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
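The ring-0/raw-mode path above cannot always signal the event semaphore, so a contended leave is queued per-VCPU and replayed in ring-3. Device code is expected to bracket shared-state access as in this hedged sketch; demoDevReadReg is hypothetical and the header location varies between VirtualBox versions.

#include <VBox/pdmcritsect.h>   /* <VBox/vmm/pdmcritsect.h> in newer trees */
#include <VBox/err.h>
#include <iprt/assert.h>

/* Hypothetical device method guarding shared state with a PDM critsect. */
static int demoDevReadReg(PPDMCRITSECT pCritSect, uint32_t volatile *pu32Shared, uint32_t *pu32Value)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY /*rcBusy*/);
    if (rc == VERR_SEM_BUSY)
        return rc;              /* ring-0/RC caller defers the access to ring-3 */
    AssertRCReturn(rc, rc);

    *pu32Value = *pu32Shared;   /* shared-state access under the lock */

    PDMCritSectLeave(pCritSect);
    return VINF_SUCCESS;
}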
Code example #8
DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling and is the order right?
     */
    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD        hThreadSelf = pCritSect->pValidatorRec
                                ? RTThreadSelfAutoAdopt()
                                : RTThreadSelf();
    int             rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
        rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Increment the waiter counter.
     * The result is 0 if the section was free (cLockers is -1 when unowned).
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#else
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
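The enter path increments cLockers up front and sleeps on the event semaphore while the section is owned, which is exactly what the leave path in code example #5 signals. Below is a hedged sketch of the public RTCritSect lifecycle; demoGuardedWork is a hypothetical name.

#include <iprt/critsect.h>
#include <iprt/err.h>

static int demoGuardedWork(void)
{
    RTCRITSECT CritSect;
    int rc = RTCritSectInit(&CritSect);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTCritSectEnter(&CritSect);        /* blocks until the section is ours */
    if (RT_SUCCESS(rc))
    {
        /* ... touch the shared state ... */
        rc = RTCritSectLeave(&CritSect);    /* wakes one waiter if contended */
    }

    RTCritSectDelete(&CritSect);
    return rc;
}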
Code example #9
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
    struct timespec Timeout;
    struct timespec *pTimeout = NULL;
    uint64_t         StartNanoTS = 0;

    LogFlowFunc(("hAioCtx=%#p cMinReqs=%zu cMillies=%u pahReqs=%#p cReqs=%zu pcbReqs=%#p\n",
                 hAioCtx, cMinReqs, cMillies, pahReqs, cReqs, pcReqs));

    /* Check parameters. */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    rtFileAioCtxDump(pCtxInt);

    int32_t cRequestsWaiting = ASMAtomicReadS32(&pCtxInt->cRequests);

    if (   RT_UNLIKELY(cRequestsWaiting <= 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    if (RT_UNLIKELY(cMinReqs > (uint32_t)cRequestsWaiting))
        return VERR_INVALID_PARAMETER;

    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = (cMillies % 1000) * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /* Update the waiting list once before we enter the loop. */
    rc = rtFileAioCtxProcessEvents(pCtxInt);

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
#ifdef RT_STRICT
        if (RT_UNLIKELY(!pCtxInt->iFirstFree))
        {
            for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
                RTAssertMsg2Weak("wait[%d] = %#p\n", i, pCtxInt->apReqs[i]);

            AssertMsgFailed(("No request to wait for. pReqsWaitHead=%#p pReqsWaitTail=%#p\n",
                            pCtxInt->pReqsWaitHead, pCtxInt->pReqsWaitTail));
        }
#endif

        LogFlow(("Waiting for %d requests to complete\n", pCtxInt->iFirstFree));
        rtFileAioCtxDump(pCtxInt);

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
                                  pCtxInt->iFirstFree, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (rcPosix < 0)
        {
            LogFlow(("aio_suspend failed %d nent=%u\n", errno, pCtxInt->iFirstFree));
            /* Check that this is an external wakeup event. */
            if (errno == EINTR)
                rc = rtFileAioCtxProcessEvents(pCtxInt);
            else
                rc = RTErrConvertFromErrno(errno);
        }
        else
        {
            /* Requests finished. */
            unsigned iReqCurr = 0;
            unsigned cDone = 0;

            /* Remove completed requests from the waiting list. */
            while (   (iReqCurr < pCtxInt->iFirstFree)
                   && (cDone < cReqs))
            {
                PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
                int rcReq = aio_error(&pReq->AioCB);

                if (rcReq != EINPROGRESS)
                {
                    /* Completed; store the return code. */
                    if (rcReq == 0)
                    {
                        pReq->Rc = VINF_SUCCESS;
                        /* Call aio_return() to free resources. */
                        pReq->cbTransfered = aio_return(&pReq->AioCB);
                    }
                    else
                    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        pReq->Rc = RTErrConvertFromErrno(errno);
#else
                        pReq->Rc = RTErrConvertFromErrno(rcReq);
#endif
                    }

                    /* Mark the request as finished. */
                    RTFILEAIOREQ_SET_STATE(pReq, COMPLETED);
                    cDone++;

                    /* If there are other entries waiting put the head into the now free entry. */
                    if (pCtxInt->pReqsWaitHead)
                    {
                        PRTFILEAIOREQINTERNAL pReqInsert = pCtxInt->pReqsWaitHead;

                        pCtxInt->pReqsWaitHead = pReqInsert->pNext;
                        if (!pCtxInt->pReqsWaitHead)
                        {
                            /* List is empty now. Clear tail too. */
                            pCtxInt->pReqsWaitTail = NULL;
                        }

                        pReqInsert->iWaitingList = pReq->iWaitingList;
                        pCtxInt->apReqs[pReqInsert->iWaitingList] = pReqInsert;
                        iReqCurr++;
                    }
                    else
                    {
                        /*
                         * Move the last entry into the current position to avoid holes
                         * but only if it is not the last element already.
                         */
                        if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
                        {
                            pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                            pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
                        }
                        else
                            pCtxInt->iFirstFree--;

                        pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
                    }

                    /* Put the request into the completed list. */
                    pahReqs[cRequestsCompleted++] = pReq;
                    pReq->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
                }
                else
                    iReqCurr++;
            }

            AssertMsg((cDone <= cReqs), ("Overflow cReqs=%u cMinReqs=%u cDone=%u\n",
                                         cReqs, cMinReqs, cDone));
            cReqs    -= cDone;
            cMinReqs  = RT_MAX(cMinReqs, cDone) - cDone;
            ASMAtomicSubS32(&pCtxInt->cRequests, cDone);

            AssertMsg(pCtxInt->cRequests >= 0, ("Finished more requests than currently active\n"));

            if (!cMinReqs)
                break;

            if (cMillies != RT_INDEFINITE_WAIT)
            {
                /* Recalculate the remaining timeout from the elapsed time. */
                uint64_t cNsElapsed = RTTimeNanoTS() - StartNanoTS;
                uint64_t cNsTimeout = (uint64_t)cMillies * 1000000;
                if (cNsElapsed >= cNsTimeout)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                Timeout.tv_sec  = (cNsTimeout - cNsElapsed) / 1000000000;
                Timeout.tv_nsec = (cNsTimeout - cNsElapsed) % 1000000000;
            }

            /* Check for new elements. */
            rc = rtFileAioCtxProcessEvents(pCtxInt);
        }
    }

    *pcReqs = cRequestsCompleted;
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
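All three RTFileAioCtxWait ports in this listing (POSIX above, FreeBSD and Linux below) share one contract: block until at least cMinReqs requests complete or a timeout/wakeup occurs, then hand back the completed handles. A hedged consumer sketch; drainCompletions is a hypothetical name.

#include <iprt/file.h>
#include <iprt/err.h>

/* Hypothetical consumer: collect up to 16 completions, waiting at most 100 ms. */
static int drainCompletions(RTFILEAIOCTX hAioCtx)
{
    RTFILEAIOREQ ahReqs[16];
    uint32_t     cCompleted = 0;
    int rc = RTFileAioCtxWait(hAioCtx, 1 /*cMinReqs*/, 100 /*cMillies*/,
                              ahReqs, RT_ELEMENTS(ahReqs), &cCompleted);
    if (RT_SUCCESS(rc))
        for (uint32_t i = 0; i < cCompleted; i++)
        {
            size_t cbTransferred = 0;
            int rcReq = RTFileAioReqGetRC(ahReqs[i], &cbTransferred);
            NOREF(rcReq); /* dispatch the per-request status here */
        }
    return rc;  /* VERR_TIMEOUT and VERR_INTERRUPTED are expected outcomes */
}

Another thread can break the wait early with RTFileAioCtxWakeup(), which the implementations here surface as VERR_INTERRUPTED via the fWokenUp flag.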
Code example #10
File: fileaio-freebsd.cpp  Project: ryenus/vbox
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec    *pTimeout = NULL;
    struct timespec     Timeout = {0,0};
    uint64_t            StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        int cRequestsToWait = (int)RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);
        int rcBSD;

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already because the
             * user may omit the RTFileAioReqGetRC() call, in which
             * case we would leak kernel resources.
             * This will result in errors during submission
             * of other requests as soon as the max_aio_queue_per_proc
             * limit is reached.
             */
            int cbTransfered = aio_return(&pReqInt->AioCB);

            if (cbTransfered < 0)
            {
                pReqInt->Rc = RTErrConvertFromErrno(errno); /* aio_return() sets errno on failure */
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not, advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (    pCtxInt->fWokenUp
        &&  RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
Code example #11
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * for performing the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET    OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single-shot timers get complicated wrt fSuspended maintenance.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
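The master callback fans each tick out by queueing the other CPUs' DPCs before running its own callback, and for single-shot timers it maintains fSuspended through cOmniSuspendCountDown. Creating such an omni-timer goes through the generic IPRT timer API, sketched here; demoTimerCallback and demoStartOmniTimer are hypothetical, and RTTIMER_FLAGS_CPU_ALL timers are a ring-0 feature.

#include <iprt/timer.h>
#include <iprt/err.h>

/* Hypothetical callback: invoked on every online CPU for each tick of an omni-timer. */
static DECLCALLBACK(void) demoTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    NOREF(pTimer); NOREF(pvUser); NOREF(iTick);
    /* Runs at raised IRQL (DISPATCH_LEVEL on NT): keep it short and non-blocking. */
}

static int demoStartOmniTimer(PRTTIMER *ppTimer)
{
    /* A 10 ms recurring tick on all online CPUs. */
    int rc = RTTimerCreateEx(ppTimer, UINT64_C(10000000) /*u64NanoInterval*/,
                             RTTIMER_FLAGS_CPU_ALL, demoTimerCallback, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
        rc = RTTimerStart(*ppTimer, 0 /*u64First: fire as soon as possible*/);
    return rc;
}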
Code example #12
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    /*
     * Can't wait if there are no requests around.
     */
    if (   RT_UNLIKELY(ASMAtomicUoReadS32(&pCtxInt->cRequests) == 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec    *pTimeout = NULL;
    struct timespec     Timeout = {0,0};
    uint64_t            StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /*
     * Loop until we're woken up, hit an error (incl timeout), or
     * have collected the desired number of requests.
     */
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    while (!pCtxInt->fWokenUp)
    {
        LNXKAIOIOEVENT  aPortEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        int             cRequestsToWait = RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rc = rtFileAsyncIoLinuxGetEvents(pCtxInt->AioContext, cMinReqs, cRequestsToWait, &aPortEvents[0], pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (RT_FAILURE(rc))
            break;
        uint32_t const cDone = rc;
        rc = VINF_SUCCESS;

        /*
         * Process received events / requests.
         */
        for (uint32_t i = 0; i < cDone; i++)
        {
            /*
             * The iocb is the first element in our request structure.
             * So we can safely cast it directly to the handle (see above)
             */
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aPortEvents[i].pIoCB;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /** @todo aeichner: The rc field contains the result code
             *  like you can find in errno for the normal read/write ops.
             *  But there is a second field called rc2. I don't know the
             *  purpose for it yet.
             */
            if (RT_UNLIKELY(aPortEvents[i].rc < 0))
                pReqInt->Rc = RTErrConvertFromErrno(-aPortEvents[i].rc); /* Convert to positive value. */
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = aPortEvents[i].rc;
            }

            /* Mark the request as finished. */
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not, advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (    pCtxInt->fWokenUp
        &&  RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
Code example #13
File: initterm.cpp  Project: quiquetux/jokte-ba-as
HRESULT Shutdown()
{
    HRESULT rc = S_OK;

#if !defined(VBOX_WITH_XPCOM)

    /* EventQueue::uninit reference counting fun. */
    RTTHREAD hSelf = RTThreadSelf();
    if (    hSelf == gCOMMainThread
        &&  hSelf != NIL_RTTHREAD)
    {
        if (--gCOMMainInitCount == 0)
        {
            EventQueue::uninit();
            ASMAtomicWriteHandle(&gCOMMainThread, NIL_RTTHREAD);
        }
    }

    CoUninitialize();

#else /* !defined (VBOX_WITH_XPCOM) */

    nsCOMPtr<nsIEventQueue> eventQ;
    rc = NS_GetMainEventQ(getter_AddRefs(eventQ));

    if (NS_SUCCEEDED(rc) || rc == NS_ERROR_NOT_AVAILABLE)
    {
        /* NS_ERROR_NOT_AVAILABLE seems to mean that
         * nsIEventQueue::StopAcceptingEvents() has been called (see
         * nsEventQueueService.cpp). We hope that this error code always means
         * just that in this case and assume that we're on the main thread
         * (it's a kind of unexpected behavior if a non-main thread ever calls
         * StopAcceptingEvents() on the main event queue). */

        PRBool isOnMainThread = PR_FALSE;
        if (NS_SUCCEEDED(rc))
        {
            rc = eventQ->IsOnCurrentThread(&isOnMainThread);
            eventQ = nsnull; /* early release before shutdown */
        }
        else
        {
            isOnMainThread = PR_TRUE;
            rc = NS_OK;
        }

        if (NS_SUCCEEDED(rc) && isOnMainThread)
        {
            /* only the main thread needs to uninitialize XPCOM and only if
             * init counter drops to zero */
            if (--gXPCOMInitCount == 0)
            {
                EventQueue::uninit();
                rc = NS_ShutdownXPCOM(nsnull);

                /* This thread initialized XPCOM and set gIsXPCOMInitialized to
                 * true; reset it back to false. */
                bool wasInited = ASMAtomicXchgBool(&gIsXPCOMInitialized, false);
                Assert(wasInited == true);
                NOREF(wasInited);

# if defined (XPCOM_GLUE)
                XPCOMGlueShutdown();
# endif
            }
        }
    }

#endif /* !defined(VBOX_WITH_XPCOM) */

    AssertComRC(rc);

    return rc;
}