/**
 * Locks the format type data for updating.
 *
 * Mostly for checking that the caller is doing its job.
 */
DECLINLINE(void) rtstrFormatTypeWriteLock(void)
{
#if defined(RTSTRFORMATTYPE_WITH_LOCKING)
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&g_i32Spinlock, -RTSTRFORMATTYPE_LOCK_OFFSET, 0)))
    {
        unsigned volatile i;

        AssertFailed();
        for (i = 0;; i++)
            if (    !g_i32Spinlock
                &&  ASMAtomicCmpXchgS32(&g_i32Spinlock, -RTSTRFORMATTYPE_LOCK_OFFSET, 0))
                break;
    }
#endif
}
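/*
 * A minimal standalone sketch (not from the sources above) of the same CAS-based
 * spin-acquire pattern, assuming a hypothetical lock word that is 0 when free
 * and 1 when held.  All names below are illustrative.
 */
#include <iprt/asm.h>

static int32_t volatile g_i32MyLock = 0;        /* hypothetical lock word */

DECLINLINE(void) myLockAcquire(void)
{
    /* Spin until the 0 -> 1 transition succeeds. */
    while (!ASMAtomicCmpXchgS32(&g_i32MyLock, 1 /*new*/, 0 /*expected*/))
        ASMNopPause();
}

DECLINLINE(void) myLockRelease(void)
{
    ASMAtomicWriteS32(&g_i32MyLock, 0);         /* publish the release */
}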
Example No. 2
DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    /*AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);*/

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Try to take the lock (cLockers is -1 if it's free).
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody owns it (or soon will). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
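/*
 * A caller-side sketch (an assumption, not part of the listing): the public
 * RTCritSectTryEnter/RTCritSectLeave pair is expected to funnel into a worker
 * like the one above, so a non-blocking update would look like this.
 */
#include <iprt/critsect.h>
#include <iprt/err.h>

static int myTryUpdate(PRTCRITSECT pCritSect, int *piShared)
{
    int rc = RTCritSectTryEnter(pCritSect);
    if (RT_SUCCESS(rc))
    {
        *piShared += 1;                         /* touch the protected state */
        RTCritSectLeave(pCritSect);
        return VINF_SUCCESS;
    }
    return rc;                                  /* typically VERR_SEM_BUSY when contended */
}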
Example No. 3
/**
 * Common worker for the debug and normal APIs.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
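/*
 * A caller-side sketch (assumption: the public PDMCritSectTryEnter wraps this
 * worker) for device code paths that must not block; headers omitted.
 */
static bool myTryReadReg(PPDMCRITSECT pCritSect, uint32_t const *pu32Reg, uint32_t *pu32Out)
{
    if (PDMCritSectTryEnter(pCritSect) == VINF_SUCCESS)
    {
        *pu32Out = *pu32Reg;                    /* read the guarded device state */
        PDMCritSectLeave(pCritSect);
        return true;
    }
    return false;                               /* busy (VERR_SEM_BUSY); caller retries later */
}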
RTDECL(int)  RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTMULTIINTERNAL *pThis = hEventMultiSem;
    AssertReturn(VALID_PTR(pThis) && pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                 VERR_INVALID_HANDLE);
#ifdef RT_STRICT
    int32_t i = pThis->iState;
    Assert(i == 0 || i == -1 || i == 1);
#endif

    /*
     * Reset it.
     */
    ASMAtomicCmpXchgS32(&pThis->iState, 0, -1);
    return VINF_SUCCESS;
}
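/*
 * A lifecycle sketch (an illustration, not part of the listing) of the
 * manual-reset event that the reset function above operates on.
 */
#include <iprt/semaphore.h>
#include <iprt/err.h>

static int myEventRoundTrip(void)
{
    RTSEMEVENTMULTI hEvt;
    int rc = RTSemEventMultiCreate(&hEvt);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTSemEventMultiSignal(hEvt);           /* latch the event (the Linux worker above uses iState == -1 for this) */
    if (RT_SUCCESS(rc))
        rc = RTSemEventMultiWait(hEvt, RT_INDEFINITE_WAIT); /* returns immediately because the event is signaled */
    if (RT_SUCCESS(rc))
        rc = RTSemEventMultiReset(hEvt);        /* back to the non-signaled state */

    RTSemEventMultiDestroy(hEvt);
    return rc;
}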
Example No. 5
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible).
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2. This means that we gained the
         * lock and that there are _possibly_ some waiters. We cannot know for sure, as
         * another thread might have entered this loop at nearly the same time. We will
         * therefore issue one FUTEX_WAKE too many (namely when _no_ other thread
         * entered this loop). The root of the problem is the simple FUTEX_WAIT test
         * for x != y (iState != 2 in our case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
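/*
 * A caller-side sketch (assumption: the public RTSemMutexRequest ends up in
 * the worker above) showing the request/release pairing with a finite timeout.
 */
#include <iprt/semaphore.h>
#include <iprt/err.h>

static int myMutexedWork(RTSEMMUTEX hMtx)
{
    int rc = RTSemMutexRequest(hMtx, 100 /*ms*/);   /* finite wait; may return VERR_TIMEOUT */
    if (RT_SUCCESS(rc))
    {
        /* ... protected work goes here ... */
        rc = RTSemMutexRelease(hMtx);
    }
    return rc;
}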
Example No. 6
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
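/*
 * A device-code sketch (an assumption about the calling convention, matching
 * the enter worker shown in the next example): enter with an rcBusy fallback,
 * update the guarded state, then leave via the function above.
 */
static int myDevRegWrite(PPDMCRITSECT pCritSect, uint32_t *pu32Reg, uint32_t u32Val)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);    /* rcBusy is returned in RC/R0 when contended */
    if (rc == VINF_SUCCESS)
    {
        *pu32Reg = u32Val;                      /* guarded device state update */
        PDMCritSectLeave(pCritSect);
    }
    return rc;
}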
Example No. 7
/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock; the ring-3 code will then call the kernel to do
     *        the lock wait, and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * Since preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
DECLINLINE(int) rtSemEventLnxMultiWait(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /*
     * Quickly check whether it's signaled.
     */
    int32_t iCur = ASMAtomicUoReadS32(&pThis->iState);
    Assert(iCur == 0 || iCur == -1 || iCur == 1);
    if (iCur == -1)
        return VINF_SUCCESS;

    /*
     * Check and convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64Deadline = 0; /* shut up gcc */
    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
    {
        /* If the timeout is zero, then we're done. */
        if (!uTimeout)
            return VERR_TIMEOUT;

        /* Convert it to a deadline + interval timespec. */
        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                     ? uTimeout * UINT32_C(1000000)
                     : UINT64_MAX;
        if (uTimeout != UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        {
            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                u64Deadline = RTTimeSystemNanoTS() + uTimeout;
            else
            {
                uint64_t u64Now = RTTimeSystemNanoTS();
                if (uTimeout <= u64Now)
                    return VERR_TIMEOUT;
                u64Deadline = uTimeout;
                uTimeout   -= u64Now;
            }
            if (   sizeof(ts.tv_sec) >= sizeof(uint64_t)
                || uTimeout <= UINT64_C(1000000000) * UINT32_MAX)
            {
                ts.tv_nsec = uTimeout % UINT32_C(1000000000);
                ts.tv_sec  = uTimeout / UINT32_C(1000000000);
                pTimeout = &ts;
            }
        }
    }

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENTMULTI_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (unsigned i = 0;; i++)
    {
        /*
         * Start waiting. We only account for there being or having been
         * threads waiting on the semaphore to keep things simple.
         */
        iCur = ASMAtomicUoReadS32(&pThis->iState);
        Assert(iCur == 0 || iCur == -1 || iCur == 1);
        if (    iCur == 1
            ||  ASMAtomicCmpXchgS32(&pThis->iState, 1, 0))
        {
            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64Deadline - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                    return VERR_TIMEOUT;
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
#ifdef RTSEMEVENTMULTI_STRICT
            if (pThis->fEverHadSignallers)
            {
                int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                                uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
                if (RT_FAILURE(rc9))
                    return rc9;
            }
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 1, pTimeout, NULL, 0);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                return VERR_SEM_DESTROYED;
            if (rc == 0)
                return VINF_SUCCESS;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
/** @todo Something is broken here; it shows up every now and again in the ATA
 *        code.  Should try running the timeout against RTTimeMilliTS to
 *        check that it's doing the right thing... */
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == -EWOULDBLOCK)
                /* retry, the value changed. */;
            else if (rc == -EINTR)
            {
                if (fFlags & RTSEMWAIT_FLAGS_NORESUME)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }
        }
        else if (iCur == -1)
            return VINF_SUCCESS;
    }
}
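/*
 * A caller-side sketch (an assumption: the public RTSemEventMultiWaitEx hands
 * its flags and timeout to a worker like the one above) of a relative,
 * millisecond, non-resuming wait.
 */
#include <iprt/semaphore.h>
#include <iprt/err.h>

static int myWaitUpTo50Ms(RTSEMEVENTMULTI hEvt)
{
    int rc = RTSemEventMultiWaitEx(hEvt,
                                     RTSEMWAIT_FLAGS_RELATIVE
                                   | RTSEMWAIT_FLAGS_MILLISECS
                                   | RTSEMWAIT_FLAGS_NORESUME,
                                   50 /*ms*/);
    /* rc is VINF_SUCCESS, VERR_TIMEOUT, or VERR_INTERRUPTED (because of NORESUME). */
    return rc;
}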
Example No. 9
void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
{
    PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser;

    LogFlowFunc(("pTask=%#p pvUser=%#p rc=%Rrc\n", pTask, pvUser, rc));

    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, rc, true);
    else
    {
        Assert((uint32_t)pTask->DataSeg.cbSeg == pTask->DataSeg.cbSeg && (int32_t)pTask->DataSeg.cbSeg >= 0);
        uint32_t uOld = ASMAtomicSubS32(&pTaskFile->cbTransferLeft, (int32_t)pTask->DataSeg.cbSeg);

        /* The first error will be returned. */
        if (RT_FAILURE(rc))
            ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
#ifdef VBOX_WITH_DEBUGGER
        else
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;

            /* Overwrite with injected error code. */
            if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                rc = ASMAtomicXchgS32(&pEpFile->rcReqRead, VINF_SUCCESS);
            else
                rc = ASMAtomicXchgS32(&pEpFile->rcReqWrite, VINF_SUCCESS);

            if (RT_FAILURE(rc))
                ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
        }
#endif

        if (!(uOld - pTask->DataSeg.cbSeg)
            && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true))
        {
#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;
            PPDMASYNCCOMPLETIONEPCLASSFILE  pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEpFile->Core.pEpClass;

            /* Check if we should delay completion of the request. */
            if (   ASMAtomicReadU32(&pEpFile->msDelay) > 0
                && ASMAtomicReadU32(&pEpFile->cReqsDelay) > 0)
            {
                uint64_t tsDelay = pEpFile->msDelay;

                if (pEpFile->msJitter)
                    tsDelay = (RTRandU32() % 100) > 50 ? pEpFile->msDelay + (RTRandU32() % pEpFile->msJitter)
                                                       : pEpFile->msDelay - (RTRandU32() % pEpFile->msJitter);
                ASMAtomicDecU32(&pEpFile->cReqsDelay);

                /* Arm the delay. */
                pTaskFile->tsDelayEnd = RTTimeProgramMilliTS() + tsDelay;

                /* Append to the list. */
                PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;
                do
                {
                    pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
                    pTaskFile->pDelayedNext = pHead;
                } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTaskFile, pHead));

                if (tsDelay < pEpClassFile->cMilliesNext)
                {
                    ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, tsDelay);
                    TMTimerSetMillies(pEpClassFile->pTimer, tsDelay);
                }

                LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, tsDelay));
            }
            else
#endif
                pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);
        }
    }
}
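/*
 * A generic sketch (an illustration, not from the sources) of the lock-free
 * LIFO push used for the delayed-request list above, assuming a hypothetical
 * node type with a pNext member.
 */
#include <iprt/asm.h>

typedef struct MYNODE
{
    struct MYNODE  *pNext;
    /* payload ... */
} MYNODE;

static void myLifoPush(MYNODE * volatile *ppHead, MYNODE *pNode)
{
    MYNODE *pHead;
    do
    {
        pHead = ASMAtomicReadPtrT(ppHead, MYNODE *);        /* snapshot the current head */
        pNode->pNext = pHead;                               /* link the new node in front */
    } while (!ASMAtomicCmpXchgPtr(ppHead, pNode, pHead));   /* retry if the head moved */
}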
void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
{
    PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser;

    LogFlowFunc(("pTask=%#p pvUser=%#p rc=%Rrc\n", pTask, pvUser, rc));

    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
    {
        pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, rc, true);
    }
    else
    {
        Assert((uint32_t)pTask->DataSeg.cbSeg == pTask->DataSeg.cbSeg && (int32_t)pTask->DataSeg.cbSeg >= 0);
        uint32_t uOld = ASMAtomicSubS32(&pTaskFile->cbTransferLeft, (int32_t)pTask->DataSeg.cbSeg);

        /* The first error will be returned. */
        if (RT_FAILURE(rc))
            ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
#ifdef VBOX_WITH_DEBUGGER
        else
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;

            /* Overwrite with injected error code. */
            if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                rc = ASMAtomicXchgS32(&pEpFile->rcReqRead, VINF_SUCCESS);
            else
                rc = ASMAtomicXchgS32(&pEpFile->rcReqWrite, VINF_SUCCESS);

            if (RT_FAILURE(rc))
                ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
        }
#endif

        if (!(uOld - pTask->DataSeg.cbSeg)
            && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true))
        {
#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;

            /* Check if we should delay completion of the request. */
            if (   ASMAtomicReadU32(&pEpFile->msDelay) > 0
                && ASMAtomicCmpXchgPtr(&pEpFile->pReqDelayed, pTaskFile, NULL))
            {
                /* Arm the delay. */
                pEpFile->tsDelayEnd = RTTimeProgramMilliTS() + pEpFile->msDelay;
                LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, pEpFile->msDelay));
                return;
            }
#endif
            pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);

#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
            /* Check for an expired delay. */
            if (   pEpFile->pReqDelayed != NULL
                && RTTimeProgramMilliTS() >= pEpFile->tsDelayEnd)
            {
                pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pReqDelayed, NULL, PPDMASYNCCOMPLETIONTASKFILE);
                ASMAtomicXchgU32(&pEpFile->msDelay, 0);
                LogRel(("AIOMgr: Delayed request %#p completed\n", pTaskFile));
                pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);
            }
#endif
        }
    }
}
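/*
 * A tiny sketch of the "first error wins" idiom used in both versions above:
 * only the first failure replaces VINF_SUCCESS, later failures leave the
 * recorded status untouched.  (Illustrative helper, not from the sources.)
 */
#include <iprt/asm.h>
#include <iprt/err.h>

static void myRecordFirstError(int32_t volatile *prcFirst, int rc)
{
    if (RT_FAILURE(rc))
        ASMAtomicCmpXchgS32(prcFirst, rc, VINF_SUCCESS);    /* no-op unless still VINF_SUCCESS */
}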