static int rtCritSectRwEnterShared(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

#ifdef RTCRITSECTRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTCRITSECTRW_STRICT
                RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
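
The fast path above packs the whole reader/writer state into one 64-bit word and bumps the reader count with a compare-and-swap loop. Below is a minimal, self-contained sketch of the same technique using C11 atomics; the mask and shift values are illustrative placeholders, not the actual RTCSRW_* layout.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical layout: the low 16 bits hold the reader count, one bit holds the direction. */
#define MY_CNT_RD_MASK   UINT64_C(0x000000000000ffff)
#define MY_CNT_RD_SHIFT  0
#define MY_DIR_MASK      UINT64_C(0x0000000000010000)
#define MY_DIR_READ      UINT64_C(0)    /* direction bit clear = read direction */

static bool tryEnterShared(_Atomic uint64_t *pState)
{
    uint64_t u64Old = atomic_load_explicit(pState, memory_order_acquire);
    for (;;)
    {
        if ((u64Old & MY_DIR_MASK) != MY_DIR_READ)
            return false;   /* write direction: the caller has to take the slow path */

        uint64_t c      = (u64Old & MY_CNT_RD_MASK) >> MY_CNT_RD_SHIFT;
        uint64_t u64New = (u64Old & ~MY_CNT_RD_MASK) | ((c + 1) << MY_CNT_RD_SHIFT);

        /* On failure compare_exchange_weak reloads u64Old with the current state,
           so the loop retries with fresh data, mirroring the CAS loop above. */
        if (atomic_compare_exchange_weak_explicit(pState, &u64Old, u64New,
                                                  memory_order_acq_rel,
                                                  memory_order_acquire))
            return true;
    }
}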
Example #2
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
Example #3
static int rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
Example #4
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible)
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2. This means that we gained the
         * lock and there are _possibly_ some waiters. We don't know exactly as another
         * thread might have entered this loop at nearly the same time. Therefore we will
         * call futex_wakeup once too often (if _no_ other thread entered this loop).
         * The key problem is the simple futex_wait test for x != y (iState != 2 in
         * our case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
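
The comment about calling futex_wakeup "once too often" refers to the release side of this 0/1/2 protocol (0 = free, 1 = locked, 2 = locked with possible waiters). Below is a sketch of such a release path, assuming a plain Linux futex syscall wrapper instead of the IPRT-internal sys_futex; it illustrates the protocol and is not the actual RTSemMutexRelease code.

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrapper around the raw futex syscall (illustrative; not an IPRT API). */
static long my_futex(int32_t *uaddr, int op, int32_t val)
{
    return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void sketchMutexRelease(int32_t *piState)
{
    /* Swap in 0 (free).  An old value of 2 means there may be sleepers, so wake one.
       Because the acquire loop above sets the state to 2 before sleeping, this wake
       is occasionally issued when nobody is actually waiting any more; that is the
       "once too often" case the comment describes. */
    if (__atomic_exchange_n(piState, 0, __ATOMIC_RELEASE) == 2)
        my_futex(piState, FUTEX_WAKE, 1);
}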
Example #5
DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling and is the order right?
     */
    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD        hThreadSelf = pCritSect->pValidatorRec
                                ? RTThreadSelfAutoAdopt()
                                : RTThreadSelf();
    int             rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
         rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
         if (RT_FAILURE(rc9))
             return rc9;
    }
#endif

    /*
     * Increment the waiter counter.
     * The counter rests at -1 when the section is free, so the increment yields 0
     * for the thread that takes a free section.
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#else
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
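
The contended path above rests entirely on the cLockers counter: it sits at -1 when the section is free, the increment that lands on 0 takes ownership, and anything higher queues on the event semaphore. Below is a stripped-down sketch of that handshake with C11 atomics and a POSIX semaphore standing in for the IPRT event semaphore; nesting, ownership recording and error handling are left out on purpose.

#include <semaphore.h>
#include <stdatomic.h>

typedef struct SKETCHCRITSECT
{
    atomic_int cLockers;    /* -1 = free, 0 = owned, > 0 = owned with waiters; init to -1 */
    sem_t      Event;       /* init with sem_init(&Event, 0, 0); one waiter released per post */
} SKETCHCRITSECT;

static void sketchEnter(SKETCHCRITSECT *pCritSect)
{
    /* fetch_add returns the old value, so old == -1 means the section was free
       (the listing's ASMAtomicIncS32 returns the new value, hence its "> 0" test). */
    if (atomic_fetch_add(&pCritSect->cLockers, 1) == -1)
        return;                         /* uncontended: we own the section now */
    sem_wait(&pCritSect->Event);        /* contended: sleep until the owner hands over */
}

static void sketchLeave(SKETCHCRITSECT *pCritSect)
{
    /* fetch_sub returns the old value; anything above zero means a waiter is queued. */
    if (atomic_fetch_sub(&pCritSect->cLockers, 1) > 0)
        sem_post(&pCritSect->Event);
}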
Example #6
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 *
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and that the interruptible wait function returns
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         */
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
#  endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else  /* IN_RING0 */
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# endif /* IN_RING0 */

        /*
         * Deal with the return code and critsect destruction.
         */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));

# ifdef IN_RING0
        /* Something is pending (signal, APC, debugger, whatever), just go back
           to ring-3 so the kernel can deal with it when leaving kernel context.

           Note! We've incremented cLockers already and cannot safely decrement
                 it without creating a race with PDMCritSectLeave, resulting in
                 spurious wakeups. */
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
        AssertRC(rc);
# endif
    }
    /* won't get here */
}
Example #7
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis   = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart         = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows write access.
     */
    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (    !pThis->cReads
        &&  (   (   !pThis->cWrites
                 && (   !pThis->cWritesWaiting /* play fair if we can wait */
                     || !cMillies)
                )
             || pThis->hWriter == hNativeSelf
            )
       )
    {
        /*
         * Reset the reader event semaphore if necessary.
         */
        if (pThis->fNeedResetReadEvent)
        {
            pThis->fNeedResetReadEvent = false;
            rc = RTSemEventMultiReset(pThis->ReadEvent);
            AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
        }

        pThis->cWrites++;
        pThis->hWriter = hNativeSelf;
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, pThis->cWrites == 1);
#endif
        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    /*
     * Signal writer presence.
     */
    if (cMillies != 0)
        pThis->cWritesWaiting++;

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for writing.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                tsDelta /= 1000000;
                if ((uint64_t)tsDelta < cMilliesInitial)
                    cMilliesInitial = (RTMSINTERVAL)tsDelta;
                else
                    cMilliesInitial = 1;
            }
        }

#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                 cMillies, RTTHREADSTATE_RW_WRITE, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventWaitNoResume(pThis->WriteEvent, cMillies);
        else
            rcWait = rc = RTSemEventWait(pThis->WriteEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (RT_UNLIKELY(RT_FAILURE_NP(rc) && rc != VERR_TIMEOUT)) /* timeouts are handled below */
        {
            AssertMsgRC(rc, ("RTSemEventWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (RT_UNLIKELY(pThis->u32Magic != RTSEMRW_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did prior to this loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (!pThis->cReads && (!pThis->cWrites || pThis->hWriter == hNativeSelf))
        {
            /*
             * Reset the reader event semaphore if necessary.
             */
            if (pThis->fNeedResetReadEvent)
            {
                pThis->fNeedResetReadEvent = false;
                rc = RTSemEventMultiReset(pThis->ReadEvent);
                AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
            }

            pThis->cWrites++;
            pThis->hWriter = hNativeSelf;
            pThis->cWritesWaiting--;
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif

            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /*
     * Timeout/error case, clean up.
     */
    if (pThis->u32Magic == RTSEMRW_MAGIC)
    {
        RTCritSectEnter(&pThis->CritSect);
        /* Adjust this counter, whether we got the critsect or not. */
        pThis->cWritesWaiting--;
        RTCritSectLeave(&pThis->CritSect);
    }
    return rc;
}
Example #8
DECL_FORCE_INLINE(int) rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int rc9;
        if (pThis->hWriter != NIL_RTTHREAD && pThis->hWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows read access.
     * Do not block further readers if there is a writer waiting, as
     * that will break/deadlock reader recursion.
     */
    if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
        && (   !pThis->cWritesWaiting
            ||  pThis->cReads)
#endif
       )
    {
        pThis->cReads++;
        Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (pThis->hWriter == hNativeSelf)
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
        if (RT_FAILURE(rc9))
        {
            RTCritSectLeave(&pThis->CritSect);
            return rc9;
        }
#endif

        pThis->cWriterReads++;
        Assert(pThis->cWriterReads > 0);

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for reading.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                tsDelta /= 1000000;
                if ((uint64_t)tsDelta < cMilliesInitial)
                    cMilliesInitial = (RTMSINTERVAL)tsDelta;
                else
                    cMilliesInitial = 1;
            }
        }
#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                   cMillies, RTTHREADSTATE_RW_READ, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventMultiWaitNoResume(pThis->ReadEvent, cMillies);
        else
            rcWait = rc = RTSemEventMultiWait(pThis->ReadEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
        if (RT_FAILURE(rc) && rc != VERR_TIMEOUT) /* handle timeout below */
        {
            AssertMsgRC(rc, ("RTSemEventMultiWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did before the loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
            && (   !pThis->cWritesWaiting
                ||  pThis->cReads)
#endif
           )
        {
            pThis->cReads++;
            Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /* failed */
    return rc;
}
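
For callers, request workers like the two above sit behind the public RTSemRW API. The following is a minimal usage sketch, assuming the RTSemRW* wrappers declared in iprt/semaphore.h (which drop the fInterruptible and pSrcPos parameters); error handling is reduced to the essentials.

#include <iprt/semaphore.h>
#include <iprt/err.h>

static int readThenWrite(void)
{
    RTSEMRW hRWSem;
    int rc = RTSemRWCreate(&hRWSem);
    if (RT_FAILURE(rc))
        return rc;

    /* Shared (read) access, waiting indefinitely. */
    rc = RTSemRWRequestRead(hRWSem, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        /* ... read the protected data ... */
        RTSemRWReleaseRead(hRWSem);
    }

    /* Exclusive (write) access with a 100 ms timeout; expect VERR_TIMEOUT under contention. */
    rc = RTSemRWRequestWrite(hRWSem, 100);
    if (RT_SUCCESS(rc))
    {
        /* ... modify the protected data ... */
        RTSemRWReleaseWrite(hRWSem);
    }

    RTSemRWDestroy(hRWSem);
    return rc;
}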