Example #1
static int rtCritSectRwEnterShared(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

#ifdef RTCRITSECTRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTCRITSECTRW_STRICT
                RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
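Example #2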
/**
 * Implements the indefinite wait.
 *
 * @returns See RTSemEventMultiWaitEx.
 * @param   pThis               The semaphore.
 * @param   fFlags              See RTSemEventMultiWaitEx.
 * @param   pSrcPos             The source position, can be NULL.
 */
static int rtSemEventMultiPosixWaitIndefinite(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, PCRTLOCKVALSRCPOS pSrcPos)
{
    /* take mutex */
    int rc = pthread_mutex_lock(&pThis->Mutex);
    AssertMsgReturn(!rc, ("Failed to lock event multi sem %p, rc=%d.\n", pThis, rc), RTErrConvertFromErrno(rc));
    ASMAtomicIncU32(&pThis->cWaiters);

    for (;;)
    {
        /* check state. */
        uint32_t const u32State = pThis->u32State;
        if (u32State != EVENTMULTI_STATE_NOT_SIGNALED)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            rc = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc));
            return u32State == EVENTMULTI_STATE_SIGNALED
                   ? VINF_SUCCESS
                   : VERR_SEM_DESTROYED;
        }

        /* wait */
#ifdef RTSEMEVENTMULTI_STRICT
        RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                    RT_INDEFINITE_WAIT, RTTHREADSTATE_EVENT_MULTI, true);
            if (RT_FAILURE(rc))
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                pthread_mutex_unlock(&pThis->Mutex);
                return rc;
            }
        }
#else
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
        /** @todo interruptible wait is not implementable... */ NOREF(fFlags);
        rc = pthread_cond_wait(&pThis->Cond, &pThis->Mutex);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
        if (RT_UNLIKELY(rc))
        {
            AssertMsgFailed(("Failed to wait on event multi sem %p, rc=%d.\n", pThis, rc));
            ASMAtomicDecU32(&pThis->cWaiters);
            int rc2 = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc2, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc2));
            NOREF(rc2);
            return RTErrConvertFromErrno(rc);
        }
    }
}
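The indefinite wait above is the canonical POSIX mutex/condition-variable pattern: take the mutex, re-check the state in a loop, and only block on the condition variable while the predicate still holds, so wakeups (including spurious ones) are never lost. A minimal standalone sketch of the same pattern, using plain pthreads and hypothetical names rather than the IPRT types:

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical event object: a flag guarded by a mutex and a condition variable. */
typedef struct EVENT
{
    pthread_mutex_t Mutex;
    pthread_cond_t  Cond;
    bool            fSignaled;
} EVENT;

/* Wait until the event is signaled, mirroring the re-check-then-wait loop above. */
static void EventWait(EVENT *pEvent)
{
    pthread_mutex_lock(&pEvent->Mutex);
    while (!pEvent->fSignaled)                  /* re-check after every wakeup; spurious wakeups are allowed */
        pthread_cond_wait(&pEvent->Cond, &pEvent->Mutex);
    pthread_mutex_unlock(&pEvent->Mutex);
}

/* Signal the event and wake all waiters. */
static void EventSignal(EVENT *pEvent)
{
    pthread_mutex_lock(&pEvent->Mutex);
    pEvent->fSignaled = true;
    pthread_cond_broadcast(&pEvent->Cond);
    pthread_mutex_unlock(&pEvent->Mutex);
}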
Example #3
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
Example #4
static int rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)
Example #5
RTDECL(int)   RTSemEventWaitNoResume(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies)
{
    PCRTLOCKVALSRCPOS pSrcPos = NULL;

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Wait for condition.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
    if (pThis->fEverHadSignallers)
    {
        DWORD rc = WaitForSingleObjectEx(pThis->hev,
                                         0 /*Timeout*/,
                                         TRUE /*fAlertable*/);
        if (rc != WAIT_TIMEOUT || cMillies == 0)
            return rtSemEventWaitHandleStatus(pThis, rc);
        int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                        cMillies, RTTHREADSTATE_EVENT, true);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
    DWORD rc = WaitForSingleObjectEx(pThis->hev,
                                     cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies,
                                     TRUE /*fAlertable*/);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
    return rtSemEventWaitHandleStatus(pThis, rc);
}
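The Windows implementation above leans on WaitForSingleObjectEx with fAlertable set to TRUE, which returns WAIT_IO_COMPLETION when a queued user APC interrupts the wait; the helper rtSemEventWaitHandleStatus (not shown here) can then map that to an interrupted-wait status instead of silently resuming. A small, hedged sketch of the same behaviour against a plain Win32 event handle, with a hypothetical helper name:

#include <windows.h>

/* Alertable wait: 0 = signaled, 1 = timeout, 2 = interrupted by an APC, -1 = failure. */
static int WaitAlertable(HANDLE hEvent, DWORD cMillies)
{
    DWORD dwRc = WaitForSingleObjectEx(hEvent, cMillies, TRUE /*bAlertable*/);
    switch (dwRc)
    {
        case WAIT_OBJECT_0:      return 0;
        case WAIT_TIMEOUT:       return 1;
        case WAIT_IO_COMPLETION: return 2;   /* an APC ran; a no-resume caller would treat this as interrupted */
        default:                 return -1;  /* WAIT_FAILED etc.; consult GetLastError() */
    }
}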
Example #6
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    /*
     * Recursion?
     */
    pthread_t Self = pthread_self();
    pthread_t Writer;
    ATOMIC_GET_PTHREAD_T(&pThis->Writer, &Writer);
    if (Writer == Self)
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        Assert(pThis->cWrites < INT32_MAX);
        pThis->cWrites++;
        return VINF_SUCCESS;
    }

    /*
     * Try lock it.
     */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
#ifdef RTSEMRW_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_RW_WRITE, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, true);
#endif
    }

    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* take rwlock */
        int rc = pthread_rwlock_wrlock(&pThis->RWLock);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (rc)
        {
            AssertMsgFailed(("Failed write lock read-write sem %p, rc=%d.\n", hRWSem, rc));
            return RTErrConvertFromErrno(rc);
        }
    }
    else
    {
#ifdef RT_OS_DARWIN
        AssertMsgFailed(("Not implemented on Darwin yet because of incomplete pthreads API."));
        return VERR_NOT_IMPLEMENTED;
#else /* !RT_OS_DARWIN */
        /*
         * Get current time and calc end of wait time.
         */
        struct timespec     ts = {0,0};
        clock_gettime(CLOCK_REALTIME, &ts);
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* take rwlock */
        int rc = pthread_rwlock_timedwrlock(&pThis->RWLock, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (rc)
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed read lock read-write sem %p, rc=%d.\n", hRWSem, rc));
            return RTErrConvertFromErrno(rc);
        }
#endif /* !RT_OS_DARWIN */
    }

    ATOMIC_SET_PTHREAD_T(&pThis->Writer, Self);
    pThis->cWrites = 1;
    Assert(!pThis->cReaders);
#ifdef RTSEMRW_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
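The timed path above converts a relative millisecond count into an absolute CLOCK_REALTIME deadline, which is the form the pthread_*_timedlock functions expect. A minimal sketch of that conversion, assuming plain POSIX and hypothetical helper names:

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Turn a relative timeout in milliseconds into an absolute CLOCK_REALTIME deadline. */
static struct timespec DeadlineFromMillies(unsigned cMillies)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec  += cMillies / 1000;
    ts.tv_nsec += (long)(cMillies % 1000) * 1000000;
    if (ts.tv_nsec >= 1000000000L)   /* carry into seconds, as in the example above */
    {
        ts.tv_nsec -= 1000000000L;
        ts.tv_sec++;
    }
    return ts;
}

/* Usage: try to take a mutex for at most 250 ms (not available on Darwin). */
static int TryLockFor250Ms(pthread_mutex_t *pMutex)
{
    struct timespec tsDeadline = DeadlineFromMillies(250);
    int rc = pthread_mutex_timedlock(pMutex, &tsDeadline);
    return rc == 0 ? 0 : rc == ETIMEDOUT ? 1 : -1;
}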
Example #7
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNesting > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNesting);
        return VINF_SUCCESS;
    }

    /*
     * Lock it.
     */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies != 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }

    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* take mutex */
        int rc = pthread_mutex_lock(&pThis->Mutex);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
        if (rc)
        {
            AssertMsgFailed(("Failed to lock mutex sem %p, rc=%d.\n", hMutexSem, rc)); NOREF(rc);
            return RTErrConvertFromErrno(rc);
        }
    }
    else
    {
#ifdef RT_OS_DARWIN
        AssertMsgFailed(("Not implemented on Darwin yet because of incomplete pthreads API."));
        return VERR_NOT_IMPLEMENTED;
#else /* !RT_OS_DARWIN */
        /*
         * Get current time and calc end of wait time.
         */
        struct timespec     ts = {0,0};
        clock_gettime(CLOCK_REALTIME, &ts);
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* take mutex */
        int rc = pthread_mutex_timedlock(&pThis->Mutex, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
        if (rc)
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed to lock mutex sem %p, rc=%d.\n", hMutexSem, rc)); NOREF(rc);
            return RTErrConvertFromErrno(rc);
        }
#endif /* !RT_OS_DARWIN */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNesting, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
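Example #8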
DECLINLINE(int) rtSemEventLnxMultiWait(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /*
     * Quickly check whether it's signaled.
     */
    int32_t iCur = ASMAtomicUoReadS32(&pThis->iState);
    Assert(iCur == 0 || iCur == -1 || iCur == 1);
    if (iCur == -1)
        return VINF_SUCCESS;

    /*
     * Check and convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64Deadline = 0; /* shut up gcc */
    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
    {
        /* If the timeout is zero, then we're done. */
        if (!uTimeout)
            return VERR_TIMEOUT;

        /* Convert it to a deadline + interval timespec. */
        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                     ? uTimeout * UINT32_C(1000000)
                     : UINT64_MAX;
        if (uTimeout != UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        {
            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                u64Deadline = RTTimeSystemNanoTS() + uTimeout;
            else
            {
                uint64_t u64Now = RTTimeSystemNanoTS();
                if (uTimeout <= u64Now)
                    return VERR_TIMEOUT;
                u64Deadline = uTimeout;
                uTimeout   -= u64Now;
            }
            if (   sizeof(ts.tv_sec) >= sizeof(uint64_t)
                || uTimeout <= UINT64_C(1000000000) * UINT32_MAX)
            {
                ts.tv_nsec = uTimeout % UINT32_C(1000000000);
                ts.tv_sec  = uTimeout / UINT32_C(1000000000);
                pTimeout = &ts;
            }
        }
    }

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENTMULTI_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (unsigned i = 0;; i++)
    {
        /*
         * Start waiting. We only account for there being or having been
         * threads waiting on the semaphore to keep things simple.
         */
        iCur = ASMAtomicUoReadS32(&pThis->iState);
        Assert(iCur == 0 || iCur == -1 || iCur == 1);
        if (    iCur == 1
            ||  ASMAtomicCmpXchgS32(&pThis->iState, 1, 0))
        {
            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64Deadline - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                    return VERR_TIMEOUT;
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
#ifdef RTSEMEVENTMULTI_STRICT
            if (pThis->fEverHadSignallers)
            {
                int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                                uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
                if (RT_FAILURE(rc9))
                    return rc9;
            }
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 1, pTimeout, NULL, 0);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                return VERR_SEM_DESTROYED;
            if (rc == 0)
                return VINF_SUCCESS;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
/** @todo something is broken here. shows up every now and again in the ata
 *        code. Should try to run the timeout against RTTimeMilliTS to
 *        check that it's doing the right thing... */
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == -EWOULDBLOCK)
                /* retry, the value changed. */;
            else if (rc == -EINTR)
            {
                if (fFlags & RTSEMWAIT_FLAGS_NORESUME)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }
        }
        else if (iCur == -1)
            return VINF_SUCCESS;
    }
}
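Example #9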
static int rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume)
{
    PCRTLOCKVALSRCPOS pSrcPos = NULL;

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Quickly check whether it's signaled.
     */
    /** @todo this isn't fair if someone is already waiting on it.  They should
     *        have the first go at it!
     *  (ASMAtomicReadS32(&pThis->cWaiters) == 0 || !cMillies) && ... */
    if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
        return VINF_SUCCESS;

    /*
     * Convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        if (!cMillies)
            return VERR_TIMEOUT;
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    ASMAtomicIncS32(&pThis->cWaiters);

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef RTSEMEVENT_STRICT
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       cMillies, RTTHREADSTATE_EVENT, true);
            if (RT_FAILURE(rc))
                break;
        }
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
        long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, pTimeout, NULL, 0);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
        if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK))
        {
            /* successful wakeup or fSignalled > 0 in the meantime */
            if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
                break;
        }
        else if (lrc == -ETIMEDOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
        else if (lrc == -EINTR)
        {
            if (!fAutoResume)
            {
                rc = VERR_INTERRUPTED;
                break;
            }
        }
        else
        {
            /* this shouldn't happen! */
            AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno));
            rc = RTErrConvertFromErrno(lrc);
            break;
        }
        /* adjust the relative timeout */
        if (pTimeout)
        {
            int64_t i64Diff = u64End - RTTimeSystemNanoTS();
            if (i64Diff < 1000)
            {
                rc = VERR_TIMEOUT;
                break;
            }
            ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
            ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
        }
    }

    ASMAtomicDecS32(&pThis->cWaiters);
    return rc;
}
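Both Linux examples above follow the same futex protocol: the state lives in a 32-bit word, waiters call FUTEX_WAIT with the value they last observed (so a concurrent change makes the call return immediately rather than sleep), and the signaller updates the word before issuing FUTEX_WAKE. A stripped-down sketch using the raw syscall, assuming Linux only and hypothetical names; note that syscall() returns -1 and sets errno, whereas the sys_futex wrapper above returns negative errno values directly:

#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long Futex(int32_t *pState, int iOp, int32_t iVal, const struct timespec *pTimeout)
{
    return syscall(SYS_futex, pState, iOp, iVal, pTimeout, NULL, 0);
}

/* Block while *pState still holds the "not signaled" value 0.
   Returns 0 on wakeup; -1 with errno = EINTR, ETIMEDOUT or EAGAIN otherwise. */
static long FutexWaitNotSignaled(int32_t *pState, const struct timespec *pTimeout)
{
    return Futex(pState, FUTEX_WAIT, 0 /*expected value*/, pTimeout);
}

/* Signal: publish the new state first, then wake every waiter. */
static void FutexSignalAll(int32_t *pState)
{
    __atomic_store_n(pState, 1, __ATOMIC_SEQ_CST);
    Futex(pState, FUTEX_WAKE, INT32_MAX /*wake all*/, NULL);
}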
Example #10
DECL_FORCE_INLINE(int) rtCritSectEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertPtr(pCritSect);
    AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Who is calling and is the order right?
     */
    RTNATIVETHREAD  NativeThreadSelf = RTThreadNativeSelf();
#ifdef RTCRITSECT_STRICT
    RTTHREAD        hThreadSelf = pCritSect->pValidatorRec
                                ? RTThreadSelfAutoAdopt()
                                : RTThreadSelf();
    int             rc9;
    if (pCritSect->pValidatorRec) /* (bootstrap) */
    {
         rc9 = RTLockValidatorRecExclCheckOrder(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
         if (RT_FAILURE(rc9))
             return rc9;
    }
#endif

    /*
     * Increment the waiter counter.
     * This becomes 0 when the section is free.
     */
    if (ASMAtomicIncS32(&pCritSect->cLockers) > 0)
    {
        /*
         * Nested?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                {
                    ASMAtomicDecS32(&pCritSect->cLockers);
                    return rc9;
                }
#endif
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }

            AssertBreakpoint(); /* don't do normal assertion here, the logger uses this code too. */
            ASMAtomicDecS32(&pCritSect->cLockers);
            return VERR_SEM_NESTED;
        }

        /*
         * Wait for the current owner to release it.
         */
#ifndef RTCRITSECT_STRICT
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        for (;;)
        {
#ifdef RTCRITSECT_STRICT
            rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, false);
            if (RT_FAILURE(rc9))
            {
                ASMAtomicDecS32(&pCritSect->cLockers);
                return rc9;
            }
#else
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, false);
#endif
            int rc = RTSemEventWait(pCritSect->EventSem, RT_INDEFINITE_WAIT);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);

            if (pCritSect->u32Magic != RTCRITSECT_MAGIC)
                return VERR_SEM_DESTROYED;
            if (rc == VINF_SUCCESS)
                break;
            AssertMsg(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
        }
        AssertMsg(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD, ("pCritSect->NativeThreadOwner=%p\n", pCritSect->NativeThreadOwner));
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
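From the caller's perspective the critical section above is used through the public IPRT API. A minimal usage sketch, assuming the standard iprt/critsect.h interface and hypothetical data:

#include <iprt/critsect.h>

/* Hypothetical counter guarded by a critical section. */
static RTCRITSECT g_CritSect;
static unsigned   g_cCounter;

int CounterInit(void)
{
    return RTCritSectInit(&g_CritSect);
}

void CounterBump(void)
{
    RTCritSectEnter(&g_CritSect);   /* nesting by the owner is allowed unless RTCRITSECT_FLAGS_NO_NESTING is set */
    g_cCounter++;
    RTCritSectLeave(&g_CritSect);
}

void CounterTerm(void)
{
    RTCritSectDelete(&g_CritSect);
}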
Example #11
/**
 * Implements the timed wait.
 *
 * @returns See RTSemEventMultiWaitEx
 * @param   pThis               The semaphore.
 * @param   fFlags              See RTSemEventMultiWaitEx.
 * @param   uTimeout            See RTSemEventMultiWaitEx.
 * @param   pSrcPos             The source position, can be NULL.
 */
static int rtSemEventMultiPosixWaitTimed(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                         PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Convert uTimeout to a relative value in nano seconds.
     */
    if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
        uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                 ? uTimeout * UINT32_C(1000000)
                 : UINT64_MAX;
    if (uTimeout == UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);

    uint64_t uAbsTimeout = uTimeout;
    if (fFlags & RTSEMWAIT_FLAGS_ABSOLUTE)
    {
        uint64_t u64Now = RTTimeSystemNanoTS();
        uTimeout = uTimeout > u64Now ? uTimeout - u64Now : 0;
    }

    if (uTimeout == 0)
        return rtSemEventMultiPosixWaitPoll(pThis);

    /*
     * Get current time and calc end of deadline relative to real time.
     */
    struct timespec     ts = {0,0};
    if (!pThis->fMonotonicClock)
    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_HAIKU)
        struct timeval  tv = {0,0};
        gettimeofday(&tv, NULL);
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
#else
        clock_gettime(CLOCK_REALTIME, &ts);
#endif
        struct timespec tsAdd;
        tsAdd.tv_nsec = uTimeout % UINT32_C(1000000000);
        tsAdd.tv_sec  = uTimeout / UINT32_C(1000000000);
        if (   sizeof(ts.tv_sec) < sizeof(uint64_t)
            && (   uTimeout > UINT64_C(1000000000) * UINT32_MAX
                || (uint64_t)ts.tv_sec + tsAdd.tv_sec >= UINT32_MAX) )
            return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);

        ts.tv_sec  += tsAdd.tv_sec;
        ts.tv_nsec += tsAdd.tv_nsec;
        if (ts.tv_nsec >= 1000000000)
        {
            ts.tv_nsec -= 1000000000;
            ts.tv_sec++;
        }
        /* Note! No need to complete uAbsTimeout for RTSEMWAIT_FLAGS_RELATIVE in this path. */
    }
    else
    {
        /* ASSUMES RTTimeSystemNanoTS() == RTTimeNanoTS() == clock_gettime(CLOCK_MONOTONIC). */
        if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
            uAbsTimeout += RTTimeSystemNanoTS();
        if (   sizeof(ts.tv_sec) < sizeof(uint64_t)
            && uAbsTimeout > UINT64_C(1000000000) * UINT32_MAX)
            return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);
        ts.tv_nsec = uAbsTimeout % UINT32_C(1000000000);
        ts.tv_sec  = uAbsTimeout / UINT32_C(1000000000);
    }

    /*
     * To business!
     */
    /* take mutex */
    int rc = pthread_mutex_lock(&pThis->Mutex);
    AssertMsgReturn(rc == 0, ("rc=%d pThis=%p\n", rc, pThis), RTErrConvertFromErrno(rc)); NOREF(rc);
    ASMAtomicIncU32(&pThis->cWaiters);

    for (;;)
    {
        /* check state. */
        uint32_t const u32State = pThis->u32State;
        if (u32State != EVENTMULTI_STATE_NOT_SIGNALED)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            rc = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc));
            return u32State == EVENTMULTI_STATE_SIGNALED
                 ? VINF_SUCCESS
                 : VERR_SEM_DESTROYED;
        }

        /* wait */
#ifdef RTSEMEVENTMULTI_STRICT
        RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
            if (RT_FAILURE(rc))
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                pthread_mutex_unlock(&pThis->Mutex);
                return rc;
            }
        }
#else
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
        rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mutex, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
        if (    rc
            && (   rc != EINTR  /* according to SuS this function shall not return EINTR, but linux man page says differently. */
                || (fFlags & RTSEMWAIT_FLAGS_NORESUME)) )
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed to wait on event multi sem %p, rc=%d.\n", pThis, rc));
            ASMAtomicDecU32(&pThis->cWaiters);
            int rc2 = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc2, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc2)); NOREF(rc2);
            return RTErrConvertFromErrno(rc);
        }

        /* check the absolute deadline. */
    }
}
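The fMonotonicClock branch above assumes the condition variable was created against CLOCK_MONOTONIC, so absolute deadlines are immune to wall-clock adjustments; the gettimeofday/CLOCK_REALTIME branch is the fallback for platforms (such as Darwin) without that support. A short sketch of how such a condition variable is created, assuming plain POSIX:

#include <pthread.h>
#include <time.h>

/* Create a condition variable whose pthread_cond_timedwait() deadlines are
   interpreted against CLOCK_MONOTONIC rather than CLOCK_REALTIME. */
static int CondInitMonotonic(pthread_cond_t *pCond)
{
    pthread_condattr_t Attr;
    int rc = pthread_condattr_init(&Attr);
    if (rc == 0)
    {
        rc = pthread_condattr_setclock(&Attr, CLOCK_MONOTONIC);
        if (rc == 0)
            rc = pthread_cond_init(pCond, &Attr);
        pthread_condattr_destroy(&Attr);
    }
    return rc;   /* 0 on success, errno-style error otherwise */
}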
Example #12
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_DESTROYED if destroyed.
 *
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
        /*
         * Do the wait.
         *
         * In ring-3 this gets cluttered by lock validation and thread state
         * maintenance.
         *
         * In ring-0 we have to deal with the possibility that the thread has
         * been signalled and the interruptible wait function returning
         * immediately.  In that case we do normal R0/RC rcBusy handling.
         */
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
#  else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
#  endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# else  /* IN_RING0 */
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# endif /* IN_RING0 */

        /*
         * Deal with the return code and critsect destruction.
         */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));

# ifdef IN_RING0
        /* Something is pending (signal, APC, debugger, whatever), just go back
           to ring-3 so the kernel can deal with it when leaving kernel context.

           Note! We've incremented cLockers already and cannot safely decrement
                 it without creating a race with PDMCritSectLeave, resulting in
                 spurious wakeups. */
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
        AssertRC(rc);
# endif
    }
    /* won't get here */
}
Example #13
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis   = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart         = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows write access.
     */
    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (    !pThis->cReads
        &&  (   (   !pThis->cWrites
                 && (   !pThis->cWritesWaiting /* play fair if we can wait */
                     || !cMillies)
                )
             || pThis->hWriter == hNativeSelf
            )
       )
    {
        /*
         * Reset the reader event semaphore if necessary.
         */
        if (pThis->fNeedResetReadEvent)
        {
            pThis->fNeedResetReadEvent = false;
            rc = RTSemEventMultiReset(pThis->ReadEvent);
            AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
        }

        pThis->cWrites++;
        pThis->hWriter = hNativeSelf;
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, pThis->cWrites == 1);
#endif
        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    /*
     * Signal writer presence.
     */
    if (cMillies != 0)
        pThis->cWritesWaiting++;

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for writing.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                tsDelta /= 1000000;
                if ((uint64_t)tsDelta < cMilliesInitial)
                    cMilliesInitial = (RTMSINTERVAL)tsDelta;
                else
                    cMilliesInitial = 1;
            }
        }

#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                 cMillies, RTTHREADSTATE_RW_WRITE, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventWaitNoResume(pThis->WriteEvent, cMillies);
        else
            rcWait = rc = RTSemEventWait(pThis->WriteEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (RT_UNLIKELY(RT_FAILURE_NP(rc) && rc != VERR_TIMEOUT)) /* timeouts are handled below */
        {
            AssertMsgRC(rc, ("RTSemEventWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (RT_UNLIKELY(pThis->u32Magic != RTSEMRW_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did prior to this loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (!pThis->cReads && (!pThis->cWrites || pThis->hWriter == hNativeSelf))
        {
            /*
             * Reset the reader event semaphore if necessary.
             */
            if (pThis->fNeedResetReadEvent)
            {
                pThis->fNeedResetReadEvent = false;
                rc = RTSemEventMultiReset(pThis->ReadEvent);
                AssertMsgRC(rc, ("Failed to reset readers, rwsem %p, rc=%Rrc.\n", hRWSem, rc));
            }

            pThis->cWrites++;
            pThis->hWriter = hNativeSelf;
            pThis->cWritesWaiting--;
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif

            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /*
     * Timeout/error case, clean up.
     */
    if (pThis->u32Magic == RTSEMRW_MAGIC)
    {
        RTCritSectEnter(&pThis->CritSect);
        /* Adjust this counter, whether we got the critsect or not. */
        pThis->cWritesWaiting--;
        RTCritSectLeave(&pThis->CritSect);
    }
    return rc;
}
Example #14
DECL_FORCE_INLINE(int) rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate handle.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

    RTMSINTERVAL    cMilliesInitial = cMillies;
    uint64_t        tsStart = 0;
    if (cMillies != RT_INDEFINITE_WAIT && cMillies != 0)
        tsStart = RTTimeNanoTS();

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int rc9;
        if (pThis->hWriter != NIL_RTTHREAD && pThis->hWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Take critsect.
     */
    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
        return rc;
    }

    /*
     * Check if the state of affairs allows read access.
     * Do not block further readers if there is a writer waiting, as
     * that will break/deadlock reader recursion.
     */
    if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
        && (   !pThis->cWritesWaiting
            ||  pThis->cReads)
#endif
       )
    {
        pThis->cReads++;
        Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
        RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTNATIVETHREAD hNativeSelf = pThis->CritSect.NativeThreadOwner;
    if (pThis->hWriter == hNativeSelf)
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursionMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core, pSrcPos);
        if (RT_FAILURE(rc9))
        {
            RTCritSectLeave(&pThis->CritSect);
            return rc9;
        }
#endif

        pThis->cWriterReads++;
        Assert(pThis->cWriterReads > 0);

        RTCritSectLeave(&pThis->CritSect);
        return VINF_SUCCESS;
    }

    RTCritSectLeave(&pThis->CritSect);

    /*
     * Wait till it's ready for reading.
     */
    if (cMillies == 0)
        return VERR_TIMEOUT;

#ifndef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (;;)
    {
        if (cMillies != RT_INDEFINITE_WAIT)
        {
            int64_t tsDelta = RTTimeNanoTS() - tsStart;
            if (tsDelta >= 1000000)
            {
                tsDelta /= 1000000;
                if ((uint64_t)tsDelta < cMilliesInitial)
                    cMilliesInitial = (RTMSINTERVAL)tsDelta;
                else
                    cMilliesInitial = 1;
            }
        }
#ifdef RTSEMRW_STRICT
        rc = RTLockValidatorRecSharedCheckBlocking(&pThis->ValidatorRead, hThreadSelf, pSrcPos, true,
                                                   cMillies, RTTHREADSTATE_RW_READ, false);
        if (RT_FAILURE(rc))
            break;
#else
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
        int rcWait;
        if (fInterruptible)
            rcWait = rc = RTSemEventMultiWaitNoResume(pThis->ReadEvent, cMillies);
        else
            rcWait = rc = RTSemEventMultiWait(pThis->ReadEvent, cMillies);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
        if (RT_FAILURE(rc) && rc != VERR_TIMEOUT) /* handle timeout below */
        {
            AssertMsgRC(rc, ("RTSemEventMultiWait failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (pThis->u32Magic != RTSEMRW_MAGIC)
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        /*
         * Re-take critsect and repeat the check we did before the loop.
         */
        rc = RTCritSectEnter(&pThis->CritSect);
        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("RTCritSectEnter failed on rwsem %p, rc=%Rrc\n", hRWSem, rc));
            break;
        }

        if (    pThis->hWriter == NIL_RTNATIVETHREAD
#if 0
            && (   !pThis->cWritesWaiting
                ||  pThis->cReads)
#endif
           )
        {
            pThis->cReads++;
            Assert(pThis->cReads > 0);
#ifdef RTSEMRW_STRICT
            RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif

            RTCritSectLeave(&pThis->CritSect);
            return VINF_SUCCESS;
        }

        RTCritSectLeave(&pThis->CritSect);

        /*
         * Quit if the wait already timed out.
         */
        if (rcWait == VERR_TIMEOUT)
        {
            rc = VERR_TIMEOUT;
            break;
        }
    }

    /* failed */
    return rc;
}
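Examples #13 and #14 are the writer and reader halves of the same generic read-write semaphore. For callers, the pairing looks roughly like the following sketch, assuming the public iprt/semaphore.h read-write API and hypothetical data:

#include <iprt/semaphore.h>

static RTSEMRW  g_hRWSem;
static int      g_aData[64];

int TableInit(void)
{
    return RTSemRWCreate(&g_hRWSem);
}

int TableRead(unsigned i)
{
    RTSemRWRequestRead(g_hRWSem, RT_INDEFINITE_WAIT);    /* many readers may hold the lock concurrently */
    int iValue = g_aData[i & 63];
    RTSemRWReleaseRead(g_hRWSem);
    return iValue;
}

void TableWrite(unsigned i, int iValue)
{
    RTSemRWRequestWrite(g_hRWSem, RT_INDEFINITE_WAIT);   /* exclusive; write recursion by the owner is allowed */
    g_aData[i & 63] = iValue;
    RTSemRWReleaseWrite(g_hRWSem);
}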
Example #15
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
            &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible)
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                          cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2. This means that we gained the
         * lock and there are _possibly_ some waiters. We don't know for sure, as another
         * thread might have entered this loop at nearly the same time. Therefore we will
         * call futex_wake once too often (if _no_ other thread entered this loop).
         * The key problem is the simple futex_wait test for x != y (iState != 2 in
         * our case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
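Example #16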
/**
 * Internal worker for RTSemMutexRequestNoResume and its debug companion.
 *
 * @returns Same as RTSemMutexRequestNoResume.
 * @param   hMutexSem           The mutex handle.
 * @param   cMillies            The number of milliseconds to wait.
 * @param   pSrcPos             The source position of the caller.
 */
DECL_FORCE_INLINE(int) rtSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check for recursive entry.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (hNativeOwner == hNativeSelf)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Lock mutex semaphore.
     */
    RTTHREAD        hThreadSelf = NIL_RTTHREAD;
    if (cMillies > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }
    DWORD rc = WaitForSingleObjectEx(pThis->hMtx,
                                     cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies,
                                     TRUE /*fAlertable*/);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
    switch (rc)
    {
        case WAIT_OBJECT_0:
#ifdef RTSEMMUTEX_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
            ASMAtomicWriteHandle(&pThis->hNativeOwner, hNativeSelf);
            ASMAtomicWriteU32(&pThis->cRecursions, 1);
            return VINF_SUCCESS;

        case WAIT_TIMEOUT:          return VERR_TIMEOUT;
        case WAIT_IO_COMPLETION:    return VERR_INTERRUPTED;
        case WAIT_ABANDONED:        return VERR_SEM_OWNER_DIED;
        default:
            AssertMsgFailed(("%u\n",  rc));
        case WAIT_FAILED:
        {
            int rc2 = RTErrConvertFromWin32(GetLastError());
            AssertMsgFailed(("Wait on hMutexSem %p failed, rc=%d lasterr=%d\n", hMutexSem, rc, GetLastError()));
            if (rc2 != VINF_SUCCESS)
                return rc2;

            AssertMsgFailed(("WaitForSingleObject(event) -> rc=%d while converted lasterr=%d\n", rc, rc2));
            return VERR_INTERNAL_ERROR;
        }
    }
}
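Example #17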
DECL_FORCE_INLINE(int) rtSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies, bool fAutoResume)
{
#ifdef RTSEMEVENT_STRICT
    PCRTLOCKVALSRCPOS  pSrcPos = NULL;
#endif

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    uint32_t    u32 = pThis->u32State;
    AssertReturn(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED, VERR_INVALID_HANDLE);

    /*
     * Timed or indefinite wait?
     */
    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* for fairness, yield before going to sleep. */
        if (    ASMAtomicIncU32(&pThis->cWaiters) > 1
            &&  pThis->u32State == EVENT_STATE_SIGNALED)
            pthread_yield();

         /* take mutex */
        int rc = pthread_mutex_lock(&pThis->Mutex);
        if (rc)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            AssertMsgFailed(("Failed to lock event sem %p, rc=%d.\n", hEventSem, rc));
            return RTErrConvertFromErrno(rc);
        }

        for (;;)
        {
            /* check state. */
            if (pThis->u32State == EVENT_STATE_SIGNALED)
            {
                ASMAtomicWriteU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED);
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc)); NOREF(rc);
                return VINF_SUCCESS;
            }
            if (pThis->u32State == EVENT_STATE_UNINITIALIZED)
            {
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc)); NOREF(rc);
                return VERR_SEM_DESTROYED;
            }

            /* wait */
#ifdef RTSEMEVENT_STRICT
            RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                                 ? RTThreadSelfAutoAdopt()
                                 : RTThreadSelf();
            if (pThis->fEverHadSignallers)
            {
                rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                           cMillies, RTTHREADSTATE_EVENT, true);
                if (RT_FAILURE(rc))
                {
                    ASMAtomicDecU32(&pThis->cWaiters);
                    pthread_mutex_unlock(&pThis->Mutex);
                    return rc;
                }
            }
#else
            RTTHREAD hThreadSelf = RTThreadSelf();
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
            rc = pthread_cond_wait(&pThis->Cond, &pThis->Mutex);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
            if (rc)
            {
                AssertMsgFailed(("Failed to wait on event sem %p, rc=%d.\n", hEventSem, rc));
                ASMAtomicDecU32(&pThis->cWaiters);
                int rc2 = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc2, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc2)); NOREF(rc2);
                return RTErrConvertFromErrno(rc);
            }
        }
    }
    else
    {
        /*
         * Get current time and calc end of wait time.
         */
        struct timespec     ts = {0,0};
#if defined(RT_OS_DARWIN) || defined(RT_OS_HAIKU)
        struct timeval      tv = {0,0};
        gettimeofday(&tv, NULL);
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
#else
        clock_gettime(CLOCK_REALTIME, &ts);
#endif
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* for fairness, yield before going to sleep. */
        if (ASMAtomicIncU32(&pThis->cWaiters) > 1 && cMillies)
            pthread_yield();

        /* take mutex */
        int rc = pthread_mutex_lock(&pThis->Mutex);
        if (rc)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            AssertMsg(rc == ETIMEDOUT, ("Failed to lock event sem %p, rc=%d.\n", hEventSem, rc));
            return RTErrConvertFromErrno(rc);
        }

        for (;;)
        {
            /* check state. */
            if (pThis->u32State == EVENT_STATE_SIGNALED)
            {
                ASMAtomicWriteU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED);
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc)); NOREF(rc);
                return VINF_SUCCESS;
            }
            if (pThis->u32State == EVENT_STATE_UNINITIALIZED)
            {
                rc = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc)); NOREF(rc);
                return VERR_SEM_DESTROYED;
            }

            /* we're done if the timeout is 0. */
            if (!cMillies)
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                rc = pthread_mutex_unlock(&pThis->Mutex);
                return VERR_TIMEOUT;
            }

            /* wait */
#ifdef RTSEMEVENT_STRICT
            RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                                 ? RTThreadSelfAutoAdopt()
                                 : RTThreadSelf();
            if (pThis->fEverHadSignallers)
            {
                rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                           cMillies, RTTHREADSTATE_EVENT, true);
                if (RT_FAILURE(rc))
                {
                    ASMAtomicDecU32(&pThis->cWaiters);
                    pthread_mutex_unlock(&pThis->Mutex);
                    return rc;
                }
            }
#else
            RTTHREAD hThreadSelf = RTThreadSelf();
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
            rc = pthread_cond_timedwait(&pThis->Cond, &pThis->Mutex, &ts);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
            if (rc && (rc != EINTR || !fAutoResume)) /* according to SuS this function shall not return EINTR, but linux man page says differently. */
            {
                AssertMsg(rc == ETIMEDOUT, ("Failed to wait on event sem %p, rc=%d.\n", hEventSem, rc));
                ASMAtomicDecU32(&pThis->cWaiters);
                int rc2 = pthread_mutex_unlock(&pThis->Mutex);
                AssertMsg(!rc2, ("Failed to unlock event sem %p, rc2=%d.\n", hEventSem, rc2)); NOREF(rc2);
                return RTErrConvertFromErrno(rc);
            }
        } /* for (;;) */
    }
}
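All of the event-semaphore variants in this collection implement the same caller-facing contract. A typical producer/consumer use, assuming the public iprt/semaphore.h event API and hypothetical names:

#include <iprt/err.h>
#include <iprt/semaphore.h>

static RTSEMEVENT g_hEvtWork = NIL_RTSEMEVENT;

int WorkInit(void)
{
    return RTSemEventCreate(&g_hEvtWork);
}

/* Producer: queue the work item elsewhere, then wake one waiting consumer. */
void WorkSubmit(void)
{
    RTSemEventSignal(g_hEvtWork);
}

/* Consumer: wait up to 100 ms; VERR_TIMEOUT and VERR_INTERRUPTED are expected outcomes. */
int WorkWaitAndProcess(void)
{
    int rc = RTSemEventWaitNoResume(g_hEvtWork, 100 /*ms*/);
    if (rc == VINF_SUCCESS)
    {
        /* ...process one unit of work... */
    }
    return rc;
}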