PSLIST_ENTRY WINAPI InterlockedPopEntrySList(PSLIST_HEADER pHead)
{
    RESOLVE_ME(InterlockedPopEntrySList);
    if (pfnApi)
        return pfnApi(pHead);

    /* fallback: */
    PSLIST_ENTRY pRet = NULL;
    for (;;)
    {
        SLIST_HEADER OldHead = *pHead;
        pRet = OldHead.Next.Next;
        if (pRet)
        {
            SLIST_HEADER NewHead;
            /* A racing thread may already have popped and freed the entry,
               so dereferencing it below can fault; catch that and retry. */
            __try
            {
                NewHead.Next.Next = pRet->Next;
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                continue;
            }
            NewHead.Depth     = OldHead.Depth - 1;
            NewHead.Sequence  = OldHead.Sequence + 1; /* the sequence counter guards against ABA */
            if (ASMAtomicCmpXchgU64(&pHead->Alignment, NewHead.Alignment, OldHead.Alignment))
                break;
        }
        else
            break;
    }
    return pRet;
}

PSLIST_ENTRY WINAPI InterlockedFlushSList(PSLIST_HEADER pHead)
{
    RESOLVE_ME(InterlockedFlushSList);
    if (pfnApi)
        return pfnApi(pHead);

    /* fallback: */
    PSLIST_ENTRY pRet = NULL;
    if (pHead->Next.Next)
    {
        for (;;)
        {
            SLIST_HEADER OldHead = *pHead;
            SLIST_HEADER NewHead;
            NewHead.Alignment = 0;                    /* empty list: zero next pointer and depth */
            NewHead.Sequence  = OldHead.Sequence + 1; /* bump the sequence to guard against ABA */
            if (ASMAtomicCmpXchgU64(&pHead->Alignment, NewHead.Alignment, OldHead.Alignment))
            {
                pRet = OldHead.Next.Next;
                break;
            }
        }
    }
    return pRet;
}
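
A minimal usage sketch for the two fallbacks above (the ITEM type and testSList function are made up for illustration; InitializeSListHead and InterlockedPushEntrySList are the standard Win32 APIs, and real SLIST entries must be MEMORY_ALLOCATION_ALIGNMENT aligned):

#include <windows.h>

typedef struct ITEM
{
    SLIST_ENTRY Entry;      /* first member, so the entry pointer is also the item pointer */
    int         iValue;
} ITEM;

static DECLSPEC_ALIGN(MEMORY_ALLOCATION_ALIGNMENT) ITEM g_Item1 = { { NULL }, 1 };
static DECLSPEC_ALIGN(MEMORY_ALLOCATION_ALIGNMENT) ITEM g_Item2 = { { NULL }, 2 };

static void testSList(void)
{
    SLIST_HEADER Head;
    InitializeSListHead(&Head);

    InterlockedPushEntrySList(&Head, &g_Item1.Entry);
    InterlockedPushEntrySList(&Head, &g_Item2.Entry);

    PSLIST_ENTRY pTop  = InterlockedPopEntrySList(&Head);  /* LIFO: returns &g_Item2.Entry */
    PSLIST_ENTRY pRest = InterlockedFlushSList(&Head);     /* detaches the rest (&g_Item1.Entry) */
    (void)pTop; (void)pRest;
}
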
/**
 * Internal worker for RTSemXRoadsNSEnter and RTSemXRoadsEWEnter.
 *
 * @returns IPRT status code.
 * @param   pThis               The semaphore instance.
 * @param   fDir                The direction.
 * @param   uCountShift         The shift count for getting the count.
 * @param   fCountMask          The mask for getting the count.
 * @param   uWaitCountShift     The shift count for getting the wait count.
 * @param   fWaitCountMask      The mask for getting the wait count.
 */
DECL_FORCE_INLINE(int) rtSemXRoadsEnter(RTSEMXROADSINTERNAL *pThis, uint64_t fDir,
                                        uint64_t uCountShift, uint64_t fCountMask,
                                        uint64_t uWaitCountShift, uint64_t fWaitCountMask)
{
    uint64_t    u64OldState;
    uint64_t    u64State;

    u64State = ASMAtomicReadU64(&pThis->u64State);
    u64OldState = u64State;
    add_hist(u64State, u64OldState, fDir, "enter");

    for (;;)
    {
        if ((u64State & RTSEMXROADS_DIR_MASK) == (fDir << RTSEMXROADS_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & fCountMask) >> uCountShift;
            c++;
            Assert(c < 8*_1K);
            u64State &= ~fCountMask;
            u64State |= c << uCountShift;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
                add_hist(u64State, u64OldState, fDir, "enter-simple");
                break;
            }
        }
        else if ((u64State & (RTSEMXROADS_CNT_NS_MASK | RTSEMXROADS_CNT_EW_MASK)) == 0)
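
The SList fallbacks above and the semaphore workers that follow all rest on the same optimistic read-modify-CmpXchg retry loop; a minimal standalone sketch of the pattern (g_u64Counter and incrementLockFree are made up for illustration):

#include <iprt/asm.h>   /* ASMAtomicReadU64, ASMAtomicCmpXchgU64 */

static uint64_t volatile g_u64Counter = 0;

static uint64_t incrementLockFree(void)
{
    uint64_t u64Old, u64New;
    do
    {
        u64Old = ASMAtomicReadU64(&g_u64Counter);   /* snapshot the current state */
        u64New = u64Old + 1;                        /* compute the updated state */
        /* Publish only if nobody changed the state in the meantime, else retry. */
    } while (!ASMAtomicCmpXchgU64(&g_u64Counter, u64New, u64Old));
    return u64New;
}
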
Example #4
static int rtCritSectRwEnterShared(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

#ifdef RTCRITSECTRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTCRITSECTRW_STRICT
                RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
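
This example and the next pack the reader count, the writer count and the current direction into a single 64-bit state word, so that one CmpXchg can update all of them atomically. An illustrative encoding (the EX_* constants and exAddReader are invented for the sketch, not the real RTCSRW_*/RTSEMRW_* values):

#include <stdint.h>

#define EX_CNT_RD_MASK   UINT64_C(0x0000000000007fff)   /* bits 0..14:  reader count */
#define EX_CNT_RD_SHIFT  0
#define EX_CNT_WR_MASK   UINT64_C(0x000000007fff0000)   /* bits 16..30: writer count */
#define EX_CNT_WR_SHIFT  16
#define EX_DIR_MASK      UINT64_C(0x0000000080000000)   /* bit  31:     current direction */
#define EX_DIR_SHIFT     31

/* The read path of the loop body boils down to this transformation: */
static uint64_t exAddReader(uint64_t u64State)
{
    uint64_t c = (u64State & EX_CNT_RD_MASK) >> EX_CNT_RD_SHIFT;
    c++;
    u64State &= ~EX_CNT_RD_MASK;
    u64State |= c << EX_CNT_RD_SHIFT;
    return u64State;
}
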
Example #5
static int rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTNATIVETHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try to follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0)

DECLINLINE(uint64_t) rtTimeGetSystemNanoTS(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16) /* This must match timer-r0drv-linux.c! */
    /*
     * Use ktime_get_ts; this is also what clock_gettime(CLOCK_MONOTONIC,) uses.
     */
    uint64_t u64;
    struct timespec Ts;
    ktime_get_ts(&Ts);
    u64 = Ts.tv_sec * UINT64_C(1000000000) + Ts.tv_nsec;
    return u64;

#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 60)
    /*
     * There seems to be no way of getting at the exact source of
     * sys_clock_gettime(CLOCK_MONOTONIC, &ts) here, but 64-bit jiffies
     * adjusted for the initial value should be pretty much the same.
     */
    uint64_t u64 = get_jiffies_64();
# ifdef INITIAL_JIFFIES
    u64 += INITIAL_JIFFIES;
# endif
    u64 *= TICK_NSEC;
    return u64;

#else   /* < 2.5.60 */
# if BITS_PER_LONG >= 64
    /*
     * This is the same as above, except that there is no get_jiffies_64()
     * here and we rely on long, and therefore jiffies, being 64-bit instead.
     */
    uint64_t u64 = jiffies;
# ifdef INITIAL_JIFFIES
    u64 += INITIAL_JIFFIES;
# endif
    u64 *= TICK_NSEC;
    return u64;

# else /* 32 bit jiffies */
    /*
     * We'll have to try to track jiffy rollovers here, or we'll be
     * in trouble every time the counter wraps.
     *
     * The high dword of s_u64Last is the rollover count, the low
     * dword is the previous jiffies value.  Updating is done by
     * atomic compare & exchange, of course.
     */
    static uint64_t volatile s_u64Last = 0;
    uint64_t u64;

    for (;;)
    {
        uint64_t u64NewLast;
        uint32_t cRollovers;
        uint32_t u32LastJiffies;

        /* Sample the values, re-checking jiffies to make sure no tick
           occurred while we read the 64-bit s_u64Last. */
        unsigned long ulNow = jiffies;
        uint64_t u64Last = s_u64Last;
        if (ulNow != jiffies)
            continue; /* a tick intervened, try again */
#  ifdef INITIAL_JIFFIES
        ulNow += INITIAL_JIFFIES;
#  endif

        u32LastJiffies = (uint32_t)u64Last;
        cRollovers = u64Last >> 32;

        /*
         * Check for rollover and update the static last value.
         *
         * The counter has wrapped when the new jiffies value is unsigned-less
         * than the previous one; this assumes we get here at least once per
         * wrap period.  On a rollover we must make sure our update succeeds,
         * or a racing thread could leave us returning a non-monotonic value.
         */
        if (ulNow < u32LastJiffies)
        {
            cRollovers++;
            u64NewLast = RT_MAKE_U64(ulNow, cRollovers);
            if (!ASMAtomicCmpXchgU64(&s_u64Last, u64NewLast, u64Last))
                continue; /* race, try again */
        }
        else
        {
            u64NewLast = RT_MAKE_U64(ulNow, cRollovers);
            ASMAtomicCmpXchgU64(&s_u64Last, u64NewLast, u64Last); /* a failed update is harmless here */
        }

        /* calculate the return value */
        u64 = ulNow;
        u64 *= TICK_NSEC;
        u64 += cRollovers * (_4G * TICK_NSEC);
        break;
    }

    return u64;
# endif /* 32 bit jiffies */
#endif  /* < 2.5.60 */
}
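
To see the 32-bit rollover bookkeeping concretely, here is a small single-threaded user-land simulation of the same arithmetic (simNanoTS is made up for illustration; TICK_NSEC is assumed to be 10 000 000, i.e. HZ = 100, and the CmpXchg is omitted since nothing races here):

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC UINT64_C(10000000)    /* 10 ms tick, i.e. HZ = 100 (assumed) */
#define _4G       UINT64_C(0x100000000)

/* High dword: rollover count; low dword: previous jiffies (same layout as s_u64Last). */
static uint64_t s_u64Last = UINT64_C(0x00000000ffffff00);

static uint64_t simNanoTS(uint32_t u32Jiffies)
{
    uint32_t u32LastJiffies = (uint32_t)s_u64Last;
    uint32_t cRollovers     = (uint32_t)(s_u64Last >> 32);
    if (u32Jiffies < u32LastJiffies)
        cRollovers++;                   /* the 32-bit counter wrapped */
    s_u64Last = ((uint64_t)cRollovers << 32) | u32Jiffies;
    return u32Jiffies * TICK_NSEC + cRollovers * (_4G * TICK_NSEC);
}

int main(void)
{
    uint64_t u64Before = simNanoTS(UINT32_C(0xfffffff0));   /* just before the wrap */
    uint64_t u64After  = simNanoTS(UINT32_C(0x00000010));   /* just after the wrap */
    printf("before=%llu after=%llu monotonic=%d\n",
           (unsigned long long)u64Before, (unsigned long long)u64After,
           u64After > u64Before);
    return 0;
}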