static void TestNegative(void)
{
    RTTestSub(g_hTest, "Negative");
    bool fSavedAssertQuiet    = RTAssertSetQuiet(true);
    bool fSavedAssertMayPanic = RTAssertSetMayPanic(false);
    bool fSavedLckValEnabled  = RTLockValidatorSetEnabled(false);

    RTCRITSECTRW CritSectRw;
    RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectRwInit(&CritSectRw), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VERR_NOT_OWNER);
    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VERR_NOT_OWNER);

    RTTEST_CHECK_RC(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VERR_NOT_OWNER);

    RTTEST_CHECK_RC(g_hTest, RTCritSectRwEnterShared(&CritSectRw), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VERR_WRONG_ORDER); /* cannot release the final write lock while a nested read is still held. */
    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS);
    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS);

    RTTEST_CHECK_RC(g_hTest, RTCritSectRwDelete(&CritSectRw), VINF_SUCCESS);

    RTLockValidatorSetEnabled(fSavedLckValEnabled);
    RTAssertSetMayPanic(fSavedAssertMayPanic);
    RTAssertSetQuiet(fSavedAssertQuiet);
}
/*virtual*/ void RWLockHandle::unlockWrite()
{
#ifdef GLUE_USE_CRITSECTRW
    int vrc = RTCritSectRwLeaveExcl(&m->CritSect);
#else
    int vrc = RTSemRWReleaseWrite(m->sem);
#endif
    AssertRC(vrc);
}
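The write-lock acquisition counterpart is not part of this excerpt. A minimal sketch of what RWLockHandle::lockWrite() might look like, reusing the m->CritSect / m->sem members and the GLUE_USE_CRITSECTRW switch from unlockWrite() above (everything else is an illustrative assumption):

/* Hypothetical counterpart to unlockWrite() above; member names follow the excerpt. */
/*virtual*/ void RWLockHandle::lockWrite()
{
#ifdef GLUE_USE_CRITSECTRW
    /* Exclusive (write) entry on the R/W critical section. */
    int vrc = RTCritSectRwEnterExcl(&m->CritSect);
#else
    /* Block until the write half of the R/W semaphore is available. */
    int vrc = RTSemRWRequestWrite(m->sem, RT_INDEFINITE_WAIT);
#endif
    AssertRC(vrc);
}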
static DECLCALLBACK(int) Test4Thread(RTTHREAD ThreadSelf, void *pvUser)
{
    /* Use randomization to get a little more variation of the sync pattern.
       We use a pseudo random generator here so that we don't end up testing
       the speed of the /dev/urandom implementation, but rather the read-write
       semaphores. */
    int rc;
    RTRAND hRand;
    RTTEST_CHECK_RC_OK_RET(g_hTest, rc = RTRandAdvCreateParkMiller(&hRand), rc);
    RTTEST_CHECK_RC_OK_RET(g_hTest, rc = RTRandAdvSeed(hRand, (uintptr_t)ThreadSelf), rc);
    unsigned c100 = RTRandAdvU32Ex(hRand, 0, 99);

    uint64_t *pcItr = (uint64_t *)pvUser;
    bool fWrite;
    for (;;)
    {
        unsigned readrec = RTRandAdvU32Ex(hRand, 0, 3);
        unsigned writerec = RTRandAdvU32Ex(hRand, 0, 3);
        /* Don't overdo recursion testing. */
        if (readrec > 1)
            readrec--;
        if (writerec > 1)
            writerec--;

        fWrite = (c100 < g_uWritePercent);
        if (fWrite)
        {
            for (unsigned i = 0; i <= writerec; i++)
            {
                rc = RTCritSectRwEnterExcl(&g_CritSectRw);
                if (RT_FAILURE(rc))
                {
                    RTTestFailed(g_hTest, "Write recursion %u on %s failed with rc=%Rrc", i, RTThreadSelfName(), rc);
                    break;
                }
            }
            if (RT_FAILURE(rc))
                break;
            if (ASMAtomicIncU32(&g_cConcurrentWriters) != 1)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s after write locking it",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
            if (g_cConcurrentReaders != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentReaders=%u on %s after write locking it",
                             g_cConcurrentReaders, RTThreadSelfName());
                break;
            }
        }
        else
        {
            rc = RTCritSectRwEnterShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read locking on %s failed with rc=%Rrc", RTThreadSelfName(), rc);
                break;
            }
            ASMAtomicIncU32(&g_cConcurrentReaders);
            if (g_cConcurrentWriters != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s after read locking it",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
        }
        for (unsigned i = 0; i < readrec; i++)
        {
            rc = RTCritSectRwEnterShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read recursion %u on %s failed with rc=%Rrc", i, RTThreadSelfName(), rc);
                break;
            }
        }
        if (RT_FAILURE(rc))
            break;

        /*
         * Check for fairness: the iteration counts of the threads should not differ too much.
         */
        (*pcItr)++;

        /*
         * Check for correctness: Give other threads a chance.  If the implementation is
         * correct, no other writer can enter the lock now (and no other thread at all
         * while we hold it exclusively).
         */
        if (g_fYield)
            RTThreadYield();

        for (unsigned i = 0; i < readrec; i++)
        {
            rc = RTCritSectRwLeaveShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read release %u on %s failed with rc=%Rrc", i, RTThreadSelfName(), rc);
                break;
            }
        }
        if (RT_FAILURE(rc))
            break;

        if (fWrite)
        {
            if (ASMAtomicDecU32(&g_cConcurrentWriters) != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s before write release",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
            if (g_cConcurrentReaders != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentReaders=%u on %s before write release",
                             g_cConcurrentReaders, RTThreadSelfName());
                break;
            }
            for (unsigned i = 0; i <= writerec; i++)
            {
                rc = RTCritSectRwLeaveExcl(&g_CritSectRw);
                if (RT_FAILURE(rc))
                {
                    RTTestFailed(g_hTest, "Write release %u on %s failed with rc=%Rrc", i, RTThreadSelfName(), rc);
                    break;
                }
            }
        }
        else
        {
            if (g_cConcurrentWriters != 0)
            {
                RTTestFailed(g_hTest, "g_cConcurrentWriters=%u on %s before read release",
                             g_cConcurrentWriters, RTThreadSelfName());
                break;
            }
            ASMAtomicDecU32(&g_cConcurrentReaders);
            rc = RTCritSectRwLeaveShared(&g_CritSectRw);
            if (RT_FAILURE(rc))
            {
                RTTestFailed(g_hTest, "Read release on %s failed with rc=%Rrc", RTThreadSelfName(), rc);
                break;
            }
        }

        if (g_fTerminate)
            break;

        c100++;
        c100 %= 100;
    }
    if (!g_fQuiet)
        RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "Thread %s exited with %lld\n", RTThreadSelfName(), *pcItr);
    RTRandAdvDestroy(hRand);
    return VINF_SUCCESS;
}
static bool Test1(void)
{
    RTTestSub(g_hTest, "Basics");

    RTCRITSECTRW CritSectRw;
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwInit(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsInitialized(&CritSectRw), false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);

    for (int iRun = 0; iRun < 3; iRun++)
    {
        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 1, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 0, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == true, false);

        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 2, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 0, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == true, false);

        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterShared(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 2, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 1, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == true, false);

        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwEnterExcl(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 3, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 1, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == true, false);

        /* Midway point: now unwind the recursion. */

        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 2, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 1, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == true, false);

        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 2, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 0, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == true, false);

        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 1, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 0, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == true, false);

        RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriteRecursion(&CritSectRw) == 0, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwGetWriterReadRecursion(&CritSectRw) == 0, false);
        RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsWriteOwner(&CritSectRw) == false, false);
    }

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);

    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterExcl(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwTryEnterShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveShared(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwLeaveExcl(&CritSectRw), VINF_SUCCESS, false);


    RTTEST_CHECK_RET(g_hTest, RTCritSectRwIsInitialized(&CritSectRw), false);
    RTTEST_CHECK_RC_RET(g_hTest, RTCritSectRwDelete(&CritSectRw), VINF_SUCCESS, false);
    RTTEST_CHECK_RET(g_hTest, !RTCritSectRwIsInitialized(&CritSectRw), false);

    return true;
}
static void Test4(unsigned cThreads, unsigned cSeconds, unsigned uWritePercent, bool fYield, bool fQuiet)
{
    unsigned i;
    uint64_t acIterations[32];
    RTTHREAD aThreads[RT_ELEMENTS(acIterations)];
    AssertRelease(cThreads <= RT_ELEMENTS(acIterations));

    RTTestSubF(g_hTest, "Test4 - %u threads, %u sec, %u%% writes, %syielding",
               cThreads, cSeconds, uWritePercent, fYield ? "" : "non-");

    /*
     * Init globals.
     */
    g_fYield = fYield;
    g_fQuiet = fQuiet;
    g_fTerminate = false;
    g_uWritePercent = uWritePercent;
    g_cConcurrentWriters = 0;
    g_cConcurrentReaders = 0;

    RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectRwInit(&g_CritSectRw), VINF_SUCCESS);

    /*
     * Create the threads and let them block on the write-locked critical section.
     */
    RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectRwEnterExcl(&g_CritSectRw), VINF_SUCCESS);

    for (i = 0; i < cThreads; i++)
    {
        acIterations[i] = 0;
        RTTEST_CHECK_RC_RETV(g_hTest, RTThreadCreateF(&aThreads[i], Test4Thread, &acIterations[i], 0,
                                                      RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE,
                                                      "test-%u", i), VINF_SUCCESS);
    }

    /*
     * Do the test run.
     */
    uint32_t cErrorsBefore = RTTestErrorCount(g_hTest);
    uint64_t u64StartTS = RTTimeNanoTS();
    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveExcl(&g_CritSectRw), VINF_SUCCESS);
    RTThreadSleep(cSeconds * 1000);
    ASMAtomicWriteBool(&g_fTerminate, true);
    uint64_t ElapsedNS = RTTimeNanoTS() - u64StartTS;

    /*
     * Clean up the threads and semaphore.
     */
    for (i = 0; i < cThreads; i++)
        RTTEST_CHECK_RC(g_hTest, RTThreadWait(aThreads[i], 5000, NULL), VINF_SUCCESS);

    RTTEST_CHECK_MSG(g_hTest, g_cConcurrentWriters == 0, (g_hTest, "g_cConcurrentWriters=%u at end of test\n", g_cConcurrentWriters));
    RTTEST_CHECK_MSG(g_hTest, g_cConcurrentReaders == 0, (g_hTest, "g_cConcurrentReaders=%u at end of test\n", g_cConcurrentReaders));

    RTTEST_CHECK_RC(g_hTest, RTCritSectRwDelete(&g_CritSectRw), VINF_SUCCESS);

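    /* Assumed intent: if anything failed, pause briefly so the failing threads can finish writing their output. */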
    if (RTTestErrorCount(g_hTest) != cErrorsBefore)
        RTThreadSleep(100);

    /*
     * Collect and display the results.
     */
    uint64_t cItrTotal = acIterations[0];
    for (i = 1; i < cThreads; i++)
        cItrTotal += acIterations[i];

    uint64_t cItrNormal = cItrTotal / cThreads;
    uint64_t cItrMinOK = cItrNormal / 20; /* 5% */
    uint64_t cItrMaxDeviation = 0;
    for (i = 0; i < cThreads; i++)
    {
        uint64_t cItrDelta = RT_ABS((int64_t)(acIterations[i] - cItrNormal));
        if (acIterations[i] < cItrMinOK)
            RTTestFailed(g_hTest, "Thread %u did less than 5%% of the iterations - %llu (it) vs. %llu (5%%) - %llu%%\n",
                         i, acIterations[i], cItrMinOK, cItrDelta * 100 / cItrNormal);
        else if (cItrDelta > cItrNormal / 2)
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                         "Warning! Thread %u deviates by more than 50%% - %llu (it) vs. %llu (avg) - %llu%%\n",
                         i, acIterations[i], cItrNormal, cItrDelta * 100 / cItrNormal);
        if (cItrDelta > cItrMaxDeviation)
            cItrMaxDeviation = cItrDelta;

    }

    //RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
    //             "Threads: %u  Total: %llu  Per Sec: %llu  Avg: %llu ns  Max dev: %llu%%\n",
    //             cThreads,
    //             cItrTotal,
    //             cItrTotal / cSeconds,
    //             ElapsedNS / cItrTotal,
    //             cItrMaxDeviation * 100 / cItrNormal
    //             );
    //
    RTTestValue(g_hTest, "Thruput", cItrTotal * UINT32_C(1000000000) / ElapsedNS, RTTESTUNIT_CALLS_PER_SEC);
    RTTestValue(g_hTest, "Max diviation", cItrMaxDeviation * 100 / cItrNormal, RTTESTUNIT_PCT);
}
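For reference, these subtests would typically be driven by a small main() using the IPRT test harness. The following is a minimal sketch, assuming RTTestInitAndCreate()/RTTestSummaryAndDestroy() and purely illustrative Test4 parameters (thread count, duration, write percentage), none of which are taken from this excerpt:

int main()
{
    /* Create the test instance used by the g_hTest global throughout the subtests. */
    RTEXITCODE rcExit = RTTestInitAndCreate("tstRTCritSectRw", &g_hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(g_hTest);

    if (Test1())
    {
        TestNegative();
        /* Illustrative parameters: threads, seconds, write %, yield, quiet. */
        Test4(4, 2, 50, true /*fYield*/, false /*fQuiet*/);
        Test4(4, 2,  2, false /*fYield*/, false /*fQuiet*/);
    }

    /* Print the summary and return the overall test status. */
    return RTTestSummaryAndDestroy(g_hTest);
}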
/**
 * Looks up the given user, creating it if not found.
 *
 * @returns Pointer to the LOCKED and RETAINED per user data.
 *          NULL on allocation error.
 * @param   a_rStrUserSid   The user SID.
 * @param   a_rStrUsername  The user name if available.
 */
VBoxSDSPerUserData *VirtualBoxSDS::i_lookupOrCreatePerUserData(com::Utf8Str const &a_rStrUserSid,
                                                               com::Utf8Str const &a_rStrUsername)
{
    /*
     * Try to do a simple lookup first.
     */
    VBoxSDSPerUserData *pUserData = i_lookupPerUserData(a_rStrUserSid);
    if (!pUserData)
    {
        /*
         * The SID is not in the map, so create a new entry.
         */
        try
        {
            pUserData = new VBoxSDSPerUserData(a_rStrUserSid, a_rStrUsername);
        }
        catch (std::bad_alloc &)
        {
            pUserData = NULL;
        }
        if (pUserData)
        {
            /*
             * Insert it.  We must check if someone raced us here.
             */
            VBoxSDSPerUserData *pUserDataFree = pUserData;
            pUserData->i_lock();

            int vrc = RTCritSectRwEnterExcl(&m_MapCritSect);
            if (RT_SUCCESS(vrc))
            {
                UserDataMap_T::iterator it = m_UserDataMap.find(a_rStrUserSid);
                if (it == m_UserDataMap.end())
                {
                    try
                    {
                        m_UserDataMap[a_rStrUserSid] = pUserData;
                        pUserData->i_retain();
                    }
                    catch (std::bad_alloc &)
                    {
                        pUserData = NULL;
                    }
                }
                else
                    pUserData = NULL;

                RTCritSectRwLeaveExcl(&m_MapCritSect);

                if (pUserData)
                    LogRel(("i_lookupOrCreatePerUserData: Created new entry for %s (%s)\n",
                            pUserData->m_strUserSid.c_str(), pUserData->m_strUsername.c_str() ));
                else
                {
                    pUserDataFree->i_unlock();
                    delete pUserDataFree;
                }
            }
        }
    }

    return pUserData;
}
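The i_lookupPerUserData() helper used above is not shown in this excerpt. A plausible sketch of its read-locked fast path, assuming (per the docblock above) that it returns the entry locked and retained; the map, critical section and i_retain()/i_lock() calls follow the surrounding code, everything else is an assumption:

/* Hypothetical sketch of the lookup helper referenced above. */
VBoxSDSPerUserData *VirtualBoxSDS::i_lookupPerUserData(com::Utf8Str const &a_rStrUserSid)
{
    VBoxSDSPerUserData *pUserData = NULL;
    int vrc = RTCritSectRwEnterShared(&m_MapCritSect); /* readers may run concurrently */
    if (RT_SUCCESS(vrc))
    {
        UserDataMap_T::iterator it = m_UserDataMap.find(a_rStrUserSid);
        if (it != m_UserDataMap.end())
        {
            pUserData = it->second;
            pUserData->i_retain();   /* keep the entry alive after the map lock is dropped */
        }
        RTCritSectRwLeaveShared(&m_MapCritSect);
        if (pUserData)
            pUserData->i_lock();     /* return it LOCKED, as the docblock requires */
    }
    return pUserData;
}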