/**
 * Does a multi-threading list test. Several list additions, reads, replacements
 * and erasures are done simultaneously.
 *
 */
static void test2()
{
    RTTestISubF("MT test with 6 threads (%u tests per thread).", MTTESTITEMS);

    int                         rc;
    MTTESTLISTTYPE<MTTESTTYPE>  testList;
    RTTHREAD                    ahThreads[6];
    static PFNRTTHREAD          apfnThreads[6] =
    {
        MtTest1ThreadProc, MtTest2ThreadProc, MtTest3ThreadProc, MtTest4ThreadProc, MtTest5ThreadProc, MtTest6ThreadProc
    };

    for (unsigned i = 0; i < RT_ELEMENTS(ahThreads); i++)
    {
        RTTESTI_CHECK_RC_RETV(RTThreadCreateF(&ahThreads[i], apfnThreads[i], &testList, 0,
                                              RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "mttest%u", i), VINF_SUCCESS);
    }

    uint64_t tsMsDeadline = RTTimeMilliTS() + 60000;
    for (unsigned i = 0; i < RT_ELEMENTS(ahThreads); i++)
    {
        uint64_t tsNow = RTTimeMilliTS();
        uint32_t cWait = tsNow > tsMsDeadline ? 5000 : (uint32_t)(tsMsDeadline - tsNow);
        RTTESTI_CHECK_RC(RTThreadWait(ahThreads[i], cWait, NULL), VINF_SUCCESS);
    }

    RTTESTI_CHECK_RETV(testList.size() == MTTESTITEMS * 2);
    for (size_t i = 0; i < testList.size(); ++i)
    {
        uint32_t a = testList.at(i);
        RTTESTI_CHECK(a == 0x0 || a == 0xFFFFFFFF || a == 0xF0F0F0F0 || a == 0xFF00FF00);
    }
}
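
/*
 * The six MtTestNThreadProc worker procedures are not part of this excerpt.  Below is a
 * minimal sketch of what an appending worker could look like; the append() call and the
 * 0x0 fill value are assumptions derived from the size and value checks in test2() above,
 * not the original implementation.
 */
static DECLCALLBACK(int) MtTest1ThreadProc(RTTHREAD hSelf, void *pvUser)
{
    MTTESTLISTTYPE<MTTESTTYPE> *pTestList = (MTTESTLISTTYPE<MTTESTTYPE> *)pvUser;
    NOREF(hSelf);

    /* Append a batch of items; test2() later verifies every element is one of the known patterns. */
    for (unsigned i = 0; i < MTTESTITEMS; i++)
        pTestList->append(0x0);
    return VINF_SUCCESS;
}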
Example #2
/**
 * Time constrained benchmark with N threads and an unlimited cache.
 */
static void tst3(uint32_t cThreads, uint32_t cbObject, int iMethod, uint32_t cSecs)
{
    RTTestISubF("Benchmark - %u threads, %u bytes, %u secs, %s", cThreads, cbObject, cSecs,
                iMethod == 0 ? "RTMemCache"
                : "RTMemAlloc");

    /*
     * Create a cache with unlimited space, a start semaphore and line up
     * the threads.
     */
    RTTESTI_CHECK_RC_RETV(RTMemCacheCreate(&g_hMemCache, cbObject, 0 /*cbAlignment*/, UINT32_MAX, NULL, NULL, NULL, 0 /*fFlags*/), VINF_SUCCESS);

    RTSEMEVENTMULTI hEvt;
    RTTESTI_CHECK_RC_OK_RETV(RTSemEventMultiCreate(&hEvt));

    TST3THREAD aThreads[64];
    RTTESTI_CHECK_RETV(cThreads < RT_ELEMENTS(aThreads));

    ASMAtomicWriteBool(&g_fTst3Stop, false);
    for (uint32_t i = 0; i < cThreads; i++)
    {
        aThreads[i].hThread     = NIL_RTTHREAD;
        aThreads[i].cIterations = 0;
        aThreads[i].fUseCache   = iMethod == 0;
        aThreads[i].cbObject    = cbObject;
        aThreads[i].hEvt        = hEvt;
        RTTESTI_CHECK_RC_OK_RETV(RTThreadCreateF(&aThreads[i].hThread, tst3Thread, &aThreads[i], 0,
                                                 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "tst3-%u", i));
    }

    /*
     * Start the race.
     */
    RTTimeNanoTS(); /* warmup */

    uint64_t uStartTS = RTTimeNanoTS();
    RTTESTI_CHECK_RC_OK_RETV(RTSemEventMultiSignal(hEvt));
    RTThreadSleep(cSecs * 1000);
    ASMAtomicWriteBool(&g_fTst3Stop, true);
    for (uint32_t i = 0; i < cThreads; i++)
        RTTESTI_CHECK_RC_OK_RETV(RTThreadWait(aThreads[i].hThread, 60*1000, NULL));
    uint64_t cElapsedNS = RTTimeNanoTS() - uStartTS;

    /*
     * Sum up the counts.
     */
    uint64_t cIterations = 0;
    for (uint32_t i = 0; i < cThreads; i++)
        cIterations += aThreads[i].cIterations;

    RTTestIPrintf(RTTESTLVL_ALWAYS, "%'8u iterations per second, %'llu ns on avg\n",
                  (unsigned)((long double)cIterations * 1000000000.0 / cElapsedNS),
                  cElapsedNS / cIterations);

    /* clean up */
    RTTESTI_CHECK_RC(RTMemCacheDestroy(g_hMemCache), VINF_SUCCESS);
    RTTESTI_CHECK_RC_OK(RTSemEventMultiDestroy(hEvt));
}
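
/*
 * tst3Thread is not included in this excerpt.  The sketch below shows the shape such a
 * benchmark worker could take, based on the TST3THREAD fields initialised above (hEvt,
 * fUseCache, cbObject, cIterations); it is an assumption, not the shipped implementation.
 */
static DECLCALLBACK(int) tst3Thread(RTTHREAD hThreadSelf, void *pvUser)
{
    TST3THREAD *pThread     = (TST3THREAD *)pvUser;
    uint64_t    cIterations = 0;
    NOREF(hThreadSelf);

    /* Line up at the start semaphore, then allocate and free until told to stop. */
    RTSemEventMultiWait(pThread->hEvt, RT_INDEFINITE_WAIT);
    while (!ASMAtomicReadBool(&g_fTst3Stop))
    {
        void *pv = pThread->fUseCache ? RTMemCacheAlloc(g_hMemCache) : RTMemAlloc(pThread->cbObject);
        if (pThread->fUseCache)
            RTMemCacheFree(g_hMemCache, pv);
        else
            RTMemFree(pv);
        cIterations++;
    }

    pThread->cIterations = cIterations;
    return VINF_SUCCESS;
}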
Example #3
/** Sub test: exercises pool serialization with @a cThreads threads. */
static void tst4Sub(uint32_t cThreads)
{
    RTTestISubF("Serialization - %u threads", cThreads);
    RTMEMPOOL hMemPool;
    RTTESTI_CHECK_RC_RETV(RTMemPoolCreate(&hMemPool, "test 2a"), VINF_SUCCESS);
    g_hMemPool4 = hMemPool;

    PRTTHREAD pahThreads = (PRTTHREAD)RTMemPoolAlloc(hMemPool, cThreads * sizeof(RTTHREAD));
    RTTESTI_CHECK(pahThreads);
    if (pahThreads)
    {
        /* start them. */
        for (uint32_t i = 0; i < cThreads; i++)
        {
            int rc = RTThreadCreateF(&pahThreads[i], tst4Thread, (void *)(uintptr_t)i, 0,
                                     RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "tst4-%u/%u", i, cThreads);
            RTTESTI_CHECK_RC_OK(rc);
            if (RT_FAILURE(rc))
                pahThreads[i] = NIL_RTTHREAD;
        }
        RTThreadYield();

        /* kick them off. */
        for (uint32_t i = 0; i < cThreads; i++)
            if (pahThreads[i] != NIL_RTTHREAD)
                RTTESTI_CHECK_RC_OK(RTThreadUserSignal(pahThreads[i]));

        /* wait for them. */
        for (uint32_t i = 0; i < cThreads; i++)
            if (pahThreads[i] != NIL_RTTHREAD)
            {
                int rc = RTThreadWait(pahThreads[i], 2*60*1000, NULL);
                RTTESTI_CHECK_RC_OK(rc);
            }
    }

    RTTESTI_CHECK_RC(RTMemPoolDestroy(hMemPool), VINF_SUCCESS);
}
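
/*
 * tst4Thread is not shown here.  A hypothetical sketch of a serialization worker follows;
 * only g_hMemPool4 and the RTThreadUserSignal/RTThreadUserWait handshake come from
 * tst4Sub() above, the allocation sizes and iteration count are made up for illustration.
 */
static DECLCALLBACK(int) tst4Thread(RTTHREAD hSelf, void *pvUser)
{
    NOREF(pvUser); /* the thread index passed in by tst4Sub() is not needed in this sketch */

    /* Wait for the kick-off signalled via RTThreadUserSignal() in tst4Sub(). */
    RTTESTI_CHECK_RC_OK(RTThreadUserWait(hSelf, 60 * 1000));

    /* Hammer the shared pool so its internal serialization gets exercised concurrently. */
    for (uint32_t i = 0; i < 4096; i++)
    {
        void *pv = RTMemPoolAlloc(g_hMemPool4, (i % 256) + 1);
        RTTESTI_CHECK(pv != NULL);
        RTMemPoolFree(g_hMemPool4, pv);
    }
    return VINF_SUCCESS;
}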
Example #4
/**
 * Creates a new lwIP thread.
 */
sys_thread_t sys_thread_new(void (* thread)(void *arg), void *arg, int prio)
{
    int rc;
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_DECL_PROTECT(old_level);
#endif
    unsigned id;
    RTTHREAD tid;

#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_PROTECT(old_level);
#else
    RTSemEventWait(g_ThreadSem, RT_INDEFINITE_WAIT);
#endif
    id = g_cThreads;
    g_cThreads++;
    Assert(g_cThreads <= THREADS_MAX);
    g_aTLS[id].thread = thread;
    g_aTLS[id].arg = arg;
    rc = RTThreadCreateF(&tid, sys_thread_adapter, &g_aTLS[id], 0,
                         RTTHREADTYPE_IO, 0, "lwIP%u", id);
    if (RT_FAILURE(rc))
    {
        g_cThreads--;
        tid = NIL_RTTHREAD;
    }
    else
        g_aTLS[id].tid = tid;
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_UNPROTECT(old_level);
#else
    RTSemEventSignal(g_ThreadSem);
#endif
    AssertRC(rc);
    return tid;
}
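
/*
 * Hypothetical sketch of the sys_thread_adapter trampoline referenced above.  The element
 * type of g_aTLS is not part of this excerpt, so the struct below is an assumed layout
 * mirroring the g_aTLS[] member accesses in sys_thread_new(); the real adapter simply
 * forwards to the lwIP thread function.
 */
struct sys_thread_tls /* assumed layout */
{
    void   (*thread)(void *arg);
    void    *arg;
    RTTHREAD tid;
};

static DECLCALLBACK(int) sys_thread_adapter(RTTHREAD hThreadSelf, void *pvUser)
{
    struct sys_thread_tls *tls = (struct sys_thread_tls *)pvUser;
    NOREF(hThreadSelf);

    /* Hand control to the lwIP thread function with its argument. */
    tls->thread(tls->arg);
    return VINF_SUCCESS;
}

Example #5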
static void Test4(unsigned cThreads, unsigned cSeconds, unsigned uWritePercent, bool fYield, bool fQuiet)
{
    unsigned i;
    uint64_t acIterations[32];
    RTTHREAD aThreads[RT_ELEMENTS(acIterations)];
    AssertRelease(cThreads <= RT_ELEMENTS(acIterations));

    RTTestSubF(g_hTest, "Test4 - %u threads, %u sec, %u%% writes, %syielding",
               cThreads, cSeconds, uWritePercent, fYield ? "" : "non-");

    /*
     * Init globals.
     */
    g_fYield = fYield;
    g_fQuiet = fQuiet;
    g_fTerminate = false;
    g_uWritePercent = uWritePercent;
    g_cConcurrentWriters = 0;
    g_cConcurrentReaders = 0;

    RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectRwInit(&g_CritSectRw), VINF_SUCCESS);

    /*
     * Create the threads and let them block on the semrw.
     */
    RTTEST_CHECK_RC_RETV(g_hTest, RTCritSectRwEnterExcl(&g_CritSectRw), VINF_SUCCESS);

    for (i = 0; i < cThreads; i++)
    {
        acIterations[i] = 0;
        RTTEST_CHECK_RC_RETV(g_hTest, RTThreadCreateF(&aThreads[i], Test4Thread, &acIterations[i], 0,
                                                      RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE,
                                                      "test-%u", i), VINF_SUCCESS);
    }

    /*
     * Do the test run.
     */
    uint32_t cErrorsBefore = RTTestErrorCount(g_hTest);
    uint64_t u64StartTS = RTTimeNanoTS();
    RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveExcl(&g_CritSectRw), VINF_SUCCESS);
    RTThreadSleep(cSeconds * 1000);
    ASMAtomicWriteBool(&g_fTerminate, true);
    uint64_t ElapsedNS = RTTimeNanoTS() - u64StartTS;

    /*
     * Clean up the threads and semaphore.
     */
    for (i = 0; i < cThreads; i++)
        RTTEST_CHECK_RC(g_hTest, RTThreadWait(aThreads[i], 5000, NULL), VINF_SUCCESS);

    RTTEST_CHECK_MSG(g_hTest, g_cConcurrentWriters == 0, (g_hTest, "g_cConcurrentWriters=%u at end of test\n", g_cConcurrentWriters));
    RTTEST_CHECK_MSG(g_hTest, g_cConcurrentReaders == 0, (g_hTest, "g_cConcurrentReaders=%u at end of test\n", g_cConcurrentReaders));

    RTTEST_CHECK_RC(g_hTest, RTCritSectRwDelete(&g_CritSectRw), VINF_SUCCESS);

    if (RTTestErrorCount(g_hTest) != cErrorsBefore)
        RTThreadSleep(100);

    /*
     * Collect and display the results.
     */
    uint64_t cItrTotal = acIterations[0];
    for (i = 1; i < cThreads; i++)
        cItrTotal += acIterations[i];

    uint64_t cItrNormal = cItrTotal / cThreads;
    uint64_t cItrMinOK = cItrNormal / 20; /* 5% */
    uint64_t cItrMaxDeviation = 0;
    for (i = 0; i < cThreads; i++)
    {
        uint64_t cItrDelta = RT_ABS((int64_t)(acIterations[i] - cItrNormal));
        if (acIterations[i] < cItrMinOK)
            RTTestFailed(g_hTest, "Thread %u did less than 5%% of the iterations - %llu (it) vs. %llu (5%%) - %llu%%\n",
                         i, acIterations[i], cItrMinOK, cItrDelta * 100 / cItrNormal);
        else if (cItrDelta > cItrNormal / 2)
            RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                         "Warning! Thread %u deviates by more than 50%% - %llu (it) vs. %llu (avg) - %llu%%\n",
                         i, acIterations[i], cItrNormal, cItrDelta * 100 / cItrNormal);
        if (cItrDelta > cItrMaxDeviation)
            cItrMaxDeviation = cItrDelta;

    }

    //RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
    //             "Threads: %u  Total: %llu  Per Sec: %llu  Avg: %llu ns  Max dev: %llu%%\n",
    //             cThreads,
    //             cItrTotal,
    //             cItrTotal / cSeconds,
    //             ElapsedNS / cItrTotal,
    //             cItrMaxDeviation * 100 / cItrNormal
    //             );
    //
    RTTestValue(g_hTest, "Throughput", cItrTotal * UINT32_C(1000000000) / ElapsedNS, RTTESTUNIT_CALLS_PER_SEC);
    RTTestValue(g_hTest, "Max deviation", cItrMaxDeviation * 100 / cItrNormal, RTTESTUNIT_PCT);
}
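
/*
 * Test4Thread is not part of this excerpt.  The sketch below illustrates what such a
 * reader/writer worker could look like, based on the globals initialised in Test4()
 * (g_fTerminate, g_uWritePercent, g_fYield, g_CritSectRw and the concurrency counters,
 * which are assumed to be 32-bit values suitable for the atomic helpers); it is an
 * assumption, not the original code.
 */
static DECLCALLBACK(int) Test4Thread(RTTHREAD hSelf, void *pvUser)
{
    uint64_t *pcIterations = (uint64_t *)pvUser;
    NOREF(hSelf);

    while (!ASMAtomicReadBool(&g_fTerminate))
    {
        if (RTRandU32Ex(0, 99) < g_uWritePercent)
        {
            /* Writer: no other writer or reader may be inside. */
            RTTEST_CHECK_RC(g_hTest, RTCritSectRwEnterExcl(&g_CritSectRw), VINF_SUCCESS);
            RTTEST_CHECK(g_hTest, ASMAtomicIncU32(&g_cConcurrentWriters) == 1);
            RTTEST_CHECK(g_hTest, g_cConcurrentReaders == 0);
            ASMAtomicDecU32(&g_cConcurrentWriters);
            RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveExcl(&g_CritSectRw), VINF_SUCCESS);
        }
        else
        {
            /* Reader: any number of readers may be inside, but no writer. */
            RTTEST_CHECK_RC(g_hTest, RTCritSectRwEnterShared(&g_CritSectRw), VINF_SUCCESS);
            ASMAtomicIncU32(&g_cConcurrentReaders);
            RTTEST_CHECK(g_hTest, g_cConcurrentWriters == 0);
            ASMAtomicDecU32(&g_cConcurrentReaders);
            RTTEST_CHECK_RC(g_hTest, RTCritSectRwLeaveShared(&g_CritSectRw), VINF_SUCCESS);
        }

        *pcIterations += 1;  /* only read by Test4() after RTThreadWait, so no atomics needed */
        if (g_fYield)
            RTThreadYield();
    }
    return VINF_SUCCESS;
}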
Example #6
/**
 * Creates a new async I/O manager.
 *
 * @returns VBox status code.
 * @param   pEpClass    Pointer to the endpoint class data.
 * @param   ppAioMgr    Where to store the pointer to the new async I/O manager on success.
 * @param   enmMgrType  Wanted manager type - can be overwritten by the global override.
 */
int pdmacFileAioMgrCreate(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass, PPPDMACEPFILEMGR ppAioMgr,
                          PDMACEPFILEMGRTYPE enmMgrType)
{
    LogFlowFunc((": Entered\n"));

    PPDMACEPFILEMGR pAioMgrNew;
    int rc = MMR3HeapAllocZEx(pEpClass->Core.pVM, MM_TAG_PDM_ASYNC_COMPLETION, sizeof(PDMACEPFILEMGR), (void **)&pAioMgrNew);
    if (RT_SUCCESS(rc))
    {
        if (enmMgrType < pEpClass->enmMgrTypeOverride)
            pAioMgrNew->enmMgrType = enmMgrType;
        else
            pAioMgrNew->enmMgrType = pEpClass->enmMgrTypeOverride;

        pAioMgrNew->msBwLimitExpired = RT_INDEFINITE_WAIT;

        rc = RTSemEventCreate(&pAioMgrNew->EventSem);
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventCreate(&pAioMgrNew->EventSemBlock);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pAioMgrNew->CritSectBlockingEvent);
                if (RT_SUCCESS(rc))
                {
                    /* Init the rest of the manager. */
                    if (pAioMgrNew->enmMgrType != PDMACEPFILEMGRTYPE_SIMPLE)
                        rc = pdmacFileAioMgrNormalInit(pAioMgrNew);

                    if (RT_SUCCESS(rc))
                    {
                        pAioMgrNew->enmState = PDMACEPFILEMGRSTATE_RUNNING;

                        rc = RTThreadCreateF(&pAioMgrNew->Thread,
                                             pAioMgrNew->enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE
                                             ? pdmacFileAioMgrFailsafe
                                             : pdmacFileAioMgrNormal,
                                             pAioMgrNew,
                                             0,
                                             RTTHREADTYPE_IO,
                                             0,
                                             "AioMgr%d-%s", pEpClass->cAioMgrs,
                                             pAioMgrNew->enmMgrType == PDMACEPFILEMGRTYPE_SIMPLE
                                             ? "F"
                                             : "N");
                        if (RT_SUCCESS(rc))
                        {
                            /* Link it into the list. */
                            RTCritSectEnter(&pEpClass->CritSect);
                            pAioMgrNew->pNext = pEpClass->pAioMgrHead;
                            if (pEpClass->pAioMgrHead)
                                pEpClass->pAioMgrHead->pPrev = pAioMgrNew;
                            pEpClass->pAioMgrHead = pAioMgrNew;
                            pEpClass->cAioMgrs++;
                            RTCritSectLeave(&pEpClass->CritSect);

                            *ppAioMgr = pAioMgrNew;

                            Log(("PDMAC: Successfully created new file AIO Mgr {%s}\n", RTThreadGetName(pAioMgrNew->Thread)));
                            return VINF_SUCCESS;
                        }
                        pdmacFileAioMgrNormalDestroy(pAioMgrNew);
                    }
                    RTCritSectDelete(&pAioMgrNew->CritSectBlockingEvent);
                }
                RTSemEventDestroy(pAioMgrNew->EventSemBlock);
            }
            RTSemEventDestroy(pAioMgrNew->EventSem);
        }
        MMR3HeapFree(pAioMgrNew);
    }

    LogFlowFunc((": Leave rc=%Rrc\n", rc));

    return rc;
}
Example #7
static void tstTraffic(unsigned cThreads, unsigned cSecs)
{
    RTTestSubF(g_hTest, "Traffic - %u threads per direction, %u sec", cThreads, cSecs);

    /*
     * Create X worker threads which drive in the south/north direction and Y
     * worker threads which drive in the west/east direction.  Let them drive
     * in a loop for cSecs seconds with slight delays between some of the runs
     * and then check the numbers.
     */

    /* init */
    RTTHREAD ahThreadsX[8];
    for (unsigned i = 0; i < RT_ELEMENTS(ahThreadsX); i++)
        ahThreadsX[i] = NIL_RTTHREAD;
    AssertRelease(RT_ELEMENTS(ahThreadsX) >= cThreads);

    RTTHREAD ahThreadsY[8];
    for (unsigned i = 0; i < RT_ELEMENTS(ahThreadsY); i++)
        ahThreadsY[i] = NIL_RTTHREAD;
    AssertRelease(RT_ELEMENTS(ahThreadsY) >= cThreads);

    g_cNSCrossings      = 0;
    g_cEWCrossings      = 0;
    g_cSecs             = cSecs;
    g_u64StartMilliTS   = RTTimeMilliTS();

    /* create */
    RTTEST_CHECK_RC_RETV(g_hTest, RTSemXRoadsCreate(&g_hXRoads), VINF_SUCCESS);

    int rc = VINF_SUCCESS;
    for (unsigned i = 0; i < cThreads && RT_SUCCESS(rc); i++)
    {
        rc = RTThreadCreateF(&ahThreadsX[i], tstTrafficNSThread, (void *)(uintptr_t)i, 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "NS-%u", i);
        RTTEST_CHECK_RC_OK(g_hTest, rc);
    }

    for (unsigned i = 0; i < cThreads && RT_SUCCESS(rc); i++)
    {
        rc = RTThreadCreateF(&ahThreadsY[i], tstTrafficEWThread, (void *)(uintptr_t)i, 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "EW-%u", i);
        RTTEST_CHECK_RC_OK(g_hTest, rc);
    }

    /* wait */
    for (unsigned i = 0; i < RT_ELEMENTS(ahThreadsX); i++)
        if (ahThreadsX[i] != NIL_RTTHREAD)
        {
            int rc2 = RTThreadWaitNoResume(ahThreadsX[i], (60 + cSecs) * 1000, NULL);
            RTTEST_CHECK_RC_OK(g_hTest, rc2);
        }

    for (unsigned i = 0; i < RT_ELEMENTS(ahThreadsY); i++)
        if (ahThreadsY[i] != NIL_RTTHREAD)
        {
            int rc2 = RTThreadWaitNoResume(ahThreadsY[i], (60 + cSecs) * 1000, NULL);
            RTTEST_CHECK_RC_OK(g_hTest, rc2);
        }

    RTTEST_CHECK_MSG_RETV(g_hTest, g_cEWCrossings > 10 && g_cNSCrossings,
                          (g_hTest, "cEWCrossings=%u g_cNSCrossings=%u\n", g_cEWCrossings, g_cNSCrossings));
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cNSCrossings=%u\n", g_cNSCrossings);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "cEWCrossings=%u\n", g_cEWCrossings);
}
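
/*
 * The driver thread procedures are not included here.  A hypothetical sketch of the
 * north/south driver follows; tstTrafficEWThread would mirror it with RTSemXRoadsEWEnter,
 * RTSemXRoadsEWLeave and g_cEWCrossings.  The crossing counter is assumed to be a 32-bit
 * value updated atomically, and the occasional-delay heuristic is made up.
 */
static DECLCALLBACK(int) tstTrafficNSThread(RTTHREAD hSelf, void *pvUser)
{
    uintptr_t const iThread = (uintptr_t)pvUser;
    NOREF(hSelf);

    while (RTTimeMilliTS() - g_u64StartMilliTS < g_cSecs * 1000)
    {
        /* Cross the intersection in the north/south direction. */
        RTSemXRoadsNSEnter(g_hXRoads);
        ASMAtomicIncU32(&g_cNSCrossings);
        RTSemXRoadsNSLeave(g_hXRoads);

        /* Slight delay on some of the runs to vary the traffic pattern. */
        if ((iThread & 3) == 0)
            RTThreadSleep(1);
    }
    return VINF_SUCCESS;
}

Example #8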
/**
 * Starts monitoring a VBoxSVC process.
 *
 * @param   pUserData   The user whose chosen VBoxSVC should be watched.
 * @param   hProcess    Handle to the VBoxSVC process.  Consumed.
 * @param   pid         The VBoxSVC PID.
 * @returns Success indicator.
 */
bool VirtualBoxSDS::i_watchIt(VBoxSDSPerUserData *pUserData, HANDLE hProcess, RTPROCESS pid)
{
    RTCritSectEnter(&m_WatcherCritSect);

    /*
     * Find a watcher with capacity left over (we save 8 entries for removals).
     */
    for (uint32_t i = 0; i < m_cWatchers; i++)
    {
        VBoxSDSWatcher *pWatcher = m_papWatchers[i];
        if (   pWatcher->cHandlesEffective < RT_ELEMENTS(pWatcher->aHandles)
            && !pWatcher->fShutdown)
        {
            uint32_t iTodo = pWatcher->cTodos;
            if (iTodo + 8 < RT_ELEMENTS(pWatcher->aTodos))
            {
                pWatcher->aTodos[iTodo].hProcess       = hProcess;
                pWatcher->aTodos[iTodo].Data.pUserData = pUserData;
                pWatcher->aTodos[iTodo].Data.iRevision = ++pUserData->m_iTheChosenOneRevision;
                pWatcher->aTodos[iTodo].Data.pid       = pid;
                pWatcher->cTodos = iTodo + 1;

                pUserData->m_iWatcher = pWatcher->iWatcher;
                pUserData->i_retain();

                BOOL fRc = SetEvent(pWatcher->aHandles[0]);
                AssertLogRelMsg(fRc, ("SetEvent(%p) failed: %u\n", pWatcher->aHandles[0], GetLastError()));
                LogRel(("i_watchIt: Added %p/%p to watcher #%u: %RTbool\n", pUserData, hProcess, pWatcher->iWatcher, fRc));

                i_incrementClientCount();
                RTCritSectLeave(&m_WatcherCritSect);
                RTThreadYield();
                return true;
            }
        }
    }

    /*
     * No watcher with capacity was found, so create a new one with
     * the user/handle prequeued.
     */
    void *pvNew = RTMemRealloc(m_papWatchers, sizeof(m_papWatchers[0]) * (m_cWatchers + 1));
    if (pvNew)
    {
        m_papWatchers = (VBoxSDSWatcher **)pvNew;
        VBoxSDSWatcher *pWatcher = (VBoxSDSWatcher *)RTMemAllocZ(sizeof(*pWatcher));
        if (pWatcher)
        {
            for (uint32_t i = 0; i < RT_ELEMENTS(pWatcher->aData); i++)
                pWatcher->aData[i].setNull();
            for (uint32_t i = 0; i < RT_ELEMENTS(pWatcher->aTodos); i++)
                pWatcher->aTodos[i].Data.setNull();

            pWatcher->pVBoxSDS          = this;
            pWatcher->iWatcher          = m_cWatchers;
            pWatcher->cRefs             = 2;
            pWatcher->cHandlesEffective = 2;
            pWatcher->cHandles          = 2;
            pWatcher->aHandles[0]       = CreateEventW(NULL, FALSE /*fManualReset*/, FALSE /*fInitialState*/,  NULL);
            if (pWatcher->aHandles[0])
            {
                /* Add incoming VBoxSVC process in slot #1: */
                pWatcher->aHandles[1]        = hProcess;
                pWatcher->aData[1].pid       = pid;
                pWatcher->aData[1].pUserData = pUserData;
                pWatcher->aData[1].iRevision = ++pUserData->m_iTheChosenOneRevision;
                pUserData->i_retain();
                pUserData->m_iWatcher = pWatcher->iWatcher;

                /* Start the thread and we're good. */
                m_papWatchers[m_cWatchers++] = pWatcher;
                int rc = RTThreadCreateF(&pWatcher->hThread, i_watcherThreadProc, pWatcher, 0, RTTHREADTYPE_MAIN_WORKER,
                                         RTTHREADFLAGS_WAITABLE, "watcher%u", pWatcher->iWatcher);
                if (RT_SUCCESS(rc))
                {
                    LogRel(("i_watchIt: Created new watcher #%u for %p/%p\n", m_cWatchers, pUserData, hProcess));

                    i_incrementClientCount();
                    RTCritSectLeave(&m_WatcherCritSect);
                    return true;
                }

                LogRel(("i_watchIt: Error starting watcher thread: %Rrc\n", rc));
                m_papWatchers[--m_cWatchers] = NULL;

                pUserData->m_iWatcher = UINT32_MAX;
                pUserData->i_release();
                CloseHandle(pWatcher->aHandles[0]);
            }
            else
                LogRel(("i_watchIt: CreateEventW failed: %u\n", GetLastError()));
            RTMemFree(pWatcher);
        }
        else
            LogRel(("i_watchIt: failed to allocate watcher structure!\n"));
    }
    else
        LogRel(("i_watchIt: Failed to grow watcher array to %u entries!\n", m_cWatchers + 1));

    RTCritSectLeave(&m_WatcherCritSect);
    CloseHandle(hProcess);
    return false;
}
Example #9
/*static*/
DECLCALLBACK(int) VirtualBox::ClientWatcher::worker(RTTHREAD hThreadSelf, void *pvUser)
{
    LogFlowFuncEnter();
    NOREF(hThreadSelf);

    VirtualBox::ClientWatcher *that = (VirtualBox::ClientWatcher *)pvUser;
    Assert(that);

    typedef std::vector<ComObjPtr<Machine> > MachineVector;
    typedef std::vector<ComObjPtr<SessionMachine> > SessionMachineVector;

    SessionMachineVector machines;
    MachineVector spawnedMachines;

    size_t cnt = 0;
    size_t cntSpawned = 0;

    VirtualBoxBase::initializeComForThread();

#if defined(RT_OS_WINDOWS)

    int vrc;

    /* Initialize all the subworker data. */
    that->maSubworkers[0].hThread = hThreadSelf;
    for (uint32_t iSubworker = 1; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
        that->maSubworkers[iSubworker].hThread    = NIL_RTTHREAD;
    for (uint32_t iSubworker = 0; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
    {
        that->maSubworkers[iSubworker].pSelf      = that;
        that->maSubworkers[iSubworker].iSubworker = iSubworker;
    }

    do
    {
        /* VirtualBox has been early uninitialized, terminate. */
        AutoCaller autoCaller(that->mVirtualBox);
        if (!autoCaller.isOk())
            break;

        bool fPidRace = false;          /* We poll if the PID of a spawning session hasn't been established yet.  */
        bool fRecentDeath = false;      /* We slowly poll if a session has recently been closed to do reaping. */
        for (;;)
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            /* Kick off the waiting. */
            uint32_t const cSubworkers = (that->mcWaitHandles + CW_MAX_HANDLES_PER_THREAD - 1) / CW_MAX_HANDLES_PER_THREAD;
            uint32_t const cMsWait     = fPidRace ? 500 : fRecentDeath ? 5000 : INFINITE;
            LogFlowFunc(("UPDATE: Waiting. %u handles, %u subworkers, %u ms wait\n", that->mcWaitHandles, cSubworkers, cMsWait));

            that->mcMsWait = cMsWait;
            ASMAtomicWriteU32(&that->mcActiveSubworkers, cSubworkers);
            RTThreadUserReset(hThreadSelf);

            for (uint32_t iSubworker = 1; iSubworker < cSubworkers; iSubworker++)
            {
                if (that->maSubworkers[iSubworker].hThread != NIL_RTTHREAD)
                {
                    vrc = RTThreadUserSignal(that->maSubworkers[iSubworker].hThread);
                    AssertLogRelMsg(RT_SUCCESS(vrc), ("RTThreadUserSignal -> %Rrc\n", vrc));
                }
                else
                {
                    vrc = RTThreadCreateF(&that->maSubworkers[iSubworker].hThread,
                                          VirtualBox::ClientWatcher::subworkerThread, &that->maSubworkers[iSubworker],
                                          _128K, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "Watcher%u", iSubworker);
                    AssertLogRelMsgStmt(RT_SUCCESS(vrc), ("%Rrc iSubworker=%u\n", vrc, iSubworker),
                                        that->maSubworkers[iSubworker].hThread = NIL_RTTHREAD);
                }
                if (RT_FAILURE(vrc))
                    that->subworkerWait(&that->maSubworkers[iSubworker], 1);
            }

            /* Wait ourselves. */
            that->subworkerWait(&that->maSubworkers[0], cMsWait);

            /* Make sure all waiters are done waiting. */
            BOOL fRc = SetEvent(that->mUpdateReq);
            Assert(fRc); NOREF(fRc);

            vrc = RTThreadUserWait(hThreadSelf, RT_INDEFINITE_WAIT);
            AssertLogRelMsg(RT_SUCCESS(vrc), ("RTThreadUserWait -> %Rrc\n", vrc));
            Assert(that->mcActiveSubworkers == 0);

            /* Consume pending update request before proceeding with processing the wait results. */
            fRc = ResetEvent(that->mUpdateReq);
            Assert(fRc);

            bool update = ASMAtomicXchgBool(&that->mfUpdateReq, false);
            if (update)
                LogFlowFunc(("UPDATE: Update request pending\n"));
            update |= fPidRace;

            /* Process the wait results. */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;
            fRecentDeath = false;
            for (uint32_t iSubworker = 0; iSubworker < cSubworkers; iSubworker++)
            {
                DWORD dwWait = that->maSubworkers[iSubworker].dwWait;
                LogFlowFunc(("UPDATE: subworker #%u: dwWait=%#x\n", iSubworker, dwWait));
                if (   (dwWait > WAIT_OBJECT_0    && dwWait < WAIT_OBJECT_0    + CW_MAX_HANDLES_PER_THREAD)
                    || (dwWait > WAIT_ABANDONED_0 && dwWait < WAIT_ABANDONED_0 + CW_MAX_HANDLES_PER_THREAD) )
                {
                    uint32_t idxHandle = iSubworker * CW_MAX_HANDLES_PER_THREAD;
                    if (dwWait > WAIT_OBJECT_0    && dwWait < WAIT_OBJECT_0    + CW_MAX_HANDLES_PER_THREAD)
                        idxHandle += dwWait - WAIT_OBJECT_0;
                    else
                        idxHandle += dwWait - WAIT_ABANDONED_0;

                    uint32_t const idxMachine = idxHandle - (iSubworker + 1);
                    if (idxMachine < cnt)
                    {
                        /* Machine mutex is released or abandoned due to client process termination. */
                        LogFlowFunc(("UPDATE: Calling i_checkForDeath on idxMachine=%u (idxHandle=%u) dwWait=%#x\n",
                                     idxMachine, idxHandle, dwWait));
                        fRecentDeath |= (machines[idxMachine])->i_checkForDeath();
                    }
                    else if (idxMachine < cnt + cntSpawned)
                    {
                        /* Spawned VM process has terminated normally. */
                        Assert(dwWait < WAIT_ABANDONED_0);
                        LogFlowFunc(("UPDATE: Calling i_checkForSpawnFailure on idxMachine=%u/%u idxHandle=%u dwWait=%#x\n",
                                     idxMachine, idxMachine - cnt, idxHandle, dwWait));
                        fRecentDeath |= (spawnedMachines[idxMachine - cnt])->i_checkForSpawnFailure();
                    }
                    else
                        AssertFailed();
                    update = true;
                }
                else
                    Assert(dwWait == WAIT_OBJECT_0 || dwWait == WAIT_TIMEOUT);
            }

            if (update)
            {
                LogFlowFunc(("UPDATE: Update pending (cnt=%u cntSpawned=%u)...\n", cnt, cntSpawned));

                /* close old process handles */
                that->winResetHandleArray((uint32_t)cntSpawned);

                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                /* obtain a new set of opened machines */
                cnt = 0;
                machines.clear();
                uint32_t idxHandle = 0;

                for (MachinesOList::iterator it = allMachines.begin();
                     it != allMachines.end();
                     ++it)
                {
                    AssertMsgBreak(idxHandle < CW_MAX_CLIENTS, ("CW_MAX_CLIENTS reached"));

                    ComObjPtr<SessionMachine> sm;
                    if ((*it)->i_isSessionOpenOrClosing(sm))
                    {
                        AutoCaller smCaller(sm);
                        if (smCaller.isOk())
                        {
                            AutoReadLock smLock(sm COMMA_LOCKVAL_SRC_POS);
                            Machine::ClientToken *ct = sm->i_getClientToken();
                            if (ct)
                            {
                                HANDLE ipcSem = ct->getToken();
                                machines.push_back(sm);
                                if (!(idxHandle % CW_MAX_HANDLES_PER_THREAD))
                                    idxHandle++;
                                that->mahWaitHandles[idxHandle++] = ipcSem;
                                ++cnt;
                            }
                        }
                    }
                }

                LogFlowFunc(("UPDATE: direct session count = %d\n", cnt));

                /* obtain a new set of spawned machines */
                fPidRace = false;
                cntSpawned = 0;
                spawnedMachines.clear();

                for (MachinesOList::iterator it = allMachines.begin();
                     it != allMachines.end();
                     ++it)
                {
                    AssertMsgBreak(idxHandle < CW_MAX_CLIENTS, ("CW_MAX_CLIENTS reached"));

                    if ((*it)->i_isSessionSpawning())
                    {
                        ULONG pid;
                        HRESULT hrc = (*it)->COMGETTER(SessionPID)(&pid);
                        if (SUCCEEDED(hrc))
                        {
                            if (pid != NIL_RTPROCESS)
                            {
                                HANDLE hProc = OpenProcess(SYNCHRONIZE, FALSE, pid);
                                AssertMsg(hProc != NULL, ("OpenProcess (pid=%d) failed with %d\n", pid, GetLastError()));
                                if (hProc != NULL)
                                {
                                    spawnedMachines.push_back(*it);
                                    if (!(idxHandle % CW_MAX_HANDLES_PER_THREAD))
                                        idxHandle++;
                                    that->mahWaitHandles[idxHandle++] = hProc;
                                    ++cntSpawned;
                                }
                            }
                            else
                                fPidRace = true;
                        }
                    }
                }

                LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));

                /* Update mcWaitHandles and make sure there is at least one handle to wait on. */
                that->mcWaitHandles = RT_MAX(idxHandle, 1);

                // machines lock unwinds here
            }
            else
                LogFlowFunc(("UPDATE: No update pending.\n"));

            /* reap child processes */
            that->reapProcesses();

        } /* for ever (well, till autoCaller fails). */

    } while (0);

    /* Terminate subworker threads. */
    ASMAtomicWriteBool(&that->mfTerminate, true);
    for (uint32_t iSubworker = 1; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
        if (that->maSubworkers[iSubworker].hThread != NIL_RTTHREAD)
            RTThreadUserSignal(that->maSubworkers[iSubworker].hThread);
    for (uint32_t iSubworker = 1; iSubworker < RT_ELEMENTS(that->maSubworkers); iSubworker++)
        if (that->maSubworkers[iSubworker].hThread != NIL_RTTHREAD)
        {
            vrc = RTThreadWait(that->maSubworkers[iSubworker].hThread, RT_MS_1MIN, NULL /*prc*/);
            if (RT_SUCCESS(vrc))
                that->maSubworkers[iSubworker].hThread = NIL_RTTHREAD;
            else
                AssertLogRelMsgFailed(("RTThreadWait -> %Rrc\n", vrc));
        }

    /* close old process handles */
    that->winResetHandleArray((uint32_t)cntSpawned);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

    ::CoUninitialize();

#elif defined(RT_OS_OS2)

    /* according to PMREF, 64 is the maximum for the muxwait list */
    SEMRECORD handles[64];

    HMUX muxSem = NULLHANDLE;

    do
    {
        AutoCaller autoCaller(that->mVirtualBox);
        /* VirtualBox has been early uninitialized, terminate */
        if (!autoCaller.isOk())
            break;

        for (;;)
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            int vrc = RTSemEventWait(that->mUpdateReq, 500);

            /* Restore the caller before using VirtualBox. If it fails, this
             * means VirtualBox is being uninitialized and we must terminate. */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;

            bool update = false;
            bool updateSpawned = false;

            if (RT_SUCCESS(vrc))
            {
                /* update event is signaled */
                update = true;
                updateSpawned = true;
            }
            else
            {
                AssertMsg(vrc == VERR_TIMEOUT || vrc == VERR_INTERRUPTED,
                          ("RTSemEventWait returned %Rrc\n", vrc));

                /* are there any mutexes? */
                if (cnt > 0)
                {
                    /* figure out what's going on with machines */

                    unsigned long semId = 0;
                    APIRET arc = ::DosWaitMuxWaitSem(muxSem,
                                                     SEM_IMMEDIATE_RETURN, &semId);

                    if (arc == NO_ERROR)
                    {
                        /* machine mutex is normally released */
                        Assert(semId >= 0 && semId < cnt);
                        if (semId >= 0 && semId < cnt)
                        {
#if 0//def DEBUG
                            {
                                AutoReadLock machineLock(machines[semId] COMMA_LOCKVAL_SRC_POS);
                                LogFlowFunc(("released mutex: machine='%ls'\n",
                                             machines[semId]->name().raw()));
                            }
#endif
                            machines[semId]->i_checkForDeath();
                        }
                        update = true;
                    }
                    else if (arc == ERROR_SEM_OWNER_DIED)
                    {
                        /* machine mutex is abandoned due to client process
                         * termination; find which mutex is in the Owner Died
                         * state */
                        for (size_t i = 0; i < cnt; ++i)
                        {
                            PID pid; TID tid;
                            unsigned long reqCnt;
                            arc = DosQueryMutexSem((HMTX)handles[i].hsemCur, &pid, &tid, &reqCnt);
                            if (arc == ERROR_SEM_OWNER_DIED)
                            {
                                /* close the dead mutex as asked by PMREF */
                                ::DosCloseMutexSem((HMTX)handles[i].hsemCur);

                                Assert(i >= 0 && i < cnt);
                                if (i >= 0 && i < cnt)
                                {
#if 0//def DEBUG
                                    {
                                        AutoReadLock machineLock(machines[semId] COMMA_LOCKVAL_SRC_POS);
                                        LogFlowFunc(("mutex owner dead: machine='%ls'\n",
                                                     machines[i]->name().raw()));
                                    }
#endif
                                    machines[i]->i_checkForDeath();
                                }
                            }
                        }
                        update = true;
                    }
                    else
                        AssertMsg(arc == ERROR_INTERRUPT || arc == ERROR_TIMEOUT,
                                  ("DosWaitMuxWaitSem returned %d\n", arc));
                }

                /* are there any spawning sessions? */
                if (cntSpawned > 0)
                {
                    for (size_t i = 0; i < cntSpawned; ++i)
                        updateSpawned |= (spawnedMachines[i])->
                            i_checkForSpawnFailure();
                }
            }

            if (update || updateSpawned)
            {
                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                if (update)
                {
                    /* close the old muxsem */
                    if (muxSem != NULLHANDLE)
                        ::DosCloseMuxWaitSem(muxSem);

                    /* obtain a new set of opened machines */
                    cnt = 0;
                    machines.clear();

                    for (MachinesOList::iterator it = allMachines.begin();
                         it != allMachines.end(); ++it)
                    {
                        /// @todo handle situations with more than 64 objects
                        AssertMsg(cnt <= 64 /* according to PMREF */,
                                  ("maximum of 64 mutex semaphores reached (%d)",
                                   cnt));

                        ComObjPtr<SessionMachine> sm;
                        if ((*it)->i_isSessionOpenOrClosing(sm))
                        {
                            AutoCaller smCaller(sm);
                            if (smCaller.isOk())
                            {
                                AutoReadLock smLock(sm COMMA_LOCKVAL_SRC_POS);
                                ClientToken *ct = sm->i_getClientToken();
                                if (ct)
                                {
                                    HMTX ipcSem = ct->getToken();
                                    machines.push_back(sm);
                                    handles[cnt].hsemCur = (HSEM)ipcSem;
                                    handles[cnt].ulUser = cnt;
                                    ++cnt;
                                }
                            }
                        }
                    }

                    LogFlowFunc(("UPDATE: direct session count = %d\n", cnt));

                    if (cnt > 0)
                    {
                        /* create a new muxsem */
                        APIRET arc = ::DosCreateMuxWaitSem(NULL, &muxSem, cnt,
                                                           handles,
                                                           DCMW_WAIT_ANY);
                        AssertMsg(arc == NO_ERROR,
                                  ("DosCreateMuxWaitSem returned %d\n", arc));
                        NOREF(arc);
                    }
                }

                if (updateSpawned)
                {
                    /* obtain a new set of spawned machines */
                    spawnedMachines.clear();

                    for (MachinesOList::iterator it = allMachines.begin();
                         it != allMachines.end(); ++it)
                    {
                        if ((*it)->i_isSessionSpawning())
                            spawnedMachines.push_back(*it);
                    }

                    cntSpawned = spawnedMachines.size();
                    LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));
                }
            }

            /* reap child processes */
            that->reapProcesses();

        } /* for ever (well, till autoCaller fails). */

    } while (0);

    /* close the muxsem */
    if (muxSem != NULLHANDLE)
        ::DosCloseMuxWaitSem(muxSem);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

#elif defined(VBOX_WITH_SYS_V_IPC_SESSION_WATCHER)

    bool update = false;
    bool updateSpawned = false;

    do
    {
        AutoCaller autoCaller(that->mVirtualBox);
        if (!autoCaller.isOk())
            break;

        do
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            /* determine wait timeout adaptively: after updating information
             * relevant to the client watcher, check a few times more
             * frequently. This ensures good reaction time when the signalling
             * has to be done a bit before the actual change for technical
             * reasons, and saves CPU cycles when no activities are expected. */
            RTMSINTERVAL cMillies;
            {
                uint8_t uOld, uNew;
                do
                {
                    uOld = ASMAtomicUoReadU8(&that->mUpdateAdaptCtr);
                    uNew = uOld ? uOld - 1 : uOld;
                } while (!ASMAtomicCmpXchgU8(&that->mUpdateAdaptCtr, uNew, uOld));
                Assert(uOld <= RT_ELEMENTS(s_aUpdateTimeoutSteps) - 1);
                cMillies = s_aUpdateTimeoutSteps[uOld];
            }

            int rc = RTSemEventWait(that->mUpdateReq, cMillies);

            /*
             *  Restore the caller before using VirtualBox. If it fails, this
             *  means VirtualBox is being uninitialized and we must terminate.
             */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;

            if (RT_SUCCESS(rc) || update || updateSpawned)
            {
                /* RT_SUCCESS(rc) means an update event is signaled */

                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                if (RT_SUCCESS(rc) || update)
                {
                    /* obtain a new set of opened machines */
                    machines.clear();

                    for (MachinesOList::iterator it = allMachines.begin();
                         it != allMachines.end();
                         ++it)
                    {
                        ComObjPtr<SessionMachine> sm;
                        if ((*it)->i_isSessionOpenOrClosing(sm))
                            machines.push_back(sm);
                    }

                    cnt = machines.size();
                    LogFlowFunc(("UPDATE: direct session count = %d\n", cnt));
                }

                if (RT_SUCCESS(rc) || updateSpawned)
                {
                    /* obtain a new set of spawned machines */
                    spawnedMachines.clear();

                    for (MachinesOList::iterator it = allMachines.begin();
                         it != allMachines.end();
                         ++it)
                    {
                        if ((*it)->i_isSessionSpawning())
                            spawnedMachines.push_back(*it);
                    }

                    cntSpawned = spawnedMachines.size();
                    LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));
                }

                // machines lock unwinds here
            }

            update = false;
            for (size_t i = 0; i < cnt; ++i)
                update |= (machines[i])->i_checkForDeath();

            updateSpawned = false;
            for (size_t i = 0; i < cntSpawned; ++i)
                updateSpawned |= (spawnedMachines[i])->i_checkForSpawnFailure();

            /* reap child processes */
            that->reapProcesses();
        }
        while (true);
    }
    while (0);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

#elif defined(VBOX_WITH_GENERIC_SESSION_WATCHER)

    bool updateSpawned = false;

    do
    {
        AutoCaller autoCaller(that->mVirtualBox);
        if (!autoCaller.isOk())
            break;

        do
        {
            /* release the caller to let uninit() ever proceed */
            autoCaller.release();

            /* determine wait timeout adaptively: after updating information
             * relevant to the client watcher, check a few times more
             * frequently. This ensures good reaction time when the signalling
             * has to be done a bit before the actual change for technical
             * reasons, and saves CPU cycles when no activities are expected. */
            RTMSINTERVAL cMillies;
            {
                uint8_t uOld, uNew;
                do
                {
                    uOld = ASMAtomicUoReadU8(&that->mUpdateAdaptCtr);
                    uNew = uOld ? (uint8_t)(uOld - 1) : uOld;
                } while (!ASMAtomicCmpXchgU8(&that->mUpdateAdaptCtr, uNew, uOld));
                Assert(uOld <= RT_ELEMENTS(s_aUpdateTimeoutSteps) - 1);
                cMillies = s_aUpdateTimeoutSteps[uOld];
            }

            int rc = RTSemEventWait(that->mUpdateReq, cMillies);

            /*
             *  Restore the caller before using VirtualBox. If it fails, this
             *  means VirtualBox is being uninitialized and we must terminate.
             */
            autoCaller.add();
            if (!autoCaller.isOk())
                break;

            /** @todo this quite big effort for catching machines in spawning
             * state which can't be caught by the token mechanism (as the token
             * can't be in the other process yet) could be eliminated if the
             * reaping is made smarter, having cross-reference information
             * from the pid to the corresponding machine object. Both cases do
             * more or less the same thing anyway. */
            if (RT_SUCCESS(rc) || updateSpawned)
            {
                /* RT_SUCCESS(rc) means an update event is signaled */

                // get reference to the machines list in VirtualBox
                VirtualBox::MachinesOList &allMachines = that->mVirtualBox->i_getMachinesList();

                // lock the machines list for reading
                AutoReadLock thatLock(allMachines.getLockHandle() COMMA_LOCKVAL_SRC_POS);

                if (RT_SUCCESS(rc) || updateSpawned)
                {
                    /* obtain a new set of spawned machines */
                    spawnedMachines.clear();

                    for (MachinesOList::iterator it = allMachines.begin();
                         it != allMachines.end();
                         ++it)
                    {
                        if ((*it)->i_isSessionSpawning())
                            spawnedMachines.push_back(*it);
                    }

                    cntSpawned = spawnedMachines.size();
                    LogFlowFunc(("UPDATE: spawned session count = %d\n", cntSpawned));
                }

                NOREF(cnt);
                // machines lock unwinds here
            }

            updateSpawned = false;
            for (size_t i = 0; i < cntSpawned; ++i)
                updateSpawned |= (spawnedMachines[i])->i_checkForSpawnFailure();

            /* reap child processes */
            that->reapProcesses();
        }
        while (true);
    }
    while (0);

    /* release sets of machines if any */
    machines.clear();
    spawnedMachines.clear();

#else
# error "Port me!"
#endif

    VirtualBoxBase::uninitializeComForThread();

    LogFlowFuncLeave();
    return 0;
}
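
/*
 * The adaptive polling scheme in the worker above (mUpdateAdaptCtr indexing into
 * s_aUpdateTimeoutSteps) needs a producer side that re-arms it whenever client state
 * changes.  Below is a hypothetical sketch of that side for the builds that use an IPRT
 * event semaphore for mUpdateReq (i.e. not the Windows branch, which uses SetEvent); the
 * method name and body are assumptions, not the shipped code.
 */
void VirtualBox::ClientWatcher::update()
{
    /* Re-arm the adaptation counter so the worker polls at the shortest interval for a while. */
    ASMAtomicWriteU8(&mUpdateAdaptCtr, (uint8_t)(RT_ELEMENTS(s_aUpdateTimeoutSteps) - 1));

    /* Wake the worker so it rebuilds its machine/session sets right away. */
    RTSemEventSignal(mUpdateReq);
}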