Example #1
/**
 * Unblocks a thread.
 *
 * This function is paired with rtThreadBlocking.
 *
 * @param   hThread     The current thread.
 * @param   enmCurState The current state, used to check for nested blocking.
 *                      The new state will be running.
 */
RTDECL(void) RTThreadUnblocked(RTTHREAD hThread, RTTHREADSTATE enmCurState)
{
    PRTTHREADINT pThread = hThread;
    if (pThread != NIL_RTTHREAD)
    {
        Assert(pThread == RTThreadSelf());
        ASMAtomicWriteBool(&pThread->fReallySleeping, false);

        RTTHREADSTATE enmActualState = rtThreadGetState(pThread);
        if (enmActualState == enmCurState)
        {
            rtThreadSetState(pThread, RTTHREADSTATE_RUNNING);
            if (   pThread->LockValidator.pRec
                && pThread->LockValidator.enmRecState == enmCurState)
                ASMAtomicWriteNullPtr(&pThread->LockValidator.pRec);
        }
        /* This is a bit ugly... :-/ */
        else if (   (   enmActualState == RTTHREADSTATE_TERMINATED
                     || enmActualState == RTTHREADSTATE_INITIALIZING)
                 && pThread->LockValidator.pRec)
            ASMAtomicWriteNullPtr(&pThread->LockValidator.pRec);
        Assert(   pThread->LockValidator.pRec == NULL
               || RTTHREAD_IS_SLEEPING(enmActualState));
    }
}
Example #2
/* static */
int NativeEventQueue::init()
{
    Assert(sMainQueue == NULL);
    Assert(RTThreadIsMain(RTThreadSelf()));

    try
    {
        sMainQueue = new NativeEventQueue();
        AssertPtr(sMainQueue);
#ifdef VBOX_WITH_XPCOM
        /* Check that it actually is the main event queue, i.e. that
           we're called on the right thread. */
        nsCOMPtr<nsIEventQueue> q;
        nsresult rv = NS_GetMainEventQ(getter_AddRefs(q));
        AssertComRCReturn(rv, VERR_INVALID_POINTER);
        Assert(q == sMainQueue->mEventQ);

        /* Check that it's a native queue. */
        PRBool fIsNative = PR_FALSE;
        rv = sMainQueue->mEventQ->IsQueueNative(&fIsNative);
        Assert(NS_SUCCEEDED(rv) && fIsNative);
#endif // VBOX_WITH_XPCOM
    }
    catch (std::bad_alloc &ba)
    {
        NOREF(ba);
        return VERR_NO_MEMORY;
    }

    return VINF_SUCCESS;
}
Example #3
/**
 * Get the thread handle of the current thread, automatically adopting alien
 * threads.
 *
 * @returns Thread handle.
 */
RTDECL(RTTHREAD) RTThreadSelfAutoAdopt(void)
{
    RTTHREAD hSelf = RTThreadSelf();
    if (RT_UNLIKELY(hSelf == NIL_RTTHREAD))
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
    return hSelf;
}
/**
 * Called by the PDM thread in response to a wakeup call with
 * suspending as the new state.
 *
 * The thread will block inside this call until the state is changed in
 * response to a VM state change or to the device/driver/whatever calling the
 * PDMR3ThreadResume API.
 *
 * @returns VBox status code.
 *          On failure, terminate the thread.
 * @param   pThread     The PDM thread.
 */
VMMR3DECL(int) PDMR3ThreadIAmSuspending(PPDMTHREAD pThread)
{
    /*
     * Assert sanity.
     */
    AssertPtr(pThread);
    AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
    Assert(pThread->Thread == RTThreadSelf() || pThread->enmState == PDMTHREADSTATE_INITIALIZING);
    PDMTHREADSTATE enmState = pThread->enmState;
    Assert(     enmState == PDMTHREADSTATE_SUSPENDING
           ||   enmState == PDMTHREADSTATE_INITIALIZING);

    /*
     * Update the state, notify the control thread (the API caller) and go to sleep.
     */
    int rc = VERR_WRONG_ORDER;
    if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_SUSPENDED, enmState))
    {
        rc = RTThreadUserSignal(pThread->Thread);
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventMultiWait(pThread->Internal.s.BlockEvent, RT_INDEFINITE_WAIT);
            if (    RT_SUCCESS(rc)
                &&  pThread->enmState != PDMTHREADSTATE_SUSPENDED)
                return rc;

            if (RT_SUCCESS(rc))
                rc = VERR_PDM_THREAD_IPE_2;
        }
    }

    AssertMsgFailed(("rc=%d enmState=%d\n", rc, pThread->enmState));
    pdmR3ThreadBailMeOut(pThread);
    return rc;
}
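For orientation, a hedged sketch of the kind of state loop that drives this handshake together with PDMR3ThreadIAmRunning (shown further down); the loop shape and the pfnDoWork callback are illustrative assumptions, not the actual PDM internals:
/* Hedged sketch: a worker loop dispatching on the PDM thread state.
 * pfnDoWork and the exact loop shape are illustrative assumptions. */
static int sketchPdmThreadLoop(PPDMTHREAD pThread, int (*pfnDoWork)(PPDMTHREAD pThread))
{
    for (;;)
    {
        int rc;
        switch (pThread->enmState)
        {
            case PDMTHREADSTATE_INITIALIZING:
            case PDMTHREADSTATE_SUSPENDING:
                /* Acknowledge the suspend and block until resumed. */
                rc = PDMR3ThreadIAmSuspending(pThread);
                break;

            case PDMTHREADSTATE_RESUMING:
                /* Acknowledge the resume; transitions RESUMING -> RUNNING. */
                rc = PDMR3ThreadIAmRunning(pThread);
                break;

            case PDMTHREADSTATE_RUNNING:
                rc = pfnDoWork(pThread);
                break;

            default:
                /* Terminating or unexpected state: leave the loop. */
                return VINF_SUCCESS;
        }
        if (RT_FAILURE(rc))
            return rc;
    }
}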
Example #5
/**
 * Wrapper that selects rtStrConvertCached or rtStrConvertUncached.
 *
 * @returns IPRT status code.
 *
 * @param   pchInput        Pointer to the input string.
 * @param   cchInput        Size (in bytes) of input string. Excludes any
 *                          terminators.
 * @param   pszInputCS      Codeset of the input string.
 * @param   ppszOutput      Pointer to pointer to output buffer if cbOutput > 0.
 *                          If cbOutput is 0 this is where the pointer to the
 *                          allocated buffer is stored.
 * @param   cbOutput        Size of the passed in buffer.
 * @param   pszOutputCS     Codeset of the output string.
 * @param   cFactor         Input vs. output size factor.
 * @param   enmCacheIdx     The iconv cache index.
 */
DECLINLINE(int) rtStrConvertWrapper(const char *pchInput, size_t cchInput, const char *pszInputCS,
                                    char **ppszOutput, size_t cbOutput, const char *pszOutputCS,
                                    unsigned cFactor, RTSTRICONV enmCacheIdx)
{
#ifdef RT_WITH_ICONV_CACHE
    RTTHREAD hSelf = RTThreadSelf();
    if (hSelf != NIL_RTTHREAD)
    {
        PRTTHREADINT pThread = rtThreadGet(hSelf);
        if (pThread)
        {
            if ((pThread->fIntFlags & (RTTHREADINT_FLAGS_ALIEN | RTTHREADINT_FLAGS_MAIN)) != RTTHREADINT_FLAGS_ALIEN)
            {
                int rc = rtstrConvertCached(pchInput, cchInput, pszInputCS,
                                            (void **)ppszOutput, cbOutput, pszOutputCS,
                                            cFactor, (iconv_t *)&pThread->ahIconvs[enmCacheIdx]);
                rtThreadRelease(pThread);
                return rc;
            }
            rtThreadRelease(pThread);
        }
    }
#endif
    return rtStrConvertUncached(pchInput, cchInput, pszInputCS,
                                (void **)ppszOutput, cbOutput, pszOutputCS,
                                cFactor);
}
Example #6
RTDECL(bool) RTThreadIsSelfKnown(void)
{
    if (g_frtThreadInitialized)
    {
        RTTHREAD hSelf = RTThreadSelf();
        if (hSelf != NIL_RTTHREAD)
            return true;
    }
    return false;
}
Example #7
RTDECL(int) RTSemRWReleaseRead(RTSEMRW hRWSem)
{
    /*
     * Validate input.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    /*
     * Check if it's the writer.
     */
    pthread_t Self = pthread_self();
    pthread_t Writer;
    ATOMIC_GET_PTHREAD_T(&pThis->Writer, &Writer);
    if (Writer == Self)
    {
        AssertMsgReturn(pThis->cWriterReads > 0, ("pThis=%p\n", pThis), VERR_NOT_OWNER);
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclUnwindMixed(&pThis->ValidatorWrite, &pThis->ValidatorRead.Core);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        pThis->cWriterReads--;
        return VINF_SUCCESS;
    }

    /*
     * Try unlock it.
     */
#ifdef RTSEMRW_STRICT
    int rc9 = RTLockValidatorRecSharedCheckAndRelease(&pThis->ValidatorRead, RTThreadSelf());
    if (RT_FAILURE(rc9))
        return rc9;
#endif
#ifdef RT_OS_LINUX /* glibc (at least 2.8) may screw up when unlocking a lock we don't own. */
    if (ASMAtomicReadU32(&pThis->cReaders) == 0)
    {
        AssertMsgFailed(("Not owner of %p\n", pThis));
        return VERR_NOT_OWNER;
    }
#endif
    ASMAtomicDecU32(&pThis->cReaders);
    int rc = pthread_rwlock_unlock(&pThis->RWLock);
    if (rc)
    {
        ASMAtomicIncU32(&pThis->cReaders);
        AssertMsgFailed(("Failed read unlock read-write sem %p, rc=%d.\n", hRWSem, rc));
        return RTErrConvertFromErrno(rc);
    }
    return VINF_SUCCESS;
}
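A hedged usage sketch of the writer-holding-read-locks case that the cWriterReads branch above accounts for; RTSemRWRequestWrite, RTSemRWRequestRead and RTSemRWReleaseWrite are the matching public calls, error handling is abbreviated:
/* Hedged sketch: a writer taking and releasing a nested read lock. */
static void sketchWriterNestedRead(RTSEMRW hRWSem)
{
    int rc = RTSemRWRequestWrite(hRWSem, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemRWRequestRead(hRWSem, RT_INDEFINITE_WAIT);  /* bumps cWriterReads */
        if (RT_SUCCESS(rc))
            RTSemRWReleaseRead(hRWSem);                       /* hits the writer branch above */
        RTSemRWReleaseWrite(hRWSem);
    }
}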
/**
 * Suspends the thread.
 *
 * This can be called at the power off / suspend notifications to suspend the
 * PDM thread a bit early. The thread will be automatically suspended upon
 * completion of the device/driver notification cycle.
 *
 * The caller is responsible for serializing the control operations on the
 * thread. That basically means, always do these calls from the EMT.
 *
 * @returns VBox status code.
 * @param   pThread     The PDM thread.
 */
VMMR3DECL(int) PDMR3ThreadSuspend(PPDMTHREAD pThread)
{
    /*
     * Assert sanity.
     */
    AssertPtrReturn(pThread, VERR_INVALID_POINTER);
    AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
    Assert(pThread->Thread != RTThreadSelf());

    /*
     * This is a noop if the thread is already suspended.
     */
    if (pThread->enmState == PDMTHREADSTATE_SUSPENDED)
        return VINF_SUCCESS;

    /*
     * Change the state to suspending and kick the thread.
     */
    int rc = RTSemEventMultiReset(pThread->Internal.s.BlockEvent);
    if (RT_SUCCESS(rc))
    {
        rc = RTThreadUserReset(pThread->Thread);
        if (RT_SUCCESS(rc))
        {
            rc = VERR_WRONG_ORDER;
            if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_SUSPENDING, PDMTHREADSTATE_RUNNING))
            {
                rc = pdmR3ThreadWakeUp(pThread);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Wait for the thread to reach the suspended state.
                     */
                    if (pThread->enmState != PDMTHREADSTATE_SUSPENDED)
                        rc = RTThreadUserWait(pThread->Thread, 60*1000);
                    if (    RT_SUCCESS(rc)
                        &&  pThread->enmState != PDMTHREADSTATE_SUSPENDED)
                        rc = VERR_PDM_THREAD_IPE_2;
                    if (RT_SUCCESS(rc))
                        return rc;
                }
            }
        }
    }

    /*
     * Something failed, initialize termination.
     */
    AssertMsgFailed(("PDMR3ThreadSuspend -> rc=%Rrc enmState=%d suspending '%s'\n",
                     rc, pThread->enmState, RTThreadGetName(pThread->Thread)));
    pdmR3ThreadBailOut(pThread);
    return rc;
}
/**
 * Implements the indefinite wait.
 *
 * @returns See RTSemEventMultiWaitEx.
 * @param   pThis               The semaphore.
 * @param   fFlags              See RTSemEventMultiWaitEx.
 * @param   pSrcPos             The source position, can be NULL.
 */
static int rtSemEventMultiPosixWaitIndefinite(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, PCRTLOCKVALSRCPOS pSrcPos)
{
    /* take mutex */
    int rc = pthread_mutex_lock(&pThis->Mutex);
    AssertMsgReturn(!rc, ("Failed to lock event multi sem %p, rc=%d.\n", pThis, rc), RTErrConvertFromErrno(rc));
    ASMAtomicIncU32(&pThis->cWaiters);

    for (;;)
    {
        /* check state. */
        uint32_t const u32State = pThis->u32State;
        if (u32State != EVENTMULTI_STATE_NOT_SIGNALED)
        {
            ASMAtomicDecU32(&pThis->cWaiters);
            rc = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc));
            return u32State == EVENTMULTI_STATE_SIGNALED
                   ? VINF_SUCCESS
                   : VERR_SEM_DESTROYED;
        }

        /* wait */
#ifdef RTSEMEVENTMULTI_STRICT
        RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                    RT_INDEFINITE_WAIT, RTTHREADSTATE_EVENT_MULTI, true);
            if (RT_FAILURE(rc))
            {
                ASMAtomicDecU32(&pThis->cWaiters);
                pthread_mutex_unlock(&pThis->Mutex);
                return rc;
            }
        }
#else
        RTTHREAD hThreadSelf = RTThreadSelf();
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
        /** @todo interruptible wait is not implementable... */ NOREF(fFlags);
        rc = pthread_cond_wait(&pThis->Cond, &pThis->Mutex);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
        if (RT_UNLIKELY(rc))
        {
            AssertMsgFailed(("Failed to wait on event multi sem %p, rc=%d.\n", pThis, rc));
            ASMAtomicDecU32(&pThis->cWaiters);
            int rc2 = pthread_mutex_unlock(&pThis->Mutex);
            AssertMsg(!rc2, ("Failed to unlock event multi sem %p, rc=%d.\n", pThis, rc2));
            NOREF(rc2);
            return RTErrConvertFromErrno(rc);
        }
    }
}
Example #10
/**
 * Change the thread state to blocking.
 *
 * @param   hThread         The current thread.
 * @param   enmState        The sleep state.
 * @param   fReallySleeping Really going to sleep now.
 */
RTDECL(void) RTThreadBlocking(RTTHREAD hThread, RTTHREADSTATE enmState, bool fReallySleeping)
{
    Assert(RTTHREAD_IS_SLEEPING(enmState));
    PRTTHREADINT pThread = hThread;
    if (pThread != NIL_RTTHREAD)
    {
        Assert(pThread == RTThreadSelf());
        if (rtThreadGetState(pThread) == RTTHREADSTATE_RUNNING)
            rtThreadSetState(pThread, enmState);
        ASMAtomicWriteBool(&pThread->fReallySleeping, fReallySleeping);
    }
}
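Taken together with RTThreadUnblocked from Example #1, the typical use is a bracket around the actual sleep, as the semaphore code elsewhere in this collection does; a minimal hedged sketch with a placeholder wait callback:
/* Hedged sketch: bracketing an arbitrary wait with the thread state updates. */
static int sketchBlockingBracket(int (*pfnWait)(void *pvUser), void *pvUser)
{
    RTTHREAD hSelf = RTThreadSelf();
    RTThreadBlocking(hSelf, RTTHREADSTATE_EVENT, true /*fReallySleeping*/);
    int rc = pfnWait(pvUser);                      /* the real sleep */
    RTThreadUnblocked(hSelf, RTTHREADSTATE_EVENT); /* back to RUNNING */
    return rc;
}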
Example #11
RTDECL(int)   RTSemEventWaitNoResume(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies)
{
    PCRTLOCKVALSRCPOS pSrcPos = NULL;

    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Wait for condition.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
    if (pThis->fEverHadSignallers)
    {
        DWORD rc = WaitForSingleObjectEx(pThis->hev,
                                         0 /*Timeout*/,
                                         TRUE /*fAlertable*/);
        if (rc != WAIT_TIMEOUT || cMillies == 0)
            return rtSemEventWaitHandleStatus(pThis, rc);
        int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                        cMillies, RTTHREADSTATE_EVENT, true);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
    DWORD rc = WaitForSingleObjectEx(pThis->hev,
                                     cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies,
                                     TRUE /*fAlertable*/);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
    return rtSemEventWaitHandleStatus(pThis, rc);
}
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
Example #13
RTR3DECL(int) RTTlsSet(RTTLS iTls, void *pvValue)
{
    if (RT_UNLIKELY(    iTls < 0
                    ||  iTls >= RTTHREAD_TLS_ENTRIES
                    ||  !ASMBitTest(&g_au32AllocatedBitmap[0], iTls)))
        return VERR_INVALID_PARAMETER;

    PRTTHREADINT pThread = rtThreadGet(RTThreadSelf());
    AssertReturn(pThread, VERR_NOT_SUPPORTED);
    pThread->apvTlsEntries[iTls] = pvValue;
    rtThreadRelease(pThread);
    return VINF_SUCCESS;
}
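A hedged usage sketch for the TLS API; RTTlsAlloc and RTTlsGet are the companion calls (quoted from memory, so treat the exact signatures as an assumption here):
/* Hedged sketch: allocate a TLS slot once, then store/read a per-thread value. */
static RTTLS g_iSketchTls = NIL_RTTLS;

static int sketchTlsUse(void *pvPerThreadData)
{
    if (g_iSketchTls == NIL_RTTLS)
        g_iSketchTls = RTTlsAlloc();                   /* one slot for the process */
    int rc = RTTlsSet(g_iSketchTls, pvPerThreadData);  /* value is per calling thread */
    if (RT_SUCCESS(rc))
        Assert(RTTlsGet(g_iSketchTls) == pvPerThreadData);
    return rc;
}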
Example #14
/**
 * Adopts a non-IPRT thread.
 *
 * @returns IPRT status code.
 * @param   enmType         The thread type.
 * @param   fFlags          The thread flags. RTTHREADFLAGS_WAITABLE is not currently allowed.
 * @param   pszName         The thread name. Optional.
 * @param   pThread         Where to store the thread handle. Optional.
 */
RTDECL(int) RTThreadAdopt(RTTHREADTYPE enmType, unsigned fFlags, const char *pszName, PRTTHREAD pThread)
{
    int      rc;
    RTTHREAD Thread;

    AssertReturn(!(fFlags & RTTHREADFLAGS_WAITABLE), VERR_INVALID_PARAMETER);
    AssertReturn(!pszName || VALID_PTR(pszName), VERR_INVALID_POINTER);
    AssertReturn(!pThread || VALID_PTR(pThread), VERR_INVALID_POINTER);

    rc = VINF_SUCCESS;
    Thread = RTThreadSelf();
    if (Thread == NIL_RTTHREAD)
    {
        /* generate a name if none was given. */
        char szName[RTTHREAD_NAME_LEN];
        if (!pszName || !*pszName)
        {
            static uint32_t s_i32AlienId = 0;
            uint32_t i32Id = ASMAtomicIncU32(&s_i32AlienId);
            RTStrPrintf(szName, sizeof(szName), "ALIEN-%RX32", i32Id);
            pszName = szName;
        }

        /* try adopt it */
        rc = rtThreadAdopt(enmType, fFlags, 0, pszName);
        Thread = RTThreadSelf();
        Log(("RTThreadAdopt: %RTthrd %RTnthrd '%s' enmType=%d fFlags=%#x rc=%Rrc\n",
             Thread, RTThreadNativeSelf(), pszName, enmType, fFlags, rc));
    }
    else
        Log(("RTThreadAdopt: %RTthrd %RTnthrd '%s' enmType=%d fFlags=%#x - already adopted!\n",
             Thread, RTThreadNativeSelf(), pszName, enmType, fFlags));

    if (pThread)
        *pThread = Thread;
    return rc;
}
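A hedged sketch of the intended use: a callback arriving on a non-IPRT ("alien") thread adopts itself before using IPRT facilities that need a thread handle; the callback itself is a placeholder.
/* Hedged sketch: adopting an alien thread in a callback from foreign code. */
static void sketchForeignCallback(void)
{
    RTTHREAD hSelf;
    int rc = RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0 /*fFlags*/, "foreign-cb", &hSelf);
    if (RT_SUCCESS(rc))
    {
        /* Thread-aware IPRT APIs (TLS, lock validator, ...) now work here. */
        Assert(RTThreadSelf() == hSelf);
    }
}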
Example #15
/**
 * Gets the name of the current thread.
 *
 * @returns Pointer to readonly name string.
 * @returns NULL on failure.
 */
RTDECL(const char *) RTThreadSelfName(void)
{
    RTTHREAD Thread = RTThreadSelf();
    if (Thread != NIL_RTTHREAD)
    {
        PRTTHREADINT pThread = rtThreadGet(Thread);
        if (pThread)
        {
            const char *szName = pThread->szName;
            rtThreadRelease(pThread);
            return szName;
        }
    }
    return NULL;
}
/**
 * Called by the PDM thread instead of RTThreadSleep.
 *
 * The difference is that the sleep will be interrupted on state change. The
 * thread must be in the running state, otherwise it will return immediately.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success or state change.
 * @retval  VERR_INTERRUPTED on signal or APC.
 *
 * @param   pThread     The PDM thread.
 * @param   cMillies    The number of milliseconds to sleep.
 */
VMMR3DECL(int) PDMR3ThreadSleep(PPDMTHREAD pThread, RTMSINTERVAL cMillies)
{
    /*
     * Assert sanity.
     */
    AssertReturn(pThread->enmState > PDMTHREADSTATE_INVALID && pThread->enmState < PDMTHREADSTATE_TERMINATED, VERR_PDM_THREAD_IPE_2);
    AssertReturn(pThread->Thread == RTThreadSelf(), VERR_PDM_THREAD_INVALID_CALLER);

    /*
     * Reset the event semaphore, check the state and sleep.
     */
    RTSemEventMultiReset(pThread->Internal.s.SleepEvent);
    if (pThread->enmState != PDMTHREADSTATE_RUNNING)
        return VINF_SUCCESS;
    return RTSemEventMultiWaitNoResume(pThread->Internal.s.SleepEvent, cMillies);
}
Example #17
RTDECL(int) RTThreadPoke(RTTHREAD hThread)
{
    AssertReturn(hThread != RTThreadSelf(), VERR_INVALID_PARAMETER);
    PRTTHREADINT pThread = rtThreadGet(hThread);
    AssertReturn(pThread, VERR_INVALID_HANDLE);

    int rc;
    if (g_iSigPokeThread != -1)
    {
        rc = pthread_kill((pthread_t)(uintptr_t)pThread->Core.Key, g_iSigPokeThread);
        rc = RTErrConvertFromErrno(rc);
    }
    else
        rc = VERR_NOT_SUPPORTED;

    rtThreadRelease(pThread);
    return rc;
}
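A hedged sketch of the poke pattern: one thread sits in an interruptible ("NoResume") wait and another pokes it, which the waiter sees as VERR_INTERRUPTED; whether a poke interrupts a given wait is platform dependent, so this is illustrative only.
/* Hedged sketch: a waiter that tolerates being poked for unrelated reasons. */
static int sketchInterruptibleWait(RTSEMEVENT hEvent, bool volatile *pfStop)
{
    for (;;)
    {
        int rc = RTSemEventWaitNoResume(hEvent, RT_INDEFINITE_WAIT);
        if (rc == VERR_INTERRUPTED && !*pfStop)
            continue;                   /* poked, but not asked to stop: re-wait */
        return rc;
    }
}
/* The other side would do something like:
 *     ASMAtomicWriteBool(pfStop, true);
 *     RTThreadPoke(hWaiterThread); */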
Example #18
RTDECL(bool) RTThreadIsSelfAlive(void)
{
    if (g_frtThreadInitialized)
    {
        RTTHREAD hSelf = RTThreadSelf();
        if (hSelf != NIL_RTTHREAD)
        {
            /*
             * Inspect the thread state.  ASSUMES thread state order.
             */
            RTTHREADSTATE enmState = rtThreadGetState(hSelf);
            if (   enmState >= RTTHREADSTATE_RUNNING
                && enmState <= RTTHREADSTATE_END)
                return true;
        }
    }
    return false;
}
/**
 * Resumes the thread.
 *
 * This can be called at the power on / resume notifications to resume the
 * PDM thread a bit early. The thread will be automatically resumed upon
 * return from these two notification callbacks (devices/drivers).
 *
 * The caller is responsible for serializing the control operations on the
 * thread. That basically means, always do these calls from the EMT.
 *
 * @returns VBox status code.
 * @param   pThread     The PDM thread.
 */
VMMR3DECL(int) PDMR3ThreadResume(PPDMTHREAD pThread)
{
    /*
     * Assert sanity.
     */
    AssertPtrReturn(pThread, VERR_INVALID_POINTER);
    AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
    Assert(pThread->Thread != RTThreadSelf());

    /*
     * Change the state to resuming and kick the thread.
     */
    int rc = RTThreadUserReset(pThread->Thread);
    if (RT_SUCCESS(rc))
    {
        rc = VERR_WRONG_ORDER;
        if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_RESUMING, PDMTHREADSTATE_SUSPENDED))
        {
            rc = RTSemEventMultiSignal(pThread->Internal.s.BlockEvent);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Wait for the thread to reach the running state.
                 */
                rc = RTThreadUserWait(pThread->Thread, 60*1000);
                if (    RT_SUCCESS(rc)
                    &&  pThread->enmState != PDMTHREADSTATE_RUNNING)
                    rc = VERR_PDM_THREAD_IPE_2;
                if (RT_SUCCESS(rc))
                    return rc;
            }
        }
    }

    /*
     * Something failed, initialize termination.
     */
    AssertMsgFailed(("PDMR3ThreadResume -> rc=%Rrc enmState=%d\n", rc, pThread->enmState));
    pdmR3ThreadBailOut(pThread);
    return rc;
}
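A hedged sketch of the caller side described in the two comments above: the EMT suspending the worker early from a power-off/suspend notification and resuming it from a power-on/resume notification; error handling omitted.
/* Hedged sketch: EMT-side calls paired with the worker-side handshakes. */
static void sketchEmtSuspendResume(PPDMTHREAD pWorkerThread)
{
    PDMR3ThreadSuspend(pWorkerThread);  /* e.g. from the device's pfnSuspend/pfnPowerOff */
    /* ... the VM is suspended; the worker is parked ... */
    PDMR3ThreadResume(pWorkerThread);   /* e.g. from the device's pfnResume/pfnPowerOn */
}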
Example #20
/**
 * Grab the pointer to this thread's timeouts from TLS.
 */
struct sys_timeouts *sys_arch_timeouts(void)
{
    unsigned i;
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_DECL_PROTECT(old_level);
#endif
    RTTHREAD myself;
    struct sys_timeouts *to = NULL;

    myself = RTThreadSelf();
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_PROTECT(old_level);
#else
    RTSemEventWait(g_ThreadSem, RT_INDEFINITE_WAIT);
#endif
    for (i = 0; i < g_cThreads; i++)
    {
        if (g_aTLS[i].tid == myself)
        {
            to = &g_aTLS[i].timeouts;
            break;
        }
    }
    /* Auto-adopt new threads which use lwIP as they pop up. */
    if (!to)
    {
        unsigned id;
        id = g_cThreads;
        g_cThreads++;
        Assert(g_cThreads <= THREADS_MAX);
        g_aTLS[id].tid = myself;
        to = &g_aTLS[id].timeouts;
    }
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_UNPROTECT(old_level);
#else
    RTSemEventSignal(g_ThreadSem);
#endif
    return to;
}
Example #21
/* static */
int EventQueue::init()
{
    Assert(sMainQueue == NULL);
    Assert(RTThreadIsMain(RTThreadSelf()));
    sMainQueue = new EventQueue();

#ifdef VBOX_WITH_XPCOM
    /* Check that it actually is the main event queue, i.e. that
       we're called on the right thread. */
    nsCOMPtr<nsIEventQueue> q;
    nsresult rv = NS_GetMainEventQ(getter_AddRefs(q));
    Assert(NS_SUCCEEDED(rv));
    Assert(q == sMainQueue->mEventQ);

    /* Check that it's a native queue. */
    PRBool fIsNative = PR_FALSE;
    rv = sMainQueue->mEventQ->IsQueueNative(&fIsNative);
    Assert(NS_SUCCEEDED(rv) && fIsNative);
#endif // VBOX_WITH_XPCOM

    return VINF_SUCCESS;
}
/**
 * Called by the PDM thread in response to a resuming state.
 *
 * The purpose of this API is to tell the PDMR3ThreadResume caller that
 * the PDM thread has successfully resumed. It will also do the
 * state transition from the resuming to the running state.
 *
 * @returns VBox status code.
 *          On failure, terminate the thread.
 * @param   pThread     The PDM thread.
 */
VMMR3DECL(int) PDMR3ThreadIAmRunning(PPDMTHREAD pThread)
{
    /*
     * Assert sanity.
     */
    Assert(pThread->enmState == PDMTHREADSTATE_RESUMING);
    Assert(pThread->Thread == RTThreadSelf());

    /*
     * Update the state and tell the control thread (the guy calling the resume API).
     */
    int rc = VERR_WRONG_ORDER;
    if (pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_RUNNING, PDMTHREADSTATE_RESUMING))
    {
        rc = RTThreadUserSignal(pThread->Thread);
        if (RT_SUCCESS(rc))
            return rc;
    }

    AssertMsgFailed(("rc=%d enmState=%d\n", rc, pThread->enmState));
    pdmR3ThreadBailMeOut(pThread);
    return rc;
}
Example #23
DECLCALLBACK(int) VBoxClipboardWorker(void *pInstance, bool volatile *pfShutdown)
{
    AssertPtr(pInstance);
    LogFlowFunc(("pInstance=%p\n", pInstance));

    /*
     * Tell the control thread that it can continue
     * spawning services.
     */
    RTThreadUserSignal(RTThreadSelf());

    PVBOXCLIPBOARDCONTEXT pCtx = (PVBOXCLIPBOARDCONTEXT)pInstance;
    AssertPtr(pCtx);

    int rc;

    /* The thread waits for incoming messages from the host. */
    for (;;)
    {
        uint32_t u32Msg;
        uint32_t u32Formats;
        rc = VbglR3ClipboardGetHostMsg(pCtx->u32ClientID, &u32Msg, &u32Formats);
        if (RT_FAILURE(rc))
        {
            if (rc == VERR_INTERRUPTED)
                break;

            LogFlowFunc(("Error getting host message, rc=%Rrc\n", rc));

            if (*pfShutdown)
                break;

            /* Wait a bit before retrying. */
            RTThreadSleep(1000);
            continue;
        }
        else
        {
            LogFlowFunc(("u32Msg=%RU32, u32Formats=0x%x\n", u32Msg, u32Formats));
            switch (u32Msg)
            {
                /** @todo r=andy: Use a \#define for WM_USER (+1). */
                case VBOX_SHARED_CLIPBOARD_HOST_MSG_FORMATS:
                {
                    /* The host has announced available clipboard formats.
                     * Forward the information to the window, so it can later
                     * respond to WM_RENDERFORMAT message. */
                    ::PostMessage(pCtx->hwnd, WM_USER, 0, u32Formats);
                } break;

                case VBOX_SHARED_CLIPBOARD_HOST_MSG_READ_DATA:
                {
                    /* The host needs data in the specified format. */
                    ::PostMessage(pCtx->hwnd, WM_USER + 1, 0, u32Formats);
                } break;

                case VBOX_SHARED_CLIPBOARD_HOST_MSG_QUIT:
                {
                    /* The host is terminating. */
                    LogRel(("Clipboard: Terminating ...\n"));
                    ASMAtomicXchgBool(pfShutdown, true);
                } break;

                default:
                {
                    LogFlowFunc(("Unsupported message from host, message=%RU32\n", u32Msg));

                    /* Wait a bit before retrying. */
                    RTThreadSleep(1000);
                } break;
            }
        }

        if (*pfShutdown)
            break;
    }

    LogFlowFuncLeaveRC(rc);
    return rc;
}
/**
 * Internal worker for RTSemMutexRequestNoResume and its debug companion.
 *
 * @returns Same as RTSemMutexRequestNoResume.
 * @param   hMutexSem            The mutex handle.
 * @param   cMillies            The number of milliseconds to wait.
 * @param   pSrcPos             The source position of the caller.
 */
DECL_FORCE_INLINE(int) rtSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check for recursive entry.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (hNativeOwner == hNativeSelf)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Lock mutex semaphore.
     */
    RTTHREAD        hThreadSelf = NIL_RTTHREAD;
    if (cMillies > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }
    DWORD rc = WaitForSingleObjectEx(pThis->hMtx,
                                     cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies,
                                     TRUE /*fAlertable*/);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
    switch (rc)
    {
        case WAIT_OBJECT_0:
#ifdef RTSEMMUTEX_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
            ASMAtomicWriteHandle(&pThis->hNativeOwner, hNativeSelf);
            ASMAtomicWriteU32(&pThis->cRecursions, 1);
            return VINF_SUCCESS;

        case WAIT_TIMEOUT:          return VERR_TIMEOUT;
        case WAIT_IO_COMPLETION:    return VERR_INTERRUPTED;
        case WAIT_ABANDONED:        return VERR_SEM_OWNER_DIED;
        default:
            AssertMsgFailed(("%u\n",  rc));
        case WAIT_FAILED:
        {
            int rc2 = RTErrConvertFromWin32(GetLastError());
            AssertMsgFailed(("Wait on hMutexSem %p failed, rc=%d lasterr=%d\n", hMutexSem, rc, GetLastError()));
            if (rc2 != VINF_SUCCESS)
                return rc2;

            AssertMsgFailed(("WaitForSingleObject(event) -> rc=%d while converted lasterr=%d\n", rc, rc2));
            return VERR_INTERNAL_ERROR;
        }
    }
}
Example #25
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
            &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible)
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                          cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2. This means that we gained the
         * lock and there are _possibly_ some waiters. We don't know exactly, as another
         * thread might have entered this loop at nearly the same time. Therefore we will
         * call futex_wakeup once too often (if _no_ other thread entered this loop).
         * The key problem is the simple futex_wait test for x != y (iState != 2 in
         * our case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
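For completeness, a hedged sketch of the matching release path in this classic three-state futex scheme (0 = free, 1 = locked, 2 = locked with possible waiters); owner/nesting bookkeeping is omitted and this is the textbook counterpart, not necessarily the exact IPRT release code:
/* Hedged sketch: release side of the 0/1/2 futex mutex sketched above. */
static void sketchFutexMutexRelease(struct RTSEMMUTEXINTERNAL *pThis)
{
    /* 1 -> 0: nobody ever contended, no syscall needed.
       2 -> 0: possibly waiters, wake exactly one of them. */
    if (ASMAtomicXchgS32(&pThis->iState, 0) == 2)
        sys_futex(&pThis->iState, FUTEX_WAKE, 1, NULL, NULL, 0);
}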
/** @copydoc VBOXSERVICE::pfnWorker */
DECLCALLBACK(int) VBoxServiceVMStatsWorker(bool volatile *pfShutdown)
{
    int rc = VINF_SUCCESS;

    /* Start monitoring of the stat event change event. */
    rc = VbglR3CtlFilterMask(VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST, 0);
    if (RT_FAILURE(rc))
    {
        VBoxServiceVerbose(3, "VBoxServiceVMStatsWorker: VbglR3CtlFilterMask failed with %d\n", rc);
        return rc;
    }

    /*
     * Tell the control thread that it can continue
     * spawning services.
     */
    RTThreadUserSignal(RTThreadSelf());

    /*
     * Now enter the loop retrieving runtime data continuously.
     */
    for (;;)
    {
        uint32_t fEvents = 0;
        RTMSINTERVAL cWaitMillies;

        /* Check if an update interval change is pending. */
        rc = VbglR3WaitEvent(VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST, 0 /* no wait */, &fEvents);
        if (    RT_SUCCESS(rc)
            &&  (fEvents & VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST))
        {
            VbglR3StatQueryInterval(&gCtx.cMsStatInterval);
        }

        if (gCtx.cMsStatInterval)
        {
            VBoxServiceVMStatsReport();
            cWaitMillies = gCtx.cMsStatInterval;
        }
        else
            cWaitMillies = 3000;

        /*
         * Block for a while.
         *
         * The event semaphore takes care of ignoring interruptions and it
         * allows us to implement service wakeup later.
         */
        if (*pfShutdown)
            break;
        int rc2 = RTSemEventMultiWait(g_VMStatEvent, cWaitMillies);
        if (*pfShutdown)
            break;
        if (rc2 != VERR_TIMEOUT && RT_FAILURE(rc2))
        {
            VBoxServiceError("VBoxServiceVMStatsWorker: RTSemEventMultiWait failed; rc2=%Rrc\n", rc2);
            rc = rc2;
            break;
        }
    }

    /* Cancel monitoring of the stat event change event. */
    rc = VbglR3CtlFilterMask(0, VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST);
    if (RT_FAILURE(rc))
        VBoxServiceVerbose(3, "VBoxServiceVMStatsWorker: VbglR3CtlFilterMask failed with %d\n", rc);

    RTSemEventMultiDestroy(g_VMStatEvent);
    g_VMStatEvent = NIL_RTSEMEVENTMULTI;

    VBoxServiceVerbose(3, "VBoxStatsThread: finished statistics change request thread\n");
    return 0;
}
Example #27
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMRWINTERNAL *pThis = hRWSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    /*
     * Recursion?
     */
    pthread_t Self = pthread_self();
    pthread_t Writer;
    ATOMIC_GET_PTHREAD_T(&pThis->Writer, &Writer);
    if (Writer == Self)
    {
#ifdef RTSEMRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        Assert(pThis->cWrites < INT32_MAX);
        pThis->cWrites++;
        return VINF_SUCCESS;
    }

    /*
     * Try lock it.
     */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies)
    {
#ifdef RTSEMRW_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_RW_WRITE, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, true);
#endif
    }

    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* take rwlock */
        int rc = pthread_rwlock_wrlock(&pThis->RWLock);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (rc)
        {
            AssertMsgFailed(("Failed write lock read-write sem %p, rc=%d.\n", hRWSem, rc));
            return RTErrConvertFromErrno(rc);
        }
    }
    else
    {
#ifdef RT_OS_DARWIN
        AssertMsgFailed(("Not implemented on Darwin yet because of incomplete pthreads API."));
        return VERR_NOT_IMPLEMENTED;
#else /* !RT_OS_DARWIN */
        /*
         * Get current time and calc end of wait time.
         */
        struct timespec     ts = {0,0};
        clock_gettime(CLOCK_REALTIME, &ts);
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* take rwlock */
        int rc = pthread_rwlock_timedwrlock(&pThis->RWLock, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
        if (rc)
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed write lock read-write sem %p, rc=%d.\n", hRWSem, rc));
            return RTErrConvertFromErrno(rc);
        }
#endif /* !RT_OS_DARWIN */
    }

    ATOMIC_SET_PTHREAD_T(&pThis->Writer, Self);
    pThis->cWrites = 1;
    Assert(!pThis->cReaders);
#ifdef RTSEMRW_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
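The relative-to-absolute timeout conversion above (repeated in the mutex code further down) is a common pattern; a hedged sketch of it factored into a small helper:
/* Hedged sketch: turn a relative millisecond timeout into the absolute
 * CLOCK_REALTIME timespec that the pthread timed-lock calls expect. */
static void sketchMilliesToAbsTimespec(RTMSINTERVAL cMillies, struct timespec *pTs)
{
    clock_gettime(CLOCK_REALTIME, pTs);
    pTs->tv_sec  += cMillies / 1000;
    pTs->tv_nsec += (cMillies % 1000) * 1000000;
    if (pTs->tv_nsec >= 1000000000)
    {
        pTs->tv_nsec -= 1000000000;
        pTs->tv_sec++;
    }
}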
/** @copydoc VBOXSERVICE::pfnWorker */
DECLCALLBACK(int) VBoxServiceCpuHotPlugWorker(bool volatile *pfShutdown)
{
    /*
     * Tell the control thread that it can continue spawning services.
     */
    RTThreadUserSignal(RTThreadSelf());

    /*
     * Enable the CPU hotplug notifier.
     */
    int rc = VbglR3CpuHotPlugInit();
    if (RT_FAILURE(rc))
        return rc;

    /*
     * The Work Loop.
     */
    for (;;)
    {
        /* Wait for CPU hot plugging event. */
        uint32_t            idCpuCore;
        uint32_t            idCpuPackage;
        VMMDevCpuEventType  enmEventType;
        rc = VbglR3CpuHotPlugWaitForEvent(&enmEventType, &idCpuCore, &idCpuPackage);
        if (RT_SUCCESS(rc))
        {
            VBoxServiceVerbose(3, "CpuHotPlug: Event happened idCpuCore=%u idCpuPackage=%u enmEventType=%d\n",
                               idCpuCore, idCpuPackage, enmEventType);
            switch (enmEventType)
            {
                case VMMDevCpuEventType_Plug:
                    VBoxServiceCpuHotPlugHandlePlugEvent(idCpuCore, idCpuPackage);
                    break;

                case VMMDevCpuEventType_Unplug:
                    VBoxServiceCpuHotPlugHandleUnplugEvent(idCpuCore, idCpuPackage);
                    break;

                default:
                {
                    static uint32_t s_iErrors = 0;
                    if (s_iErrors++ < 10)
                        VBoxServiceError("CpuHotPlug: Unknown event: idCpuCore=%u idCpuPackage=%u enmEventType=%d\n",
                                         idCpuCore, idCpuPackage, enmEventType);
                    break;
                }
            }
        }
        else if (rc != VERR_INTERRUPTED && rc != VERR_TRY_AGAIN)
        {
            VBoxServiceError("CpuHotPlug: VbglR3CpuHotPlugWaitForEvent returned %Rrc\n", rc);
            break;
        }

        if (*pfShutdown)
            break;
    }

    VbglR3CpuHotPlugTerm();
    return rc;
}
/**
 * @interface_method_impl{VBOXSERVICE,pfnWorker}
 */
DECLCALLBACK(int) vgsvcTimeSyncWorker(bool volatile *pfShutdown)
{
    RTTIME Time;
    char sz[64];
    int rc = VINF_SUCCESS;

    /*
     * Tell the control thread that it can continue spawning services.
     */
    RTThreadUserSignal(RTThreadSelf());

    /*
     * The Work Loop.
     */
    for (;;)
    {
        /*
         * Try get a reliable time reading.
         */
        int cTries = 3;
        do
        {
            /* query it. */
            RTTIMESPEC GuestNow0, GuestNow, HostNow;
            RTTimeNow(&GuestNow0);
            int rc2 = VbglR3GetHostTime(&HostNow);
            if (RT_FAILURE(rc2))
            {
                if (g_cTimeSyncErrors++ < 10)
                    VGSvcError("vgsvcTimeSyncWorker: VbglR3GetHostTime failed; rc2=%Rrc\n", rc2);
                break;
            }
            RTTimeNow(&GuestNow);

            /* calc latency and check if it's ok. */
            RTTIMESPEC GuestElapsed = GuestNow;
            RTTimeSpecSub(&GuestElapsed, &GuestNow0);
            if ((uint32_t)RTTimeSpecGetMilli(&GuestElapsed) < g_TimeSyncMaxLatency)
            {
                /*
                 * Set the time once after we were restored.
                 * (Of course only if the drift is bigger than MinAdjust)
                 */
                uint32_t TimeSyncSetThreshold = g_TimeSyncSetThreshold;
                if (g_fTimeSyncSetOnRestore)
                {
                    uint64_t idNewSession = g_idTimeSyncSession;
                    VbglR3GetSessionId(&idNewSession);
                    if (idNewSession != g_idTimeSyncSession)
                    {
                        VGSvcVerbose(3, "vgsvcTimeSyncWorker: The VM session ID changed, forcing resync.\n");
                        TimeSyncSetThreshold = 0;
                        g_idTimeSyncSession  = idNewSession;
                    }
                }

                /*
                 * Calculate the adjustment threshold and the current drift.
                 */
                uint32_t MinAdjust = RTTimeSpecGetMilli(&GuestElapsed) * g_TimeSyncLatencyFactor;
                if (MinAdjust < g_TimeSyncMinAdjust)
                    MinAdjust = g_TimeSyncMinAdjust;

                RTTIMESPEC Drift = HostNow;
                RTTimeSpecSub(&Drift, &GuestNow);
                if (RTTimeSpecGetMilli(&Drift) < 0)
                    MinAdjust += g_TimeSyncMinAdjust; /* extra buffer against moving time backwards. */

                RTTIMESPEC AbsDrift = Drift;
                RTTimeSpecAbsolute(&AbsDrift);
                if (g_cVerbosity >= 3)
                {
                    VGSvcVerbose(3, "vgsvcTimeSyncWorker: Host:    %s    (MinAdjust: %RU32 ms)\n",
                                 RTTimeToString(RTTimeExplode(&Time, &HostNow), sz, sizeof(sz)), MinAdjust);
                    VGSvcVerbose(3, "vgsvcTimeSyncWorker: Guest: - %s => %RDtimespec drift\n",
                                 RTTimeToString(RTTimeExplode(&Time, &GuestNow), sz, sizeof(sz)), &Drift);
                }

                uint32_t AbsDriftMilli = RTTimeSpecGetMilli(&AbsDrift);
                if (AbsDriftMilli > MinAdjust)
                {
                    /*
                     * Ok, the drift is above the threshold.
                     *
                     * Try a gradual adjustment first, if that fails or the drift is
                     * too big, fall back on just setting the time.
                     */

                    if (    AbsDriftMilli > TimeSyncSetThreshold
                        ||  g_fTimeSyncSetNext
                        ||  !vgsvcTimeSyncAdjust(&Drift))
                    {
                        vgsvcTimeSyncCancelAdjust();
                        vgsvcTimeSyncSet(&Drift);
                    }
                }
                else
                    vgsvcTimeSyncCancelAdjust();
                break;
            }
            VGSvcVerbose(3, "vgsvcTimeSyncWorker: %RDtimespec: latency too high (%RDtimespec) sleeping 1s\n", &GuestNow, &GuestElapsed);
            RTThreadSleep(1000);
        } while (--cTries > 0);

        /* Clear the set-next/set-start flag. */
        g_fTimeSyncSetNext = false;

        /*
         * Block for a while.
         *
         * The event semaphore takes care of ignoring interruptions and it
         * allows us to implement service wakeup later.
         */
        if (*pfShutdown)
            break;
        int rc2 = RTSemEventMultiWait(g_TimeSyncEvent, g_TimeSyncInterval);
        if (*pfShutdown)
            break;
        if (rc2 != VERR_TIMEOUT && RT_FAILURE(rc2))
        {
            VGSvcError("vgsvcTimeSyncWorker: RTSemEventMultiWait failed; rc2=%Rrc\n", rc2);
            rc = rc2;
            break;
        }
    }

    vgsvcTimeSyncCancelAdjust();
    RTSemEventMultiDestroy(g_TimeSyncEvent);
    g_TimeSyncEvent = NIL_RTSEMEVENTMULTI;
    return rc;
}
Example #30
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNesting > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNesting);
        return VINF_SUCCESS;
    }

    /*
     * Lock it.
     */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies != 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }

    if (cMillies == RT_INDEFINITE_WAIT)
    {
        /* take mutex */
        int rc = pthread_mutex_lock(&pThis->Mutex);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
        if (rc)
        {
            AssertMsgFailed(("Failed to lock mutex sem %p, rc=%d.\n", hMutexSem, rc)); NOREF(rc);
            return RTErrConvertFromErrno(rc);
        }
    }
    else
    {
#ifdef RT_OS_DARWIN
        AssertMsgFailed(("Not implemented on Darwin yet because of incomplete pthreads API."));
        return VERR_NOT_IMPLEMENTED;
#else /* !RT_OS_DARWIN */
        /*
         * Get current time and calc end of wait time.
         */
        struct timespec     ts = {0,0};
        clock_gettime(CLOCK_REALTIME, &ts);
        if (cMillies != 0)
        {
            ts.tv_nsec += (cMillies % 1000) * 1000000;
            ts.tv_sec  += cMillies / 1000;
            if (ts.tv_nsec >= 1000000000)
            {
                ts.tv_nsec -= 1000000000;
                ts.tv_sec++;
            }
        }

        /* take mutex */
        int rc = pthread_mutex_timedlock(&pThis->Mutex, &ts);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
        if (rc)
        {
            AssertMsg(rc == ETIMEDOUT, ("Failed to lock mutex sem %p, rc=%d.\n", hMutexSem, rc)); NOREF(rc);
            return RTErrConvertFromErrno(rc);
        }
#endif /* !RT_OS_DARWIN */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNesting, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}