Example #1
/**
 * Notification wrapper that updates CPU states and invokes our notification
 * callbacks.
 *
 * @param idCpu             The CPU Id.
 * @param pvUser1           Pointer to the notifier_block (unused).
 * @param pvUser2           The notification event.
 * @remarks This can be invoked in interrupt context.
 */
static DECLCALLBACK(void) rtMpNotificationLinuxOnCurrentCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned long ulNativeEvent = *(unsigned long *)pvUser2;
    NOREF(pvUser1);

    AssertRelease(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertReleaseMsg(idCpu == RTMpCpuId(),  /* ASSUMES iCpu == RTCPUID */
                     ("idCpu=%u RTMpCpuId=%d ApicId=%d\n", idCpu, RTMpCpuId(), ASMGetApicId() ));

    switch (ulNativeEvent)
    {
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
            break;
# endif
    }
}
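/*
 * Context sketch (not from the IPRT sources): how a wrapper like the one
 * above is typically driven from the classic Linux CPU hotplug notifier
 * chain.  The rtMpNotificationLinuxCallback name, the notifier_block wiring
 * and the direct forwarding are assumptions for illustration; real code must
 * ensure the wrapper runs on the affected CPU (e.g. via
 * smp_call_function_single) because of the idCpu == RTMpCpuId() assertion.
 */
static int rtMpNotificationLinuxCallback(struct notifier_block *pSelf, unsigned long ulNativeEvent, void *pvCpu)
{
    RTCPUID idCpu = (RTCPUID)(unsigned long)pvCpu;
    rtMpNotificationLinuxOnCurrentCpu(idCpu, pSelf, &ulNativeEvent); /* see caveat above */
    return NOTIFY_DONE;
}

static struct notifier_block g_rtMpNotifierBlock =
{
    .notifier_call = rtMpNotificationLinuxCallback,
};
/* Registered once at init time: register_cpu_notifier(&g_rtMpNotifierBlock); */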
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);
    pState->u32Reserved = 42;

    /*
     * Disable interrupts to prevent preemption while we grab the per-cpu spin lock.
     * Note! Only take the lock on the first call or we end up spinning forever.
     */
    RTCCUINTREG fSavedFlags = ASMIntDisableFlags();
    RTCPUID     idCpu       = RTMpCpuId();
    if (RT_UNLIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks)))
    {
        Assert(g_aPreemptHacks[idCpu].cRecursion < UINT32_MAX / 2);
        if (++g_aPreemptHacks[idCpu].cRecursion == 1)
        {
            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
            if (pSpinLock)
                lck_spin_lock(pSpinLock);
            else
                AssertFailed();
        }
    }
    ASMSetFlags(fSavedFlags);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
}
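/*
 * Usage sketch (function name illustrative): RTThreadPreemptDisable is always
 * paired with RTThreadPreemptRestore on the same state structure, the same
 * pattern the preemption tests further down in this section use.
 */
static void rtExampleWithPreemptionDisabled(void)
{
    RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&State);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    /* ... short, non-blocking critical work ... */
    RTThreadPreemptRestore(&State);
}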
/**
 * Callback wrapper for single-CPU timers.
 *
 * @param    pvArg              Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level, i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, and neither should pfnTimer().
 */
static void rtTimerSolSingleCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pTimer->fAllCpus);

    /* Make sure one-shots do not fire another time. */
    Assert(   !pTimer->fSuspended
           || pTimer->cNsInterval != 0);

    if (!pTimer->fSuspendedFromTimer)
    {
        /* Make sure we are firing on the right CPU. */
        Assert(   !pTimer->fSpecificCpu
               || pTimer->iCpu == RTMpCpuId());

        /* For one-shots, we may allow the callback to restart them. */
        if (pTimer->cNsInterval == 0)
            pTimer->fSuspendedFromTimer = true;

        /*
         * Perform the callout.
         */
        pTimer->u.Single.pActiveThread = curthread;

        uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Single.pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning.  This comes at a slight cost,
             * which is why we don't do it all the time.
             */
            if (pTimer->u.Single.nsNextTick)
                pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
            return;
        }

        /*
     * The timer has been suspended, set expiration time to infinity.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
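/*
 * Illustrative sketch (callback body and names assumed, not from the
 * sources): a timer callback that changes its own interval with the public
 * RTTimerChangeInterval API.  The fIntervalChanged path in the wrapper above
 * then reprograms the cyclic with the new expiration time on return.
 */
static DECLCALLBACK(void) rtExampleAdaptiveTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    uint64_t *pcNsInterval = (uint64_t *)pvUser;
    if ((iTick & 0xff) == 0) /* back off every 256 ticks */
        RTTimerChangeInterval(pTimer, *pcNsInterval *= 2);
}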
/**
 * Hook function for the thread-preempting event.
 *
 * @param   pPreemptNotifier    Pointer to the preempt_notifier struct.
 * @param   pNext               Pointer to the task that is preempting the
 *                              current thread.
 *
 * @remarks Called with the rq (runqueue) lock held and with preemption and
 *          interrupts disabled!
 */
static void rtThreadCtxHooksLnxSchedOut(struct preempt_notifier *pPreemptNotifier, struct task_struct *pNext)
{
    PRTTHREADCTXINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXINT, hPreemptNotifier);
    AssertPtr(pThis);
    AssertPtr(pThis->pfnThreadCtxHook);
    Assert(pThis->fRegistered);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    pThis->pfnThreadCtxHook(RTTHREADCTXEVENT_PREEMPTING, pThis->pvUser);
}
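/*
 * Wiring sketch (registration point assumed): the hook above is hung onto
 * the Linux preempt notifier ops together with its sched_in counterpart,
 * as RTThreadCtxHookCreate further down also does, and then registered for
 * the current thread.
 */
static void rtExampleRegisterLnxHook(PRTTHREADCTXINT pThis)
{
    pThis->hPreemptOps.sched_out = rtThreadCtxHooksLnxSchedOut;
    pThis->hPreemptOps.sched_in  = rtThreadCtxHooksLnxSchedIn;
    preempt_notifier_register(&pThis->hPreemptNotifier);
}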
Example #5
/**
 * Hook function for the thread-preempting event.
 *
 * @param   pvThreadCtxInt  Opaque pointer to the internal thread-context
 *                          object.
 *
 * @remarks Called with preemption disabled!
 */
static void rtThreadCtxHooksSolPreempting(void *pvThreadCtxInt)
{
    PRTTHREADCTXINT pThis = (PRTTHREADCTXINT)pvThreadCtxInt;
    AssertPtr(pThis);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (pThis->fRegistered)
    {
        Assert(pThis->pfnThreadCtxHook);
        pThis->pfnThreadCtxHook(RTTHREADCTXEVENT_PREEMPTING, pThis->pvUser);
    }
}
/**
 * Returns the VMCPU of the calling EMT.
 *
 * @returns The VMCPU pointer. NULL if not an EMT.
 *
 * @param   pVM         Pointer to the VM.
 * @internal
 */
VMMDECL(PVMCPU) VMMGetCpu(PVM pVM)
{
#ifdef IN_RING3
    VMCPUID idCpu = VMR3GetVMCPUId(pVM);
    if (idCpu == NIL_VMCPUID)
        return NULL;
    Assert(idCpu < pVM->cCpus);
    return &pVM->aCpus[idCpu];

#elif defined(IN_RING0)
    if (pVM->cCpus == 1)
        return &pVM->aCpus[0];

    /*
     * Search first by host cpu id (most common case)
     * and then by native thread id (page fusion case).
     */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
         *        leaving it here for hysterical raisins and as a reference if we
         *        implemented a hashing approach in the future. */
        RTCPUID idHostCpu = RTMpCpuId();

        /** @todo optimize for large number of VCPUs when that becomes more common. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];

            if (pVCpu->idHostCpu == idHostCpu)
                return pVCpu;
        }
    }

    /* RTThreadGetNativeSelf had better be cheap. */
    RTNATIVETHREAD hThread = RTThreadNativeSelf();

    /** @todo optimize for large number of VCPUs when that becomes more common.
     * Use a map like GIP does that's indexed by the host CPU index.  */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        if (pVCpu->hNativeThreadR0 == hThread)
            return pVCpu;
    }
    return NULL;

#else /* RC: Always EMT(0) */
    return &pVM->aCpus[0];
#endif /* IN_RING0 */
}
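/*
 * Usage sketch (function name and error code choice are illustrative):
 * ring-0 callers resolve the calling EMT's VMCPU and fail gracefully for
 * non-EMT threads.
 */
static int rtExampleResolveEmt(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (!pVCpu)
        return VERR_VM_THREAD_NOT_EMT;
    /* ... per-VCPU work ... */
    return VINF_SUCCESS;
}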
Example #7
/**
 * PFNRTMPWORKER worker for executing Mp events on the target CPU.
 *
 * @param    idCpu          The current CPU Id.
 * @param    pvArg          Opaque pointer to event type (online/offline).
 * @param    pvIgnored1     Ignored.
 */
static void rtMpNotificationSolOnCurrentCpu(RTCPUID idCpu, void *pvArg, void *pvIgnored1)
{
    NOREF(pvIgnored1);
    NOREF(idCpu);

    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    AssertRelease(pArgs && pArgs->idCpu == RTMpCpuId());
    Assert(pArgs->pvUser1);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    RTMPEVENT enmMpEvent = *(RTMPEVENT *)pArgs->pvUser1;
    rtMpNotificationDoCallbacks(enmMpEvent, pArgs->idCpu);
}
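/*
 * Dispatch sketch (RTMPARGS member layout assumed, function name
 * illustrative): the event is packaged and pushed to the target CPU with
 * RTMpOnSpecific, whose PFNRTMPWORKER signature matches the wrapper above.
 */
static int rtExampleDispatchMpEvent(RTCPUID idCpu)
{
    RTMPARGS  Args;
    RTMPEVENT enmMpEvent = RTMPEVENT_ONLINE;
    Args.idCpu   = idCpu;
    Args.pvUser1 = &enmMpEvent;
    return RTMpOnSpecific(idCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
}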
Example #8
/**
 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxAllWrapper(void *pvInfo)
{
    PRTMPARGS  pArgs      = (PRTMPARGS)pvInfo;
    PRTCPUSET  pWorkerSet = pArgs->pWorkerSet;
    RTCPUID    idCpu      = RTMpCpuId();
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (RTCpuSetIsMember(pWorkerSet, idCpu))
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        RTCpuSetDel(pWorkerSet, idCpu);
    }
}
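/*
 * Caller-side sketch (wait loop and exact sequence assumed, function name
 * illustrative): the RTMPARGS package is seeded with the online CPU set and
 * the wrapper above is fanned out with on_each_cpu(); each CPU removes
 * itself from the set when it has run the worker.
 */
static void rtExampleMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTMPARGS Args;
    RTCPUSET WorkerSet;
    Args.pfnWorker  = pfnWorker;
    Args.pvUser1    = pvUser1;
    Args.pvUser2    = pvUser2;
    Args.pWorkerSet = &WorkerSet;
    RTMpGetOnlineSet(&WorkerSet);
    on_each_cpu(rtmpLinuxAllWrapper, &Args, 1 /* wait */);
    while (RTCpuSetCount(&WorkerSet) > 0)
        ASMNopPause();
}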
/**
 * Callback wrapper for Omni-CPU timers.
 *
 * @param    pvArg              Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level, i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, and neither should pfnTimer().
 */
static void rtTimerSolOmniCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(pTimer->fAllCpus);

    if (!pTimer->fSuspendedFromTimer)
    {
        /*
         * Perform the callout.
         */
        uint32_t const iCpu = CPU->cpu_id;

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
        uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning.  This comes at a slight cost,
             * which is why we don't do it all the time.
             *
             * Note! The cyclic_reprogram call only affects the omni cyclic
             *       component for this CPU.
             */
            if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
            return;
        }

        /*
     * The timer has been suspended, set expiration time to infinity.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
Example #10
RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
{
    if (pch[cb] != '\0')
        AssertBreakpoint();
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))    /** @todo this will change when preemption hooks are implemented. */
        return;
    if (    !g_frtSolSplSetsEIF
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ||  ASMIntAreEnabled()
#else
/* PORTME: Check if interrupts are enabled, if applicable. */
#endif
        )
        cmn_err(CE_CONT, pch);
    return;
}
RTDECL(int) RTThreadCtxHooksCreate(PRTTHREADCTX phThreadCtx)
{
    PRTTHREADCTXINT pThis;
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    pThis = (PRTTHREADCTXINT)RTMemAllocZ(sizeof(*pThis));
    if (RT_UNLIKELY(!pThis))
        return VERR_NO_MEMORY;
    pThis->u32Magic    = RTTHREADCTXINT_MAGIC;
    pThis->hOwner      = RTThreadNativeSelf();
    pThis->fRegistered = false;
    preempt_notifier_init(&pThis->hPreemptNotifier, &pThis->hPreemptOps);
    pThis->cRefs       = 1;

    *phThreadCtx = pThis;
    return VINF_SUCCESS;
}
RTDECL(int) SUPR0Printf(const char *pszFormat, ...)
{
    va_list     args;
    char        szMsg[512];

    /* cmn_err() acquires adaptive mutexes. Not preemption safe, see @bugref{6657}. */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        return 0;

    va_start(args, pszFormat);
    RTStrPrintfV(szMsg, sizeof(szMsg) - 1, pszFormat, args);
    va_end(args);

    szMsg[sizeof(szMsg) - 1] = '\0';
    cmn_err(CE_CONT, "%s", szMsg);
    return 0;
}
Example #13
RTDECL(int) RTThreadCtxHooksCreate(PRTTHREADCTX phThreadCtx)
{
    PRTTHREADCTXINT pThis;
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    pThis = (PRTTHREADCTXINT)RTMemAllocZ(sizeof(*pThis));
    if (RT_UNLIKELY(!pThis))
        return VERR_NO_MEMORY;
    pThis->u32Magic    = RTTHREADCTXINT_MAGIC;
    pThis->hOwner      = RTThreadNativeSelf();
    pThis->fRegistered = false;
    pThis->cRefs       = 2;               /* One reference for the thread, one for the hook object. */

    /*
     * installctx() allocates memory and thus cannot be used in RTThreadCtxHooksRegister() which can be used
     * with preemption disabled. We allocate the context-hooks here and use 'fRegistered' to determine if we can
     * invoke the consumer's hook or not.
     */
    if (g_frtSolOldThreadCtx)
    {
        g_rtSolThreadCtx.Install.pfnSol_installctx_old(curthread,
                                                       pThis,
                                                       rtThreadCtxHooksSolPreempting,
                                                       rtThreadCtxHooksSolResumed,
                                                       NULL,                          /* fork */
                                                       NULL,                          /* lwp_create */
                                                       rtThreadCtxHooksSolFree);
    }
    else
    {
        g_rtSolThreadCtx.Install.pfnSol_installctx(curthread,
                                                   pThis,
                                                   rtThreadCtxHooksSolPreempting,
                                                   rtThreadCtxHooksSolResumed,
                                                   NULL,                              /* fork */
                                                   NULL,                              /* lwp_create */
                                                   NULL,                              /* exit */
                                                   rtThreadCtxHooksSolFree);
    }

    *phThreadCtx = pThis;
    return VINF_SUCCESS;
}
/**
 * Hook function for the thread-preempting event.
 *
 * @param   pPreemptNotifier    Pointer to the preempt_notifier struct.
 * @param   pNext               Pointer to the task that is preempting the
 *                              current thread.
 *
 * @remarks Called with the rq (runqueue) lock held and with preemption and
 *          interrupts disabled!
 */
static void rtThreadCtxHooksLnxSchedOut(struct preempt_notifier *pPreemptNotifier, struct task_struct *pNext)
{
    PRTTHREADCTXINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXINT, hPreemptNotifier);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    RTCCUINTREG fSavedEFlags = ASMGetFlags();
    stac();
#endif

    AssertPtr(pThis);
    AssertPtr(pThis->pfnThreadCtxHook);
    Assert(pThis->fRegistered);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    pThis->pfnThreadCtxHook(RTTHREADCTXEVENT_PREEMPTING, pThis->pvUser);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    ASMSetFlags(fSavedEFlags);
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
    pThis->fSavedRFlags = fSavedEFlags;
# endif
#endif
}
RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
{
    if (pch[cb] != '\0')
        AssertBreakpoint();

    /* cmn_err() acquires adaptive mutexes. Not preemption safe, see @bugref{6657}. */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        return;

    if (    !g_frtSolSplSetsEIF
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ||  ASMIntAreEnabled()
#else
/* PORTME: Check if interrupts are enabled, if applicable. */
#endif
        )
    {
        cmn_err(CE_CONT, pch);
    }

    return;
}
RTDECL(int) RTThreadCtxHookCreate(PRTTHREADCTXHOOK phCtxHook, uint32_t fFlags, PFNRTTHREADCTXHOOK pfnCallback, void *pvUser)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis;
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
    AssertReturn(fFlags == 0, VERR_INVALID_FLAGS);

    /*
     * Allocate and initialize a new hook.  We don't register it yet, just
     * create it.
     */
    pThis = (PRTTHREADCTXHOOKINT)RTMemAllocZ(sizeof(*pThis));
    if (RT_UNLIKELY(!pThis))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }
    pThis->u32Magic     = RTTHREADCTXHOOKINT_MAGIC;
    pThis->hOwner       = RTThreadNativeSelf();
    pThis->fEnabled     = false;
    pThis->pfnCallback  = pfnCallback;
    pThis->pvUser       = pvUser;
    preempt_notifier_init(&pThis->LnxPreemptNotifier, &pThis->PreemptOps);
    pThis->PreemptOps.sched_out = rtThreadCtxHooksLnxSchedOut;
    pThis->PreemptOps.sched_in  = rtThreadCtxHooksLnxSchedIn;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    preempt_notifier_inc();
#endif

    *phCtxHook = pThis;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
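/*
 * Usage sketch (function name illustrative): a hook is created on the owning
 * EMT, enabled around the preemption-sensitive region, and torn down with
 * RTThreadCtxHookDestroy below.
 */
static int rtExampleUseCtxHook(PFNRTTHREADCTXHOOK pfnCallback, void *pvUser)
{
    RTTHREADCTXHOOK hCtxHook;
    int rc = RTThreadCtxHookCreate(&hCtxHook, 0 /* fFlags */, pfnCallback, pvUser);
    if (RT_SUCCESS(rc))
    {
        RTThreadCtxHookEnable(hCtxHook);
        /* ... code that must be notified when it gets scheduled out/in ... */
        RTThreadCtxHookDisable(hCtxHook);
        rc = RTThreadCtxHookDestroy(hCtxHook);
    }
    return rc;
}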
RTDECL(uint32_t) RTThreadCtxHooksRelease(RTTHREADCTX hThreadCtx)
{
    /*
     * Validate input.
     */
    uint32_t        cRefs;
    PRTTHREADCTXINT pThis = hThreadCtx;
    if (pThis == NIL_RTTHREADCTX)
        return 0;

    AssertPtr(pThis);
    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    UINT32_MAX);
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    cRefs = ASMAtomicDecU32(&pThis->cRefs);
    if (!cRefs)
    {
        /*
         * If there's still a registered thread-context hook, deregister it now before destroying the object.
         */
        if (pThis->fRegistered)
            rtThreadCtxHooksDeregister(pThis);

        /*
         * Paranoia... but since these are ring-0 threads we can't be too careful.
         */
        Assert(!pThis->fRegistered);
        Assert(!pThis->hPreemptOps.sched_out);
        Assert(!pThis->hPreemptOps.sched_in);

        ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXINT_MAGIC);
        RTMemFree(pThis);
    }
    else
        Assert(cRefs < UINT32_MAX / 2);

    return cRefs;
}
RTDECL(int ) RTThreadCtxHookDestroy(RTTHREADCTXHOOK hCtxHook)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis = hCtxHook;
    if (pThis == NIL_RTTHREADCTXHOOK)
        return VINF_SUCCESS;
    AssertPtr(pThis);
    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pThis->fEnabled || pThis->hOwner == RTThreadNativeSelf());

    /*
     * If there's still a registered thread-context hook, deregister it now before destroying the object.
     */
    if (pThis->fEnabled)
    {
        Assert(pThis->hOwner == RTThreadNativeSelf());
        rtThreadCtxHookDisable(pThis);
        Assert(!pThis->fEnabled); /* paranoia */
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    preempt_notifier_dec();
#endif

    ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
    RTMemFree(pThis);

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Helper for RTSemSpinMutexTryRequest and RTSemSpinMutexRequest.
 *
 * This will check the current context and see if it's usable.
 *
 * @returns VINF_SUCCESS or VERR_SEM_BAD_CONTEXT.
 * @param   pState      Output structure.
 */
static int rtSemSpinMutexEnter(RTSEMSPINMUTEXSTATE *pState, RTSEMSPINMUTEXINTERNAL *pThis)
{
#ifndef RT_OS_WINDOWS
    RTTHREADPREEMPTSTATE const StateInit = RTTHREADPREEMPTSTATE_INITIALIZER;
#endif
    int rc  = VINF_SUCCESS;

    /** @todo Later #1: When entering in interrupt context and we're not able to
     *        wake up threads from it, we could try switch the lock into pure
     *        spinlock mode. This would require that there are no other threads
     *        currently waiting on it and that the RTSEMSPINMUTEX_FLAGS_IRQ_SAFE
     *        flag is set.
     *
     *        Later #2: Similarly, it is possible to turn on the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE at run time if we manage to grab the
     *        semaphore ownership at interrupt time. We might want to try delay the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE even, since we're fine if we get it...
     */

#ifdef RT_OS_WINDOWS
    /*
     * NT: IRQL <= DISPATCH_LEVEL for waking up threads; IRQL < DISPATCH_LEVEL for sleeping.
     */
    pState->PreemptState.uchOldIrql = KeGetCurrentIrql();
    if (pState->PreemptState.uchOldIrql > DISPATCH_LEVEL)
        return VERR_SEM_BAD_CONTEXT;

    if (pState->PreemptState.uchOldIrql >= DISPATCH_LEVEL)
        pState->fSpin = true;
    else
    {
        pState->fSpin = false;
        KeRaiseIrql(DISPATCH_LEVEL, &pState->PreemptState.uchOldIrql);
        Assert(pState->PreemptState.uchOldIrql < DISPATCH_LEVEL);
    }

#elif defined(RT_OS_SOLARIS)
    /*
     * Solaris:  RTSemEventSignal will do bad stuff on S10 if interrupts are disabled.
     */
    if (!ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#elif defined(RT_OS_LINUX) || defined(RT_OS_OS2)
    /*
     * OSes on which RTSemEventSignal can be called from any context.
     */
    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#else /* PORTME: Check for context where we cannot wake up threads. */
    /*
     * Default: ASSUME thread can be woken up if interrupts are enabled and
     *          we're not in an interrupt context.
     *          ASSUME that we can go to sleep if preemption is enabled.
     */
    if (    RTThreadIsInInterrupt(NIL_RTTHREAD)
        ||  !ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);
#endif

    /*
     * Disable interrupts if necessary.
     */
    pState->fValidFlags = !!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE);
    if (pState->fValidFlags)
        pState->fSavedFlags = ASMIntDisableFlags();
    else
        pState->fSavedFlags = 0;

    return rc;
}
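/*
 * Sketch of the matching exit path (the rtSemSpinMutexLeave name is an
 * assumption): undo the context changes in the reverse order of the enter
 * function above.
 */
static void rtSemSpinMutexLeave(RTSEMSPINMUTEXSTATE *pState)
{
    /* Re-enable interrupts if the enter path disabled them. */
    if (pState->fValidFlags)
        ASMSetFlags(pState->fSavedFlags);
#ifdef RT_OS_WINDOWS
    /* NT: lower the IRQL back down if the enter path raised it. */
    if (pState->PreemptState.uchOldIrql < DISPATCH_LEVEL)
        KeLowerIrql(pState->PreemptState.uchOldIrql);
#else
    RTThreadPreemptRestore(&pState->PreemptState);
#endif
}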
Example #20
RTDECL(uint32_t) RTThreadCtxHooksRelease(RTTHREADCTX hThreadCtx)
{
    PRTTHREADCTXINT pThis = hThreadCtx;
    if (pThis == NIL_RTTHREADCTX)
        return 0;

    RTTHREADCTX_VALID_RETURN_RC(hThreadCtx, UINT32_MAX);
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    pThis->fRegistered = false;
    uint32_t cRefs = ASMAtomicDecU32(&pThis->cRefs);

    if (   pThis->hOwner == RTThreadNativeSelf()
        && cRefs == 1)
    {
        /*
         * removectx() will invoke rtThreadCtxHooksSolFree() and there is no way to bypass it and still use
         * rtThreadCtxHooksSolFree() at the same time.  Hence the convoluted reference counting.
         *
         * When this function is called from the owner thread and is the last reference, we call removectx() which
         * will invoke rtThreadCtxHooksSolFree() with cRefs = 1 and that will then free the hook object.
         *
         * When the function is called from a different thread, we simply decrement the reference. Whenever the
         * ring-0 thread dies, Solaris will call rtThreadCtxHooksSolFree() which will free the hook object.
         */
        int rc;
        if (g_frtSolOldThreadCtx)
        {
            rc = g_rtSolThreadCtx.Remove.pfnSol_removectx_old(curthread,
                                                              pThis,
                                                              rtThreadCtxHooksSolPreempting,
                                                              rtThreadCtxHooksSolResumed,
                                                              NULL,                          /* fork */
                                                              NULL,                          /* lwp_create */
                                                              rtThreadCtxHooksSolFree);
        }
        else
        {
            rc = g_rtSolThreadCtx.Remove.pfnSol_removectx(curthread,
                                                          pThis,
                                                          rtThreadCtxHooksSolPreempting,
                                                          rtThreadCtxHooksSolResumed,
                                                          NULL,                              /* fork */
                                                          NULL,                              /* lwp_create */
                                                          NULL,                              /* exit */
                                                          rtThreadCtxHooksSolFree);
        }
        AssertMsg(rc, ("removectx failed. rc=%d\n", rc));
        NOREF(rc);

#ifdef VBOX_STRICT
        cRefs = ASMAtomicReadU32(&pThis->cRefs);
        Assert(!cRefs);
#endif
        cRefs = 0;
    }
    else if (!cRefs)
    {
        /*
         * The ring-0 thread for this hook object has already died. Free up the object as we have no more references.
         */
        Assert(pThis->hOwner != RTThreadNativeSelf());
        ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXINT_MAGIC);
        RTMemFree(pThis);
    }

    return cRefs;
}
Example #21
/**
 * Creates the default logger instance for a VBox process.
 *
 * @returns Pointer to the logger instance.
 */
RTDECL(PRTLOGGER) RTLogDefaultInit(void)
{
    /*
     * Initialize the default logger instance.
     * Take care to do this once and not recursively.
     */
    static volatile uint32_t fInitializing = 0;
    PRTLOGGER pLogger;
    int rc;

    if (g_pLogger || !ASMAtomicCmpXchgU32(&fInitializing, 1, 0))
        return g_pLogger;

#ifdef IN_RING3
    /*
     * Assert the group definitions.
     */
#define ASSERT_LOG_GROUP(grp)  ASSERT_LOG_GROUP2(LOG_GROUP_##grp, #grp)
#define ASSERT_LOG_GROUP2(def, str) \
    do { if (strcmp(g_apszGroups[def], str)) {printf("%s='%s' expects '%s'\n", #def, g_apszGroups[def], str); RTAssertDoPanic(); } } while (0)
    ASSERT_LOG_GROUP(DEFAULT);
    ASSERT_LOG_GROUP(AUDIO_MIXER);
    ASSERT_LOG_GROUP(AUDIO_MIXER_BUFFER);
    ASSERT_LOG_GROUP(CFGM);
    ASSERT_LOG_GROUP(CPUM);
    ASSERT_LOG_GROUP(CSAM);
    ASSERT_LOG_GROUP(DBGC);
    ASSERT_LOG_GROUP(DBGF);
    ASSERT_LOG_GROUP(DBGF_INFO);
    ASSERT_LOG_GROUP(DEV);
    ASSERT_LOG_GROUP(DEV_AC97);
    ASSERT_LOG_GROUP(DEV_ACPI);
    ASSERT_LOG_GROUP(DEV_APIC);
    ASSERT_LOG_GROUP(DEV_FDC);
    ASSERT_LOG_GROUP(DEV_HDA);
    ASSERT_LOG_GROUP(DEV_HDA_CODEC);
    ASSERT_LOG_GROUP(DEV_HPET);
    ASSERT_LOG_GROUP(DEV_IDE);
    ASSERT_LOG_GROUP(DEV_KBD);
    ASSERT_LOG_GROUP(DEV_LPC);
    ASSERT_LOG_GROUP(DEV_NE2000);
    ASSERT_LOG_GROUP(DEV_PC);
    ASSERT_LOG_GROUP(DEV_PC_ARCH);
    ASSERT_LOG_GROUP(DEV_PC_BIOS);
    ASSERT_LOG_GROUP(DEV_PCI);
    ASSERT_LOG_GROUP(DEV_PCNET);
    ASSERT_LOG_GROUP(DEV_PIC);
    ASSERT_LOG_GROUP(DEV_PIT);
    ASSERT_LOG_GROUP(DEV_RTC);
    ASSERT_LOG_GROUP(DEV_SB16);
    ASSERT_LOG_GROUP(DEV_SERIAL);
    ASSERT_LOG_GROUP(DEV_SMC);
    ASSERT_LOG_GROUP(DEV_VGA);
    ASSERT_LOG_GROUP(DEV_VMM);
    ASSERT_LOG_GROUP(DEV_VMM_STDERR);
    ASSERT_LOG_GROUP(DIS);
    ASSERT_LOG_GROUP(DRV);
    ASSERT_LOG_GROUP(DRV_ACPI);
    ASSERT_LOG_GROUP(DRV_AUDIO);
    ASSERT_LOG_GROUP(DRV_BLOCK);
    ASSERT_LOG_GROUP(DRV_FLOPPY);
    ASSERT_LOG_GROUP(DRV_HOST_AUDIO);
    ASSERT_LOG_GROUP(DRV_HOST_DVD);
    ASSERT_LOG_GROUP(DRV_HOST_FLOPPY);
    ASSERT_LOG_GROUP(DRV_ISO);
    ASSERT_LOG_GROUP(DRV_KBD_QUEUE);
    ASSERT_LOG_GROUP(DRV_MOUSE_QUEUE);
    ASSERT_LOG_GROUP(DRV_NAT);
    ASSERT_LOG_GROUP(DRV_RAW_IMAGE);
    ASSERT_LOG_GROUP(DRV_TUN);
    ASSERT_LOG_GROUP(DRV_USBPROXY);
    ASSERT_LOG_GROUP(DRV_VBOXHDD);
    ASSERT_LOG_GROUP(DRV_VRDE_AUDIO);
    ASSERT_LOG_GROUP(DRV_VSWITCH);
    ASSERT_LOG_GROUP(DRV_VUSB);
    ASSERT_LOG_GROUP(EM);
    ASSERT_LOG_GROUP(GUI);
    ASSERT_LOG_GROUP(HGCM);
    ASSERT_LOG_GROUP(HM);
    ASSERT_LOG_GROUP(IOM);
    ASSERT_LOG_GROUP(LWIP);
    ASSERT_LOG_GROUP(MAIN);
    ASSERT_LOG_GROUP(MM);
    ASSERT_LOG_GROUP(MM_HEAP);
    ASSERT_LOG_GROUP(MM_HYPER);
    ASSERT_LOG_GROUP(MM_HYPER_HEAP);
    ASSERT_LOG_GROUP(MM_PHYS);
    ASSERT_LOG_GROUP(MM_POOL);
    ASSERT_LOG_GROUP(NAT_SERVICE);
    ASSERT_LOG_GROUP(NET_SERVICE);
    ASSERT_LOG_GROUP(PATM);
    ASSERT_LOG_GROUP(PDM);
    ASSERT_LOG_GROUP(PDM_DEVICE);
    ASSERT_LOG_GROUP(PDM_DRIVER);
    ASSERT_LOG_GROUP(PDM_LDR);
    ASSERT_LOG_GROUP(PDM_QUEUE);
    ASSERT_LOG_GROUP(PGM);
    ASSERT_LOG_GROUP(PGM_POOL);
    ASSERT_LOG_GROUP(REM);
    ASSERT_LOG_GROUP(REM_DISAS);
    ASSERT_LOG_GROUP(REM_HANDLER);
    ASSERT_LOG_GROUP(REM_IOPORT);
    ASSERT_LOG_GROUP(REM_MMIO);
    ASSERT_LOG_GROUP(REM_PRINTF);
    ASSERT_LOG_GROUP(REM_RUN);
    ASSERT_LOG_GROUP(SELM);
    ASSERT_LOG_GROUP(SSM);
    ASSERT_LOG_GROUP(STAM);
    ASSERT_LOG_GROUP(SUP);
    ASSERT_LOG_GROUP(TM);
    ASSERT_LOG_GROUP(TRPM);
    ASSERT_LOG_GROUP(VM);
    ASSERT_LOG_GROUP(VMM);
    ASSERT_LOG_GROUP(VRDP);
#undef ASSERT_LOG_GROUP
#undef ASSERT_LOG_GROUP2
#endif /* IN_RING3 */

    /*
     * Create the default logging instance.
     */
#ifdef IN_RING3
# ifndef IN_GUEST
    char szExecName[RTPATH_MAX];
    if (!RTProcGetExecutablePath(szExecName, sizeof(szExecName)))
        strcpy(szExecName, "VBox");
    RTTIMESPEC TimeSpec;
    RTTIME Time;
    RTTimeExplode(&Time, RTTimeNow(&TimeSpec));
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_FILE,
                     "./%04d-%02d-%02d-%02d-%02d-%02d.%03d-%s-%d.log",
                     Time.i32Year, Time.u8Month, Time.u8MonthDay, Time.u8Hour, Time.u8Minute, Time.u8Second, Time.u32Nanosecond / 10000000,
                     RTPathFilename(szExecName), RTProcSelf());
    if (RT_SUCCESS(rc))
    {
        /*
         * Write a log header.
         */
        char szBuf[RTPATH_MAX];
        RTTimeSpecToString(&TimeSpec, szBuf, sizeof(szBuf));
        RTLogLoggerEx(pLogger, 0, ~0U, "Log created: %s\n", szBuf);
        RTLogLoggerEx(pLogger, 0, ~0U, "Executable: %s\n", szExecName);

        /* executable and arguments - tricky and all platform specific. */
#  if defined(RT_OS_WINDOWS)
        RTLogLoggerEx(pLogger, 0, ~0U, "Commandline: %ls\n", GetCommandLineW());

#  elif defined(RT_OS_SOLARIS)
        psinfo_t psi;
        char szArgFileBuf[80];
        RTStrPrintf(szArgFileBuf, sizeof(szArgFileBuf), "/proc/%ld/psinfo", (long)getpid());
        FILE* pFile = fopen(szArgFileBuf, "rb");
        if (pFile)
        {
            if (fread(&psi, sizeof(psi), 1, pFile) == 1)
            {
#   if 0     /* 100% safe:*/
                RTLogLoggerEx(pLogger, 0, ~0U, "Args: %s\n", psi.pr_psargs);
#   else     /* probably safe: */
                const char * const *argv = (const char * const *)psi.pr_argv;
                for (int iArg = 0; iArg < psi.pr_argc; iArg++)
                    RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%d]: %s\n", iArg, argv[iArg]);
#   endif

            }
            fclose(pFile);
        }

#  elif defined(RT_OS_LINUX)
        FILE *pFile = fopen("/proc/self/cmdline", "r");
        if (pFile)
        {
            /* braindead */
            unsigned iArg = 0;
            int ch;
            bool fNew = true;
            while (!feof(pFile) && (ch = fgetc(pFile)) != EOF)
            {
                if (fNew)
                {
                    RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%u]: ", iArg++);
                    fNew = false;
                }
                if (ch)
                    RTLogLoggerEx(pLogger, 0, ~0U, "%c", ch);
                else
                {
                    RTLogLoggerEx(pLogger, 0, ~0U, "\n");
                    fNew = true;
                }
            }
            if (!fNew)
                RTLogLoggerEx(pLogger, 0, ~0U, "\n");
            fclose(pFile);
        }

#  elif defined(RT_OS_HAIKU)
        team_info info;
        if (get_team_info(0, &info) == B_OK)
        {
            /* there is an info.argc, but no way to know arg boundaries */
            RTLogLoggerEx(pLogger, 0, ~0U, "Commandline: %.64s\n", info.args);
        }

#  elif defined(RT_OS_FREEBSD)
        /* Retrieve the required length first */
        int aiName[4];
        aiName[0] = CTL_KERN;
        aiName[1] = KERN_PROC;
        aiName[2] = KERN_PROC_ARGS;     /* Introduced in FreeBSD 4.0 */
        aiName[3] = getpid();
        size_t cchArgs = 0;
        int rcBSD = sysctl(aiName, RT_ELEMENTS(aiName), NULL, &cchArgs, NULL, 0);
        if (cchArgs > 0)
        {
            char *pszArgFileBuf = (char *)RTMemAllocZ(cchArgs + 1 /* Safety */);
            if (pszArgFileBuf)
            {
                /* Retrieve the argument list */
                rcBSD = sysctl(aiName, RT_ELEMENTS(aiName), pszArgFileBuf, &cchArgs, NULL, 0);
                if (!rcBSD)
                {
                    unsigned    iArg = 0;
                    size_t      off = 0;
                    while (off < cchArgs)
                    {
                        size_t cchArg = strlen(&pszArgFileBuf[off]);
                        RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%u]: %s\n", iArg, &pszArgFileBuf[off]);

                        /* advance */
                        off += cchArg + 1;
                        iArg++;
                    }
                }
                RTMemFree(pszArgFileBuf);
            }
        }

#  elif defined(RT_OS_OS2) || defined(RT_OS_DARWIN)
        /* commandline? */
#  else
#   error needs porting.
#  endif
    }

# else  /* IN_GUEST */
    /* The user destination is backdoor logging. */
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_USER, "VBox.log");
# endif /* IN_GUEST */

#else /* IN_RING0 */

    /* Some platforms have trouble allocating memory with interrupts and/or
       preemption disabled. Check and fail before we panic. */
# if defined(RT_OS_DARWIN)
    if (   !ASMIntAreEnabled()
        || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        return NULL;
# endif

# ifndef IN_GUEST
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_FILE, "VBox-ring0.log");
# else  /* IN_GUEST */
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_USER, "VBox-ring0.log");
# endif /* IN_GUEST */
    if (RT_SUCCESS(rc))
    {
        /*
         * This is where you set your ring-0 logging preferences.
         *
         * On platforms which don't distinguish between debugger and kernel
         * log printing, STDOUT is going to be a stub and the DEBUGGER
         * destination is the one doing all the work. On platforms
         * that do distinguish (like Darwin), STDOUT is the kernel log.
         */
# if defined(DEBUG_bird)
        /*RTLogGroupSettings(pLogger, "all=~0 -default.l6.l5.l4.l3");*/
        RTLogFlags(pLogger, "enabled unbuffered pid tid");
#  ifndef IN_GUEST
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
#  endif
# endif
# if defined(DEBUG_sandervl) && !defined(IN_GUEST)
        RTLogGroupSettings(pLogger, "+all");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_ramshankar)  /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+all.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_aleksey)  /* Guest ring-0 as well */
        //RTLogGroupSettings(pLogger, "net_flt_drv.e.l.f.l3.l4.l5 +net_adp_drv.e.l.f.l3.l4.l5");
        RTLogGroupSettings(pLogger, "net_flt_drv.e.l.f.l3.l4.l5.l6");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if defined(DEBUG_andy)  /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+all.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered pid tid");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if defined(DEBUG_misha) /* Guest ring-0 as well */
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_michael) && defined(IN_GUEST)
        RTLogGroupSettings(pLogger, "+vga.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if 0 /* vboxdrv logging - ATTENTION: this is what we're referring to guys! Change to '# if 1'. */
        RTLogGroupSettings(pLogger, "all=~0 -default.l6.l5.l4.l3");
        RTLogFlags(pLogger, "enabled unbuffered tid");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
    }
#endif /* IN_RING0 */
    return g_pLogger = RT_SUCCESS(rc) ? pLogger : NULL;
}
Example #22
/**
 * Service request callback function.
 *
 * @returns VBox status code.
 * @param   pSession    The caller's session.
 * @param   u64Arg      64-bit integer argument.
 * @param   pReqHdr     The request header. Input / Output. Optional.
 */
DECLEXPORT(int) TSTR0ThreadPreemptionSrvReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                                   uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    NOREF(pSession);
    if (u64Arg)
        return VERR_INVALID_PARAMETER;
    if (!VALID_PTR(pReqHdr))
        return VERR_INVALID_PARAMETER;
    char   *pszErr = (char *)(pReqHdr + 1);
    size_t  cchErr = pReqHdr->cbReq - sizeof(*pReqHdr);
    if (cchErr < 32 || cchErr >= 0x10000)
        return VERR_INVALID_PARAMETER;
    *pszErr = '\0';

    /*
     * The big switch.
     */
    switch (uOperation)
    {
        case TSTR0THREADPREMEPTION_SANITY_OK:
            break;

        case TSTR0THREADPREMEPTION_SANITY_FAILURE:
            RTStrPrintf(pszErr, cchErr, "!42failure42%1024s", "");
            break;

        case TSTR0THREADPREMEPTION_BASIC:
        {
            if (!ASMIntAreEnabled())
                RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
            else if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns false by default");
            else
            {
                RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&State);
                if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after RTThreadPreemptDisable");
                else if (!ASMIntAreEnabled())
                    RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
                RTThreadPreemptRestore(&State);
            }
            break;
        }

        case TSTR0THREADPREMEPTION_IS_PENDING:
        {
            RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&State);
            if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
            {
                if (ASMIntAreEnabled())
                {
                    uint64_t    u64StartTS    = RTTimeNanoTS();
                    uint64_t    u64StartSysTS = RTTimeSystemNanoTS();
                    uint64_t    cLoops        = 0;
                    uint64_t    cNanosSysElapsed;
                    uint64_t    cNanosElapsed;
                    bool        fPending;
                    do
                    {
                        fPending         = RTThreadPreemptIsPending(NIL_RTTHREAD);
                        cNanosElapsed    = RTTimeNanoTS()       - u64StartTS;
                        cNanosSysElapsed = RTTimeSystemNanoTS() - u64StartSysTS;
                        cLoops++;
                    } while (   !fPending
                             && cNanosElapsed    < UINT64_C(2)*1000U*1000U*1000U
                             && cNanosSysElapsed < UINT64_C(2)*1000U*1000U*1000U
                             && cLoops           < 100U*_1M);
                    if (!fPending)
                        RTStrPrintf(pszErr, cchErr, "!Preempt not pending after %'llu loops / %'llu ns / %'llu ns (sys)",
                                    cLoops, cNanosElapsed, cNanosSysElapsed);
                    else if (cLoops == 1)
                        RTStrPrintf(pszErr, cchErr, "!cLoops=1\n");
                    else
                        RTStrPrintf(pszErr, cchErr, "RTThreadPreemptIsPending returned true after %'llu loops / %'llu ns / %'llu ns (sys)",
                                    cLoops, cNanosElapsed, cNanosSysElapsed);
                }
                else
                    RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
            }
            else
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after RTThreadPreemptDisable");
            RTThreadPreemptRestore(&State);
            break;
        }

        case TSTR0THREADPREMEPTION_NESTED:
        {
            bool const fDefault = RTThreadPreemptIsEnabled(NIL_RTTHREAD);
            RTTHREADPREEMPTSTATE State1 = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&State1);
            if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
            {
                RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&State2);
                if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                {
                    RTTHREADPREEMPTSTATE State3 = RTTHREADPREEMPTSTATE_INITIALIZER;
                    RTThreadPreemptDisable(&State3);
                    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                        RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 3rd RTThreadPreemptDisable");

                    RTThreadPreemptRestore(&State3);
                    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) && !*pszErr)
                        RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 1st RTThreadPreemptRestore");
                }
                else
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 2nd RTThreadPreemptDisable");

                RTThreadPreemptRestore(&State2);
                if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) && !*pszErr)
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 2nd RTThreadPreemptRestore");
            }
            else
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 1st RTThreadPreemptDisable");
            RTThreadPreemptRestore(&State1);
            if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) != fDefault && !*pszErr)
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns false after 3rd RTThreadPreemptRestore");
            break;
        }

        default:
            RTStrPrintf(pszErr, cchErr, "!Unknown test #%d", uOperation);
            break;
    }

    /* The error indicator is the '!' in the message buffer. */
    return VINF_SUCCESS;
}
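/*
 * Ring-3 consumer sketch (support driver plumbing omitted, function name
 * illustrative): after the service request returns, a leading '!' in the
 * message buffer marks a test failure.
 */
static void rtExampleCheckSrvReqResult(RTTEST hTest, const char *pszErr)
{
    if (pszErr[0] == '!')
        RTTestFailed(hTest, "%s", pszErr + 1);
    else if (pszErr[0])
        RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "%s\n", pszErr);
}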
Example #23
/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock; the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
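/*
 * Usage sketch (older-style API without the pVM parameter, function name
 * illustrative): callers pass a busy status to get back in RC/R0 instead of
 * blocking, and always pair the enter with PDMCritSectLeave.
 */
static void rtExampleUseCritSect(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... access the data the section guards ... */
        PDMCritSectLeave(pCritSect);
    }
}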