RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD);

    return curthread->td_critnest == 0
        && ASMIntAreEnabled(); /** @todo is there a native freebsd function/macro for this? */
}
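/*
 * Usage sketch (not from the original sources; exampleNoPreemptSection is a
 * hypothetical caller): RTThreadPreemptIsEnabled() is typically paired with
 * RTThreadPreemptDisable()/RTThreadPreemptRestore() to bracket per-CPU work.
 */
static void exampleNoPreemptSection(void)
{
    RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));  /* expect a preemptible context */
    RTThreadPreemptDisable(&State);
    /* ... work that must not migrate to another CPU or be preempted ... */
    RTThreadPreemptRestore(&State);
}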
RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
    /** @todo FreeBSD: Implement RTThreadIsInInterrupt. Required for guest
     *        additions! */
    return !ASMIntAreEnabled();
}
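/*
 * One possible native answer to the @todo above -- a sketch only, assuming
 * FreeBSD's struct thread member td_intr_nesting_level counts how deeply the
 * current thread is nested in interrupt handlers:
 */
RTDECL(bool) RTThreadIsInInterrupt(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD); NOREF(hThread);
    return curthread->td_intr_nesting_level != 0;
}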
RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
#ifdef CONFIG_PREEMPT
    Assert(hThread == NIL_RTTHREAD);
# ifdef preemptible
    return preemptible();
# else
    return preempt_count() == 0 && !in_atomic() && !irqs_disabled();
# endif
#else
    int32_t c;

    Assert(hThread == NIL_RTTHREAD);
    c = g_acPreemptDisabled[smp_processor_id()];
    AssertMsg(c >= 0 && c < 32, ("%d\n", c));
    if (c != 0)
        return false;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
    if (in_atomic())
        return false;
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 28)
    if (irqs_disabled())
        return false;
# else
    if (!ASMIntAreEnabled())
        return false;
# endif
    return true;
#endif
}
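/*
 * Sketch of the bookkeeping the non-CONFIG_PREEMPT path above depends on
 * (assumed shape, not verbatim): on non-preemptive kernels nothing needs to
 * be done to stop preemption, so IPRT only maintains its own per-CPU counter
 * for RTThreadPreemptIsEnabled() to inspect.
 */
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
#ifdef CONFIG_PREEMPT
    AssertPtr(pState);
    preempt_disable();  /* the kernel tracks the nesting count for us */
#else
    int32_t c;
    AssertPtr(pState);
    c = ASMAtomicIncS32(&g_acPreemptDisabled[smp_processor_id()]);
    AssertMsg(c > 0 && c < 32, ("%d\n", c));
    NOREF(pState);
#endif
}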
RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD);

    //XXX: can't do this, it might actually be held by another cpu
    //return !B_SPINLOCK_IS_LOCKED(&gThreadSpinlock);
    return ASMIntAreEnabled(); /** @todo find a better way. */
}
RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD);
    int32_t c = g_acPreemptDisabled[ASMGetApicId()];
    AssertMsg(c >= 0 && c < 32, ("%d\n", c));
    return c == 0
        && ASMIntAreEnabled();
}
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    KernAcquireSpinLock(&pThis->Spinlock);
    Assert(!ASMIntAreEnabled()); /** @todo verify that interrupts are disabled. */
}
RTDECL(bool) RTThreadPreemptIsEnabled(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD);
    KIRQL Irql = KeGetCurrentIrql();
    if (Irql > APC_LEVEL)
        return false;
    if (!ASMIntAreEnabled())
        return false;
    return true;
}
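/*
 * For contrast, a sketch (assumed, not verbatim) of how the NT port can make
 * the IRQL test above return false: raising to DISPATCH_LEVEL keeps the
 * dispatcher from rescheduling the current CPU until the IRQL is lowered.
 */
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    KeRaiseIrql(DISPATCH_LEVEL, &pState->uchOldIrql);  /* saved for RTThreadPreemptRestore */
}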
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = ASMIntDisableFlags();
#endif
        mutex_enter(&pThis->Mtx);

        /*
         * Solaris 10 doesn't preserve the interrupt flag, but since we're at PIL_MAX we should be
         * fine and not get interrupts while the lock is held. Re-disable interrupts so we don't
         * upset assertions & assumptions callers might have.
         */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMIntDisable();
        Assert(!ASMIntAreEnabled());
        pThis->fIntSaved = fIntSaved; /* fIntSaved only exists on x86/amd64, so this must stay inside the #if */
#endif
    }
    else
    {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        bool fIntsOn = ASMIntAreEnabled();
#endif

        mutex_enter(&pThis->Mtx);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
#endif
    }

    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
}
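/*
 * Usage sketch (hypothetical driver code, assuming the three-argument
 * RTSpinlockCreate variant): requesting RTSPINLOCK_FLAGS_INTERRUPT_SAFE at
 * creation is what sends RTSpinlockAcquire() down the interrupt-disabling
 * path above.
 */
static int exampleIrqSafeLocking(void)
{
    RTSPINLOCK hSpinlock;
    int rc = RTSpinlockCreate(&hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "example");
    if (RT_SUCCESS(rc))
    {
        RTSpinlockAcquire(hSpinlock);
        /* ... touch data shared with an interrupt handler ... */
        RTSpinlockRelease(hSpinlock);
        RTSpinlockDestroy(hSpinlock);
    }
    return rc;
}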
RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
{
    if (pch[cb] != '\0')
        AssertBreakpoint();
    if (    !g_frtSolarisSplSetsEIF
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ||  ASMIntAreEnabled()
#else
/* PORTME: Check if interrupts are enabled, if applicable. */
#endif
        )
        cmn_err(CE_CONT, pch);
    return;
}
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE_VARS();

    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE(pThis);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
#endif
        mutex_exit(&pThis->Mtx);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);
#endif
    }
    else
    {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        bool fIntsOn = ASMIntAreEnabled();
#endif

        mutex_exit(&pThis->Mtx);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
#endif
    }

    RT_ASSERT_PREEMPT_CPUID();
}
RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
{
    if (pch[cb] != '\0')
        AssertBreakpoint();
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))    /** @todo this will change when preemption hooks are implemented. */
        return;
    if (    !g_frtSolSplSetsEIF
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ||  ASMIntAreEnabled()
#else
/* PORTME: Check if interrupts are enabled, if applicable. */
#endif
        )
        cmn_err(CE_CONT, pch);
    return;
}
RTDECL(void) RTLogWriteDebugger(const char *pch, size_t cb)
{
    if (pch[cb] != '\0')
        AssertBreakpoint();

    /* cmn_err() acquires adaptive mutexes. Not preemption safe, see @bugref{6657}. */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        return;

    if (    !g_frtSolSplSetsEIF
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ||  ASMIntAreEnabled()
#else
/* PORTME: Check if interrupts are enabled, if applicable. */
#endif
        )
    {
        cmn_err(CE_CONT, pch);
    }

    return;
}
DECLHIDDEN(int) rtR0InitNative(void)
{
    /*
     * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
     */
    int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
    if (RT_SUCCESS(rc))
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /*
         * Detect whether spl*() is preserving the interrupt flag or not.
         * This is a problem on S10.
         */
        RTCCUINTREG uOldFlags = ASMIntDisableFlags();
        int iOld = splr(DISP_LEVEL);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        splx(iOld);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        ASMSetFlags(uOldFlags);
#else
        /* PORTME: See if the amd64/x86 problem applies to this architecture. */
#endif
        /*
         * Mandatory: Preemption offsets.
         */
        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_did", &g_offrtSolThreadId);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_did!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_intr", &g_offrtSolThreadIntrThread);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_intr!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_lockp", &g_offrtSolThreadLock);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_lockp!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_procp", &g_offrtSolThreadProc);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_procp!\n");
            goto errorbail;
        }
        cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx (%ld)\n",    g_offrtSolCpuPreempt, g_offrtSolCpuPreempt);
        cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx (%ld)\n",  g_offrtSolCpuForceKernelPreempt, g_offrtSolCpuForceKernelPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx (%ld)\n", g_offrtSolThreadPreempt, g_offrtSolThreadPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_did @ 0x%lx (%ld)\n",     g_offrtSolThreadId, g_offrtSolThreadId);
        cmn_err(CE_CONT, "!kthread_t::t_intr @ 0x%lx (%ld)\n",    g_offrtSolThreadIntrThread, g_offrtSolThreadIntrThread);
        cmn_err(CE_CONT, "!kthread_t::t_lockp @ 0x%lx (%ld)\n",   g_offrtSolThreadLock, g_offrtSolThreadLock);
        cmn_err(CE_CONT, "!kthread_t::t_procp @ 0x%lx (%ld)\n",   g_offrtSolThreadProc, g_offrtSolThreadProc);

        /*
         * Mandatory: CPU cross call infrastructure. Refer to the-solaris-kernel.h for details.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            if (ncpus > IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
            g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
        }
        else
        {
            g_frtSolOldIPI = true;
            g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
            if (max_cpuid + 1 == sizeof(ulong_t) * 8)
            {
                g_frtSolOldIPIUlong = true;
                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
            }
            else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid,
                        IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
        }

        /*
         * Mandatory: Thread-context hooks.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "exitctx",  NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            g_rtSolThreadCtx.Install.pfnSol_installctx = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx   = (void *)removectx;
        }
        else
        {
            g_frtSolOldThreadCtx = true;
            g_rtSolThreadCtx.Install.pfnSol_installctx_old = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx_old   = (void *)removectx;
        }

        /*
         * Optional: Timeout hooks.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic",
                                   (void **)&g_pfnrtR0Sol_timeout_generic);
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic",
                                   (void **)&g_pfnrtR0Sol_untimeout_generic);
        if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
        {
            static const char *s_apszFn[2] = { "timeout_generic", "untimeout_generic" };
            bool iMissingFn = g_pfnrtR0Sol_timeout_generic == NULL;
            cmn_err(CE_NOTE, "rtR0InitNative: Weird! Found %s but not %s!\n", s_apszFn[!iMissingFn], s_apszFn[iMissingFn]);
            g_pfnrtR0Sol_timeout_generic   = NULL;
            g_pfnrtR0Sol_untimeout_generic = NULL;
        }
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram",
                                   (void **)&g_pfnrtR0Sol_cyclic_reprogram);

        /*
         * Optional: Querying page no-relocation support.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /*pszModule */, "page_noreloc_supported",
                                   (void **)&g_pfnrtR0Sol_page_noreloc_supported);

        /*
         * Weak binding failures: contig_free
         */
        if (g_pfnrtR0Sol_contig_free == NULL)
        {
            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free",
                                            (void **)&g_pfnrtR0Sol_contig_free);
            if (RT_FAILURE(rc))
            {
                cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
                goto errorbail;
            }
        }

        g_frtSolInitDone = true;
        return VINF_SUCCESS;
    }
    else
    {
        cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
        return rc;
    }

errorbail:
    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
    return rc;
}
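/*
 * Sketch of how the offsets resolved above get consumed (assumed shape;
 * rtSolThreadPreemptPtr is illustrative): IPRT addresses the kernel thread
 * members it cannot name at compile time via raw offset arithmetic. Solaris
 * holds off preemption while kthread_t::t_preempt is non-zero.
 */
DECLINLINE(char *) rtSolThreadPreemptPtr(void *pvThread)
{
    return (char *)pvThread + g_offrtSolThreadPreempt;
}
/* e.g. disabling native preemption for the calling thread:
       (*rtSolThreadPreemptPtr(curthread))++;                                */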
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   rcBusy              The status code to return when we're in GC or R0
 *                              and the section is busy.
 * @param   pSrcPos             The source position of the lock operation (for
 *                              the lock validator).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jump back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call returns it will call ring-0
     *        again and resume in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * If preemption hasn't been disabled, we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
#endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
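/*
 * Usage sketch (hypothetical device code): the rcBusy parameter picks what a
 * contended ring-0/RC caller gets back instead of blocking, while passing
 * VINF_SUCCESS asks the worker above to drop to ring-3 and take the section
 * for real.
 */
static int exampleGuardedDeviceWork(PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    if (rc != VINF_SUCCESS)
        return rc;                  /* busy in R0/RC: caller defers or retries */
    /* ... access the state guarded by the critical section ... */
    PDMCritSectLeave(pCritSect);
    return VINF_SUCCESS;
}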
/**
 * Service request callback function.
 *
 * @returns VBox status code.
 * @param   pSession    The caller's session.
 * @param   uOperation  The operation (test) to perform.
 * @param   u64Arg      64-bit integer argument.
 * @param   pReqHdr     The request header. Input / Output. Optional.
 */
DECLEXPORT(int) TSTR0ThreadPreemptionSrvReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                                   uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    NOREF(pSession);
    if (u64Arg)
        return VERR_INVALID_PARAMETER;
    if (!VALID_PTR(pReqHdr))
        return VERR_INVALID_PARAMETER;
    char   *pszErr = (char *)(pReqHdr + 1);
    size_t  cchErr = pReqHdr->cbReq - sizeof(*pReqHdr);
    if (cchErr < 32 || cchErr >= 0x10000)
        return VERR_INVALID_PARAMETER;
    *pszErr = '\0';

    /*
     * The big switch.
     */
    switch (uOperation)
    {
        case TSTR0THREADPREMEPTION_SANITY_OK:
            break;

        case TSTR0THREADPREMEPTION_SANITY_FAILURE:
            RTStrPrintf(pszErr, cchErr, "!42failure42%1024s", "");
            break;

        case TSTR0THREADPREMEPTION_BASIC:
        {
            if (!ASMIntAreEnabled())
                RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
            else if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns false by default");
            else
            {
                RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&State);
                if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after RTThreadPreemptDisable");
                else if (!ASMIntAreEnabled())
                    RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
                RTThreadPreemptRestore(&State);
            }
            break;
        }

        case TSTR0THREADPREMEPTION_IS_PENDING:
        {
            RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&State);
            if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
            {
                if (ASMIntAreEnabled())
                {
                    uint64_t    u64StartTS    = RTTimeNanoTS();
                    uint64_t    u64StartSysTS = RTTimeSystemNanoTS();
                    uint64_t    cLoops        = 0;
                    uint64_t    cNanosSysElapsed;
                    uint64_t    cNanosElapsed;
                    bool        fPending;
                    do
                    {
                        fPending         = RTThreadPreemptIsPending(NIL_RTTHREAD);
                        cNanosElapsed    = RTTimeNanoTS()       - u64StartTS;
                        cNanosSysElapsed = RTTimeSystemNanoTS() - u64StartSysTS;
                        cLoops++;
                    } while (   !fPending
                             && cNanosElapsed    < UINT64_C(2)*1000U*1000U*1000U
                             && cNanosSysElapsed < UINT64_C(2)*1000U*1000U*1000U
                             && cLoops           < 100U*_1M);
                    if (!fPending)
                        RTStrPrintf(pszErr, cchErr, "!Preempt not pending after %'llu loops / %'llu ns / %'llu ns (sys)",
                                    cLoops, cNanosElapsed, cNanosSysElapsed);
                    else if (cLoops == 1)
                        RTStrPrintf(pszErr, cchErr, "!cLoops=1\n");
                    else
                        RTStrPrintf(pszErr, cchErr, "RTThreadPreemptIsPending returned true after %'llu loops / %'llu ns / %'llu ns (sys)",
                                    cLoops, cNanosElapsed, cNanosSysElapsed);
                }
                else
                    RTStrPrintf(pszErr, cchErr, "!Interrupts disabled");
            }
            else
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after RTThreadPreemptDisable");
            RTThreadPreemptRestore(&State);
            break;
        }

        case TSTR0THREADPREMEPTION_NESTED:
        {
            bool const fDefault = RTThreadPreemptIsEnabled(NIL_RTTHREAD);
            RTTHREADPREEMPTSTATE State1 = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&State1);
            if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
            {
                RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&State2);
                if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                {
                    RTTHREADPREEMPTSTATE State3 = RTTHREADPREEMPTSTATE_INITIALIZER;
                    RTThreadPreemptDisable(&State3);
                    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
                        RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 3rd RTThreadPreemptDisable");

                    RTThreadPreemptRestore(&State3);
                    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) && !*pszErr)
                        RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 1st RTThreadPreemptRestore");
                }
                else
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 2nd RTThreadPreemptDisable");

                RTThreadPreemptRestore(&State2);
                if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) && !*pszErr)
                    RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 2nd RTThreadPreemptRestore");
            }
            else
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns true after 1st RTThreadPreemptDisable");
            RTThreadPreemptRestore(&State1);
            if (RTThreadPreemptIsEnabled(NIL_RTTHREAD) != fDefault && !*pszErr)
                RTStrPrintf(pszErr, cchErr, "!RTThreadPreemptIsEnabled returns false after 3rd RTThreadPreemptRestore");
            break;
        }

        default:
            RTStrPrintf(pszErr, cchErr, "!Unknown test #%d", uOperation);
            break;
    }

    /* The error indicator is the '!' in the message buffer. */
    return VINF_SUCCESS;
}
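/*
 * Ring-3 side sketch of driving the handler above (assumptions: the
 * SUPR3CallR0Service() entry point, the SUPR0SERVICEREQHDR layout, and the
 * service name, which is hypothetical here; the buffer size is illustrative):
 */
static int exampleCallPreemptionTest(uint32_t uOperation)
{
    union
    {
        SUPR0SERVICEREQHDR Hdr;
        char               ab[sizeof(SUPR0SERVICEREQHDR) + 1024]; /* room for the error string */
    } Req;
    Req.Hdr.u32Magic = SUPR0SERVICEREQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    int rc = SUPR3CallR0Service("tstR0ThreadPreemption" /* hypothetical */,
                                sizeof("tstR0ThreadPreemption") - 1,
                                uOperation, 0 /* u64Arg */, &Req.Hdr);
    if (RT_SUCCESS(rc) && *(char *)(&Req.Hdr + 1) == '!')
        rc = VERR_GENERAL_FAILURE;  /* the '!' prefix flags a test failure */
    return rc;
}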
/**
 * Helper for RTSemSpinMutexTryRequest and RTSemSpinMutexRequest.
 *
 * This will check the current context and see if it's usable for acquiring
 * the spin mutex, filling in the state structure accordingly.
 *
 * @returns VINF_SUCCESS or VERR_SEM_BAD_CONTEXT.
 * @param   pState      Output structure.
 * @param   pThis       The spin mutex instance.
 */
static int rtSemSpinMutexEnter(RTSEMSPINMUTEXSTATE *pState, RTSEMSPINMUTEXINTERNAL *pThis)
{
#ifndef RT_OS_WINDOWS
    RTTHREADPREEMPTSTATE const StateInit = RTTHREADPREEMPTSTATE_INITIALIZER;
#endif
    int rc  = VINF_SUCCESS;

    /** @todo Later #1: When entering in interrupt context and we're not able to
     *        wake up threads from it, we could try switch the lock into pure
     *        spinlock mode. This would require that there are no other threads
     *        currently waiting on it and that the RTSEMSPINMUTEX_FLAGS_IRQ_SAFE
     *        flag is set.
     *
     *        Later #2: Similarly, it is possible to turn on the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE at run time if we manage to grab the
     *        semaphore ownership at interrupt time. We might want to try delay the
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE even, since we're fine if we get it...
     */

#ifdef RT_OS_WINDOWS
    /*
     * NT: IRQL <= DISPATCH_LEVEL for waking up threads; IRQL < DISPATCH_LEVEL for sleeping.
     */
    pState->PreemptState.uchOldIrql = KeGetCurrentIrql();
    if (pState->PreemptState.uchOldIrql > DISPATCH_LEVEL)
        return VERR_SEM_BAD_CONTEXT;

    if (pState->PreemptState.uchOldIrql >= DISPATCH_LEVEL)
        pState->fSpin = true;
    else
    {
        pState->fSpin = false;
        KeRaiseIrql(DISPATCH_LEVEL, &pState->PreemptState.uchOldIrql);
        Assert(pState->PreemptState.uchOldIrql < DISPATCH_LEVEL);
    }

#elif defined(RT_OS_SOLARIS)
    /*
     * Solaris:  RTSemEventSignal will do bad stuff on S10 if interrupts are disabled.
     */
    if (!ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#elif defined(RT_OS_LINUX) || defined(RT_OS_OS2)
    /*
     * OSes on which RTSemEventSignal can be called from any context.
     */
    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#else /* PORTME: Check for context where we cannot wake up threads. */
    /*
     * Default: ASSUME thread can be woken up if interrupts are enabled and
     *          we're not in an interrupt context.
     *          ASSUME that we can go to sleep if preemption is enabled.
     */
    if (    RTThreadIsInInterrupt(NIL_RTTHREAD)
        ||  !ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);
#endif

    /*
     * Disable interrupts if necessary.
     */
    pState->fValidFlags = !!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE);
    if (pState->fValidFlags)
        pState->fSavedFlags = ASMIntDisableFlags();
    else
        pState->fSavedFlags = 0;

    return rc;
}
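/*
 * Sketch of the matching restore path (assumed shape, exampleSpinMutexLeave
 * is illustrative): undo what rtSemSpinMutexEnter() set up, in reverse order.
 */
static void exampleSpinMutexLeave(RTSEMSPINMUTEXSTATE *pState)
{
    /* Re-enable interrupts if we disabled them. */
    if (pState->fValidFlags)
        ASMSetFlags(pState->fSavedFlags);

#ifdef RT_OS_WINDOWS
    /* NT: drop back to the IRQL we entered at, if we raised it. */
    if (pState->PreemptState.uchOldIrql < DISPATCH_LEVEL)
        KeLowerIrql(pState->PreemptState.uchOldIrql);
#else
    /* Everywhere else: a plain preemption restore. */
    RTThreadPreemptRestore(&pState->PreemptState);
#endif
}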
/**
 * Creates the default logger instance for a VBox process.
 *
 * @returns Pointer to the logger instance.
 */
RTDECL(PRTLOGGER) RTLogDefaultInit(void)
{
    /*
     * Initialize the default logger instance.
     * Take care to do this once and not recursively.
     */
    static volatile uint32_t fInitializing = 0;
    PRTLOGGER pLogger;
    int rc;

    if (g_pLogger || !ASMAtomicCmpXchgU32(&fInitializing, 1, 0))
        return g_pLogger;

#ifdef IN_RING3
    /*
     * Assert the group definitions.
     */
#define ASSERT_LOG_GROUP(grp)  ASSERT_LOG_GROUP2(LOG_GROUP_##grp, #grp)
#define ASSERT_LOG_GROUP2(def, str) \
    do { if (strcmp(g_apszGroups[def], str)) {printf("%s='%s' expects '%s'\n", #def, g_apszGroups[def], str); RTAssertDoPanic(); } } while (0)
    ASSERT_LOG_GROUP(DEFAULT);
    ASSERT_LOG_GROUP(AUDIO_MIXER);
    ASSERT_LOG_GROUP(AUDIO_MIXER_BUFFER);
    ASSERT_LOG_GROUP(CFGM);
    ASSERT_LOG_GROUP(CPUM);
    ASSERT_LOG_GROUP(CSAM);
    ASSERT_LOG_GROUP(DBGC);
    ASSERT_LOG_GROUP(DBGF);
    ASSERT_LOG_GROUP(DBGF_INFO);
    ASSERT_LOG_GROUP(DEV);
    ASSERT_LOG_GROUP(DEV_AC97);
    ASSERT_LOG_GROUP(DEV_ACPI);
    ASSERT_LOG_GROUP(DEV_APIC);
    ASSERT_LOG_GROUP(DEV_FDC);
    ASSERT_LOG_GROUP(DEV_HDA);
    ASSERT_LOG_GROUP(DEV_HDA_CODEC);
    ASSERT_LOG_GROUP(DEV_HPET);
    ASSERT_LOG_GROUP(DEV_IDE);
    ASSERT_LOG_GROUP(DEV_KBD);
    ASSERT_LOG_GROUP(DEV_LPC);
    ASSERT_LOG_GROUP(DEV_NE2000);
    ASSERT_LOG_GROUP(DEV_PC);
    ASSERT_LOG_GROUP(DEV_PC_ARCH);
    ASSERT_LOG_GROUP(DEV_PC_BIOS);
    ASSERT_LOG_GROUP(DEV_PCI);
    ASSERT_LOG_GROUP(DEV_PCNET);
    ASSERT_LOG_GROUP(DEV_PIC);
    ASSERT_LOG_GROUP(DEV_PIT);
    ASSERT_LOG_GROUP(DEV_RTC);
    ASSERT_LOG_GROUP(DEV_SB16);
    ASSERT_LOG_GROUP(DEV_SERIAL);
    ASSERT_LOG_GROUP(DEV_SMC);
    ASSERT_LOG_GROUP(DEV_VGA);
    ASSERT_LOG_GROUP(DEV_VMM);
    ASSERT_LOG_GROUP(DEV_VMM_STDERR);
    ASSERT_LOG_GROUP(DIS);
    ASSERT_LOG_GROUP(DRV);
    ASSERT_LOG_GROUP(DRV_ACPI);
    ASSERT_LOG_GROUP(DRV_AUDIO);
    ASSERT_LOG_GROUP(DRV_BLOCK);
    ASSERT_LOG_GROUP(DRV_FLOPPY);
    ASSERT_LOG_GROUP(DRV_HOST_AUDIO);
    ASSERT_LOG_GROUP(DRV_HOST_DVD);
    ASSERT_LOG_GROUP(DRV_HOST_FLOPPY);
    ASSERT_LOG_GROUP(DRV_ISO);
    ASSERT_LOG_GROUP(DRV_KBD_QUEUE);
    ASSERT_LOG_GROUP(DRV_MOUSE_QUEUE);
    ASSERT_LOG_GROUP(DRV_NAT);
    ASSERT_LOG_GROUP(DRV_RAW_IMAGE);
    ASSERT_LOG_GROUP(DRV_TUN);
    ASSERT_LOG_GROUP(DRV_USBPROXY);
    ASSERT_LOG_GROUP(DRV_VBOXHDD);
    ASSERT_LOG_GROUP(DRV_VRDE_AUDIO);
    ASSERT_LOG_GROUP(DRV_VSWITCH);
    ASSERT_LOG_GROUP(DRV_VUSB);
    ASSERT_LOG_GROUP(EM);
    ASSERT_LOG_GROUP(GUI);
    ASSERT_LOG_GROUP(HGCM);
    ASSERT_LOG_GROUP(HM);
    ASSERT_LOG_GROUP(IOM);
    ASSERT_LOG_GROUP(LWIP);
    ASSERT_LOG_GROUP(MAIN);
    ASSERT_LOG_GROUP(MM);
    ASSERT_LOG_GROUP(MM_HEAP);
    ASSERT_LOG_GROUP(MM_HYPER);
    ASSERT_LOG_GROUP(MM_HYPER_HEAP);
    ASSERT_LOG_GROUP(MM_PHYS);
    ASSERT_LOG_GROUP(MM_POOL);
    ASSERT_LOG_GROUP(NAT_SERVICE);
    ASSERT_LOG_GROUP(NET_SERVICE);
    ASSERT_LOG_GROUP(PATM);
    ASSERT_LOG_GROUP(PDM);
    ASSERT_LOG_GROUP(PDM_DEVICE);
    ASSERT_LOG_GROUP(PDM_DRIVER);
    ASSERT_LOG_GROUP(PDM_LDR);
    ASSERT_LOG_GROUP(PDM_QUEUE);
    ASSERT_LOG_GROUP(PGM);
    ASSERT_LOG_GROUP(PGM_POOL);
    ASSERT_LOG_GROUP(REM);
    ASSERT_LOG_GROUP(REM_DISAS);
    ASSERT_LOG_GROUP(REM_HANDLER);
    ASSERT_LOG_GROUP(REM_IOPORT);
    ASSERT_LOG_GROUP(REM_MMIO);
    ASSERT_LOG_GROUP(REM_PRINTF);
    ASSERT_LOG_GROUP(REM_RUN);
    ASSERT_LOG_GROUP(SELM);
    ASSERT_LOG_GROUP(SSM);
    ASSERT_LOG_GROUP(STAM);
    ASSERT_LOG_GROUP(SUP);
    ASSERT_LOG_GROUP(TM);
    ASSERT_LOG_GROUP(TRPM);
    ASSERT_LOG_GROUP(VM);
    ASSERT_LOG_GROUP(VMM);
    ASSERT_LOG_GROUP(VRDP);
#undef ASSERT_LOG_GROUP
#undef ASSERT_LOG_GROUP2
#endif /* IN_RING3 */

    /*
     * Create the default logging instance.
     */
#ifdef IN_RING3
# ifndef IN_GUEST
    char szExecName[RTPATH_MAX];
    if (!RTProcGetExecutablePath(szExecName, sizeof(szExecName)))
        strcpy(szExecName, "VBox");
    RTTIMESPEC TimeSpec;
    RTTIME Time;
    RTTimeExplode(&Time, RTTimeNow(&TimeSpec));
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_FILE,
                     "./%04d-%02d-%02d-%02d-%02d-%02d.%03d-%s-%d.log",
                     Time.i32Year, Time.u8Month, Time.u8MonthDay, Time.u8Hour, Time.u8Minute, Time.u8Second, Time.u32Nanosecond / 10000000,
                     RTPathFilename(szExecName), RTProcSelf());
    if (RT_SUCCESS(rc))
    {
        /*
         * Write a log header.
         */
        char szBuf[RTPATH_MAX];
        RTTimeSpecToString(&TimeSpec, szBuf, sizeof(szBuf));
        RTLogLoggerEx(pLogger, 0, ~0U, "Log created: %s\n", szBuf);
        RTLogLoggerEx(pLogger, 0, ~0U, "Executable: %s\n", szExecName);

        /* executable and arguments - tricky and all platform specific. */
#  if defined(RT_OS_WINDOWS)
        RTLogLoggerEx(pLogger, 0, ~0U, "Commandline: %ls\n", GetCommandLineW());

#  elif defined(RT_OS_SOLARIS)
        psinfo_t psi;
        char szArgFileBuf[80];
        RTStrPrintf(szArgFileBuf, sizeof(szArgFileBuf), "/proc/%ld/psinfo", (long)getpid());
        FILE* pFile = fopen(szArgFileBuf, "rb");
        if (pFile)
        {
            if (fread(&psi, sizeof(psi), 1, pFile) == 1)
            {
#   if 0     /* 100% safe:*/
                RTLogLoggerEx(pLogger, 0, ~0U, "Args: %s\n", psi.pr_psargs);
#   else     /* probably safe: */
                const char * const *argv = (const char * const *)psi.pr_argv;
                for (int iArg = 0; iArg < psi.pr_argc; iArg++)
                    RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%d]: %s\n", iArg, argv[iArg]);
#   endif

            }
            fclose(pFile);
        }

#  elif defined(RT_OS_LINUX)
        FILE *pFile = fopen("/proc/self/cmdline", "r");
        if (pFile)
        {
            /* braindead */
            unsigned iArg = 0;
            int ch;
            bool fNew = true;
            while (!feof(pFile) && (ch = fgetc(pFile)) != EOF)
            {
                if (fNew)
                {
                    RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%u]: ", iArg++);
                    fNew = false;
                }
                if (ch)
                    RTLogLoggerEx(pLogger, 0, ~0U, "%c", ch);
                else
                {
                    RTLogLoggerEx(pLogger, 0, ~0U, "\n");
                    fNew = true;
                }
            }
            if (!fNew)
                RTLogLoggerEx(pLogger, 0, ~0U, "\n");
            fclose(pFile);
        }

#  elif defined(RT_OS_HAIKU)
        team_info info;
        if (get_team_info(0, &info) == B_OK)
        {
            /* there is an info.argc, but no way to know arg boundaries */
            RTLogLoggerEx(pLogger, 0, ~0U, "Commandline: %.64s\n", info.args);
        }

#  elif defined(RT_OS_FREEBSD)
        /* Retrieve the required length first */
        int aiName[4];
        aiName[0] = CTL_KERN;
        aiName[1] = KERN_PROC;
        aiName[2] = KERN_PROC_ARGS;     /* Introduced in FreeBSD 4.0 */
        aiName[3] = getpid();
        size_t cchArgs = 0;
        int rcBSD = sysctl(aiName, RT_ELEMENTS(aiName), NULL, &cchArgs, NULL, 0);
        if (cchArgs > 0)
        {
            char *pszArgFileBuf = (char *)RTMemAllocZ(cchArgs + 1 /* Safety */);
            if (pszArgFileBuf)
            {
                /* Retrieve the argument list */
                rcBSD = sysctl(aiName, RT_ELEMENTS(aiName), pszArgFileBuf, &cchArgs, NULL, 0);
                if (!rcBSD)
                {
                    unsigned    iArg = 0;
                    size_t      off = 0;
                    while (off < cchArgs)
                    {
                        size_t cchArg = strlen(&pszArgFileBuf[off]);
                        RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%u]: %s\n", iArg, &pszArgFileBuf[off]);

                        /* advance */
                        off += cchArg + 1;
                        iArg++;
                    }
                }
                RTMemFree(pszArgFileBuf);
            }
        }

#  elif defined(RT_OS_OS2) || defined(RT_OS_DARWIN)
        /* commandline? */
#  else
#   error needs porting.
#  endif
    }

# else  /* IN_GUEST */
    /* The user destination is backdoor logging. */
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_USER, "VBox.log");
# endif /* IN_GUEST */

#else /* IN_RING0 */

    /* Some platforms have trouble allocating memory with interrupts and/or
       preemption disabled. Check and fail before we panic. */
# if defined(RT_OS_DARWIN)
    if (   !ASMIntAreEnabled()
        || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        return NULL;
# endif

# ifndef IN_GUEST
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_FILE, "VBox-ring0.log");
# else  /* IN_GUEST */
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_USER, "VBox-ring0.log");
# endif /* IN_GUEST */
    if (RT_SUCCESS(rc))
    {
        /*
         * This is where you set your ring-0 logging preferences.
         *
         * On platforms which don't differ between debugger and kernel
         * log printing, STDOUT is gonna be a stub and the DEBUGGER
         * destination is the one doing all the work. On platforms
         * that do differ (like Darwin), STDOUT is the kernel log.
         */
# if defined(DEBUG_bird)
        /*RTLogGroupSettings(pLogger, "all=~0 -default.l6.l5.l4.l3");*/
        RTLogFlags(pLogger, "enabled unbuffered pid tid");
#  ifndef IN_GUEST
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
#  endif
# endif
# if defined(DEBUG_sandervl) && !defined(IN_GUEST)
        RTLogGroupSettings(pLogger, "+all");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_ramshankar)  /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+all.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_aleksey)  /* Guest ring-0 as well */
        //RTLogGroupSettings(pLogger, "net_flt_drv.e.l.f.l3.l4.l5 +net_adp_drv.e.l.f.l3.l4.l5");
        RTLogGroupSettings(pLogger, "net_flt_drv.e.l.f.l3.l4.l5.l6");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if defined(DEBUG_andy)  /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+all.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered pid tid");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if defined(DEBUG_misha) /* Guest ring-0 as well */
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_michael) && defined(IN_GUEST)
        RTLogGroupSettings(pLogger, "+vga.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if 0 /* vboxdrv logging - ATTENTION: this is what we're referring to guys! Change to '# if 1'. */
        RTLogGroupSettings(pLogger, "all=~0 -default.l6.l5.l4.l3");
        RTLogFlags(pLogger, "enabled unbuffered tid");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
    }
#endif /* IN_RING0 */
    return g_pLogger = RT_SUCCESS(rc) ? pLogger : NULL;
}
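/*
 * Usage sketch (exampleLogSomething is a hypothetical call site): the
 * function above creates the instance once and hands back the existing
 * g_pLogger on later calls, so callers can simply fetch it and log.
 */
static void exampleLogSomething(void)
{
    PRTLOGGER pLogger = RTLogDefaultInit();
    if (pLogger)
        RTLogLoggerEx(pLogger, 0 /* fFlags */, ~0U /* iGroup */, "example: %u\n", 42);
}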