RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC, ("magic=%#x\n", pThis->u32Magic));

    KIRQL SavedIrql = pThis->SavedIrql;
    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        RTCCUINTREG fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;

#ifndef RTSPINLOCK_NT_HACK_NOIRQ
        KeReleaseSpinLock(&pThis->Spinlock, SavedIrql);
        ASMSetFlags(fIntSaved);
#else
        Assert(pThis->u32Hack == RTSPINLOCK_NT_HACK_NOIRQ_TAKEN);

        ASMAtomicWriteU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_FREE);
        ASMSetFlags(fIntSaved);
        if (SavedIrql < DISPATCH_LEVEL)
            KeLowerIrql(SavedIrql);
#endif
    }
    else
        KeReleaseSpinLock(&pThis->Spinlock, SavedIrql);
}
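A minimal usage sketch for context, assuming the usual IPRT spinlock API (RTSpinlockCreate/Acquire/Release/Destroy); the handle name and the guarded work are illustrative, not taken from the example above:

RTSPINLOCK hSpinlock = NIL_RTSPINLOCK;
int rc = RTSpinlockCreate(&hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "Example"); /* assumed creation API */
if (RT_SUCCESS(rc))
{
    RTSpinlockAcquire(hSpinlock);       /* saves and disables the interrupt flag */
    /* ... touch data shared with interrupt context ... */
    RTSpinlockRelease(hSpinlock);       /* the release path above restores it */
    RTSpinlockDestroy(hSpinlock);
}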
Example #2
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE_VARS();

    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE(pThis);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
        if (ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
            ASMSetFlags(fIntSaved);
        else
            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
    }
    else
    {
        if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
    }

    critical_exit();
}
Example #3
RTDECL(void) RTThreadPreemptDisable(PRTTHREADPREEMPTSTATE pState)
{
    AssertPtr(pState);
    Assert(pState->u32Reserved == 0);
    pState->u32Reserved = 42;

    /*
     * Disable interrupts to prevent preemption while we grab the per-cpu spinlock.
     * Note! Only take the lock on the first call or we end up spinning forever.
     */
    RTCCUINTREG fSavedFlags = ASMIntDisableFlags();
    RTCPUID     idCpu       = RTMpCpuId();
    if (RT_UNLIKELY(idCpu < RT_ELEMENTS(g_aPreemptHacks)))
    {
        Assert(g_aPreemptHacks[idCpu].cRecursion < UINT32_MAX / 2);
        if (++g_aPreemptHacks[idCpu].cRecursion == 1)
        {
            lck_spin_t *pSpinLock = g_aPreemptHacks[idCpu].pSpinLock;
            if (pSpinLock)
                lck_spin_lock(pSpinLock);
            else
                AssertFailed();
        }
    }
    ASMSetFlags(fSavedFlags);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    RT_ASSERT_PREEMPT_CPUID_DISABLE(pState);
}
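For context, a sketch of the intended disable/restore pairing (the standard IPRT pattern; the guarded work is a placeholder):

RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
RTThreadPreemptDisable(&PreemptState);
/* ... per-CPU work that must neither be preempted nor migrate ... */
RTThreadPreemptRestore(&PreemptState);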
Example #4
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved   = 0;
#endif

        if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);
#endif
    }
    else
    {
        if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
    }
}
Example #5
VOID EFIAPI
DebugPrint(IN UINTN ErrorLevel, IN CONST CHAR8 *Format, ...)
{
    CHAR8       szBuf[256];
    VA_LIST     va;
    UINTN       cch;
    RTCCUINTREG SavedFlags;

    /* No pool noise, please. */
    if (ErrorLevel == DEBUG_POOL)
        return;

    VA_START(va, Format);
    cch = AsciiVSPrint(szBuf, sizeof(szBuf), Format, va);
    VA_END(va);

    /* make sure it's terminated and doesn't end with a newline */
    if (cch >= sizeof(szBuf))
        cch = sizeof(szBuf) - 1;
    while (cch > 0 && (szBuf[cch - 1] == '\n' || szBuf[cch - 1] == '\r'))
        cch--;
    szBuf[cch] = '\0';

    SavedFlags = ASMIntDisableFlags();

    VBoxPrintString("dbg/");
    VBoxPrintHex(ErrorLevel, sizeof(ErrorLevel));
    VBoxPrintChar(' ');
    VBoxPrintString(szBuf);
    VBoxPrintChar('\n');

    ASMSetFlags(SavedFlags);
}
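Callers normally reach DebugPrint() through EDK2's DEBUG macro rather than calling it directly; a hedged sketch (the driver name and rc value are placeholders):

INTN rc = -1; /* placeholder */
DEBUG ((DEBUG_INFO, "MyDriver: something happened, rc=%d\n", rc));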
Example #6
/**
 * Disables all host calls, except certain fatal ones.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @thread  EMT.
 */
VMMRZDECL(void) VMMRZCallRing3Disable(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
#if defined(LOG_ENABLED) && defined(IN_RING0)
    RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
#endif

    Assert(pVCpu->vmm.s.cCallRing3Disabled < 16);
    if (ASMAtomicUoIncU32(&pVCpu->vmm.s.cCallRing3Disabled) == 1)
    {
        /** @todo it might make more sense to just disable logging here, then we
         * won't flush away important bits... but that goes both ways really. */
#ifdef IN_RC
        pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = true;
#else
# ifdef LOG_ENABLED
        if (pVCpu->vmm.s.pR0LoggerR0)
            pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
# endif
#endif
    }

#if defined(LOG_ENABLED) && defined(IN_RING0)
    ASMSetFlags(fFlags);
#endif
}
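A sketch of the intended pairing with VMMRZCallRing3Enable() (shown in a later example); pVCpu and the guarded section are placeholders:

VMMRZCallRing3Disable(pVCpu);
/* ... section that must not trigger ring-3 calls (e.g. log flushing) ... */
VMMRZCallRing3Enable(pVCpu);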
Example #7
RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD); RT_NOREF1(hThread);

    /*
     * Read the globals and check if they are useful.
     */
/** @todo Should we check KPRCB.InterruptRequest and KPRCB.DpcInterruptRequested (older kernels).  */
    uint32_t const offQuantumEnd     = g_offrtNtPbQuantumEnd;
    uint32_t const cbQuantumEnd      = g_cbrtNtPbQuantumEnd;
    uint32_t const offDpcQueueDepth  = g_offrtNtPbDpcQueueDepth;
    if (!offQuantumEnd && !cbQuantumEnd && !offDpcQueueDepth)
        return false;
    Assert((offQuantumEnd && cbQuantumEnd) || (!offQuantumEnd && !cbQuantumEnd));

    /*
     * Disable interrupts so we won't be messed around.
     */
    bool            fPending;
    RTCCUINTREG     fSavedFlags  = ASMIntDisableFlags();

#ifdef RT_ARCH_X86
    PKPCR       pPcr   = (PKPCR)__readfsdword(RT_OFFSETOF(KPCR,SelfPcr));
    uint8_t    *pbPrcb = (uint8_t *)pPcr->Prcb;

#elif defined(RT_ARCH_AMD64)
    /* HACK ALERT! The offset is from windbg/vista64. */
    PKPCR       pPcr   = (PKPCR)__readgsqword(RT_OFFSETOF(KPCR,Self));
    uint8_t    *pbPrcb = (uint8_t *)pPcr->CurrentPrcb;

#else
# error "port me"
#endif

    /* Check QuantumEnd. */
    if (cbQuantumEnd == 1)
    {
        uint8_t volatile *pbQuantumEnd = (uint8_t volatile *)(pbPrcb + offQuantumEnd);
        fPending = *pbQuantumEnd == TRUE;
    }
    else if (cbQuantumEnd == sizeof(uint32_t))
    {
        uint32_t volatile *pu32QuantumEnd = (uint32_t volatile *)(pbPrcb + offQuantumEnd);
        fPending = *pu32QuantumEnd != 0;
    }
    else
        fPending = false;

    /* Check DpcQueueDepth. */
    if (    !fPending
        &&  offDpcQueueDepth)
    {
        uint32_t volatile *pu32DpcQueueDepth = (uint32_t volatile *)(pbPrcb + offDpcQueueDepth);
        fPending = *pu32DpcQueueDepth > 0;
    }

    ASMSetFlags(fSavedFlags);
    return fPending;
}
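A hypothetical consumer: a chunked work loop that backs off when the scheduler wants the CPU (the worker and batch size are assumptions, not from the source):

uint32_t cChunksLeft = 64;                      /* hypothetical batch size */
while (cChunksLeft-- > 0)
{
    rtExampleProcessOneChunk();                 /* hypothetical worker */
    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
        break;                                  /* back off and let the scheduler run */
}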
Example #8
/**
 * Worker for supdrvOSMsrProberModify.
 */
static DECLCALLBACK(void) supdrvLnxMsrProberModifyOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PSUPMSRPROBER               pReq    = (PSUPMSRPROBER)pvUser1;
    register uint32_t           uMsr    = pReq->u.In.uMsr;
    bool const                  fFaster = pReq->u.In.enmOp == SUPMSRPROBEROP_MODIFY_FASTER;
    uint64_t                    uBefore;
    uint64_t                    uWritten;
    uint64_t                    uAfter;
    int                         rcBefore, rcWrite, rcAfter, rcRestore;
    RTCCUINTREG                 fOldFlags;

    /* Initialize result variables. */
    uBefore = uWritten = uAfter    = 0;
    rcWrite = rcAfter  = rcRestore = -EIO;

    /*
     * Do the job.
     */
    fOldFlags = ASMIntDisableFlags();
    ASMCompilerBarrier(); /* paranoia */
    if (!fFaster)
        ASMWriteBackAndInvalidateCaches();

    rcBefore = rdmsrl_safe(uMsr, &uBefore);
    if (rcBefore >= 0)
    {
        register uint64_t uRestore = uBefore;
        uWritten  = uRestore;
        uWritten &= pReq->u.In.uArgs.Modify.fAndMask;
        uWritten |= pReq->u.In.uArgs.Modify.fOrMask;

        rcWrite   = wrmsr_safe(uMsr, RT_LODWORD(uWritten), RT_HIDWORD(uWritten));
        rcAfter   = rdmsrl_safe(uMsr, &uAfter);
        rcRestore = wrmsr_safe(uMsr, RT_LODWORD(uRestore), RT_HIDWORD(uRestore));

        if (!fFaster)
        {
            ASMWriteBackAndInvalidateCaches();
            ASMReloadCR3();
            ASMNopPause();
        }
    }

    ASMCompilerBarrier(); /* paranoia */
    ASMSetFlags(fOldFlags);

    /*
     * Write out the results.
     */
    pReq->u.Out.uResults.Modify.uBefore    = uBefore;
    pReq->u.Out.uResults.Modify.uWritten   = uWritten;
    pReq->u.Out.uResults.Modify.uAfter     = uAfter;
    pReq->u.Out.uResults.Modify.fBeforeGp  = rcBefore  != 0;
    pReq->u.Out.uResults.Modify.fModifyGp  = rcWrite   != 0;
    pReq->u.Out.uResults.Modify.fAfterGp   = rcAfter   != 0;
    pReq->u.Out.uResults.Modify.fRestoreGp = rcRestore != 0;
    RT_ZERO(pReq->u.Out.uResults.Modify.afReserved);
}
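The worker runs on one specific CPU; a sketch of how the (assumed) supdrvOSMsrProberModify caller might dispatch it via the standard IPRT multiprocessor API, with idCpu and pReq supplied by that caller:

int rc = RTMpOnSpecific(idCpu, supdrvLnxMsrProberModifyOnCpu, pReq, NULL /*pvUser2*/);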
Example #9
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        for (;;)
        {
            uint32_t fIntSaved = ASMIntDisableFlags();
            critical_enter();

            int c = 50;
            for (;;)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
                    pThis->fIntSaved = fIntSaved;
                    return;
                }
                if (--c <= 0)
                    break;
                cpu_spinwait();
            }

            /* Enable interrupts while we sleep. */
            ASMSetFlags(fIntSaved);
            critical_exit();
            DELAY(1);
        }
    }
    else
    {
        for (;;)
        {
            critical_enter();

            int c = 50;
            for (;;)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
                    return;
                }
                if (--c <= 0)
                    break;
                cpu_spinwait();
            }

            critical_exit();
            DELAY(1);
        }
    }
}
Example #10
/**
 * Try undo the harm done by the init function.
 *
 * This may leave the system in an unstable state since we might have been
 * hijacking memory below 1MB that is in use by the kernel.
 */
void vmmR0TripleFaultHackTerm(void)
{
    /*
     * Restore overwritten memory.
     */
    if (   g_pvSavedLowCore
        && g_pbLowCore)
        memcpy(g_pbLowCore, g_pvSavedLowCore, PAGE_SIZE);

    if (g_pbPage0)
    {
        g_pbPage0[0x467+0] = RT_BYTE1(g_u32SavedVector);
        g_pbPage0[0x467+1] = RT_BYTE2(g_u32SavedVector);
        g_pbPage0[0x467+2] = RT_BYTE3(g_u32SavedVector);
        g_pbPage0[0x467+3] = RT_BYTE4(g_u32SavedVector);

        g_pbPage0[0x472+0] = RT_BYTE1(g_u16SavedCadIndicator);
        g_pbPage0[0x472+1] = RT_BYTE2(g_u16SavedCadIndicator);
    }

    /*
     * Fix the CMOS.
     */
    if (g_pvSavedLowCore)
    {
        uint32_t fSaved = ASMIntDisableFlags();

        ASMOutU8(0x70, 0x0f);
        ASMOutU8(0x71, 0x0a);

        ASMOutU8(0x70, 0x00);
        ASMInU8(0x71);

        ASMReloadCR3();
        ASMWriteBackAndInvalidateCaches();

        ASMSetFlags(fSaved);
    }

    /*
     * Release resources.
     */
    RTMemFree(g_pvSavedLowCore);
    g_pvSavedLowCore = NULL;

    RTR0MemObjFree(g_hMemLowCore, true /*fFreeMappings*/);
    g_hMemLowCore   = NIL_RTR0MEMOBJ;
    g_hMapLowCore   = NIL_RTR0MEMOBJ;
    g_pbLowCore     = NULL;
    g_HCPhysLowCore = NIL_RTHCPHYS;

    RTR0MemObjFree(g_hMemPage0, true /*fFreeMappings*/);
    g_hMemPage0     = NIL_RTR0MEMOBJ;
    g_hMapPage0     = NIL_RTR0MEMOBJ;
    g_pbPage0       = NULL;
}
Example #11
/**
 * Internal worker for getting the GIP CPU array index for the calling CPU.
 *
 * @returns Index into SUPGLOBALINFOPAGE::aCPUs or UINT16_MAX.
 * @param   pGip    The GIP.
 */
DECLINLINE(uint16_t) supGetGipCpuIndex(PSUPGLOBALINFOPAGE pGip)
{
    uint16_t iGipCpu;
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very fast. */
        uint16_t cbLim = ASMGetIdtrLimit();
        uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us what we need and more. */
        uint32_t iCpuSet;
        ASMReadTscWithAux(&iCpuSet);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else
    {
        /* Get APIC ID via the slow CPUID instruction. */
        uint8_t idApic = ASMGetApicId();
        iGipCpu = pGip->aiCpuFromApicId[idApic];
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId(); interrupts are disabled (cli) to avoid host OS assertions about unsafe CPU number usage. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    ASMSetFlags(uFlags);

#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU. */
    uint32_t    iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif
    return iGipCpu;
}
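A short consumer sketch: the index is only trusted after bounds checking and then selects the per-CPU GIP entry (i64TSCDelta is the field consumed by SUPReadTscWithDelta in a later example):

uint16_t iGipCpu = supGetGipCpuIndex(pGip);
if (iGipCpu < pGip->cCpus)
{
    int64_t iTscDelta = pGip->aCPUs[iGipCpu].i64TSCDelta;
    /* ... apply the delta to a raw TSC read if it is valid ... */
}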
Example #12
VOID EFIAPI
DebugAssert(IN CONST CHAR8 *FileName, IN UINTN LineNumber, IN CONST CHAR8 *Description)
{
    RTCCUINTREG SavedFlags = ASMIntDisableFlags();

    VBoxPrintString("EFI Assertion failed! File=");
    VBoxPrintString(FileName ? FileName : "<NULL>");
    VBoxPrintString(" line=0x");
    VBoxPrintHex(LineNumber, sizeof(LineNumber));
    VBoxPrintString("\nDescription: ");
    VBoxPrintString(Description ? Description : "<NULL>");

    ASMOutU8(EFI_PANIC_PORT, 2); /** @todo fix this. */

    ASMSetFlags(SavedFlags);
}
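This is the DebugLib hook behind EDK2's ASSERT macro, which expands roughly as sketched below (simplified; the real macros also consult the debug-enable settings):

#define _ASSERT(Expression)  DebugAssert (__FILE__, __LINE__, #Expression)
#define ASSERT(Expression)        \
  do {                            \
    if (!(Expression)) {          \
      _ASSERT (Expression);       \
    }                             \
  } while (FALSE)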
Example #13
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
        lck_spin_unlock(pThis->pSpinLock);
        ASMSetFlags(fIntSaved);
    }
    else
        lck_spin_unlock(pThis->pSpinLock);
}
Example #14
VOID EFIAPI
DebugAssert(IN CONST CHAR8 *FileName, IN UINTN LineNumber, IN CONST CHAR8 *Description)
{
    RTCCUINTREG SavedFlags = ASMIntDisableFlags();

    ASMOutU8(EFI_PANIC_PORT, EFI_PANIC_CMD_START_MSG);
    VBoxPanicMsgString("EFI Assertion failed!"
                       "\nFile:  ");
    VBoxPanicMsgString(FileName ? FileName : "<NULL>");
    VBoxPanicMsgString("\nLine:  ");
    VBoxPanicMsgDecimalU32((uint32_t)LineNumber);
    VBoxPanicMsgString("\nEDescription: ");
    VBoxPanicMsgString(Description ? Description : "<NULL>");
    ASMOutU8(EFI_PANIC_PORT, EFI_PANIC_CMD_END_MSG);

    ASMSetFlags(SavedFlags);
}
Example #15
/**
 * Helper for RTSemSpinMutexTryRequest, RTSemSpinMutexRequest and
 * RTSemSpinMutexRelease.
 *
 * @param  pState      The state to restore.
 */
DECL_FORCE_INLINE(void) rtSemSpinMutexLeave(RTSEMSPINMUTEXSTATE *pState)
{
    /*
     * Restore the interrupt flag.
     */
    if (pState->fValidFlags)
        ASMSetFlags(pState->fSavedFlags);

#ifdef RT_OS_WINDOWS
    /*
     * NT: Lower the IRQL if we raised it.
     */
    if (pState->PreemptState.uchOldIrql < DISPATCH_LEVEL)
        KeLowerIrql(pState->PreemptState.uchOldIrql);
#else
    /*
     * Default: Restore preemption.
     */
    RTThreadPreemptRestore(&pState->PreemptState);
#endif
}
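A sketch of the state fields this helper relies on, inferred from its body (the real RTSEMSPINMUTEXSTATE has further members):

typedef struct RTSEMSPINMUTEXSTATE
{
    bool                 fValidFlags;   /* whether fSavedFlags holds a value to restore */
    RTCCUINTREG          fSavedFlags;   /* interrupt flag captured at request time */
    RTTHREADPREEMPTSTATE PreemptState;  /* NT: carries uchOldIrql; elsewhere fed to RTThreadPreemptRestore */
} RTSEMSPINMUTEXSTATE;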
Example #16
/**
 * Our own log worker function; avoids the dbg/00000xxx prefix and makes it
 * clear which log statements we added.
 *
 * @param   pszFormat           Format string. EFI style!
 * @param   ...                 Arguments referenced in the format string.
 */
VOID EFIAPI
VBoxLogWorker(const char *pszFormat, ...)
{
    CHAR8       szBuf[384];
    VA_LIST     va;
    RTCCUINTREG SavedFlags;

    /* Format it. */
    VA_START(va, pszFormat);
    AsciiVSPrint(szBuf, sizeof(szBuf), pszFormat, va);
    VA_END(va);
    szBuf[sizeof(szBuf) - 1] = '\0';

    /* Output the log string. */
    SavedFlags = ASMIntDisableFlags();

    VBoxPrintString(szBuf);
    VBoxPrintChar('\n');

    ASMSetFlags(SavedFlags);
}
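A hypothetical call site; %a and %r are the usual EFI Print conversions for ASCII strings and EFI_STATUS:

EFI_STATUS Status = EFI_SUCCESS;    /* placeholder */
VBoxLogWorker("%a: Status=%r", __FUNCTION__, Status);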
Example #17
/**
 * Hook function for the thread-preempting event.
 *
 * @param   pPreemptNotifier    Pointer to the preempt_notifier struct.
 * @param   pNext               Pointer to the task that is preempting the
 *                              current thread.
 *
 * @remarks Called with the rq (runqueue) lock held and with preemption and
 *          interrupts disabled!
 */
static void rtThreadCtxHooksLnxSchedOut(struct preempt_notifier *pPreemptNotifier, struct task_struct *pNext)
{
    PRTTHREADCTXINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXINT, hPreemptNotifier);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    RTCCUINTREG fSavedEFlags = ASMGetFlags();
    stac();
#endif

    AssertPtr(pThis);
    AssertPtr(pThis->pfnThreadCtxHook);
    Assert(pThis->fRegistered);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    pThis->pfnThreadCtxHook(RTTHREADCTXEVENT_PREEMPTING, pThis->pvUser);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    ASMSetFlags(fSavedEFlags);
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
    pThis->fSavedRFlags = fSavedEFlags;
# endif
#endif
}
Example #18
/**
 * Hook function for the thread-resumed event.
 *
 * @param   pPreemptNotifier    Pointer to the preempt_notifier struct.
 * @param   iCpu                The CPU this thread is scheduled on.
 *
 * @remarks Called without holding the rq (runqueue) lock and with preemption
 *          enabled!
 */
static void rtThreadCtxHooksLnxSchedIn(struct preempt_notifier *pPreemptNotifier, int iCpu)
{
    PRTTHREADCTXINT pThis = RT_FROM_MEMBER(pPreemptNotifier, RTTHREADCTXINT, hPreemptNotifier);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    RTCCUINTREG fSavedEFlags = ASMGetFlags();
    stac();
#endif

    AssertPtr(pThis);
    AssertPtr(pThis->pfnThreadCtxHook);
    Assert(pThis->fRegistered);

    pThis->pfnThreadCtxHook(RTTHREADCTXEVENT_RESUMED, pThis->pvUser);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 19) && defined(RT_ARCH_AMD64)
    fSavedEFlags &= ~RT_BIT_64(18) /*X86_EFL_AC*/;
    fSavedEFlags |= pThis->fSavedRFlags & RT_BIT_64(18) /*X86_EFL_AC*/;
# endif
    ASMSetFlags(fSavedEFlags);
#endif
}
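A sketch of how this pair of hooks would be wired up with the standard Linux preempt-notifier API; the ops table and registration helper names are assumptions:

static struct preempt_ops g_rtThreadCtxOps =
{
    .sched_out = rtThreadCtxHooksLnxSchedOut,
    .sched_in  = rtThreadCtxHooksLnxSchedIn,
};

static void rtThreadCtxHooksLnxRegister(PRTTHREADCTXINT pThis) /* hypothetical helper */
{
    preempt_notifier_init(&pThis->hPreemptNotifier, &g_rtThreadCtxOps);
    preempt_notifier_register(&pThis->hPreemptNotifier); /* must run on the thread itself */
    pThis->fRegistered = true;
}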
Example #19
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE_VARS();

    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);
    RT_ASSERT_PREEMPT_CPUID_SPIN_RELEASE(pThis);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
#endif
        mutex_exit(&pThis->Mtx);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);
#endif
    }
    else
    {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        bool fIntsOn = ASMIntAreEnabled();
#endif

        mutex_exit(&pThis->Mtx);

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
#endif
    }

    RT_ASSERT_PREEMPT_CPUID();
}
Example #20
/**
 * Counterpart to VMMRZCallRing3Disable(); re-enables host calls.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @thread  EMT.
 */
VMMRZDECL(void) VMMRZCallRing3Enable(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
#if defined(LOG_ENABLED) && defined(IN_RING0)
    RTCCUINTREG fFlags = ASMIntDisableFlags(); /* preemption consistency. */
#endif

    Assert(pVCpu->vmm.s.cCallRing3Disabled > 0);
    if (ASMAtomicUoDecU32(&pVCpu->vmm.s.cCallRing3Disabled) == 0)
    {
#ifdef IN_RC
        pVCpu->pVMRC->vmm.s.fRCLoggerFlushingDisabled = false;
#else
# ifdef LOG_ENABLED
        if (pVCpu->vmm.s.pR0LoggerR0)
            pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
# endif
#endif
    }

#if defined(LOG_ENABLED) && defined(IN_RING0)
    ASMSetFlags(fFlags);
#endif
}
Example #21
DECLHIDDEN(int) rtR0InitNative(void)
{
    /*
     * IPRT has not yet been initialized at this point, so use Solaris' native cmn_err() for logging.
     */
    int rc = RTR0DbgKrnlInfoOpen(&g_hKrnlDbgInfo, 0 /* fFlags */);
    if (RT_SUCCESS(rc))
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        /*
         * Detect whether spl*() is preserving the interrupt flag or not.
         * This is a problem on S10.
         */
        RTCCUINTREG uOldFlags = ASMIntDisableFlags();
        int iOld = splr(DISP_LEVEL);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        splx(iOld);
        if (ASMIntAreEnabled())
            g_frtSolSplSetsEIF = true;
        ASMSetFlags(uOldFlags);
#else
        /* PORTME: See if the amd64/x86 problem applies to this architecture. */
#endif
        /*
         * Mandatory: Preemption offsets.
         */
        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_runrun", &g_offrtSolCpuPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_runrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "cpu_t", "cpu_kprunrun", &g_offrtSolCpuForceKernelPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find cpu_t::cpu_kprunrun!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_preempt", &g_offrtSolThreadPreempt);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_preempt!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_did", &g_offrtSolThreadId);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_did!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_intr", &g_offrtSolThreadIntrThread);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_intr!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_lockp", &g_offrtSolThreadLock);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_lockp!\n");
            goto errorbail;
        }

        rc = RTR0DbgKrnlInfoQueryMember(g_hKrnlDbgInfo, NULL, "kthread_t", "t_procp", &g_offrtSolThreadProc);
        if (RT_FAILURE(rc))
        {
            cmn_err(CE_NOTE, "Failed to find kthread_t::t_procp!\n");
            goto errorbail;
        }
        cmn_err(CE_CONT, "!cpu_t::cpu_runrun @ 0x%lx (%ld)\n",    g_offrtSolCpuPreempt, g_offrtSolCpuPreempt);
        cmn_err(CE_CONT, "!cpu_t::cpu_kprunrun @ 0x%lx (%ld)\n",  g_offrtSolCpuForceKernelPreempt, g_offrtSolCpuForceKernelPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_preempt @ 0x%lx (%ld)\n", g_offrtSolThreadPreempt, g_offrtSolThreadPreempt);
        cmn_err(CE_CONT, "!kthread_t::t_did @ 0x%lx (%ld)\n",     g_offrtSolThreadId, g_offrtSolThreadId);
        cmn_err(CE_CONT, "!kthread_t::t_intr @ 0x%lx (%ld)\n",    g_offrtSolThreadIntrThread, g_offrtSolThreadIntrThread);
        cmn_err(CE_CONT, "!kthread_t::t_lockp @ 0x%lx (%ld)\n",   g_offrtSolThreadLock, g_offrtSolThreadLock);
        cmn_err(CE_CONT, "!kthread_t::t_procp @ 0x%lx (%ld)\n",   g_offrtSolThreadProc, g_offrtSolThreadProc);

        /*
         * Mandatory: CPU cross call infrastructure. Refer the-solaris-kernel.h for details.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "xc_init_cpu", NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            if (ncpus > IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: CPU count mismatch! ncpus=%d IPRT_SOL_NCPUS=%d\n", ncpus, IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
            g_rtSolXcCall.u.pfnSol_xc_call = (void *)xc_call;
        }
        else
        {
            g_frtSolOldIPI = true;
            g_rtSolXcCall.u.pfnSol_xc_call_old = (void *)xc_call;
            if (max_cpuid + 1 == sizeof(ulong_t) * 8)
            {
                g_frtSolOldIPIUlong = true;
                g_rtSolXcCall.u.pfnSol_xc_call_old_ulong = (void *)xc_call;
            }
            else if (max_cpuid + 1 != IPRT_SOL_NCPUS)
            {
                cmn_err(CE_NOTE, "rtR0InitNative: cpuset_t size mismatch! max_cpuid=%d IPRT_SOL_NCPUS=%d\n", max_cpuid,
                        IPRT_SOL_NCPUS);
                rc = VERR_NOT_SUPPORTED;
                goto errorbail;
            }
        }

        /*
         * Mandatory: Thread-context hooks.
         */
        rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "exitctx",  NULL /* ppvSymbol */);
        if (RT_SUCCESS(rc))
        {
            g_rtSolThreadCtx.Install.pfnSol_installctx = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx   = (void *)removectx;
        }
        else
        {
            g_frtSolOldThreadCtx = true;
            g_rtSolThreadCtx.Install.pfnSol_installctx_old = (void *)installctx;
            g_rtSolThreadCtx.Remove.pfnSol_removectx_old   = (void *)removectx;
        }

        /*
         * Optional: Timeout hooks.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "timeout_generic",
                                   (void **)&g_pfnrtR0Sol_timeout_generic);
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "untimeout_generic",
                                   (void **)&g_pfnrtR0Sol_untimeout_generic);
        if ((g_pfnrtR0Sol_timeout_generic == NULL) != (g_pfnrtR0Sol_untimeout_generic == NULL))
        {
            static const char *s_apszFn[2] = { "timeout_generic", "untimeout_generic" };
            bool iMissingFn = g_pfnrtR0Sol_timeout_generic == NULL;
            cmn_err(CE_NOTE, "rtR0InitNative: Weird! Found %s but not %s!\n", s_apszFn[!iMissingFn], s_apszFn[iMissingFn]);
            g_pfnrtR0Sol_timeout_generic   = NULL;
            g_pfnrtR0Sol_untimeout_generic = NULL;
        }
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "cyclic_reprogram",
                                   (void **)&g_pfnrtR0Sol_cyclic_reprogram);

        /*
         * Optional: Querying page no-relocation support.
         */
        RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /*pszModule */, "page_noreloc_supported",
                                   (void **)&g_pfnrtR0Sol_page_noreloc_supported);

        /*
         * Weak binding failures: contig_free
         */
        if (g_pfnrtR0Sol_contig_free == NULL)
        {
            rc = RTR0DbgKrnlInfoQuerySymbol(g_hKrnlDbgInfo, NULL /* pszModule */, "contig_free",
                                            (void **)&g_pfnrtR0Sol_contig_free);
            if (RT_FAILURE(rc))
            {
                cmn_err(CE_NOTE, "rtR0InitNative: failed to find contig_free!\n");
                goto errorbail;
            }
        }

        g_frtSolInitDone = true;
        return VINF_SUCCESS;
    }
    else
    {
        cmn_err(CE_NOTE, "RTR0DbgKrnlInfoOpen failed. rc=%d\n", rc);
        return rc;
    }

errorbail:
    RTR0DbgKrnlInfoRelease(g_hKrnlDbgInfo);
    return rc;
}
Example #22
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = ASMGetFlags();
#endif

#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                    pThis->fIntSaved = fIntSaved;
# endif
                    return;
                }
                ASMNopPause();
            }
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
#endif
            RTThreadYield();
        }
#else
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                pThis->fIntSaved = fIntSaved;
# endif
                return;
            }
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
#endif
            ASMNopPause();
        }
#endif
    }
    else
    {
#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                    return;
                ASMNopPause();
            }
            RTThreadYield();
        }
#else
        while (!ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            ASMNopPause();
#endif
    }
}
Example #23
#ifdef HAVE_UNLOCKED_IOCTL
static long VBoxDrvLinuxIOCtl(struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
#else
static int VBoxDrvLinuxIOCtl(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
#endif
{
    PSUPDRVSESSION pSession = (PSUPDRVSESSION)pFilp->private_data;
    int rc;
#if defined(VBOX_STRICT) || defined(VBOX_WITH_EFLAGS_AC_SET_IN_VBOXDRV)
    RTCCUINTREG fSavedEfl;

    /*
     * Refuse all I/O control calls if we've ever detected EFLAGS.AC being cleared.
     *
     * This isn't a problem, as there is absolutely nothing in the kernel context that
     * depends on user context triggering cleanups.  That would be pretty wild, right?
     */
    if (RT_UNLIKELY(g_DevExt.cBadContextCalls > 0))
    {
        SUPR0Printf("VBoxDrvLinuxIOCtl: EFLAGS.AC=0 detected %u times, refusing all I/O controls!\n", g_DevExt.cBadContextCalls);
        return ESPIPE;
    }

    fSavedEfl = ASMAddFlags(X86_EFL_AC);
#else
    stac();
#endif

    /*
     * Deal with the two high-speed IOCtls that take their arguments from
     * the session and uCmd, and only return a VBox status code.
     */
#ifdef HAVE_UNLOCKED_IOCTL
    if (RT_LIKELY(   (   uCmd == SUP_IOCTL_FAST_DO_RAW_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_HM_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_NOP)
                  && pSession->fUnrestricted == true))
        rc = supdrvIOCtlFast(uCmd, ulArg, &g_DevExt, pSession);
    else
        rc = VBoxDrvLinuxIOCtlSlow(pFilp, uCmd, ulArg, pSession);
#else   /* !HAVE_UNLOCKED_IOCTL */
    unlock_kernel();
    if (RT_LIKELY(   (   uCmd == SUP_IOCTL_FAST_DO_RAW_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_HM_RUN
                      || uCmd == SUP_IOCTL_FAST_DO_NOP)
                  && pSession->fUnrestricted == true))
        rc = supdrvIOCtlFast(uCmd, ulArg, &g_DevExt, pSession);
    else
        rc = VBoxDrvLinuxIOCtlSlow(pFilp, uCmd, ulArg, pSession);
    lock_kernel();
#endif  /* !HAVE_UNLOCKED_IOCTL */

#if defined(VBOX_STRICT) || defined(VBOX_WITH_EFLAGS_AC_SET_IN_VBOXDRV)
    /*
     * Before we restore AC and the rest of EFLAGS, check if the IOCtl handler code
     * accidentally modified it or some other important flag.
     */
    if (RT_UNLIKELY(   (ASMGetFlags() & (X86_EFL_AC | X86_EFL_IF | X86_EFL_DF | X86_EFL_IOPL))
                    != ((fSavedEfl    & (X86_EFL_AC | X86_EFL_IF | X86_EFL_DF | X86_EFL_IOPL)) | X86_EFL_AC) ))
    {
        char szTmp[48];
        RTStrPrintf(szTmp, sizeof(szTmp), "uCmd=%#x: %#x->%#x!", _IOC_NR(uCmd), (uint32_t)fSavedEfl, (uint32_t)ASMGetFlags());
        supdrvBadContext(&g_DevExt, "SUPDrv-linux.c",  __LINE__, szTmp);
    }
    ASMSetFlags(fSavedEfl);
#else
    clac();
#endif
    return rc;
}
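A sketch of how such a handler is typically registered with the kernel (standard Linux file_operations fields; the table name is an assumption):

static struct file_operations g_VBoxDrvFileOps =
{
    .owner          = THIS_MODULE,
#ifdef HAVE_UNLOCKED_IOCTL
    .unlocked_ioctl = VBoxDrvLinuxIOCtl,
#else
    .ioctl          = VBoxDrvLinuxIOCtl,
#endif
};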
Example #24
/**
 * MSR write handler for KVM.
 *
 * @returns Strict VBox status code like CPUMSetGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_WRITE
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idMsr       The MSR being written.
 * @param   pRange      The range this MSR belongs to.
 * @param   uRawValue   The raw value with the ignored bits not masked.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmWriteMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uRawValue)
{
    NOREF(pRange);
    PVM     pVM  = pVCpu->CTX_SUFF(pVM);
    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
        {
            bool fEnable = RT_BOOL(uRawValue & MSR_GIM_KVM_SYSTEM_TIME_ENABLE_BIT);
#ifdef IN_RING0
            gimR0KvmUpdateSystemTime(pVM, pVCpu);
            return VINF_CPUM_R3_MSR_WRITE;
#elif defined(IN_RC)
            Assert(pVM->cCpus == 1);
            if (fEnable)
            {
                RTCCUINTREG fEFlags  = ASMIntDisableFlags();
                pKvmCpu->uTsc        = TMCpuTickGetNoCheck(pVCpu) | UINT64_C(1);
                pKvmCpu->uVirtNanoTS = TMVirtualGetNoCheck(pVM)   | UINT64_C(1);
                ASMSetFlags(fEFlags);
            }
            return VINF_CPUM_R3_MSR_WRITE;
#else /* IN_RING3 */
            if (!fEnable)
            {
                gimR3KvmDisableSystemTime(pVM);
                pKvmCpu->u64SystemTimeMsr = uRawValue;
                return VINF_SUCCESS;
            }

            /* Is the system-time struct. already enabled? If so, get flags that need preserving. */
            uint8_t fFlags = 0;
            GIMKVMSYSTEMTIME SystemTime;
            RT_ZERO(SystemTime);
            if (   MSR_GIM_KVM_SYSTEM_TIME_IS_ENABLED(pKvmCpu->u64SystemTimeMsr)
                && MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue) == pKvmCpu->GCPhysSystemTime)
            {
                int rc2 = PGMPhysSimpleReadGCPhys(pVM, &SystemTime, pKvmCpu->GCPhysSystemTime, sizeof(GIMKVMSYSTEMTIME));
                if (RT_SUCCESS(rc2))
                    pKvmCpu->fSystemTimeFlags = (SystemTime.fFlags & GIM_KVM_SYSTEM_TIME_FLAGS_GUEST_PAUSED);
            }

            /* Enable and populate the system-time struct. */
            pKvmCpu->u64SystemTimeMsr      = uRawValue;
            pKvmCpu->GCPhysSystemTime      = MSR_GIM_KVM_SYSTEM_TIME_GUEST_GPA(uRawValue);
            pKvmCpu->u32SystemTimeVersion += 2;
            int rc = gimR3KvmEnableSystemTime(pVM, pVCpu);
            if (RT_FAILURE(rc))
            {
                pKvmCpu->u64SystemTimeMsr = 0;
                return VERR_CPUM_RAISE_GP_0;
            }
            return VINF_SUCCESS;
#endif
        }

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
        {
#ifndef IN_RING3
            return VINF_CPUM_R3_MSR_WRITE;
#else
            /* Enable the wall-clock struct. */
            RTGCPHYS GCPhysWallClock = MSR_GIM_KVM_WALL_CLOCK_GUEST_GPA(uRawValue);
            if (RT_LIKELY(RT_ALIGN_64(GCPhysWallClock, 4) == GCPhysWallClock))
            {
                int rc = gimR3KvmEnableWallClock(pVM, GCPhysWallClock);
                if (RT_SUCCESS(rc))
                {
                    pKvm->u64WallClockMsr = uRawValue;
                    return VINF_SUCCESS;
                }
            }
            return VERR_CPUM_RAISE_GP_0;
#endif /* IN_RING3 */
        }

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid WrMsr (%#x,%#x`%08x) -> #GP(0)\n", idMsr,
                        uRawValue & UINT64_C(0xffffffff00000000), uRawValue & UINT64_C(0xffffffff)));
#endif
            LogFunc(("Unknown/invalid WrMsr (%#RX32,%#RX64) -> #GP(0)\n", idMsr, uRawValue));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}
Example #25
/**
 * The slow case for SUPReadTsc where we need to apply deltas.
 *
 * Must only be called when deltas are applicable, so please do not call it
 * directly.
 *
 * @returns TSC with delta applied.
 * @param   pGip        Pointer to the GIP.
 *
 * @remarks May be called with interrupts disabled in ring-0!  This is why the
 *          ring-0 code doesn't attempt to figure the delta.
 *
 * @internal
 */
SUPDECL(uint64_t) SUPReadTscWithDelta(PSUPGLOBALINFOPAGE pGip)
{
    uint64_t            uTsc;
    uint16_t            iGipCpu;
    AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));
    AssertCompile(RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx) >= RTCPUSET_MAX_CPUS);
    Assert(pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO);

    /*
     * Read the TSC and get the corresponding aCPUs index.
     */
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us all we need, no loops/cli. */
        uint32_t iCpuSet;
        uTsc      = ASMReadTscWithAux(&iCpuSet);
        iCpuSet  &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu   = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very quick, but we need to loop. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint16_t cbLim = ASMGetIdtrLimit();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetIdtrLimit() == cbLim))
            {
                uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
                iCpuSet &= RTCPUSET_MAX_CPUS - 1;
                iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
    else
    {
        /* Get APIC ID via the slow CPUID instruction, requires looping. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint8_t idApic = ASMGetApicId();
            uTsc = ASMReadTSC();
            if (RT_LIKELY(ASMGetApicId() == idApic))
            {
                iGipCpu = pGip->aiCpuFromApicId[idApic];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId(), no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);

#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU, no loops. */
    RTCCUINTREG uFlags  = ASMIntDisableFlags(); /* Interrupts are already disabled, but play it safe. */
    uint32_t    iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif

    /*
     * If the delta is valid, apply it.
     */
    if (RT_LIKELY(iGipCpu < pGip->cCpus))
    {
        int64_t iTscDelta = pGip->aCPUs[iGipCpu].i64TSCDelta;
        if (RT_LIKELY(iTscDelta != INT64_MAX))
            return uTsc - iTscDelta;

# ifdef IN_RING3
        /*
         * The delta needs calculating, call supdrv to get the TSC.
         */
        int rc = SUPR3ReadTsc(&uTsc, NULL);
        if (RT_SUCCESS(rc))
            return uTsc;
        AssertMsgFailed(("SUPR3ReadTsc -> %Rrc\n", rc));
        uTsc = ASMReadTSC();
# endif /* IN_RING3 */
    }

    /*
     * This shouldn't happen, especially not in ring-3 and raw-mode context.
     * But if it does, return something that's half useful.
     */
    AssertMsgFailed(("iGipCpu=%d (%#x) cCpus=%d fGetGipCpu=%#x\n", iGipCpu, iGipCpu, pGip->cCpus, pGip->fGetGipCpu));
    return uTsc;
}
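For context, an approximation of the fast-path wrapper whose slow case this is (the real SUPReadTsc inline lives in the SUP headers; treat this as a sketch):

DECLINLINE(uint64_t) SUPReadTsc(void)
{
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
        return ASMReadTSC();            /* deltas negligible: plain TSC read */
    return SUPReadTscWithDelta(pGip);   /* the function above */
}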
Example #26
/**
 * Initializes the triple fault / boot hack.
 *
 * Always call vmmR0TripleFaultHackTerm to clean up, even when this call fails.
 *
 * @returns VBox status code.
 */
int vmmR0TripleFaultHackInit(void)
{
    /*
     * Map the first page.
     */
    int rc = RTR0MemObjEnterPhys(&g_hMemPage0, 0, PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
    AssertRCReturn(rc, rc);
    rc = RTR0MemObjMapKernel(&g_hMapPage0, g_hMemPage0, (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    AssertRCReturn(rc, rc);
    g_pbPage0 = (uint8_t *)RTR0MemObjAddress(g_hMapPage0);
    LogRel(("0040:0067 = %04x:%04x\n", RT_MAKE_U16(g_pbPage0[0x467+2],  g_pbPage0[0x467+3]),  RT_MAKE_U16(g_pbPage0[0x467+0],  g_pbPage0[0x467+1]) ));

    /*
     * Allocate some "low core" memory.  If that fails, just grab some memory.
     */
    //rc = RTR0MemObjAllocPhys(&g_hMemLowCore, PAGE_SIZE, _1M - 1);
    //__debugbreak();
    rc = RTR0MemObjEnterPhys(&g_hMemLowCore, 0x7000, PAGE_SIZE, RTMEM_CACHE_POLICY_DONT_CARE);
    AssertRCReturn(rc, rc);
    rc = RTR0MemObjMapKernel(&g_hMapLowCore, g_hMemLowCore, (void *)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    AssertRCReturn(rc, rc);
    g_pbLowCore = (uint8_t *)RTR0MemObjAddress(g_hMapLowCore);
    g_HCPhysLowCore = RTR0MemObjGetPagePhysAddr(g_hMapLowCore, 0);
    LogRel(("Low core at %RHp mapped at %p\n", g_HCPhysLowCore, g_pbLowCore));

    /*
     * Save memory we'll be overwriting.
     */
    g_pvSavedLowCore = RTMemAlloc(PAGE_SIZE);
    AssertReturn(g_pvSavedLowCore, VERR_NO_MEMORY);
    memcpy(g_pvSavedLowCore, g_pbLowCore, PAGE_SIZE);

    g_u32SavedVector = RT_MAKE_U32_FROM_U8(g_pbPage0[0x467], g_pbPage0[0x467+1], g_pbPage0[0x467+2], g_pbPage0[0x467+3]);
    g_u16SavedCadIndicator = RT_MAKE_U16(g_pbPage0[0x472], g_pbPage0[0x472+1]);

    /*
     * Install the code.
     */
    size_t cbCode = (uintptr_t)&vmmR0TripleFaultHackEnd - (uintptr_t)&vmmR0TripleFaultHackStart;
    AssertLogRelReturn(cbCode <= PAGE_SIZE, VERR_OUT_OF_RANGE);
    memcpy(g_pbLowCore, &vmmR0TripleFaultHackStart, cbCode);

    g_pbPage0[0x467+0] = 0x00;
    g_pbPage0[0x467+1] = 0x70;
    g_pbPage0[0x467+2] = 0x00;
    g_pbPage0[0x467+3] = 0x00;

    g_pbPage0[0x472+0] = 0x34;
    g_pbPage0[0x472+1] = 0x12;

    /*
     * Configure the status port and cmos shutdown command.
     */
    uint32_t fSaved = ASMIntDisableFlags();

    ASMOutU8(0x70, 0x0f);
    ASMOutU8(0x71, 0x0a);

    ASMOutU8(0x70, 0x05);
    ASMInU8(0x71);

    ASMReloadCR3();
    ASMWriteBackAndInvalidateCaches();

    ASMSetFlags(fSaved);

#if 1 /* For testing & debugging. */
    vmmR0TripleFaultHackTripleFault();
#endif

    return VINF_SUCCESS;
}