/**
 * Process the critical sections (both types) queued for ring-3 'leave'.
 *
 * @param   pVCpu         The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) PDMCritSectBothFF(PVMCPU pVCpu)
{
    uint32_t i;
    Assert(   pVCpu->pdm.s.cQueuedCritSectLeaves       > 0
           || pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves > 0
           || pVCpu->pdm.s.cQueuedCritSectRwExclLeaves > 0);

    /* Shared leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves;
    pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i];
# else
        PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                                   pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i]);
# endif

        pdmCritSectRwLeaveSharedQueued(pCritSectRw);
        LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
    }

    /* Exclusive leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves;
    pVCpu->pdm.s.cQueuedCritSectRwExclLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECTRW pCritSectRw = pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i];
# else
        PPDMCRITSECTRW pCritSectRw = (PPDMCRITSECTRW)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM),
                                                                   pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i]);
# endif

        pdmCritSectRwLeaveExclQueued(pCritSectRw);
        LogFlow(("PDMR3CritSectFF: %p (R/W)\n", pCritSectRw));
    }

    /* Normal leaves. */
    i = pVCpu->pdm.s.cQueuedCritSectLeaves;
    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    while (i-- > 0)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
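/*
 * Usage sketch (hypothetical): the queued leaves above are normally processed
 * by the EMT when it notices the VMCPU_FF_PDM_CRITSECT force-flag before
 * returning to guest execution.  Only PDMCritSectBothFF and the force-flag
 * macros come from the code in this file; the wrapper name is made up.
 */
static void exampleProcessCritSectForceFlag(PVMCPU pVCpu)
{
    /* Critical sections left in RC/R0 are merely queued there; the real
     * unlock (and waking of any waiters) happens here in ring-3. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
        PDMCritSectBothFF(pVCpu);
}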
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC value.
 * @param   pVCpu           The VMCPU to operate on.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.fTSCVirtualized)
        {
            if (pVM->tm.s.fTSCUseRealTSC)
                u64 = ASMReadTSC();
            else
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            u64 -= pVCpu->tm.s.offTSCRawSrc;
        }
        else
            u64 = ASMReadTSC();

        /* Never return a value lower than what the guest has already seen. */
        if (u64 < pVCpu->tm.s.u64TSCLastSeen)
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
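/*
 * Standalone illustration of the monotonicity guard used above: whatever the
 * raw time source returns, the value handed to the guest never goes
 * backwards.  This is a simplified sketch with made-up names, not VMM code;
 * it only depends on <stdint.h>.
 */
#include <stdint.h>

typedef struct TSCSTATE
{
    uint64_t u64LastSeen;   /* highest TSC value already exposed to the guest */
} TSCSTATE;

static uint64_t tscClampMonotonic(TSCSTATE *pState, uint64_t u64Raw)
{
    if (u64Raw > pState->u64LastSeen)
        pState->u64LastSeen = u64Raw;   /* normal case: the clock moved forward */
    else
    {
        /* Raw source went backwards (e.g. a different per-CPU TSC): nudge the
         * last-seen value forward by a small fixed amount, mirroring the
         * "+= 64" used above, and return that instead. */
        pState->u64LastSeen += 64;
        u64Raw = pState->u64LastSeen;
    }
    return u64Raw;
}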
/**
 * MSR read handler for KVM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr().
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idMsr       The MSR being read.
 * @param   pRange      The range this MSR belongs to.
 * @param   puValue     Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) gimKvmReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    NOREF(pRange);
    PVM     pVM        = pVCpu->CTX_SUFF(pVM);
    PGIMKVM pKvm       = &pVM->gim.s.u.Kvm;
    PGIMKVMCPU pKvmCpu = &pVCpu->gim.s.u.KvmCpu;

    switch (idMsr)
    {
        case MSR_GIM_KVM_SYSTEM_TIME:
        case MSR_GIM_KVM_SYSTEM_TIME_OLD:
            *puValue = pKvmCpu->u64SystemTimeMsr;
            return VINF_SUCCESS;

        case MSR_GIM_KVM_WALL_CLOCK:
        case MSR_GIM_KVM_WALL_CLOCK_OLD:
            *puValue = pKvm->u64WallClockMsr;
            return VINF_SUCCESS;

        default:
        {
#ifdef IN_RING3
            static uint32_t s_cTimes = 0;
            if (s_cTimes++ < 20)
                LogRel(("GIM: KVM: Unknown/invalid RdMsr (%#x) -> #GP(0)\n", idMsr));
#endif
            LogFunc(("Unknown/invalid RdMsr (%#RX32) -> #GP(0)\n", idMsr));
            break;
        }
    }

    return VERR_CPUM_RAISE_GP_0;
}
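/*
 * Hypothetical caller sketch: how an MSR-read path might dispatch to the
 * handler above and act on its status code.  Only gimKvmReadMsr and the
 * status codes come from the code above; the wrapper name and the way #GP(0)
 * would be raised are illustrative assumptions.
 */
static VBOXSTRICTRC exampleRdMsrKvm(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue)
{
    VBOXSTRICTRC rcStrict = gimKvmReadMsr(pVCpu, idMsr, NULL /* pRange (unused above) */, puValue);
    if (rcStrict == VERR_CPUM_RAISE_GP_0)
    {
        /* The caller is expected to inject #GP(0) into the guest here. */
    }
    return rcStrict;
}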
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC value.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            u64 = SUPReadTsc();
        else
            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
/**
 * Disassembles the instruction at RIP and if it's a hypercall
 * instruction, performs the hypercall.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   pcbInstr    Where to store the disassembled instruction length.
 *                      Optional, can be NULL.
 *
 * @todo    This interface should disappear when IEM/REM execution engines
 *          handle VMCALL/VMMCALL instructions to call into GIM when
 *          required. See @bugref{7270#c168}.
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMExecHypercallInstr(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t *pcbInstr)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT(pVCpu);

    if (RT_UNLIKELY(!GIMIsEnabled(pVM)))
        return VERR_GIM_NOT_ENABLED;

    unsigned    cbInstr;
    DISCPUSTATE Dis;
    int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
    if (RT_SUCCESS(rc))
    {
        if (pcbInstr)
            *pcbInstr = (uint8_t)cbInstr;
        switch (pVM->gim.s.enmProviderId)
        {
        case GIMPROVIDERID_HYPERV:
            return gimHvExecHypercallInstr(pVCpu, pCtx, &Dis);

        case GIMPROVIDERID_KVM:
            return gimKvmExecHypercallInstr(pVCpu, pCtx, &Dis);

        default:
            AssertMsgFailed(("GIMExecHypercallInstr: for provider %u not available/implemented\n", pVM->gim.s.enmProviderId));
            return VERR_GIM_HYPERCALLS_NOT_AVAILABLE;
        }
    }

    Log(("GIM: GIMExecHypercallInstr: Failed to disassemble CS:RIP=%04x:%08RX64. rc=%Rrc\n", pCtx->cs.Sel, pCtx->rip, rc));
    return rc;
}
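/*
 * Hypothetical VMCALL/VMMCALL exit-handler sketch: run the hypercall through
 * GIMExecHypercallInstr and, on plain success, skip the instruction using the
 * length it reported.  The function name and the RIP-advance policy are
 * illustrative assumptions; the real exit handlers do more status handling.
 */
static VBOXSTRICTRC exampleHandleVmcallExit(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint8_t      cbInstr  = 0;
    VBOXSTRICTRC rcStrict = GIMExecHypercallInstr(pVCpu, pCtx, &cbInstr);
    if (rcStrict == VINF_SUCCESS && cbInstr)
        pCtx->rip += cbInstr;   /* move past the hypercall instruction */
    return rcStrict;
}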
/**
 * Notifies VMM that paravirtualized hypercalls are now disabled.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMM_INT_DECL(void) VMMHypercallsDisable(PVMCPU pVCpu)
{
    /* If there is anything to do for raw-mode, do it here. */
#ifndef IN_RC
    if (HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
        HMHypercallsDisable(pVCpu);
#endif
}
/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVCpu           The current CPU.
 * @param   pfOffsettedTsc  Where to return whether the TSC can be offsetted.
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 * @thread  EMT(pVCpu).
 * @remarks Superset of TMCpuTickCanUseRealTSC.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
{
    PVM         pVM = pVCpu->CTX_SUFF(pVM);
    uint64_t    cTicksToDeadline;

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *          a) we don't have any lag to catch up, and
     *          b) the virtual sync clock hasn't been halted by an expired timer, and
     *          c) we're not using warp drive (accelerated virtual guest time).
     */
    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
        &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
        &&  (   pVM->tm.s.fTSCUseRealTSC
             || (   !pVM->tm.s.fVirtualSyncCatchUp
                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                 && !pVM->tm.s.fVirtualWarpDrive))
       )
    {
        *pfOffsettedTsc = true;
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            uint64_t cNsToDeadline;
            uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
            uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                            ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                            : u64NowVirtSync;
            u64Now -= pVCpu->tm.s.offTSCRawSrc;
            *poffRealTSC = u64Now - ASMReadTSC();
            cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
        }
        else
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
        }
    }
    else
    {
#ifdef VBOX_WITH_STATISTICS
        tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
        *pfOffsettedTsc  = false;
        *poffRealTSC     = 0;
        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    }
    return cTicksToDeadline;
}
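/*
 * Hypothetical consumer sketch for the function above: when the TSC can be
 * offsetted, the guest-visible TSC is simply the host TSC plus the returned
 * offset, and the returned tick count bounds how long we may stay in the
 * guest.  Names other than the VMM APIs used above are illustrative.
 */
static void exampleProgramTscForGuest(PVMCPU pVCpu)
{
    bool     fOffsettedTsc = false;
    uint64_t offRealTsc    = 0;
    uint64_t cHostTicks    = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &offRealTsc);

    if (fOffsettedTsc)
    {
        /* Guest TSC = host TSC + offRealTsc; e.g. program this into the
         * hardware TSC-offset field instead of intercepting RDTSC. */
    }
    else
    {
        /* Intercept RDTSC and emulate it instead (see tmCpuTickGetInternal() above). */
    }
    (void)cHostTicks; /* would be used to arm a preemption timer */
}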
/**
 * Handles the Hyper-V hypercall.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pCtx            Pointer to the guest-CPU context.
 */
VMM_INT_DECL(int) gimHvHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (!MSR_GIM_HV_HYPERCALL_IS_ENABLED(pVM->gim.s.u.Hv.u64HypercallMsr))
        return VERR_GIM_HYPERCALLS_NOT_ENABLED;

    /** @todo Handle hypercalls. Fail for now */
    return VERR_GIM_IPE_3;
}
/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVCpu       The VMCPU to operate on.
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 *                          Can be NULL.
 * @thread EMT.
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *          a) we don't have any lag to catch up, and
     *          b) the virtual sync clock hasn't been halted by an expired timer, and
     *          c) we're not using warp drive (accelerated virtual guest time).
     */
    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
        &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
        &&  (   pVM->tm.s.fTSCUseRealTSC
             || (   !pVM->tm.s.fVirtualSyncCatchUp
                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                 && !pVM->tm.s.fVirtualWarpDrive))
       )
    {
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            if (poffRealTSC)
            {
                uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                - pVCpu->tm.s.offTSCRawSrc;
                /** @todo When we start collecting statistics on how much time we spend executing
                 * guest code before exiting, we should check this against the next virtual sync
                 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
                 * the chance that we'll get interrupted right after the timer expired. */
                *poffRealTSC = u64Now - ASMReadTSC();
            }
        }
        else if (poffRealTSC)
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
        }
        /** @todo count this? */
        return true;
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}
/**
 * Set the TPR (task priority register).
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   u8TPR           The new TPR.
 */
VMMDECL(int) PDMApicSetTPR(PVMCPU pVCpu, uint8_t u8TPR)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnSetTPR));
        pdmLock(pVM);
        pVM->pdm.s.Apic.CTX_SUFF(pfnSetTPR)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, u8TPR);
        pdmUnlock(pVM);
        return VINF_SUCCESS;
    }
    return VERR_PDM_NO_APIC_INSTANCE;
}
/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       The virtual CPU handle.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
#ifdef IN_RING3
    NOREF(pVCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    Assert(&pVCpu->CTX_SUFF(pVM)->aCpus[pVCpu->idCpu] == pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
        || pCritSect->s.Core.cNestings > 1;
#endif
}
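/*
 * Usage sketch (hypothetical): the ownership check above is typically used in
 * assertions guarding state that may only be touched while the given VCPU
 * holds the section.  The function name is made up; Assert comes from IPRT.
 */
static void exampleTouchProtectedState(PCPDMCRITSECT pCritSect, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(pCritSect, pVCpu));
    /* ... modify state protected by pCritSect ... */
}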
/**
 * Gets the pending interrupt.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pu8Interrupt    Where to store the interrupt on success.
 */
VMMDECL(int) PDMGetInterrupt(PVMCPU pVCpu, uint8_t *pu8Interrupt)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    pdmLock(pVM);

    /*
     * The local APIC has a higher priority than the PIC.
     */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_APIC);
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Apic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /*
     * Check the PIC.
     */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
    {
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_PIC);
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pDevIns));
        Assert(pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt));
        uint32_t uTagSrc;
        int i = pVM->pdm.s.Pic.CTX_SUFF(pfnGetInterrupt)(pVM->pdm.s.Pic.CTX_SUFF(pDevIns), &uTagSrc);
        AssertMsg(i <= 255 && i >= 0, ("i=%d\n", i));
        if (i >= 0)
        {
            pdmUnlock(pVM);
            *pu8Interrupt = (uint8_t)i;
            VBOXVMM_PDM_IRQ_GET(pVCpu, RT_LOWORD(uTagSrc), RT_HIWORD(uTagSrc), i);
            return VINF_SUCCESS;
        }
    }

    /** @todo Figure out exactly why we can get here without anything being set. (REM) */

    pdmUnlock(pVM);
    return VERR_NO_DATA;
}
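/*
 * Hypothetical injection-path sketch: check the interrupt force-flags, fetch
 * the highest-priority pending vector via PDMGetInterrupt() (APIC before PIC,
 * as implemented above) and hand it to whatever injection mechanism applies.
 * The wrapper name and the injection step are illustrative only.
 */
static int exampleInjectPendingInterrupt(PVMCPU pVCpu)
{
    if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC)
        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC))
    {
        uint8_t u8Vector = 0;
        int rc = PDMGetInterrupt(pVCpu, &u8Vector);
        if (RT_SUCCESS(rc))
        {
            /* ... inject u8Vector as an external interrupt ... */
            return VINF_SUCCESS;
        }
        return rc; /* VERR_NO_DATA: a flag was set but nothing was pending */
    }
    return VINF_SUCCESS;
}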
/**
 * Prepares the host FPU/SSE/AVX stuff for IEM action.
 *
 * This will make sure the FPU/SSE/AVX guest state is _not_ loaded in the CPU.
 * This will make sure the FPU/SSE/AVX host state is saved.
 * Finally, it will make sure the FPU/SSE/AVX host features can be safely
 * accessed.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMRZ_INT_DECL(void)    CPUMRZFpuStatePrepareHostCpuForUse(PVMCPU pVCpu)
{
    pVCpu->cpum.s.fChanged |= CPUM_CHANGED_FPU_REM;
    switch (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST))
    {
    case 0:
#ifdef IN_RC
        cpumRZSaveHostFPUState(&pVCpu->cpum.s);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_CPUM);     /* Must recalc CR0 before executing more code! */
#else
        if (cpumRZSaveHostFPUState(&pVCpu->cpum.s) == VINF_CPUM_HOST_CR0_MODIFIED)
            HMR0NotifyCpumModifiedHostCr0(pVCpu);
#endif
        Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #0 - %#x\n", ASMGetCR0()));
        break;

    case CPUM_USED_FPU_HOST:
#ifdef IN_RC
        VMCPU_FF_SET(pVCpu, VMCPU_FF_CPUM);     /* (should be set already) */
#elif defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        if (pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE)
        {
            pVCpu->cpum.s.fUseFlags &= ~CPUM_SYNC_FPU_STATE;
            HMR0NotifyCpumUnloadedGuestFpuState(pVCpu);
        }
#endif
        Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #1 - %#x\n", ASMGetCR0()));
        break;

    case CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST:
#if defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
            HMR0SaveFPUState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
        else
#endif
            cpumRZSaveGuestFpuState(&pVCpu->cpum.s, true /*fLeaveFpuAccessible*/);
#ifdef IN_RING0
        HMR0NotifyCpumUnloadedGuestFpuState(pVCpu);
#else
        VMCPU_FF_SET(pVCpu, VMCPU_FF_CPUM);     /* Must recalc CR0 before executing more code! */
#endif
        Log6(("CPUMRZFpuStatePrepareHostCpuForUse: #2 - %#x\n", ASMGetCR0()));
        break;

    default:
        AssertFailed();
    }

}
/**
 * Get the APIC base from the APIC device.  This is slow and involves taking
 * the PDM lock; it is currently only used by CPUM to cache the APIC base once
 * (during init./load state).  All other callers should use the APIC base
 * cached by CPUM instead of calling this function.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pu64Base        Where to store the APIC base.
 */
VMMDECL(int) PDMApicGetBase(PVMCPU pVCpu, uint64_t *pu64Base)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase));
        pdmLock(pVM);
        *pu64Base = pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu);
        pdmUnlock(pVM);
        return VINF_SUCCESS;
    }
    *pu64Base = 0;
    return VERR_PDM_NO_APIC_INSTANCE;
}
/**
 * Checks if the APIC has a pending interrupt or if a TPR change would activate one.
 *
 * @returns VINF_SUCCESS or VERR_PDM_NO_APIC_INSTANCE.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pfPending       Pending state (out).
 */
VMM_INT_DECL(int) PDMApicHasPendingIrq(PVMCPU pVCpu, bool *pfPending)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq));
        pdmLock(pVM);
        *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu,
                                                                NULL /* pu8PendingIrq */);
        pdmUnlock(pVM);
        return VINF_SUCCESS;
    }
    return VERR_PDM_NO_APIC_INSTANCE;
}
/**
 * Returns whether the guest has configured and enabled calls to the hypervisor.
 *
 * @returns true if hypercalls are enabled and usable, false otherwise.
 * @param   pVCpu           Pointer to the VMCPU.
 */
VMM_INT_DECL(bool) GIMAreHypercallsEnabled(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (!GIMIsEnabled(pVM))
        return false;

    switch (pVM->gim.s.enmProviderId)
    {
        case GIMPROVIDERID_HYPERV:
            return GIMHvAreHypercallsEnabled(pVCpu);

        default:
            return false;
    }
}
/**
 * Makes sure the FPU/SSE/AVX state in CPUMCPU::Guest is up to date.
 *
 * This will not cause CPUM_USED_FPU_GUEST to change.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMRZ_INT_DECL(void)    CPUMRZFpuStateActualizeForRead(PVMCPU pVCpu)
{
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
    {
#if defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
            HMR0SaveFPUState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
        else
#endif
            cpumRZSaveGuestFpuState(&pVCpu->cpum.s, false /*fLeaveFpuAccessible*/);
        pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU_GUEST;
        Log7(("CPUMRZFpuStateActualizeForRead\n"));
    }
}
/**
 * Get the TPR (task priority register).
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pu8TPR          Where to store the TPR.
 * @param   pfPending       Pending interrupt state (out).
 */
VMMDECL(int) PDMApicGetTPR(PVMCPU pVCpu, uint8_t *pu8TPR, bool *pfPending)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR));
        /* We don't acquire the PDM lock here as we're just reading information. Doing so causes massive
         * contention as this function is called very often by each and every VCPU.
         */
        *pu8TPR = pVM->pdm.s.Apic.CTX_SUFF(pfnGetTPR)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu);
        if (pfPending)
            *pfPending = pVM->pdm.s.Apic.CTX_SUFF(pfnHasPendingIrq)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns));
        return VINF_SUCCESS;
    }
    *pu8TPR = 0;
    return VERR_PDM_NO_APIC_INSTANCE;
}
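/*
 * Hypothetical fast-path sketch: the lock-free TPR read above is intended for
 * very hot paths such as preparing every VM-entry, where both the TPR value
 * and whether an interrupt is pending are needed at once.  The wrapper name
 * and what is done with the values are illustrative assumptions.
 */
static void exampleSyncTprForVmEntry(PVMCPU pVCpu)
{
    uint8_t u8Tpr    = 0;
    bool    fPending = false;
    if (PDMApicGetTPR(pVCpu, &u8Tpr, &fPending) == VINF_SUCCESS)
    {
        /* e.g. mirror u8Tpr into the virtual-APIC page / TPR-threshold field
         * and use fPending to decide whether an interrupt-window exit or a
         * TPR-below-threshold exit needs to be requested. */
    }
}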
/**
 * Invokes the read-MSR handler for the GIM provider configured for the VM.
 *
 * @returns Strict VBox status code like CPUMQueryGuestMsr.
 * @retval  VINF_CPUM_R3_MSR_READ
 * @retval  VERR_CPUM_RAISE_GP_0
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   idMsr       The MSR to read.
 * @param   pRange      The range this MSR belongs to.
 * @param   puValue     Where to store the MSR value read.
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMReadMsr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
{
    Assert(pVCpu);
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(GIMIsEnabled(pVM));
    VMCPU_ASSERT_EMT(pVCpu);

    switch (pVM->gim.s.enmProviderId)
    {
        case GIMPROVIDERID_HYPERV:
            return GIMHvReadMsr(pVCpu, idMsr, pRange, puValue);

        default:
            AssertMsgFailed(("GIMReadMsr: for unknown provider %u idMsr=%#RX32 -> #GP(0)", pVM->gim.s.enmProviderId, idMsr));
            return VERR_CPUM_RAISE_GP_0;
    }
}
/**
 * Makes sure the YMM0..YMM15 and MXCSR state in CPUMCPU::Guest is up to date.
 *
 * This will not cause CPUM_USED_FPU_GUEST to change.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMRZ_INT_DECL(void)    CPUMRZFpuStateActualizeAvxForRead(PVMCPU pVCpu)
{
    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_GUEST)
    {
#if defined(IN_RING0) && ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
        {
            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE));
            HMR0SaveFPUState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
            pVCpu->cpum.s.fUseFlags |= CPUM_USED_FPU_GUEST;
        }
        else
#endif
            cpumRZSaveGuestAvxRegisters(&pVCpu->cpum.s);
        Log7(("CPUMRZFpuStateActualizeAvxForRead\n"));
    }
}
/**
 * Whether \#UD exceptions in the guest need to be intercepted by the GIM
 * provider.
 *
 * At the moment, the reason why this isn't a more generic interface wrt
 * exceptions is performance (each VM-exit would have to manually check
 * whether or not GIM needs to be notified). Left as a todo for later if
 * really required.
 *
 * @returns true if needed, false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(bool) GIMShouldTrapXcptUD(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (!GIMIsEnabled(pVM))
        return false;

    switch (pVM->gim.s.enmProviderId)
    {
    case GIMPROVIDERID_KVM:
        return gimKvmShouldTrapXcptUD(pVCpu);

    case GIMPROVIDERID_HYPERV:
        return gimHvShouldTrapXcptUD(pVCpu);

    default:
        return false;
    }
}
/**
 * Exception handler for \#UD when requested by the GIM provider.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL restart the hypercall from ring-3.
 * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
 *          RIP.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 * @retval  VERR_GIM_INVALID_HYPERCALL_INSTR instruction at RIP is not a valid
 *          hypercall instruction.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   pDis        Pointer to the disassembled instruction state at RIP.
 *                      If NULL is passed, it implies the disassembly of
 *                      the instruction at RIP is the responsibility of the
 *                      GIM provider.
 * @param   pcbInstr    Where to store the instruction length of the hypercall
 *                      instruction. Optional, can be NULL.
 *
 * @thread  EMT(pVCpu).
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis, uint8_t *pcbInstr)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(GIMIsEnabled(pVM));
    Assert(pDis || pcbInstr);

    switch (pVM->gim.s.enmProviderId)
    {
    case GIMPROVIDERID_KVM:
        return gimKvmXcptUD(pVCpu, pCtx, pDis, pcbInstr);

    case GIMPROVIDERID_HYPERV:
        return gimHvXcptUD(pVCpu, pCtx, pDis, pcbInstr);

    default:
        return VERR_GIM_OPERATION_FAILED;
    }
}
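/*
 * Hypothetical #UD intercept sketch: when the provider asked for #UD
 * interception (GIMShouldTrapXcptUD above), the exit handler forwards the
 * exception to GIMXcptUD and either skips the hypercall instruction or
 * reflects a real #UD back to the guest.  Names other than the GIM APIs are
 * illustrative, and whether the caller or the provider advances RIP is an
 * assumption of this sketch.
 */
static VBOXSTRICTRC exampleHandleXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint8_t      cbInstr  = 0;
    VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, pCtx, NULL /* pDis: let the provider disassemble */, &cbInstr);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rip += cbInstr;                   /* hypercall handled, skip the instruction */
    else if (rcStrict == VERR_GIM_INVALID_HYPERCALL_INSTR)
    {
        /* Not a hypercall after all: reflect #UD to the guest here. */
    }
    /* VINF_GIM_R3_HYPERCALL, VINF_GIM_HYPERCALL_CONTINUING etc. are passed up. */
    return rcStrict;
}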
/**
 * Implements a GIM hypercall with the provider configured for the VM.
 *
 * @returns VBox status code.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest-CPU context.
 */
VMM_INT_DECL(int) GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT(pVCpu);

    if (RT_UNLIKELY(!GIMIsEnabled(pVM)))
        return VERR_GIM_NOT_ENABLED;

    switch (pVM->gim.s.enmProviderId)
    {
        case GIMPROVIDERID_HYPERV:
            return GIMHvHypercall(pVCpu, pCtx);

        default:
            AssertMsgFailed(("GIMHypercall: for unknown provider %u\n", pVM->gim.s.enmProviderId));
            return VERR_GIM_IPE_3;
    }
}
/**
 * Get the current privilege level of the guest.
 *
 * @returns CPL
 * @param   pVCpu       The current virtual CPU.
 * @param   pRegFrame   Pointer to the register frame.
 *
 * @todo    r=bird: This is very similar to CPUMGetGuestCPL and I cannot quite
 *          see why this variant of the code is necessary.
 */
VMMDECL(uint32_t) CPUMRCGetGuestCPL(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
{
    /*
     * CPL can reliably be found in SS.DPL (hidden regs valid) or SS if not.
     *
     * Note! We used to check CS.DPL here, assuming it was always equal to
     * CPL even if a conforming segment was loaded.  But this truned out to
     * only apply to older AMD-V.  With VT-x we had an ACP2 regression
     * during install after a far call to ring 2 with VT-x.  Then on newer
     * AMD-V CPUs we have to move the VMCB.guest.u8CPL into cs.Attr.n.u2Dpl
     * as well as ss.Attr.n.u2Dpl to make this (and other) code work right.
     *
     * So, forget CS.DPL, always use SS.DPL.
     *
     * Note! The SS RPL is always equal to the CPL, while the CS RPL
     * isn't necessarily equal if the segment is conforming.
     * See section 4.11.1 in the AMD manual.
     */
    uint32_t uCpl;
    if (!pRegFrame->eflags.Bits.u1VM)
    {
        uCpl = (pRegFrame->ss.Sel & X86_SEL_RPL);
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
# ifdef VBOX_WITH_RAW_RING1
        if (pVCpu->cpum.s.fRawEntered)
        {
            if (   uCpl == 2
                && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)) )
                uCpl = 1;
            else if (uCpl == 1)
                uCpl = 0;
        }
        Assert(uCpl != 2);  /* ring 2 support not allowed anymore. */
# else
        if (uCpl == 1)
            uCpl = 0;
# endif
#endif
    }
    else
        uCpl = 3; /* V86 has CPL=3; REM doesn't set DPL=3 in V8086 mode. See @bugref{5130}. */

    return uCpl;
}
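/*
 * Standalone illustration of the CPL derivation described above: outside V86
 * mode the CPL is the SS selector's RPL, and raw-mode ring compression maps
 * guest ring 0 (executed in ring 1) back down.  Plain C sketch with made-up
 * names, not VMM code.
 */
#include <stdint.h>

#define EX_SEL_RPL  3U      /* low two selector bits, like X86_SEL_RPL */

static uint32_t exampleGuestCplFromSs(uint16_t uSsSel, int fV86, int fRawEntered)
{
    if (fV86)
        return 3;                           /* V8086 code always runs at CPL 3 */
    uint32_t uCpl = uSsSel & EX_SEL_RPL;    /* hidden SS.DPL mirrors this RPL */
    if (fRawEntered && uCpl == 1)
        uCpl = 0;                           /* guest ring 0 was executed in ring 1 */
    return uCpl;
}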
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC value.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        switch (pVM->tm.s.enmTSCMode)
        {
            case TMTSCMODE_REAL_TSC_OFFSET:
                u64 = SUPReadTsc();
                break;
            case TMTSCMODE_VIRT_TSC_EMULATED:
            case TMTSCMODE_DYNAMIC:
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
                break;
#ifndef IN_RC
            case TMTSCMODE_NATIVE_API:
            {
                u64 = 0;
                int rcNem = NEMHCQueryCpuTick(pVCpu, &u64, NULL);
                AssertLogRelRCReturn(rcNem, SUPReadTsc());
                break;
            }
#endif
            default:
                AssertFailedBreakStmt(u64 = SUPReadTsc());
        }
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
/**
 * Set the APIC base.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   u64Base         The new base.
 */
VMMDECL(int) PDMApicSetBase(PVMCPU pVCpu, uint64_t u64Base)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pdm.s.Apic.CTX_SUFF(pDevIns))
    {
        Assert(pVM->pdm.s.Apic.CTX_SUFF(pfnSetBase));
        pdmLock(pVM);
        pVM->pdm.s.Apic.CTX_SUFF(pfnSetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu, u64Base);

        /* Update CPUM's copy of the APIC base. */
        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
        Assert(pCtx);
        pCtx->msrApicBase = pVM->pdm.s.Apic.CTX_SUFF(pfnGetBase)(pVM->pdm.s.Apic.CTX_SUFF(pDevIns), pVCpu->idCpu);

        pdmUnlock(pVM);
        return VINF_SUCCESS;
    }
    return VERR_PDM_NO_APIC_INSTANCE;
}
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu         The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
/**
 * Implements a GIM hypercall with the provider configured for the VM.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
 *          failed).
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
 * @retval  VERR_GIM_HYPERCALLS_NOT_AVAILABLE hypercalls unavailable.
 * @retval  VERR_GIM_NOT_ENABLED GIM is not enabled (shouldn't really happen)
 * @retval  VERR_GIM_HYPERCALL_MEMORY_READ_FAILED hypercall failed while reading
 *          memory.
 * @retval  VERR_GIM_HYPERCALL_MEMORY_WRITE_FAILED hypercall failed while
 *          writing memory.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the guest-CPU context.
 *
 * @thread  EMT.
 */
VMM_INT_DECL(VBOXSTRICTRC) GIMHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT(pVCpu);

    if (RT_UNLIKELY(!GIMIsEnabled(pVM)))
        return VERR_GIM_NOT_ENABLED;

    switch (pVM->gim.s.enmProviderId)
    {
    case GIMPROVIDERID_HYPERV:
        return gimHvHypercall(pVCpu, pCtx);

    case GIMPROVIDERID_KVM:
        return gimKvmHypercall(pVCpu, pCtx);

    default:
        AssertMsgFailed(("GIMHypercall: for provider %u not available/implemented\n", pVM->gim.s.enmProviderId));
        return VERR_GIM_HYPERCALLS_NOT_AVAILABLE;
    }
}
/**
 * Transforms the guest CPU state to raw-ring mode.
 *
 * This function will change any of the CS and SS registers with DPL=0 to DPL=1.
 *
 * Used by emInterpretIret() after the new state has been loaded.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtxCore    The context core (for trap usage).
 * @see     @ref pg_raw
 * @remarks Will probably be obsoleted by #5653 (it will leave and reenter raw
 *          mode instead, I think).
 */
VMMDECL(void) CPUMRCRecheckRawState(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    /*
     * Are we in Ring-0?
     */
    if (    pCtxCore->ss.Sel
        &&  (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
        &&  !pCtxCore->eflags.Bits.u1VM)
    {
        /*
         * Set CPL to Ring-1.
         */
        pCtxCore->ss.Sel |= 1;
        if (    pCtxCore->cs.Sel
            &&  (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
            pCtxCore->cs.Sel |= 1;
    }
    else
    {
        if (    EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM))
            &&  !pCtxCore->eflags.Bits.u1VM
            &&  (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
        {
            /* Set CPL to Ring-2. */
            pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
            if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
                pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
        }
    }

    /*
     * Assert sanity.
     */
    AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
    AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
                     ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));

    pCtxCore->eflags.u32        |= X86_EFL_IF; /* paranoia */
}
/**
 * Returns whether the guest has configured and enabled the use of Hyper-V's
 * hypercall interface.
 *
 * @returns true if hypercalls are enabled, false otherwise.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMM_INT_DECL(bool) gimHvAreHypercallsEnabled(PVMCPU pVCpu)
{
    return MSR_GIM_HV_HYPERCALL_IS_ENABLED(pVCpu->CTX_SUFF(pVM)->gim.s.u.Hv.u64HypercallMsr);
}