/**
 * Called by TRPM and CPUM assembly code to make sure the guest state is
 * ready for execution.
 *
 * @param   pVM                 The VM handle.
 */
DECLASM(void) CPUMRCAssertPreExecutionSanity(PVM pVM)
{
    /*
     * Check some important assumptions before resuming guest execution.
     */
    PVMCPU         pVCpu     = VMMGetCpu0(pVM);
    PCCPUMCTX      pCtx      = &pVCpu->cpum.s.Guest;
    uint8_t  const uRawCpl   = CPUMGetGuestCPL(pVCpu);
    uint32_t const u32EFlags = CPUMRawGetEFlags(pVCpu);
    bool     const fPatch    = PATMIsPatchGCAddr(pVM, pCtx->eip);
    AssertMsg(pCtx->eflags.Bits.u1IF,                ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
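    /* In raw mode the guest's IOPL must stay below the raw CPL so that IF-sensitive instructions trap. */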
    AssertMsg(pCtx->eflags.Bits.u2IOPL < RT_MAX(uRawCpl, 1U),
                                                     ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    if (!(u32EFlags & X86_EFL_VM))
    {
        AssertMsg((u32EFlags & X86_EFL_IF) || fPatch,("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
        AssertMsg((pCtx->cs.Sel & X86_SEL_RPL) > 0,  ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
        AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) > 0,  ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    }
    AssertMsg(CPUMIsGuestInRawMode(pVCpu),           ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    //Log2(("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
}
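
The asserts above encode VirtualBox's raw-mode invariants: interrupts must be enabled, the CS/SS selectors must carry a non-zero RPL (the guest is de-privileged out of ring 0), and IOPL must stay below the raw CPL. A minimal standalone sketch of the same checks (not VBox code, hypothetical names, plain C assertions):

#include <assert.h>
#include <stdint.h>

#define SEL_RPL(sel)   ((sel) & 0x3u)          /* low two selector bits = RPL */
#define EFL_IOPL(efl)  (((efl) >> 12) & 0x3u)  /* EFLAGS bits 12-13 = IOPL */

static void checkRawModeInvariants(uint16_t uCs, uint16_t uSs, uint32_t uEfl, uint8_t uRawCpl)
{
    assert((uEfl >> 9) & 1u);                  /* EFLAGS.IF (bit 9) must be set */
    assert(SEL_RPL(uCs) > 0);                  /* guest code never runs at ring 0 */
    assert(SEL_RPL(uSs) > 0);                  /* nor does its stack segment */
    assert(EFL_IOPL(uEfl) < (uRawCpl > 1u ? uRawCpl : 1u));  /* mirrors RT_MAX(uRawCpl, 1U) */
}
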
/**
 * Synchronizes any segment registers referring to the given GDT entry.
 *
 * This is called before any changes are performed and shadowed, so it's
 * possible to look in both the shadow and guest descriptor table entries for
 * hidden register content.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pCtx        The CPU context.
 * @param   iGDTEntry   The GDT entry to sync.
 */
void selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
{
    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (    iGDTEntry >= SELM_GDT_ELEMENTS
        ||  offEntry  >  GdtrGuest.cbGdt)
        return;

    /*
     * Sync outdated segment registers using this entry.
     */
    PCX86DESC       pDesc    = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
    uint32_t        uCpl     = CPUMGetGuestCPL(pVCpu);
    PCPUMSELREG     paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
                {
                    selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
                    Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
                }
                else
                    Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n",
                         iGDTEntry, g_aszSRegNms[iSReg], pDesc));
            }
        }
    }
}
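
The loop above matches segment registers against the modified GDT slot, and the earlier bounds check turns the entry index into a byte offset (an X86DESC is 8 bytes). For reference, a standalone sketch (not VBox code) of how a selector value decomposes into the index, table-indicator and RPL fields involved:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t uSel   = 0x001B;           /* e.g. a typical user-mode CS */
    unsigned iEntry = uSel >> 3;        /* descriptor table index */
    unsigned fTI    = (uSel >> 2) & 1;  /* table indicator: 0 = GDT, 1 = LDT */
    unsigned uRpl   = uSel & 3;         /* requested privilege level */
    printf("sel=%#06x -> index=%u (byte offset %u), TI=%u, RPL=%u\n",
           (unsigned)uSel, iEntry, iEntry * 8u, fTI, uRpl);
    return 0;
}
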
/**
 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int      rc = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);

#ifdef EM_NOTIFY_HM
    HMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs.Sel, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);

        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel,          pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel,          pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);

        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
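         * Anything in the VINF_EM_FIRST..VINF_EM_LAST range is a scheduling
         * request that must be handed back to the outer loop (EMR3ExecuteVM).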
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (    VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_HM)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
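
The emR3IsExecutionAllowed branch above implements a simple CPU-usage cap: when the guest has used up its slice, the EMT sleeps briefly instead of entering the guest, while virtual time keeps running. A minimal sketch of that throttling idea, with hypothetical stand-ins for the VBox calls:

#include <stdbool.h>

extern bool isExecutionAllowed(void);    /* hypothetical stand-in for emR3IsExecutionAllowed */
extern int  runGuestOnce(void);          /* hypothetical stand-in for VMMR3HmRunGC */
extern void threadSleepMs(unsigned ms);  /* hypothetical stand-in for RTThreadSleep */

static int runThrottledOnce(void)
{
    if (isExecutionAllowed())
        return runGuestOnce();   /* normal case: enter the guest */
    threadSleepMs(5);            /* capped: give up this time slice, virtual time continues */
    return 0;                    /* VINF_SUCCESS equivalent: the loop retries */
}
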
/**
 * Trap handler for illegal opcode fault (\#UD).
 *
 * @returns VBox status code.
 *          VINF_SUCCESS means we completely handled this trap;
 *          other codes pass execution back to the host context.
 *
 * @param   pTrpmCpu    Pointer to TRPMCPU data (within VM).
 * @param   pRegFrame   Pointer to the register frame for the trap.
 * @internal
 */
DECLASM(int) TRPMGCTrap06Handler(PTRPMCPU pTrpmCpu, PCPUMCTXCORE pRegFrame)
{
    LogFlow(("TRPMGC06: %04x:%08x efl=%x\n", pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->eflags.u32));
    PVM     pVM   = TRPMCPU_2_VM(pTrpmCpu);
    PVMCPU  pVCpu = TRPMCPU_2_VMCPU(pTrpmCpu);
    int     rc;
    PGMRZDynMapStartAutoSet(pVCpu);

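    /* Only ring-0 code is analyzed here; user-mode #UD traps are forwarded to the guest (see the else branch below). */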
    if (CPUMGetGuestCPL(pVCpu) == 0)
    {
        /*
         * Decode the instruction.
         */
        RTGCPTR PC;
        rc = SELMValidateAndConvertCSAddr(pVCpu, pRegFrame->eflags, pRegFrame->ss.Sel, pRegFrame->cs.Sel, &pRegFrame->cs,
                                          pRegFrame->rip, &PC);
        if (RT_FAILURE(rc))
        {
            Log(("TRPMGCTrap06Handler: Failed to convert %RTsel:%RX32 (cpl=%d) - rc=%Rrc !!\n", pRegFrame->cs.Sel, pRegFrame->eip, pRegFrame->ss.Sel & X86_SEL_RPL, rc));
            rc = trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_GUEST_TRAP, pRegFrame);
            Log6(("TRPMGC06: %Rrc (%04x:%08x) (SELM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
            return rc;
        }

        DISCPUSTATE Cpu;
        uint32_t    cbOp;
        rc = EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)PC, pRegFrame, &Cpu, &cbOp);
        if (RT_FAILURE(rc))
        {
            rc = trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame);
            Log6(("TRPMGC06: %Rrc (%04x:%08x) (EM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
            return rc;
        }

        /*
         * UD2 in a patch?
         * Note! PATMRCHandleIllegalInstrTrap doesn't always return.
         */
        if (    Cpu.pCurInstr->uOpcode == OP_ILLUD2
            &&  PATMIsPatchGCAddr(pVM, pRegFrame->eip))
        {
            LogFlow(("TRPMGCTrap06Handler: -> PATMRCHandleIllegalInstrTrap\n"));
            rc = PATMRCHandleIllegalInstrTrap(pVM, pRegFrame);
            /** @todo  These tests are completely unnecessary, should just follow the
             *         flow and return at the end of the function. */
            if (    rc == VINF_SUCCESS
                ||  rc == VINF_EM_RAW_EMULATE_INSTR
                ||  rc == VINF_PATM_DUPLICATE_FUNCTION
                ||  rc == VINF_PATM_PENDING_IRQ_AFTER_IRET
                ||  rc == VINF_EM_RESCHEDULE)
            {
                rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
                Log6(("TRPMGC06: %Rrc (%04x:%08x) (PATM)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
                return rc;
            }
        }
        /*
         * Speed up dtrace and don't entrust invalid lock sequences to the recompiler.
         */
        else if (Cpu.fPrefix & DISPREFIX_LOCK)
        {
            Log(("TRPMGCTrap06Handler: pc=%08x op=%d\n", pRegFrame->eip, Cpu.pCurInstr->uOpcode));
#ifdef DTRACE_EXPERIMENT /** @todo fix/remove/permanent-enable this when DIS/PATM handles invalid lock sequences. */
            Assert(!PATMIsPatchGCAddr(pVM, pRegFrame->eip));
            rc = TRPMForwardTrap(pVCpu, pRegFrame, 0x6, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, 0x6);
            Assert(rc == VINF_EM_RAW_GUEST_TRAP);
#else
            rc = VINF_EM_RAW_EMULATE_INSTR;
#endif
        }
        /*
         * Handle MONITOR - it causes an #UD exception instead of #GP when not executed in ring 0.
         */
        else if (Cpu.pCurInstr->uOpcode == OP_MONITOR)
        {
            LogFlow(("TRPMGCTrap06Handler: -> EMInterpretInstructionCPU\n"));
            rc = EMInterpretInstructionDisasState(pVCpu, &Cpu, pRegFrame, PC, EMCODETYPE_SUPERVISOR);
        }
        /* Never generate a raw trap here; it might be an instruction that requires emulation. */
        else
        {
            LogFlow(("TRPMGCTrap06Handler: -> VINF_EM_RAW_EMULATE_INSTR\n"));
            rc = VINF_EM_RAW_EMULATE_INSTR;
        }
    }
    else
    {
        LogFlow(("TRPMGCTrap06Handler: -> TRPMForwardTrap\n"));
        rc = TRPMForwardTrap(pVCpu, pRegFrame, 0x6, 0, TRPM_TRAP_NO_ERRORCODE, TRPM_TRAP, 0x6);
        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
    }

    rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
    Log6(("TRPMGC06: %Rrc (%04x:%08x)\n", rc, pRegFrame->cs.Sel, pRegFrame->eip));
    return rc;
}
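
The OP_ILLUD2 case above fires for UD2, the architecturally defined invalid-opcode instruction (byte sequence 0F 0B) that PATM plants inside patch code. A trivial standalone check (not VBox code) for recognising it in a raw byte stream:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool isUd2(const uint8_t *pb, size_t cb)
{
    return cb >= 2 && pb[0] == 0x0F && pb[1] == 0x0B;  /* UD2 = 0F 0B */
}
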
/**
 * Write access handler for CSAM-monitored code pages.
 *
 * @returns VBox strict status code.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC)
csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Log(("csamCodePageWriteHandler: write to %RGv LB %zu\n", GCPtr, cbBuf));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Assert(VMCPU_IS_EMT(pVCpu));

    /*
     * Check if it's a dummy write that doesn't change anything.
     */
    if (   PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
        && !memcmp(pvPtr, pvBuf, cbBuf))
    {
        Log(("csamCodePageWriteHandler: dummy write -> ignore\n"));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

#ifdef IN_RING3
    /*
     * Ring-3: Do proper handling.
     */
    int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
    AssertRC(rc);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#else
    /*
     * Raw-mode: Try to avoid needing to go to ring-3 (same as csamRCCodePageWritePfHandler).
     */
    uint32_t     const cpl            = CPUMGetGuestCPL(pVCpu);
    bool         const fPatchCode     = PATMIsPatchGCAddr(pVM, CPUMGetGuestRIP(pVCpu));
    PPATMGCSTATE       pPATMGCState   = PATMGetGCState(pVM);

    Assert(pVM->csam.s.cDirtyPages < CSAM_MAX_DIRTY_PAGES);
    Assert(pPATMGCState);
    Assert(pPATMGCState->fPIF || fPatchCode);

# ifdef VBOX_WITH_REM
    /* Flush the recompilers translation block cache as the guest seems to be modifying instructions. */
    /** @todo a bit overkill?? */
    REMFlushTBs(pVM);
# endif

    /*
     * When patch code is executing instructions that must complete, we must
     * *never* interrupt it.
     */
    if (!pPATMGCState->fPIF && fPatchCode)
    {
        Log(("csamRCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", CPUMGetGuestRIP(pVCpu)));
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Log(("csamRCCodePageWriteHandler: code page write at %RGv (cpl=%d)\n", GCPtr, cpl));

    /*
     * For supervisor-mode writes, let PATM have the first shot at writes to
     * patch pages.  User-mode writes fall through: the page is then no longer
     * being used for supervisor code, so the write can safely be allowed.
     */
    if (cpl != 3)
    {
        VBOXSTRICTRC rcStrict = PATMRCHandleWriteToPatchPage(pVM, NULL /* pRegFrame = no interpret */, (RTRCPTR)GCPtr, cbBuf);
        if (   rcStrict == VINF_PGM_HANDLER_DO_DEFAULT
            || rcStrict == VINF_SUCCESS)
            return rcStrict;
        if (rcStrict == VINF_EM_RAW_EMULATE_INSTR)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatDangerousWrite);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
        Assert(rcStrict == VERR_PATCH_NOT_FOUND);
    }

    /*
     * Schedule ring-3 activity.
     * Note that GCPtr might be a different address in case of aliases.  So,
     * take down both alternatives.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
    pVM->csam.s.pvDirtyBasePage[pVM->csam.s.cDirtyPages]  = (RTRCPTR)GCPtr;
    pVM->csam.s.pvDirtyFaultPage[pVM->csam.s.cDirtyPages] = (RTRCPTR)GCPtr;
    if (++pVM->csam.s.cDirtyPages == CSAM_MAX_DIRTY_PAGES)
        return VINF_CSAM_PENDING_ACTION;

    /*
     * Continue with the write. The VM_FF_CSAM_FLUSH_DIRTY_PAGE handler will reset it to readonly again.
     */
    Log(("csamRCCodePageWriteHandler: enabled r/w for page %RGv (%RGv)\n", GCPtr, GCPtr));
    STAM_COUNTER_INC(&pVM->csam.s.StatCodePageModified);
    return VINF_PGM_HANDLER_DO_DEFAULT;
#endif
}
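
The dummy-write early-out at the top of the handler depends on two conditions: the write must not cross a page boundary (so a single memcmp covers it) and the bytes must already match. A self-contained sketch of that test, approximating PAGE_ADDRESS with a 4 KiB mask:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MY_PAGE_MASK (~(uintptr_t)0xFFF)   /* assumes 4 KiB pages */

static bool isDummyWrite(const void *pvDst, const void *pvSrc, size_t cb)
{
    uintptr_t uFirst = (uintptr_t)pvDst & MY_PAGE_MASK;
    uintptr_t uLast  = ((uintptr_t)pvDst + cb - 1) & MY_PAGE_MASK;
    return uFirst == uLast && memcmp(pvDst, pvSrc, cb) == 0;
}
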
/**
 * Handles the KVM hypercall.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pCtx            Pointer to the guest-CPU context.
 */
VMM_INT_DECL(int) gimKvmHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Get the hypercall operation and arguments.
     */
    bool const fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
    uint64_t uHyperOp       = pCtx->rax;
    uint64_t uHyperArg0     = pCtx->rbx;
    uint64_t uHyperArg1     = pCtx->rcx;
    uint64_t uHyperArg2     = pCtx->rdi;
    uint64_t uHyperArg3     = pCtx->rsi;
    uint64_t uHyperRet      = KVM_HYPERCALL_RET_ENOSYS;
    uint64_t uAndMask       = UINT64_C(0xffffffffffffffff);
    if (!fIs64BitMode)
    {
        uAndMask    = UINT64_C(0xffffffff);
        uHyperOp   &= UINT64_C(0xffffffff);
        uHyperArg0 &= UINT64_C(0xffffffff);
        uHyperArg1 &= UINT64_C(0xffffffff);
        uHyperArg2 &= UINT64_C(0xffffffff);
        uHyperArg3 &= UINT64_C(0xffffffff);
        uHyperRet  &= UINT64_C(0xffffffff);
    }

    /*
     * Verify that guest ring-0 is the one making the hypercall.
     */
    uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
    if (uCpl)
    {
        pCtx->rax = KVM_HYPERCALL_RET_EPERM & uAndMask;
        return VINF_SUCCESS;
    }

    /*
     * Do the work.
     */
    switch (uHyperOp)
    {
        case KVM_HYPERCALL_OP_KICK_CPU:
        {
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            if (uHyperArg1 < pVM->cCpus)
            {
                PVMCPU pVCpuTarget = &pVM->aCpus[uHyperArg1];   /** ASSUMES pVCpu index == ApicId of the VCPU. */
                VMCPU_FF_SET(pVCpuTarget, VMCPU_FF_UNHALT);
#ifdef IN_RING0
                /*
                 * We might be here with preemption disabled or enabled (depending on whether
                 * thread-context hooks are used), so don't try obtaining the GVMMR0 used lock
                 * here. See @bugref{7270#c148}.
                 */
                GVMMR0SchedWakeUpEx(pVM, pVCpuTarget->idCpu, false /* fTakeUsedLock */);
#elif defined(IN_RING3)
                int rc2 = SUPR3CallVMMR0(pVM->pVMR0, pVCpuTarget->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL);
                AssertRC(rc2);
#elif defined(IN_RC)
                /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
                Assert(pVM->cCpus == 1);
#endif
                uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            }
            break;
        }

        case KVM_HYPERCALL_OP_VAPIC_POLL_IRQ:
            uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            break;

        default:
            break;
    }

    /*
     * Place the result in rax/eax.
     */
    pCtx->rax = uHyperRet & uAndMask;
    return VINF_SUCCESS;
}
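
For reference, this is roughly what the guest side of the protocol looks like, given the register convention the handler consumes: the operation in rax, arguments in rbx/rcx/rdi/rsi, and the result returned in rax (KVM_HYPERCALL_OP_KICK_CPU above, for instance, takes the target VCPU in the second argument register, rcx). A hedged sketch using GCC inline assembly; VMCALL is the Intel encoding, and an AMD guest would execute VMMCALL instead:

#include <stdint.h>

static inline uint64_t guestKvmHypercall2(uint64_t uOp, uint64_t uArg0, uint64_t uArg1)
{
    uint64_t uRet;
    __asm__ __volatile__("vmcall"            /* VMMCALL on AMD CPUs */
                         : "=a" (uRet)
                         : "a" (uOp), "b" (uArg0), "c" (uArg1)
                         : "memory");
    return uRet;
}
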
/**
 * Exception handler for #UD.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pCtx        Pointer to the guest-CPU context.
 * @param   pDis        Pointer to the disassembled instruction state at RIP.
 *                      Optional, can be NULL.
 */
VMM_INT_DECL(int) gimKvmXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PDISCPUSTATE pDis)
{
    /*
     * If we didn't ask for #UD to be trapped, bail.
     */
    PVM     pVM  = pVCpu->CTX_SUFF(pVM);
    PGIMKVM pKvm = &pVM->gim.s.u.Kvm;
    if (RT_UNLIKELY(!pKvm->fTrapXcptUD))
        return VERR_GIM_OPERATION_FAILED;

    /*
     * Make sure guest ring-0 is the one making the hypercall.
     */
    if (CPUMGetGuestCPL(pVCpu))
        return VERR_GIM_HYPERCALL_ACCESS_DENIED;

    int rc = VINF_SUCCESS;
    if (!pDis)
    {
        /*
         * Disassemble the instruction at RIP to figure out if it's the Intel VMCALL instruction
         * or the AMD VMMCALL instruction and if so, handle it as a hypercall.
         */
        DISCPUSTATE Dis;
        rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, NULL /* pcbInstr */);
        pDis = &Dis;
    }

    if (RT_SUCCESS(rc))
    {
        /*
         * Patch the instruction so we don't have to spend time disassembling it each time.
         * This makes sense only for HM, as in raw-mode we will get a #UD regardless.
         */
        if (   pDis->pCurInstr->uOpcode == OP_VMCALL
            || pDis->pCurInstr->uOpcode == OP_VMMCALL)
        {
            if (   pDis->pCurInstr->uOpcode != pKvm->uOpCodeNative
                && HMIsEnabled(pVM))
            {
                uint8_t abHypercall[3];
                size_t  cbWritten = 0;
                rc = VMMPatchHypercall(pVM, &abHypercall, sizeof(abHypercall), &cbWritten);
                AssertRC(rc);
                Assert(sizeof(abHypercall) == pDis->cbInstr);
                Assert(sizeof(abHypercall) == cbWritten);

                rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, &abHypercall, sizeof(abHypercall));
            }

            /*
             * Perform the hypercall and update RIP.
             *
             * For HM, we can simply resume guest execution without performing the hypercall now
             * and do it when the next VMCALL/VMMCALL exit is taken on the patched instruction.
             *
             * For raw-mode we need to do it now anyway.  So we do it here regardless, with the
             * added advantage that it saves one world-switch for the HM case.
             */
            if (RT_SUCCESS(rc))
            {
                int rc2 = gimKvmHypercall(pVCpu, pCtx);
                AssertRC(rc2);
                pCtx->rip += pDis->cbInstr;
            }
            return rc;
        }
    }

    return VERR_GIM_OPERATION_FAILED;
}
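
The patching logic above relies on VMCALL and VMMCALL both being 3-byte instructions (0F 01 C1 and 0F 01 D9 respectively), which is what the asserts on cbInstr and cbWritten express. A small sketch (not VBox code) selecting the native byte sequence by CPU vendor:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static void getNativeHypercallBytes(bool fIsAmd, uint8_t pb[3])
{
    static const uint8_t s_abVmcall[3]  = { 0x0F, 0x01, 0xC1 };  /* Intel VMCALL  */
    static const uint8_t s_abVmmcall[3] = { 0x0F, 0x01, 0xD9 };  /* AMD   VMMCALL */
    memcpy(pb, fIsAmd ? s_abVmmcall : s_abVmcall, 3);
}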