Example #1
/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}
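
The pattern above — mapping an access size onto one of a small fixed set of counters — is self-contained enough to lift out of VBox. A minimal standalone sketch in plain C (illustrative names, no VBox types):

#include <stdint.h>
#include <assert.h>

/* One counter per supported access size: 1, 2, 4 and 8 bytes. */
static uint64_t g_acMmioBySize[4];

static void statMmioLength(unsigned cb)
{
    switch (cb)
    {
        case 1: g_acMmioBySize[0]++; break;
        case 2: g_acMmioBySize[1]++; break;
        case 4: g_acMmioBySize[2]++; break;
        case 8: g_acMmioBySize[3]++; break;
        default: assert(!"invalid MMIO access size"); break;
    }
}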
Example #2
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /*
     * Try to restart the io instruction that was refused in ring-0.
     */
    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
    }
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

    /*
     * Hand it over to the interpreter.
     */
    rcStrict = IEMExecOne(pVCpu);
    LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);
}
Example #3
selmGuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    NOREF(enmAccessType);
    Log(("selmGuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf));
    NOREF(GCPtr);
    NOREF(cbBuf);
    NOREF(pvPtr);
    NOREF(pvBuf);
    NOREF(enmOrigin);
    NOREF(pvUser);

#  ifdef IN_RING3
    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    return VINF_PGM_HANDLER_DO_DEFAULT;

#  else  /* IN_RC: */
    /*
     * Execute the write, doing necessary pre and post shadow GDT checks.
     */
    PCPUMCTX pCtx        = CPUMQueryGuestCtxPtr(pVCpu);
    uint32_t offGuestGdt = pCtx->gdtr.pGdt - GCPtr;
    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
    memcpy(pvBuf, pvPtr, cbBuf);
    VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
    else
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rcStrict;
#  endif
}
Example #4
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    RT_NOREF(pVM);
    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    VBOXSTRICTRC rcStrict;
    uint32_t     idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
    {
        /*
         * Hand it over to the interpreter.
         */
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }
    else
    {
        RT_UNTRUSTED_VALIDATED_FENCE();
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
    }

    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);
}
Example #5
/*
 * Try and write() to the socket, whatever doesn't get written
 * append to the buffer... for a host with a fast net connection,
 * this prevents an unnecessary copy of the data
 * (the socket is non-blocking, so we won't hang)
 */
void
sbappend(PNATState pData, struct socket *so, struct mbuf *m)
{
    int ret = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIOSBAppend_pf, a);
    LogFlow(("sbappend: so = %lx, m = %lx, m->m_len = %d\n", (long)so, (long)m, m ? m->m_len : 0));

    STAM_COUNTER_INC(&pData->StatIOSBAppend);
    /* Shouldn't happen, but...  e.g. foreign host closes connection */
    mlen = m_length(m, NULL);
    if (mlen <= 0)
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppend_zm);
        goto done;
    }

    /*
     * If there is urgent data, call sosendoob
     * if not all was sent, sowrite will take care of the rest
     * (The rest of this function is just an optimisation)
     */
    if (so->so_urgc)
    {
        sbappendsb(pData, &so->so_rcv, m);
        m_freem(pData, m);
        sosendoob(so);
        return;
    }

    /*
     * We only write if there's nothing in the buffer,
     * otherwise it'll arrive out of order, and hence corrupt
     */
    if (so->so_rcv.sb_cc == 0)
    {
        caddr_t buf = NULL;

        if (m->m_next)
        {
            buf = RTMemAlloc(mlen);
            if (buf == NULL)
            {
                ret = 0;
                goto no_sent;
            }
            m_copydata(m, 0, mlen, buf);
        }
        else
            buf = mtod(m, char *);

        ret = send(so->s, buf, mlen, 0);

        if (m->m_next)
            RTMemFree(buf);
    }

no_sent:
    /*
     * Reconstructed tail (the original snippet was truncated here): buffer
     * whatever did not make it onto the wire so it arrives in order.  The
     * original also bumps per-outcome statistics counters, omitted here.
     */
    if (ret <= 0)
        sbappendsb(pData, &so->so_rcv, m);  /* nothing was written */
    else if (ret != mlen)
    {
        m_adj(m, ret);                      /* trim the part already sent */
        sbappendsb(pData, &so->so_rcv, m);
    }

done:
    STAM_PROFILE_STOP(&pData->StatIOSBAppend_pf, a);
    m_freem(pData, m);
}
Example #6
/**
 * Adds a branch pair to the lookup cache of the particular branch instruction.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   pJumpTableGC        Pointer to branch instruction lookup cache
 * @param   pBranchTarget       Original branch target
 * @param   pRelBranchPatch     Relative duplicated function address
 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);

#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        uint32_t i;

        for (i = 0; i < pJumpTable->nrSlots; i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC    = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert(!(pJumpTable->nrSlots & (pJumpTable->nrSlots - 1))); /* the & (nrSlots - 1) wrapping below requires a power-of-two slot count */

        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC    = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
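The replacement path relies on ulInsertPos & (nrSlots - 1) wrapping around, which only behaves as a modulo when nrSlots is a power of two. A standalone sketch of the same fill-then-round-robin policy, with illustrative names and a power-of-two slot count assumed:

#include <stdint.h>
#include <assert.h>

#define CACHE_SLOTS 8   /* must be a power of two for the masking below */

typedef struct CACHEENTRY { uint32_t uKey; uint32_t uValue; } CACHEENTRY;
typedef struct CACHE
{
    uint32_t   cUsed;    /* number of occupied slots */
    uint32_t   iInsert;  /* round-robin replacement cursor */
    CACHEENTRY aSlots[CACHE_SLOTS];
} CACHE;

static void cacheInsert(CACHE *pCache, uint32_t uKey, uint32_t uValue)
{
    if (pCache->cUsed < CACHE_SLOTS)
    {
        /* Fill phase: take the first free slot (key 0 means free, like pInstrGC == 0 above). */
        for (uint32_t i = 0; i < CACHE_SLOTS; i++)
            if (pCache->aSlots[i].uKey == 0)
            {
                pCache->aSlots[i].uKey   = uKey;
                pCache->aSlots[i].uValue = uValue;
                pCache->cUsed++;
                return;
            }
        assert(!"cUsed is out of sync with the slot contents");
    }
    /* Replacement phase: overwrite slots in round-robin order. */
    pCache->aSlots[pCache->iInsert].uKey   = uKey;
    pCache->aSlots[pCache->iInsert].uValue = uValue;
    pCache->iInsert = (pCache->iInsert + 1) & (CACHE_SLOTS - 1);
}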
Example #7
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect           The critsect.
 * @param   hNativeSelf         The native thread handle.
 * @param   pSrcPos             The source position of the caller (lock validator).
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
# ifdef IN_RING3
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
# else
    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
# endif

    /*
     * The wait loop.
     */
    PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef IN_RING3
#  ifdef PDMCRITSECT_STRICT
    RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
#  else
    RTTHREAD        hThreadSelf = RTThreadSelf();
#  endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
                                                      !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
                                                      RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# elif defined(IN_RING3)
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef IN_RING3
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
# endif

        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
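The wait loop above has a common shape: block on the semaphore, retry when the wait is merely interrupted, and bail out only on destruction or success. The same retry-on-interrupt loop with plain POSIX semaphores, as a minimal sketch:

#include <errno.h>
#include <semaphore.h>

/* Block on pSem, retrying when a signal interrupts the wait.
   Returns 0 on success, -1 (errno set) on a real failure. */
static int semWaitNoResume(sem_t *pSem)
{
    for (;;)
    {
        if (sem_wait(pSem) == 0)
            return 0;
        if (errno != EINTR)     /* only interruption is worth retrying */
            return -1;
    }
}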
Example #8
/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The current CPU.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
    /* Sample the reason for refusing. */
    if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
       STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
    else if (!pVCpu->tm.s.fTSCTicking)
       STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
    else if (!pVM->tm.s.fTSCUseRealTSC)
    {
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
           if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
           else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
           else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
           else
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
        }
        else if (!pVM->tm.s.fVirtualSyncTicking)
           STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
        else if (pVM->tm.s.fVirtualWarpDrive)
           STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
    }
}
Example #9
/**
 * Record why we refused to use offsetted TSC.
 *
 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
{
    /* Sample the reason for refusing. */
    if (pVM->tm.s.enmTSCMode != TMTSCMODE_DYNAMIC)
       STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
    else if (!pVCpu->tm.s.fTSCTicking)
       STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
    else if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    {
        if (pVM->tm.s.fVirtualSyncCatchUp)
        {
           if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
           else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
           else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
           else
               STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
        }
        else if (!pVM->tm.s.fVirtualSyncTicking)
           STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
        else if (pVM->tm.s.fVirtualWarpDrive)
           STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
    }
}
Example #10
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own current IDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode   CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) trpmRCGuestIDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU      pVCpu = VMMGetCpu0(pVM);
    uint16_t    cbIDT;
    RTGCPTR     GCPtrIDT    = (RTGCPTR)CPUMGetGuestIDTR(pVCpu, &cbIDT);
#ifdef VBOX_STRICT
    RTGCPTR     GCPtrIDTEnd = (RTGCPTR)((RTGCUINTPTR)GCPtrIDT + cbIDT + 1);
#endif
    uint32_t    iGate       = ((RTGCUINTPTR)pvFault - (RTGCUINTPTR)GCPtrIDT)/sizeof(VBOXIDTE);

    AssertMsg(offRange < (uint32_t)cbIDT+1, ("pvFault=%RGv GCPtrIDT=%RGv-%RGv pvRange=%RGv\n", pvFault, GCPtrIDT, GCPtrIDTEnd, pvRange));
    Assert((RTGCPTR)(RTRCUINTPTR)pvRange == GCPtrIDT);
    NOREF(uErrorCode);

#if 0
    /* Note! this causes problems in Windows XP as instructions following the update can be dangerous (str eax has been seen) */
    /* Note! not going back to ring 3 could make the code scanner miss them. */
    /* Check if we can handle the write here. */
    if (     iGate != 3                                         /* Gate 3 is handled differently; could do it here as well, but let ring 3 handle this case for now. */
        &&  !ASMBitTest(&pVM->trpm.s.au32IdtPatched[0], iGate)) /* Passthru gates need special attention too. */
    {
        uint32_t cb;
        int rc = EMInterpretInstructionEx(pVM, pVCpu, pRegFrame, pvFault, &cb);
        if (RT_SUCCESS(rc) && cb)
        {
            uint32_t iGate1 = (offRange + cb - 1)/sizeof(VBOXIDTE);

            Log(("trpmRCGuestIDTWriteHandler: write to gate %x (%x) offset %x cb=%d\n", iGate, iGate1, offRange, cb));

            trpmClearGuestTrapHandler(pVM, iGate);
            if (iGate != iGate1)
                trpmClearGuestTrapHandler(pVM, iGate1);

            STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTHandled);
            return VINF_SUCCESS;
        }
    }
#else
    NOREF(iGate);
#endif

    Log(("trpmRCGuestIDTWriteHandler: eip=%RGv write to gate %x offset %x\n", pRegFrame->eip, iGate, offRange));

    /** @todo Check which IDT entry and keep the update cost low in TRPMR3SyncIDT() and CSAMCheckGates(). */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);

    STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTFault);
    return VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT;
}
Example #11
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC value.
 * @param   pVCpu           The VMCPU to operate on.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.fTSCVirtualized)
        {
            if (pVM->tm.s.fTSCUseRealTSC)
                u64 = ASMReadTSC();
            else
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            u64 -= pVCpu->tm.s.offTSCRawSrc;
        }
        else
            u64 = ASMReadTSC();

        /* Never return a value lower than what the guest has already seen. */
        if (u64 < pVCpu->tm.s.u64TSCLastSeen)
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
Example #12
/* raise irq to CPU if necessary. must be called every time the active
   irq may change */
static int pic_update_irq(PDEVPIC pThis)
{
    PicState *pics = &pThis->aPics[0];
    int irq2, irq;

    /* first look at slave pic */
    irq2 = pic_get_irq(&pics[1]);
    Log(("pic_update_irq irq2=%d\n", irq2));
    if (irq2 >= 0) {
        /* if irq request by slave pic, signal master PIC */
        pic_set_irq1(&pics[0], 2, 1);
    } else {
        /* If not, clear the IR on the master PIC. */
        pic_set_irq1(&pics[0], 2, 0);
    }
    /* look at requested irq */
    irq = pic_get_irq(&pics[0]);
    if (irq >= 0)
    {
        /* If irq 2 is pending on the master pic, then there must be one pending on the slave pic too! Otherwise we'll get
         * spurious slave interrupts in picGetInterrupt.
         */
        if (irq != 2 || irq2 != -1)
        {
#if defined(DEBUG_PIC)
            int i;
            for (i = 0; i < 2; i++) {
                Log(("pic%d: imr=%x irr=%x padd=%d\n",
                    i, pics[i].imr, pics[i].irr,
                    pics[i].priority_add));
            }
            Log(("pic: cpu_interrupt\n"));
#endif
            pThis->CTX_SUFF(pPicHlp)->pfnSetInterruptFF(pThis->CTX_SUFF(pDevIns));
        }
        else
        {
            STAM_COUNTER_INC(&pThis->StatClearedActiveIRQ2);
            Log(("pic_update_irq: irq 2 is active, but no interrupt is pending on the slave pic!!\n"));
            /* Clear it here, so lower priority interrupts can still be dispatched. */

            /* if this was the only pending irq, then we must clear the interrupt ff flag */
            pThis->CTX_SUFF(pPicHlp)->pfnClearInterruptFF(pThis->CTX_SUFF(pDevIns));

            /** @note Is this correct? */
            pics[0].irr &= ~(1 << 2);

            /* Call ourselves again just in case other interrupts are pending */
            return pic_update_irq(pThis);
        }
    }
    else
    {
        Log(("pic_update_irq: no interrupt is pending!!\n"));

        /* we must clear the interrupt ff flag */
        pThis->CTX_SUFF(pPicHlp)->pfnClearInterruptFF(pThis->CTX_SUFF(pDevIns));
    }
    return VINF_SUCCESS;
}
Example #13
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC value.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   fCheckTimers    Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            u64 = SUPReadTsc();
        else
            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
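The u64TSCLastSeen handling is a per-vCPU monotonicity clamp: a fresh reading either raises the high-water mark, or the mark is nudged forward and returned instead of the stale sample. The same clamp for any sampled clock, as a single-threaded sketch (the real code relies on EMT serialization):

#include <stdint.h>

typedef struct MONOCLOCK
{
    uint64_t u64LastSeen;   /* highest value handed out so far */
} MONOCLOCK;

/* Return a value based on u64Now that never goes backwards. */
static uint64_t monoClockRead(MONOCLOCK *pClock, uint64_t u64Now)
{
    if (u64Now > pClock->u64LastSeen)
        pClock->u64LastSeen = u64Now;       /* normal case: time advanced */
    else
        pClock->u64LastSeen += 64;          /* underflow: take a small forward step */
    return pClock->u64LastSeen;
}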
Example #14
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t    offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
            else
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVM->tm.s.u64LastPausedTSC;

            /* Calculate the offset for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
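The offset bookkeeping here is plain arithmetic: the first vCPU to resume picks an offset so that (raw source - offset) continues exactly at u64LastPausedTSC, then publishes the delta for the other vCPUs to add. Reduced to a standalone sketch with illustrative names:

#include <stdint.h>

/* Resume cCpus counters so that (raw - offset) continues at u64LastPaused.
   paOffsets[i] is the per-CPU offset subtracted from the raw source. */
static void resumeOffsets(uint64_t u64RawNow, uint64_t u64LastPaused,
                          uint64_t *paOffsets, unsigned cCpus)
{
    uint64_t const offOld = paOffsets[0];
    paOffsets[0] = u64RawNow - u64LastPaused;       /* first CPU: exact continuation */
    uint64_t const offPause = paOffsets[0] - offOld;
    for (unsigned i = 1; i < cCpus; i++)
        paOffsets[i] += offPause;                   /* others: apply the same delta */
}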
Example #15
/**
 * Converts a pool address to a physical address.
 * The specified allocation type must match with the address.
 *
 * @returns Physical address.
 * @returns NIL_RTHCPHYS if not found or eType is not matching.
 * @param   pPool   Pointer to the page pool.
 * @param   pv      The address to convert.
 * @thread  The Emulation Thread.
 */
RTHCPHYS mmPagePoolPtr2Phys(PMMPAGEPOOL pPool, void *pv)
{
#ifdef IN_RING3
    VM_ASSERT_EMT(pPool->pVM);
#endif
    /*
     * Lookup the virtual address.
     */
    PMMPPLOOKUPHCPTR pLookup = (PMMPPLOOKUPHCPTR)RTAvlPVGetBestFit(&pPool->pLookupVirt, pv, false);
    if (pLookup)
    {
        unsigned iPage = ((char *)pv - (char *)pLookup->pSubPool->pvPages) >> PAGE_SHIFT;
        if (iPage < pLookup->pSubPool->cPages)
        {
            /*
             * Convert the virtual address to a physical address.
             */
            STAM_COUNTER_INC(&pPool->cToPhysCalls);
            AssertMsg(     pLookup->pSubPool->paPhysPages[iPage].Phys
                      &&   !(pLookup->pSubPool->paPhysPages[iPage].Phys & PAGE_OFFSET_MASK),
                      ("Phys=%#x\n", pLookup->pSubPool->paPhysPages[iPage].Phys));
            AssertMsg((uintptr_t)pLookup->pSubPool == pLookup->pSubPool->paPhysPages[iPage].uReserved,
                      ("pSubPool=%p uReserved=%p\n", pLookup->pSubPool, pLookup->pSubPool->paPhysPages[iPage].uReserved));
            return pLookup->pSubPool->paPhysPages[iPage].Phys + ((uintptr_t)pv & PAGE_OFFSET_MASK);
        }
    }
    return NIL_RTHCPHYS;
}
Example #16
/*
 * Copy the data from m into sb
 * The caller is responsible to make sure there's enough room
 */
void
sbappendsb(PNATState pData, struct sbuf *sb, struct mbuf *m)
{
    int len, n, nn;
#ifndef VBOX_WITH_STATISTICS
    NOREF(pData);
#endif

    len = m_length(m, NULL);

    STAM_COUNTER_INC(&pData->StatIOSBAppendSB);
    if (sb->sb_wptr < sb->sb_rptr)
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_l_r);
        n = sb->sb_rptr - sb->sb_wptr;
        if (n > len)
            n = len;
        m_copydata(m, 0, n, sb->sb_wptr);
    }
    else
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_ge_r);
        /* Do the right edge first */
        n = sb->sb_data + sb->sb_datalen - sb->sb_wptr;
        if (n > len)
            n = len;
        m_copydata(m, 0, n, sb->sb_wptr);
        len -= n;
        if (len)
        {
            /* Now the left edge */
            nn = sb->sb_rptr - sb->sb_data;
            if (nn > len)
                nn = len;
            m_copydata(m, n, nn, sb->sb_data);
            n += nn;
        }
    }

    sb->sb_cc += n;
    sb->sb_wptr += n;
    if (sb->sb_wptr >= sb->sb_data + sb->sb_datalen)
    {
        STAM_COUNTER_INC(&pData->StatIOSBAppendSB_w_alter);
        sb->sb_wptr -= sb->sb_datalen;
    }
}
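sbappendsb() is the producer half of a circular buffer: when the write pointer is at or ahead of the read pointer, the free space splits into a right edge (up to the end of storage) and a left edge (from the start up to the read pointer), so the copy takes at most two passes. The same split in standalone form, assuming the caller has checked for room just as sbappendsb() does:

#include <stddef.h>
#include <string.h>

typedef struct RINGBUF
{
    char  *pbData;      /* start of storage */
    size_t cbData;      /* storage size */
    size_t offRead;     /* read position */
    size_t offWrite;    /* write position */
} RINGBUF;

static void ringBufAppend(RINGBUF *pBuf, const char *pbSrc, size_t cb)
{
    if (pBuf->offWrite < pBuf->offRead)
        memcpy(pBuf->pbData + pBuf->offWrite, pbSrc, cb);   /* one straight run */
    else
    {
        /* Do the right edge first... */
        size_t cbRight = pBuf->cbData - pBuf->offWrite;
        if (cbRight > cb)
            cbRight = cb;
        memcpy(pBuf->pbData + pBuf->offWrite, pbSrc, cbRight);
        /* ...then wrap around for the left edge. */
        if (cb > cbRight)
            memcpy(pBuf->pbData, pbSrc + cbRight, cb - cbRight);
    }
    pBuf->offWrite += cb;
    if (pBuf->offWrite >= pBuf->cbData)                     /* wrap the write pointer */
        pBuf->offWrite -= pBuf->cbData;
}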
Example #17
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode   CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    /** @todo To be implemented. */
    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));

    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_SELM_SYNC_LDT);
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
}
Example #18
/**
 * Frees a page from the page pool.
 *
 * @param   pPool   Pointer to the page pool.
 * @param   pv      Pointer to the page to free.
 *                  I.e. pointer returned by mmR3PagePoolAlloc().
 * @thread  The Emulation Thread.
 */
DECLINLINE(void) mmR3PagePoolFree(PMMPAGEPOOL pPool, void *pv)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cFreeCalls);

    /*
     * Lookup the virtual address.
     */
    PMMPPLOOKUPHCPTR pLookup = (PMMPPLOOKUPHCPTR)RTAvlPVGetBestFit(&pPool->pLookupVirt, pv, false);
    if (    !pLookup
        ||  (uint8_t *)pv >= (uint8_t *)pLookup->pSubPool->pvPages + (pLookup->pSubPool->cPages << PAGE_SHIFT)
        )
    {
        STAM_COUNTER_INC(&pPool->cErrors);
        AssertMsgFailed(("invalid pointer %p\n", pv));
        return;
    }

    /*
     * Free the page.
     */
    PMMPAGESUBPOOL  pSubPool = pLookup->pSubPool;
    /* clear bitmap bit */
    const unsigned  iPage = ((uint8_t *)pv - (uint8_t *)pSubPool->pvPages) >> PAGE_SHIFT;
#ifdef USE_INLINE_ASM_BIT_OPS
    Assert(ASMBitTest(pSubPool->auBitmap, iPage));
    ASMBitClear(pSubPool->auBitmap, iPage);
#else
    unsigned    iBit   = iPage % (sizeof(pSubPool->auBitmap[0]) * 8);
    unsigned    iIndex = iPage / (sizeof(pSubPool->auBitmap[0]) * 8);
    pSubPool->auBitmap[iIndex] &= ~(1 << iBit);
#endif
    /* update stats. */
    pSubPool->cPagesFree++;
#ifdef VBOX_WITH_STATISTICS
    pPool->cFreePages++;
#endif
    if (pSubPool->cPagesFree == 1)
    {
        pSubPool->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree = pSubPool;
    }
}
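The #else branch spells out what the ASMBitClear() fast path does: find the word that holds bit iPage, then mask the bit out of it. As standalone helpers over a uint32_t bitmap (using an unsigned constant so bit 31 is safe to shift):

#include <stdint.h>

#define BITS_PER_WORD  (sizeof(uint32_t) * 8)

static void bitmapClear(uint32_t *pauBitmap, unsigned iBit)
{
    pauBitmap[iBit / BITS_PER_WORD] &= ~(UINT32_C(1) << (iBit % BITS_PER_WORD));
}

static int bitmapTest(const uint32_t *pauBitmap, unsigned iBit)
{
    return (pauBitmap[iBit / BITS_PER_WORD] >> (iBit % BITS_PER_WORD)) & 1;
}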
Example #19
/** @note if an interrupt line state changes from unmasked to masked, then it must be deactivated when currently pending! */
static void pic_update_imr(PDEVPIC pThis, PPICSTATE pPic, uint8_t val)
{
    int       irq, intno;
    PPICSTATE pActivePIC;

    /* Query the current pending irq, if any. */
    pActivePIC = &pThis->aPics[0];
    intno = irq = pic_get_irq(pActivePIC);
    if (irq == 2)
    {
        pActivePIC = &pThis->aPics[1];
        irq = pic_get_irq(pActivePIC);
        intno = irq + 8;
    }

    /* Update IMR */
    Log(("pic_update_imr: pic%u %#x -> %#x\n", pPic->idxPic, pPic->imr, val));
    pPic->imr = val;

    /* If an interrupt is pending and now masked, then clear the FF flag. */
    if (    irq >= 0
        &&  ((1 << irq) & ~pActivePIC->imr) == 0)
    {
        Log(("pic_update_imr: pic0: elcr=%x last_irr=%x irr=%x imr=%x isr=%x irq_base=%x\n",
            pThis->aPics[0].elcr, pThis->aPics[0].last_irr, pThis->aPics[0].irr, pThis->aPics[0].imr, pThis->aPics[0].isr, pThis->aPics[0].irq_base));
        Log(("pic_update_imr: pic1: elcr=%x last_irr=%x irr=%x imr=%x isr=%x irq_base=%x\n",
            pThis->aPics[1].elcr, pThis->aPics[1].last_irr, pThis->aPics[1].irr, pThis->aPics[1].imr, pThis->aPics[1].isr, pThis->aPics[1].irq_base));

        /* Clear pending IRQ 2 on master controller in case of slave interrupt. */
        /** @todo Is this correct? */
        if (intno > 7)
        {
            pThis->aPics[0].irr &= ~(1 << 2);
            STAM_COUNTER_INC(&pThis->StatClearedActiveSlaveIRQ);
        }
        else
            STAM_COUNTER_INC(&pThis->StatClearedActiveMasterIRQ);

        Log(("pic_update_imr: clear pending interrupt %d\n", intno));
        pThis->CTX_SUFF(pPicHlp)->pfnClearInterruptFF(pThis->CTX_SUFF(pDevIns));
    }
}
Example #20
/**
 * IN <AL|AX|EAX>, <DX|imm16>
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VINF_IOM_R3_IOPORT_READ     Defer the read to ring-3. (R0/GC only)
 * @retval  VINF_EM_RAW_GUEST_TRAP      The exception was left pending. (TRPMRaiseXcptErr)
 * @retval  VINF_TRPM_XCPT_DISPATCHED   The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
 * @retval  VINF_EM_RESCHEDULE_REM      The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 */
static VBOXSTRICTRC iomRCInterpretIN(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstIn);
    Assert(pCpu->Param2.fUse & (DISUSE_IMMEDIATE8 | DISUSE_REG_GEN16));
    uint16_t u16Port = pCpu->Param2.fUse & DISUSE_REG_GEN16 ? pRegFrame->dx : (uint16_t)pCpu->Param2.uValue;

    Assert(pCpu->Param1.fUse & (DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8));
    uint8_t cbValue = pCpu->Param1.fUse & DISUSE_REG_GEN32 ? 4 : pCpu->Param1.fUse & DISUSE_REG_GEN16 ? 2 : 1;

    return IEMExecDecodedIn(pVCpu, pCpu->cbInstr, u16Port, cbValue);
}
Example #21
/**
 * Check if we've scanned this instruction before. If true, then we can emulate
 * it instead of returning to ring 3.
 *
 * Using a simple array here as there are generally few mov crx instructions and
 * a tree lookup is likely to be more expensive (it would also have to be offset based).
 *
 * @returns boolean
 * @param   pVM         Pointer to the VM.
 * @param   GCPtr       GC pointer of the instruction.
 */
VMM_INT_DECL(bool) CSAMIsKnownDangerousInstr(PVM pVM, RTRCUINTPTR GCPtr)
{
    for (uint32_t i = 0; i < pVM->csam.s.cDangerousInstr; i++)
    {
        if (pVM->csam.s.aDangerousInstr[i] == (RTRCPTR)GCPtr)
        {
            STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheHit);
            return true;
        }
    }
    /* Record that we're about to process it in ring 3. */
    pVM->csam.s.aDangerousInstr[pVM->csam.s.iDangerousInstr++] = (RTRCPTR)GCPtr;
    pVM->csam.s.iDangerousInstr &= CSAM_MAX_DANGR_INSTR_MASK;

    if (++pVM->csam.s.cDangerousInstr > CSAM_MAX_DANGR_INSTR)
        pVM->csam.s.cDangerousInstr = CSAM_MAX_DANGR_INSTR;

    STAM_COUNTER_INC(&pVM->csam.s.StatInstrCacheMiss);
    return false;
}
Example #22
/**
 * Reads an I/O port register.
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VINF_IOM_R3_IOPORT_READ     Defer the read to ring-3. (R0/GC only)
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   Port        The port to read.
 * @param   pu32Value   Where to store the value read.
 * @param   cbValue     The size of the register to read in bytes. 1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
{
/** @todo should initialize *pu32Value here because it can happen that some
 *        handler is buggy and doesn't handle all cases. */
    /* Take the IOM lock before performing any device I/O. */
    int rc2 = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_R3_IOPORT_READ;
#endif
    AssertRC(rc2);
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyIOPortRead(pVM, Port, cbValue);
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Get the statistics record.
     */
    PIOMIOPORTSTATS  pStats = pVCpu->iom.s.CTX_SUFF(pStatsLastRead);
    if (!pStats || pStats->Core.Key != Port)
    {
        pStats = (PIOMIOPORTSTATS)RTAvloIOPortGet(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortStatTree, Port);
        if (pStats)
            pVCpu->iom.s.CTX_SUFF(pStatsLastRead) = pStats;
    }
#endif

    /*
     * Get handler for current context.
     */
    CTX_SUFF(PIOMIOPORTRANGE) pRange = pVCpu->iom.s.CTX_SUFF(pRangeLastRead);
    if (    !pRange
        ||   (unsigned)Port - (unsigned)pRange->Port >= (unsigned)pRange->cPorts)
    {
        pRange = iomIOPortGetRange(pVM, Port);
        if (pRange)
            pVCpu->iom.s.CTX_SUFF(pRangeLastRead) = pRange;
    }
    MMHYPER_RC_ASSERT_RCPTR(pVM, pRange);
    if (pRange)
    {
        /*
         * Found a range, get the data in case we leave the IOM lock.
         */
        PFNIOMIOPORTIN  pfnInCallback = pRange->pfnInCallback;
#ifndef IN_RING3
        if (!pfnInCallback)
        {
            STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); });
            IOM_UNLOCK_SHARED(pVM);
            return VINF_IOM_R3_IOPORT_READ;
        }
Example #23
/**
 * Mark a page as scanned/not scanned
 *
 * @note We always mark the page as scanned, even if we haven't completely done so.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address (not necessarily aligned)
 * @param   fScanned    Mark as scanned or not scanned
 *
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
       Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if (!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperRCToR3 failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return VERR_INTERNAL_ERROR; /* rc is VINF_SUCCESS here; don't return success on failure */
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperR3ToRC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return VERR_INTERNAL_ERROR; /* rc is VINF_SUCCESS here; don't return success on failure */
        }
#endif
    }
    if (fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}
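The pgdir/bit pair is just the guest address split into a 4MB directory slot and a 4KB page index within it, using the same x86 constants. The decomposition in isolation (the helper name is illustrative):

#include <stdint.h>

#define X86_PAGE_4K_SHIFT       12
#define X86_PAGE_4M_SHIFT       22
#define X86_PAGE_4M_OFFSET_MASK UINT32_C(0x003fffff)

/* Split a 32-bit guest address into a 4MB directory index and the
   4KB page bit within that directory. */
static void csamSplitPageAddr(uint32_t uAddr, unsigned *piPgDir, unsigned *piBit)
{
    *piPgDir = uAddr >> X86_PAGE_4M_SHIFT;                              /* top 10 bits */
    *piBit   = (uAddr & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;  /* next 10 bits */
}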
Example #24
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t    offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            switch (pVM->tm.s.enmTSCMode)
            {
                case TMTSCMODE_REAL_TSC_OFFSET:
                    pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_VIRT_TSC_EMULATED:
                case TMTSCMODE_DYNAMIC:
                    pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                             - pVM->tm.s.u64LastPausedTSC;
                    break;
                case TMTSCMODE_NATIVE_API:
                {
#ifndef IN_RC
                    int rc = NEMHCResumeCpuTickOnAll(pVM, pVCpu, pVM->tm.s.u64LastPausedTSC);
                    AssertRCReturn(rc, rc);
                    pVCpu->tm.s.offTSCRawSrc = offTSCRawSrcOld = 0;
#else
                    AssertFailedReturn(VERR_INTERNAL_ERROR_2);
#endif
                    break;
                }
                default:
                    AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE);
            }

            /* Calculate the offset addendum for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
Example #25
/**
 * Emulates RDTSC for the \#GP handler.
 *
 * @returns VINF_SUCCESS or VINF_EM_RAW_EMULATE_INSTR.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pRegFrame   Pointer to the register frame for the trap.
 *                      This will be updated on successful return.
 */
DECLINLINE(int) trpmGCTrap0dHandlerRdTsc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
{
    STAM_COUNTER_INC(&pVM->trpm.s.StatTrap0dRdTsc);

    if (CPUMGetGuestCR4(pVCpu) & X86_CR4_TSD)
        return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_EMULATE_INSTR, pRegFrame); /* will trap (optimize later). */

    uint64_t uTicks = TMCpuTickGet(pVCpu);
    pRegFrame->eax = uTicks;
    pRegFrame->edx = uTicks >> 32;
    pRegFrame->eip += 2;
    return trpmGCExitTrap(pVM, pVCpu, VINF_SUCCESS, pRegFrame);
}
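The two assignments implement RDTSC's EDX:EAX convention: the low half of the 64-bit tick count goes into eax, the high half into edx. In isolation (helper name illustrative):

#include <stdint.h>

static void splitTicksEdxEax(uint64_t uTicks, uint32_t *pEax, uint32_t *pEdx)
{
    *pEax = (uint32_t)uTicks;           /* low 32 bits */
    *pEdx = (uint32_t)(uTicks >> 32);   /* high 32 bits */
}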
Example #26
/**
 * Convert a page in the page pool to a HC physical address.
 * This works for pages allocated by MMR3PageAlloc(), MMR3PageAllocPhys()
 * and MMR3PageAllocLow().
 *
 * @returns Physical address for the specified page table.
 * @param   pVM         The cross context VM structure.
 * @param   pvPage      The page whose physical address we query.
 * @thread  The Emulation Thread.
 */
VMMDECL(RTHCPHYS) MMPage2Phys(PVM pVM, void *pvPage)
{
    RTHCPHYS HCPhys = mmPagePoolPtr2Phys(pVM->mm.s.CTX_SUFF(pPagePool), pvPage);
    if (HCPhys == NIL_RTHCPHYS)
    {
        HCPhys = mmPagePoolPtr2Phys(pVM->mm.s.CTX_SUFF(pPagePoolLow), pvPage);
        if (HCPhys == NIL_RTHCPHYS)
        {
            STAM_COUNTER_INC(&pVM->mm.s.CTX_SUFF(pPagePool)->cErrors);
            AssertMsgFailed(("Invalid pvPage=%p specified\n", pvPage));
        }
    }
    return HCPhys;
}
Example #27
/**
 * Convert physical address of a page to a HC virtual address.
 * This works for pages allocated by MMR3PageAlloc(), MMR3PageAllocPhys()
 * and MMR3PageAllocLow().
 *
 * @returns Pointer to the page at that physical address.
 * @param   pVM         The cross context VM structure.
 * @param   HCPhysPage  The physical address of a page.
 * @thread  The Emulation Thread.
 */
VMMDECL(void *) MMPagePhys2Page(PVM pVM, RTHCPHYS HCPhysPage)
{
    void *pvPage = mmPagePoolPhys2Ptr(pVM->mm.s.CTX_SUFF(pPagePool), HCPhysPage);
    if (!pvPage)
    {
        pvPage = mmPagePoolPhys2Ptr(pVM->mm.s.CTX_SUFF(pPagePoolLow), HCPhysPage);
        if (!pvPage)
        {
            STAM_COUNTER_INC(&pVM->mm.s.CTX_SUFF(pPagePool)->cErrors);
            AssertMsg(pvPage, ("Invalid HCPhysPage=%RHp specified\n", HCPhysPage));
        }
    }
    return pvPage;
}
Example #28
/**
 * Raises an interrupt.
 *
 * @returns VBox status code.
 * @param   pState      The device state structure.
 * @param   rcBusy      Status code to return when the critical section is busy.
 * @param   u8IntCause  Interrupt cause bit mask to set in PCI ISR port.
 */
int vpciRaiseInterrupt(VPCISTATE *pState, int rcBusy, uint8_t u8IntCause)
{
    // int rc = vpciCsEnter(pState, rcBusy);
    // if (RT_UNLIKELY(rc != VINF_SUCCESS))
    //     return rc;

    STAM_COUNTER_INC(&pState->StatIntsRaised);
    LogFlow(("%s vpciRaiseInterrupt: u8IntCause=%x\n",
             INSTANCE(pState), u8IntCause));

    pState->uISR |= u8IntCause;
    PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
    // vpciCsLeave(pState);
    return VINF_SUCCESS;
}
Example #29
selmGuestLDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
    Log(("selmGuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);

    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
#  ifdef IN_RING3
    return VINF_PGM_HANDLER_DO_DEFAULT;
#  else
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
#  endif
}
Example #30
/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (    cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}
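
The fast path only calls the device's read callback directly for accesses the range declared it can take: an aligned dword always qualifies, and an aligned qword qualifies when the range advertises dword/qword mode; everything else is routed through iomMMIODoComplicatedRead(). The natural-alignment test at the heart of it, standalone:

#include <stdint.h>
#include <stdbool.h>

/* True if an access of cb bytes (a power of two: 1, 2, 4, 8) at
   offset off is naturally aligned. */
static bool isNaturallyAligned(uint64_t off, unsigned cb)
{
    return (off & (uint64_t)(cb - 1)) == 0;
}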