Example #1
/**
 * Locks PDM.
 * This might call back to Ring-3 in order to deal with lock contention in GC and R3.
 *
 * @param   pVM     Pointer to the VM.
 */
void pdmLock(PVM pVM)
{
#ifdef IN_RING3
    int rc = PDMCritSectEnter(&pVM->pdm.s.CritSect, VERR_IGNORED);
#else
    int rc = PDMCritSectEnter(&pVM->pdm.s.CritSect, VERR_GENERAL_FAILURE);
    if (rc == VERR_GENERAL_FAILURE)
        rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PDM_LOCK, 0);
#endif
    AssertRC(rc);
}
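The matching release helper is not shown in this listing; presumably it is just a thin wrapper around PDMCritSectLeave, roughly like this sketch (pdmUnlock here is an assumption, not taken from the examples above):

/* Sketch of the assumed counterpart: releases the PDM lock taken by pdmLock(). */
void pdmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pdm.s.CritSect);
}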
Example #2
static void pdmNsFilterUnlink(PPDMNSFILTER pFilter)
{
    PPDMNSBWGROUP pBwGroup = pFilter->pBwGroupR3;
    /*
     * We need to make sure we hold the shaper lock since pdmNsBwGroupXmitPending()
     * does not hold the bandwidth group lock while iterating over the list
     * of group's filters.
     */
    AssertPtr(pBwGroup);
    AssertPtr(pBwGroup->pShaper);
    Assert(RTCritSectIsOwner(&pBwGroup->pShaper->cs));
    int rc = PDMCritSectEnter(&pBwGroup->cs, VERR_SEM_BUSY); AssertRC(rc);

    if (pFilter == pBwGroup->pFiltersHead)
        pBwGroup->pFiltersHead = pFilter->pNext;
    else
    {
        PPDMNSFILTER pPrev = pBwGroup->pFiltersHead;
        while (   pPrev
               && pPrev->pNext != pFilter)
            pPrev = pPrev->pNext;

        AssertPtr(pPrev);
        pPrev->pNext = pFilter->pNext;
    }

    rc = PDMCritSectLeave(&pBwGroup->cs); AssertRC(rc);
}
Example #3
/* Event rate throttling timer to emulate the auxiliary device sampling rate.
 */
static DECLCALLBACK(void) ps2mThrottleTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    PPS2M       pThis = (PS2M *)pvUser; NOREF(pDevIns);
    uint32_t    uHaveEvents;

    /* Grab the lock to avoid races with PutEvent(). */
    int rc = PDMCritSectEnter(pThis->pCritSectR3, VERR_SEM_BUSY);
    AssertReleaseRC(rc);

#if 0
    /* If the input queue is not empty, restart the timer. */
#else
    /* If more movement is accumulated, report it and restart the timer. */
    uHaveEvents = pThis->iAccumX | pThis->iAccumY | pThis->iAccumZ | pThis->fAccumB;
    LogFlowFunc(("Have%s events\n", uHaveEvents ? "" : " no"));

    if (uHaveEvents)
#endif
    {
        ps2mReportAccumulatedEvents(pThis);
        TMTimerSetMillies(pThis->CTX_SUFF(pThrottleTimer), pThis->uThrottleDelay);
    }
    else
        pThis->fThrottleActive = false;

    PDMCritSectLeave(pThis->pCritSectR3);
}
Example #4
/* Event rate throttling timer to emulate the auxiliary device sampling rate.
 */
static DECLCALLBACK(void) ps2mThrottleTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    RT_NOREF2(pDevIns, pTimer);
    PPS2M       pThis = (PS2M *)pvUser;
    uint32_t    uHaveEvents;

    /* Grab the lock to avoid races with PutEvent(). */
    int rc = PDMCritSectEnter(pThis->pCritSectR3, VERR_SEM_BUSY);
    AssertReleaseRC(rc);

#if 0
    /* If the input queue is not empty, restart the timer. */
#else
    /* If more movement is accumulated, report it and restart the timer. */
    uHaveEvents = pThis->iAccumX | pThis->iAccumY | pThis->iAccumZ | (pThis->fCurrB != pThis->fReportedB);
    LogFlowFunc(("Have%s events\n", uHaveEvents ? "" : " no"));

    if (uHaveEvents)
#endif
    {
        /* Report accumulated data, poke the KBC, and start the timer. */
        ps2mReportAccumulatedEvents(pThis, (GeneriQ *)&pThis->evtQ, true);
        KBCUpdateInterrupts(pThis->pParent);
        TMTimerSetMillies(pThis->CTX_SUFF(pThrottleTimer), pThis->uThrottleDelay);
    }
    else
        pThis->fThrottleActive = false;

    PDMCritSectLeave(pThis->pCritSectR3);
}
Example #5
/**
 * Adjusts the maximum rate for the bandwidth group.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   pszBwGroup      Name of the bandwidth group to attach to.
 * @param   cbPerSecMax     Maximum number of bytes per second to be transmitted.
 */
VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PUVM pUVM, const char *pszBwGroup, uint64_t cbPerSecMax)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
    LOCK_NETSHAPER_RETURN(pShaper);

    int           rc;
    PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pszBwGroup);
    if (pBwGroup)
    {
        rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            pdmNsBwGroupSetLimit(pBwGroup, cbPerSecMax);

            /* Drop extra tokens */
            if (pBwGroup->cbTokensLast > pBwGroup->cbBucket)
                pBwGroup->cbTokensLast = pBwGroup->cbBucket;

            int rc2 = PDMCritSectLeave(&pBwGroup->Lock); AssertRC(rc2);
        }
    }
    else
        rc = VERR_NOT_FOUND;

    UNLOCK_NETSHAPER(pShaper);
    return rc;
}
Example #6
/**
 * @interface_method_impl{PDMDRVREG,pfnDetach}
 */
static DECLCALLBACK(void) drvR3NetShaperDetach(PPDMDRVINS pDrvIns, uint32_t fFlags)
{
    PDRVNETSHAPER pThis = PDMINS_2_DATA(pDrvIns, PDRVNETSHAPER);

    LogFlow(("drvNetShaperDetach: pDrvIns: %p, fFlags: %u\n", pDrvIns, fFlags));
    PDMCritSectEnter(&pThis->XmitLock, VERR_IGNORED);
    pThis->pIBelowNetR3 = NULL;
    pThis->pIBelowNetR0 = NIL_RTR0PTR;
    PDMCritSectLeave(&pThis->XmitLock);
}
Example #7
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect           The PDM critical section to enter.
 * @param   fCallRing3          Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
        &&  pCritSect->s.Core.pValidatorRec
        &&  pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    return rc;
}
Example #8
/**
 * @interface_method_impl{PDMIHOSTPARALLELPORT,pfnNotifyInterrupt}
 */
static DECLCALLBACK(int) parallelR3NotifyInterrupt(PPDMIHOSTPARALLELPORT pInterface)
{
    PARALLELPORT *pThis = PDMIHOSTPARALLELPORT_2_PARALLELPORT(pInterface);

    PDMCritSectEnter(pThis->pDevInsR3->pCritSectRoR3, VINF_SUCCESS);
    parallelR3IrqSet(pThis);
    PDMCritSectLeave(pThis->pDevInsR3->pCritSectRoR3);

    return VINF_SUCCESS;
}
Example #9
static DECLCALLBACK(int) parallelNotifyInterrupt(PPDMIHOSTPARALLELPORT pInterface)
{
    ParallelState *pThis = PDMIHOSTPARALLELPORT_2_PARALLELSTATE(pInterface);

    PDMCritSectEnter(&pThis->CritSect, VINF_SUCCESS);
    parallel_set_irq(pThis);
    PDMCritSectLeave(&pThis->CritSect);

    return VINF_SUCCESS;
}
Example #10
static void pdmNsFilterLink(PPDMNSFILTER pFilter)
{
    PPDMNSBWGROUP pBwGroup = pFilter->pBwGroupR3;
    int rc = PDMCritSectEnter(&pBwGroup->cs, VERR_SEM_BUSY); AssertRC(rc);

    pFilter->pNext = pBwGroup->pFiltersHead;
    pBwGroup->pFiltersHead = pFilter;

    rc = PDMCritSectLeave(&pBwGroup->cs); AssertRC(rc);
}
Example #11
/**
 * @interface_method_impl{PDMIMOUSEPORT, pfnPutEvent}
 */
static DECLCALLBACK(int) ps2mPutEvent(PPDMIMOUSEPORT pInterface, int32_t dx, int32_t dy,
                                      int32_t dz, int32_t dw, uint32_t fButtons)
{
    PPS2M       pThis = RT_FROM_MEMBER(pInterface, PS2M, Mouse.IPort);
    int rc = PDMCritSectEnter(pThis->pCritSectR3, VERR_SEM_BUSY);
    AssertReleaseRC(rc);

    LogFlowFunc(("dX=%d dY=%d dZ=%d dW=%d buttons=%02X\n", dx, dy, dz, dw, fButtons));
    /* NB: The PS/2 Y axis direction is inverted relative to ours. */
    ps2mPutEventWorker(pThis, dx, -dy, dz, dw, fButtons);

    PDMCritSectLeave(pThis->pCritSectR3);
    return VINF_SUCCESS;
}
Example #12
/**
 * Yield the critical section if someone is waiting on it.
 *
 * When yielding, we'll leave the critical section and try to make sure the
 * other waiting threads get a chance of entering before we reclaim it.
 *
 * @retval  true if yielded.
 * @retval  false if not yielded.
 * @param   pCritSect           The critical section.
 */
VMMR3DECL(bool) PDMR3CritSectYield(PPDMCRITSECT pCritSect)
{
    AssertPtrReturn(pCritSect, false);
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == RTThreadNativeSelf());
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));

    /* No recursion allowed here. */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    AssertReturn(cNestings == 1, false);

    int32_t const cLockers  = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
    if (cLockers < cNestings)
        return false;

#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS const SrcPos = pCritSect->s.Core.pValidatorRec->SrcPos;
#endif
    PDMCritSectLeave(pCritSect);

    /*
     * If we're lucky, then one of the waiters has entered the lock already.
     * We spin a little bit in hope for this to happen so we can avoid the
     * yield detour.
     */
    if (ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0)
    {
        int cLoops = 20;
        while (   cLoops > 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers)  >= 0)
        {
            ASMNopPause();
            cLoops--;
        }
        if (cLoops == 0)
            RTThreadYield();
    }

#ifdef PDMCRITSECT_STRICT
    int rc = PDMCritSectEnterDebug(pCritSect, VERR_IGNORED,
                                   SrcPos.uId, SrcPos.pszFile, SrcPos.uLine, SrcPos.pszFunction);
#else
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
#endif
    AssertLogRelRC(rc);
    return true;
}
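The kind of caller PDMR3CritSectYield is meant for is a long-running ring-3 loop that owns a critical section and periodically offers it to waiting threads. A minimal sketch under that assumption (the function and loop below are illustrative, not from the listing):

/* Illustrative only: lengthy ring-3 work under a PDM critical section that
 * yields the lock now and then so waiting threads are not starved. */
static void exampleProcessQueueR3(PPDMCRITSECT pCritSect, uint32_t cItems)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
    AssertRC(rc);
    for (uint32_t i = 0; i < cItems; i++)
    {
        /* ... handle item i while owning the section ... */
        if ((i & 127) == 0)
            PDMR3CritSectYield(pCritSect); /* leaves and re-enters if someone is waiting */
    }
    PDMCritSectLeave(pCritSect);
}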
Example #13
/**
 * Locks the hypervisor heap.
 * This might call back to Ring-3 in order to deal with lock contention in GC and R3.
 *
 * @param   pVM     The VM handle.
 */
static int mmHyperLock(PVM pVM)
{
    PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);

#ifdef IN_RING3
    if (!PDMCritSectIsInitialized(&pHeap->Lock))
        return VINF_SUCCESS;     /* early init */
#else
    Assert(PDMCritSectIsInitialized(&pHeap->Lock));
#endif
    int rc = PDMCritSectEnter(&pHeap->Lock, VERR_SEM_BUSY);
#if defined(IN_RC) || defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_MMHYPER_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}
Example #14
/**
 * Obtain bandwidth in a bandwidth group.
 *
 * @returns True if bandwidth was allocated, false if not.
 * @param   pFilter         Pointer to the filter that allocates bandwidth.
 * @param   cbTransfer      Number of bytes to allocate.
 */
VMMDECL(bool) PDMNsAllocateBandwidth(PPDMNSFILTER pFilter, size_t cbTransfer)
{
    AssertPtrReturn(pFilter, true);
    if (!VALID_PTR(pFilter->CTX_SUFF(pBwGroup)))
        return true;

    PPDMNSBWGROUP pBwGroup = ASMAtomicReadPtrT(&pFilter->CTX_SUFF(pBwGroup), PPDMNSBWGROUP);
    int rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
    if (RT_UNLIKELY(rc == VERR_SEM_BUSY))
        return true;

    bool fAllowed = true;
    if (pBwGroup->cbPerSecMax)
    {
        /* Re-fill the bucket first */
        uint64_t tsNow        = RTTimeSystemNanoTS();
        uint32_t uTokensAdded = (tsNow - pBwGroup->tsUpdatedLast) * pBwGroup->cbPerSecMax / (1000 * 1000 * 1000);
        uint32_t uTokens      = RT_MIN(pBwGroup->cbBucket, uTokensAdded + pBwGroup->cbTokensLast);

        if (cbTransfer > uTokens)
        {
            fAllowed = false;
            ASMAtomicWriteBool(&pFilter->fChoked, true);
        }
        else
        {
            pBwGroup->tsUpdatedLast = tsNow;
            pBwGroup->cbTokensLast = uTokens - (uint32_t)cbTransfer;
        }
        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} cbTransfer=%u uTokens=%u uTokensAdded=%u fAllowed=%RTbool\n",
              pBwGroup, R3STRING(pBwGroup->pszNameR3), cbTransfer, uTokens, uTokensAdded, fAllowed));
    }
    else
        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} disabled fAllowed=%RTbool\n",
              pBwGroup, R3STRING(pBwGroup->pszNameR3), fAllowed));

    rc = PDMCritSectLeave(&pBwGroup->Lock); AssertRC(rc);
    return fAllowed;
}
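To make the refill arithmetic above concrete: with cbPerSecMax = 1,000,000 bytes/s and 50 ms (50,000,000 ns) elapsed since tsUpdatedLast, uTokensAdded = 50,000,000 * 1,000,000 / 1,000,000,000 = 50,000 bytes; the sum with cbTokensLast is clamped to cbBucket, and the transfer is allowed only if cbTransfer fits into that token balance (the numbers are illustrative).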
Example #15
/**
 * @interface_method_impl{PDMDRVREG,pfnAttach}
 */
static DECLCALLBACK(int) drvR3NetShaperAttach(PPDMDRVINS pDrvIns, uint32_t fFlags)
{
    PDRVNETSHAPER pThis = PDMINS_2_DATA(pDrvIns, PDRVNETSHAPER);
    LogFlow(("drvNetShaperAttach/#%#x: fFlags=%#x\n", pDrvIns->iInstance, fFlags));
    PDMCritSectEnter(&pThis->XmitLock, VERR_IGNORED);

    /*
     * Query the network connector interface.
     */
    PPDMIBASE   pBaseDown;
    int rc = PDMDrvHlpAttach(pDrvIns, fFlags, &pBaseDown);
    if (   rc == VERR_PDM_NO_ATTACHED_DRIVER
        || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
    {
        pThis->pIBelowNetR3 = NULL;
        pThis->pIBelowNetR0 = NIL_RTR0PTR;
        rc = VINF_SUCCESS;
    }
    else if (RT_SUCCESS(rc))
    {
        pThis->pIBelowNetR3 = PDMIBASE_QUERY_INTERFACE(pBaseDown, PDMINETWORKUP);
        if (pThis->pIBelowNetR3)
        {
            PPDMIBASER0 pBaseR0  = PDMIBASE_QUERY_INTERFACE(pBaseDown, PDMIBASER0);
            pThis->pIBelowNetR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
            rc = VINF_SUCCESS;
        }
        else
        {
            AssertMsgFailed(("Configuration error: the driver below didn't export the network connector interface!\n"));
            rc = VERR_PDM_MISSING_INTERFACE_BELOW;
        }
    }
    else
        AssertMsgFailed(("Failed to attach to driver below! rc=%Rrc\n", rc));

    PDMCritSectLeave(&pThis->XmitLock);
    return VINF_SUCCESS;
}
Example #16
VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PVM pVM, const char *pcszBwGroup, uint64_t cbTransferPerSecMax)
{
    PUVM pUVM = pVM->pUVM;
    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;

    int rc = RTCritSectEnter(&pShaper->cs); AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
        if (pBwGroup)
        {
            rc = PDMCritSectEnter(&pBwGroup->cs, VERR_SEM_BUSY); AssertRC(rc);
            pdmNsBwGroupSetLimit(pBwGroup, cbTransferPerSecMax);
            /* Drop extra tokens */
            if (pBwGroup->cbTokensLast > pBwGroup->cbBucketSize)
                pBwGroup->cbTokensLast = pBwGroup->cbBucketSize;
            rc = PDMCritSectLeave(&pBwGroup->cs); AssertRC(rc);
        }
        rc = RTCritSectLeave(&pShaper->cs); AssertRC(rc);
    }
    return rc;
}
Example #17
/**
 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
                                           RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case, so
     * do the simple test first and the try deal with uErrorCode being N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                        )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Retain the range and do locking.
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Let IEM call us back via iomMmioHandler.
     */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    NOREF(pCtxCore); NOREF(GCPhysFault);
    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    if (RT_SUCCESS(rcStrict))
        return rcStrict;
    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
        Log(("IOM: Hit unsupported IEM feature!\n"));
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    }
    return rcStrict;
}
Example #18
/**
 * Locks PDM but don't go to ring-3 if it's owned by someone.
 *
 * @returns VINF_SUCCESS on success.
 * @returns rc if we're in GC or R0 and can't get the lock.
 * @param   pVM     Pointer to the VM.
 * @param   rc      The RC to return in GC or R0 when we can't get the lock.
 */
int pdmLockEx(PVM pVM, int rc)
{
    return PDMCritSectEnter(&pVM->pdm.s.CritSect, rc);
}
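A hedged sketch of how a ring-0/raw-mode caller might use pdmLockEx, returning early when the lock is busy instead of blocking; the wrapper function and the chosen status codes are illustrative, not from the listing:

/* Illustrative only: try the PDM lock in R0/RC and defer to ring-3 on contention. */
static int exampleDoPdmWork(PVM pVM)
{
    int rc = pdmLockEx(pVM, VERR_SEM_BUSY);
    if (rc == VERR_SEM_BUSY)
        return VINF_EM_RAW_TO_R3;   /* have ring-3 retry the operation */
    AssertRC(rc);
    /* ... perform the PDM work while holding the lock ... */
    PDMCritSectLeave(&pVM->pdm.s.CritSect);
    return VINF_SUCCESS;
}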
Example #19
/**
 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
 *
 * @remarks The @a pvUser argument points to the MMIO range entry.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
                                              size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

    NOREF(pvPhys); NOREF(enmOrigin);
    AssertPtr(pRange);
    AssertMsg(cbBuf >= 1, ("%zu\n", cbBuf));


#ifndef IN_RING3
    /*
     * If someone is doing FXSAVE, FXRSTOR, XSAVE, XRSTOR or other stuff dealing with
     * large amounts of data, just go to ring-3 where we don't need to deal with partial
     * successes.  No chance any of these will be problematic read-modify-write stuff.
     */
    if (cbBuf > sizeof(pVCpu->iom.s.PendingMmioWrite.abValue))
        return enmAccessType == PGMACCESSTYPE_WRITE ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
#endif

    /*
     * Validate the range.
     */
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
    {
        if (enmAccessType == PGMACCESSTYPE_READ)
            return VINF_IOM_R3_MMIO_READ;
        Assert(enmAccessType == PGMACCESSTYPE_WRITE);
        return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
    }
#endif
    AssertRC(rc);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));

    /*
     * Perform locking.
     */
    iomMmioRetainRange(pRange);
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    IOM_UNLOCK_SHARED(pVM);
    VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rcStrict == VINF_SUCCESS)
    {
        /*
         * Perform the access.
         */
        if (enmAccessType == PGMACCESSTYPE_READ)
            rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
        else
        {
            rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
#ifndef IN_RING3
            if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
                rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
#endif
        }

        /* Check the return code. */
#ifdef IN_RING3
        AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
#else
        AssertMsg(   rcStrict == VINF_SUCCESS
                  || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ :  VINF_IOM_R3_MMIO_WRITE)
                  || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
                  || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                  || rcStrict == VINF_EM_DBG_STOP
                  || rcStrict == VINF_EM_DBG_EVENT
                  || rcStrict == VINF_EM_DBG_BREAKPOINT
                  || rcStrict == VINF_EM_OFF
                  || rcStrict == VINF_EM_SUSPEND
                  || rcStrict == VINF_EM_RESET
                  || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
                  //|| rcStrict == VINF_EM_HALT       /* ?? */
                  //|| rcStrict == VINF_EM_NO_MEMORY  /* ?? */
                  , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
#endif

        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    }
#ifdef IN_RING3
    else
        iomMmioReleaseRange(pVM, pRange);
#else
    else
    {
        if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
Example #20
/**
 * Reads an I/O port register.
 *
 * @returns Strict VBox status code. Informational status codes other than the one documented
 *          here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
 * @retval  VINF_SUCCESS                Success.
 * @retval  VINF_EM_FIRST-VINF_EM_LAST  Success with some exceptions (see IOM_SUCCESS()), the
 *                                      status code must be passed on to EM.
 * @retval  VINF_IOM_R3_IOPORT_READ     Defer the read to ring-3. (R0/RC only)
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   Port        The port to read.
 * @param   pu32Value   Where to store the value read.
 * @param   cbValue     The size of the register to read in bytes. 1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMIOPortRead(PVM pVM, PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
{
/** @todo should initialize *pu32Value here because it can happen that some
 *        handle is buggy and doesn't handle all cases. */
    /* Take the IOM lock before performing any device I/O. */
    int rc2 = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_R3_IOPORT_READ;
#endif
    AssertRC(rc2);
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyIOPortRead(pVM, Port, cbValue);
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Get the statistics record.
     */
    PIOMIOPORTSTATS  pStats = pVCpu->iom.s.CTX_SUFF(pStatsLastRead);
    if (!pStats || pStats->Core.Key != Port)
    {
        pStats = (PIOMIOPORTSTATS)RTAvloIOPortGet(&pVM->iom.s.CTX_SUFF(pTrees)->IOPortStatTree, Port);
        if (pStats)
            pVCpu->iom.s.CTX_SUFF(pStatsLastRead) = pStats;
    }
#endif

    /*
     * Get handler for current context.
     */
    CTX_SUFF(PIOMIOPORTRANGE) pRange = pVCpu->iom.s.CTX_SUFF(pRangeLastRead);
    if (    !pRange
        ||   (unsigned)Port - (unsigned)pRange->Port >= (unsigned)pRange->cPorts)
    {
        pRange = iomIOPortGetRange(pVM, Port);
        if (pRange)
            pVCpu->iom.s.CTX_SUFF(pRangeLastRead) = pRange;
    }
    MMHYPER_RC_ASSERT_RCPTR(pVM, pRange);
    if (pRange)
    {
        /*
         * Found a range, get the data in case we leave the IOM lock.
         */
        PFNIOMIOPORTIN  pfnInCallback = pRange->pfnInCallback;
#ifndef IN_RING3
        if (pfnInCallback)
        { /* likely */ }
        else
        {
            STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); });
            IOM_UNLOCK_SHARED(pVM);
            return VINF_IOM_R3_IOPORT_READ;
        }
#endif
        void           *pvUser    = pRange->pvUser;
        PPDMDEVINS      pDevIns   = pRange->pDevIns;
        IOM_UNLOCK_SHARED(pVM);

        /*
         * Call the device.
         */
        VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_IOPORT_READ);
        if (rcStrict == VINF_SUCCESS)
        { /* likely */ }
        else
        {
            STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); });
            return rcStrict;
        }
Example #21
#ifndef IN_RING3
        if (!pfnInCallback)
        {
            STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); });
            IOM_UNLOCK_SHARED(pVM);
            return VINF_IOM_R3_IOPORT_READ;
        }
#endif
        void           *pvUser    = pRange->pvUser;
        PPDMDEVINS      pDevIns   = pRange->pDevIns;
        IOM_UNLOCK_SHARED(pVM);

        /*
         * Call the device.
         */
        VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_IOPORT_READ);
        if (rcStrict != VINF_SUCCESS)
        {
            STAM_STATS({ if (pStats) STAM_COUNTER_INC(&pStats->InRZToR3); });
            return rcStrict;
        }
#ifdef VBOX_WITH_STATISTICS
        if (pStats)
        {
            STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfIn), a);
            rcStrict = pfnInCallback(pDevIns, pvUser, Port, pu32Value, (unsigned)cbValue);
            STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfIn), a);
        }
        else
#endif
            rcStrict = pfnInCallback(pDevIns, pvUser, Port, pu32Value, (unsigned)cbValue);