/**
 * Calculate the host context ring-0 address of an offset into the HMA memory chunk.
 *
 * @returns The host context ring-0 address.
 * @param   pVM         The cross context VM structure.
 * @param   pLookup     The HMA lookup record.
 * @param   off         The offset into the HMA memory chunk.
 */
DECLINLINE(RTR0PTR) mmHyperLookupCalcR0(PVM pVM, PMMLOOKUPHYPER pLookup, uint32_t off)
{
    switch (pLookup->enmType)
    {
        case MMLOOKUPHYPERTYPE_LOCKED:
            if (pLookup->u.Locked.pvR0)
                return (RTR0PTR)((RTR0UINTPTR)pLookup->u.Locked.pvR0 + off);
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
            AssertMsg(VM_IS_RAW_MODE_ENABLED(pVM), ("%s\n", R3STRING(pLookup->pszDesc)));
#else
            AssertMsgFailed(("%s\n", R3STRING(pLookup->pszDesc))); NOREF(pVM);
#endif
            return NIL_RTR0PTR;

        case MMLOOKUPHYPERTYPE_HCPHYS:
            if (pLookup->u.HCPhys.pvR0)
                return (RTR0PTR)((RTR0UINTPTR)pLookup->u.HCPhys.pvR0 + off);
            AssertMsgFailed(("%s\n", R3STRING(pLookup->pszDesc)));
            return NIL_RTR0PTR;

        default:
            AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
            return NIL_RTR0PTR;
    }
}
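/*
 * Illustrative caller sketch (not part of the original source): translating
 * an HMA offset to ring-0 once the lookup-chain walk has produced a record.
 * The names pLookup and off are assumed inputs from that walk.
 */
#if 0 /* example only */
    RTR0PTR R0Ptr = mmHyperLookupCalcR0(pVM, pLookup, off);
    if (R0Ptr == NIL_RTR0PTR)
        return VERR_INVALID_POINTER; /* this chunk has no ring-0 mapping */
#endif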
/**
 * Obtains bandwidth from a bandwidth group (token bucket).
 *
 * @returns True if bandwidth was allocated, false if not.
 * @param   pFilter         Pointer to the filter that allocates bandwidth.
 * @param   cbTransfer      Number of bytes to allocate.
 */
VMMDECL(bool) PDMNsAllocateBandwidth(PPDMNSFILTER pFilter, size_t cbTransfer)
{
    AssertPtrReturn(pFilter, true);
    if (!VALID_PTR(pFilter->CTX_SUFF(pBwGroup)))
        return true;

    PPDMNSBWGROUP pBwGroup = ASMAtomicReadPtrT(&pFilter->CTX_SUFF(pBwGroup), PPDMNSBWGROUP);
    int rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY); AssertRC(rc);
    if (RT_UNLIKELY(rc == VERR_SEM_BUSY))
        return true;

    bool fAllowed = true;
    if (pBwGroup->cbPerSecMax)
    {
        /* Re-fill the bucket first */
        uint64_t tsNow        = RTTimeSystemNanoTS();
        uint32_t uTokensAdded = (tsNow - pBwGroup->tsUpdatedLast) * pBwGroup->cbPerSecMax / (1000 * 1000 * 1000);
        uint32_t uTokens      = RT_MIN(pBwGroup->cbBucket, uTokensAdded + pBwGroup->cbTokensLast);

        if (cbTransfer > uTokens)
        {
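            /* Not enough tokens: deny the transfer and flag the filter as
             * choked so it can be resumed once the bucket has refilled. */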
            fAllowed = false;
            ASMAtomicWriteBool(&pFilter->fChoked, true);
        }
        else
        {
            pBwGroup->tsUpdatedLast = tsNow;
            pBwGroup->cbTokensLast = uTokens - (uint32_t)cbTransfer;
        }
        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} cbTransfer=%u uTokens=%u uTokensAdded=%u fAllowed=%RTbool\n",
              pBwGroup, R3STRING(pBwGroup->pszNameR3), cbTransfer, uTokens, uTokensAdded, fAllowed));
    }
    else
        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} disabled fAllowed=%RTbool\n",
              pBwGroup, R3STRING(pBwGroup->pszNameR3), fAllowed));

    rc = PDMCritSectLeave(&pBwGroup->Lock); AssertRC(rc);
    return fAllowed;
}
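/*
 * Standalone sketch of the token-bucket refill arithmetic used above, using
 * plain C types instead of the VBox ones (an illustration, not the shipped
 * API).  Example: at cbPerSecMax = 1000000 bytes/sec, a 2 ms gap adds 2000
 * tokens, capped at the bucket size.
 */
#include <stdint.h>

static uint32_t exampleRefillTokens(uint64_t tsNowNs, uint64_t tsLastNs,
                                    uint64_t cbPerSecMax, uint32_t cbBucket,
                                    uint32_t cbTokensLast)
{
    /* Tokens accrue linearly with elapsed time: bytes = ns * rate / 1e9. */
    uint64_t cAdded  = (tsNowNs - tsLastNs) * cbPerSecMax / UINT64_C(1000000000);
    uint64_t cTokens = cAdded + cbTokensLast;
    /* The bucket size caps how much unused bandwidth carries forward. */
    return cTokens < cbBucket ? (uint32_t)cTokens : cbBucket;
}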
Example #3
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect           The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif  /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
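/*
 * Typical enter/leave pairing (sketch, not from the original file).  The
 * rcBusy argument is what ring-0/raw-mode callers may get back instead of
 * blocking; ring-3 callers normally block and see VINF_SUCCESS.
 */
#if 0 /* example only */
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... access the state the section guards ... */
        PDMCritSectLeave(pCritSect);
    }
    /* else rc == VERR_SEM_BUSY: back off or defer the work to ring-3. */
#endif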
Example #4
/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                              void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                               || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const  offAccess  = GCPhys & 3;
        unsigned        cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
#ifndef IN_RING3
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    rc2 = VBOXSTRICTRC_TODO(iomMmioRing3WritePending(pVCpu, GCPhys, pvValue, cbValue, pRange));
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    return rc;
#endif
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t  const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue & u32GivenMask);

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
#ifndef IN_RING3
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                Log3(("iomMMIODoComplicatedWrite: deferring GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
                AssertReturn(cbValue + (GCPhys & 3) <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
                pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys & ~(RTGCPHYS)3;
                pVCpu->iom.s.PendingMmioWrite.cbValue = cbValue + (GCPhys & 3);
                *(uint32_t *)pVCpu->iom.s.PendingMmioWrite.abValue = u32Value;
                if (cbValue > cbThisPart)
                    memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[4],
                           (uint8_t const *)pvValue + cbThisPart, cbValue - cbThisPart);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
                if (rc == VINF_SUCCESS)
                    rc = VINF_IOM_R3_MMIO_COMMIT_WRITE;
                return rc2;
#endif
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys += cbThisPart;
        pvValue = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}
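/*
 * Standalone illustration of the read-merge-write step above (plain C, no
 * VBox types): writing the 16-bit value 0xBEEF at byte offset 1 into a dword
 * that currently reads 0x11223344 produces 0x11BEEF44.
 */
#include <stdint.h>

static uint32_t exampleMergeWrite(uint32_t u32Existing, uint32_t u32Given,
                                  uint32_t u32GivenMask, unsigned offAccess)
{
    /* Align the given bytes and their mask with their place in the dword. */
    u32Given     <<= offAccess * 8;
    u32GivenMask <<= offAccess * 8;
    /* Keep the device's bytes where no data was given, overlay the rest. */
    return (u32Existing & ~u32GivenMask) | (u32Given & u32GivenMask);
}
/* exampleMergeWrite(0x11223344, 0xBEEF, 0xffff, 1) == 0x11BEEF44 */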
Example #5
/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM                 The cross context VM structure.
 * @param   pRange              The range to read from.
 * @param   GCPhys              The physical address to start reading.
 * @param   pvValue             Where to store the value.
 * @param   cbValue             The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 * something?  Since reads can have sideeffects we could be
                 * kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8; /* shift the addressed bytes down to bit 0 */

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys += cbThisPart;
        pvValue = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}
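/*
 * Standalone illustration of the extract step above (plain C): the device
 * always returns a whole aligned dword and the addressed bytes are shifted
 * down.  For a dword reading 0x11223344, a 1-byte read at offset 2 (little
 * endian) yields 0x22.
 */
#include <stdint.h>

static uint8_t exampleExtractByte(uint32_t u32Dword, unsigned offAccess)
{
    return (uint8_t)(u32Dword >> offAccess * 8);
}
/* exampleExtractByte(0x11223344, 2) == 0x22 */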