Example #1
/**
 * Mark a page as scanned/not scanned
 *
 * @note: we always mark it as scanned, even if we haven't completely done so
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address (not necessarily aligned)
 * @param   fScanned    Mark as scanned or not scanned
 *
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
       Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if (!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void*))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperRCToR3 failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return VERR_INTERNAL_ERROR;
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperR3ToRC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return VERR_INTERNAL_ERROR;
        }
#endif
    }
    if (fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}
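The interesting part of CSAMMarkPage is the address math: a guest address is split into a 4MB-directory index (pgdir) and a 4KB-page bit index within that directory, and the per-directory bitmap chunk is only allocated the first time a page in that 4MB region is marked. Below is a minimal standalone sketch of the same lazy-chunk bitmap scheme in plain C; the constants and names (markPage, CHUNK_BYTES and friends) are illustrative stand-ins, not the CSAM or x86 header definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_4K_SHIFT        12
#define PAGE_4M_SHIFT        22
#define PAGE_4M_OFFSET_MASK  ((1u << PAGE_4M_SHIFT) - 1)
#define CHUNKS               (1u << (32 - PAGE_4M_SHIFT))                  /* one bitmap per 4M region */
#define CHUNK_BYTES          ((1u << (PAGE_4M_SHIFT - PAGE_4K_SHIFT)) / 8) /* 1024 pages -> 128 bytes */

static uint8_t *g_apBitmaps[CHUNKS];   /* lazily allocated, like pPDBitmap */

static int markPage(uint32_t uAddr, int fScanned)
{
    unsigned iChunk = uAddr >> PAGE_4M_SHIFT;
    unsigned iBit   = (uAddr & PAGE_4M_OFFSET_MASK) >> PAGE_4K_SHIFT;
    if (!g_apBitmaps[iChunk])
    {
        g_apBitmaps[iChunk] = calloc(1, CHUNK_BYTES);   /* allocate the chunk on first use */
        if (!g_apBitmaps[iChunk])
            return -1;
    }
    if (fScanned)
        g_apBitmaps[iChunk][iBit / 8] |= (uint8_t)(1u << (iBit % 8));
    else
        g_apBitmaps[iChunk][iBit / 8] &= (uint8_t)~(1u << (iBit % 8));
    return 0;
}

int main(void)
{
    markPage(0x00401abcu, 1);   /* marks the 4K page at 0x00401000 */
    printf("chunk 1, byte 0: %#x\n", (unsigned)g_apBitmaps[1][0]);   /* bit 1 set -> 0x2 */
    return 0;
}

A 32-bit address space needs at most 1024 such 128-byte chunks, so the scheme only pays for the 4MB regions that actually contain scanned code.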
Example #2
static int pdmNsBwGroupCreate(PPDMNETSHAPER pShaper, const char *pcszBwGroup, uint64_t cbTransferPerSecMax)
{
    LogFlowFunc(("pShaper=%#p pcszBwGroup=%#p{%s} cbTransferPerSecMax=%llu\n",
                 pShaper, pcszBwGroup, pcszBwGroup, cbTransferPerSecMax));

    AssertPtrReturn(pShaper, VERR_INVALID_POINTER);
    AssertPtrReturn(pcszBwGroup, VERR_INVALID_POINTER);
    AssertReturn(*pcszBwGroup != '\0', VERR_INVALID_PARAMETER);

    int         rc;
    PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
    if (!pBwGroup)
    {
        rc = MMHyperAlloc(pShaper->pVM, sizeof(PDMNSBWGROUP), 64,
                          MM_TAG_PDM_NET_SHAPER, (void **)&pBwGroup);
        if (RT_SUCCESS(rc))
        {
            rc = PDMR3CritSectInit(pShaper->pVM, &pBwGroup->cs, RT_SRC_POS, "BWGRP");
            if (RT_SUCCESS(rc))
            {
                pBwGroup->pszName = RTStrDup(pcszBwGroup);
                if (pBwGroup->pszName)
                {
                    pBwGroup->pShaper               = pShaper;
                    pBwGroup->cRefs                 = 0;

                    pdmNsBwGroupSetLimit(pBwGroup, cbTransferPerSecMax);

                    pBwGroup->cbTokensLast          = pBwGroup->cbBucketSize;
                    pBwGroup->tsUpdatedLast         = RTTimeSystemNanoTS();

                    LogFlowFunc(("pcszBwGroup={%s} cbBucketSize=%u\n",
                                 pcszBwGroup, pBwGroup->cbBucketSize));
                    pdmNsBwGroupLink(pBwGroup);
                    return VINF_SUCCESS;
                }
                else
                    rc = VERR_NO_MEMORY;
                PDMR3CritSectDelete(&pBwGroup->cs);
            }
            MMHyperFree(pShaper->pVM, pBwGroup);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else
        rc = VERR_ALREADY_EXISTS;

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
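The cbBucketSize/cbTokensLast/tsUpdatedLast fields that pdmNsBwGroupCreate initializes are the classic ingredients of a token-bucket limiter. Here is a self-contained sketch of that pattern in the same naming style; the structure and functions below are invented for illustration and are not the PDM network shaper's actual types.

#include <stdint.h>
#include <stdio.h>

typedef struct BWGROUP
{
    uint64_t cbPerSecMax;   /* refill rate, bytes per second */
    uint64_t cbBucketSize;  /* burst capacity, bytes */
    uint64_t cbTokens;      /* tokens currently available */
    uint64_t tsLastNs;      /* last refill timestamp, nanoseconds */
} BWGROUP;

/* Refill tokens for the elapsed time, then try to take cb bytes.
 * Naive 64-bit math; a real shaper would guard against overflow. */
static int bwGroupTryConsume(BWGROUP *pGrp, uint64_t tsNowNs, uint64_t cb)
{
    uint64_t cNs      = tsNowNs - pGrp->tsLastNs;
    uint64_t cbRefill = pGrp->cbPerSecMax * cNs / UINT64_C(1000000000);
    pGrp->cbTokens += cbRefill;
    if (pGrp->cbTokens > pGrp->cbBucketSize)
        pGrp->cbTokens = pGrp->cbBucketSize;    /* cap at burst size */
    pGrp->tsLastNs = tsNowNs;
    if (pGrp->cbTokens < cb)
        return 0;                               /* over the limit, caller must defer */
    pGrp->cbTokens -= cb;
    return 1;
}

int main(void)
{
    BWGROUP Grp = { 1000000, 65536, 65536, 0 }; /* 1 MB/s, 64K burst */
    printf("64K now: %d\n",   bwGroupTryConsume(&Grp, 0, 65536));         /* 1: full bucket */
    printf("64K again: %d\n", bwGroupTryConsume(&Grp, 1000000, 65536));   /* 0: only ~1000 bytes refilled */
    return 0;
}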
Example #3
/**
 *  Entry point.
 */
extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
{

    /*
     * Init runtime.
     */
    RTR3InitExe(argc, &argv, 0);

    /*
     * Create empty VM structure and call MMR3Init().
     */
    PVM         pVM;
    RTR0PTR     pvR0;
    SUPPAGE     aPages[RT_ALIGN_Z(sizeof(*pVM) + NUM_CPUS * sizeof(VMCPU), PAGE_SIZE) >> PAGE_SHIFT];
    int rc = SUPR3Init(NULL);
    if (RT_SUCCESS(rc))
        rc = SUPR3LowAlloc(RT_ELEMENTS(aPages), (void **)&pVM, &pvR0, &aPages[0]);
    if (RT_FAILURE(rc))
    {
        RTPrintf("Fatal error: SUP Failure! rc=%Rrc\n", rc);
        return 1;
    }
    memset(pVM, 0, sizeof(*pVM)); /* start with a fully zeroed VM structure */
    pVM->paVMPagesR3 = aPages;
    pVM->pVMR0 = pvR0;

    static UVM s_UVM;
    PUVM pUVM = &s_UVM;
    pUVM->pVM = pVM;
    pVM->pUVM = pUVM;

    pVM->cCpus = NUM_CPUS;
    pVM->cbSelf = RT_UOFFSETOF(VM, aCpus[pVM->cCpus]);

    rc = STAMR3InitUVM(pUVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = MMR3InitUVM(pUVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = CFGMR3Init(pVM, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: CFGMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = MMR3Init(pVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("Fatal error: MMR3Init failed! rc=%Rrc\n", rc);
        return 1;
    }

    /*
     * Try to allocate.
     */
    static struct
    {
        size_t      cb;
        unsigned    uAlignment;
        void       *pvAlloc;
        unsigned    iFreeOrder;
    } aOps[] =
    {
        {        16,          0,    NULL,  0 },
        {        16,          4,    NULL,  1 },
        {        16,          8,    NULL,  2 },
        {        16,         16,    NULL,  5 },
        {        16,         32,    NULL,  4 },
        {        32,          0,    NULL,  3 },
        {        31,          0,    NULL,  6 },
        {      1024,          0,    NULL,  8 },
        {      1024,         32,    NULL, 10 },
        {      1024,         32,    NULL, 12 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 13 },
        {      1024,         32,    NULL,  9 },
        { PAGE_SIZE,         32,    NULL, 11 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 14 },
        {        16,          0,    NULL, 15 },
        {         9,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {        36,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {     12344,          0,    NULL,  7 },
        {        50,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
    };
    unsigned i;
#ifdef DEBUG
    MMHyperHeapDump(pVM);
#endif
    size_t cbBefore = MMHyperHeapGetFreeSize(pVM);
    static char szFill[] = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";

    /* allocate */
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM, &aOps[i].pvAlloc);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
            return 1;
        }
        memset(aOps[i].pvAlloc, szFill[i], aOps[i].cb);
        if (RT_ALIGN_P(aOps[i].pvAlloc, (aOps[i].uAlignment ? aOps[i].uAlignment : 8)) != aOps[i].pvAlloc)
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %p, invalid alignment!\n", aOps[i].cb, aOps[i].uAlignment, aOps[i].pvAlloc);
            return 1;
        }
    }

    /* free and allocate the same node again. */
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        if (    !aOps[i].pvAlloc
            ||  aOps[i].uAlignment == PAGE_SIZE)
            continue;
        //size_t cbBeforeSub = MMHyperHeapGetFreeSize(pVM);
        rc = MMHyperFree(pVM, aOps[i].pvAlloc);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperFree(, %p,) -> %d i=%d\n", aOps[i].pvAlloc, rc, i);
            return 1;
        }
        //RTPrintf("debug: i=%d cbBeforeSub=%d now=%d\n", i, cbBeforeSub, MMHyperHeapGetFreeSize(pVM));
        void *pv;
        rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM_REQ, &pv);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
            return 1;
        }
        if (pv != aOps[i].pvAlloc)
        {
            RTPrintf("Failure: Free+Alloc returned different address. new=%p old=%p i=%d (doesn't work with delayed free)\n", pv, aOps[i].pvAlloc, i);
            //return 1;
        }
        aOps[i].pvAlloc = pv;
        #if 0 /* won't work :/ */
        size_t cbAfterSub = MMHyperHeapGetFreeSize(pVM);
        if (cbBeforeSub != cbAfterSub)
        {
            RTPrintf("Failure: cbBeforeSub=%d cbAfterSub=%d. i=%d\n", cbBeforeSub, cbAfterSub, i);
            return 1;
        }
        #endif
    }

    /* free it in a specific order. */
    int cFreed = 0;
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        unsigned j;
        for (j = 0; j < RT_ELEMENTS(aOps); j++)
        {
            if (    aOps[j].iFreeOrder != i
                ||  !aOps[j].pvAlloc)
                continue;
            RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, MMHyperHeapGetFreeSize(pVM), aOps[j].cb, aOps[j].pvAlloc);
            if (aOps[j].uAlignment == PAGE_SIZE)
                cbBefore -= aOps[j].cb;
            else
            {
                rc = MMHyperFree(pVM, aOps[j].pvAlloc);
                if (RT_FAILURE(rc))
                {
                    RTPrintf("Failure: MMHyperFree(, %p,) -> %d j=%d i=%d\n", aOps[j].pvAlloc, rc, i, j);
                    return 1;
                }
            }
            aOps[j].pvAlloc = NULL;
            cFreed++;
        }
    }
    Assert(cFreed == RT_ELEMENTS(aOps));
    RTPrintf("i=done free=%d\n", MMHyperHeapGetFreeSize(pVM));

    /* check that we're back at the right amount of free memory. */
    size_t cbAfter = MMHyperHeapGetFreeSize(pVM);
    if (cbBefore != cbAfter)
    {
        RTPrintf("Warning: Either we've split out an alignment chunk at the start, or we've got\n"
                 "         an alloc/free accounting bug: cbBefore=%d cbAfter=%d\n", cbBefore, cbAfter);
#ifdef DEBUG
        MMHyperHeapDump(pVM);
#endif
    }

    RTPrintf("tstMMHyperHeap: Success\n");
#ifdef LOG_ENABLED
    RTLogFlush(NULL);
#endif
    return 0;
}
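The alignment check in the allocation loop (RT_ALIGN_P comparing the rounded pointer against the original) boils down to testing that the low bits of the address are zero for a power-of-two alignment. A plain C sketch of that check, using C11 aligned_alloc in place of MMHyperAlloc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* A pointer is aligned to a power-of-two uAlign iff its low bits are zero. */
static int isAligned(const void *pv, uintptr_t uAlign)
{
    return ((uintptr_t)pv & (uAlign - 1)) == 0;   /* uAlign must be a power of two */
}

int main(void)
{
    void *pv = aligned_alloc(4096, 4096);   /* C11; size must be a multiple of the alignment */
    if (!pv)
        return 1;
    printf("4K aligned: %d, 8K aligned: %d\n", isAligned(pv, 4096), isAligned(pv, 8192));
    free(pv);
    return 0;
}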
Example #4
/**
 * Allocates a page from the page pool.
 *
 * @returns Pointer to allocated page(s).
 * @returns NULL on failure.
 * @param   pPool   Pointer to the page pool.
 * @thread  The Emulation Thread.
 */
DECLINLINE(void *) mmR3PagePoolAlloc(PMMPAGEPOOL pPool)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cAllocCalls);

    /*
     * Walk free list.
     */
    if (pPool->pHeadFree)
    {
        PMMPAGESUBPOOL  pSub = pPool->pHeadFree;
        /* decrement free count and unlink if no more free entries. */
        if (!--pSub->cPagesFree)
            pPool->pHeadFree = pSub->pNextFree;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages--;
#endif

        /* find free spot in bitmap. */
#ifdef USE_INLINE_ASM_BIT_OPS
        const int iPage = ASMBitFirstClear(pSub->auBitmap, pSub->cPages);
        if (iPage >= 0)
        {
            Assert(!ASMBitTest(pSub->auBitmap, iPage));
            ASMBitSet(pSub->auBitmap, iPage);
            return (uint8_t *)pSub->pvPages + PAGE_SIZE * iPage;
        }
#else
        unsigned   *pu = &pSub->auBitmap[0];
        unsigned   *puEnd = &pSub->auBitmap[pSub->cPages / (sizeof(pSub->auBitmap[0]) * 8)];
        while (pu < puEnd)
        {
            unsigned u;
            if ((u = *pu) != ~0U)
            {
                unsigned iBit = 0;
                unsigned uMask = 1;
                while (iBit < sizeof(pSub->auBitmap[0]) * 8)
                {
                    if (!(u & uMask))
                    {
                        *pu |= uMask;
                        return (uint8_t *)pSub->pvPages
                            + PAGE_SIZE * (iBit + ((uint8_t *)pu - (uint8_t *)&pSub->auBitmap[0]) * 8);
                    }
                    iBit++;
                    uMask <<= 1;
                }
                STAM_COUNTER_INC(&pPool->cErrors);
                AssertMsgFailed(("how odd, expected to find a free bit in %#x, but didn't\n", u));
            }
            /* next */
            pu++;
        }
#endif
        STAM_COUNTER_INC(&pPool->cErrors);
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages++;
#endif
        AssertMsgFailed(("how strange, expected to find a free bit in %p, but didn't (%d pages supposed to be free!)\n", pSub, pSub->cPagesFree + 1));
    }

    /*
     * Allocate new subpool.
     */
    unsigned        cPages = !pPool->fLow ? 128 : 32;
    PMMPAGESUBPOOL  pSub;
    int rc = MMHyperAlloc(pPool->pVM,
                          RT_OFFSETOF(MMPAGESUBPOOL, auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)])
                          + (sizeof(SUPPAGE) + sizeof(MMPPLOOKUPHCPHYS)) * cPages
                          + sizeof(MMPPLOOKUPHCPTR),
                          0,
                          MM_TAG_MM_PAGE,
                          (void **)&pSub);
    if (RT_FAILURE(rc))
        return NULL;

    PSUPPAGE paPhysPages = (PSUPPAGE)&pSub->auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)];
    Assert((uintptr_t)paPhysPages >= (uintptr_t)&pSub->auBitmap[1]);
    if (!pPool->fLow)
    {
        rc = SUPR3PageAllocEx(cPages,
                              0 /* fFlags */,
                              &pSub->pvPages,
                              NULL,
                              paPhysPages);
        if (RT_FAILURE(rc))
            rc = VMSetError(pPool->pVM, rc, RT_SRC_POS,
                            N_("Failed to lock host %zd bytes of memory (out of memory)"), (size_t)cPages << PAGE_SHIFT);
    }
    else
        rc = SUPR3LowAlloc(cPages, &pSub->pvPages, NULL, paPhysPages);
    if (RT_SUCCESS(rc))
    {
        /*
         * Setup the sub structure and allocate the requested page.
         */
        pSub->cPages    = cPages;
        pSub->cPagesFree= cPages - 1;
        pSub->paPhysPages = paPhysPages;
        memset(pSub->auBitmap, 0, cPages / 8);
        /* allocate first page. */
        pSub->auBitmap[0] |= 1;
        /* link into free chain. */
        pSub->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree= pSub;
        /* link into main chain. */
        pSub->pNext     = pPool->pHead;
        pPool->pHead    = pSub;
        /* update pool statistics. */
        pPool->cSubPools++;
        pPool->cPages  += cPages;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages += cPages - 1;
#endif

        /*
         * Initialize the physical pages with backpointer to subpool.
         */
        unsigned i = cPages;
        while (i-- > 0)
        {
            AssertMsg(paPhysPages[i].Phys && !(paPhysPages[i].Phys & PAGE_OFFSET_MASK),
                      ("i=%d Phys=%RHp\n", i, paPhysPages[i].Phys));
            paPhysPages[i].uReserved = (RTHCUINTPTR)pSub;
        }

        /*
         * Initialize the physical lookup record with backpointers to the physical pages.
         */
        PMMPPLOOKUPHCPHYS paLookupPhys = (PMMPPLOOKUPHCPHYS)&paPhysPages[cPages];
        i = cPages;
        while (i-- > 0)
        {
            paLookupPhys[i].pPhysPage = &paPhysPages[i];
            paLookupPhys[i].Core.Key = paPhysPages[i].Phys;
            RTAvlHCPhysInsert(&pPool->pLookupPhys, &paLookupPhys[i].Core);
        }

        /*
         * And the one record for virtual memory lookup.
         */
        PMMPPLOOKUPHCPTR   pLookupVirt = (PMMPPLOOKUPHCPTR)&paLookupPhys[cPages];
        pLookupVirt->pSubPool = pSub;
        pLookupVirt->Core.Key = pSub->pvPages;
        RTAvlPVInsert(&pPool->pLookupVirt, &pLookupVirt->Core);

        /* return allocated page (first). */
        return pSub->pvPages;
    }

    MMHyperFree(pPool->pVM, pSub);
    STAM_COUNTER_INC(&pPool->cErrors);
    if (pPool->fLow)
        VMSetError(pPool->pVM, rc, RT_SRC_POS,
                   N_("Failed to expand page pool for memory below 4GB. Current size: %d pages"),
                   pPool->cPages);
    AssertMsgFailed(("Failed to expand pool%s. rc=%Rrc poolsize=%d\n",
                     pPool->fLow ? " (<4GB)" : "", rc, pPool->cPages));
    return NULL;
}
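When the inline-assembly bit operations are not available, the fallback path above scans the bitmap word by word, skipping all-ones words, to find the first clear bit. The same idea as a portable standalone function; this is an illustrative stand-in, not the actual ASMBitFirstClear implementation:

#include <stdint.h>
#include <stdio.h>

/* Scan a bitmap of cBits bits (cBits a multiple of 32); return the index of
 * the first clear bit, or -1 if every bit is set. */
static int bitFirstClear(const uint32_t *pau32, unsigned cBits)
{
    for (unsigned iWord = 0; iWord < cBits / 32; iWord++)
        if (pau32[iWord] != UINT32_MAX)                 /* this word has a free bit */
            for (unsigned iBit = 0; iBit < 32; iBit++)
                if (!(pau32[iWord] & (UINT32_C(1) << iBit)))
                    return (int)(iWord * 32 + iBit);
    return -1;
}

int main(void)
{
    uint32_t auBitmap[4] = { UINT32_MAX, UINT32_MAX, 0xFFFF7FFF, 0 };
    printf("first clear bit: %d\n", bitFirstClear(auBitmap, 128)); /* bit 15 of word 2 -> 79 */
    return 0;
}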
Example #5
/**
 * Internal worker for the queue creation apis.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   cbItem              Item size.
 * @param   cItems              Number of items.
 * @param   cMilliesInterval    Number of milliseconds between polling the queue.
 *                              If 0 then the emulation thread will be notified whenever an item arrives.
 * @param   fRZEnabled          Set if the queue will be used from RC/R0 and need to be allocated from the hyper heap.
 * @param   pszName             The queue name. Unique. Not copied.
 * @param   ppQueue             Where to store the queue handle.
 */
static int pdmR3QueueCreate(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval, bool fRZEnabled,
                            const char *pszName, PPDMQUEUE *ppQueue)
{
    PUVM pUVM = pVM->pUVM;

    /*
     * Validate input.
     */
    AssertMsgReturn(cbItem >= sizeof(PDMQUEUEITEMCORE) && cbItem < _1M, ("cbItem=%zu\n", cbItem), VERR_OUT_OF_RANGE);
    AssertMsgReturn(cItems >= 1 && cItems <= _64K, ("cItems=%u\n", cItems), VERR_OUT_OF_RANGE);

    /*
     * Align the item size and calculate the structure size.
     */
    cbItem = RT_ALIGN(cbItem, sizeof(RTUINTPTR));
    size_t cb = cbItem * cItems + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16);
    PPDMQUEUE pQueue;
    int rc;
    if (fRZEnabled)
        rc = MMHyperAlloc(pVM, cb, 0, MM_TAG_PDM_QUEUE, (void **)&pQueue);
    else
        rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_QUEUE, cb, (void **)&pQueue);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the data fields.
     */
    pQueue->pVMR3 = pVM;
    pQueue->pVMR0 = fRZEnabled ? pVM->pVMR0 : NIL_RTR0PTR;
    pQueue->pVMRC = fRZEnabled ? pVM->pVMRC : NIL_RTRCPTR;
    pQueue->pszName = pszName;
    pQueue->cMilliesInterval = cMilliesInterval;
    //pQueue->pTimer = NULL;
    pQueue->cbItem = (uint32_t)cbItem;
    pQueue->cItems = cItems;
    //pQueue->pPendingR3 = NULL;
    //pQueue->pPendingR0 = NULL;
    //pQueue->pPendingRC = NULL;
    pQueue->iFreeHead = cItems;
    //pQueue->iFreeTail = 0;
    PPDMQUEUEITEMCORE pItem = (PPDMQUEUEITEMCORE)((char *)pQueue + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16));
    for (unsigned i = 0; i < cItems; i++, pItem = (PPDMQUEUEITEMCORE)((char *)pItem + cbItem))
    {
        pQueue->aFreeItems[i].pItemR3 = pItem;
        if (fRZEnabled)
        {
            pQueue->aFreeItems[i].pItemR0 = MMHyperR3ToR0(pVM, pItem);
            pQueue->aFreeItems[i].pItemRC = MMHyperR3ToRC(pVM, pItem);
        }
    }

    /*
     * Create timer?
     */
    if (cMilliesInterval)
    {
        rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, pdmR3QueueTimer, pQueue, "Queue timer", &pQueue->pTimer);
        if (RT_SUCCESS(rc))
        {
            rc = TMTimerSetMillies(pQueue->pTimer, cMilliesInterval);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("TMTimerSetMillies failed rc=%Rrc\n", rc));
                int rc2 = TMR3TimerDestroy(pQueue->pTimer);
                AssertRC(rc2);
            }
        }
        else
            AssertMsgFailed(("TMR3TimerCreateInternal failed rc=%Rrc\n", rc));
        if (RT_FAILURE(rc))
        {
            if (fRZEnabled)
                MMHyperFree(pVM, pQueue);
            else
                MMR3HeapFree(pQueue);
            return rc;
        }

        /*
         * Insert into the queue list for timer driven queues.
         */
        pdmLock(pVM);
        pQueue->pNext = pUVM->pdm.s.pQueuesTimer;
        pUVM->pdm.s.pQueuesTimer = pQueue;
        pdmUnlock(pVM);
    }
    else
    {
        /*
         * Insert into the queue list for forced action driven queues.
         * This is a FIFO, so insert at the end.
         */
        /** @todo we should add a priority to the queues so we don't have to rely on
         * the initialization order to deal with problems like @bugref{1605} (pgm/pcnet
         * deadlock caused by the critsect queue to be last in the chain).
         * - Update, the critical sections are no longer using queues, so this isn't a real
         *   problem any longer. The priority might be a nice feature for later though.
         */
        pdmLock(pVM);
        if (!pUVM->pdm.s.pQueuesForced)
            pUVM->pdm.s.pQueuesForced = pQueue;
        else
        {
            PPDMQUEUE pPrev = pUVM->pdm.s.pQueuesForced;
            while (pPrev->pNext)
                pPrev = pPrev->pNext;
            pPrev->pNext = pQueue;
        }
        pdmUnlock(pVM);
    }

    /*
     * Register the statistics.
     */
    STAMR3RegisterF(pVM, &pQueue->cbItem,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,        "Item size.",                       "/PDM/Queue/%s/cbItem",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->cItems,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,        "Queue size.",                      "/PDM/Queue/%s/cItems",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatAllocFailures,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "PDMQueueAlloc failures.",          "/PDM/Queue/%s/AllocFailures",  pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatInsert,           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Calls to PDMQueueInsert.",         "/PDM/Queue/%s/Insert",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlush,            STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Calls to pdmR3QueueFlush.",        "/PDM/Queue/%s/Flush",          pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlushLeftovers,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "Left over items after flush.",     "/PDM/Queue/%s/FlushLeftovers", pQueue->pszName);
#ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, &pQueue->StatFlushPrf,         STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Profiling pdmR3QueueFlush.",       "/PDM/Queue/%s/FlushPrf",       pQueue->pszName);
    STAMR3RegisterF(pVM, (void *)&pQueue->cStatPending, STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,        "Pending items.",                   "/PDM/Queue/%s/Pending",        pQueue->pszName);
#endif

    *ppQueue = pQueue;
    return VINF_SUCCESS;
}
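The size computation near the top packs the queue header, the aFreeItems array, and all item payloads into one allocation, with the item area starting at a 16-byte aligned offset. Below is a sketch of that single-allocation layout with plain C types; QUEUE and ALIGN_Z are illustrative, and the PDMQUEUE_FREE_SLACK padding is omitted for brevity.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_Z(cb, uAlign) (((cb) + ((uAlign) - 1)) & ~(size_t)((uAlign) - 1))

typedef struct QUEUE
{
    uint32_t cbItem;
    uint32_t cItems;
    void    *apFreeItems[];     /* flexible array, like aFreeItems */
} QUEUE;

int main(void)
{
    size_t   cbItem = ALIGN_Z(20, sizeof(uintptr_t));  /* align item size; 20 -> 24 on 64-bit */
    uint32_t cItems = 8;
    /* Item storage begins after the header + free array, rounded up to 16 bytes. */
    size_t   offItems = ALIGN_Z(offsetof(QUEUE, apFreeItems) + cItems * sizeof(void *), 16);
    QUEUE   *pQueue = calloc(1, offItems + cbItem * cItems);
    if (!pQueue)
        return 1;
    pQueue->cbItem = (uint32_t)cbItem;
    pQueue->cItems = cItems;
    for (uint32_t i = 0; i < cItems; i++)   /* point each free slot at its item */
        pQueue->apFreeItems[i] = (char *)pQueue + offItems + i * cbItem;
    printf("item stride %zu, first item at offset %zu\n", cbItem, offItems);
    free(pQueue);
    return 0;
}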
Example #6
static DECLCALLBACK(int) doit(PVM pVM)
{
    RTPrintf(TESTCASE ": testing...\n");
    SetupSelectors(pVM);

    /*
     * Loading the module and resolve the entry point.
     */
    int rc = PDMR3LdrLoadRC(pVM, NULL, "tstMicroRC.gc");
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to load tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }
    RTRCPTR RCPtrEntry;
    rc = PDMR3LdrGetSymbolRC(pVM, "tstMicroRC.gc", "tstMicroRC", &RCPtrEntry);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to resolve the 'tstMicroRC' entry point in tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }
    RTRCPTR RCPtrStart;
    rc = PDMR3LdrGetSymbolRC(pVM, "tstMicroRC.gc", "tstMicroRCAsmStart", &RCPtrStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to resolve the 'tstMicroRCAsmStart' entry point in tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }
    RTRCPTR RCPtrEnd;
    rc = PDMR3LdrGetSymbolRC(pVM, "tstMicroRC.gc", "tstMicroRCAsmEnd", &RCPtrEnd);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to resolve the 'tstMicroRCAsmEnd' entry point in tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }

    /*
     * Allocate and initialize the instance data.
     */
    PTSTMICRO pTst;
    rc = MMHyperAlloc(pVM, RT_ALIGN_Z(sizeof(*pTst), PAGE_SIZE), PAGE_SIZE, MM_TAG_VM, (void **)&pTst);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to resolve allocate instance memory (%d bytes), rc=%Rra\n", sizeof(*pTst), rc);
        return rc;
    }
    pTst->RCPtr = MMHyperR3ToRC(pVM, pTst);
    pTst->RCPtrStack = MMHyperR3ToRC(pVM, &pTst->au8Stack[sizeof(pTst->au8Stack) - 32]);

    /* the page must be writable from user mode */
    rc = PGMMapModifyPage(pVM, pTst->RCPtr, sizeof(*pTst), X86_PTE_US | X86_PTE_RW, ~(uint64_t)(X86_PTE_US | X86_PTE_RW));
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": PGMMapModifyPage -> rc=%Rra\n", rc);
        return rc;
    }

    /* all the code must be executable from R3. */
    rc = PGMMapModifyPage(pVM, RCPtrStart, RCPtrEnd - RCPtrStart + PAGE_SIZE, X86_PTE_US, ~(uint64_t)X86_PTE_US);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": PGMMapModifyPage -> rc=%Rra\n", rc);
        return rc;
    }
    DBGFR3PagingDumpEx(pVM->pUVM, 0 /*idCpu*/, DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE
                       | DBGFPGDMP_FLAGS_SHADOW | DBGFPGDMP_FLAGS_HEADER | DBGFPGDMP_FLAGS_PRINT_CR3,
                       0 /*cr3*/, 0 /*u64FirstAddr*/, UINT64_MAX /*u64LastAddr*/, 99 /*cMaxDepth*/, NULL);

#if 0
    /*
     * Disassemble the assembly...
     */
    RTGCPTR GCPtr = RCPtrStart;
    while (GCPtr < RCPtrEnd)
    {
        size_t  cb = 0;
        char    sz[256];
        int rc = DBGFR3DisasInstrEx(pVM, CPUMGetHyperCS(pVM), GCPtr, 0, sz, sizeof(sz), &cb);
        if (RT_SUCCESS(rc))
            RTLogPrintf("%s\n", sz);
        else
        {
            RTLogPrintf("%RGv rc=%Rrc\n", GCPtr, rc);
            cb = 1;
        }
        GCPtr += cb;
    }
#endif

#ifdef VBOX_WITH_RAW_MODE
    /*
     * Do the profiling.
     */
    /* execute the instruction profiling tests */
    PrintHeaderInstr();
    int i;
    for (i = TSTMICROTEST_OVERHEAD; i < TSTMICROTEST_TRAP_FIRST; i++)
    {
        TSTMICROTEST enmTest = (TSTMICROTEST)i;
        uint64_t    cMin = UINT64_MAX;
        uint64_t    cMax = 0;
        uint64_t    cTotal = 0;
        unsigned    cSamples = 0;
        rc = VINF_SUCCESS;
        for (int c = 0; c < 100; c++)
        {
            int rc2 = VMMR3CallRC(pVM, RCPtrEntry, 2, pTst->RCPtr, enmTest);
            if (RT_SUCCESS(rc2))
            {
                uint64_t u64 = pTst->aResults[enmTest].cTotalTicks;
                if (cMin > u64)
                    cMin = u64;
                if (cMax < u64)
                    cMax = u64;
                cTotal += u64;
                cSamples++;
            }
            else if (RT_SUCCESS(rc))
                rc = rc2;
        }
        uint64_t cAvg = cTotal / (cSamples ? cSamples : 1);
        pTst->aResults[enmTest].cTotalTicks = cAvg;
        PrintResultInstr(pTst, enmTest, rc, cMin, cAvg, cMax);
        /* store the overhead */
        if (enmTest == TSTMICROTEST_OVERHEAD)
            pTst->u64Overhead = cMin;
    }
#endif


#ifdef VBOX_WITH_RAW_MODE
    /* execute the trap/cycle profiling tests. */
    RTPrintf("\n");
    PrintHeaderTraps();
    /* don't disable rdtsc in R1/R2/R3! */
    CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);
    for (i = TSTMICROTEST_TRAP_FIRST; i < TSTMICROTEST_MAX; i++)
    {
        TSTMICROTEST enmTest = (TSTMICROTEST)i;
        rc = VMMR3CallRC(pVM, RCPtrEntry, 2, pTst->RCPtr, enmTest);
        PrintResultTrap(pTst, enmTest, rc);
    }
#endif

    RTPrintf(TESTCASE ": done!\n");
    return VINF_SUCCESS;
}
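The instruction-profiling loop keeps a running minimum, maximum, and total over 100 runs and reports the average; the minimum of the overhead test is stored so later results can be corrected. The same accumulation pattern in isolation, over dummy samples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cMin = UINT64_MAX, cMax = 0, cTotal = 0;
    unsigned cSamples = 0;
    uint64_t aSamples[] = { 120, 95, 340, 101, 99 };   /* stand-ins for cTotalTicks readings */
    for (unsigned i = 0; i < sizeof(aSamples) / sizeof(aSamples[0]); i++)
    {
        uint64_t u64 = aSamples[i];
        if (cMin > u64) cMin = u64;
        if (cMax < u64) cMax = u64;
        cTotal += u64;
        cSamples++;
    }
    printf("min=%llu avg=%llu max=%llu\n",
           (unsigned long long)cMin,
           (unsigned long long)(cTotal / (cSamples ? cSamples : 1)),   /* guard against zero samples */
           (unsigned long long)cMax);
    return 0;
}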
Example #7
int MsixInit(PCPDMPCIHLP pPciHlp, PPCIDEVICE pDev, PPDMMSIREG pMsiReg)
{
    if (pMsiReg->cMsixVectors == 0)
        return VINF_SUCCESS;

    /* We cannot init MSI-X on raw devices yet. */
    Assert(!pciDevIsPassthrough(pDev));

    uint16_t   cVectors    = pMsiReg->cMsixVectors;
    uint8_t    iCapOffset  = pMsiReg->iMsixCapOffset;
    uint8_t    iNextOffset = pMsiReg->iMsixNextOffset;
    uint8_t    iBar        = pMsiReg->iMsixBar;

    if (cVectors > VBOX_MSIX_MAX_ENTRIES)
    {
        AssertMsgFailed(("Too many MSI-X vectors: %d\n", cVectors));
        return VERR_TOO_MUCH_DATA;
    }

    if (iBar > 5)
    {
        AssertMsgFailed(("Using wrong BAR for MSI-X: %d\n", iBar));
        return VERR_INVALID_PARAMETER;
    }

    Assert(iCapOffset != 0 && iCapOffset < 0xff && iNextOffset < 0xff);

    int rc = VINF_SUCCESS;

    /* If the device is a passthrough device, its BAR is registered using the common mechanism. */
    if (!pciDevIsPassthrough(pDev))
    {
        rc = PDMDevHlpPCIIORegionRegister(pDev->pDevIns, iBar, 0x1000, PCI_ADDRESS_SPACE_MEM, msixMap);
        if (RT_FAILURE(rc))
            return rc;
    }

    pDev->Int.s.u8MsixCapOffset = iCapOffset;
    pDev->Int.s.u8MsixCapSize   = VBOX_MSIX_CAP_SIZE;
    PVM pVM = PDMDevHlpGetVM(pDev->pDevIns);

    pDev->Int.s.pMsixPageR3     = NULL;

    rc = MMHyperAlloc(pVM, 0x1000, 1, MM_TAG_PDM_DEVICE_USER, (void **)&pDev->Int.s.pMsixPageR3);
    if (RT_FAILURE(rc) || (pDev->Int.s.pMsixPageR3 == NULL))
        return VERR_NO_VM_MEMORY;
    RT_BZERO(pDev->Int.s.pMsixPageR3, 0x1000);
    pDev->Int.s.pMsixPageR0     = MMHyperR3ToR0(pVM, pDev->Int.s.pMsixPageR3);
    pDev->Int.s.pMsixPageRC     = MMHyperR3ToRC(pVM, pDev->Int.s.pMsixPageR3);

    /* R3 PCI helper */
    pDev->Int.s.pPciBusPtrR3    = pPciHlp;

    PCIDevSetByte(pDev,  iCapOffset + 0, VBOX_PCI_CAP_ID_MSIX);
    PCIDevSetByte(pDev,  iCapOffset + 1, iNextOffset); /* next */
    PCIDevSetWord(pDev,  iCapOffset + VBOX_MSIX_CAP_MESSAGE_CONTROL, cVectors - 1);

    uint32_t offTable = 0, offPBA = 0x800;

    PCIDevSetDWord(pDev,  iCapOffset + VBOX_MSIX_TABLE_BIROFFSET, offTable | iBar);
    PCIDevSetDWord(pDev,  iCapOffset + VBOX_MSIX_PBA_BIROFFSET,   offPBA   | iBar);

    pciDevSetMsixCapable(pDev);

    return VINF_SUCCESS;
}
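MsixInit writes a standard MSI-X capability into PCI config space: capability ID 0x11, a next-capability pointer, a message-control word holding the table size minus one, and two dwords that combine a table/PBA offset with the BAR index (BIR) in the low bits. A sketch of those writes against a plain config-space byte array; the field offsets follow the PCI spec, while the 0x50 capability offset and vector count are example values:

#include <stdint.h>
#include <stdio.h>

#define PCI_CAP_ID_MSIX     0x11
#define MSIX_MSG_CTRL       0x02    /* message control word: table size - 1 */
#define MSIX_TABLE_BIROFF   0x04    /* dword: table offset | BIR */
#define MSIX_PBA_BIROFF     0x08    /* dword: PBA offset | BIR */

static void writeDWord(uint8_t *pbCfg, unsigned off, uint32_t u32)
{
    pbCfg[off]     = (uint8_t)u32;          /* little-endian, as PCI config space is */
    pbCfg[off + 1] = (uint8_t)(u32 >> 8);
    pbCfg[off + 2] = (uint8_t)(u32 >> 16);
    pbCfg[off + 3] = (uint8_t)(u32 >> 24);
}

int main(void)
{
    uint8_t  abCfg[256] = {0};
    unsigned iCap = 0x50, iBar = 1, cVectors = 8;
    abCfg[iCap + 0] = PCI_CAP_ID_MSIX;
    abCfg[iCap + 1] = 0;                                 /* no next capability */
    abCfg[iCap + MSIX_MSG_CTRL] = (uint8_t)(cVectors - 1); /* low byte of message control */
    writeDWord(abCfg, iCap + MSIX_TABLE_BIROFF, 0x000 | iBar);  /* table at BAR1 + 0 */
    writeDWord(abCfg, iCap + MSIX_PBA_BIROFF,   0x800 | iBar);  /* PBA at BAR1 + 0x800 */
    printf("msg-ctrl=%u table=%#x pba=%#x\n",
           (unsigned)abCfg[iCap + MSIX_MSG_CTRL], 0x000 | iBar, 0x800 | iBar);
    return 0;
}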
Example #8
/**
 * Initializes the interpreted execution manager.
 *
 * This must be called after CPUM as we're querying information from CPUM about
 * the guest and host CPUs.
 *
 * @returns VBox status code.
 * @param   pVM                The cross context VM structure.
 */
VMMR3DECL(int)      IEMR3Init(PVM pVM)
{
    uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
    uint64_t const uInitialTlbPhysRev  = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        pVCpu->iem.s.pCtxR3 = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->iem.s.pCtxR0 = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
        pVCpu->iem.s.pCtxRC = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

        pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
        pVCpu->iem.s.CodeTlb.uTlbPhysRev  = pVCpu->iem.s.DataTlb.uTlbPhysRev  = uInitialTlbPhysRev;

        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions,               STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Instructions interpreted",                     "/IEM/CPU%u/cInstructions", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps,                  STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Number of longjmp calls",                      "/IEM/CPU%u/cLongJumps", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits,             STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Potential exits",                              "/IEM/CPU%u/cPotentialExits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_ASPECT_NOT_IMPLEMENTED",              "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented,     STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_INSTR_NOT_IMPLEMENTED",               "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Informational statuses returned",              "/IEM/CPU%u/cRetInfStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses,             STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Error statuses returned",                      "/IEM/CPU%u/cRetErrStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten,                   STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Approx bytes written",                         "/IEM/CPU%u/cbWritten", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit,              STAMTYPE_U32,       STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);

#ifdef VBOX_WITH_STATISTICS
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbHits,            STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Code TLB hits",                            "/IEM/CPU%u/CodeTlb-Hits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbHits,            STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB hits",                            "/IEM/CPU%u/DataTlb-Hits", idCpu);
#endif
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbMisses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Code TLB misses",                          "/IEM/CPU%u/CodeTlb-Misses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision,        STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Code TLB revision",                        "/IEM/CPU%u/CodeTlb-Revision", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Code TLB physical revision",               "/IEM/CPU%u/CodeTlb-PhysRev", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowReadPath,    STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Code TLB slow read path",                  "/IEM/CPU%u/CodeTlb-SlowReads", idCpu);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbMisses,          STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Data TLB misses",                          "/IEM/CPU%u/DataTlb-Misses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision,        STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Data TLB revision",                        "/IEM/CPU%u/DataTlb-Revision", idCpu);
        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64,       STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
                        "Data TLB physical revision",               "/IEM/CPU%u/DataTlb-PhysRev", idCpu);

#if defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
        /* Allocate instruction statistics and register them. */
        pVCpu->iem.s.pStatsR3 = (PIEMINSTRSTATS)MMR3HeapAllocZ(pVM, MM_TAG_IEM, sizeof(IEMINSTRSTATS));
        AssertLogRelReturn(pVCpu->iem.s.pStatsR3, VERR_NO_MEMORY);
        int rc = MMHyperAlloc(pVM, sizeof(IEMINSTRSTATS), sizeof(uint64_t), MM_TAG_IEM, (void **)&pVCpu->iem.s.pStatsCCR3);
        AssertLogRelRCReturn(rc, rc);
        pVCpu->iem.s.pStatsR0 = MMHyperR3ToR0(pVM, pVCpu->iem.s.pStatsCCR3);
        pVCpu->iem.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->iem.s.pStatsCCR3);
# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
            STAMR3RegisterF(pVM, &pVCpu->iem.s.pStatsCCR3->a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
                            STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
            STAMR3RegisterF(pVM, &pVCpu->iem.s.pStatsR3->a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
                            STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
# include "IEMInstructionStatisticsTmpl.h"
# undef IEM_DO_INSTR_STAT
#endif

        /*
         * Host and guest CPU information.
         */
        if (idCpu == 0)
        {
            pVCpu->iem.s.enmCpuVendor             = CPUMGetGuestCpuVendor(pVM);
            pVCpu->iem.s.enmHostCpuVendor         = CPUMGetHostCpuVendor(pVM);
#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
            switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
            {
                case kCpumMicroarch_Intel_8086:     pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
                case kCpumMicroarch_Intel_80186:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
                case kCpumMicroarch_Intel_80286:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
                case kCpumMicroarch_Intel_80386:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
                case kCpumMicroarch_Intel_80486:    pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
                case kCpumMicroarch_Intel_P5:       pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
                case kCpumMicroarch_Intel_P6:       pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
                case kCpumMicroarch_NEC_V20:        pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
                case kCpumMicroarch_NEC_V30:        pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
                default:                            pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
            }
            LogRel(("IEM: TargetCpu=%s, Microarch=%s\n", iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMR3MicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch)));
#endif
        }
        else
        {
            pVCpu->iem.s.enmCpuVendor             = pVM->aCpus[0].iem.s.enmCpuVendor;
            pVCpu->iem.s.enmHostCpuVendor         = pVM->aCpus[0].iem.s.enmHostCpuVendor;
#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
            pVCpu->iem.s.uTargetCpu               = pVM->aCpus[0].iem.s.uTargetCpu;
#endif
        }

        /*
         * Mark all buffers free.
         */
        uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
        while (iMemMap-- > 0)
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    }
    return VINF_SUCCESS;
}
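The initial TLB revision values are deliberately set a couple hundred increments below the 64-bit wrap point, so the revision-overflow handling is exercised shortly after the VM starts rather than after an astronomically long uptime. A sketch of that trick with an assumed increment value (IEMTLB_REVISION_INCR's real value is not shown in this excerpt):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_REVISION_INCR UINT64_C(0x1000)   /* placeholder increment, not IEM's */

int main(void)
{
    uint64_t uRev = UINT64_C(0) - TLB_REVISION_INCR * 200U; /* start near UINT64_MAX */
    for (unsigned i = 0; i < 201; i++)
        uRev += TLB_REVISION_INCR;          /* wraps back past zero on the 200th bump */
    printf("revision after 201 bumps: %#" PRIx64 "\n", uRev); /* 0x1000 */
    return 0;
}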
Example #9
                                                       RCPTRTYPE(PFNPGMRZPHYSPFHANDLER) pfnPfHandlerRC,
                                                       const char *pszDesc, PPGMPHYSHANDLERTYPE phType)
{
    AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
    AssertReturn(pfnHandlerR0   != NIL_RTR0PTR, VERR_INVALID_POINTER);
    AssertReturn(pfnPfHandlerR0 != NIL_RTR0PTR, VERR_INVALID_POINTER);
    AssertReturn(pfnHandlerRC   != NIL_RTRCPTR || HMIsEnabled(pVM), VERR_INVALID_POINTER);
    AssertReturn(pfnPfHandlerRC != NIL_RTRCPTR || HMIsEnabled(pVM), VERR_INVALID_POINTER);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(   enmKind == PGMPHYSHANDLERKIND_WRITE
                 || enmKind == PGMPHYSHANDLERKIND_ALL
                 || enmKind == PGMPHYSHANDLERKIND_MMIO,
                 VERR_INVALID_PARAMETER);

    PPGMPHYSHANDLERTYPEINT pType;
    int rc = MMHyperAlloc(pVM, sizeof(*pType), 0, MM_TAG_PGM_HANDLER_TYPES, (void **)&pType);
    if (RT_SUCCESS(rc))
    {
        pType->u32Magic         = PGMPHYSHANDLERTYPEINT_MAGIC;
        pType->cRefs            = 1;
        pType->enmKind          = enmKind;
        pType->uState           = enmKind == PGMPHYSHANDLERKIND_WRITE
                                ? PGM_PAGE_HNDL_PHYS_STATE_WRITE : PGM_PAGE_HNDL_PHYS_STATE_ALL;
        pType->pfnHandlerR3     = pfnHandlerR3;
        pType->pfnHandlerR0     = pfnHandlerR0;
        pType->pfnPfHandlerR0   = pfnPfHandlerR0;
        pType->pfnHandlerRC     = pfnHandlerRC;
        pType->pfnPfHandlerRC   = pfnPfHandlerRC;
        pType->pszDesc          = pszDesc;

        pgmLock(pVM);