Example #1
static int pdmNsBwGroupCreate(PPDMNETSHAPER pShaper, const char *pcszBwGroup, uint64_t cbTransferPerSecMax)
{
    LogFlowFunc(("pShaper=%#p pcszBwGroup=%#p{%s} cbTransferPerSecMax=%llu\n",
                 pShaper, pcszBwGroup, pcszBwGroup, cbTransferPerSecMax));

    AssertPtrReturn(pShaper, VERR_INVALID_POINTER);
    AssertPtrReturn(pcszBwGroup, VERR_INVALID_POINTER);
    AssertReturn(*pcszBwGroup != '\0', VERR_INVALID_PARAMETER);

    int         rc;
    PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
    if (!pBwGroup)
    {
        rc = MMHyperAlloc(pShaper->pVM, sizeof(PDMNSBWGROUP), 64,
                          MM_TAG_PDM_NET_SHAPER, (void **)&pBwGroup);
        if (RT_SUCCESS(rc))
        {
            rc = PDMR3CritSectInit(pShaper->pVM, &pBwGroup->cs, RT_SRC_POS, "BWGRP");
            if (RT_SUCCESS(rc))
            {
                pBwGroup->pszName = RTStrDup(pcszBwGroup);
                if (pBwGroup->pszName)
                {
                    pBwGroup->pShaper               = pShaper;
                    pBwGroup->cRefs                 = 0;

                    pdmNsBwGroupSetLimit(pBwGroup, cbTransferPerSecMax);

                    pBwGroup->cbTokensLast          = pBwGroup->cbBucketSize;
                    pBwGroup->tsUpdatedLast         = RTTimeSystemNanoTS();

                    LogFlowFunc(("pcszBwGroup={%s} cbBucketSize=%u\n",
                                 pcszBwGroup, pBwGroup->cbBucketSize));
                    pdmNsBwGroupLink(pBwGroup);
                    return VINF_SUCCESS;
                }
                rc = VERR_NO_MEMORY; /* RTStrDup failed. */
                PDMR3CritSectDelete(&pBwGroup->cs);
            }
            MMHyperFree(pShaper->pVM, pBwGroup);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    else
        rc = VERR_ALREADY_EXISTS;

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
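A minimal caller sketch for pdmNsBwGroupCreate, assuming it is invoked from the same source file (the function is static) and that pShaper has already been initialized; the group name and the 10 MB/s limit are made up for illustration.

static int exampleCreateDefaultGroup(PPDMNETSHAPER pShaper)
{
    /* Create a group limited to roughly 10 MB/s; the name must be non-empty. */
    int rc = pdmNsBwGroupCreate(pShaper, "ExampleGroup", 10 * _1M);
    if (rc == VERR_ALREADY_EXISTS)
        rc = VINF_SUCCESS; /* a group with this name already exists; accept that here */
    return rc;
}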
Example #2
/**
 * Terminates the network shaper.
 *
 * @returns VBox status code.
 * @param   pVM  Pointer to the VM.
 *
 * @remarks This method destroys all bandwidth group objects.
 */
int pdmR3NetShaperTerm(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    AssertPtrReturn(pUVM, VERR_INVALID_POINTER);
    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
    AssertPtrReturn(pShaper, VERR_INVALID_POINTER);

    /* Destroy the bandwidth groups. */
    PPDMNSBWGROUP pBwGroup = pShaper->pBwGroupsHead;
    while (pBwGroup)
    {
        PPDMNSBWGROUP pFree = pBwGroup;
        pBwGroup = pBwGroup->pNext;
        pdmNsBwGroupTerminate(pFree);
        MMHyperFree(pVM, pFree);
    }

    RTCritSectDelete(&pShaper->cs);
    return VINF_SUCCESS;
}
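The teardown loop above uses the standard walk-and-free pattern for a singly linked list: save the next pointer before destroying the current node. Below is a standalone plain-C sketch of the same pattern; the NODE type and destroyList name are made up for illustration.

#include <stdlib.h>

typedef struct NODE
{
    struct NODE *pNext;
    /* payload would go here */
} NODE;

static void destroyList(NODE *pHead)
{
    while (pHead)
    {
        NODE *pFree = pHead;    /* remember the current node ...               */
        pHead = pHead->pNext;   /* ... advance first ...                       */
        free(pFree);            /* ... so the walk never touches freed memory. */
    }
}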
Example #3
/**
 * Entry point of the tstMMHyperHeap testcase.
 */
extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
{

    /*
     * Init runtime.
     */
    RTR3InitExe(argc, &argv, 0);

    /*
     * Create empty VM structure and call MMR3Init().
     */
    PVM         pVM;
    RTR0PTR     pvR0;
    SUPPAGE     aPages[RT_ALIGN_Z(sizeof(*pVM) + NUM_CPUS * sizeof(VMCPU), PAGE_SIZE) >> PAGE_SHIFT];
    int rc = SUPR3Init(NULL);
    if (RT_SUCCESS(rc))
        rc = SUPR3LowAlloc(RT_ELEMENTS(aPages), (void **)&pVM, &pvR0, &aPages[0]);
    if (RT_FAILURE(rc))
    {
        RTPrintf("Fatal error: SUP Failure! rc=%Rrc\n", rc);
        return 1;
    }
    memset(pVM, 0, sizeof(*pVM)); /* ensure the VM structure starts out zeroed */
    pVM->paVMPagesR3 = aPages;
    pVM->pVMR0 = pvR0;

    static UVM s_UVM;
    PUVM pUVM = &s_UVM;
    pUVM->pVM = pVM;
    pVM->pUVM = pUVM;

    pVM->cCpus = NUM_CPUS;
    pVM->cbSelf = RT_UOFFSETOF(VM, aCpus[pVM->cCpus]);

    rc = STAMR3InitUVM(pUVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = MMR3InitUVM(pUVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = CFGMR3Init(pVM, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: CFGMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = MMR3Init(pVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("Fatal error: MMR3Init failed! rc=%Rrc\n", rc);
        return 1;
    }

    /*
     * Try allocate.
     */
    static struct
    {
        size_t      cb;
        unsigned    uAlignment;
        void       *pvAlloc;
        unsigned    iFreeOrder;
    } aOps[] =
    {
        {        16,          0,    NULL,  0 },
        {        16,          4,    NULL,  1 },
        {        16,          8,    NULL,  2 },
        {        16,         16,    NULL,  5 },
        {        16,         32,    NULL,  4 },
        {        32,          0,    NULL,  3 },
        {        31,          0,    NULL,  6 },
        {      1024,          0,    NULL,  8 },
        {      1024,         32,    NULL, 10 },
        {      1024,         32,    NULL, 12 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 13 },
        {      1024,         32,    NULL,  9 },
        { PAGE_SIZE,         32,    NULL, 11 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 14 },
        {        16,          0,    NULL, 15 },
        {        9,           0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {        36,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {     12344,          0,    NULL,  7 },
        {        50,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
    };
    unsigned i;
#ifdef DEBUG
    MMHyperHeapDump(pVM);
#endif
    size_t cbBefore = MMHyperHeapGetFreeSize(pVM);
    static char szFill[] = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";

    /* allocate */
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM, &aOps[i].pvAlloc);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
            return 1;
        }
        memset(aOps[i].pvAlloc, szFill[i], aOps[i].cb);
        if (RT_ALIGN_P(aOps[i].pvAlloc, (aOps[i].uAlignment ? aOps[i].uAlignment : 8)) != aOps[i].pvAlloc)
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %p, invalid alignment!\n", aOps[i].cb, aOps[i].uAlignment, aOps[i].pvAlloc);
            return 1;
        }
    }

    /* free and allocate the same node again. */
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        if (    !aOps[i].pvAlloc
            ||  aOps[i].uAlignment == PAGE_SIZE)
            continue;
        //size_t cbBeforeSub = MMHyperHeapGetFreeSize(pVM);
        rc = MMHyperFree(pVM, aOps[i].pvAlloc);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperFree(, %p,) -> %d i=%d\n", aOps[i].pvAlloc, rc, i);
            return 1;
        }
        //RTPrintf("debug: i=%d cbBeforeSub=%d now=%d\n", i, cbBeforeSub, MMHyperHeapGetFreeSize(pVM));
        void *pv;
        rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM_REQ, &pv);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
            return 1;
        }
        if (pv != aOps[i].pvAlloc)
        {
            RTPrintf("Failure: Free+Alloc returned different address. new=%p old=%p i=%d (doesn't work with delayed free)\n", pv, aOps[i].pvAlloc, i);
            //return 1;
        }
        aOps[i].pvAlloc = pv;
        #if 0 /* won't work :/ */
        size_t cbAfterSub = MMHyperHeapGetFreeSize(pVM);
        if (cbBeforeSub != cbAfterSub)
        {
            RTPrintf("Failure: cbBeforeSub=%d cbAfterSub=%d. i=%d\n", cbBeforeSub, cbAfterSub, i);
            return 1;
        }
        #endif
    }

    /* free it in a specific order. */
    int cFreed = 0;
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        unsigned j;
        for (j = 0; j < RT_ELEMENTS(aOps); j++)
        {
            if (    aOps[j].iFreeOrder != i
                ||  !aOps[j].pvAlloc)
                continue;
            RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, MMHyperHeapGetFreeSize(pVM), aOps[j].cb, aOps[j].pvAlloc);
            if (aOps[j].uAlignment == PAGE_SIZE)
                cbBefore -= aOps[j].cb;
            else
            {
                rc = MMHyperFree(pVM, aOps[j].pvAlloc);
                if (RT_FAILURE(rc))
                {
                    RTPrintf("Failure: MMHyperFree(, %p,) -> %d j=%d i=%d\n", aOps[j].pvAlloc, rc, i, j);
                    return 1;
                }
            }
            aOps[j].pvAlloc = NULL;
            cFreed++;
        }
    }
    Assert(cFreed == RT_ELEMENTS(aOps));
    RTPrintf("i=done free=%d\n", MMHyperHeapGetFreeSize(pVM));

    /* check that we're back at the right amount of free memory. */
    size_t cbAfter = MMHyperHeapGetFreeSize(pVM);
    if (cbBefore != cbAfter)
    {
        RTPrintf("Warning: Either we've split out an alignment chunk at the start, or we've got\n"
                 "         an alloc/free accounting bug: cbBefore=%d cbAfter=%d\n", cbBefore, cbAfter);
#ifdef DEBUG
        MMHyperHeapDump(pVM);
#endif
    }

    RTPrintf("tstMMHyperHeap: Success\n");
#ifdef LOG_ENABLED
    RTLogFlush(NULL);
#endif
    return 0;
}
Example #4
/**
 * Allocates a page from the page pool.
 *
 * @returns Pointer to allocated page(s).
 * @returns NULL on failure.
 * @param   pPool   Pointer to the page pool.
 * @thread  The Emulation Thread.
 */
DECLINLINE(void *) mmR3PagePoolAlloc(PMMPAGEPOOL pPool)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cAllocCalls);

    /*
     * Walk free list.
     */
    if (pPool->pHeadFree)
    {
        PMMPAGESUBPOOL  pSub = pPool->pHeadFree;
        /* decrement free count and unlink if no more free entries. */
        if (!--pSub->cPagesFree)
            pPool->pHeadFree = pSub->pNextFree;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages--;
#endif

        /* find free spot in bitmap. */
#ifdef USE_INLINE_ASM_BIT_OPS
        const int iPage = ASMBitFirstClear(pSub->auBitmap, pSub->cPages);
        if (iPage >= 0)
        {
            Assert(!ASMBitTest(pSub->auBitmap, iPage));
            ASMBitSet(pSub->auBitmap, iPage);
            return (uint8_t *)pSub->pvPages + PAGE_SIZE * iPage;
        }
#else
        unsigned   *pu = &pSub->auBitmap[0];
        unsigned   *puEnd = &pSub->auBitmap[pSub->cPages / (sizeof(pSub->auBitmap[0]) * 8)];
        while (pu < puEnd)
        {
            unsigned u;
            if ((u = *pu) != ~0U)
            {
                unsigned iBit = 0;
                unsigned uMask = 1;
                while (iBit < sizeof(pSub->auBitmap[0]) * 8)
                {
                    if (!(u & uMask))
                    {
                        *pu |= uMask;
                        return (uint8_t *)pSub->pvPages
                            + PAGE_SIZE * (iBit + ((uint8_t *)pu - (uint8_t *)&pSub->auBitmap[0]) * 8);
                    }
                    iBit++;
                    uMask <<= 1;
                }
                STAM_COUNTER_INC(&pPool->cErrors);
                AssertMsgFailed(("how odd, expected to find a free bit in %#x, but didn't\n", u));
            }
            /* next */
            pu++;
        }
#endif
        STAM_COUNTER_INC(&pPool->cErrors);
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages++;
#endif
        AssertMsgFailed(("how strange, expected to find a free bit in %p, but didn't (%d pages supposed to be free!)\n", pSub, pSub->cPagesFree + 1));
    }

    /*
     * Allocate new subpool.
     */
    unsigned        cPages = !pPool->fLow ? 128 : 32;
    PMMPAGESUBPOOL  pSub;
    int rc = MMHyperAlloc(pPool->pVM,
                          RT_OFFSETOF(MMPAGESUBPOOL, auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)])
                          + (sizeof(SUPPAGE) + sizeof(MMPPLOOKUPHCPHYS)) * cPages
                          + sizeof(MMPPLOOKUPHCPTR),
                          0,
                          MM_TAG_MM_PAGE,
                          (void **)&pSub);
    if (RT_FAILURE(rc))
        return NULL;

    PSUPPAGE paPhysPages = (PSUPPAGE)&pSub->auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)];
    Assert((uintptr_t)paPhysPages >= (uintptr_t)&pSub->auBitmap[1]);
    if (!pPool->fLow)
    {
        rc = SUPR3PageAllocEx(cPages,
                              0 /* fFlags */,
                              &pSub->pvPages,
                              NULL,
                              paPhysPages);
        if (RT_FAILURE(rc))
            rc = VMSetError(pPool->pVM, rc, RT_SRC_POS,
                            N_("Failed to lock host %zd bytes of memory (out of memory)"), (size_t)cPages << PAGE_SHIFT);
    }
    else
        rc = SUPR3LowAlloc(cPages, &pSub->pvPages, NULL, paPhysPages);
    if (RT_SUCCESS(rc))
    {
        /*
         * Setup the sub structure and allocate the requested page.
         */
        pSub->cPages    = cPages;
        pSub->cPagesFree= cPages - 1;
        pSub->paPhysPages = paPhysPages;
        memset(pSub->auBitmap, 0, cPages / 8);
        /* allocate first page. */
        pSub->auBitmap[0] |= 1;
        /* link into free chain. */
        pSub->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree= pSub;
        /* link into main chain. */
        pSub->pNext     = pPool->pHead;
        pPool->pHead    = pSub;
        /* update pool statistics. */
        pPool->cSubPools++;
        pPool->cPages  += cPages;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages += cPages - 1;
#endif

        /*
         * Initialize the physical pages with backpointer to subpool.
         */
        unsigned i = cPages;
        while (i-- > 0)
        {
            AssertMsg(paPhysPages[i].Phys && !(paPhysPages[i].Phys & PAGE_OFFSET_MASK),
                      ("i=%d Phys=%d\n", i, paPhysPages[i].Phys));
            paPhysPages[i].uReserved = (RTHCUINTPTR)pSub;
        }

        /*
         * Initialize the physical lookup record with backpointers to the physical pages.
         */
        PMMPPLOOKUPHCPHYS paLookupPhys = (PMMPPLOOKUPHCPHYS)&paPhysPages[cPages];
        i = cPages;
        while (i-- > 0)
        {
            paLookupPhys[i].pPhysPage = &paPhysPages[i];
            paLookupPhys[i].Core.Key = paPhysPages[i].Phys;
            RTAvlHCPhysInsert(&pPool->pLookupPhys, &paLookupPhys[i].Core);
        }

        /*
         * And the one record for virtual memory lookup.
         */
        PMMPPLOOKUPHCPTR   pLookupVirt = (PMMPPLOOKUPHCPTR)&paLookupPhys[cPages];
        pLookupVirt->pSubPool = pSub;
        pLookupVirt->Core.Key = pSub->pvPages;
        RTAvlPVInsert(&pPool->pLookupVirt, &pLookupVirt->Core);

        /* return allocated page (first). */
        return pSub->pvPages;
    }

    MMHyperFree(pPool->pVM, pSub);
    STAM_COUNTER_INC(&pPool->cErrors);
    if (pPool->fLow)
        VMSetError(pPool->pVM, rc, RT_SRC_POS,
                   N_("Failed to expand page pool for memory below 4GB. Current size: %d pages"),
                   pPool->cPages);
    AssertMsgFailed(("Failed to expand pool%s. rc=%Rrc poolsize=%d\n",
                     pPool->fLow ? " (<4GB)" : "", rc, pPool->cPages));
    return NULL;
}
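The #else branch above scans the allocation bitmap word by word for the first clear bit. Here is a self-contained plain-C sketch of that scan with made-up names and no VBox dependencies; like the original it only visits whole words, so it assumes cBits is a multiple of the word size.

#include <limits.h>

/* Returns the index of the first clear bit within the first cBits bits of
   pauBitmap and marks it as set, or -1 if all of them are already set. */
static int bitmapAllocFirstClear(unsigned *pauBitmap, unsigned cBits)
{
    const unsigned cBitsPerWord = sizeof(pauBitmap[0]) * CHAR_BIT;
    for (unsigned iWord = 0; iWord < cBits / cBitsPerWord; iWord++)
    {
        unsigned u = pauBitmap[iWord];
        if (u != ~0U)                                   /* at least one clear bit here */
        {
            for (unsigned iBit = 0; iBit < cBitsPerWord; iBit++)
            {
                unsigned uMask = 1U << iBit;
                if (!(u & uMask))
                {
                    pauBitmap[iWord] |= uMask;          /* mark the bit as allocated */
                    return (int)(iWord * cBitsPerWord + iBit);
                }
            }
        }
    }
    return -1;                                          /* no free bit found */
}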
Example #5
/**
 * Internal worker for the queue creation APIs.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   cbItem              Item size.
 * @param   cItems              Number of items.
 * @param   cMilliesInterval    Number of milliseconds between polling the queue.
 *                              If 0 then the emulation thread will be notified whenever an item arrives.
 * @param   fRZEnabled          Set if the queue will be used from RC/R0 and needs to be allocated from the hyper heap.
 * @param   pszName             The queue name. Unique. Not copied.
 * @param   ppQueue             Where to store the queue handle.
 */
static int pdmR3QueueCreate(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval, bool fRZEnabled,
                            const char *pszName, PPDMQUEUE *ppQueue)
{
    PUVM pUVM = pVM->pUVM;

    /*
     * Validate input.
     */
    AssertMsgReturn(cbItem >= sizeof(PDMQUEUEITEMCORE) && cbItem < _1M, ("cbItem=%zu\n", cbItem), VERR_OUT_OF_RANGE);
    AssertMsgReturn(cItems >= 1 && cItems <= _64K, ("cItems=%u\n", cItems), VERR_OUT_OF_RANGE);

    /*
     * Align the item size and calculate the structure size.
     */
    cbItem = RT_ALIGN(cbItem, sizeof(RTUINTPTR));
    size_t cb = cbItem * cItems + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16);
    PPDMQUEUE pQueue;
    int rc;
    if (fRZEnabled)
        rc = MMHyperAlloc(pVM, cb, 0, MM_TAG_PDM_QUEUE, (void **)&pQueue);
    else
        rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_QUEUE, cb, (void **)&pQueue);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the data fields.
     */
    pQueue->pVMR3 = pVM;
    pQueue->pVMR0 = fRZEnabled ? pVM->pVMR0 : NIL_RTR0PTR;
    pQueue->pVMRC = fRZEnabled ? pVM->pVMRC : NIL_RTRCPTR;
    pQueue->pszName = pszName;
    pQueue->cMilliesInterval = cMilliesInterval;
    //pQueue->pTimer = NULL;
    pQueue->cbItem = (uint32_t)cbItem;
    pQueue->cItems = cItems;
    //pQueue->pPendingR3 = NULL;
    //pQueue->pPendingR0 = NULL;
    //pQueue->pPendingRC = NULL;
    pQueue->iFreeHead = cItems;
    //pQueue->iFreeTail = 0;
    PPDMQUEUEITEMCORE pItem = (PPDMQUEUEITEMCORE)((char *)pQueue + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16));
    for (unsigned i = 0; i < cItems; i++, pItem = (PPDMQUEUEITEMCORE)((char *)pItem + cbItem))
    {
        pQueue->aFreeItems[i].pItemR3 = pItem;
        if (fRZEnabled)
        {
            pQueue->aFreeItems[i].pItemR0 = MMHyperR3ToR0(pVM, pItem);
            pQueue->aFreeItems[i].pItemRC = MMHyperR3ToRC(pVM, pItem);
        }
    }

    /*
     * Create timer?
     */
    if (cMilliesInterval)
    {
        rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, pdmR3QueueTimer, pQueue, "Queue timer", &pQueue->pTimer);
        if (RT_SUCCESS(rc))
        {
            rc = TMTimerSetMillies(pQueue->pTimer, cMilliesInterval);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("TMTimerSetMillies failed rc=%Rrc\n", rc));
                int rc2 = TMR3TimerDestroy(pQueue->pTimer);
                AssertRC(rc2);
            }
        }
        else
            AssertMsgFailed(("TMR3TimerCreateInternal failed rc=%Rrc\n", rc));
        if (RT_FAILURE(rc))
        {
            if (fRZEnabled)
                MMHyperFree(pVM, pQueue);
            else
                MMR3HeapFree(pQueue);
            return rc;
        }

        /*
         * Insert into the queue list for timer driven queues.
         */
        pdmLock(pVM);
        pQueue->pNext = pUVM->pdm.s.pQueuesTimer;
        pUVM->pdm.s.pQueuesTimer = pQueue;
        pdmUnlock(pVM);
    }
    else
    {
        /*
         * Insert into the queue list for forced action driven queues.
         * This is a FIFO, so insert at the end.
         */
        /** @todo we should add a priority to the queues so we don't have to rely on
         * the initialization order to deal with problems like @bugref{1605} (pgm/pcnet
         * deadlock caused by the critsect queue to be last in the chain).
         * - Update, the critical sections are no longer using queues, so this isn't a real
         *   problem any longer. The priority might be a nice feature for later though.
         */
        pdmLock(pVM);
        if (!pUVM->pdm.s.pQueuesForced)
            pUVM->pdm.s.pQueuesForced = pQueue;
        else
        {
            PPDMQUEUE pPrev = pUVM->pdm.s.pQueuesForced;
            while (pPrev->pNext)
                pPrev = pPrev->pNext;
            pPrev->pNext = pQueue;
        }
        pdmUnlock(pVM);
    }

    /*
     * Register the statistics.
     */
    STAMR3RegisterF(pVM, &pQueue->cbItem,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,        "Item size.",                       "/PDM/Queue/%s/cbItem",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->cItems,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,        "Queue size.",                      "/PDM/Queue/%s/cItems",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatAllocFailures,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "PDMQueueAlloc failures.",          "/PDM/Queue/%s/AllocFailures",  pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatInsert,           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Calls to PDMQueueInsert.",         "/PDM/Queue/%s/Insert",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlush,            STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Calls to pdmR3QueueFlush.",        "/PDM/Queue/%s/Flush",          pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlushLeftovers,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "Left over items after flush.",     "/PDM/Queue/%s/FlushLeftovers", pQueue->pszName);
#ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, &pQueue->StatFlushPrf,         STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Profiling pdmR3QueueFlush.",       "/PDM/Queue/%s/FlushPrf",       pQueue->pszName);
    STAMR3RegisterF(pVM, (void *)&pQueue->cStatPending, STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,        "Pending items.",                   "/PDM/Queue/%s/Pending",        pQueue->pszName);
#endif

    *ppQueue = pQueue;
    return VINF_SUCCESS;
}
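A hypothetical caller sketch for the worker above, placed in the same source file since pdmR3QueueCreate is static. The item type, queue name and parameter values are invented; the constraints taken from the code are that the item must start with PDMQUEUEITEMCORE, cbItem must stay below _1M, and cItems must be between 1 and 64K.

typedef struct EXAMPLEQUEUEITEM
{
    PDMQUEUEITEMCORE Core;      /* must lead the item; cbItem covers the whole struct */
    uint32_t         uValue;    /* example payload */
} EXAMPLEQUEUEITEM;

static int exampleCreateQueue(PVM pVM, PPDMQUEUE *ppQueue)
{
    /* 128 items, polled every 10 ms, ring-3 only (fRZEnabled = false). */
    return pdmR3QueueCreate(pVM, sizeof(EXAMPLEQUEUEITEM), 128, 10 /* ms */,
                            false /* fRZEnabled */, "ExampleQueue", ppQueue);
}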
Example #6
/**
 * Destroy a queue.
 *
 * @returns VBox status code.
 * @param   pQueue      Queue to destroy.
 * @thread  Emulation thread only.
 */
VMMR3_INT_DECL(int) PDMR3QueueDestroy(PPDMQUEUE pQueue)
{
    LogFlow(("PDMR3QueueDestroy: pQueue=%p\n", pQueue));

    /*
     * Validate input.
     */
    if (!pQueue)
        return VERR_INVALID_PARAMETER;
    Assert(pQueue && pQueue->pVMR3);
    PVM     pVM  = pQueue->pVMR3;
    PUVM    pUVM = pVM->pUVM;

    pdmLock(pVM);

    /*
     * Unlink it.
     */
    if (pQueue->pTimer)
    {
        if (pUVM->pdm.s.pQueuesTimer != pQueue)
        {
            PPDMQUEUE pCur = pUVM->pdm.s.pQueuesTimer;
            while (pCur)
            {
                if (pCur->pNext == pQueue)
                {
                    pCur->pNext = pQueue->pNext;
                    break;
                }
                pCur = pCur->pNext;
            }
            AssertMsg(pCur, ("Didn't find the queue!\n"));
        }
        else
            pUVM->pdm.s.pQueuesTimer = pQueue->pNext;
    }
    else
    {
        if (pUVM->pdm.s.pQueuesForced != pQueue)
        {
            PPDMQUEUE pCur = pUVM->pdm.s.pQueuesForced;
            while (pCur)
            {
                if (pCur->pNext == pQueue)
                {
                    pCur->pNext = pQueue->pNext;
                    break;
                }
                pCur = pCur->pNext;
            }
            AssertMsg(pCur, ("Didn't find the queue!\n"));
        }
        else
            pUVM->pdm.s.pQueuesForced = pQueue->pNext;
    }
    pQueue->pNext = NULL;
    pQueue->pVMR3 = NULL;
    pdmUnlock(pVM);

    /*
     * Deregister statistics.
     */
    STAMR3Deregister(pVM, &pQueue->cbItem);
    STAMR3Deregister(pVM, &pQueue->cItems);
    STAMR3Deregister(pVM, &pQueue->StatAllocFailures);
    STAMR3Deregister(pVM, &pQueue->StatInsert);
    STAMR3Deregister(pVM, &pQueue->StatFlush);
    STAMR3Deregister(pVM, &pQueue->StatFlushLeftovers);
#ifdef VBOX_WITH_STATISTICS
    STAMR3Deregister(pVM, &pQueue->StatFlushPrf);
    STAMR3Deregister(pVM, (void *)&pQueue->cStatPending);
#endif

    /*
     * Destroy the timer and free it.
     */
    if (pQueue->pTimer)
    {
        TMR3TimerDestroy(pQueue->pTimer);
        pQueue->pTimer = NULL;
    }
    if (pQueue->pVMRC)
    {
        pQueue->pVMRC = NIL_RTRCPTR;
        pQueue->pVMR0 = NIL_RTR0PTR;
        MMHyperFree(pVM, pQueue);
    }
    else
        MMR3HeapFree(pQueue);

    return VINF_SUCCESS;
}
/**
 * Destroy a queue.
 *
 * @returns VBox status code.
 * @param   pQueue      Queue to destroy.
 * @thread  Emulation thread only.
 */
VMMR3_INT_DECL(int) PDMR3QueueDestroy(PPDMQUEUE pQueue)
{
    LogFlow(("PDMR3QueueDestroy: pQueue=%p\n", pQueue));

    /*
     * Validate input.
     */
    if (!pQueue)
        return VERR_INVALID_PARAMETER;
    Assert(pQueue && pQueue->pVMR3);
    PVM     pVM  = pQueue->pVMR3;
    PUVM    pUVM = pVM->pUVM;

    pdmLock(pVM);

    /*
     * Unlink it.
     */
    if (pQueue->pTimer)
    {
        if (pUVM->pdm.s.pQueuesTimer != pQueue)
        {
            PPDMQUEUE pCur = pUVM->pdm.s.pQueuesTimer;
            while (pCur)
            {
                if (pCur->pNext == pQueue)
                {
                    pCur->pNext = pQueue->pNext;
                    break;
                }
                pCur = pCur->pNext;
            }
            AssertMsg(pCur, ("Didn't find the queue!\n"));
        }
        else
            pUVM->pdm.s.pQueuesTimer = pQueue->pNext;
    }
    else
    {
        if (pUVM->pdm.s.pQueuesForced != pQueue)
        {
            PPDMQUEUE pCur = pUVM->pdm.s.pQueuesForced;
            while (pCur)
            {
                if (pCur->pNext == pQueue)
                {
                    pCur->pNext = pQueue->pNext;
                    break;
                }
                pCur = pCur->pNext;
            }
            AssertMsg(pCur, ("Didn't find the queue!\n"));
        }
        else
            pUVM->pdm.s.pQueuesForced = pQueue->pNext;
    }
    pQueue->pNext = NULL;
    pQueue->pVMR3 = NULL;
    pdmUnlock(pVM);

    /*
     * Deregister statistics.
     */
    STAMR3DeregisterF(pVM->pUVM, "/PDM/Queue/%s/cbItem", pQueue->pszName);

    /*
     * Destroy the timer and free it.
     */
    if (pQueue->pTimer)
    {
        TMR3TimerDestroy(pQueue->pTimer);
        pQueue->pTimer = NULL;
    }
    if (pQueue->pVMRC)
    {
        pQueue->pVMRC = NIL_RTRCPTR;
        pQueue->pVMR0 = NIL_RTR0PTR;
        MMHyperFree(pVM, pQueue);
    }
    else
        MMR3HeapFree(pQueue);

    return VINF_SUCCESS;
}
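A small teardown sketch to pair with the creation example earlier: it destroys the queue with PDMR3QueueDestroy and clears the caller's pointer so it cannot be reused by mistake. The helper name is made up for illustration.

static void exampleDestroyQueue(PPDMQUEUE *ppQueue)
{
    if (*ppQueue)
    {
        int rc = PDMR3QueueDestroy(*ppQueue);   /* unlinks the queue, drops its stats and frees it */
        AssertRC(rc);
        *ppQueue = NULL;                        /* prevent accidental reuse */
    }
}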
/**
 * Initialize a new thread, this actually creates the thread.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   ppThread    Where the thread instance data handle is.
 * @param   cbStack     The stack size, see RTThreadCreate().
 * @param   enmType     The thread type, see RTThreadCreate().
 * @param   pszName     The thread name, see RTThreadCreate().
 */
static int pdmR3ThreadInit(PVM pVM, PPPDMTHREAD ppThread, size_t cbStack, RTTHREADTYPE enmType, const char *pszName)
{
    PPDMTHREAD  pThread = *ppThread;
    PUVM        pUVM    = pVM->pUVM;

    /*
     * Initialize the remainder of the structure.
     */
    pThread->Internal.s.pVM = pVM;

    int rc = RTSemEventMultiCreate(&pThread->Internal.s.BlockEvent);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventMultiCreate(&pThread->Internal.s.SleepEvent);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create the thread and wait for it to initialize.
             * The newly created thread will set the PDMTHREAD::Thread member.
             */
            RTTHREAD Thread;
            rc = RTThreadCreate(&Thread, pdmR3ThreadMain, pThread, cbStack, enmType, RTTHREADFLAGS_WAITABLE, pszName);
            if (RT_SUCCESS(rc))
            {
                rc = RTThreadUserWait(Thread, 60*1000);
                if (    RT_SUCCESS(rc)
                    &&  pThread->enmState != PDMTHREADSTATE_SUSPENDED)
                    rc = VERR_PDM_THREAD_IPE_2;
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Insert it into the thread list.
                     */
                    RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
                    pThread->Internal.s.pNext = NULL;
                    if (pUVM->pdm.s.pThreadsTail)
                        pUVM->pdm.s.pThreadsTail->Internal.s.pNext = pThread;
                    else
                        pUVM->pdm.s.pThreads = pThread;
                    pUVM->pdm.s.pThreadsTail = pThread;
                    RTCritSectLeave(&pUVM->pdm.s.ListCritSect);

                    rc = RTThreadUserReset(Thread);
                    AssertRC(rc);
                    return rc;
                }

                /* bailout */
                RTThreadWait(Thread, 60*1000, NULL);
            }
            RTSemEventMultiDestroy(pThread->Internal.s.SleepEvent);
            pThread->Internal.s.SleepEvent = NIL_RTSEMEVENTMULTI;
        }
        RTSemEventMultiDestroy(pThread->Internal.s.BlockEvent);
        pThread->Internal.s.BlockEvent = NIL_RTSEMEVENTMULTI;
    }
    MMHyperFree(pVM, pThread);
    *ppThread = NULL;

    return rc;
}
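A hypothetical caller sketch for pdmR3ThreadInit, again in the same source file since the function is static. Because the failure path frees *ppThread with MMHyperFree, the sketch allocates the structure from the hyper heap as well; the MM_TAG_PDM tag, the thread type and the name are assumptions for illustration, and a real caller would also fill in the thread-type specific members before calling the function.

static int exampleCreateThread(PVM pVM, PPDMTHREAD *ppThread)
{
    PPDMTHREAD pThread;
    int rc = MMHyperAlloc(pVM, sizeof(*pThread), 0, MM_TAG_PDM, (void **)&pThread);
    if (RT_FAILURE(rc))
        return rc;

    /* ... set up pThread->enmType, callbacks and other members here ... */

    *ppThread = pThread;
    /* 0 = default stack size; RTTHREADTYPE_IO is just an example thread type. */
    return pdmR3ThreadInit(pVM, ppThread, 0, RTTHREADTYPE_IO, "ExampleThread");
}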