Example #1
/**
 * Performs a GMMR0AllocatePages request.
 *
 * This will call VMSetError on failure.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pReq        Pointer to the request (returned by GMMR3AllocatePagesPrepare).
 */
GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
{
    for (unsigned i = 0; ; i++)
    {
        int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
        if (RT_SUCCESS(rc))
        {
#ifdef LOG_ENABLED
            for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
                Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
                      pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
#endif
            return rc;
        }
        if (rc != VERR_GMM_SEED_ME)
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("GMMR0AllocatePages failed to allocate %u pages"),
                              pReq->cPages);
        Assert(i < pReq->cPages);

        /*
         * Seed another chunk.
         */
        void *pvChunk;
        rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("Out of memory (SUPR3PageAlloc) seeding a %u pages allocation request"),
                              pReq->cPages);

        rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, N_("GMM seeding failed"));
    }
}
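The doc comment above points at a prepare/perform/cleanup pattern. Below is a minimal caller sketch, assuming the usual GMMR3AllocatePagesPrepare/GMMR3AllocatePagesCleanup companions the comment refers to; their exact signatures and the GMMACCOUNT_BASE account are assumptions for illustration, not taken from this listing.

/* Hypothetical caller: allocate cPages of guest memory using the
 * prepare/perform/cleanup sequence the doc comment above refers to.
 * The Prepare/Cleanup signatures are assumed, not verified here. */
static int exampleAllocGuestPages(PVM pVM, uint32_t cPages)
{
    PGMMALLOCATEPAGESREQ pReq;
    int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
    if (RT_SUCCESS(rc))
    {
        /* A real caller would fill in pReq->aPages[i] (idPage/HCPhysGCPhys) here. */
        rc = GMMR3AllocatePagesPerform(pVM, pReq);
        GMMR3AllocatePagesCleanup(pReq);
    }
    return rc;
}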
Example #2
/**
 * Initializes the tracing.
 *
 * @returns VBox status code
 * @param   pVM                 The cross context VM structure.
 */
int dbgfR3TraceInit(PVM pVM)
{
    /*
     * Initialize the trace buffer handles.
     */
    Assert(NIL_RTTRACEBUF == (RTTRACEBUF)NULL);
    pVM->hTraceBufR3 = NIL_RTTRACEBUF;
    pVM->hTraceBufRC = NIL_RTRCPTR;
    pVM->hTraceBufR0 = NIL_RTR0PTR;

    /*
     * Check the config and enable tracing if requested.
     */
    PCFGMNODE pDbgfNode = CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF");
#if defined(DEBUG) || defined(RTTRACE_ENABLED)
    bool const          fDefault        = false;
    const char * const  pszConfigDefault = "";
#else
    bool const          fDefault        = false;
    const char * const  pszConfigDefault = "";
#endif
    bool                fTracingEnabled;
    int rc = CFGMR3QueryBoolDef(pDbgfNode, "TracingEnabled", &fTracingEnabled, fDefault);
    AssertRCReturn(rc, rc);
    if (fTracingEnabled)
    {
        rc = dbgfR3TraceEnable(pVM, 0, 0);
        if (RT_SUCCESS(rc))
        {
            if (pDbgfNode)
            {
                char       *pszTracingConfig;
                rc = CFGMR3QueryStringAllocDef(pDbgfNode, "TracingConfig", &pszTracingConfig, pszConfigDefault);
                if (RT_SUCCESS(rc))
                {
                    rc = DBGFR3TraceConfig(pVM, pszTracingConfig);
                    if (RT_FAILURE(rc))
                        rc = VMSetError(pVM, rc, RT_SRC_POS, "TracingConfig=\"%s\" -> %Rrc", pszTracingConfig, rc);
                    MMR3HeapFree(pszTracingConfig);
                }
            }
            else
            {
                rc = DBGFR3TraceConfig(pVM, pszConfigDefault);
                if (RT_FAILURE(rc))
                    rc = VMSetError(pVM, rc, RT_SRC_POS, "TracingConfig=\"%s\" (default) -> %Rrc", pszConfigDefault, rc);
            }
        }
    }

    /*
     * Register a debug info item that will dump the trace buffer content.
     */
    if (RT_SUCCESS(rc))
        rc = DBGFR3InfoRegisterInternal(pVM, "tracebuf", "Display the trace buffer content. No arguments.", dbgfR3TraceInfo);

    return rc;
}
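dbgfR3TraceInit reads two optional keys under the DBGF node: TracingEnabled (boolean) and TracingConfig (string). A hedged sketch of how those keys could be populated when the CFGM tree is built follows; error handling is trimmed, and the "all" config string is an assumed example of what DBGFR3TraceConfig might be fed, not a value confirmed by this listing.

/* Sketch only: create the DBGF node and set the two keys dbgfR3TraceInit
 * queries.  CFGMR3InsertNode/Integer/String are the standard CFGM API;
 * the "all" value is an assumed example of a tracing config string. */
static int exampleEnableDbgfTracing(PVM pVM)
{
    PCFGMNODE pDbgf;
    int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "DBGF", &pDbgf);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pDbgf, "TracingEnabled", 1);   /* -> fTracingEnabled */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertString(pDbgf, "TracingConfig", "all"); /* -> DBGFR3TraceConfig */
    return rc;
}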
Example #3
/**
 * Performs a GMMR0FreePages request.
 * This will call VMSetError on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pReq            Pointer to the request (returned by GMMR3FreePagesPrepare).
 * @param   cActualPages    The number of pages actually freed.
 */
GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
{
    /*
     * Adjust the request if we ended up with fewer pages than anticipated.
     */
    if (cActualPages != pReq->cPages)
    {
        AssertReturn(cActualPages < pReq->cPages, VERR_GMM_ACTUAL_PAGES_IPE);
        if (!cActualPages)
            return VINF_SUCCESS;
        pReq->cPages = cActualPages;
        pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cActualPages]);
    }

    /*
     * Do the job.
     */
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
    if (RT_SUCCESS(rc))
        return rc;
    AssertRC(rc);
    return VMSetError(pVM, rc, RT_SRC_POS,
                      N_("GMMR0FreePages failed to free %u pages"),
                      pReq->cPages);
}
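The cActualPages parameter lets a caller prepare a request for an upper bound and then perform it with the number of pages it actually queued; the function shrinks cbReq accordingly and treats zero as an immediate success. A hedged caller sketch, where the Prepare/Cleanup signatures are assumptions based on the doc comment:

/* Hypothetical caller: prepare a request for up to cPages, record the pages
 * actually released, then perform with the real count so the request header
 * is shrunk accordingly.  Prepare/Cleanup signatures are assumed. */
static int exampleFreeGuestPages(PVM pVM, uint32_t cPages)
{
    PGMMFREEPAGESREQ pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
    if (RT_SUCCESS(rc))
    {
        uint32_t cActual = 0;
        /* ... fill pReq->aPages[cActual].idPage and bump cActual per page ... */
        rc = GMMR3FreePagesPerform(pVM, pReq, cActual);
        GMMR3FreePagesCleanup(pReq);
    }
    return rc;
}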
Example #4
/**
 * Changes the halt method.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (RT_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc != VERR_CFGM_VALUE_NOT_FOUND && rc != VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
            //enmHaltMethod = VMHALTMETHOD_1;
            //enmHaltMethod = VMHALTMETHOD_OLD;
    }
    LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (     i < RT_ELEMENTS(g_aHaltMethods)
           &&   g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * This needs to be done while the other EMTs are not sleeping or otherwise messing around.
     */
    return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3SetHaltMethodCallback, (void *)(uintptr_t)i);
}
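The default halt method can be overridden by a numeric VM/HaltMethod key in the configuration tree, which the code above casts to VMHALTMETHOD and validates. A minimal sketch of setting that key, assuming the "VM" node does not exist yet (otherwise it would be looked up with CFGMR3GetChild instead):

/* Sketch only: force a specific halt method through the config key read
 * by vmR3SetHaltMethodU.  Assumes the "VM" node is created right here. */
static int exampleSelectHaltMethod(PVM pVM, VMHALTMETHOD enmMethod)
{
    PCFGMNODE pVMNode;
    int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "VM", &pVMNode);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pVMNode, "HaltMethod", (uint32_t)enmMethod);
    return rc;
}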
Example #5
/**
 * Allocates a page from the page pool.
 *
 * @returns Pointer to allocated page(s).
 * @returns NULL on failure.
 * @param   pPool   Pointer to the page pool.
 * @thread  The Emulation Thread.
 */
DECLINLINE(void *) mmR3PagePoolAlloc(PMMPAGEPOOL pPool)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cAllocCalls);

    /*
     * Walk free list.
     */
    if (pPool->pHeadFree)
    {
        PMMPAGESUBPOOL  pSub = pPool->pHeadFree;
        /* decrement free count and unlink if no more free entries. */
        if (!--pSub->cPagesFree)
            pPool->pHeadFree = pSub->pNextFree;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages--;
#endif

        /* find free spot in bitmap. */
#ifdef USE_INLINE_ASM_BIT_OPS
        const int iPage = ASMBitFirstClear(pSub->auBitmap, pSub->cPages);
        if (iPage >= 0)
        {
            Assert(!ASMBitTest(pSub->auBitmap, iPage));
            ASMBitSet(pSub->auBitmap, iPage);
            return (uint8_t *)pSub->pvPages + PAGE_SIZE * iPage;
        }
#else
        unsigned   *pu = &pSub->auBitmap[0];
        unsigned   *puEnd = &pSub->auBitmap[pSub->cPages / (sizeof(pSub->auBitmap) * 8)];
        while (pu < puEnd)
        {
            unsigned u;
            if ((u = *pu) != ~0U)
            {
                unsigned iBit = 0;
                unsigned uMask = 1;
                while (iBit < sizeof(pSub->auBitmap[0]) * 8)
                {
                    if (!(u & uMask))
                    {
                        *pu |= uMask;
                        return (uint8_t *)pSub->pvPages
                            + PAGE_SIZE * (iBit + ((uint8_t *)pu - (uint8_t *)&pSub->auBitmap[0]) * 8);
                    }
                    iBit++;
                    uMask <<= 1;
                }
                STAM_COUNTER_INC(&pPool->cErrors);
                AssertMsgFailed(("how odd, expected to find a free bit in %#x, but didn't\n", u));
            }
            /* next */
            pu++;
        }
#endif
        STAM_COUNTER_INC(&pPool->cErrors);
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages++;
#endif
        AssertMsgFailed(("how strange, expected to find a free bit in %p, but didn't (%d pages supposed to be free!)\n", pSub, pSub->cPagesFree + 1));
    }

    /*
     * Allocate new subpool.
     */
    unsigned        cPages = !pPool->fLow ? 128 : 32;
    PMMPAGESUBPOOL  pSub;
    int rc = MMHyperAlloc(pPool->pVM,
                          RT_OFFSETOF(MMPAGESUBPOOL, auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)])
                          + (sizeof(SUPPAGE) + sizeof(MMPPLOOKUPHCPHYS)) * cPages
                          + sizeof(MMPPLOOKUPHCPTR),
                          0,
                          MM_TAG_MM_PAGE,
                          (void **)&pSub);
    if (RT_FAILURE(rc))
        return NULL;

    PSUPPAGE paPhysPages = (PSUPPAGE)&pSub->auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)];
    Assert((uintptr_t)paPhysPages >= (uintptr_t)&pSub->auBitmap[1]);
    if (!pPool->fLow)
    {
        rc = SUPR3PageAllocEx(cPages,
                              0 /* fFlags */,
                              &pSub->pvPages,
                              NULL,
                              paPhysPages);
        if (RT_FAILURE(rc))
            rc = VMSetError(pPool->pVM, rc, RT_SRC_POS,
                            N_("Failed to lock host %zd bytes of memory (out of memory)"), (size_t)cPages << PAGE_SHIFT);
    }
    else
        rc = SUPR3LowAlloc(cPages, &pSub->pvPages, NULL, paPhysPages);
    if (RT_SUCCESS(rc))
    {
        /*
         * Setup the sub structure and allocate the requested page.
         */
        pSub->cPages    = cPages;
        pSub->cPagesFree= cPages - 1;
        pSub->paPhysPages = paPhysPages;
        memset(pSub->auBitmap, 0, cPages / 8);
        /* allocate first page. */
        pSub->auBitmap[0] |= 1;
        /* link into free chain. */
        pSub->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree= pSub;
        /* link into main chain. */
        pSub->pNext     = pPool->pHead;
        pPool->pHead    = pSub;
        /* update pool statistics. */
        pPool->cSubPools++;
        pPool->cPages  += cPages;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages += cPages - 1;
#endif

        /*
         * Initialize the physical pages with backpointer to subpool.
         */
        unsigned i = cPages;
        while (i-- > 0)
        {
            AssertMsg(paPhysPages[i].Phys && !(paPhysPages[i].Phys & PAGE_OFFSET_MASK),
                      ("i=%d Phys=%d\n", i, paPhysPages[i].Phys));
            paPhysPages[i].uReserved = (RTHCUINTPTR)pSub;
        }

        /*
         * Initialize the physical lookup record with backpointers to the physical pages.
         */
        PMMPPLOOKUPHCPHYS paLookupPhys = (PMMPPLOOKUPHCPHYS)&paPhysPages[cPages];
        i = cPages;
        while (i-- > 0)
        {
            paLookupPhys[i].pPhysPage = &paPhysPages[i];
            paLookupPhys[i].Core.Key = paPhysPages[i].Phys;
            RTAvlHCPhysInsert(&pPool->pLookupPhys, &paLookupPhys[i].Core);
        }

        /*
         * And the one record for virtual memory lookup.
         */
        PMMPPLOOKUPHCPTR   pLookupVirt = (PMMPPLOOKUPHCPTR)&paLookupPhys[cPages];
        pLookupVirt->pSubPool = pSub;
        pLookupVirt->Core.Key = pSub->pvPages;
        RTAvlPVInsert(&pPool->pLookupVirt, &pLookupVirt->Core);

        /* return allocated page (first). */
        return pSub->pvPages;
    }

    MMHyperFree(pPool->pVM, pSub);
    STAM_COUNTER_INC(&pPool->cErrors);
    if (pPool->fLow)
        VMSetError(pPool->pVM, rc, RT_SRC_POS,
                   N_("Failed to expand page pool for memory below 4GB. Current size: %d pages"),
                   pPool->cPages);
    AssertMsgFailed(("Failed to expand pool%s. rc=%Rrc poolsize=%d\n",
                     pPool->fLow ? " (<4GB)" : "", rc, pPool->cPages));
    return NULL;
}
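The non-assembly path above is a plain first-clear-bit scan over the allocation bitmap. The same technique in isolation, as a small self-contained sketch in standard C, independent of the VBox types:

#include <stdint.h>
#include <stddef.h>

/* Find the index of the first clear bit in an array of 32-bit words, or -1
 * if all of the first cBits bits are set.  This mirrors what
 * ASMBitFirstClear() / the manual loop above do for the allocation bitmap. */
static int exampleFirstClearBit(const uint32_t *pau32, size_t cBits)
{
    for (size_t iWord = 0; iWord * 32 < cBits; iWord++)
        if (pau32[iWord] != UINT32_MAX)                      /* word has a free bit */
            for (unsigned iBit = 0; iBit < 32 && iWord * 32 + iBit < cBits; iBit++)
                if (!(pau32[iWord] & ((uint32_t)1 << iBit)))
                    return (int)(iWord * 32 + iBit);
    return -1;
}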