Example #1
/**
 * Ensures that there is space for at least @a cNewRanges in the table,
 * reallocating the table if necessary.
 *
 * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
 *          @a *ppaMsrRanges is freed and set to NULL.
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 * @param   cMsrRanges      The current number of ranges.
 * @param   cNewRanges      The number of ranges to be added.
 */
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
    uint32_t cMsrRangesAllocated;
    if (!pVM)
        cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
    else
    {
        /*
         * We're using the hyper heap now, but when the range array was copied over to it from
         * the host-context heap, we only copy the exact size and not the ensured size.
         * See @bugref{7270}.
         */
        cMsrRangesAllocated = cMsrRanges;
    }
    if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
    {
        void    *pvNew;
        uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
        if (pVM)
        {
            Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
            Assert(cMsrRanges   == pVM->cpum.s.GuestInfo.cMsrRanges);

            size_t cb    = cMsrRangesAllocated * sizeof(**ppaMsrRanges);
            size_t cbNew = cNew * sizeof(**ppaMsrRanges);
            int rc = MMR3HyperRealloc(pVM, *ppaMsrRanges, cb, 32, MM_TAG_CPUM_MSRS, cbNew, &pvNew);
            if (RT_FAILURE(rc))
            {
                *ppaMsrRanges = NULL;
                pVM->cpum.s.GuestInfo.paMsrRangesR0 = NIL_RTR0PTR;
                pVM->cpum.s.GuestInfo.paMsrRangesRC = NIL_RTRCPTR;
                LogRel(("CPUM: cpumR3MsrRangesEnsureSpace: MMR3HyperRealloc failed. rc=%Rrc\n", rc));
                return NULL;
            }
        }
        else
        {
            pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
            if (!pvNew)
            {
                RTMemFree(*ppaMsrRanges);
                *ppaMsrRanges = NULL;
                return NULL;
            }
        }
        *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
    }

    if (pVM)
    {
        /* Update R0 and RC pointers. */
        Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
        pVM->cpum.s.GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, *ppaMsrRanges);
        pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, *ppaMsrRanges);
    }

    return *ppaMsrRanges;
}
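Note the growth policy: capacity is kept at a multiple of 16 entries, so RT_ALIGN_32 recovers the implied allocation size from the current count, and repeated single-range additions reallocate at most once per 16 inserts. A minimal standalone sketch of the same pattern, with a hypothetical Range element type and plain realloc() standing in for the VM heaps:

#include <stdint.h>
#include <stdlib.h>

/* Same rounding as IPRT's RT_ALIGN_32 for power-of-two alignments. */
#define ALIGN_32(u32, uAlign) (((uint32_t)(u32) + ((uint32_t)(uAlign) - 1)) & ~((uint32_t)(uAlign) - 1))

typedef struct Range { uint32_t uFirst, uLast; } Range;   /* hypothetical element type */

/* Grow *ppaRanges to hold cCur + cNew entries, keeping the capacity a
   multiple of 16 entries; frees and NULLs the array on failure. */
static Range *rangesEnsureSpace(Range **ppaRanges, uint32_t cCur, uint32_t cNew)
{
    uint32_t cAllocated = ALIGN_32(cCur, 16);   /* capacity implied by the policy */
    if (cAllocated < cCur + cNew)
    {
        uint32_t cGrown = ALIGN_32(cCur + cNew, 16);
        void *pvNew = realloc(*ppaRanges, cGrown * sizeof(**ppaRanges));
        if (!pvNew)
        {
            free(*ppaRanges);
            *ppaRanges = NULL;
            return NULL;
        }
        *ppaRanges = (Range *)pvNew;
    }
    return *ppaRanges;
}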
Example #2
static void PNGAPI png_write_data_fn(png_structp png_ptr, png_bytep p, png_size_t cb)
{
    PNGWriteCtx *pCtx = (PNGWriteCtx *)png_get_io_ptr(png_ptr);
    LogFlowFunc(("png_ptr %p, p %p, cb %d, pCtx %p\n", png_ptr, p, cb, pCtx));

    if (pCtx && RT_SUCCESS(pCtx->rc))
    {
        if (pCtx->cbAllocated - pCtx->cbPNG < cb)
        {
            uint32_t cbNew = pCtx->cbPNG + (uint32_t)cb;
            AssertReturnVoidStmt(cbNew > pCtx->cbPNG && cbNew <= _1G, pCtx->rc = VERR_TOO_MUCH_DATA);
            cbNew = RT_ALIGN_32(cbNew, 4096) + 4096;

            void *pNew = RTMemRealloc(pCtx->pu8PNG, cbNew);
            if (!pNew)
            {
                pCtx->rc = VERR_NO_MEMORY;
                return;
            }

            pCtx->pu8PNG = (uint8_t *)pNew;
            pCtx->cbAllocated = cbNew;
        }

        memcpy(pCtx->pu8PNG + pCtx->cbPNG, p, cb);
        pCtx->cbPNG += (uint32_t)cb;
    }
}
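This is the standard libpng write-callback arrangement: libpng invokes the function for each chunk of encoded output and threads the context through the io_ptr registered with png_set_write_fn(). A hedged sketch of the hookup; the PNGWriteCtx layout and the pngWriteSetup() helper are assumptions inferred from the fields used above, not the actual VirtualBox definitions:

#include <png.h>
#include <stdint.h>

/* Assumed context layout, mirroring the fields the callback touches. */
typedef struct PNGWriteCtx
{
    uint8_t  *pu8PNG;       /* growing output buffer */
    uint32_t  cbPNG;        /* bytes of PNG data written so far */
    uint32_t  cbAllocated;  /* current capacity of pu8PNG */
    int       rc;           /* sticky status code checked on every call */
} PNGWriteCtx;

static void pngWriteSetup(png_structp png_ptr, PNGWriteCtx *pCtx)
{
    /* Register the callback; libpng hands pCtx back via png_get_io_ptr().
       No flush callback is needed for an in-memory target. */
    png_set_write_fn(png_ptr, pCtx, png_write_data_fn, NULL /*pfnFlush*/);
}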
Example #3
/**
 * Ensures that there is space for at least @a cNewRanges in the table,
 * reallocating the table if necessary.
 *
 * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
 *          @a *ppaMsrRanges is freed and set to NULL.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 * @param   cMsrRanges      The current number of ranges.
 * @param   cNewRanges      The number of ranges to be added.
 */
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
    uint32_t cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
    if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
    {
        uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
        void *pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
        if (!pvNew)
        {
            RTMemFree(*ppaMsrRanges);
            *ppaMsrRanges = NULL;
            return NULL;
        }
        *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
    }
    return *ppaMsrRanges;
}
Example #4
/**
 * Adds a range of memory to the tiled slabs.
 *
 * @param   uRange      Start of range.
 * @param   cbRange     Size of range.
 */
static void bs3InitMemoryAddRange32(uint32_t uRange, uint32_t cbRange)
{
    uint32_t uRangeEnd = uRange + cbRange;
    if (uRangeEnd < uRange)
        uRangeEnd = UINT32_MAX;

    /* Raise the end-of-ram-below-4GB marker? */
    if (uRangeEnd > g_uBs3EndOfRamBelow4G)
        g_uBs3EndOfRamBelow4G = uRangeEnd;

    /* Applicable to tiled memory? */
    if (   uRange < BS3_SEL_TILED_AREA_SIZE
        && (   uRange >= _1M
            || uRangeEnd >= _1M))
    {
        uint16_t cPages;

        /* Adjust the start of the range such that it's at or above 1MB and page aligned.  */
        if (uRange < _1M)
        {
            cbRange -= _1M - uRange;
            uRange   = _1M;
        }
        else if (uRange & (_4K - 1U))
        {
            cbRange -= uRange & (_4K - 1U);
            uRange   = RT_ALIGN_32(uRange, _4K);
        }

        /* Adjust the end/size of the range such that it's page aligned and not beyond the tiled area. */
        if (uRangeEnd > BS3_SEL_TILED_AREA_SIZE)
        {
            cbRange  -= uRangeEnd - BS3_SEL_TILED_AREA_SIZE;
            uRangeEnd = BS3_SEL_TILED_AREA_SIZE;
        }
        else if (uRangeEnd & (_4K - 1U))
        {
            cbRange   -= uRangeEnd & (_4K - 1U);
            uRangeEnd &= ~(uint32_t)(_4K - 1U);
        }

        /* If there is still something, enable it.
           (We're a bit paranoid here and don't trust the BIOS to only report a page once.)  */
        cPages = cbRange >> 12; /*div 4K*/
        if (cPages)
        {
            unsigned i;
            uRange -= _1M;
            i = uRange >> 12; /*div _4K*/
            while (cPages-- > 0)
            {
                uint16_t uLineToLong = ASMBitTestAndClear(g_Bs3Mem4KUpperTiled.Core.bmAllocated, i);
                g_Bs3Mem4KUpperTiled.Core.cFreeChunks += uLineToLong;
                i++;
            }
        }
    }
}
Example #5
/**
 * Aligns the current block data to a 32-bit boundary.
 *
 * @returns VBox status code.
 * @param   pThis           The VUSB sniffer instance.
 */
static int vusbSnifferBlockAlign(PVUSBSNIFFERINT pThis)
{
    int rc = VINF_SUCCESS;

    Assert(pThis->cbBlockCur);

    /* Pad to 32 bits. */
    uint8_t abPad[3] = { 0 };
    uint32_t cbPad = RT_ALIGN_32(pThis->cbBlockCur, 4) - pThis->cbBlockCur;

    Assert(cbPad <= 3);
    if (cbPad)
        rc = vusbSnifferBlockAddData(pThis, abPad, cbPad);

    return rc;
}
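Round-up-then-subtract is the idiomatic padding computation: RT_ALIGN_32(cb, 4) - cb is always 0 to 3, never 4, which is exactly what the Assert(cbPad <= 3) encodes. A quick standalone check, with RT_ALIGN_32 expanded to its power-of-two definition as a local ALIGN_32 macro:

#include <assert.h>
#include <stdint.h>

#define ALIGN_32(u32, uAlign) (((uint32_t)(u32) + ((uint32_t)(uAlign) - 1)) & ~((uint32_t)(uAlign) - 1))

int main(void)
{
    /* Pad needed to reach the next 32-bit boundary: always 0..3, never 4. */
    assert(ALIGN_32(4, 4) - 4 == 0);
    assert(ALIGN_32(5, 4) - 5 == 3);
    assert(ALIGN_32(6, 4) - 6 == 2);
    assert(ALIGN_32(7, 4) - 7 == 1);
    return 0;
}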
Example #6
/**
 * Worker for dbgDiggerLinuxFindEndNames that records the findings.
 *
 * @returns VINF_SUCCESS
 * @param   pThis           The linux digger data to update.
 * @param   pAddrMarkers    The address of the marker (kallsyms_markers).
 * @param   cbMarkerEntry   The size of a marker entry (32-bit or 64-bit).
 */
static int dbgDiggerLinuxFoundMarkers(PDBGDIGGERLINUX pThis, PCDBGFADDRESS pAddrMarkers, uint32_t cbMarkerEntry)
{
    pThis->cbKernelNames         = pAddrMarkers->FlatPtr - pThis->AddrKernelNames.FlatPtr - 1;
    pThis->AddrKernelNameMarkers = *pAddrMarkers;
    pThis->cKernelNameMarkers    = RT_ALIGN_32(pThis->cKernelSymbols, 256) / 256;
    pThis->AddrKernelTokenTable  = *pAddrMarkers;
    DBGFR3AddrAdd(&pThis->AddrKernelTokenTable, pThis->cKernelNameMarkers * cbMarkerEntry);

    Log(("dbgDiggerLinuxFoundMarkers: AddrKernelNames=%RGv cbKernelNames=%#x\n"
         "dbgDiggerLinuxFoundMarkers: AddrKernelNameMarkers=%RGv cKernelNameMarkers=%#x\n"
         "dbgDiggerLinuxFoundMarkers: AddrKernelTokenTable=%RGv\n",
         pThis->AddrKernelNames.FlatPtr, pThis->cbKernelNames,
         pThis->AddrKernelNameMarkers.FlatPtr, pThis->cKernelNameMarkers,
         pThis->AddrKernelTokenTable.FlatPtr));
    return VINF_SUCCESS;
}
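RT_ALIGN_32(n, 256) / 256 is ceiling division in disguise: one kallsyms marker per 256 symbols, rounded up. A standalone check of that identity (same local ALIGN_32 stand-in as before):

#include <assert.h>
#include <stdint.h>

#define ALIGN_32(u32, uAlign) (((uint32_t)(u32) + ((uint32_t)(uAlign) - 1)) & ~((uint32_t)(uAlign) - 1))

int main(void)
{
    /* One marker per 256 symbols, rounded up: align-then-divide is ceil(). */
    assert(ALIGN_32(256, 256) / 256 == 1);
    assert(ALIGN_32(257, 256) / 256 == 2);
    assert(ALIGN_32(1000, 256) / 256 == 4);
    return 0;
}

Example #7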
RTDECL(int) RTCrDigestCreate(PRTCRDIGEST phDigest, PCRTCRDIGESTDESC pDesc, void *pvOpaque)
{
    AssertPtrReturn(phDigest, VERR_INVALID_POINTER);
    AssertPtrReturn(pDesc, VERR_INVALID_POINTER);

    int rc = VINF_SUCCESS;
    uint32_t const offHash = RT_ALIGN_32(pDesc->cbState, 8);
    AssertReturn(pDesc->pfnNew || offHash, VERR_INVALID_PARAMETER);
    AssertReturn(!pDesc->pfnNew || (pDesc->pfnFree && pDesc->pfnInit && pDesc->pfnClone), VERR_INVALID_PARAMETER);
    PRTCRDIGESTINT pThis = (PRTCRDIGESTINT)RTMemAllocZ(RT_UOFFSETOF_DYN(RTCRDIGESTINT, abState[offHash + pDesc->cbHash]));
    if (pThis)
    {
        if (pDesc->pfnNew)
            pThis->pvState = pDesc->pfnNew();
        else
            pThis->pvState = &pThis->abState[0];
        if (pThis->pvState)
        {
            pThis->u32Magic = RTCRDIGESTINT_MAGIC;
            pThis->cRefs    = 1;
            pThis->offHash  = offHash;
            pThis->pDesc    = pDesc;
            pThis->uState   = RTCRDIGEST_STATE_READY;
            if (pDesc->pfnInit)
                rc = pDesc->pfnInit(pThis->pvState, pvOpaque, false /*fReInit*/);
            if (RT_SUCCESS(rc))
            {
                *phDigest = pThis;
                return rtCrDigestSuccessWithDigestWarnings(pDesc);
            }
            if (pDesc->pfnFree)
                pDesc->pfnFree(pThis->pvState);
        }
        else
            rc = VERR_NO_MEMORY;
        pThis->u32Magic = 0;
        RTMemFree(pThis);
    }
    else
        rc = VERR_NO_MEMORY;
    return rc;
}
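Here RT_ALIGN_32(pDesc->cbState, 8) keeps the hash output 8-byte aligned behind the variable-size digest state, and RT_UOFFSETOF_DYN sizes a single allocation covering header, state, and hash. A generic sketch of the same flexible-array layout trick, using a hypothetical Digest type and plain calloc():

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define ALIGN_32(u32, uAlign) (((uint32_t)(u32) + ((uint32_t)(uAlign) - 1)) & ~((uint32_t)(uAlign) - 1))

/* Hypothetical digest object: fixed header followed by state, then hash. */
typedef struct Digest
{
    uint32_t offHash;      /* offset of the hash within abState */
    uint8_t  abState[1];   /* cbState bytes of state, then cbHash bytes */
} Digest;

static Digest *digestAlloc(uint32_t cbState, uint32_t cbHash)
{
    uint32_t offHash = ALIGN_32(cbState, 8);   /* keep the hash 8-byte aligned */
    Digest  *pThis   = (Digest *)calloc(1, offsetof(Digest, abState) + offHash + cbHash);
    if (pThis)
        pThis->offHash = offHash;
    return pThis;
}

Example #8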
/**
 * Allocate memory for host buffer and receive it.
 *
 * @param   u32ClientId    Host connection.
 * @param   fFormat        Buffer data format.
 * @param   pData          Where to store received data.
 * @param   cbDataSize     The size of the received data.
 * @param   cbMemSize      The actual size of memory occupied by *pData.
 *
 * @returns IPRT status code.
 */
static int vbclClipboardReadHostData(uint32_t u32ClientId, uint32_t fFormat, void **pData, uint32_t *cbDataSize, uint32_t *cbMemSize)
{
    int rc;

    AssertReturn(pData && cbDataSize && cbMemSize, VERR_INVALID_PARAMETER);

    uint32_t  cbDataSizeInternal = _4K;
    uint32_t  cbMemSizeInternal  = cbDataSizeInternal;
    void     *pDataInternal      = RTMemPageAllocZ(cbDataSizeInternal);

    if (!pDataInternal)
        return VERR_NO_MEMORY;

    rc = VbglR3ClipboardReadData(u32ClientId, fFormat, pDataInternal, cbMemSizeInternal, &cbDataSizeInternal);
    if (rc == VINF_BUFFER_OVERFLOW)
    {
        /* Reallocate bigger buffer and receive all the data */
        RTMemPageFree(pDataInternal, cbMemSizeInternal);
        cbDataSizeInternal = cbMemSizeInternal = RT_ALIGN_32(cbDataSizeInternal, PAGE_SIZE);
        pDataInternal = RTMemPageAllocZ(cbMemSizeInternal);
        if (!pDataInternal)
            return VERR_NO_MEMORY;

        rc = VbglR3ClipboardReadData(u32ClientId, fFormat, pDataInternal, cbMemSizeInternal, &cbDataSizeInternal);
    }

    /* Error occurred or zero-sized buffer. */
    if (RT_FAILURE(rc))
    {
        RTMemPageFree(pDataInternal, cbMemSizeInternal);
        return VERR_NO_MEMORY;
    }

    *pData      = pDataInternal;
    *cbDataSize = cbDataSizeInternal;
    *cbMemSize  = cbMemSizeInternal;

    return rc;
}
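The function deliberately reads at most twice: an optimistic 4K attempt, then a single page-aligned retry sized from the reported requirement. A self-contained sketch of that two-pass pattern, with a hypothetical readData() producer standing in for VbglR3ClipboardReadData():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN_32(u32, uAlign) (((uint32_t)(u32) + ((uint32_t)(uAlign) - 1)) & ~((uint32_t)(uAlign) - 1))

/* Hypothetical producer: copies up to cbBuf bytes, always reports the full
   size via *pcbNeeded, and returns nonzero when the buffer was too small. */
static const uint8_t g_abSrc[10000];
static int readData(void *pvBuf, uint32_t cbBuf, uint32_t *pcbNeeded)
{
    *pcbNeeded = sizeof(g_abSrc);
    memcpy(pvBuf, g_abSrc, cbBuf < sizeof(g_abSrc) ? cbBuf : sizeof(g_abSrc));
    return cbBuf < sizeof(g_abSrc);
}

static void *readAll(uint32_t *pcbData)
{
    uint32_t cbBuf = 4096;                    /* optimistic first guess */
    void *pv = malloc(cbBuf);
    if (!pv)
        return NULL;

    if (readData(pv, cbBuf, pcbData))
    {
        /* Too small: one retry with a page-aligned buffer that fits it all. */
        free(pv);
        cbBuf = ALIGN_32(*pcbData, 4096);
        pv = malloc(cbBuf);
        if (pv && readData(pv, cbBuf, pcbData))
        {
            free(pv);                         /* grew again mid-flight; give up */
            pv = NULL;
        }
    }
    return pv;
}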
Example #9
/**
 * Free memory allocated using MMHyperAlloc().
 * The caller validates the parameters of this request.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pv          The memory to free.
 * @remark  Try to avoid freeing hyper memory.
 */
static int mmHyperFreeInternal(PVM pVM, void *pv)
{
    Log2(("MMHyperFree: pv=%p\n", pv));
    if (!pv)
        return VINF_SUCCESS;
    AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
                    ("Invalid pointer %p!\n", pv),
                    VERR_INVALID_POINTER);

    /*
     * Get the heap and stats.
     * Validate the chunk at the same time.
     */
    PMMHYPERCHUNK   pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);

    AssertMsgReturn(    (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
                    ||  RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
                    ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
                    ("%p: Not used!\n", pv),
                    VERR_INVALID_POINTER);

    int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
    AssertMsgReturn(    (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
                    && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    /* statistics */
#ifdef VBOX_WITH_STATISTICS
    PMMHYPERSTAT    pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
    AssertMsgReturn(    RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
                    &&  pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#else
    AssertMsgReturn(!pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#endif

    /* The heap structure. */
    PMMHYPERHEAP    pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
    AssertMsgReturn(    !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
                    &&  pChunk->offHeap,
                    ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
                    ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
                    VERR_INVALID_POINTER);
    Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap));

    /* Some more verifications using additional info from pHeap. */
    AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
                    ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(   (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
                    ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
                    (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
                    VERR_INVALID_POINTER);

#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif

#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
    /* calc block size. */
    const uint32_t cbChunk = pChunk->offNext
        ? pChunk->offNext
        : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
#endif
#ifdef MMHYPER_HEAP_FREE_POISON
    /* poison the block */
    memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
#endif

#ifdef MMHYPER_HEAP_FREE_DELAY
# ifdef MMHYPER_HEAP_FREE_POISON
    /*
     * Check poison.
     */
    unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
    while (i-- > 0)
        if (pHeap->aDelayedFrees[i].offChunk)
        {
            PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
            const size_t cb = pCur->offNext
                ? pCur->offNext - sizeof(*pCur)
                : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
            uint8_t *pab = (uint8_t *)(pCur + 1);
            for (unsigned off = 0; off < cb; off++)
                AssertReleaseMsg(pab[off] == 0xCB,
                                 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
                                  pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
        }
# endif /* MMHYPER_HEAP_FREE_POISON */

    /*
     * Delayed freeing.
     */
    int rc = VINF_SUCCESS;
    if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
    {
        PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
        rc = mmHyperFree(pHeap, pChunkFree);
    }
    pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
    pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
    pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);

#else   /* !MMHYPER_HEAP_FREE_DELAY */
    /*
     * Call the worker.
     */
    int rc = mmHyperFree(pHeap, pChunk);
#endif  /* !MMHYPER_HEAP_FREE_DELAY */

    /*
     * Update statistics.
     */
#ifdef VBOX_WITH_STATISTICS
    pStat->cFrees++;
    if (RT_SUCCESS(rc))
    {
        pStat->cbFreed        += cbChunk;
        pStat->cbCurAllocated -= cbChunk;
    }
    else
        pStat->cFailures++;
#endif

    return rc;
}
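With MMHYPER_HEAP_FREE_DELAY and MMHYPER_HEAP_FREE_POISON both active, a freed chunk sits poisoned in a small ring before it is actually released, giving the scan above a window in which a late writer to freed memory trips a release assertion. A stripped-down sketch of that ring, using hypothetical globals and the process heap rather than the hyper heap:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DELAY_SLOTS 16
#define POISON_BYTE 0xCB

static void    *g_apvDelayed[DELAY_SLOTS];   /* ring of deferred frees */
static size_t   g_acbDelayed[DELAY_SLOTS];
static unsigned g_iDelayed;

static void delayedFree(void *pv, size_t cb)
{
    /* Poison the block so a late writer is detectable below. */
    memset(pv, POISON_BYTE, cb);

    /* Scan everything still queued; a non-poison byte means use-after-free. */
    for (unsigned i = 0; i < DELAY_SLOTS; i++)
        if (g_apvDelayed[i])
        {
            const uint8_t *pab = (const uint8_t *)g_apvDelayed[i];
            for (size_t off = 0; off < g_acbDelayed[i]; off++)
                if (pab[off] != POISON_BYTE)
                    abort();
        }

    /* Release the oldest entry and park this block in its slot. */
    free(g_apvDelayed[g_iDelayed]);          /* free(NULL) is a no-op */
    g_apvDelayed[g_iDelayed] = pv;
    g_acbDelayed[g_iDelayed] = cb;
    g_iDelayed = (g_iDelayed + 1) % DELAY_SLOTS;
}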
Example #10
/**
 * VMMR3Init worker that initiates the switcher code (aka core code).
 *
 * This is core per VM code which might need fixups and/or for ease of use are
 * put on linear contiguous backing.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
int vmmR3SwitcherInit(PVM pVM)
{
#ifndef VBOX_WITH_RAW_MODE
    return VINF_SUCCESS;
#else
    /*
     * Calc the size.
     */
    unsigned cbCoreCode = 0;
    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    {
        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
        if (pSwitcher)
        {
            AssertRelease((unsigned)pSwitcher->enmType == iSwitcher);
            cbCoreCode += RT_ALIGN_32(pSwitcher->cbCode + 1, 32);
        }
    }

    /*
     * Allocate contiguous pages for switchers and deal with
     * conflicts in the intermediate mapping of the code.
     */
    pVM->vmm.s.cbCoreCode = RT_ALIGN_32(cbCoreCode, PAGE_SIZE);
    pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
    int rc = VERR_NO_MEMORY;
    if (pVM->vmm.s.pvCoreCodeR3)
    {
        rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
        if (rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT)
        {
            /* try more allocations - Solaris, Linux.  */
            const unsigned cTries = 8234;
            struct VMMInitBadTry
            {
                RTR0PTR  pvR0;
                void    *pvR3;
                RTHCPHYS HCPhys;
                RTUINT   cb;
            } *paBadTries = (struct VMMInitBadTry *)RTMemTmpAlloc(sizeof(*paBadTries) * cTries);
            AssertReturn(paBadTries, VERR_NO_TMP_MEMORY);
            unsigned i = 0;
            do
            {
                paBadTries[i].pvR3 = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0 = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                i++;
                pVM->vmm.s.pvCoreCodeR0 = NIL_RTR0PTR;
                pVM->vmm.s.HCPhysCoreCode = NIL_RTHCPHYS;
                pVM->vmm.s.pvCoreCodeR3 = SUPR3ContAlloc(pVM->vmm.s.cbCoreCode >> PAGE_SHIFT, &pVM->vmm.s.pvCoreCodeR0, &pVM->vmm.s.HCPhysCoreCode);
                if (!pVM->vmm.s.pvCoreCodeR3)
                    break;
                rc = PGMR3MapIntermediate(pVM, pVM->vmm.s.pvCoreCodeR0, pVM->vmm.s.HCPhysCoreCode, cbCoreCode);
            } while (   rc == VERR_PGM_INTERMEDIATE_PAGING_CONFLICT
                     && i < cTries - 1);

            /* cleanup */
            if (RT_FAILURE(rc))
            {
                paBadTries[i].pvR3   = pVM->vmm.s.pvCoreCodeR3;
                paBadTries[i].pvR0   = pVM->vmm.s.pvCoreCodeR0;
                paBadTries[i].HCPhys = pVM->vmm.s.HCPhysCoreCode;
                paBadTries[i].cb     = pVM->vmm.s.cbCoreCode;
                i++;
                LogRel(("Failed to allocated and map core code: rc=%Rrc\n", rc));
            }
            while (i-- > 0)
            {
                LogRel(("Core code alloc attempt #%d: pvR3=%p pvR0=%p HCPhys=%RHp\n",
                        i, paBadTries[i].pvR3, paBadTries[i].pvR0, paBadTries[i].HCPhys));
                SUPR3ContFree(paBadTries[i].pvR3, paBadTries[i].cb >> PAGE_SHIFT);
            }
            RTMemTmpFree(paBadTries);
        }
    }
Example #11
/**
 * Allocates memory in the Hypervisor (RC VMM) area.
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32,64 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   ppv         Where to store the address to the allocated
 *                      memory.
 */
static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
{
    AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));

    /*
     * Validate input and adjust it to reasonable values.
     */
    if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
        uAlignment = MMHYPER_HEAP_ALIGN_MIN;
    uint32_t cbAligned;
    switch (uAlignment)
    {
        case 8:
        case 16:
        case 32:
        case 64:
            cbAligned = RT_ALIGN_32(cb, MMHYPER_HEAP_ALIGN_MIN);
            if (!cbAligned || cbAligned < cb)
            {
                Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
                AssertMsgFailed(("Nice try.\n"));
                return VERR_INVALID_PARAMETER;
            }
            break;

        case PAGE_SIZE:
            AssertMsg(RT_ALIGN_32(cb, PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
            cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
            if (!cbAligned)
            {
                Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
                AssertMsgFailed(("Nice try.\n"));
                return VERR_INVALID_PARAMETER;
            }
            break;

        default:
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
            AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
            return VERR_INVALID_PARAMETER;
    }


    /*
     * Get the heap and statistics.
     */
    PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
#ifdef VBOX_WITH_STATISTICS
    PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
    if (!pStat)
    {
        Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
        AssertMsgFailed(("Failed to allocate statistics!\n"));
        return VERR_MM_HYPER_NO_MEMORY;
    }
#endif
    if (uAlignment < PAGE_SIZE)
    {
        /*
         * Allocate a chunk.
         */
        PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
        if (pChunk)
        {
#ifdef VBOX_WITH_STATISTICS
            const uint32_t cbChunk = pChunk->offNext
                ? pChunk->offNext
                : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
            pStat->cbAllocated += (uint32_t)cbChunk;
            pStat->cbCurAllocated += (uint32_t)cbChunk;
            if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
                pStat->cbMaxAllocated = pStat->cbCurAllocated;
            pStat->cAllocations++;
            pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
#else
            pChunk->offStat = 0;
#endif
            void *pv = pChunk + 1;
            *ppv = pv;
            ASMMemZero32(pv, cbAligned);
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate page aligned memory.
         */
        void *pv = mmHyperAllocPages(pHeap, cbAligned);
        if (pv)
        {
#ifdef VBOX_WITH_STATISTICS
            pStat->cbAllocated += cbAligned;
            pStat->cbCurAllocated += cbAligned;
            if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
                pStat->cbMaxAllocated = pStat->cbCurAllocated;
            pStat->cAllocations++;
#endif
            *ppv = pv;
            /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPR3PageAlloc zeros it. */
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, ppv));
            return VINF_SUCCESS;
        }
    }

#ifdef VBOX_WITH_STATISTICS
    pStat->cAllocations++;
    pStat->cFailures++;
#endif
    Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
    AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
    return VERR_MM_HYPER_NO_MEMORY;
}
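The !cbAligned || cbAligned < cb guard merits a second look: RT_ALIGN_32 truncates its operand to uint32_t, so a huge size_t request shrinks, and a value within 15 bytes of UINT32_MAX wraps to zero. A standalone illustration of both failure modes (local ALIGN_32 stand-in again):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGN_32(u32, uAlign) (((uint32_t)(u32) + ((uint32_t)(uAlign) - 1)) & ~((uint32_t)(uAlign) - 1))

int main(void)
{
    /* Wrap-around: a request in the last 15 bytes of the 32-bit range
       rounds to zero, caught by the !cbAligned test. */
    assert(ALIGN_32(UINT32_MAX - 3, 16) == 0);

    /* Truncation: on a 64-bit host a size_t request above 4G is chopped
       by the uint32_t cast, caught by the cbAligned < cb test. */
    size_t cb = (size_t)UINT32_MAX + 9;   /* 0x100000008 on 64-bit hosts */
    if (sizeof(size_t) > 4)
        assert(ALIGN_32(cb, 16) < cb);    /* 16 < 0x100000008 */
    return 0;
}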
Example #12
RTDECL(RTCPUID) RTMpGetCoreCount(void)
{
    /*
     * Resolve the API dynamically (one try) as it requires XP w/ sp3 or later.
     */
    typedef BOOL (WINAPI *PFNGETLOGICALPROCINFO)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);
    static PFNGETLOGICALPROCINFO s_pfnGetLogicalProcInfo = (PFNGETLOGICALPROCINFO)~(uintptr_t)0;
    if (s_pfnGetLogicalProcInfo == (PFNGETLOGICALPROCINFO)~(uintptr_t)0)
        s_pfnGetLogicalProcInfo = (PFNGETLOGICALPROCINFO)RTLdrGetSystemSymbol("kernel32.dll", "GetLogicalProcessorInformation");

    /*
     * Sadly, on XP and Server 2003, even if the API is present, it does not tell us
     * how many physical cores there are (any package will look like a single core).
     * That is worse than not using the API at all, so just skip it unless it's Vista+.
     */
    bool fIsVistaOrLater = false;
    OSVERSIONINFOEX OSInfoEx = { 0 };
    OSInfoEx.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
    if (   GetVersionEx((LPOSVERSIONINFO) &OSInfoEx)
        && (OSInfoEx.dwPlatformId == VER_PLATFORM_WIN32_NT)
        && (OSInfoEx.dwMajorVersion >= 6))
        fIsVistaOrLater = true;

    if (s_pfnGetLogicalProcInfo && fIsVistaOrLater)
    {
        /*
         * Query the information. This unfortunately requires a buffer, so we
         * start with a guess and let Windows advise us if it's too small.
         */
        DWORD                                   cbSysProcInfo = _4K;
        PSYSTEM_LOGICAL_PROCESSOR_INFORMATION   paSysInfo = NULL;
        BOOL                                    fRc = FALSE;
        do
        {
            cbSysProcInfo = RT_ALIGN_32(cbSysProcInfo, 256);
            void *pv = RTMemRealloc(paSysInfo, cbSysProcInfo);
            if (!pv)
                break;
            paSysInfo = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)pv;
            fRc = s_pfnGetLogicalProcInfo(paSysInfo, &cbSysProcInfo);
        } while (!fRc && GetLastError() == ERROR_INSUFFICIENT_BUFFER);
        if (fRc)
        {
            /*
             * Parse the result.
             */
            uint32_t cCores = 0;
            uint32_t i      = cbSysProcInfo / sizeof(paSysInfo[0]);
            while (i-- > 0)
                if (paSysInfo[i].Relationship == RelationProcessorCore)
                    cCores++;

            RTMemFree(paSysInfo);
            Assert(cCores > 0);
            return cCores;
        }

        RTMemFree(paSysInfo);
    }

    /* If we don't have the necessary API or if it failed, return the same
       value as the generic implementation. */
    return RTMpGetCount();
}
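None of this buffering is visible to callers; a minimal consumer, assuming an IPRT build environment with iprt/mp.h on the include path, is just:

#include <iprt/mp.h>
#include <stdio.h>

int main(void)
{
    /* Falls back to RTMpGetCount() when per-core info is unavailable. */
    printf("physical cores: %u\n", RTMpGetCoreCount());
    printf("logical CPUs:   %u\n", RTMpGetCount());
    return 0;
}

Example #13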
/**
 * Search for content of specified type in guest clipboard buffer and put
 * it into newly allocated buffer.
 *
 * @param   pPasteboard     Guest PasteBoard reference.
 * @param   fFormat         Data formats we are looking for.
 * @param   ppvData         Where to return the pointer to the received data.
 * @param   pcbData         Where to return the size of the data.
 * @param   pcbAlloc        Where to return the size of the memory block
 *                          *ppvData points to. (Usually greater than *pcbData
 *                          because the allocation is page aligned.)
 * @returns IPRT status code.
 */
static int vbclClipboardReadGuestData(PasteboardRef pPasteboard, CFStringRef sFormat, void **ppvData, uint32_t *pcbData,
                                      uint32_t *pcbAlloc)
{
    ItemCount cItems, iItem;
    OSStatus  rc;

    void     *pvData  = NULL;
    uint32_t  cbData  = 0;
    uint32_t  cbAlloc = 0;

    AssertPtrReturn(ppvData, VERR_INVALID_POINTER);
    AssertPtrReturn(pcbData, VERR_INVALID_POINTER);
    AssertPtrReturn(pcbAlloc, VERR_INVALID_POINTER);

    rc = PasteboardGetItemCount(pPasteboard, &cItems);
    AssertReturn(rc == noErr, VERR_INVALID_PARAMETER);
    AssertReturn(cItems > 0, VERR_INVALID_PARAMETER);

    /* Walk through all the items in the Pasteboard in order to find
       the one that corresponds to the requested data format. */
    for (iItem = 1; iItem <= cItems; iItem++)
    {
        PasteboardItemID iItemID;
        CFDataRef        flavorData;

        /* Now, get the item's flavor that corresponds to the requested type. */
        rc = PasteboardGetItemIdentifier(pPasteboard, iItem, &iItemID);
        AssertReturn(rc == noErr, VERR_INVALID_PARAMETER);
        rc = PasteboardCopyItemFlavorData(pPasteboard, iItemID, sFormat, &flavorData);
        if (rc == noErr)
        {
            void *flavorDataPtr = (void *)CFDataGetBytePtr(flavorData);
            cbData = CFDataGetLength(flavorData);
            if (flavorDataPtr && cbData > 0)
            {
                cbAlloc = RT_ALIGN_32(cbData, PAGE_SIZE);
                pvData = RTMemPageAllocZ(cbAlloc);
                if (pvData)
                    memcpy(pvData, flavorDataPtr, cbData);
            }

            CFRelease(flavorData);

            /* Found first matching item, no more search. */
            break;
        }

    }

    /* Found match */
    if (pvData)
    {
        *ppvData  = pvData;
        *pcbData  = cbData;
        *pcbAlloc = cbAlloc;

        return VINF_SUCCESS;
    }

    return VERR_INVALID_PARAMETER;
}
Example #14
RTR3DECL(int)  RTFileWrite(RTFILE hFile, const void *pvBuf, size_t cbToWrite, size_t *pcbWritten)
{
    if (cbToWrite <= 0)
        return VINF_SUCCESS;
    ULONG cbToWriteAdj = (ULONG)cbToWrite;
    AssertReturn(cbToWriteAdj == cbToWrite, VERR_NUMBER_TOO_BIG);

    ULONG cbWritten = 0;
    if (WriteFile((HANDLE)RTFileToNative(hFile), pvBuf, cbToWriteAdj, &cbWritten, NULL))
    {
        if (pcbWritten)
            /* Caller can handle partial writes. */
            *pcbWritten = cbWritten;
        else
        {
            /* Caller expects everything to be written. */
            while (cbToWriteAdj > cbWritten)
            {
                ULONG cbWrittenPart = 0;
                if (!WriteFile((HANDLE)RTFileToNative(hFile), (char*)pvBuf + cbWritten,
                               cbToWriteAdj - cbWritten, &cbWrittenPart, NULL))
                {
                    int rc = RTErrConvertFromWin32(GetLastError());
                    if (   rc == VERR_DISK_FULL
                        && IsBeyondLimit(hFile, cbToWriteAdj - cbWritten, FILE_CURRENT)
                       )
                        rc = VERR_FILE_TOO_BIG;
                    return rc;
                }
                if (cbWrittenPart == 0)
                    return VERR_WRITE_ERROR;
                cbWritten += cbWrittenPart;
            }
        }
        return VINF_SUCCESS;
    }

    /*
     * If it's a console, we might bump into out of memory conditions in the
     * WriteConsole call.
     */
    DWORD dwErr = GetLastError();
    if (dwErr == ERROR_NOT_ENOUGH_MEMORY)
    {
        ULONG cbChunk = cbToWriteAdj / 2;
        if (cbChunk > _32K)
            cbChunk = _32K;
        else
            cbChunk = RT_ALIGN_32(cbChunk, 256);

        cbWritten = 0;
        while (cbToWriteAdj > cbWritten)
        {
            ULONG cbToWrite     = RT_MIN(cbChunk, cbToWriteAdj - cbWritten);
            ULONG cbWrittenPart = 0;
            if (!WriteFile((HANDLE)RTFileToNative(hFile), (const char *)pvBuf + cbWritten, cbToWrite, &cbWrittenPart, NULL))
            {
                /* If we failed because the buffer is too big, shrink it and
                   try again. */
                dwErr = GetLastError();
                if (   dwErr == ERROR_NOT_ENOUGH_MEMORY
                    && cbChunk > 8)
                {
                    cbChunk /= 2;
                    continue;
                }
                int rc = RTErrConvertFromWin32(dwErr);
                if (   rc == VERR_DISK_FULL
                    && IsBeyondLimit(hFile, cbToWriteAdj - cbWritten, FILE_CURRENT))
                    rc = VERR_FILE_TOO_BIG;
                return rc;
            }
            cbWritten += cbWrittenPart;

            /* Return if the caller can handle partial writes, otherwise try
               write out everything. */
            if (pcbWritten)
            {
                *pcbWritten = cbWritten;
                break;
            }
            if (cbWrittenPart == 0)
                return VERR_WRITE_ERROR;
        }
        return VINF_SUCCESS;
    }

    int rc = RTErrConvertFromWin32(dwErr);
    if (   rc == VERR_DISK_FULL
        && IsBeyondLimit(hFile, cbToWriteAdj - cbWritten, FILE_CURRENT))
        rc = VERR_FILE_TOO_BIG;
    return rc;
}
Example #15
RTR3DECL(int)  RTFileRead(RTFILE hFile, void *pvBuf, size_t cbToRead, size_t *pcbRead)
{
    if (cbToRead <= 0)
        return VINF_SUCCESS;
    ULONG cbToReadAdj = (ULONG)cbToRead;
    AssertReturn(cbToReadAdj == cbToRead, VERR_NUMBER_TOO_BIG);

    ULONG cbRead = 0;
    if (ReadFile((HANDLE)RTFileToNative(hFile), pvBuf, cbToReadAdj, &cbRead, NULL))
    {
        if (pcbRead)
            /* Caller can handle partial reads. */
            *pcbRead = cbRead;
        else
        {
            /* Caller expects everything to be read. */
            while (cbToReadAdj > cbRead)
            {
                ULONG cbReadPart = 0;
                if (!ReadFile((HANDLE)RTFileToNative(hFile), (char*)pvBuf + cbRead, cbToReadAdj - cbRead, &cbReadPart, NULL))
                    return RTErrConvertFromWin32(GetLastError());
                if (cbReadPart == 0)
                    return VERR_EOF;
                cbRead += cbReadPart;
            }
        }
        return VINF_SUCCESS;
    }

    /*
     * If it's a console, we might bump into out of memory conditions in the
     * ReadConsole call.
     */
    DWORD dwErr = GetLastError();
    if (dwErr == ERROR_NOT_ENOUGH_MEMORY)
    {
        ULONG cbChunk = cbToReadAdj / 2;
        if (cbChunk > 16*_1K)
            cbChunk = 16*_1K;
        else
            cbChunk = RT_ALIGN_32(cbChunk, 256);

        cbRead = 0;
        while (cbToReadAdj > cbRead)
        {
            ULONG cbToRead   = RT_MIN(cbChunk, cbToReadAdj - cbRead);
            ULONG cbReadPart = 0;
            if (!ReadFile((HANDLE)RTFileToNative(hFile), (char *)pvBuf + cbRead, cbToRead, &cbReadPart, NULL))
            {
                /* If we failed because the buffer is too big, shrink it and
                   try again. */
                dwErr = GetLastError();
                if (   dwErr == ERROR_NOT_ENOUGH_MEMORY
                    && cbChunk > 8)
                {
                    cbChunk /= 2;
                    continue;
                }
                return RTErrConvertFromWin32(dwErr);
            }
            cbRead += cbReadPart;

            /* Return if the caller can handle partial reads, otherwise try
               fill the buffer all the way up. */
            if (pcbRead)
            {
                *pcbRead = cbRead;
                break;
            }
            if (cbReadPart == 0)
                return VERR_EOF;
        }
        return VINF_SUCCESS;
    }

    return RTErrConvertFromWin32(dwErr);
}
Example #16
static void rtMemReplaceMallocAndFriends(void)
{
    struct
    {
        const char *pszName;
        PFNRT       pfnReplacement;
        PFNRT       pfnOrg;
        PFNRT      *ppfnJumpBack;
    } aApis[] =
    {
        { "free",    (PFNRT)rtMemReplacementFree,    (PFNRT)free,    (PFNRT *)&g_pfnOrgFree },
        { "realloc", (PFNRT)rtMemReplacementRealloc, (PFNRT)realloc, (PFNRT *)&g_pfnOrgRealloc },
        { "calloc",  (PFNRT)rtMemReplacementCalloc,  (PFNRT)calloc,  (PFNRT *)&g_pfnOrgCalloc },
        { "malloc",  (PFNRT)rtMemReplacementMalloc,  (PFNRT)malloc,  (PFNRT *)&g_pfnOrgMalloc },
#ifdef RT_OS_DARWIN
        { "malloc_size", (PFNRT)rtMemReplacementMallocSize,  (PFNRT)malloc_size,  (PFNRT *)&g_pfnOrgMallocSize },
#endif
    };

    /*
     * Initialize the jump backs to avoid recursively entering this function.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
        *aApis[i].ppfnJumpBack = aApis[i].pfnOrg;

    /*
     * Give the user an option to skip replacing malloc.
     */
    if (getenv("IPRT_DONT_REPLACE_MALLOC"))
        return;

    /*
     * Allocate a page for jump back code (we leak it).
     */
    uint8_t *pbExecPage = (uint8_t *)RTMemPageAlloc(PAGE_SIZE); AssertFatal(pbExecPage);
    int rc = RTMemProtect(pbExecPage, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);

    /*
     * Do the ground work.
     */
    uint8_t *pb = pbExecPage;
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        /* Resolve it. */
        PFNRT pfnOrg = (PFNRT)(uintptr_t)dlsym(RTLD_DEFAULT, aApis[i].pszName);
        if (pfnOrg)
            aApis[i].pfnOrg = pfnOrg;
        else
            pfnOrg = aApis[i].pfnOrg;

        /* Figure what we can replace and how much to duplicate in the jump back code. */
# ifdef RT_ARCH_AMD64
        uint32_t         cbNeeded   = 12;
        DISCPUMODE const enmCpuMode = DISCPUMODE_64BIT;
# elif defined(RT_ARCH_X86)
        uint32_t   const cbNeeded   = 5;
        DISCPUMODE const enmCpuMode = DISCPUMODE_32BIT;
# else
#  error "Port me"
# endif
        uint32_t offJmpBack = 0;
        uint32_t cbCopy = 0;
        while (offJmpBack < cbNeeded)
        {
            DISCPUSTATE Dis;
            uint32_t cbInstr = 1;
            rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
            AssertFatal(!(Dis.pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW)));
# ifdef RT_ARCH_AMD64
#  ifdef RT_OS_DARWIN
            /* Kludge for: cmp [malloc_def_zone_state], 1; jg 2; call _malloc_initialize; 2: */
            DISQPVPARAMVAL Parm;
            if (   Dis.ModRM.Bits.Mod == 0
                && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */
                && (Dis.Param2.fUse & (DISUSE_IMMEDIATE16_SX8 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE64_SX8))
                && Dis.Param2.uValue == 1
                && Dis.pCurInstr->uOpcode == OP_CMP)
            {
                cbCopy = offJmpBack;

                offJmpBack += cbInstr;
                rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
                if (   Dis.pCurInstr->uOpcode == OP_JNBE
                    && Dis.Param1.uDisp.i8 == 5)
                {
                    offJmpBack += cbInstr + 5;
                    AssertFatal(offJmpBack >= cbNeeded);
                    break;
                }
            }
#  endif
            AssertFatal(!(Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */));
# endif
            offJmpBack += cbInstr;
        }
        if (!cbCopy)
            cbCopy = offJmpBack;

        /* Assemble the jump back. */
        memcpy(pb, (void *)(uintptr_t)pfnOrg, cbCopy);
        uint32_t off = cbCopy;
# ifdef RT_ARCH_AMD64
        pb[off++] = 0xff; /* jmp qword [$+8 wrt RIP] */
        pb[off++] = 0x25;
        *(uint32_t *)&pb[off] = 0;
        off += 4;
        *(uint64_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack;
        off += 8;
        off = RT_ALIGN_32(off, 16);
# elif defined(RT_ARCH_X86)
        pb[off++] = 0xe9; /* jmp rel32 */
        *(uint32_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack - (uintptr_t)&pb[4];
        off += 4;
        off = RT_ALIGN_32(off, 8);
# else
#  error "Port me"
# endif
        *aApis[i].ppfnJumpBack = (PFNRT)(uintptr_t)pb;
        pb += off;
    }

    /*
     * Modify the APIs.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
    {
        pb = (uint8_t *)(uintptr_t)aApis[i].pfnOrg;
        rc = RTMemProtect(pb, 16, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);

# ifdef RT_ARCH_AMD64
        /* Assemble the LdrLoadDll patch. */
        *pb++ = 0x48; /* mov rax, qword */
        *pb++ = 0xb8;
        *(uint64_t *)pb = (uintptr_t)aApis[i].pfnReplacement;
        pb += 8;
        *pb++ = 0xff; /* jmp rax */
        *pb++ = 0xe0;
# elif defined(RT_ARCH_X86)
        *pb++ = 0xe9; /* jmp rel32 */
        *(uint32_t *)pb = (uintptr_t)aApis[i].pfnReplacement - (uintptr_t)&pb[4];
# else
#  error "Port me"
# endif
    }
}
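Both patch paths end in the same displacement arithmetic: an E9 jmp rel32 encodes its target relative to the first byte after the 5-byte instruction, which is why the x86 patch path subtracts (uintptr_t)&pb[4] once pb points at the rel32 slot. A standalone check of that relation, with hypothetical addresses:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uintptr_t uPatchSite = 0x1000;   /* hypothetical addresses */
    uintptr_t uTarget    = 0x2000;

    /* rel32 operand of "jmp rel32" at uPatchSite: target minus the
       address of the byte following the 5-byte instruction. */
    uint32_t rel32 = (uint32_t)(uTarget - (uPatchSite + 5));
    assert(uPatchSite + 5 + (int32_t)rel32 == uTarget);
    return 0;
}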
Example #17
/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n",  cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR     off = GCPtr - pCur->GCPtr;
                const unsigned  iPT = off >> X86_PD_SHIFT;
                const unsigned  iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                PGMSHWPTEPAE_SET(pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512], Pte.u);

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n",  GCPtr));
    return VERR_INVALID_PARAMETER;
}
Example #18
STDMETHODIMP UIFrameBufferQuartz2D::SetVisibleRegion(BYTE *aRectangles, ULONG aCount)
{
    PRTRECT rects = (PRTRECT)aRectangles;

    if (!rects)
        return E_POINTER;

    /** @todo r=bird: Is this thread safe? If I remember the code flow correctly, the
     * GUI thread could be happily jogging along paintEvent now on another cpu core.
     * This function is called on the EMT (emulation thread). Which means, blocking
     * execution waiting for a lock is out of the question. A quick solution using
     * ASMAtomic(Cmp)XchgPtr and a struct { cAllocated; cRects; aRects[1]; }
     * *mRegion, *mUnusedRegion; should suffice (and permit you to reuse allocations). */
    RegionRects *rgnRcts = ASMAtomicXchgPtrT(&mRegionUnused, NULL, RegionRects *);
    if (rgnRcts && rgnRcts->allocated < aCount)
    {
        RTMemFree (rgnRcts);
        rgnRcts = NULL;
    }
    if (!rgnRcts)
    {
        ULONG allocated = RT_ALIGN_32(aCount + 1, 32);
        allocated = RT_MAX (128, allocated);
        rgnRcts = (RegionRects *)RTMemAlloc(RT_OFFSETOF(RegionRects, rcts[allocated]));
        if (!rgnRcts)
            return E_OUTOFMEMORY;
        rgnRcts->allocated = allocated;
    }
    rgnRcts->used = 0;

    QRegion reg;
//    printf ("Region rects follow...\n");
    QRect vmScreenRect (0, 0, width(), height());
    for (ULONG ind = 0; ind < aCount; ++ ind)
    {
        QRect rect;
        rect.setLeft(rects->xLeft);
        rect.setTop(rects->yTop);
        /* QRect are inclusive */
        rect.setRight(rects->xRight - 1);
        rect.setBottom(rects->yBottom - 1);

        /* The rect should intersect with the vm screen. */
        rect = vmScreenRect.intersect(rect);
        ++ rects;
        /* Make sure only valid rects are distributed */
        /* todo: Test if the other framebuffer implementations have the same
         * problem with invalid rects (in Linux/Windows) */
        if (rect.isValid() &&
           rect.width() > 0 && rect.height() > 0)
            reg += rect;
        else
            continue;

        CGRect *cgRct = &rgnRcts->rcts[rgnRcts->used];
        cgRct->origin.x = rect.x();
        cgRct->origin.y = height() - rect.y() - rect.height();
        cgRct->size.width = rect.width();
        cgRct->size.height = rect.height();
//        printf ("Region rect[%d - %d]: %d %d %d %d\n", rgnRcts->used, aCount, rect.x(), rect.y(), rect.height(), rect.width());
        rgnRcts->used++;
    }
//    printf ("..................................\n");

    RegionRects *pOld = ASMAtomicXchgPtrT(&mRegion, rgnRcts, RegionRects *);
    if (    pOld
        &&  !ASMAtomicCmpXchgPtr(&mRegionUnused, pOld, NULL))
        RTMemFree(pOld);

    QApplication::postEvent(m_pMachineView, new UISetRegionEvent (reg));

    return S_OK;
}
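Example #19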
STDMETHODIMP UIFrameBufferQuartz2D::SetVisibleRegion(BYTE *pRectangles, ULONG aCount)
{
    LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Rectangle count=%lu\n",
             (unsigned long)aCount));

    /* Make sure rectangles were passed: */
    if (!pRectangles)
    {
        LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Invalid pRectangles pointer!\n"));

        return E_POINTER;
    }

    /* Lock access to frame-buffer: */
    lock();

    /* Make sure frame-buffer is used: */
    if (m_fIsMarkedAsUnused)
    {
        LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Ignored!\n"));

        /* Unlock access to frame-buffer: */
        unlock();

        /* Ignore SetVisibleRegion: */
        return E_FAIL;
    }

    /** @todo r=bird: Is this thread safe? If I remember the code flow correctly, the
     * GUI thread could be happily jogging along paintEvent now on another cpu core.
     * This function is called on the EMT (emulation thread). Which means, blocking
     * execution waiting for a lock is out of the question. A quick solution using
     * ASMAtomic(Cmp)XchgPtr and a struct { cAllocated; cRects; aRects[1]; }
     * *mRegion, *mUnusedRegion; should suffice (and permit you to reuse allocations). */
    RegionRects *rgnRcts = ASMAtomicXchgPtrT(&mRegionUnused, NULL, RegionRects *);
    if (rgnRcts && rgnRcts->allocated < aCount)
    {
        RTMemFree (rgnRcts);
        rgnRcts = NULL;
    }
    if (!rgnRcts)
    {
        ULONG allocated = RT_ALIGN_32(aCount + 1, 32);
        allocated = RT_MAX (128, allocated);
        rgnRcts = (RegionRects *)RTMemAlloc(RT_OFFSETOF(RegionRects, rcts[allocated]));
        if (!rgnRcts)
        {
            /* Unlock access to frame-buffer: */
            unlock();

            return E_OUTOFMEMORY;
        }
        rgnRcts->allocated = allocated;
    }
    rgnRcts->used = 0;

    /* Compose region: */
    QRegion reg;
    PRTRECT rects = (PRTRECT)pRectangles;
    QRect vmScreenRect(0, 0, width(), height());
    for (ULONG ind = 0; ind < aCount; ++ ind)
    {
        /* Get current rectangle: */
        QRect rect;
        rect.setLeft(rects->xLeft);
        rect.setTop(rects->yTop);
        /* Which is inclusive: */
        rect.setRight(rects->xRight - 1);
        rect.setBottom(rects->yBottom - 1);

        /* The rect should intersect with the vm screen. */
        rect = vmScreenRect.intersect(rect);
        ++rects;
        /* Make sure only valid rects are distributed: */
        if (rect.isValid() &&
           rect.width() > 0 && rect.height() > 0)
            reg += rect;
        else
            continue;

        /* That is some *magic* added by Knut in r27807: */
        CGRect *cgRct = &rgnRcts->rcts[rgnRcts->used];
        cgRct->origin.x = rect.x();
        cgRct->origin.y = height() - rect.y() - rect.height();
        cgRct->size.width = rect.width();
        cgRct->size.height = rect.height();
        rgnRcts->used++;
    }

    RegionRects *pOld = ASMAtomicXchgPtrT(&mRegion, rgnRcts, RegionRects *);
    if (    pOld
        &&  !ASMAtomicCmpXchgPtr(&mRegionUnused, pOld, NULL))
        RTMemFree(pOld);

    /* Send async signal to update asynchronous visible-region: */
    LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Sending to async-handler...\n"));
    emit sigSetVisibleRegion(reg);

    /* Unlock access to frame-buffer: */
    unlock();

    /* Confirm SetVisibleRegion: */
    return S_OK;
}
Example #20
/**
 * @callback_method_impl{FNRTONCE,
 *      Resolves dynamic imports and initializes globals.}
 */
static DECLCALLBACK(int32_t) rtMpWinInitOnce(void *pvUser)
{
    RT_NOREF(pvUser);

    Assert(g_WinOsInfoEx.dwOSVersionInfoSize != 0);
    Assert(g_hModKernel32 != NULL);

    /*
     * Resolve dynamic APIs.
     */
#define RESOLVE_API(a_szMod, a_FnName) \
        do { \
            RT_CONCAT(g_pfn,a_FnName) = (decltype(a_FnName) *)GetProcAddress(g_hModKernel32, #a_FnName); \
        } while (0)
    RESOLVE_API("kernel32.dll", GetMaximumProcessorCount);
    //RESOLVE_API("kernel32.dll", GetActiveProcessorCount); - slow :/
    RESOLVE_API("kernel32.dll", GetCurrentProcessorNumber);
    RESOLVE_API("kernel32.dll", GetCurrentProcessorNumberEx);
    RESOLVE_API("kernel32.dll", GetLogicalProcessorInformation);
    RESOLVE_API("kernel32.dll", GetLogicalProcessorInformationEx);

    /*
     * Reset globals.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx); i++)
        g_aidRtMpWinByCpuSetIdx[i] = NIL_RTCPUID;
    for (unsigned idxGroup = 0; idxGroup < RT_ELEMENTS(g_aRtMpWinCpuGroups); idxGroup++)
    {
        g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = 0;
        g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = 0;
        for (unsigned idxMember = 0; idxMember < RT_ELEMENTS(g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers); idxMember++)
            g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = -1;
    }

    /*
     * Query group information, partitioning CPU IDs and CPU set indexes.
     *
     * We ASSUME the GroupInfo index is the same as the group number.
     *
     * We CANNOT ASSUME that the kernel CPU indexes are assigned in any given
     * way, though they usually are in group order by active processor.  So,
     * we do that to avoid trouble.  We must use information provided thru GIP
     * if we want the kernel CPU set indexes.  Even there, the inactive CPUs
     * won't have sensible indexes.  Sigh.
     *
     * We try to assign IDs to inactive CPUs in the same manner as mp-r0drv-nt.cpp
     *
     * Note! We will die (AssertFatal) if there are too many processors!
     */
    union
    {
        SYSTEM_INFO                                 SysInfo;
        SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX     Info;
        uint8_t                                     abPaddingG[  sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
                                                               + sizeof(PROCESSOR_GROUP_INFO) * RTCPUSET_MAX_CPUS];
        uint8_t                                     abPaddingC[  sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
                                                               +   (sizeof(PROCESSOR_RELATIONSHIP) + sizeof(GROUP_AFFINITY))
                                                                 * RTCPUSET_MAX_CPUS];
    } uBuf;
    if (g_pfnGetLogicalProcessorInformationEx)
    {
        /* Query the information. */
        DWORD cbData = sizeof(uBuf);
        AssertFatalMsg(g_pfnGetLogicalProcessorInformationEx(RelationGroup, &uBuf.Info, &cbData) != FALSE,
                       ("last error = %u, cbData = %u (in %u)\n", GetLastError(), cbData, sizeof(uBuf)));
        AssertFatalMsg(uBuf.Info.Relationship == RelationGroup,
                       ("Relationship = %u, expected %u!\n", uBuf.Info.Relationship, RelationGroup));
        AssertFatalMsg(uBuf.Info.Group.MaximumGroupCount <= RT_ELEMENTS(g_aRtMpWinCpuGroups),
                       ("MaximumGroupCount is %u, we only support up to %u!\n",
                        uBuf.Info.Group.MaximumGroupCount, RT_ELEMENTS(g_aRtMpWinCpuGroups)));

        AssertMsg(uBuf.Info.Group.MaximumGroupCount == uBuf.Info.Group.ActiveGroupCount, /* 2nd assumption mentioned above. */
                  ("%u vs %u\n", uBuf.Info.Group.MaximumGroupCount, uBuf.Info.Group.ActiveGroupCount));
        AssertFatal(uBuf.Info.Group.MaximumGroupCount >= uBuf.Info.Group.ActiveGroupCount);

        g_cRtMpWinMaxCpuGroups = uBuf.Info.Group.MaximumGroupCount;

        /* Count max cpus (see mp-r0drv-nt.cpp for why we don't use GetMaximumProcessorCount(ALL)). */
        uint32_t idxGroup;
        g_cRtMpWinMaxCpus = 0;
        for (idxGroup = 0; idxGroup < uBuf.Info.Group.ActiveGroupCount; idxGroup++)
            g_cRtMpWinMaxCpus += uBuf.Info.Group.GroupInfo[idxGroup].MaximumProcessorCount;

        /* Process the active groups. */
        uint32_t cActive   = 0;
        uint32_t cInactive = 0;
        uint32_t idxCpu    = 0;
        uint32_t idxCpuSetNextInactive = g_cRtMpWinMaxCpus - 1;
        for (idxGroup = 0; idxGroup < uBuf.Info.Group.ActiveGroupCount; idxGroup++)
        {
            PROCESSOR_GROUP_INFO const *pGroupInfo = &uBuf.Info.Group.GroupInfo[idxGroup];
            g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = pGroupInfo->MaximumProcessorCount;
            g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = pGroupInfo->ActiveProcessorCount;
            for (uint32_t idxMember = 0; idxMember < pGroupInfo->MaximumProcessorCount; idxMember++)
            {
                if (pGroupInfo->ActiveProcessorMask & RT_BIT_64(idxMember))
                {
                    g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpu;
                    g_aidRtMpWinByCpuSetIdx[idxCpu] = idxCpu;
                    idxCpu++;
                    cActive++;
                }
                else
                {
                    if (idxCpuSetNextInactive >= idxCpu)
                    {
                        g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
                        g_aidRtMpWinByCpuSetIdx[idxCpuSetNextInactive] = idxCpuSetNextInactive;
                        idxCpuSetNextInactive--;
                    }
                    cInactive++;
                }
            }
        }
        g_cRtMpWinActiveCpus = cActive;
        Assert(cActive + cInactive <= g_cRtMpWinMaxCpus);
        Assert(idxCpu <= idxCpuSetNextInactive + 1);
        Assert(idxCpu <= g_cRtMpWinMaxCpus);

        /* Just in case the 2nd assumption doesn't hold true and there are inactive groups. */
        for (; idxGroup < uBuf.Info.Group.MaximumGroupCount; idxGroup++)
        {
            DWORD cMaxMembers = g_pfnGetMaximumProcessorCount(idxGroup);
            g_aRtMpWinCpuGroups[idxGroup].cMaxCpus    = cMaxMembers;
            g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = 0;
            for (uint32_t idxMember = 0; idxMember < cMaxMembers; idxMember++)
            {
                if (idxCpuSetNextInactive >= idxCpu)
                {
                    g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxCpuSetNextInactive;
                    g_aidRtMpWinByCpuSetIdx[idxCpuSetNextInactive] = idxCpuSetNextInactive;
                    idxCpuSetNextInactive--;
                }
                cInactive++;
            }
        }
        Assert(cActive + cInactive <= g_cRtMpWinMaxCpus);
        Assert(idxCpu <= idxCpuSetNextInactive + 1);
    }
    else
    {
        /* Legacy: */
        GetSystemInfo(&uBuf.SysInfo);
        g_cRtMpWinMaxCpuGroups              = 1;
        g_cRtMpWinMaxCpus                   = uBuf.SysInfo.dwNumberOfProcessors;
        g_aRtMpWinCpuGroups[0].cMaxCpus     = uBuf.SysInfo.dwNumberOfProcessors;
        g_aRtMpWinCpuGroups[0].cActiveCpus  = uBuf.SysInfo.dwNumberOfProcessors;

        for (uint32_t idxMember = 0; idxMember < uBuf.SysInfo.dwNumberOfProcessors; idxMember++)
        {
            g_aRtMpWinCpuGroups[0].aidxCpuSetMembers[idxMember] = idxMember;
            g_aidRtMpWinByCpuSetIdx[idxMember] = idxMember;
        }
    }

    AssertFatalMsg(g_cRtMpWinMaxCpus <= RTCPUSET_MAX_CPUS,
                   ("g_cRtMpWinMaxCpus=%u (%#x); RTCPUSET_MAX_CPUS=%u\n", g_cRtMpWinMaxCpus, g_cRtMpWinMaxCpus, RTCPUSET_MAX_CPUS));

    g_cbRtMpWinGrpRelBuf = sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)
                         + (g_cRtMpWinMaxCpuGroups + 2) * sizeof(PROCESSOR_GROUP_INFO);

    /*
     * Get information about cores.
     *
     * Note! This will only give us info about active processors according to
     *       MSDN, we'll just have to hope that CPUs aren't hotplugged after we
     *       initialize here (or that the API consumers don't care too much).
     */
    /** @todo A hot CPU plug event would be nice. */
    g_cRtMpWinMaxCpuCores = g_cRtMpWinMaxCpus;
    if (g_pfnGetLogicalProcessorInformationEx)
    {
        /* Query the information. */
        DWORD cbData = sizeof(uBuf);
        AssertFatalMsg(g_pfnGetLogicalProcessorInformationEx(RelationProcessorCore, &uBuf.Info, &cbData) != FALSE,
                       ("last error = %u, cbData = %u (in %u)\n", GetLastError(), cbData, sizeof(uBuf)));
        g_cRtMpWinMaxCpuCores = 0;
        for (uint32_t off = 0; off < cbData; )
        {
            SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pCur = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)&uBuf.abPaddingG[off];
            AssertFatalMsg(pCur->Relationship == RelationProcessorCore,
                           ("off = %#x, Relationship = %u, expected %u!\n", off, pCur->Relationship, RelationProcessorCore));
            g_cRtMpWinMaxCpuCores++;
            off += pCur->Size;
        }

#if ARCH_BITS == 32
        if (g_cRtMpWinMaxCpuCores > g_cRtMpWinMaxCpus)
        {
            /** @todo WOW64 trouble where the emulation environment has folded the high
             *        processor masks (63..32) into the low (31..0), hiding some
             *        processors from us.  Currently we don't deal with that. */
            g_cRtMpWinMaxCpuCores = g_cRtMpWinMaxCpus;
        }
        else
            AssertStmt(g_cRtMpWinMaxCpuCores > 0, g_cRtMpWinMaxCpuCores = g_cRtMpWinMaxCpus);
#else
        AssertStmt(g_cRtMpWinMaxCpuCores > 0 && g_cRtMpWinMaxCpuCores <= g_cRtMpWinMaxCpus,
                   g_cRtMpWinMaxCpuCores = g_cRtMpWinMaxCpus);
#endif
    }
    else
    {
        /*
         * Sadly, on XP and Server 2003, even if the API is present, it does not tell us
         * how many physical cores there are (any package will look like a single core).
         * That is worse than not using the API at all, so just skip it unless it's Vista+.
         */
        if (   g_pfnGetLogicalProcessorInformation
            && g_WinOsInfoEx.dwPlatformId == VER_PLATFORM_WIN32_NT
            && g_WinOsInfoEx.dwMajorVersion >= 6)
        {
            /* Query the info. */
            DWORD                                   cbSysProcInfo = _4K;
            PSYSTEM_LOGICAL_PROCESSOR_INFORMATION   paSysInfo = NULL;
            BOOL                                    fRc = FALSE;
            do
            {
                cbSysProcInfo = RT_ALIGN_32(cbSysProcInfo, 256);
                void *pv = RTMemRealloc(paSysInfo, cbSysProcInfo);
                if (!pv)
                    break;
                paSysInfo = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)pv;
                fRc = g_pfnGetLogicalProcessorInformation(paSysInfo, &cbSysProcInfo);
            } while (!fRc && GetLastError() == ERROR_INSUFFICIENT_BUFFER);
            if (fRc)
            {
                /* Count the cores in the result. */
                g_cRtMpWinMaxCpuCores = 0;
                uint32_t i = cbSysProcInfo / sizeof(paSysInfo[0]);
                while (i-- > 0)
                    if (paSysInfo[i].Relationship == RelationProcessorCore)
                        g_cRtMpWinMaxCpuCores++;

                AssertStmt(g_cRtMpWinMaxCpuCores > 0 && g_cRtMpWinMaxCpuCores <= g_cRtMpWinMaxCpus,
                           g_cRtMpWinMaxCpuCores = g_cRtMpWinMaxCpus);
            }
            RTMemFree(paSysInfo);
        }
    }

    return VINF_SUCCESS;
}