Code Example #1
File: heapoffset.cpp Project: jeppeter/vbox
RTDECL(size_t) RTHeapOffsetSize(RTHEAPOFFSET hHeap, void *pv)
{
    PRTHEAPOFFSETINTERNAL pHeapInt;
    PRTHEAPOFFSETBLOCK pBlock;
    size_t cbBlock;

    /*
     * Validate input.
     */
    if (!pv)
        return 0;
    AssertPtrReturn(pv, 0);
    AssertReturn(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv, 0);

    /*
     * Get the block and heap. If in strict mode, validate these.
     */
    pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
    pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
    ASSERT_BLOCK_USED(pHeapInt, pBlock);
    ASSERT_ANCHOR(pHeapInt);
    Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap);

    /*
     * Calculate the block size.
     */
    cbBlock = (pBlock->offNext ? pBlock->offNext : pHeapInt->cbHeap)
            - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
    return cbBlock;
}
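A minimal usage sketch of the API shown above, assuming the RTHeapOffset* declarations live in <iprt/heap.h> (their usual home) and a caller-supplied backing buffer; note the returned size may exceed the request because the heap rounds up to its block granularity:

#include <iprt/heap.h>      /* assumed home of the RTHeapOffset* API */

/* Sketch: allocate from an offset heap and query the real block size. */
static void demoHeapOffsetSize(void)
{
    static uint8_t s_abMem[64*1024];
    RTHEAPOFFSET   hHeap;
    if (RT_FAILURE(RTHeapOffsetInit(&hHeap, s_abMem, sizeof(s_abMem))))
        return;

    void *pv = RTHeapOffsetAlloc(hHeap, 100, 0 /* default alignment */);
    if (pv)
    {
        /* May be >= 100: the allocator can round up to its block granularity. */
        size_t cbReal = RTHeapOffsetSize(hHeap, pv);
        NOREF(cbReal);
        RTHeapOffsetFree(hHeap, pv);
    }
}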
Code Example #2
RTDECL(size_t) RTHeapSimpleSize(RTHEAPSIMPLE hHeap, void *pv)
{
    PRTHEAPSIMPLEINTERNAL pHeapInt;
    PRTHEAPSIMPLEBLOCK pBlock;
    size_t cbBlock;

    /*
     * Validate input.
     */
    if (!pv)
        return 0;
    AssertPtrReturn(pv, 0);
    AssertReturn(RT_ALIGN_P(pv, RTHEAPSIMPLE_ALIGNMENT) == pv, 0);

    /*
     * Get the block and heap. If in strict mode, validate these.
     */
    pBlock = (PRTHEAPSIMPLEBLOCK)pv - 1;
    pHeapInt = pBlock->pHeap;
    ASSERT_BLOCK_USED(pHeapInt, pBlock);
    ASSERT_ANCHOR(pHeapInt);
    Assert(pHeapInt == (PRTHEAPSIMPLEINTERNAL)hHeap || !hHeap);

    /*
     * Calculate the block size.
     */
    cbBlock = (pBlock->pNext ? (uintptr_t)pBlock->pNext : (uintptr_t)pHeapInt->pvEnd)
            - (uintptr_t)pBlock - sizeof(RTHEAPSIMPLEBLOCK);
    return cbBlock;
}
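Functionally the two size routines are twins; the difference is what the block header stores. RTHEAPOFFSETBLOCK keeps offsets (offNext, an anchor offset), so an offset heap stays valid after being copied or mapped at another address, which code example #7 below exploits; RTHEAPSIMPLEBLOCK keeps raw pointers (pNext, pHeap), so a moved simple heap must first be fixed up with RTHeapSimpleRelocate, as in code example #11. A hedged sketch of the offset-heap property, modelled directly on example #7:

#include <iprt/heap.h>      /* assumed */
#include <iprt/string.h>    /* memcpy (or <string.h>) */

/* Sketch: an offset heap survives a plain memcpy because its links are
   offsets, not absolute pointers (cf. code example #7 below). */
static uint8_t s_abOrig[64*1024];
static uint8_t s_abCopy[64*1024];

static void demoRelocatableHeap(void)
{
    RTHEAPOFFSET hHeap;
    if (RT_FAILURE(RTHeapOffsetInit(&hHeap, s_abOrig, sizeof(s_abOrig))))
        return;
    void *pv = RTHeapOffsetAlloc(hHeap, 32, 0);

    memcpy(s_abCopy, s_abOrig, sizeof(s_abOrig));
    uintptr_t    offDelta  = (uintptr_t)&s_abCopy[0] - (uintptr_t)&s_abOrig[0];
    RTHEAPOFFSET hHeapCopy = (RTHEAPOFFSET)((uintptr_t)hHeap + offDelta);

    /* The copy is immediately usable; no relocation call is needed. */
    if (pv)
        RTHeapOffsetFree(hHeapCopy, (uint8_t *)pv + offDelta);
}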
Code Example #3
File: memcache.cpp Project: miguelinux/vbox
/**
 * Grows the cache.
 *
 * @returns IPRT status code.
 * @param   pThis               The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)
    {
        /*
         * Allocate and initialize the new page.
         *
         * We put the constructor bitmap at the lower end right after cFree.
         * We then push the object array to the end of the page and place the
         * allocation bitmap below it.  The hope is to increase the chance that
         * the allocation bitmap is in a different cache line than cFree since
         * this increases performance markably when lots of threads are beating
         * on the cache.
         */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
        if (pPage)
        {
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            ASMMemZeroPage(pPage);
            pPage->pCache       = pThis;
            pPage->pNext        = NULL;
            pPage->cFree        = cObjects;
            pPage->cObjects     = cObjects;
            uint8_t *pb = (uint8_t *)(pPage + 1);
            pb = RT_ALIGN_PT(pb, 8, uint8_t *);
            pPage->pbmCtor      = pb;
            pb = (uint8_t *)pPage + PAGE_SIZE - pThis->cbObject * cObjects;
            pPage->pbObjects    = pb;   Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
            pb -= pThis->cBits / 8;
            pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
            pPage->pbmAlloc     = pb;
            Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 <= (uintptr_t)pPage->pbmAlloc);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr(&pThis->pPageHint, pPage);

            /* Link the page in at the end of the list. */
            ASMAtomicWritePtr(pThis->ppPageNext, pPage);
            pThis->ppPageNext = &pPage->pNext;

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}
Code Example #4
/**
 * Basic API checks.
 * We'll return if any of these fails.
 */
static void tst1(void)
{
    RTTestISub("Basics");

    /* Create one without constructor or destructor. */
    uint32_t const cObjects = PAGE_SIZE * 2 / 256;
    RTMEMCACHE hMemCache;
    RTTESTI_CHECK_RC_RETV(RTMemCacheCreate(&hMemCache, 256, cObjects, 32, NULL, NULL, NULL, 0 /*fFlags*/), VINF_SUCCESS);
    RTTESTI_CHECK_RETV(hMemCache != NIL_RTMEMCACHE);

    /* Allocate a bit and free it again. */
    void *pv = NULL;
    RTTESTI_CHECK_RC_RETV(RTMemCacheAllocEx(hMemCache, &pv), VINF_SUCCESS);
    RTTESTI_CHECK_RETV(pv != NULL);
    RTTESTI_CHECK_RETV(RT_ALIGN_P(pv, 32) == pv);
    RTMemCacheFree(hMemCache, pv);

    RTTESTI_CHECK((pv = RTMemCacheAlloc(hMemCache)) != NULL);
    RTMemCacheFree(hMemCache, pv);

    /* Allocate everything and free it again, checking size constraints. */
    for (uint32_t iLoop = 0; iLoop < 20; iLoop++)
    {
        /* Allocate everything. */
        void *apv[cObjects];
        for (uint32_t i = 0; i < cObjects; i++)
        {
            apv[i] = NULL;
            RTTESTI_CHECK_RC(RTMemCacheAllocEx(hMemCache, &apv[i]), VINF_SUCCESS);
        }

        /* Check that we've got it all. */
        int rc;
        RTTESTI_CHECK_RC(rc = RTMemCacheAllocEx(hMemCache, &pv), VERR_MEM_CACHE_MAX_SIZE);
        if (RT_SUCCESS(rc))
            RTMemCacheFree(hMemCache, pv);

        RTTESTI_CHECK((pv = RTMemCacheAlloc(hMemCache)) == NULL);
        RTMemCacheFree(hMemCache, pv);

        /* Free all the allocations. */
        for (uint32_t i = 0; i < cObjects; i++)
        {
            RTMemCacheFree(hMemCache, apv[i]);

            RTTESTI_CHECK((pv = RTMemCacheAlloc(hMemCache)) != NULL);
            RTMemCacheFree(hMemCache, pv);
        }
    }

    /* Destroy it. */
    RTTESTI_CHECK_RC(RTMemCacheDestroy(hMemCache), VINF_SUCCESS);
    RTTESTI_CHECK_RC(RTMemCacheDestroy(NIL_RTMEMCACHE), VINF_SUCCESS);
}
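tst1() only exercises the NULL constructor/destructor path. When callbacks are supplied, the cache runs the constructor once per object before it is first handed out (that is what the pbmCtor bitmap in code example #3 tracks). A sketch, with the callback shapes taken as assumptions to be checked against iprt/memcache.h:

#include <iprt/memcache.h>  /* assumed */
#include <iprt/string.h>

/* Assumed callback shape: int (RTMEMCACHE, void *pvObj, void *pvUser). */
static DECLCALLBACK(int) demoCtor(RTMEMCACHE hMemCache, void *pvObj, void *pvUser)
{
    NOREF(hMemCache); NOREF(pvUser);
    memset(pvObj, 0, 256);          /* object size passed to RTMemCacheCreate */
    return VINF_SUCCESS;
}

static void demoCtorCache(void)
{
    RTMEMCACHE hMemCache;
    /* Same argument values as tst1() above, with a constructor hooked in. */
    int rc = RTMemCacheCreate(&hMemCache, 256, 32, 32, demoCtor, NULL /*pfnDtor*/,
                              NULL /*pvUser*/, 0 /*fFlags*/);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTMemCacheAlloc(hMemCache); /* zeroed by demoCtor on first use */
        RTMemCacheFree(hMemCache, pv);
        RTMemCacheDestroy(hMemCache);
    }
}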
Code Example #5
/**
 * Initializes a PDM critical section for internal use.
 *
 * The PDM critical sections are derived from the IPRT critical sections, but
 * work in GC as well.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   pCritSect       Pointer to the critical section.
 * @param   RT_SRC_POS_DECL Use RT_SRC_POS.
 * @param   pszNameFmt      Format string for naming the critical section.  For
 *                          statistics and lock validation.
 * @param   ...             Arguments for the format string.
 * @thread  EMT(0)
 */
VMMR3DECL(int) PDMR3CritSectInit(PVM pVM, PPDMCRITSECT pCritSect, RT_SRC_POS_DECL, const char *pszNameFmt, ...)
{
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    AssertCompile(sizeof(pCritSect->padding) >= sizeof(pCritSect->s));
#endif
    Assert(RT_ALIGN_P(pCritSect, sizeof(uintptr_t)) == pCritSect);
    va_list va;
    va_start(va, pszNameFmt);
    int rc = pdmR3CritSectInitOne(pVM, &pCritSect->s, pCritSect, RT_SRC_POS_ARGS, pszNameFmt, va);
    va_end(va);
    return rc;
}
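A hedged usage sketch: callers typically pass RT_SRC_POS for the RT_SRC_POS_DECL arguments plus a printf-style name, and then guard shared state with the matching enter/leave calls. The device name and helper below are illustrative only:

/* Sketch: a device instance creating and using its own critical section.
   "MyDev" and iInstance are hypothetical names, not from the source above. */
static int demoInitDevCritSect(PVM pVM, PPDMCRITSECT pCritSect, uint32_t iInstance)
{
    int rc = PDMR3CritSectInit(pVM, pCritSect, RT_SRC_POS, "MyDev#%u", iInstance);
    if (RT_SUCCESS(rc))
    {
        PDMCritSectEnter(pCritSect, VERR_IGNORED /* rcBusy; R3 callers block */);
        /* ... touch state shared with R0/GC code ... */
        PDMCritSectLeave(pCritSect);
    }
    return rc;
}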
Code Example #6
File: heapoffset.cpp Project: jeppeter/vbox
RTDECL(void) RTHeapOffsetFree(RTHEAPOFFSET hHeap, void *pv)
{
    PRTHEAPOFFSETINTERNAL pHeapInt;
    PRTHEAPOFFSETBLOCK pBlock;

    /*
     * Validate input.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv);

    /*
     * Get the block and heap. If in strict mode, validate these.
     */
    pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
    pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
    ASSERT_BLOCK_USED(pHeapInt, pBlock);
    ASSERT_ANCHOR(pHeapInt);
    Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap);

#ifdef RTHEAPOFFSET_FREE_POISON
    /*
     * Poison the block.
     */
    const size_t cbBlock = (pBlock->offNext ? pBlock->offNext : pHeapInt->cbHeap)
                         - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
    memset(pBlock + 1, RTHEAPOFFSET_FREE_POISON, cbBlock);
#endif

    /*
     * Call worker which does the actual job.
     */
    rtHeapOffsetFreeBlock(pHeapInt, pBlock);
}
Code Example #7
int main(int argc, char *argv[])
{
    /*
     * Init runtime.
     */
    RTTEST hTest;
    int rc = RTTestInitAndCreate("tstRTHeapOffset", &hTest);
    if (rc)
        return rc;
    RTTestBanner(hTest);

    /*
     * Create a heap.
     */
    RTTestSub(hTest, "Basics");
    static uint8_t s_abMem[128*1024];
    RTHEAPOFFSET Heap;
    RTTESTI_CHECK_RC(rc = RTHeapOffsetInit(&Heap, &s_abMem[1], sizeof(s_abMem) - 1), VINF_SUCCESS);
    if (RT_FAILURE(rc))
        return RTTestSummaryAndDestroy(hTest);

    /*
     * Try allocate.
     */
    static struct TstHeapOffsetOps
    {
        size_t      cb;
        unsigned    uAlignment;
        void       *pvAlloc;
        unsigned    iFreeOrder;
    } s_aOps[] =
    {
        {        16,          0,    NULL,  0 },  // 0
        {        16,          4,    NULL,  1 },
        {        16,          8,    NULL,  2 },
        {        16,         16,    NULL,  5 },
        {        16,         32,    NULL,  4 },
        {        32,          0,    NULL,  3 },  // 5
        {        31,          0,    NULL,  6 },
        {      1024,          0,    NULL,  8 },
        {      1024,         32,    NULL, 10 },
        {      1024,         32,    NULL, 12 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 13 },  // 10
        {      1024,         32,    NULL,  9 },
        { PAGE_SIZE,         32,    NULL, 11 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 14 },
        {        16,          0,    NULL, 15 },
        {        9,           0,    NULL,  7 },  // 15
        {        16,          0,    NULL,  7 },
        {        36,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {     12344,          0,    NULL,  7 },
        {        50,          0,    NULL,  7 },  // 20
        {        16,          0,    NULL,  7 },
    };
    uint32_t i;
    RTHeapOffsetDump(Heap, (PFNRTHEAPOFFSETPRINTF)RTPrintf); /** @todo Add some detail info output with a signature identical to RTPrintf. */
    size_t cbBefore = RTHeapOffsetGetFreeSize(Heap);
    static char const s_szFill[] = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";

    /* allocate */
    for (i = 0; i < RT_ELEMENTS(s_aOps); i++)
    {
        s_aOps[i].pvAlloc = RTHeapOffsetAlloc(Heap, s_aOps[i].cb, s_aOps[i].uAlignment);
        RTTESTI_CHECK_MSG(s_aOps[i].pvAlloc, ("RTHeapOffsetAlloc(%p, %#x, %#x,) -> NULL i=%d\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i));
        if (!s_aOps[i].pvAlloc)
            return RTTestSummaryAndDestroy(hTest);

        memset(s_aOps[i].pvAlloc, s_szFill[i], s_aOps[i].cb);
        RTTESTI_CHECK_MSG(RT_ALIGN_P(s_aOps[i].pvAlloc, (s_aOps[i].uAlignment ? s_aOps[i].uAlignment : 8)) == s_aOps[i].pvAlloc,
                          ("RTHeapOffsetAlloc(%p, %#x, %#x,) -> %p\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i));
        if (!s_aOps[i].pvAlloc)
            return RTTestSummaryAndDestroy(hTest);
    }

    /* free and allocate the same node again. */
    for (i = 0; i < RT_ELEMENTS(s_aOps); i++)
    {
        if (!s_aOps[i].pvAlloc)
            continue;
        //RTPrintf("debug: i=%d pv=%#x cb=%#zx align=%#zx cbReal=%#zx\n", i, s_aOps[i].pvAlloc,
        //         s_aOps[i].cb, s_aOps[i].uAlignment, RTHeapOffsetSize(Heap, s_aOps[i].pvAlloc));
        size_t cbBeforeSub = RTHeapOffsetGetFreeSize(Heap);
        RTHeapOffsetFree(Heap, s_aOps[i].pvAlloc);
        size_t cbAfterSubFree = RTHeapOffsetGetFreeSize(Heap);

        void *pv;
        pv = RTHeapOffsetAlloc(Heap, s_aOps[i].cb, s_aOps[i].uAlignment);
        RTTESTI_CHECK_MSG(pv, ("RTHeapOffsetAlloc(%p, %#x, %#x,) -> NULL i=%d\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i));
        if (!pv)
            return RTTestSummaryAndDestroy(hTest);
        //RTPrintf("debug: i=%d pv=%p cbReal=%#zx cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx \n", i, pv, RTHeapOffsetSize(Heap, pv),
        //         cbBeforeSub, cbAfterSubFree, RTHeapOffsetGetFreeSize(Heap));

        if (pv != s_aOps[i].pvAlloc)
            RTTestIPrintf(RTTESTLVL_ALWAYS, "Warning: Free+Alloc returned different address. new=%p old=%p i=%d\n", pv, s_aOps[i].pvAlloc, i);
        s_aOps[i].pvAlloc = pv;
        size_t cbAfterSubAlloc = RTHeapOffsetGetFreeSize(Heap);
        if (cbBeforeSub != cbAfterSubAlloc)
        {
            RTTestIPrintf(RTTESTLVL_ALWAYS, "Warning: cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx. i=%d\n",
                          cbBeforeSub, cbAfterSubFree, cbAfterSubAlloc, i);
            //return 1; - won't work correctly until we start creating free block instead of donating memory on alignment.
        }
    }

    /* make a copy of the heap and the to-be-freed list. */
    static uint8_t s_abMemCopy[sizeof(s_abMem)];
    memcpy(s_abMemCopy, s_abMem, sizeof(s_abMem));
    uintptr_t    offDelta  = (uintptr_t)&s_abMemCopy[0] - (uintptr_t)&s_abMem[0];
    RTHEAPOFFSET hHeapCopy = (RTHEAPOFFSET)((uintptr_t)Heap + offDelta);
    static struct TstHeapOffsetOps s_aOpsCopy[RT_ELEMENTS(s_aOps)];
    memcpy(&s_aOpsCopy[0], &s_aOps[0], sizeof(s_aOps));

    /* free it in a specific order. */
    int cFreed = 0;
    for (i = 0; i < RT_ELEMENTS(s_aOps); i++)
    {
        unsigned j;
        for (j = 0; j < RT_ELEMENTS(s_aOps); j++)
        {
            if (    s_aOps[j].iFreeOrder != i
                ||  !s_aOps[j].pvAlloc)
                continue;
            //RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, RTHeapOffsetGetFreeSize(Heap), s_aOps[j].cb, s_aOps[j].pvAlloc);
            RTHeapOffsetFree(Heap, s_aOps[j].pvAlloc);
            s_aOps[j].pvAlloc = NULL;
            cFreed++;
        }
    }
    RTTESTI_CHECK(cFreed == RT_ELEMENTS(s_aOps));
    RTTestIPrintf(RTTESTLVL_ALWAYS, "i=done free=%d\n", RTHeapOffsetGetFreeSize(Heap));

    /* check that we're back at the right amount of free memory. */
    size_t cbAfter = RTHeapOffsetGetFreeSize(Heap);
    if (cbBefore != cbAfter)
    {
        RTTestIPrintf(RTTESTLVL_ALWAYS,
                      "Warning: Either we've split out an alignment chunk at the start, or we've got\n"
                      "         an alloc/free accounting bug: cbBefore=%d cbAfter=%d\n", cbBefore, cbAfter);
        RTHeapOffsetDump(Heap, (PFNRTHEAPOFFSETPRINTF)RTPrintf);
    }

    /* relocate and free the bits in heap2 now. */
    RTTestSub(hTest, "Relocated Heap");
    /* free it in a specific order. */
    int cFreed2 = 0;
    for (i = 0; i < RT_ELEMENTS(s_aOpsCopy); i++)
    {
        unsigned j;
        for (j = 0; j < RT_ELEMENTS(s_aOpsCopy); j++)
        {
            if (    s_aOpsCopy[j].iFreeOrder != i
                ||  !s_aOpsCopy[j].pvAlloc)
                continue;
            //RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, RTHeapOffsetGetFreeSize(hHeapCopy), s_aOpsCopy[j].cb, s_aOpsCopy[j].pvAlloc);
            RTHeapOffsetFree(hHeapCopy, (uint8_t *)s_aOpsCopy[j].pvAlloc + offDelta);
            s_aOpsCopy[j].pvAlloc = NULL;
            cFreed2++;
        }
    }
    RTTESTI_CHECK(cFreed2 == RT_ELEMENTS(s_aOpsCopy));

    /* check that we're back at the right amount of free memory. */
    size_t cbAfterCopy = RTHeapOffsetGetFreeSize(hHeapCopy);
    RTTESTI_CHECK_MSG(cbAfterCopy == cbAfter, ("cbAfterCopy=%zu cbAfter=%zu\n", cbAfterCopy, cbAfter));

    /*
     * Use random allocation pattern
     */
    RTTestSub(hTest, "Random Test");
    RTTESTI_CHECK_RC(rc = RTHeapOffsetInit(&Heap, &s_abMem[1], sizeof(s_abMem) - 1), VINF_SUCCESS);
    if (RT_FAILURE(rc))
        return RTTestSummaryAndDestroy(hTest);

    RTRAND hRand;
    RTTESTI_CHECK_RC(rc = RTRandAdvCreateParkMiller(&hRand), VINF_SUCCESS);
    if (RT_FAILURE(rc))
        return RTTestSummaryAndDestroy(hTest);
#if 0
    RTRandAdvSeed(hRand, 42);
#else
    RTRandAdvSeed(hRand, RTTimeNanoTS());
#endif

    static struct
    {
        size_t  cb;
        void   *pv;
    } s_aHistory[1536];
    RT_ZERO(s_aHistory);

    for (unsigned iTest = 0; iTest < 131072; iTest++)
    {
        i = RTRandAdvU32Ex(hRand, 0, RT_ELEMENTS(s_aHistory) - 1);
        if (!s_aHistory[i].pv)
        {
            uint32_t uAlignment = 1 << RTRandAdvU32Ex(hRand, 0, 7);
            s_aHistory[i].cb = RTRandAdvU32Ex(hRand, 9, 1024);
            s_aHistory[i].pv = RTHeapOffsetAlloc(Heap, s_aHistory[i].cb, uAlignment);
            if (!s_aHistory[i].pv)
            {
                s_aHistory[i].cb = 9;
                s_aHistory[i].pv = RTHeapOffsetAlloc(Heap, s_aHistory[i].cb, 0);
            }
            if (s_aHistory[i].pv)
                memset(s_aHistory[i].pv, 0xbb, s_aHistory[i].cb);
        }
        else
        {
            RTHeapOffsetFree(Heap, s_aHistory[i].pv);
            s_aHistory[i].pv = NULL;
        }

        if ((iTest % 7777) == 7776)
        {
            /* exhaust the heap */
            for (i = 0; i < RT_ELEMENTS(s_aHistory) && RTHeapOffsetGetFreeSize(Heap) >= 256; i++)
                if (!s_aHistory[i].pv)
                {
                    s_aHistory[i].cb = RTRandAdvU32Ex(hRand, 256, 16384);
                    s_aHistory[i].pv = RTHeapOffsetAlloc(Heap, s_aHistory[i].cb, 0);
                }
            for (i = 0; i < RT_ELEMENTS(s_aHistory) && RTHeapOffsetGetFreeSize(Heap); i++)
            {
                if (!s_aHistory[i].pv)
                {
                    s_aHistory[i].cb = 1;
                    s_aHistory[i].pv = RTHeapOffsetAlloc(Heap, s_aHistory[i].cb, 1);
                }
                if (s_aHistory[i].pv)
                    memset(s_aHistory[i].pv, 0x55, s_aHistory[i].cb);
            }
            RTTESTI_CHECK_MSG(RTHeapOffsetGetFreeSize(Heap) == 0, ("%zu\n", RTHeapOffsetGetFreeSize(Heap)));
        }
        else if ((iTest % 7777) == 1111)
        {
            /* free all */
            for (i = 0; i < RT_ELEMENTS(s_aHistory); i++)
            {
                RTHeapOffsetFree(Heap, s_aHistory[i].pv);
                s_aHistory[i].pv = NULL;
            }
            size_t cbAfterRand = RTHeapOffsetGetFreeSize(Heap);
            RTTESTI_CHECK_MSG(cbAfterRand == cbAfter, ("cbAfterRand=%zu cbAfter=%zu\n", cbAfterRand, cbAfter));
        }
    }

    /* free the rest. */
    for (i = 0; i < RT_ELEMENTS(s_aHistory); i++)
    {
        RTHeapOffsetFree(Heap, s_aHistory[i].pv);
        s_aHistory[i].pv = NULL;
    }

    /* check that we're back at the right amount of free memory. */
    size_t cbAfterRand = RTHeapOffsetGetFreeSize(Heap);
    RTTESTI_CHECK_MSG(cbAfterRand == cbAfter, ("cbAfterRand=%zu cbAfter=%zu\n", cbAfterRand, cbAfter));

    return RTTestSummaryAndDestroy(hTest);
}
Code Example #8
File: MMAllHyper.cpp Project: LastRitter/vbox-haiku
/**
 * Free memory allocated using MMHyperAlloc().
 * The caller validates the parameters of this request.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pv          The memory to free.
 * @remark  Try to avoid freeing hyper memory.
 */
static int mmHyperFreeInternal(PVM pVM, void *pv)
{
    Log2(("MMHyperFree: pv=%p\n", pv));
    if (!pv)
        return VINF_SUCCESS;
    AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
                    ("Invalid pointer %p!\n", pv),
                    VERR_INVALID_POINTER);

    /*
     * Get the heap and stats.
     * Validate the chunk at the same time.
     */
    PMMHYPERCHUNK   pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);

    AssertMsgReturn(    (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
                    ||  RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
                    ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
                    ("%p: Not used!\n", pv),
                    VERR_INVALID_POINTER);

    int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
    AssertMsgReturn(    (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
                    && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    /* statistics */
#ifdef VBOX_WITH_STATISTICS
    PMMHYPERSTAT    pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
    AssertMsgReturn(    RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
                    &&  pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#else
    AssertMsgReturn(!pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#endif

    /* The heap structure. */
    PMMHYPERHEAP    pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
    AssertMsgReturn(    !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
                    &&  pChunk->offHeap,
                    ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
                    ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
                    VERR_INVALID_POINTER);
    Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap));

    /* Some more verifications using additional info from pHeap. */
    AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
                    ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(   (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
                    ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
                    (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
                    VERR_INVALID_POINTER);

#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif

#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
    /* calc block size. */
    const uint32_t cbChunk = pChunk->offNext
        ? pChunk->offNext
        : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
#endif
#ifdef MMHYPER_HEAP_FREE_POISON
    /* poison the block */
    memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
#endif

#ifdef MMHYPER_HEAP_FREE_DELAY
# ifdef MMHYPER_HEAP_FREE_POISON
    /*
     * Check poison.
     */
    unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
    while (i-- > 0)
        if (pHeap->aDelayedFrees[i].offChunk)
        {
            PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
            const size_t cb = pCur->offNext
                ? pCur->offNext - sizeof(*pCur)
                : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
            uint8_t *pab = (uint8_t *)(pCur + 1);
            for (unsigned off = 0; off < cb; off++)
                AssertReleaseMsg(pab[off] == 0xCB,
                                 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
                                  pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
        }
# endif /* MMHYPER_HEAP_FREE_POISON */

    /*
     * Delayed freeing.
     */
    int rc = VINF_SUCCESS;
    if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
    {
        PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
        rc = mmHyperFree(pHeap, pChunkFree);
    }
    pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
    pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
    pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);

#else   /* !MMHYPER_HEAP_FREE_DELAY */
    /*
     * Call the worker.
     */
    int rc = mmHyperFree(pHeap, pChunk);
#endif  /* !MMHYPER_HEAP_FREE_DELAY */

    /*
     * Update statistics.
     */
#ifdef VBOX_WITH_STATISTICS
    pStat->cFrees++;
    if (RT_SUCCESS(rc))
    {
        pStat->cbFreed        += cbChunk;
        pStat->cbCurAllocated -= cbChunk;
    }
    else
        pStat->cFailures++;
#endif

    return rc;
}
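The MMHYPER_HEAP_FREE_DELAY path above parks each freed chunk in a small ring and only truly frees the chunk it evicts, so a use-after-free that scribbles on a parked, poisoned chunk trips the 0xCB check on a later free. Distilled into a standalone sketch (the types, ring size, and plain malloc/free are illustrative, not VBox's):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_DELAY_SLOTS 8
static struct { void *pv; size_t cb; } g_aDelayed[DEMO_DELAY_SLOTS];
static unsigned g_iDelayed;

/* Sketch: poison on free, park in a ring, verify + release on eviction. */
static void demoDelayedFree(void *pv, size_t cb)
{
    memset(pv, 0xCB, cb);                       /* poison, as in the code above */
    if (g_aDelayed[g_iDelayed].pv)
    {
        unsigned char *pb = (unsigned char *)g_aDelayed[g_iDelayed].pv;
        for (size_t off = 0; off < g_aDelayed[g_iDelayed].cb; off++)
            assert(pb[off] == 0xCB);            /* a use-after-free trips here */
        free(g_aDelayed[g_iDelayed].pv);        /* the real free happens late */
    }
    g_aDelayed[g_iDelayed].pv = pv;
    g_aDelayed[g_iDelayed].cb = cb;
    g_iDelayed = (g_iDelayed + 1) % DEMO_DELAY_SLOTS;
}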
Code Example #9
File: MMAllHyper.cpp Project: LastRitter/vbox-haiku
/**
 * Allocates one or more pages of memory from the specified heap.
 * The caller validates the parameters of this request.
 *
 * @returns Pointer to the allocated chunk.
 * @returns NULL on failure.
 * @param   pHeap       The heap.
 * @param   cb          Size of the memory block to allocate.
 * @internal
 */
static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb)
{
    Log3(("mmHyperAllocPages: Enter cb=%#x\n", cb));

#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif

    /*
     * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
     */
    if (pHeap->offFreeHead == NIL_OFFSET)
        return NULL;

    /*
     * Page aligned chunks.
     *
     * Page aligned chunks can only be allocated from the last FREE chunk.
     * This is for reasons of simplicity and fragmentation. Page aligned memory
     * must also be allocated in page aligned sizes. Page aligned memory cannot
     * be freed either.
     *
     * So, for this to work, the last FREE chunk needs to end on a page aligned
     * boundary.
     */
    PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail);
    ASSERT_CHUNK_FREE(pHeap, pFree);
    if (    (((uintptr_t)(&pFree->core + 1) + pFree->cb) & (PAGE_OFFSET_MASK - 1))
        ||  pFree->cb + sizeof(MMHYPERCHUNK) < cb)
    {
        Log3(("mmHyperAllocPages: Not enough/no page aligned memory!\n"));
        return NULL;
    }

    void *pvRet;
    if (pFree->cb > cb)
    {
        /*
         * Simple, just cut the top of the free node and return it.
         */
        pFree->cb -= cb;
        pvRet = (char *)(&pFree->core + 1) + pFree->cb;
        AssertMsg(RT_ALIGN_P(pvRet, PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
        Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - cb, -(int)cb));
        pHeap->cbFree -= cb;
        ASSERT_CHUNK_FREE(pHeap, pFree);
        Log3(("mmHyperAllocPages: Allocated from pFree=%p new pFree->cb=%d\n", pFree, pFree->cb));
    }
    else
    {
        /*
         * Unlink the FREE node.
         */
        pvRet = (char *)(&pFree->core + 1) + pFree->cb - cb;
        Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
        pHeap->cbFree -= pFree->cb;

        /* A scrap of spare memory (unlikely)? Add it to the previous chunk. */
        if (pvRet != (void *)pFree)
        {
            AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocate up all the heap with page aligned memory?!?\n"));
            PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&pFree->core));
            pPrev->offNext += (uintptr_t)pvRet - (uintptr_t)pFree;
            AssertMsg(!MMHYPERCHUNK_ISFREE(pPrev), ("Free bug?\n"));
#ifdef VBOX_WITH_STATISTICS
            PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pPrev + pPrev->offStat);
            pStat->cbAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
            pStat->cbCurAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
#endif
            Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
        }

        /* unlink from FREE chain. */
        if (pFree->offPrev)
        {
            pHeap->offFreeTail += pFree->offPrev;
            ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev))->offNext = 0;
        }
        else
        {
            pHeap->offFreeTail = NIL_OFFSET;
            pHeap->offFreeHead = NIL_OFFSET;
        }
        Log3(("mmHyperAllocPages: Unlinked pFree=%d\n", pFree));
    }
    pHeap->offPageAligned = (uintptr_t)pvRet - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
    Log3(("mmHyperAllocPages: Returning %p (page aligned)\n", pvRet));

#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif
    return pvRet;
}
Code Example #10
/**
 *  Entry point.
 */
extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
{

    /*
     * Init runtime.
     */
    RTR3InitExe(argc, &argv, 0);

    /*
     * Create empty VM structure and call MMR3Init().
     */
    PVM         pVM;
    RTR0PTR     pvR0;
    SUPPAGE     aPages[RT_ALIGN_Z(sizeof(*pVM) + NUM_CPUS * sizeof(VMCPU), PAGE_SIZE) >> PAGE_SHIFT];
    int rc = SUPR3Init(NULL);
    if (RT_SUCCESS(rc))
        rc = SUPR3LowAlloc(RT_ELEMENTS(aPages), (void **)&pVM, &pvR0, &aPages[0]);
    if (RT_FAILURE(rc))
    {
        RTPrintf("Fatal error: SUP Failure! rc=%Rrc\n", rc);
        return 1;
    }
    memset(pVM, 0, sizeof(*pVM)); /* wtf? */
    pVM->paVMPagesR3 = aPages;
    pVM->pVMR0 = pvR0;

    static UVM s_UVM;
    PUVM pUVM = &s_UVM;
    pUVM->pVM = pVM;
    pVM->pUVM = pUVM;

    pVM->cCpus = NUM_CPUS;
    pVM->cbSelf = RT_UOFFSETOF(VM, aCpus[pVM->cCpus]);

    rc = STAMR3InitUVM(pUVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = MMR3InitUVM(pUVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = CFGMR3Init(pVM, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        RTPrintf("FAILURE: CFGMR3Init failed. rc=%Rrc\n", rc);
        return 1;
    }

    rc = MMR3Init(pVM);
    if (RT_FAILURE(rc))
    {
        RTPrintf("Fatal error: MMR3Init failed! rc=%Rrc\n", rc);
        return 1;
    }

    /*
     * Try allocate.
     */
    static struct
    {
        size_t      cb;
        unsigned    uAlignment;
        void       *pvAlloc;
        unsigned    iFreeOrder;
    } aOps[] =
    {
        {        16,          0,    NULL,  0 },
        {        16,          4,    NULL,  1 },
        {        16,          8,    NULL,  2 },
        {        16,         16,    NULL,  5 },
        {        16,         32,    NULL,  4 },
        {        32,          0,    NULL,  3 },
        {        31,          0,    NULL,  6 },
        {      1024,          0,    NULL,  8 },
        {      1024,         32,    NULL, 10 },
        {      1024,         32,    NULL, 12 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 13 },
        {      1024,         32,    NULL,  9 },
        { PAGE_SIZE,         32,    NULL, 11 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 14 },
        {        16,          0,    NULL, 15 },
        {        9,           0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {        36,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {     12344,          0,    NULL,  7 },
        {        50,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
    };
    unsigned i;
#ifdef DEBUG
    MMHyperHeapDump(pVM);
#endif
    size_t cbBefore = MMHyperHeapGetFreeSize(pVM);
    static char szFill[] = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";

    /* allocate */
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM, &aOps[i].pvAlloc);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
            return 1;
        }
        memset(aOps[i].pvAlloc, szFill[i], aOps[i].cb);
        if (RT_ALIGN_P(aOps[i].pvAlloc, (aOps[i].uAlignment ? aOps[i].uAlignment : 8)) != aOps[i].pvAlloc)
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %p, invalid alignment!\n", aOps[i].cb, aOps[i].uAlignment, aOps[i].pvAlloc);
            return 1;
        }
    }

    /* free and allocate the same node again. */
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        if (    !aOps[i].pvAlloc
            ||  aOps[i].uAlignment == PAGE_SIZE)
            continue;
        //size_t cbBeforeSub = MMHyperHeapGetFreeSize(pVM);
        rc = MMHyperFree(pVM, aOps[i].pvAlloc);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperFree(, %p,) -> %d i=%d\n", aOps[i].pvAlloc, rc, i);
            return 1;
        }
        //RTPrintf("debug: i=%d cbBeforeSub=%d now=%d\n", i, cbBeforeSub, MMHyperHeapGetFreeSize(pVM));
        void *pv;
        rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM_REQ, &pv);
        if (RT_FAILURE(rc))
        {
            RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
            return 1;
        }
        if (pv != aOps[i].pvAlloc)
        {
            RTPrintf("Failure: Free+Alloc returned different address. new=%p old=%p i=%d (doesn't work with delayed free)\n", pv, aOps[i].pvAlloc, i);
            //return 1;
        }
        aOps[i].pvAlloc = pv;
        #if 0 /* won't work :/ */
        size_t cbAfterSub = MMHyperHeapGetFreeSize(pVM);
        if (cbBeforeSub != cbAfterSub)
        {
            RTPrintf("Failure: cbBeforeSub=%d cbAfterSub=%d. i=%d\n", cbBeforeSub, cbAfterSub, i);
            return 1;
        }
        #endif
    }

    /* free it in a specific order. */
    int cFreed = 0;
    for (i = 0; i < RT_ELEMENTS(aOps); i++)
    {
        unsigned j;
        for (j = 0; j < RT_ELEMENTS(aOps); j++)
        {
            if (    aOps[j].iFreeOrder != i
                ||  !aOps[j].pvAlloc)
                continue;
            RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, MMHyperHeapGetFreeSize(pVM), aOps[j].cb, aOps[j].pvAlloc);
            if (aOps[j].uAlignment == PAGE_SIZE)
                cbBefore -= aOps[j].cb;
            else
            {
                rc = MMHyperFree(pVM, aOps[j].pvAlloc);
                if (RT_FAILURE(rc))
                {
                    RTPrintf("Failure: MMHyperFree(, %p,) -> %d j=%d i=%d\n", aOps[j].pvAlloc, rc, i, j);
                    return 1;
                }
            }
            aOps[j].pvAlloc = NULL;
            cFreed++;
        }
    }
    Assert(cFreed == RT_ELEMENTS(aOps));
    RTPrintf("i=done free=%d\n", MMHyperHeapGetFreeSize(pVM));

    /* check that we're back at the right amount of free memory. */
    size_t cbAfter = MMHyperHeapGetFreeSize(pVM);
    if (cbBefore != cbAfter)
    {
        RTPrintf("Warning: Either we've split out an alignment chunk at the start, or we've got\n"
                 "         an alloc/free accounting bug: cbBefore=%d cbAfter=%d\n", cbBefore, cbAfter);
#ifdef DEBUG
        MMHyperHeapDump(pVM);
#endif
    }

    RTPrintf("tstMMHyperHeap: Success\n");
#ifdef LOG_ENABLED
    RTLogFlush(NULL);
#endif
    return 0;
}
Code Example #11
File: tstRTHeapSimple.cpp Project: miguelinux/vbox
int main(int argc, char *argv[])
{
    /*
     * Init runtime.
     */
    RTTEST hTest;
    int rc = RTTestInitAndCreate("tstRTHeapSimple", &hTest);
    if (rc)
        return rc;
    RTTestBanner(hTest);

    /*
     * Create a heap.
     */
    RTTestSub(hTest, "Basics");
    static uint8_t s_abMem[128*1024];
    RTHEAPSIMPLE Heap;
    RTTESTI_CHECK_RC(rc = RTHeapSimpleInit(&Heap, &s_abMem[1], sizeof(s_abMem) - 1), VINF_SUCCESS);
    if (RT_FAILURE(rc))
        return RTTestSummaryAndDestroy(hTest);

    /*
     * Try allocate.
     */
    static struct TstHeapSimpleOps
    {
        size_t      cb;
        unsigned    uAlignment;
        void       *pvAlloc;
        unsigned    iFreeOrder;
    } s_aOps[] =
    {
        {        16,          0,    NULL,  0 },  // 0
        {        16,          4,    NULL,  1 },
        {        16,          8,    NULL,  2 },
        {        16,         16,    NULL,  5 },
        {        16,         32,    NULL,  4 },
        {        32,          0,    NULL,  3 },  // 5
        {        31,          0,    NULL,  6 },
        {      1024,          0,    NULL,  8 },
        {      1024,         32,    NULL, 10 },
        {      1024,         32,    NULL, 12 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 13 },  // 10
        {      1024,         32,    NULL,  9 },
        { PAGE_SIZE,         32,    NULL, 11 },
        { PAGE_SIZE,  PAGE_SIZE,    NULL, 14 },
        {        16,          0,    NULL, 15 },
        {        9,           0,    NULL,  7 },  // 15
        {        16,          0,    NULL,  7 },
        {        36,          0,    NULL,  7 },
        {        16,          0,    NULL,  7 },
        {     12344,          0,    NULL,  7 },
        {        50,          0,    NULL,  7 },  // 20
        {        16,          0,    NULL,  7 },
    };
    unsigned i;
    RTHeapSimpleDump(Heap, (PFNRTHEAPSIMPLEPRINTF)RTPrintf); /** @todo Add some detail info output with a signature identical to RTPrintf. */
    size_t cbBefore = RTHeapSimpleGetFreeSize(Heap);
    static char szFill[] = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";

    /* allocate */
    for (i = 0; i < RT_ELEMENTS(s_aOps); i++)
    {
        s_aOps[i].pvAlloc = RTHeapSimpleAlloc(Heap, s_aOps[i].cb, s_aOps[i].uAlignment);
        RTTESTI_CHECK_MSG(s_aOps[i].pvAlloc, ("RTHeapSimpleAlloc(%p, %#x, %#x,) -> NULL i=%d\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i));
        if (!s_aOps[i].pvAlloc)
            return RTTestSummaryAndDestroy(hTest);

        memset(s_aOps[i].pvAlloc, szFill[i], s_aOps[i].cb);
        RTTESTI_CHECK_MSG(RT_ALIGN_P(s_aOps[i].pvAlloc, (s_aOps[i].uAlignment ? s_aOps[i].uAlignment : 8)) == s_aOps[i].pvAlloc,
                          ("RTHeapSimpleAlloc(%p, %#x, %#x,) -> %p\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i));
        if (!s_aOps[i].pvAlloc)
            return RTTestSummaryAndDestroy(hTest);
    }

    /* free and allocate the same node again. */
    for (i = 0; i < RT_ELEMENTS(s_aOps); i++)
    {
        if (!s_aOps[i].pvAlloc)
            continue;
        //RTPrintf("debug: i=%d pv=%#x cb=%#zx align=%#zx cbReal=%#zx\n", i, s_aOps[i].pvAlloc,
        //         s_aOps[i].cb, s_aOps[i].uAlignment, RTHeapSimpleSize(Heap, s_aOps[i].pvAlloc));
        size_t cbBeforeSub = RTHeapSimpleGetFreeSize(Heap);
        RTHeapSimpleFree(Heap, s_aOps[i].pvAlloc);
        size_t cbAfterSubFree = RTHeapSimpleGetFreeSize(Heap);

        void *pv;
        pv = RTHeapSimpleAlloc(Heap, s_aOps[i].cb, s_aOps[i].uAlignment);
        RTTESTI_CHECK_MSG(pv, ("RTHeapSimpleAlloc(%p, %#x, %#x,) -> NULL i=%d\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i));
        if (!pv)
            return RTTestSummaryAndDestroy(hTest);
        //RTPrintf("debug: i=%d pv=%p cbReal=%#zx cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx \n", i, pv, RTHeapSimpleSize(Heap, pv),
        //         cbBeforeSub, cbAfterSubFree, RTHeapSimpleGetFreeSize(Heap));
        if (pv != s_aOps[i].pvAlloc)
            RTTestIPrintf(RTTESTLVL_ALWAYS, "Warning: Free+Alloc returned different address. new=%p old=%p i=%d\n", pv, s_aOps[i].pvAlloc, i);
        s_aOps[i].pvAlloc = pv;
        size_t cbAfterSubAlloc = RTHeapSimpleGetFreeSize(Heap);
        if (cbBeforeSub != cbAfterSubAlloc)
        {
            RTTestIPrintf(RTTESTLVL_ALWAYS, "Warning: cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx. i=%d\n",
                          cbBeforeSub, cbAfterSubFree, cbAfterSubAlloc, i);
            //return 1; - won't work correctly until we start creating free block instead of donating memory on alignment.
        }
    }

    /* make a copy of the heap and the to-be-freed list. */
    static uint8_t s_abMemCopy[sizeof(s_abMem)];
    memcpy(s_abMemCopy, s_abMem, sizeof(s_abMem));
    uintptr_t    offDelta  = (uintptr_t)&s_abMemCopy[0] - (uintptr_t)&s_abMem[0];
    RTHEAPSIMPLE hHeapCopy = (RTHEAPSIMPLE)((uintptr_t)Heap + offDelta);
    static struct TstHeapSimpleOps s_aOpsCopy[RT_ELEMENTS(s_aOps)];
    memcpy(&s_aOpsCopy[0], &s_aOps[0], sizeof(s_aOps));

    /* free it in a specific order. */
    int cFreed = 0;
    for (i = 0; i < RT_ELEMENTS(s_aOps); i++)
    {
        unsigned j;
        for (j = 0; j < RT_ELEMENTS(s_aOps); j++)
        {
            if (    s_aOps[j].iFreeOrder != i
                ||  !s_aOps[j].pvAlloc)
                continue;
            //RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, RTHeapSimpleGetFreeSize(Heap), s_aOps[j].cb, s_aOps[j].pvAlloc);
            RTHeapSimpleFree(Heap, s_aOps[j].pvAlloc);
            s_aOps[j].pvAlloc = NULL;
            cFreed++;
        }
    }
    RTTESTI_CHECK(cFreed == RT_ELEMENTS(s_aOps));
    RTTestIPrintf(RTTESTLVL_ALWAYS, "i=done free=%d\n", RTHeapSimpleGetFreeSize(Heap));

    /* check that we're back at the right amount of free memory. */
    size_t cbAfter = RTHeapSimpleGetFreeSize(Heap);
    if (cbBefore != cbAfter)
    {
        RTTestIPrintf(RTTESTLVL_ALWAYS,
                      "Warning: Either we've split out an alignment chunk at the start, or we've got\n"
                      "         an alloc/free accounting bug: cbBefore=%d cbAfter=%d\n", cbBefore, cbAfter);
        RTHeapSimpleDump(Heap, (PFNRTHEAPSIMPLEPRINTF)RTPrintf);
    }

    /* relocate and free the bits in heap2 now. */
    RTTestSub(hTest, "RTHeapSimpleRelocate");
    rc = RTHeapSimpleRelocate(hHeapCopy, offDelta);
    RTTESTI_CHECK_RC(rc, VINF_SUCCESS);
    if (RT_SUCCESS(rc))
    {
        /* free it in a specific order. */
        int cFreed2 = 0;
        for (i = 0; i < RT_ELEMENTS(s_aOpsCopy); i++)
        {
            unsigned j;
            for (j = 0; j < RT_ELEMENTS(s_aOpsCopy); j++)
            {
                if (    s_aOpsCopy[j].iFreeOrder != i
                    ||  !s_aOpsCopy[j].pvAlloc)
                    continue;
                //RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, RTHeapSimpleGetFreeSize(hHeapCopy), s_aOpsCopy[j].cb, s_aOpsCopy[j].pvAlloc);
                RTHeapSimpleFree(hHeapCopy, (uint8_t *)s_aOpsCopy[j].pvAlloc + offDelta);
                s_aOpsCopy[j].pvAlloc = NULL;
                cFreed2++;
            }
        }
        RTTESTI_CHECK(cFreed2 == RT_ELEMENTS(s_aOpsCopy));

        /* check that we're back at the right amount of free memory. */
        size_t cbAfterCopy = RTHeapSimpleGetFreeSize(hHeapCopy);
        RTTESTI_CHECK_MSG(cbAfterCopy == cbAfter, ("cbAfterCopy=%zu cbAfter=%zu\n", cbAfterCopy, cbAfter));
    }


    return RTTestSummaryAndDestroy(hTest);
}