Code Example #1
/**
 * Allocate memory for host buffer and receive it.
 *
 * @param   u32ClientId    Host connection.
 * @param   fFormat        Buffer data format.
 * @param   pData          Where to store received data.
 * @param   cbDataSize     The size of the received data.
 * @param   cbMemSize      The actual size of memory occupied by *pData.
 *
 * @returns IPRT status code.
 */
static int vbclClipboardReadHostData(uint32_t u32ClientId, uint32_t fFormat, void **pData, uint32_t *cbDataSize, uint32_t *cbMemSize)
{
    int rc;

    AssertReturn(pData && cbDataSize && cbMemSize, VERR_INVALID_PARAMETER);

    uint32_t  cbDataSizeInternal = _4K;
    uint32_t  cbMemSizeInternal  = cbDataSizeInternal;
    void     *pDataInternal      = RTMemPageAllocZ(cbDataSizeInternal);

    if (!pDataInternal)
        return VERR_NO_MEMORY;

    rc = VbglR3ClipboardReadData(u32ClientId, fFormat, pDataInternal, cbMemSizeInternal, &cbDataSizeInternal);
    if (rc == VINF_BUFFER_OVERFLOW)
    {
        /* Reallocate bigger buffer and receive all the data */
        RTMemPageFree(pDataInternal, cbMemSizeInternal);
        cbDataSizeInternal = cbMemSizeInternal = RT_ALIGN_32(cbDataSizeInternal, PAGE_SIZE);
        pDataInternal = RTMemPageAllocZ(cbMemSizeInternal);
        if (!pDataInternal)
            return VERR_NO_MEMORY;

        rc = VbglR3ClipboardReadData(u32ClientId, fFormat, pDataInternal, cbMemSizeInternal, &cbDataSizeInternal);
    }

    /* Error occurred or zero-sized buffer */
    if (RT_FAILURE(rc))
    {
        RTMemPageFree(pDataInternal, cbMemSizeInternal);
        return VERR_NO_MEMORY;
    }

    *pData      = pDataInternal;
    *cbDataSize = cbDataSizeInternal;
    *cbMemSize  = cbMemSizeInternal;

    return rc;
}
Code Example #2
/** Generic implementation of krdrRTFileMap. */
static int  krdrRTFileGenericMap(PKRDR pRdr, PKRDRFILEPREP pPrep, KU32 cSegments, PCKLDRSEG paSegments, KBOOL fFixed)
{
    int rc = 0;
    KU32 i;

    /*
     * Generic mapping code using kHlpPageAlloc(), kHlpPageFree() and kHlpPageProtect().
     */
    pPrep->pv = RTMemPageAlloc(pPrep->cb);
    if (!pPrep->pv)
        return KERR_NO_MEMORY;

    /*
     * Load the data.
     */
    for (i = 0; i < cSegments; i++)
    {
        void *pv;

        if (    paSegments[i].RVA == NIL_KLDRADDR
            ||  paSegments[i].cbFile <= 0)
            continue;

        pv = (KU8 *)pPrep->pv + paSegments[i].RVA;
        rc = pRdr->pOps->pfnRead(pRdr, pv, paSegments[i].cbFile, paSegments[i].offFile);
        if (rc)
            break;
    }

    /*
     * Set segment protection.
     */
    if (!rc)
    {
        rc = krdrRTFileGenericProtect(pRdr, pPrep, cSegments, paSegments, 0 /* protect */);
        if (!rc)
            return 0;
        krdrRTFileGenericProtect(pRdr, pPrep, cSegments, paSegments, 1 /* unprotect */);
    }

    /* bailout */
    RTMemPageFree(pPrep->pv, pPrep->cb);
    return rc;
}
Code Example #3
File: memcache.cpp  Project: miguelinux/vbox
RTDECL(int) RTMemCacheDestroy(RTMEMCACHE hMemCache)
{
    RTMEMCACHEINT *pThis = hMemCache;
    if (!pThis)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_HANDLE);

#if 0 /*def RT_STRICT - don't require everything to be freed. Caches are very convenient for lazy cleanup. */
    uint32_t cFree = pThis->cFree;
    for (PRTMEMCACHEFREEOBJ pFree = pThis->pFreeTop; pFree && cFree < pThis->cTotal + 5; pFree = pFree->pNext)
        cFree++;
    AssertMsg(cFree == pThis->cTotal, ("cFree=%u cTotal=%u\n", cFree, pThis->cTotal));
#endif

    /*
     * Destroy it.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTMEMCACHE_MAGIC_DEAD, RTMEMCACHE_MAGIC), VERR_INVALID_HANDLE);
    RTCritSectDelete(&pThis->CritSect);

    while (pThis->pPageHead)
    {
        PRTMEMCACHEPAGE pPage = pThis->pPageHead;
        pThis->pPageHead = pPage->pNext;
        pPage->cFree = 0;

        if (pThis->pfnDtor)
        {
            uint32_t iObj = pPage->cObjects;
            while (iObj-- > 0)
                if (ASMBitTestAndClear(pPage->pbmCtor, iObj))
                    pThis->pfnDtor(hMemCache, pPage->pbObjects + iObj * pThis->cbObject, pThis->pvUser);
        }

        RTMemPageFree(pPage, PAGE_SIZE);
    }

    RTMemFree(pThis);
    return VINF_SUCCESS;
}
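
RTMemCacheDestroy above walks the cache's page list, runs the registered destructor for constructed objects, and returns every page with RTMemPageFree(pPage, PAGE_SIZE). Below is a minimal lifecycle sketch, not taken from the original sources, assuming the RTMemCacheCreate/RTMemCacheAlloc/RTMemCacheFree declarations in iprt/memcache.h; the 64-byte object size and the UINT32_MAX object limit are illustrative choices.

#include <iprt/memcache.h>
#include <iprt/err.h>

static int exampleCacheLifecycle(void)
{
    RTMEMCACHE hCache;
    /* Illustrative parameters: 64-byte objects, default alignment, no practical object limit, no ctor/dtor. */
    int rc = RTMemCacheCreate(&hCache, 64 /* cbObject */, 0 /* cbAlignment */, UINT32_MAX /* cMaxObjects */,
                              NULL /* pfnCtor */, NULL /* pfnDtor */, NULL /* pvUser */, 0 /* fFlags */);
    if (RT_SUCCESS(rc))
    {
        void *pvObj = RTMemCacheAlloc(hCache);  /* carved out of a page-sized chunk */
        if (pvObj)
            RTMemCacheFree(hCache, pvObj);      /* back onto the cache's free list */
        rc = RTMemCacheDestroy(hCache);         /* releases the backing pages via RTMemPageFree */
    }
    return rc;
}
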
Code Example #4
/**
 * Release memory occupied by host buffer.
 *
 * @param   pData          Pointer to memory occupied by host buffer.
 * @param   cbMemSize      The actual size of memory occupied by *pData.
 */
static void vbclClipboardReleaseHostData(void **pData, uint32_t cbMemSize)
{
    AssertReturnVoid(pData && cbMemSize > 0);
    RTMemPageFree(*pData, cbMemSize);
}
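
Code examples #1 and #4 form a pair: the read helper hands back a buffer obtained with RTMemPageAllocZ together with its allocation size, and the release helper returns that buffer with RTMemPageFree. A minimal usage sketch follows, not from the original sources; it assumes it lives in the same translation unit as the two static helpers, that u32ClientId comes from VbglR3ClipboardConnect(), and that VBOX_SHARED_CLIPBOARD_FMT_UNICODETEXT (the format constant used by this generation of the clipboard code) is the desired format.

/* Illustrative helper, not part of the original sources. */
static int vbclClipboardExampleRoundTrip(uint32_t u32ClientId)
{
    void     *pvData = NULL;
    uint32_t  cbData = 0;
    uint32_t  cbMem  = 0;

    int rc = vbclClipboardReadHostData(u32ClientId, VBOX_SHARED_CLIPBOARD_FMT_UNICODETEXT,
                                       &pvData, &cbData, &cbMem);
    if (RT_SUCCESS(rc))
    {
        /* ... consume cbData bytes at pvData ... */
        vbclClipboardReleaseHostData(&pvData, cbMem);   /* frees via RTMemPageFree(*pData, cbMem) */
    }
    return rc;
}
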
Code Example #5
static void doTest(RTTEST hTest)
{
    NOREF(hTest);
    uint32_t iAllocCpu = 0;
    while (iAllocCpu < RTCPUSET_MAX_CPUS)
    {
        const uint32_t cbTestSet   = _1M * 32;
        const uint32_t cIterations = 384;

        /*
         * Change CPU and allocate a chunk of memory.
         */
        RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(iAllocCpu)));

        void *pvTest = RTMemPageAlloc(cbTestSet); /* may be leaked, who cares */
        RTTESTI_CHECK_RETV(pvTest != NULL);
        memset(pvTest, 0xef, cbTestSet);

        /*
         * Do the tests.
         */
        uint32_t iAccessCpu = 0;
        while (iAccessCpu < RTCPUSET_MAX_CPUS)
        {
            RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(iAccessCpu)));

            /*
             * The write test.
             */
            RTTimeNanoTS(); RTThreadYield();
            uint64_t u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                memset(pvTest, i, cbTestSet);
            }
            uint64_t const cNsElapsedWrite = RTTimeNanoTS() - u64StartTS;
            uint64_t cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                            / ((long double)cNsElapsedWrite / RT_NS_1SEC_64) /* seconds */
                                            / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-write", iAllocCpu, iAccessCpu);

            /*
             * The read test.
             */
            memset(pvTest, 0, cbTestSet);
            RTTimeNanoTS(); RTThreadYield();
            u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
#if 1
                size_t register u = 0;
                size_t volatile *puCur = (size_t volatile *)pvTest;
                size_t volatile *puEnd = puCur + cbTestSet / sizeof(size_t);
                while (puCur != puEnd)
                    u += *puCur++;
#else
                ASMCompilerBarrier(); /* paranoia */
                void *pvFound = memchr(pvTest, (i & 127) + 1, cbTestSet);
                RTTESTI_CHECK(pvFound == NULL);
#endif
            }
            uint64_t const cNsElapsedRead = RTTimeNanoTS() - u64StartTS;
            cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                   / ((long double)cNsElapsedRead / RT_NS_1SEC_64) /* seconds */
                                   / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read", iAllocCpu, iAccessCpu);

            /*
             * The read/write test.
             */
            RTTimeNanoTS(); RTThreadYield();
            u64StartTS = RTTimeNanoTS();
            for (uint32_t i = 0; i < cIterations; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                memcpy(pvTest, (uint8_t *)pvTest + cbTestSet / 2, cbTestSet / 2);
            }
            uint64_t const cNsElapsedRW = RTTimeNanoTS() - u64StartTS;
            cMBPerSec = (uint64_t)(  ((uint64_t)cIterations * cbTestSet) /* bytes */
                                   / ((long double)cNsElapsedRW / RT_NS_1SEC_64) /* seconds */
                                   / _1M /* MB */ );
            RTTestIValueF(cMBPerSec, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read-write", iAllocCpu, iAccessCpu);

            /*
             * Total time.
             */
            RTTestIValueF(cNsElapsedRead + cNsElapsedWrite + cNsElapsedRW, RTTESTUNIT_NS,
                          "cpu%02u-mem%02u-time", iAllocCpu, iAccessCpu);

            /* advance */
            iAccessCpu = getNextCpu(iAccessCpu);
        }

        /*
         * Clean up and advance to the next CPU.
         */
        RTMemPageFree(pvTest, cbTestSet);
        iAllocCpu = getNextCpu(iAllocCpu);
    }
}
Code Example #6
int suplibOsPageFree(PSUPLIBDATA pThis, void *pvPages, size_t cPages)
{
    NOREF(pThis);
    RTMemPageFree(pvPages, cPages * PAGE_SIZE);
    return VINF_SUCCESS;
}
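
suplibOsPageFree simply forwards to RTMemPageFree, converting a page count into a byte count. The contract it relies on is that the byte count passed to RTMemPageFree matches the one given to RTMemPageAlloc/RTMemPageAllocZ. A minimal standalone sketch of that pairing, not from the original sources, assuming iprt/mem.h, iprt/param.h and iprt/err.h:

#include <iprt/mem.h>
#include <iprt/param.h>   /* PAGE_SIZE */
#include <iprt/err.h>     /* VINF_SUCCESS, VERR_NO_MEMORY */

static int examplePageAllocFree(size_t cPages)
{
    size_t cb = cPages * PAGE_SIZE;
    void  *pv = RTMemPageAllocZ(cb);    /* page-aligned, zero-initialized */
    if (!pv)
        return VERR_NO_MEMORY;

    /* ... use the pages ... */

    RTMemPageFree(pv, cb);              /* must be given the original allocation size */
    return VINF_SUCCESS;
}
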
Code Example #7
/**
 * Release resources occupied by vbclClipboardReadGuestData().
 */
static void vbclClipboardReleaseGuestData(void **ppvData, uint32_t cbAlloc)
{
    AssertReturnVoid(ppvData);
    RTMemPageFree(*ppvData, cbAlloc);
    *ppvData = NULL;
}
Code Example #8
/**
 * Wipes free space on one or more volumes by creating large files.
 */
static RTEXITCODE handlerWipeFreeSpace(int argc, char **argv)
{
    /*
     * Parse arguments.
     */
    const char *apszDefFiles[2] = { "./wipefree.spc", NULL };
    bool        fAll            = false;
    uint32_t    u32Filler       = UINT32_C(0xf6f6f6f6);
    uint64_t    cbMinLeftOpt    = _32M;

    static RTGETOPTDEF const s_aOptions[] =
    {
        { "--all",      'a', RTGETOPT_REQ_NOTHING },
        { "--filler",   'f', RTGETOPT_REQ_UINT32 },
        { "--min-free", 'm', RTGETOPT_REQ_UINT64 },
    };
    RTGETOPTSTATE State;
    RTGetOptInit(&State, argc, argv, &s_aOptions[0], RT_ELEMENTS(s_aOptions), 1, RTGETOPTINIT_FLAGS_OPTS_FIRST);
    RTGETOPTUNION ValueUnion;
    int chOpt;
    while (  (chOpt = RTGetOpt(&State, &ValueUnion)) != 0
           && chOpt != VINF_GETOPT_NOT_OPTION)
    {
        switch (chOpt)
        {
            case 'a':
                fAll = true;
                break;
            case 'f':
                u32Filler = ValueUnion.u32;
                break;
            case 'm':
                cbMinLeftOpt = ValueUnion.u64;
                break;
            case 'h':
                RTPrintf("usage: wipefrespace [options] [filename1 [..]]\n"
                         "\n"
                         "Options:\n"
                         "  -a, --all\n"
                         "    Try to do the free space wiping on all seemingly relevant file systems.\n"
                         "    Changes the meaning of the filenames.\n"
                         "    This is not yet implemented.\n"
                         "  -f, --filler <32-bit value>\n"
                         "    What to fill the blocks we write with.\n"
                         "    Default: 0xf6f6f6f6\n"
                         "  -m, --min-free <64-bit byte count>\n"
                         "    Specifies when to stop in terms of free disk space (in bytes).\n"
                         "    Default: 32MB\n"
                         "\n"
                         "Zero or more names of files to do the free space wiping thru can be given.\n"
                         "When --all is NOT used, each of the files are used to do free space wiping on\n"
                         "the volume they will live on.  However, when --all is in effect the files are\n"
                         "appended to the volume mountpoints and only the first that can be created will\n"
                         "be used.  Files (used ones) will be removed when done.\n"
                         "\n"
                         "If no filename is given, the default is: %s\n"
                         , apszDefFiles[0]);
                return RTEXITCODE_SUCCESS;

            default:
                return RTGetOptPrintError(chOpt, &ValueUnion);
        }
    }

    char **papszFiles;
    if (chOpt == 0)
        papszFiles = (char **)apszDefFiles;
    else
        papszFiles = RTGetOptNonOptionArrayPtr(&State);

    /*
     * Allocate and prep a memory which we'll write over and over again.
     */
    uint32_t  cbFiller   = _2M;
    uint32_t *pu32Filler = (uint32_t *)RTMemPageAlloc(cbFiller);
    while (!pu32Filler)
    {
        cbFiller >>= 1; /* retry with a smaller buffer */
        if (cbFiller >= _4K)
            pu32Filler = (uint32_t *)RTMemPageAlloc(cbFiller);
        else
            return RTMsgErrorExit(RTEXITCODE_FAILURE, "RTMemPageAlloc failed for sizes between 4KB and 2MB!\n");
    }
    for (uint32_t i = 0; i < cbFiller / sizeof(pu32Filler[0]); i++)
        pu32Filler[i] = u32Filler;

    /*
     * Do the requested work.
     */
    RTEXITCODE rcExit = RTEXITCODE_SUCCESS;
    if (!fAll)
    {
        for (uint32_t iFile = 0; papszFiles[iFile] != NULL; iFile++)
        {
            RTEXITCODE rcExit2 = doOneFreeSpaceWipe(papszFiles[iFile], pu32Filler, cbFiller, cbMinLeftOpt);
            if (rcExit2 != RTEXITCODE_SUCCESS && rcExit == RTEXITCODE_SUCCESS)
                rcExit = rcExit2;
        }
    }
    else
    {
        /*
         * Reject --all for now.
         */
        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE,  "The --all option is not yet implemented!\n");
    }

    RTMemPageFree(pu32Filler, cbFiller);
    return rcExit;
}
Code Example #9
/** Generic implementation of krdrRTFileUnmap. */
static int krdrRTFileGenericUnmap(PKRDR pRdr, PKRDRFILEPREP pPrep, KU32 cSegments, PCKLDRSEG paSegments)
{
    krdrRTFileGenericProtect(pRdr, pPrep, cSegments, paSegments, 1 /* unprotect */);
    RTMemPageFree(pPrep->pv, pPrep->cb);
    return 0;
}
Code Example #10
/**
 * Internal free.
 */
RTDECL(void) rtR3MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Simple case.
     */
    if (!pv)
        return;

    /*
     * Check watch points.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
        if (gapvRTMemFreeWatch[i] == pv)
            RTAssertDoPanic();

#ifdef RTALLOC_EFENCE_TRACE
    /*
     * Find the block.
     */
    PRTMEMBLOCK pBlock = rtmemBlockRemove(pv);
    if (pBlock)
    {
        if (gfRTMemFreeLog)
            RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);

# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        /*
         * Check whether the no man's land is untouched.
         */
#  ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvWrong = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
                                     RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
                                     RTALLOC_EFENCE_NOMAN_FILLER);
#  else
        /* Alignment must match allocation alignment in rtMemAlloc(). */
        void  *pvWrong   = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
                                        pBlock->cbAligned - pBlock->cbUnaligned,
                                        RTALLOC_EFENCE_NOMAN_FILLER);
        if (pvWrong)
            RTAssertDoPanic();
        pvWrong = ASMMemIsAll8((void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK),
                               RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
                               RTALLOC_EFENCE_NOMAN_FILLER);
#  endif
        if (pvWrong)
            RTAssertDoPanic();
# endif

# ifdef RTALLOC_EFENCE_FREE_FILL
        /*
         * Fill the user part of the block.
         */
        memset(pv, RTALLOC_EFENCE_FREE_FILL, pBlock->cbUnaligned);
# endif

# if defined(RTALLOC_EFENCE_FREE_DELAYED) && RTALLOC_EFENCE_FREE_DELAYED > 0
        /*
         * We're doing delayed freeing.
         * That means we'll expand the E-fence to cover the entire block.
         */
        int rc = RTMemProtect(pv, pBlock->cbAligned, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            /*
             * Insert it into the free list and process pending frees.
             */
            rtmemBlockDelayInsert(pBlock);
            while ((pBlock = rtmemBlockDelayRemove()) != NULL)
            {
                pv = pBlock->Core.Key;
#  ifdef RTALLOC_EFENCE_IN_FRONT
                void  *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
#  else
                void  *pvBlock = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
#  endif
                size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
                rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (RT_SUCCESS(rc))
                    RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
                else
                    rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
                rtmemBlockFree(pBlock);
            }
        }
        else
            rtmemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%d, rc=%d.\n", pv, pBlock->cbAligned, rc);

# else /* !RTALLOC_EFENCE_FREE_DELAYED */

        /*
         * Turn off the E-fence and free it.
         */
#  ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
        void *pvEFence = pvBlock;
#  else
        void *pvBlock = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
        void *pvEFence = (char *)pv + pBlock->cb;
#  endif
        int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
        else
            rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
        rtmemBlockFree(pBlock);

# endif /* !RTALLOC_EFENCE_FREE_DELAYED */
    }
    else
        rtmemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);

#else /* !RTALLOC_EFENCE_TRACE */

    /*
     * We have no size tracking, so we're not doing any freeing because
     * we cannot if the E-fence is after the block.
     * Let's just expand the E-fence to the first page of the user bit
     * since we know that it's around.
     */
    int rc = RTMemProtect((void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK), PAGE_SIZE, RTMEM_PROT_NONE);
    if (RT_FAILURE(rc))
        rtmemComplain(pszOp, "RTMemProtect(%p, PAGE_SIZE, RTMEM_PROT_NONE) -> %d\n", (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK), rc);
#endif /* !RTALLOC_EFENCE_TRACE */
}
Code Example #11
/**
 * Internal allocator.
 */
RTDECL(void *) rtR3MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
                            const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
{
    /*
     * Sanity.
     */
    if (    RT_ALIGN_Z(RTALLOC_EFENCE_SIZE, PAGE_SIZE) != RTALLOC_EFENCE_SIZE
        ||  RTALLOC_EFENCE_SIZE <= 0)
    {
        rtmemComplain(pszOp, "Invalid E-fence size! %#x\n", RTALLOC_EFENCE_SIZE);
        return NULL;
    }
    if (!cbUnaligned)
    {
#if 0
        rtmemComplain(pszOp, "Request of ZERO bytes allocation!\n");
        return NULL;
#else
        cbAligned = cbUnaligned = 1;
#endif
    }

#ifndef RTALLOC_EFENCE_IN_FRONT
    /* Alignment decreases fence accuracy, but this is at least partially
     * counteracted by filling and checking the alignment padding. When the
     * fence is in front, no extra alignment is needed. */
    cbAligned = RT_ALIGN_Z(cbAligned, RTALLOC_EFENCE_ALIGNMENT);
#endif

#ifdef RTALLOC_EFENCE_TRACE
    /*
     * Allocate the trace block.
     */
    PRTMEMBLOCK pBlock = rtmemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
    if (!pBlock)
    {
        rtmemComplain(pszOp, "Failed to allocate trace block!\n");
        return NULL;
    }
#endif

    /*
     * Allocate a block with page alignment space + the size of the E-fence.
     */
    size_t  cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
    void   *pvBlock = RTMemPageAlloc(cbBlock);
    if (pvBlock)
    {
        /*
         * Calc the start of the fence and the user block
         * and then change the page protection of the fence.
         */
#ifdef RTALLOC_EFENCE_IN_FRONT
        void *pvEFence = pvBlock;
        void *pv       = (char *)pvEFence + RTALLOC_EFENCE_SIZE;
# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbUnaligned);
# endif
#else
        void *pvEFence = (char *)pvBlock + (cbBlock - RTALLOC_EFENCE_SIZE);
        void *pv       = (char *)pvEFence - cbAligned;
# ifdef RTALLOC_EFENCE_NOMAN_FILLER
        memset(pvBlock, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbAligned);
        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbAligned - cbUnaligned);
# endif
#endif

#ifdef RTALLOC_EFENCE_FENCE_FILLER
        memset(pvEFence, RTALLOC_EFENCE_FENCE_FILLER, RTALLOC_EFENCE_SIZE);
#endif
        int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_NONE);
        if (!rc)
        {
#ifdef RTALLOC_EFENCE_TRACE
            rtmemBlockInsert(pBlock, pv);
#endif
            if (enmType == RTMEMTYPE_RTMEMALLOCZ)
                memset(pv, 0, cbUnaligned);
#ifdef RTALLOC_EFENCE_FILLER
            else
                memset(pv, RTALLOC_EFENCE_FILLER, cbUnaligned);
#endif

            rtmemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
            return pv;
        }
        rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
        RTMemPageFree(pvBlock, cbBlock);
    }
    else
        rtmemComplain(pszOp, "Failed to allocated %lu (%lu) bytes.\n", (unsigned long)cbBlock, (unsigned long)cbUnaligned);

#ifdef RTALLOC_EFENCE_TRACE
    rtmemBlockFree(pBlock);
#endif
    return NULL;
}
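
The interesting part of rtR3MemAlloc is the layout arithmetic: a single page allocation holds alignment padding, the user block and an inaccessible E-fence, and rtR3MemFree in code example #10 recomputes the same offsets before handing the whole block to RTMemPageFree. The standalone sketch below, not from the original sources, reproduces that arithmetic for the fence-at-the-end case; the values used for PAGE_SIZE, RTALLOC_EFENCE_SIZE and RTALLOC_EFENCE_ALIGNMENT are assumptions for illustration.

#include <stdio.h>
#include <stddef.h>

#define EX_PAGE_SIZE     4096u               /* assumed page size */
#define EX_EFENCE_SIZE   EX_PAGE_SIZE        /* assumed RTALLOC_EFENCE_SIZE: one page */
#define EX_EFENCE_ALIGN  16u                 /* assumed RTALLOC_EFENCE_ALIGNMENT */
#define EX_ALIGN_Z(cb, uAlign) (((cb) + (uAlign) - 1) & ~((size_t)(uAlign) - 1))

int main(void)
{
    size_t cbUnaligned = 1000;                                     /* requested size */
    size_t cbAligned   = EX_ALIGN_Z(cbUnaligned, EX_EFENCE_ALIGN); /* padding is filled and checked for accuracy */
    size_t cbBlock     = EX_ALIGN_Z(cbAligned, EX_PAGE_SIZE) + EX_EFENCE_SIZE;

    /* The fence occupies the end of the block; the user area ends right at the fence. */
    size_t offEFence = cbBlock - EX_EFENCE_SIZE;
    size_t offUser   = offEFence - cbAligned;

    printf("cbBlock=%zu offUser=%zu offEFence=%zu\n", cbBlock, offUser, offEFence);
    return 0;
}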