/**
 * Allocates a buffer from the HGSMI heap and takes a reference on the heap.
 *
 * @returns Pointer to the allocated buffer, or NULL if the underlying heap
 *          allocation failed.
 * @param   pHeap       The HGSMI heap to allocate from.
 * @param   cbBuffer    Number of bytes to allocate.
 */
void *HGSMIHeapBufferAlloc(HGSMIHEAP *pHeap, HGSMISIZE cbBuffer)
{
    /* Dispatch to the backend matching the heap flavour (offset based vs. pointer based). */
    void *pvBuf = pHeap->fOffsetBased
                ? RTHeapOffsetAlloc(pHeap->u.hOff, cbBuffer, 0)
                : RTHeapSimpleAlloc(pHeap->u.hPtr, cbBuffer, 0);
    if (pvBuf)
        ++pHeap->cRefs; /* Each live buffer holds one reference on the heap. */
    return pvBuf;
}
/**
 * OS specific allocation function.
 *
 * Allocates cb bytes plus a RTMEMHDR header, picking the Linux allocator
 * according to the request flags:
 *   - RTMEMHDR_FLAG_EXEC: executable memory (exec heap / vm-area / vmalloc,
 *     depending on arch and build configuration); not supported together
 *     with RTMEMHDR_FLAG_ANY_CTX.
 *   - otherwise: kmalloc for small (or any-context) requests, vmalloc for
 *     large ones, with a kmalloc->vmalloc fallback when kmalloc fails.
 *
 * @returns VINF_SUCCESS, VERR_NO_MEMORY, or VERR_NOT_SUPPORTED.
 * @param   cb      Number of bytes the caller requested (header not included).
 * @param   fFlags  RTMEMHDR_FLAG_XXX; updated with allocator-specific flags
 *                  and stored in the header so the free path knows how to
 *                  release the block.
 * @param   ppHdr   Where to return the header of the allocated block.
 */
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    PRTMEMHDR pHdr;
    IPRT_LINUX_SAVE_EFL_AC(); /* Preserve EFLAGS.AC across kernel calls. */

    /*
     * Allocate.
     */
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        /* Executable allocations may sleep; refuse any-context requests. */
        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
            return VERR_NOT_SUPPORTED;

#if defined(RT_ARCH_AMD64)
# ifdef RTMEMALLOC_EXEC_HEAP
        if (g_HeapExec != NIL_RTHEAPSIMPLE)
        {
            /* Carve the block out of the dedicated executable heap under its spinlock. */
            RTSpinlockAcquire(g_HeapExecSpinlock);
            pHdr = (PRTMEMHDR)RTHeapSimpleAlloc(g_HeapExec, cb + sizeof(*pHdr), 0);
            RTSpinlockRelease(g_HeapExecSpinlock);
            fFlags |= RTMEMHDR_FLAG_EXEC_HEAP; /* tell the free path which allocator was used */
        }
        else
            pHdr = NULL;

# elif defined(RTMEMALLOC_EXEC_VM_AREA)
        pHdr = rtR0MemAllocExecVmArea(cb);
        fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;

# else  /* !RTMEMALLOC_EXEC_HEAP */
# error "you don not want to go here..."
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
# endif /* !RTMEMALLOC_EXEC_HEAP */

#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
#else
        pHdr = (PRTMEMHDR)vmalloc(cb + sizeof(*pHdr));
#endif
    }
    else
    {
        if (
#if 1 /* vmalloc has serious performance issues, avoid it. */
               cb <= PAGE_SIZE*16 - sizeof(*pHdr)
#else
               cb <= PAGE_SIZE
#endif
            || (fFlags & RTMEMHDR_FLAG_ANY_CTX)
           )
        {
            fFlags |= RTMEMHDR_FLAG_KMALLOC;
            /* Any-context allocations must not sleep -> GFP_ATOMIC. */
            pHdr = kmalloc(cb + sizeof(*pHdr),
                           (fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC) ? (GFP_ATOMIC | __GFP_NOWARN)
                                                                  : (GFP_KERNEL | __GFP_NOWARN));
            /* Large kmalloc may fail due to fragmentation; retry with vmalloc
               when sleeping is permitted. */
            if (RT_UNLIKELY(   !pHdr
                            && cb > PAGE_SIZE
                            && !(fFlags & RTMEMHDR_FLAG_ANY_CTX) ))
            {
                fFlags &= ~RTMEMHDR_FLAG_KMALLOC;
                pHdr = vmalloc(cb + sizeof(*pHdr));
            }
        }
        else
            pHdr = vmalloc(cb + sizeof(*pHdr));
    }
    if (RT_UNLIKELY(!pHdr))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    /*
     * Initialize.
     */
    pHdr->u32Magic  = RTMEMHDR_MAGIC;
    pHdr->fFlags    = fFlags;
    pHdr->cb        = cb;
    pHdr->cbReq     = cb;

    *ppHdr = pHdr;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Allocate memory from the heap.
 *
 * @returns Pointer to allocated memory.
 * @param   pHeap       Heap handle.
 * @param   enmTag      Statistics tag. Statistics are collected on a per tag
 *                      basis in addition to a global one. Thus we can easily
 *                      identify how memory is used by the VM.
 * @param   cb          Size of the block.
 * @param   fZero       Whether or not to zero the memory block.
 * @param   pR0Ptr      Where to return the ring-0 pointer.
 */
static void *mmR3UkHeapAlloc(PMMUKHEAP pHeap, MMTAG enmTag, size_t cb, bool fZero, PRTR0PTR pR0Ptr)
{
    if (pR0Ptr)
        *pR0Ptr = NIL_RTR0PTR;
    RTCritSectEnter(&pHeap->Lock); /* Held until the final return; every exit path leaves it. */

#ifdef MMUKHEAP_WITH_STATISTICS
    /*
     * Find/alloc statistics nodes.
     */
    pHeap->Stat.cAllocations++;
    PMMUKHEAPSTAT pStat = (PMMUKHEAPSTAT)RTAvlULGet(&pHeap->pStatTree, (AVLULKEY)enmTag);
    if (pStat)
        pStat->cAllocations++;
    else
    {
        /* First allocation with this tag: create and register its stat record. */
        pStat = (PMMUKHEAPSTAT)MMR3HeapAllocZU(pHeap->pUVM, MM_TAG_MM, sizeof(MMUKHEAPSTAT));
        if (!pStat)
        {
            pHeap->Stat.cFailures++;
            AssertMsgFailed(("Failed to allocate heap stat record.\n"));
            RTCritSectLeave(&pHeap->Lock);
            return NULL;
        }
        pStat->Core.Key = (AVLULKEY)enmTag;
        RTAvlULInsert(&pHeap->pStatTree, &pStat->Core);
        pStat->cAllocations++;

        /* register the statistics */
        PUVM pUVM = pHeap->pUVM;
        const char *pszTag = mmGetTagName(enmTag);
        STAMR3RegisterFU(pUVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                         "Number of bytes currently allocated.", "/MM/UkHeap/%s", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
                         "Number or MMR3UkHeapAlloc() calls.", "/MM/UkHeap/%s/cAllocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
                         "Number of MMR3UkHeapRealloc() calls.", "/MM/UkHeap/%s/cReallocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
                         "Number of MMR3UkHeapFree() calls.", "/MM/UkHeap/%s/cFrees", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                         "Number of failures.", "/MM/UkHeap/%s/cFailures", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                         "Total number of bytes allocated.", "/MM/UkHeap/%s/cbAllocated", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                         "Total number of bytes freed.", "/MM/UkHeap/%s/cbFreed", pszTag);
    }
#endif

    /*
     * Validate input.
     */
    if (cb == 0)
    {
#ifdef MMUKHEAP_WITH_STATISTICS
        pStat->cFailures++;
        pHeap->Stat.cFailures++;
#endif
        RTCritSectLeave(&pHeap->Lock);
        return NULL;
    }

    /*
     * Allocate heap block.
     */
    cb = RT_ALIGN_Z(cb, MMUKHEAP_SIZE_ALIGNMENT);
    void *pv = NULL;

    /* Walk the sub-heap list until one of them satisfies the request. */
    PMMUKHEAPSUB pSubHeapPrev = NULL;
    PMMUKHEAPSUB pSubHeap = pHeap->pSubHeapHead;
    while (pSubHeap)
    {
        if (fZero)
            pv = RTHeapSimpleAllocZ(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        else
            pv = RTHeapSimpleAlloc(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        if (pv)
        {
            /* Move the sub-heap with free memory to the head (MRU) so the
               next allocation tries it first. */
            if (pSubHeapPrev)
            {
                pSubHeapPrev->pNext = pSubHeap->pNext;
                pSubHeap->pNext = pHeap->pSubHeapHead;
                pHeap->pSubHeapHead = pSubHeap;
            }
            break;
        }
        pSubHeapPrev = pSubHeap;
        pSubHeap = pSubHeap->pNext;
    }
    if (RT_UNLIKELY(!pv))
    {
        /*
         * Add another sub-heap.
         * Size it with headroom (16 extra pages) but at least 256K, then retry once.
         */
        pSubHeap = mmR3UkHeapAddSubHeap(pHeap, RT_MAX(RT_ALIGN_Z(cb, PAGE_SIZE) + PAGE_SIZE * 16, _256K));
        if (pSubHeap)
        {
            if (fZero)
                pv = RTHeapSimpleAllocZ(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
            else
                pv = RTHeapSimpleAlloc(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        }
        if (RT_UNLIKELY(!pv))
        {
            /* NOTE(review): '%d' with a size_t 'cb' is a format/argument type
               mismatch on LP64; %zu (or IPRT's %zx) looks right — confirm
               against the AssertMsg format rules. '%.4s' on &enmTag prints
               the tag as its 4-char mnemonic. */
            AssertMsgFailed(("Failed to allocate heap block %d, enmTag=%x(%.4s).\n", cb, enmTag, &enmTag));
#ifdef MMUKHEAP_WITH_STATISTICS
            pStat->cFailures++;
            pHeap->Stat.cFailures++;
#endif
            RTCritSectLeave(&pHeap->Lock);
            return NULL;
        }
    }

    /*
     * Update statistics
     */
#ifdef MMUKHEAP_WITH_STATISTICS
    /* Account the actual (aligned/rounded) block size, not the request size. */
    size_t cbActual = RTHeapSimpleSize(pSubHeap->hSimple, pv);
    pStat->cbAllocated          += cbActual;
    pStat->cbCurAllocated       += cbActual;
    pHeap->Stat.cbAllocated     += cbActual;
    pHeap->Stat.cbCurAllocated  += cbActual;
#endif

    /* Translate the ring-3 address into the sub-heap's ring-0 mapping. */
    if (pR0Ptr)
        *pR0Ptr = (uintptr_t)pv - (uintptr_t)pSubHeap->pv + pSubHeap->pvR0;
    RTCritSectLeave(&pHeap->Lock);
    return pv;
}
int main(int argc, char *argv[]) { /* * Init runtime. */ RTTEST hTest; int rc = RTTestInitAndCreate("tstRTHeapSimple", &hTest); if (rc) return rc; RTTestBanner(hTest); /* * Create a heap. */ RTTestSub(hTest, "Basics"); static uint8_t s_abMem[128*1024]; RTHEAPSIMPLE Heap; RTTESTI_CHECK_RC(rc = RTHeapSimpleInit(&Heap, &s_abMem[1], sizeof(s_abMem) - 1), VINF_SUCCESS); if (RT_FAILURE(rc)) return RTTestSummaryAndDestroy(hTest); /* * Try allocate. */ static struct TstHeapSimpleOps { size_t cb; unsigned uAlignment; void *pvAlloc; unsigned iFreeOrder; } s_aOps[] = { { 16, 0, NULL, 0 }, // 0 { 16, 4, NULL, 1 }, { 16, 8, NULL, 2 }, { 16, 16, NULL, 5 }, { 16, 32, NULL, 4 }, { 32, 0, NULL, 3 }, // 5 { 31, 0, NULL, 6 }, { 1024, 0, NULL, 8 }, { 1024, 32, NULL, 10 }, { 1024, 32, NULL, 12 }, { PAGE_SIZE, PAGE_SIZE, NULL, 13 }, // 10 { 1024, 32, NULL, 9 }, { PAGE_SIZE, 32, NULL, 11 }, { PAGE_SIZE, PAGE_SIZE, NULL, 14 }, { 16, 0, NULL, 15 }, { 9, 0, NULL, 7 }, // 15 { 16, 0, NULL, 7 }, { 36, 0, NULL, 7 }, { 16, 0, NULL, 7 }, { 12344, 0, NULL, 7 }, { 50, 0, NULL, 7 }, // 20 { 16, 0, NULL, 7 }, }; unsigned i; RTHeapSimpleDump(Heap, (PFNRTHEAPSIMPLEPRINTF)RTPrintf); /** @todo Add some detail info output with a signature identical to RTPrintf. */ size_t cbBefore = RTHeapSimpleGetFreeSize(Heap); static char szFill[] = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; /* allocate */ for (i = 0; i < RT_ELEMENTS(s_aOps); i++) { s_aOps[i].pvAlloc = RTHeapSimpleAlloc(Heap, s_aOps[i].cb, s_aOps[i].uAlignment); RTTESTI_CHECK_MSG(s_aOps[i].pvAlloc, ("RTHeapSimpleAlloc(%p, %#x, %#x,) -> NULL i=%d\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i)); if (!s_aOps[i].pvAlloc) return RTTestSummaryAndDestroy(hTest); memset(s_aOps[i].pvAlloc, szFill[i], s_aOps[i].cb); RTTESTI_CHECK_MSG(RT_ALIGN_P(s_aOps[i].pvAlloc, (s_aOps[i].uAlignment ? 
s_aOps[i].uAlignment : 8)) == s_aOps[i].pvAlloc, ("RTHeapSimpleAlloc(%p, %#x, %#x,) -> %p\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i)); if (!s_aOps[i].pvAlloc) return RTTestSummaryAndDestroy(hTest); } /* free and allocate the same node again. */ for (i = 0; i < RT_ELEMENTS(s_aOps); i++) { if (!s_aOps[i].pvAlloc) continue; //RTPrintf("debug: i=%d pv=%#x cb=%#zx align=%#zx cbReal=%#zx\n", i, s_aOps[i].pvAlloc, // s_aOps[i].cb, s_aOps[i].uAlignment, RTHeapSimpleSize(Heap, s_aOps[i].pvAlloc)); size_t cbBeforeSub = RTHeapSimpleGetFreeSize(Heap); RTHeapSimpleFree(Heap, s_aOps[i].pvAlloc); size_t cbAfterSubFree = RTHeapSimpleGetFreeSize(Heap); void *pv; pv = RTHeapSimpleAlloc(Heap, s_aOps[i].cb, s_aOps[i].uAlignment); RTTESTI_CHECK_MSG(pv, ("RTHeapSimpleAlloc(%p, %#x, %#x,) -> NULL i=%d\n", (void *)Heap, s_aOps[i].cb, s_aOps[i].uAlignment, i)); if (!pv) return RTTestSummaryAndDestroy(hTest); //RTPrintf("debug: i=%d pv=%p cbReal=%#zx cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx \n", i, pv, RTHeapSimpleSize(Heap, pv), // cbBeforeSub, cbAfterSubFree, RTHeapSimpleGetFreeSize(Heap)); if (pv != s_aOps[i].pvAlloc) RTTestIPrintf(RTTESTLVL_ALWAYS, "Warning: Free+Alloc returned different address. new=%p old=%p i=%d\n", pv, s_aOps[i].pvAlloc, i); s_aOps[i].pvAlloc = pv; size_t cbAfterSubAlloc = RTHeapSimpleGetFreeSize(Heap); if (cbBeforeSub != cbAfterSubAlloc) { RTTestIPrintf(RTTESTLVL_ALWAYS, "Warning: cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx. i=%d\n", cbBeforeSub, cbAfterSubFree, cbAfterSubAlloc, i); //return 1; - won't work correctly until we start creating free block instead of donating memory on alignment. } } /* make a copy of the heap and the to-be-freed list. 
*/ static uint8_t s_abMemCopy[sizeof(s_abMem)]; memcpy(s_abMemCopy, s_abMem, sizeof(s_abMem)); uintptr_t offDelta = (uintptr_t)&s_abMemCopy[0] - (uintptr_t)&s_abMem[0]; RTHEAPSIMPLE hHeapCopy = (RTHEAPSIMPLE)((uintptr_t)Heap + offDelta); static struct TstHeapSimpleOps s_aOpsCopy[RT_ELEMENTS(s_aOps)]; memcpy(&s_aOpsCopy[0], &s_aOps[0], sizeof(s_aOps)); /* free it in a specific order. */ int cFreed = 0; for (i = 0; i < RT_ELEMENTS(s_aOps); i++) { unsigned j; for (j = 0; j < RT_ELEMENTS(s_aOps); j++) { if ( s_aOps[j].iFreeOrder != i || !s_aOps[j].pvAlloc) continue; //RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, RTHeapSimpleGetFreeSize(Heap), s_aOps[j].cb, s_aOps[j].pvAlloc); RTHeapSimpleFree(Heap, s_aOps[j].pvAlloc); s_aOps[j].pvAlloc = NULL; cFreed++; } } RTTESTI_CHECK(cFreed == RT_ELEMENTS(s_aOps)); RTTestIPrintf(RTTESTLVL_ALWAYS, "i=done free=%d\n", RTHeapSimpleGetFreeSize(Heap)); /* check that we're back at the right amount of free memory. */ size_t cbAfter = RTHeapSimpleGetFreeSize(Heap); if (cbBefore != cbAfter) { RTTestIPrintf(RTTESTLVL_ALWAYS, "Warning: Either we've split out an alignment chunk at the start, or we've got\n" " an alloc/free accounting bug: cbBefore=%d cbAfter=%d\n", cbBefore, cbAfter); RTHeapSimpleDump(Heap, (PFNRTHEAPSIMPLEPRINTF)RTPrintf); } /* relocate and free the bits in heap2 now. */ RTTestSub(hTest, "RTHeapSimpleRelocate"); rc = RTHeapSimpleRelocate(hHeapCopy, offDelta); RTTESTI_CHECK_RC(rc, VINF_SUCCESS); if (RT_SUCCESS(rc)) { /* free it in a specific order. 
*/ int cFreed2 = 0; for (i = 0; i < RT_ELEMENTS(s_aOpsCopy); i++) { unsigned j; for (j = 0; j < RT_ELEMENTS(s_aOpsCopy); j++) { if ( s_aOpsCopy[j].iFreeOrder != i || !s_aOpsCopy[j].pvAlloc) continue; //RTPrintf("j=%d i=%d free=%d cb=%d pv=%p\n", j, i, RTHeapSimpleGetFreeSize(hHeapCopy), s_aOpsCopy[j].cb, s_aOpsCopy[j].pvAlloc); RTHeapSimpleFree(hHeapCopy, (uint8_t *)s_aOpsCopy[j].pvAlloc + offDelta); s_aOpsCopy[j].pvAlloc = NULL; cFreed2++; } } RTTESTI_CHECK(cFreed2 == RT_ELEMENTS(s_aOpsCopy)); /* check that we're back at the right amount of free memory. */ size_t cbAfterCopy = RTHeapSimpleGetFreeSize(hHeapCopy); RTTESTI_CHECK_MSG(cbAfterCopy == cbAfter, ("cbAfterCopy=%zu cbAfter=%zu\n", cbAfterCopy, cbAfter)); } return RTTestSummaryAndDestroy(hTest); }