/**
 * Allocates physical contiguous memory (below 4GB).
 * The allocation is page aligned and the content is undefined.
 *
 * @returns Pointer to the memory block. This is page aligned.
 *          NULL on allocation failure.
 * @param   pPhys   Where to store the physical address.
 * @param   cb      The allocation size in bytes. This is always
 *                  rounded up to PAGE_SIZE.
 */
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
{
    int cOrder;
    unsigned cPages;
    struct page *paPages;
    void *pvRet;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * validate input.
     */
    Assert(VALID_PTR(pPhys));
    Assert(cb > 0);

    /*
     * Allocate page pointer array.
     */
    /* Round the byte count up to whole pages and derive the buddy-allocator
       order from the page count. */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    cPages = cb >> PAGE_SHIFT;
    cOrder = CalcPowerOf2Order(cPages);
    /* Zone fallback chain. NOTE(review): the 'if (!paPages)' below deliberately
       dangles across the preprocessor boundary so that, when the DMA32 attempt
       is compiled in and succeeds, the second alloc_pages() call is skipped. */
#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    paPages = alloc_pages(GFP_DMA32 | __GFP_NOWARN, cOrder);
    if (!paPages)
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA; 0-16MB */
        paPages = alloc_pages(GFP_DMA | __GFP_NOWARN, cOrder);
#else
        /* ZONE_NORMAL: 0-896MB */
        paPages = alloc_pages(GFP_USER | __GFP_NOWARN, cOrder);
#endif
    if (paPages)
    {
        /*
         * Reserve the pages and mark them executable.
         */
        unsigned iPage;
        for (iPage = 0; iPage < cPages; iPage++)
        {
            /* The chosen zones are all low memory, so the pages must have a
               kernel linear-map address (phys_to_virt below relies on this). */
            Assert(!PageHighMem(&paPages[iPage]));
            if (iPage + 1 < cPages)
            {
                /* Sanity: pages from one alloc_pages() call must be contiguous
                   both physically and in the kernel's linear mapping. */
                AssertMsg(  (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage])) + PAGE_SIZE
                         == (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage + 1]))
                         &&    page_to_phys(&paPages[iPage]) + PAGE_SIZE
                            == page_to_phys(&paPages[iPage + 1]),
                          ("iPage=%i cPages=%u [0]=%#llx,%p [1]=%#llx,%p\n", iPage, cPages,
                           (long long)page_to_phys(&paPages[iPage]), phys_to_virt(page_to_phys(&paPages[iPage])),
                           (long long)page_to_phys(&paPages[iPage + 1]), phys_to_virt(page_to_phys(&paPages[iPage + 1])) ));
            }
            /* Reserved pages are left alone by the kernel's swapping/migration. */
            SetPageReserved(&paPages[iPage]);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
            MY_SET_PAGES_EXEC(&paPages[iPage], 1);
#endif
        }
        /* Return both the physical base and its linear-map virtual address. */
        *pPhys = page_to_phys(paPages);
        pvRet = phys_to_virt(page_to_phys(paPages));
    }
    else
        pvRet = NULL;
    IPRT_LINUX_RESTORE_EFL_AC();
    return pvRet;
}
/** * Internal worker that allocates physical pages and creates the memory object for them. * * @returns IPRT status code. * @param ppMemLnx Where to store the memory object pointer. * @param enmType The object type. * @param cb The number of bytes to allocate. * @param uAlignment The alignment of the phyiscal memory. * Only valid if fContiguous == true, ignored otherwise. * @param fFlagsLnx The page allocation flags (GPFs). * @param fContiguous Whether the allocation must be contiguous. */ static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb, size_t uAlignment, unsigned fFlagsLnx, bool fContiguous) { size_t iPage; size_t const cPages = cb >> PAGE_SHIFT; struct page *paPages; /* * Allocate a memory object structure that's large enough to contain * the page pointer array. */ PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb); if (!pMemLnx) return VERR_NO_MEMORY; pMemLnx->cPages = cPages; /* * Allocate the pages. * For small allocations we'll try contiguous first and then fall back on page by page. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22) if ( fContiguous || cb <= PAGE_SIZE * 2) { # ifdef VBOX_USE_INSERT_PAGE paPages = alloc_pages(fFlagsLnx | __GFP_COMP, rtR0MemObjLinuxOrder(cPages)); # else paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages)); # endif if (paPages) { fContiguous = true; for (iPage = 0; iPage < cPages; iPage++) pMemLnx->apPages[iPage] = &paPages[iPage]; } else if (fContiguous) { rtR0MemObjDelete(&pMemLnx->Core); return VERR_NO_MEMORY; } } if (!fContiguous) { for (iPage = 0; iPage < cPages; iPage++) { pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx); if (RT_UNLIKELY(!pMemLnx->apPages[iPage])) { while (iPage-- > 0) __free_page(pMemLnx->apPages[iPage]); rtR0MemObjDelete(&pMemLnx->Core); return VERR_NO_MEMORY; } } } #else /* < 2.4.22 */ /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... 
*/ paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages)); if (!paPages) { rtR0MemObjDelete(&pMemLnx->Core); return VERR_NO_MEMORY; } for (iPage = 0; iPage < cPages; iPage++) { pMemLnx->apPages[iPage] = &paPages[iPage]; MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1); if (PageHighMem(pMemLnx->apPages[iPage])) BUG(); } fContiguous = true; #endif /* < 2.4.22 */ pMemLnx->fContiguous = fContiguous; /* * Reserve the pages. */ for (iPage = 0; iPage < cPages; iPage++) SetPageReserved(pMemLnx->apPages[iPage]); /* * Note that the physical address of memory allocated with alloc_pages(flags, order) * is always 2^(PAGE_SHIFT+order)-aligned. */ if ( fContiguous && uAlignment > PAGE_SIZE) { /* * Check for alignment constraints. The physical address of memory allocated with * alloc_pages(flags, order) is always 2^(PAGE_SHIFT+order)-aligned. */ if (RT_UNLIKELY(page_to_phys(pMemLnx->apPages[0]) & ~(uAlignment - 1))) { /* * This should never happen! */ printk("rtR0MemObjLinuxAllocPages(cb=%ld, uAlignment=%ld): alloc_pages(..., %d) returned physical memory at %lu!\n", (unsigned long)cb, (unsigned long)uAlignment, rtR0MemObjLinuxOrder(cPages), (unsigned long)page_to_phys(pMemLnx->apPages[0])); rtR0MemObjLinuxFreePages(pMemLnx); return VERR_NO_MEMORY; } } *ppMemLnx = pMemLnx; return VINF_SUCCESS; }