struct page *alloc_pages(int order)
{
	lock(&memory_lock);
	struct page *result = __alloc_pages(order, NT_HIGH);
	unlock(&memory_lock);
	return result;
}
struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
{
#ifdef CONFIG_NUMA
	return __alloc_pages(NODE_DATA(nid)->node_zonelists + gfp_mask, order);
#else
	return alloc_pages(gfp_mask, order);
#endif
}
struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order)
{
#ifdef CONFIG_NUMA
	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
#else
	return alloc_pages(gfp_mask, order);
#endif
}
static struct page *alloc_page_table(pte_t flags)
{
	struct page *page = (flags & PTE_LOW) ? __alloc_pages(0, NT_LOW)
					      : alloc_pages(0);

	if (page) {
		memset(va(page_paddr(page)), 0, PAGE_SIZE);
		page->u.refcount = 0;
	}
	return page;
}
static struct page * alloc_pages_pgdat(pg_data_t *pgdat, int gfp_mask, unsigned long order)
{
	return __alloc_pages(pgdat->node_zonelists + gfp_mask, order);
}
/**
 * @brief Allocate n contiguous pages.
 *
 * @return
 *    NULL if n contiguous pages could not be found,
 *    a pointer to the first allocated page otherwise
 */
struct page *alloc_pages(unsigned long n)
{
	TRACE("n=%lu", n);
	ASSERT_NOTEQUALS(n, 0);

	return __alloc_pages(n, zones);
}
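/*
 * A minimal usage sketch for the alloc_pages() variant documented above,
 * assuming only the behaviour stated in its comment (NULL on failure,
 * pointer to the first page on success). The caller name, the page count,
 * and the placeholder release step are illustrative assumptions, not part
 * of the original listing.
 */
static struct page *example_grab_four_pages(void)
{
	struct page *pages = alloc_pages(4);	/* ask for 4 contiguous pages */

	if (!pages)				/* no contiguous run of 4 pages was found */
		return NULL;

	/* ... use the pages, then release them with the codebase's free routine ... */
	return pages;
}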
T* alloc_pages(unsigned nr_pages)
{
	return reinterpret_cast<T*>(__alloc_pages(nr_pages));
}
static struct page * alloc_pages_pgdat(pg_data_t *pgdat, unsigned int gfp_mask, unsigned int order)
{
	return __alloc_pages(gfp_mask, order,
		pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}