/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
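/*
 * Illustrative caller sketch only -- pcpu_example_alloc() is a hypothetical
 * helper, not part of the file above.  It shows the intended usage pattern:
 * allocate one area per possible CPU via pcpu_alloc_bootmem() and seed it
 * from the static per-cpu template.  The PAGE_SIZE alignment and the copy
 * from __per_cpu_load are assumptions for the sketch, and the surrounding
 * headers of the original file are assumed to be in scope.
 */
static int __init pcpu_example_alloc(size_t static_size)
{
	unsigned int cpu;
	void *ptr;

	for_each_possible_cpu(cpu) {
		ptr = pcpu_alloc_bootmem(cpu, static_size, PAGE_SIZE);
		if (!ptr)
			return -ENOMEM;	/* a real caller would unwind and fall back */
		/* copy the static per-cpu section into the new area */
		memcpy(ptr, __per_cpu_load, static_size);
	}
	return 0;
}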
/*
 * Allocate one struct page_cgroup per page spanned by node @nid,
 * initialize each entry, and record the table in the node's pg_data_t
 * (flat, non-sparsemem layout).
 */
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;
	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
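/*
 * Sketch of the per-entry initializer called above and in the sparsemem
 * path below.  The exact fields of struct page_cgroup differ between
 * kernel versions, so treat this as an assumption rather than a verbatim
 * copy of __init_page_cgroup().
 */
static void __meminit __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
	INIT_LIST_HEAD(&pc->lru);
}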
/* __alloc_bootmem...() is protected by !slab_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed, so we have to
		 * initialize again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
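/*
 * For context, a sketch of how the sparsemem path above is driven at boot:
 * walk the pfn range one section at a time and call
 * init_section_page_cgroup() for each present section.  This approximates
 * page_cgroup_init(); the mem_cgroup_disabled() check and the error
 * handling vary by kernel version and are assumptions here.
 */
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_disabled())
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail)
		panic("Out of memory, try the 'cgroup_disable=memory' boot option");
}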
/*
 * Allocate one struct page_cgroup per page spanned by node @nid and
 * record the table in the node's pg_data_t (flat, non-sparsemem layout).
 */
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
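/*
 * Sketch of the matching flat-table lookup side: the table hung off the
 * node's pg_data_t above is indexed by the pfn offset from the node's
 * first pfn.  Approximation of the non-sparsemem lookup_page_cgroup(),
 * not a verbatim copy.
 */
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}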