static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
/* __alloc_bootmem...() is protected by !slab_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
					table_size, PAGE_SIZE,
					__pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but
		 * address of memmap may be changed. So, we have to initialize
		 * again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check address of memmap is changed or not. */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
/* __alloc_bootmem...() is protected by !slab_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		VM_BUG_ON(!slab_is_available());
		if (node_state(nid, N_HIGH_MEMORY)) {
			base = kmalloc_node(table_size,
					GFP_KERNEL | __GFP_NOWARN, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
			if (!base)
				base = vmalloc(table_size);
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but
		 * address of memmap may be changed. So, we have to initialize
		 * again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check address of memmap is changed or not. */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
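All three allocators end by calling __init_page_cgroup() on every entry, and the two per-section variants store the table pointer biased by the section's first pfn (base - pfn) so that a later lookup can index it with an absolute pfn. Neither helper appears in this listing; below is a minimal sketch of what the initializer and the matching lookup typically look like in this era of mm/page_cgroup.c, assuming a struct page_cgroup with flags, mem_cgroup, page, and lru fields (the exact layout differs between kernel versions).

/*
 * Sketch only (not copied verbatim from any one tree): initialize a
 * single page_cgroup entry, and map a struct page back to its entry.
 */
static void __meminit __init_page_cgroup(struct page_cgroup *pc,
					 unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	/* also what the re-init path above compares against pfn_to_page() */
	pc->page = pfn_to_page(pfn);
	INIT_LIST_HEAD(&pc->lru);
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	/* section->page_cgroup was stored as (base - start_pfn) */
	return section->page_cgroup + pfn;
}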