/*
 * link_mem_sections - register every memory block backing the pfn range
 * [start_pfn, start_pfn + nr_pages) under the given node in sysfs.
 *
 * @nid:       node id the sections belong to
 * @start_pfn: first pfn of the range
 * @nr_pages:  length of the range in pages
 *
 * Walks the range one section at a time, skipping sections that are not
 * present, and links each distinct memory block to the node exactly once.
 * Returns 0 on success or the first error from
 * register_mem_sect_under_node(); later errors are dropped so the walk
 * still covers the whole range.
 */
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;
	struct memory_block *mem_blk = NULL;
	int err = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *mem_sect;
		int ret;

		if (!present_section_nr(section_nr))
			continue;
		mem_sect = __nr_to_section(section_nr);

		/* same memblock ? */
		/*
		 * A memory block spans several sections; if this section
		 * falls inside the block we registered last iteration,
		 * there is nothing new to link.
		 */
		if (mem_blk)
			if ((section_nr >= mem_blk->start_section_nr) &&
			    (section_nr <= mem_blk->end_section_nr))
				continue;

		/*
		 * The previous mem_blk is passed as a search hint; the
		 * callee takes a reference on the block it returns.
		 * NOTE(review): this relies on find_memory_block_hinted()
		 * dropping the hint's reference internally — confirm
		 * against its definition before changing this loop.
		 */
		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

		ret = register_mem_sect_under_node(mem_blk, nid);
		/* remember only the first failure */
		if (!err)
			err = ret;

		/* discard ref obtained in find_memory_block() */
	}

	/* drop the reference held on the final block, if any */
	if (mem_blk)
		kobject_put(&mem_blk->dev.kobj);
	return err;
}
/*
 * link_mem_sections - register the memory blocks spanning a node's pfn
 * range under that node in sysfs.
 *
 * @nid: node whose entire spanned range (node_start_pfn ..
 *       node_start_pfn + node_spanned_pages) is walked.
 *
 * Walks the node's range one section at a time, skipping non-present
 * sections. Returns 0 on success or the first error reported by
 * register_mem_sect_under_node(); later errors are dropped so the walk
 * covers the whole range.
 */
static int link_mem_sections(int nid)
{
	unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
	unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
	unsigned long pfn;
	struct memory_block *mem_blk = NULL;
	int err = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *mem_sect;
		int ret;

		if (!present_section_nr(section_nr))
			continue;
		mem_sect = __nr_to_section(section_nr);

		/*
		 * The previous block is passed as a search hint; the
		 * callee takes a reference on the block it returns.
		 * NOTE(review): assumes find_memory_block_hinted() drops
		 * the hint's reference internally — confirm against its
		 * definition.
		 */
		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

		ret = register_mem_sect_under_node(mem_blk, nid);
		/* remember only the first failure */
		if (!err)
			err = ret;

		/* discard ref obtained in find_memory_block() */
	}

	/* drop the reference held on the final block, if any */
	if (mem_blk)
		kobject_put(&mem_blk->dev.kobj);
	return err;
}
/*
 * Allocate and initialize the page_cgroup table for the section
 * containing @pfn, placing the allocation on node @nid.
 *
 * Returns 0 if the table already exists or was set up successfully,
 * -ENOMEM if the allocation failed.
 */
static int __init_refok init_section_page_cgroup(unsigned long pfn, int nid)
{
	unsigned long section_nr = pfn_to_section_nr(pfn);
	struct mem_section *section = __nr_to_section(section_nr);
	struct page_cgroup *table;
	unsigned long bytes;
	int i;

	/* Already initialized by an earlier caller. */
	if (section->page_cgroup)
		return 0;

	bytes = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	VM_BUG_ON(!slab_is_available());
	table = alloc_page_cgroup(bytes, nid);
	if (!table) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (i = 0; i < PAGES_PER_SECTION; i++)
		init_page_cgroup(table + i, section_nr);

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = table - pfn;
	total_usage += bytes;
	return 0;
}
/*
 * Map a page_cgroup back to the struct page it describes by locating
 * its section's table and reversing the base - pfn offset encoding.
 */
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	unsigned long section_nr = page_cgroup_array_id(pc);
	struct mem_section *section = __nr_to_section(section_nr);
	struct page *page = pfn_to_page(pc - section->page_cgroup);

	/* Sanity: the forward lookup must land back on @pc. */
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}
/*
 * Resolve the memory_block that contains the given dynamic
 * reconfiguration LMB, going via its base address's section.
 */
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
	struct mem_section *section;
	unsigned long nr;

	nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	section = __nr_to_section(nr);

	return find_memory_block(section);
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long section_nr;

	for (section_nr = 0; section_nr < NR_MEM_SECTIONS; section_nr++) {
		struct page *map;

		if (!valid_section_nr(section_nr))
			continue;

		/* Sections whose mem_map allocation fails are skipped. */
		map = sparse_early_mem_map_alloc(section_nr);
		if (map)
			sparse_init_one_section(__nr_to_section(section_nr),
						section_nr, map);
	}
}
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 *
 * Recovers a section's number from its struct mem_section pointer by
 * scanning the root table for the root whose span contains @ms.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;
		if (ms >= root && ms < root + SECTIONS_PER_ROOT)
			break;
	}

	/* Offset within the matching root plus the root's base number. */
	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
/* Record a memory area against a node. */ void memory_present(int nid, unsigned long start, unsigned long end) { unsigned long pfn; start &= PAGE_SECTION_MASK; for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { unsigned long section = pfn_to_section_nr(pfn); struct mem_section *ms; sparse_index_init(section, nid); ms = __nr_to_section(section); if (!ms->section_mem_map) ms->section_mem_map = sparse_encode_early_nid(nid) | SECTION_MARKED_PRESENT; } }
/*
 * Initialize the sysfs support for memory devices...
 */
/*
 * memory_dev_init - register the memory subsystem and create sysfs
 * entries for all sections that were present at boot.
 *
 * Error handling convention throughout: each step's result lands in
 * err, but only the FIRST failure is kept in ret, so later steps still
 * run and the earliest error is what gets reported.
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;
	unsigned long block_sz;
	struct memory_block *mem = NULL;

	ret = subsys_system_register(&memory_subsys, NULL);
	if (ret)
		goto out;

	/* How many sections one memory_block device covers. */
	block_sz = get_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	for (i = 0; i < NR_MEM_SECTIONS; i++) {
		if (!present_section_nr(i))
			continue;
		/* don't need to reuse memory_block if only one per block */
		/*
		 * &mem carries the last block across iterations so
		 * sections sharing a block reuse its device.
		 */
		err = add_memory_section(0, __nr_to_section(i),
					 (sections_per_block == 1) ?
					 NULL : &mem,
					 MEM_ONLINE, BOOT);
		if (!ret)
			ret = err;
	}

	err = memory_probe_init();
	if (!ret)
		ret = err;
	err = memory_fail_init();
	if (!ret)
		ret = err;
	err = block_size_init();
	if (!ret)
		ret = err;
out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}
static struct page *sparse_early_mem_map_alloc(unsigned long pnum) { struct page *map; struct mem_section *ms = __nr_to_section(pnum); int nid = sparse_early_nid(ms); map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION); if (map) return map; map = alloc_bootmem_node(NODE_DATA(nid), sizeof(struct page) * PAGES_PER_SECTION); if (map) return map; printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__); ms->section_mem_map = 0; return NULL; }
/*
 * Allocate and initialize the page_cgroup table for the section
 * containing @pfn, placing the allocation on node @nid.
 *
 * Returns 0 if the table already exists or was set up successfully,
 * -ENOMEM if the allocation failed.
 */
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct page_cgroup *base, *pc;
	struct mem_section *section;
	unsigned long table_size;
	unsigned long nr;
	int index;

	nr = pfn_to_section_nr(pfn);
	section = __nr_to_section(nr);

	/* Already initialized by an earlier caller. */
	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	/*
	 * NOTE(review): called before the NULL check — assumes
	 * kmemleak_not_leak() tolerates a NULL argument; confirm against
	 * the kmemleak API before reordering.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		init_page_cgroup(pc, nr);
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
/*
 * Create one memory_block device covering the sections_per_block
 * sections starting at @base_section_nr, if any of them are present.
 *
 * Returns 0 when no section is present (nothing to do) or on success,
 * otherwise the error from init_memory_block().
 */
static int add_memory_block(int base_section_nr)
{
	struct memory_block *mem;
	int nr, ret;
	int section_count = 0, first_section_nr = 0;

	/* Count present sections and remember the first one. */
	for (nr = base_section_nr;
	     nr < base_section_nr + sections_per_block && nr < NR_MEM_SECTIONS;
	     nr++) {
		if (!present_section_nr(nr))
			continue;
		if (!section_count)
			first_section_nr = nr;
		section_count++;
	}

	if (!section_count)
		return 0;

	ret = init_memory_block(&mem, __nr_to_section(first_section_nr),
				MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}
ret = err; err = block_size_init(); if (!ret) ret = err; out: if (ret) printk(KERN_ERR "%s() failed: %d\n", __func__, ret); return ret; } ng boot and have been initialized */ for (i = 0; i < NR_MEM_SECTIONS; i++) { if (!present_section_nr(i)) continue; /* don't need to reuse memory_block if only one per block */ err = add_memory_section(0, __nr_to_section(i), (sections_per_block == 1) ? NULL : &mem, MEM_ONLINE, BOOT); if (!ret) ret = err; } err = memory_probe_init(); if (!ret) ret = err; err = memory_fail_init(); if (!ret) ret = err; err = block_size_init(); if (!ret)