/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;

        /* While initializing the mem_map, align the hot-added range to sections. */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(zone, i << PFN_SECTION_SHIFT);

                /*
                 * -EEXIST is finally dealt with by the io resource collision
                 * check; see add_memory() => register_memory_resource().
                 * A warning is printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
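/*
 * Illustrative sketch, not taken from this source: one way an
 * architecture could call __add_pages() after picking a zone, as the
 * comment above describes.  The arch_add_memory() shape and the choice
 * of ZONE_NORMAL on the target node are assumptions made for this
 * old zone-based hotplug API, not a definitive implementation.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        /* Hand the decided zone and the pfn range to the generic code. */
        return __add_pages(zone, start_pfn, nr_pages);
}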
static int link_mem_sections(int nid)
{
        unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
        unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
        unsigned long pfn;
        struct memory_block *mem_blk = NULL;
        int err = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *mem_sect;
                int ret;

                if (!present_section_nr(section_nr))
                        continue;
                mem_sect = __nr_to_section(section_nr);
                mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
                ret = register_mem_sect_under_node(mem_blk, nid);
                if (!err)
                        err = ret;

                /* discard ref obtained in find_memory_block() */
        }

        if (mem_blk)
                kobject_put(&mem_blk->dev.kobj);
        return err;
}
/*
 * The probe routines leave the pages reserved, just as the bootmem code does.
 * Make sure they're still that way.
 */
static bool pages_correctly_reserved(unsigned long start_pfn,
                                     unsigned long nr_pages)
{
        int i, j;
        struct page *page;
        unsigned long pfn = start_pfn;

        /*
         * The memmap between sections is not contiguous except with
         * SPARSEMEM_VMEMMAP.  We look up the page once per section
         * and assume the memmap is contiguous within each section.
         */
        for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
                if (WARN_ON_ONCE(!pfn_valid(pfn)))
                        return false;
                page = pfn_to_page(pfn);

                for (j = 0; j < PAGES_PER_SECTION; j++) {
                        if (PageReserved(page + j))
                                continue;

                        printk(KERN_WARNING "section number %ld page number %d "
                               "not reserved, was it already online?\n",
                               pfn_to_section_nr(pfn), j);
                        return false;
                }
        }

        return true;
}
static int __init_refok init_section_page_cgroup(unsigned long pfn, int nid)
{
        struct page_cgroup *base, *pc;
        struct mem_section *section;
        unsigned long table_size;
        unsigned long nr;
        int index;

        nr = pfn_to_section_nr(pfn);
        section = __nr_to_section(nr);

        if (section->page_cgroup)
                return 0;

        table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
        VM_BUG_ON(!slab_is_available());

        base = alloc_page_cgroup(table_size, nid);
        if (!base) {
                printk(KERN_ERR "page cgroup allocation failure\n");
                return -ENOMEM;
        }

        for (index = 0; index < PAGES_PER_SECTION; index++) {
                pc = base + index;
                init_page_cgroup(pc, nr);
        }

        /*
         * The passed "pfn" may not be aligned to a section boundary.
         * Mask it so the stored base is offset from the section start.
         */
        pfn &= PAGE_SECTION_MASK;
        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long pfn;
        struct memory_block *mem_blk = NULL;
        int err = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *mem_sect;
                int ret;

                if (!present_section_nr(section_nr))
                        continue;

                mem_sect = __nr_to_section(section_nr);

                /* still within the same memory block? */
                if (mem_blk)
                        if ((section_nr >= mem_blk->start_section_nr) &&
                            (section_nr <= mem_blk->end_section_nr))
                                continue;

                mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

                ret = register_mem_sect_under_node(mem_blk, nid);
                if (!err)
                        err = ret;

                /* discard ref obtained in find_memory_block() */
        }

        if (mem_blk)
                kobject_put(&mem_blk->dev.kobj);
        return err;
}
/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long flags;
        int ret;

        /*
         * Done without the resize lock: sparse_index_init() does its
         * own locking, and it may kmalloc().
         */
        sparse_index_init(section_nr, pgdat->node_id);
        memmap = __kmalloc_section_memmap(nr_pages);

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }
        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0)
                __kfree_section_memmap(memmap, nr_pages);
        return ret;
}
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);

                if (!mem_section[section].section_mem_map)
                        mem_section[section].section_mem_map =
                                                SECTION_MARKED_PRESENT;
        }
}
/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int sparse_add_one_section(unsigned long start_pfn, int nr_pages,
                           struct page *map)
{
        struct mem_section *ms = __pfn_to_section(start_pfn);

        if (ms->section_mem_map & SECTION_MARKED_PRESENT)
                return -EEXIST;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
}
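/*
 * Illustrative sketch, not taken from this source: a hypothetical caller
 * honouring the contract documented above -- when sparse_add_one_section()
 * returns <= 0, the passed-in map was not consumed and the caller must
 * free it.  The function name and the vmalloc()-based allocation are
 * assumptions made for the example.
 */
static int add_hotplug_section(unsigned long start_pfn, int nr_pages)
{
        struct page *map;
        int ret;

        map = vmalloc(sizeof(struct page) * nr_pages);
        if (!map)
                return -ENOMEM;
        memset(map, 0, sizeof(struct page) * nr_pages);

        ret = sparse_add_one_section(start_pfn, nr_pages, map);
        if (ret <= 0)
                vfree(map);     /* map was not consumed; free it here */

        return ret;
}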
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
        unsigned long section_nr;
        struct mem_section *mem_sect;
        struct memory_block *mem_block;

        section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
        mem_sect = __nr_to_section(section_nr);

        mem_block = find_memory_block(mem_sect);

        return mem_block;
}
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}
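/*
 * Illustrative sketch, not taken from this source: how boot code can walk
 * the present physical ranges and record each one against its node with
 * memory_present().  Iterating over memblock via for_each_mem_pfn_range()
 * is an assumption here; an architecture may use its own range table.
 */
static void __init mark_present_sections(void)
{
        unsigned long start_pfn, end_pfn;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
                memory_present(nid, start_pfn, end_pfn);
}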
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
        struct page_cgroup *base, *pc;
        struct mem_section *section;
        unsigned long table_size;
        unsigned long nr;
        int index;

        nr = pfn_to_section_nr(pfn);
        section = __nr_to_section(nr);

        if (section->page_cgroup)
                return 0;

        table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
        base = alloc_page_cgroup(table_size, nid);

        /*
         * The value stored in section->page_cgroup is (base - pfn)
         * and it does not point to the memory block allocated above,
         * causing kmemleak false positives.
         */
        kmemleak_not_leak(base);

        if (!base) {
                printk(KERN_ERR "page cgroup allocation failure\n");
                return -ENOMEM;
        }

        for (index = 0; index < PAGES_PER_SECTION; index++) {
                pc = base + index;
                init_page_cgroup(pc, nr);
        }

        /*
         * The passed "pfn" may not be aligned to a section boundary.
         * Mask it so the stored base is offset from the section start.
         */
        pfn &= PAGE_SECTION_MASK;
        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
}