/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * no locking for this, because it does its own
	 * plus, it does a kmalloc
	 */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = __kmalloc_section_memmap(nr_pages);

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}
	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
	return ret;
}
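To make the calling convention concrete, here is a minimal sketch of a hotplug-style caller of this variant. The function name example_add_section(), its error handling, and the use of PAGES_PER_SECTION as the section size are illustrative assumptions, not code from the excerpt above.

static int example_add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int ret;

	/* this variant allocates and installs the section's mem_map itself */
	ret = sparse_add_one_section(zone, phys_start_pfn, PAGES_PER_SECTION);
	if (ret < 0)
		return ret;	/* e.g. -EEXIST if the section is already present */

	/* a real caller would now grow the zone/node spans and online the pages */
	return 0;
}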
/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(unsigned long start_pfn, int nr_pages,
			   struct page *map)
{
	struct mem_section *ms = __pfn_to_section(start_pfn);

	if (ms->section_mem_map & SECTION_MARKED_PRESENT)
		return -EEXIST;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
}
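In this older variant the mem_map is passed in by the caller, so the "<=0 means the map was not consumed" rule is the caller's responsibility. A minimal sketch of that ownership contract follows; alloc_section_memmap() and free_section_memmap() are hypothetical placeholder helpers standing in for whatever allocator the caller actually uses.

static int example_add_section(unsigned long start_pfn, int nr_pages)
{
	struct page *map = alloc_section_memmap(nr_pages);	/* hypothetical allocator */
	int ret;

	if (!map)
		return -ENOMEM;

	ret = sparse_add_one_section(start_pfn, nr_pages, map);
	if (ret <= 0)
		free_section_memmap(map, nr_pages);	/* map not consumed: caller must free it */

	return ret;
}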
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!valid_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (map)
			sparse_init_one_section(&mem_section[pnum], pnum, map);
	}
}
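The "record the physical to section mapping" step is performed by sparse_init_one_section(), which is not shown above. The sketch below reconstructs its likely shape, assuming the usual sparsemem encoding in which the stored value is the mem_map pointer biased by the section's first pfn; the name example_init_one_section() and the details are an assumption for illustration, not code from the excerpt.

static int example_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map)
{
	if (!valid_section(ms))
		return -EINVAL;	/* not consumed: matches the <=0 rule above */

	/* bias mem_map by the section's first pfn so pfn_to_page() is a plain addition */
	ms->section_mem_map |= (unsigned long)(mem_map - section_nr_to_pfn(pnum));

	return 1;	/* one section's mem_map was set */
}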