/*
 * Hot-add the physical range [start, start + size) to node @nid:
 * grow the hash page table, map the range into the kernel linear
 * mapping, then hand the new pages to the core mm.
 *
 * Returns 0 on success, -EFAULT if the linear mapping could not be
 * created, or the error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long first_pfn = start >> PAGE_SHIFT;
	unsigned long page_count = size >> PAGE_SHIFT;
	struct pglist_data *pgdat;
	struct zone *target_zone;
	int err;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	pgdat = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	err = create_section_mapping(start, start + size);
	if (err) {
		pr_warning("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			   start, start + size, err);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	target_zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, target_zone, first_pfn, page_count);
}
/*
 * This works only for the non-NUMA case. Later, we'll need a lookup
 * to convert from real physical addresses to nid, that doesn't use
 * pfn_to_nid().
 */
/*
 * Hot-add the physical range [start, start + size) on node 0: map it
 * into the kernel linear mapping and register its pages with the core
 * mm via __add_pages(), whose status is returned to the caller.
 */
int __devinit add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(0);
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	start += KERNELBASE;
	/*
	 * NOTE(review): create_section_mapping()'s return value is
	 * ignored here, so a mapping failure is not reported to the
	 * caller — verify whether this best-effort behavior is intended.
	 */
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
	/* removed unreachable "return 0;" that followed the return above */
}
/*
 * Hot-add the physical range [start, start + size) to node @nid: map
 * it into the kernel linear mapping and register its pages with the
 * core mm.
 *
 * Returns 0 on success, -EINVAL if the mapping could not be created,
 * or the error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	unsigned long base_pfn = start >> PAGE_SHIFT;
	unsigned long npages = size >> PAGE_SHIFT;
	struct pglist_data *node_data = NODE_DATA(nid);
	struct zone *zone;

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = node_data->node_zones;

	return __add_pages(nid, zone, base_pfn, npages);
}
/*
 * Hot-add the physical range [start, start + size) to node @nid:
 * grow the hash page table, map the range into the kernel linear
 * mapping, flush/invalidate the data cache over it, then hand the
 * pages to the core mm.
 *
 * Returns 0 on success, -EFAULT if the linear mapping could not be
 * created, or the error from __add_pages().
 */
int __meminit arch_add_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap, bool want_memblock)
{
	unsigned long first_pfn = start >> PAGE_SHIFT;
	unsigned long page_count = size >> PAGE_SHIFT;
	int ret;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	ret = create_section_mapping(start, start + size, nid);
	if (ret) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, ret);
		return -EFAULT;
	}

	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, first_pfn, page_count, altmap, want_memblock);
}