/*
 * need an interface for the VM to add new memory regions,
 * but without onlining it.
 */
/*
 * register_new_memory - create (or reuse) the memory_block device backing
 * @section, starting it in the MEM_OFFLINE state.
 *
 * @nid:     node id the section belongs to
 * @section: the mem_section being added
 *
 * Returns 0 on success or a negative error from init_memory_block() /
 * register_mem_sect_under_node().  Serialized by mem_sysfs_mutex.
 */
int register_new_memory(int nid, struct mem_section *section)
{
	int ret = 0;
	struct memory_block *mem;

	/* ZONE_DEVICE sections get no sysfs memory_block representation. */
	if (is_zone_device_section(section))
		return 0;

	mutex_lock(&mem_sysfs_mutex);

	mem = find_memory_block(section);
	if (mem) {
		mem->section_count++;
		/* presumably balances a reference taken inside
		 * find_memory_block() — TODO confirm against its definition */
		put_device(&mem->dev);
	} else {
		ret = init_memory_block(&mem, section, MEM_OFFLINE);
		if (ret)
			goto out;
		mem->section_count++;
	}

	/* Link under the node only once every section of the block is in. */
	if (mem->section_count == sections_per_block)
		ret = register_mem_sect_under_node(mem, nid);
out:
	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}
/*
 * Entry point: set up the memory block, then run the menu loop.
 * Option 5 exits; any other selection is dispatched to execute_choice().
 */
int main()
{
	init_memory_block();

	for (;;) {
		int selection = take_user_input();

		if (selection == 5)
			break;

		execute_choice(selection);
	}

	return 0;
}
/*
 * add_memory_section - account @section to a memory_block, creating the
 * block if it does not exist yet.
 *
 * @nid:     node id for sysfs node linkage (HOTPLUG only)
 * @section: the mem_section being added
 * @mem_p:   in/out cache of the previously used block; during BOOT,
 *           consecutive sections of the same block reuse *mem_p instead
 *           of a lookup, and a newly created block is stored back for the
 *           caller's next iteration
 * @state:   initial state for a newly created block (e.g. MEM_ONLINE)
 * @context: BOOT or HOTPLUG
 *
 * Returns 0 on success or a negative error.  Serialized by
 * mem_sysfs_mutex.
 */
static int add_memory_section(int nid, struct mem_section *section,
			struct memory_block **mem_p,
			unsigned long state, enum mem_add_context context)
{
	struct memory_block *mem = NULL;
	int scn_nr = __section_nr(section);
	int ret = 0;

	mutex_lock(&mem_sysfs_mutex);

	if (context == BOOT) {
		/* same memory block ? */
		if (mem_p && *mem_p)
			if (scn_nr >= (*mem_p)->start_section_nr &&
			    scn_nr <= (*mem_p)->end_section_nr) {
				mem = *mem_p;
				/* extra ref, dropped by the kobject_put()
				 * in the "found" branch below */
				kobject_get(&mem->dev.kobj);
			}
	} else
		mem = find_memory_block(section);

	if (mem) {
		mem->section_count++;
		/* drops the ref from kobject_get()/find_memory_block() */
		kobject_put(&mem->dev.kobj);
	} else {
		ret = init_memory_block(&mem, section, state);
		/* store memory_block pointer for next loop */
		if (!ret && context == BOOT)
			if (mem_p)
				*mem_p = mem;
	}

	if (!ret) {
		/* node linkage happens once the block is fully populated;
		 * boot-time linkage is handled elsewhere */
		if (context == HOTPLUG &&
		    mem->section_count == sections_per_block)
			ret = register_mem_sect_under_node(mem, nid);
	}

	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}
/*
 * add_memory_block - create a memory_block device covering the present
 * sections of the block that starts at @base_section_nr.
 *
 * Scans the sections_per_block sections from @base_section_nr, counts the
 * present ones, and registers one memory_block (in MEM_ONLINE state) keyed
 * on the first present section.  A block with no present sections is
 * silently skipped.
 *
 * Returns 0 on success (or nothing to do), negative error from
 * init_memory_block() otherwise.
 *
 * Fix: section_nr was previously uninitialized; it is only assigned when a
 * present section is found.  The section_count == 0 guard keeps the read
 * safe, but the pattern trips -Wmaybe-uninitialized and breaks the moment
 * the guard is refactored — initialize it explicitly.
 */
static int add_memory_block(int base_section_nr)
{
	struct memory_block *mem;
	int ret, section_count = 0;
	int section_nr = -1;	/* first present section; -1 = none found */
	int i;

	for (i = base_section_nr;
	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
	     i++) {
		if (!present_section_nr(i))
			continue;
		if (section_count == 0)
			section_nr = i;
		section_count++;
	}

	/* No present sections in this block: nothing to register. */
	if (section_count == 0)
		return 0;

	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
	if (ret)
		return ret;

	mem->section_count = section_count;
	return 0;
}
static int add_memory_section(int nid, struct mem_section *section, unsigned long state, enum mem_add_context context) { struct memory_block *mem; int ret = 0; mutex_lock(&mem_sysfs_mutex); mem = find_memory_block(section); if (mem) { mem->section_count++; kobject_put(&mem->dev.kobj); } else ret = init_memory_block(&mem, section, state); if (!ret) { if (context == HOTPLUG && mem->section_count == sections_per_block) ret = register_mem_sect_under_node(mem, nid); } mutex_unlock(&mem_sysfs_mutex); return ret; }