void __init initmem_init(void)
{
        x86_numa_init();

#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));
        printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
                        max_low_pfn, highstart_pfn);

        printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(max_low_pfn));
        printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(highstart_pfn));

        __vmalloc_start_set = true;

        setup_bootmem_allocator();
}
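To make the lowmem/highmem boundary arithmetic concrete, here is a minimal user-space sketch of the same calculation. It assumes a 4 KiB page size (PAGE_SHIFT = 12) and made-up pfn values; pages_to_mb() in the kernel is just a right shift by (20 - PAGE_SHIFT).

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* Same arithmetic as the kernel's pages_to_mb(): pages -> megabytes. */
#define pages_to_mb(x)  ((x) >> (20 - PAGE_SHIFT))

int main(void)
{
        /* Hypothetical layout: 2 GiB of RAM, 896 MiB of it in lowmem. */
        unsigned long max_pfn = 0x80000;        /* 2 GiB / 4 KiB   */
        unsigned long max_low_pfn = 0x38000;    /* 896 MiB / 4 KiB */
        unsigned long highstart_pfn, highend_pfn;

        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;

        printf("%luMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
        printf("%luMB LOWMEM available.\n", pages_to_mb(max_low_pfn));
        return 0;
}

With these values it prints 1152MB of HIGHMEM and 896MB of LOWMEM, mirroring the two KERN_NOTICE lines above.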
static void __init setup_memory(void)
{
        unsigned long start_pfn;

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(_end));

        setup_bootmem_allocator(start_pfn);
}
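The rounding the comment refers to is done by PFN_UP(), which the kernel defines essentially as in the stand-alone sketch below; the end-of-image address here is a made-up value for illustration.

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* Round an address up to the next page frame number, as PFN_UP() does. */
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
/* Round down, for comparison, as PFN_DOWN() does. */
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

int main(void)
{
        /* Pretend the kernel image ends 100 bytes into page 0x1c8. */
        unsigned long end = (0x1c8UL << PAGE_SHIFT) + 100;

        /* The partially used page 0x1c8 is skipped; usable memory
         * starts at pfn 0x1c9. */
        printf("PFN_UP   = %#lx\n", PFN_UP(end));       /* 0x1c9 */
        printf("PFN_DOWN = %#lx\n", PFN_DOWN(end));     /* 0x1c8 */
        return 0;
}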
/*
 * On SH machines the conventional approach is to stash system RAM
 * in node 0, and other memory blocks into node 1 and up, ordered by
 * latency. Each node's pgdat is node-local at the beginning of the node,
 * immediately followed by the node mem map.
 */
void __init setup_memory(void)
{
        unsigned long free_pfn = PFN_UP(__pa(_end));

        /*
         * Node 0 sets up its pgdat at the first available pfn,
         * and bumps it up before setting up the bootmem allocator.
         */
        NODE_DATA(0) = pfn_to_kaddr(free_pfn);
        memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
        free_pfn += PFN_UP(sizeof(struct pglist_data));
        NODE_DATA(0)->bdata = &bootmem_node_data[0];

        /* Set up node 0 */
        setup_bootmem_allocator(free_pfn);

        /* Give the platforms a chance to hook up their nodes */
        plat_mem_setup();
}
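Bumping free_pfn amounts to reserving enough whole pages to hold node 0's struct pglist_data before the bootmem allocator takes over. A minimal sketch of that arithmetic, with an assumed (hypothetical) pglist_data size of a few KiB:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
        unsigned long free_pfn = 0x1c9;    /* first pfn past the kernel image */
        unsigned long pgdat_size = 6400;   /* assumed sizeof(struct pglist_data) */

        /* The pgdat occupies PFN_UP(6400) = 2 whole pages here... */
        free_pfn += PFN_UP(pgdat_size);

        /* ...so the bootmem allocator starts two pages later, at 0x1cb. */
        printf("bootmem starts at pfn %#lx\n", free_pfn);
        return 0;
}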
/*
 * On SH machines the conventional approach is to stash system RAM
 * in node 0, and other memory blocks into node 1 and up, ordered by
 * latency. Each node's pgdat is node-local at the beginning of the node,
 * immediately followed by the node mem map.
 */
void __init setup_memory(void)
{
        unsigned long free_pfn = PFN_UP(__pa(_end));
        u64 base = min_low_pfn << PAGE_SHIFT;
        u64 size = (max_low_pfn << PAGE_SHIFT) - base;

        lmb_add(base, size);

        /* Reserve the LMB regions used by the kernel, initrd, etc. */
        lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                    (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
                lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

        lmb_analyze();
        lmb_dump_all();

        /*
         * Node 0 sets up its pgdat at the first available pfn,
         * and bumps it up before setting up the bootmem allocator.
         */
        NODE_DATA(0) = pfn_to_kaddr(free_pfn);
        memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
        free_pfn += PFN_UP(sizeof(struct pglist_data));
        NODE_DATA(0)->bdata = &bootmem_node_data[0];

        /* Set up node 0 */
        setup_bootmem_allocator(free_pfn);

        /* Give the platforms a chance to hook up their nodes */
        plat_mem_setup();
}
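The first lmb_reserve() call covers everything from the zero-page offset up to the page-aligned end of the kernel image, and the second protects the pages below CONFIG_ZERO_PAGE_OFFSET. Here is a user-space sketch of that size calculation; the values for __MEMORY_START, CONFIG_ZERO_PAGE_OFFSET and free_pfn are made up for illustration.

#include <stdio.h>

#define PAGE_SHIFT              12
#define PAGE_SIZE               (1ULL << PAGE_SHIFT)
#define PFN_PHYS(x)             ((unsigned long long)(x) << PAGE_SHIFT)

#define MEMORY_START            0x08000000ULL  /* assumed __MEMORY_START          */
#define ZERO_PAGE_OFFSET        0x1000ULL      /* assumed CONFIG_ZERO_PAGE_OFFSET */

int main(void)
{
        unsigned long long free_pfn = 0x81c9;  /* first pfn past the kernel image */
        unsigned long long start, size;

        /* Kernel/initrd reservation: from the zero page offset up to the
         * end of the page containing PFN_PHYS(free_pfn), as above. */
        start = MEMORY_START + ZERO_PAGE_OFFSET;
        size  = (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) - start;
        printf("reserve [%#llx, %#llx)\n", start, start + size);

        /* The region below the zero page offset is reserved separately. */
        printf("reserve [%#llx, %#llx)\n", MEMORY_START,
               MEMORY_START + ZERO_PAGE_OFFSET);
        return 0;
}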