/*
 * m32r discontigmem bring-up: one bootmem instance per online node,
 * driven by the mem_prof[] memory profiles.
 */
unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long min_pfn;
	int nid;
	mem_prof_t *mp;

	max_low_pfn = 0;
	min_low_pfn = -1;

	mem_prof_init();

	for_each_online_node(nid) {
		mp = &mem_prof[nid];
		NODE_DATA(nid) = (pg_data_t *)&m32r_node_data[nid];
		NODE_DATA(nid)->bdata = &node_bdata[nid];
		min_pfn = mp->start_pfn;
		max_pfn = mp->start_pfn + mp->pages;
		bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
						 mp->start_pfn, max_pfn);

		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
				  PFN_PHYS(mp->pages));

		reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
				     PFN_PHYS(mp->free_pfn - mp->start_pfn)
				     + bootmap_size);

		if (max_low_pfn < max_pfn)
			max_low_pfn = max_pfn;

		if (min_low_pfn > min_pfn)
			min_low_pfn = min_pfn;
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START,
					     INITRD_SIZE);
			initrd_start = INITRD_START ?
				INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start + INITRD_SIZE;
			printk("initrd:start[%08lx],size[%08lx]\n",
			       initrd_start, INITRD_SIZE);
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       INITRD_START + INITRD_SIZE,
			       PFN_PHYS(max_low_pfn));
			initrd_start = 0;
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	return max_low_pfn;
}
/*
 * SuperH NUMA (lmb-era): the pgdat and bootmap are allocated out of the
 * node's own range via lmb_alloc_base(), bounded at the node's end.
 */
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages;
	unsigned long start_pfn, end_pfn;
	unsigned long bootmem_paddr;

	/* Don't allow bogus node assignment */
	BUG_ON(nid > MAX_NUMNODES || nid <= 0);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
			 PAGE_KERNEL);

	lmb_add(start, end - start);

	__add_active_range(nid, start_pfn, end_pfn);

	/* Node-local pgdat */
	NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES, end));
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
				       PAGE_SIZE, end);
	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
			  start_pfn, end_pfn);
	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
/*
 * x86-64: route an early reservation to the node that owns it and
 * account for pages taken out of the DMA zone.
 */
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= end_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables.
		 */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
		       phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif

	if (phys + len <= MAX_DMA_PFN * PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}
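/*
 * Note on the tail of reserve_bootmem_generic() above: pages reserved
 * below the 16 MB boundary (MAX_DMA_PFN on x86-64) are counted into
 * dma_reserve and reported via set_dma_reserve(), so zone initialisation
 * can discount them when sizing ZONE_DMA's watermarks. Worked example
 * with illustrative numbers: reserving a 64 KiB firmware table with
 * 4 KiB pages adds 64 KiB / 4 KiB = 16 pages to dma_reserve.
 */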
/*
 * Earlier SuperH variant of setup_bootmem_node(): instead of lmb, the
 * pgdat and bootmap are carved straight off the front of the node's
 * range, with free_pfn advanced past each allocation.
 */
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages, bootmap_start, bootmap_size;
	unsigned long start_pfn, free_pfn, end_pfn;

	/* Don't allow bogus node assignment */
	BUG_ON(nid > MAX_NUMNODES || nid == 0);

	/*
	 * The free pfn starts at the beginning of the range, and is
	 * advanced as necessary for pgdat and node map allocations.
	 */
	free_pfn = start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	__add_active_range(nid, start_pfn, end_pfn);

	/* Node-local pgdat */
	NODE_DATA(nid) = pfn_to_kaddr(free_pfn);
	free_pfn += PFN_UP(sizeof(struct pglist_data));
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn);
	bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn,
					 start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
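/*
 * Worked example of the free_pfn carve-out above (illustrative numbers,
 * assuming 4 KiB pages): for a node starting at PFN 0x10000 and a
 * struct pglist_data of, say, 0x1480 bytes, PFN_UP(0x1480) == 2, so the
 * pgdat occupies PFNs 0x10000-0x10001 and the bootmem bitmap is placed
 * at PFN 0x10002; these are exactly the two ranges that the
 * reserve_bootmem_node() calls take back out of the free space.
 */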
static void __init arm_bootmem_init(struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				     __pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem(bank_phys_start(bank),
				     bank_phys_size(bank));
	}

	/*
	 * Reserve the memblock reserved regions in bootmem.
	 */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
		if (start >= start_pfn &&
		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
				memblock_size_bytes(&memblock.reserved, i),
				BOOTMEM_DEFAULT);
	}
}
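/*
 * Sizing sketch for the boot_pages/bootmap_pages computations used
 * throughout this section. The bootmem bitmap tracks one bit per page
 * frame, rounded up to whole pages. example_bootmap_pages() below is a
 * hypothetical stand-in for bootmem_bootmap_pages(), written out so the
 * arithmetic is visible; it assumes the usual one-bit-per-page layout.
 */
static unsigned long __init example_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;	/* one bit per page frame */

	/* round the byte count up to whole pages */
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}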
/*
 * Older SuperH setup_arch(): machine-vector probing plus optional
 * two-node DISCONTIGMEM bootmem setup.
 */
void __init setup_arch(char **cmdline_p)
{
#if defined(CONFIG_SH_GENERIC) || defined(CONFIG_SH_UNKNOWN)
	extern struct sh_machine_vector mv_unknown;
#endif
	struct sh_machine_vector *mv = NULL;
	char mv_name[MV_NAME_SIZE] = "";
	unsigned long mv_io_base = 0;
	int mv_mmio_enable = 0;
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_SH_EARLY_PRINTK
	sh_console_init();
#endif

	ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext)-1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata)-1;

	parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base, &mv_mmio_enable);

#ifdef CONFIG_CMDLINE_BOOL
	sprintf(*cmdline_p, CONFIG_CMDLINE);
#endif

#ifdef CONFIG_SH_GENERIC
	if (mv == NULL) {
		mv = &mv_unknown;
		if (*mv_name != '\0') {
			printk("Warning: Unsupported machine %s, using unknown\n",
			       mv_name);
		}
	}
	sh_mv = *mv;
#endif
#ifdef CONFIG_SH_UNKNOWN
	sh_mv = mv_unknown;
#endif

#if defined(CONFIG_SH_GENERIC) || defined(CONFIG_SH_UNKNOWN)
	if (mv_io_base != 0) {
		sh_mv.mv_inb = generic_inb;
		sh_mv.mv_inw = generic_inw;
		sh_mv.mv_inl = generic_inl;
		sh_mv.mv_outb = generic_outb;
		sh_mv.mv_outw = generic_outw;
		sh_mv.mv_outl = generic_outl;
		sh_mv.mv_inb_p = generic_inb_p;
		sh_mv.mv_inw_p = generic_inw_p;
		sh_mv.mv_inl_p = generic_inl_p;
		sh_mv.mv_outb_p = generic_outb_p;
		sh_mv.mv_outw_p = generic_outw_p;
		sh_mv.mv_outl_p = generic_outl_p;
		sh_mv.mv_insb = generic_insb;
		sh_mv.mv_insw = generic_insw;
		sh_mv.mv_insl = generic_insl;
		sh_mv.mv_outsb = generic_outsb;
		sh_mv.mv_outsw = generic_outsw;
		sh_mv.mv_outsl = generic_outsl;
		sh_mv.mv_isa_port2addr = generic_isa_port2addr;
		generic_io_base = mv_io_base;
	}
	if (mv_mmio_enable != 0) {
		sh_mv.mv_readb = generic_readb;
		sh_mv.mv_readw = generic_readw;
		sh_mv.mv_readl = generic_readl;
		sh_mv.mv_writeb = generic_writeb;
		sh_mv.mv_writew = generic_writew;
		sh_mv.mv_writel = generic_writel;
	}
#endif

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

#ifdef CONFIG_DISCONTIGMEM
	NODE_DATA(0)->bdata = &discontig_node_bdata[0];
	NODE_DATA(1)->bdata = &discontig_node_bdata[1];
	bootmap_size = init_bootmem_node(NODE_DATA(1),
					 PFN_UP(__MEMORY_START_2ND),
					 PFN_UP(__MEMORY_START_2ND),
					 PFN_DOWN(__MEMORY_START_2ND+__MEMORY_SIZE_2ND));
	free_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, __MEMORY_SIZE_2ND);
	reserve_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, bootmap_size);
#endif

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 __MEMORY_START>>PAGE_SHIFT,
					 max_low_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn, last_pfn, pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__MEMORY_START);

		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
				  PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this in
	 * two steps (the first step was init_bootmem()), because this
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * Reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem_node(NODE_DATA(0),
					     INITRD_START+__MEMORY_START,
					     INITRD_SIZE);
			initrd_start = INITRD_START ?
				INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       INITRD_START + INITRD_SIZE,
			       max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

#if 0
	/*
	 * Request the standard RAM and ROM resources -
	 * they eat up PCI memory space
	 */
	request_resource(&iomem_resource, ram_resources+0);
	request_resource(&iomem_resource, ram_resources+1);
	request_resource(&iomem_resource, ram_resources+2);
	request_resource(ram_resources+1, &code_resource);
	request_resource(ram_resources+1, &data_resource);
	probe_roms();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_arch != NULL) {
		sh_mv.mv_init_arch();
	}

#if defined(__SH4__)
	init_task.used_math = 0;
	init_task.flags &= ~PF_USEDFPU;
#endif

#ifdef CONFIG_UBC_WAKEUP
	/*
	 * Some brain-damaged loaders decided it would be a good idea to put
	 * the UBC to sleep. This causes some issues when it comes to things
	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
	 * we wake it up and hope that all is well.
	 */
	ubc_wakeup();
#endif

	paging_init();
}
/*
 * Newer SuperH setup_arch(): single-node bootmem, with detection of a
 * kernel-embedded ramdisk between __rd_start and __rd_end.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_EARLY_PRINTK
	extern void enable_early_printk(void);

	enable_early_printk();
#endif
#ifdef CONFIG_CMDLINE_BOOL
	strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_bus(_text);
	code_resource.end = virt_to_bus(_etext)-1;
	data_resource.start = virt_to_bus(_etext);
	data_resource.end = virt_to_bus(_edata)-1;

	sh_mv_setup(cmdline_p);

#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 __MEMORY_START>>PAGE_SHIFT,
					 max_low_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	{
		unsigned long curr_pfn, last_pfn, pages;

		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(__MEMORY_START);

		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(__pa(memory_end));

		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;

		pages = last_pfn - curr_pfn;
		free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
				  PFN_PHYS(pages));
	}

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this in
	 * two steps (the first step was init_bootmem()), because this
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * Reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
			       __MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end -
			      (unsigned long)&__rd_start;
	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem_node(NODE_DATA(0),
					     INITRD_START+__MEMORY_START,
					     INITRD_SIZE);
			initrd_start = INITRD_START ?
				INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       INITRD_START + INITRD_SIZE,
			       max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	platform_setup();

	paging_init();
}
void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;

	if (parse_numa_properties())
		setup_nonnuma();

	for (nid = 0; nid < numnodes; nid++) {
		unsigned long start_paddr, end_paddr;
		int i;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		if (node_data[nid].node_spanned_pages == 0)
			continue;

		start_paddr = node_data[nid].node_start_pfn * PAGE_SIZE;
		end_paddr = start_paddr +
			    (node_data[nid].node_spanned_pages * PAGE_SIZE);

		dbg("node %d\n", nid);
		dbg("start_paddr = %lx\n", start_paddr);
		dbg("end_paddr = %lx\n", end_paddr);

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];

		bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr)
						      >> PAGE_SHIFT);
		dbg("bootmap_pages = %lx\n", bootmap_pages);

		bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
					       PAGE_SIZE, end_paddr);
		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_paddr >> PAGE_SHIFT,
				  end_paddr >> PAGE_SHIFT);

		for (i = 0; i < lmb.memory.cnt; i++) {
			unsigned long physbase, size;
			unsigned long type = lmb.memory.region[i].type;

			if (type != LMB_MEMORY_AREA)
				continue;

			physbase = lmb.memory.region[i].physbase;
			size = lmb.memory.region[i].size;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - start_paddr)
					size = end_paddr - start_paddr;

				dbg("free_bootmem %lx %lx\n", physbase, size);
				free_bootmem_node(NODE_DATA(nid), physbase,
						  size);
			}
		}

		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].physbase;
			unsigned long size = lmb.reserved.region[i].size;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - start_paddr)
					size = end_paddr - start_paddr;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}
	}
}
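/*
 * A minimal sketch of the bring-up protocol that every variant in this
 * section follows. It is not taken from any one port above: nid and the
 * PFN arguments are placeholders, and the older three-argument
 * reserve_bootmem_node() form is used (newer kernels add a
 * BOOTMEM_DEFAULT flags argument).
 *
 *   1. init_bootmem_node()    - install the bitmap; every page starts
 *                               out reserved
 *   2. free_bootmem_node()    - open up the RAM that is actually usable
 *   3. reserve_bootmem_node() - take back whatever is already in use
 *                               (the bitmap itself, the pgdat, kernel
 *                               image, initrd, firmware pages)
 */
static void __init example_node_bringup(int nid, unsigned long start_pfn,
					unsigned long end_pfn,
					unsigned long map_pfn)
{
	unsigned long bootmap_size;

	bootmap_size = init_bootmem_node(NODE_DATA(nid), map_pfn,
					 start_pfn, end_pfn);

	free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start_pfn),
			  PFN_PHYS(end_pfn - start_pfn));

	reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(map_pfn), bootmap_size);
}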