unsigned long __init setup_memory(void)
{
    unsigned long bootmap_size;
    unsigned long min_pfn;
    int nid;
    mem_prof_t *mp;

    max_low_pfn = 0;
    min_low_pfn = -1;

    mem_prof_init();

    for_each_online_node(nid) {
        mp = &mem_prof[nid];
        NODE_DATA(nid) = (pg_data_t *)&m32r_node_data[nid];
        NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
        min_pfn = mp->start_pfn;
        max_pfn = mp->start_pfn + mp->pages;
        bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
                mp->start_pfn, max_pfn);

        free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                PFN_PHYS(mp->pages));

        reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size,
                BOOTMEM_DEFAULT);

        if (max_low_pfn < max_pfn)
            max_low_pfn = max_pfn;

        if (min_low_pfn > min_pfn)
            min_low_pfn = min_pfn;
    }

#ifdef CONFIG_BLK_DEV_INITRD
    if (LOADER_TYPE && INITRD_START) {
        if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
            reserve_bootmem_node(NODE_DATA(0), INITRD_START,
                    INITRD_SIZE, BOOTMEM_DEFAULT);
            initrd_start = INITRD_START + PAGE_OFFSET;
            initrd_end = initrd_start + INITRD_SIZE;
            printk("initrd:start[%08lx],size[%08lx]\n",
                    initrd_start, INITRD_SIZE);
        } else {
            printk("initrd extends beyond end of memory "
                    "(0x%08lx > 0x%08llx)\ndisabling initrd\n",
                    INITRD_START + INITRD_SIZE,
                    (unsigned long long)PFN_PHYS(max_low_pfn));
            initrd_start = 0;
        }
    }
#endif /* CONFIG_BLK_DEV_INITRD */

    return max_low_pfn;
}
// start_pfn : physical small-page frame number of the start address of bank 0 (0x20000)
// end_pfn   : physical small-page frame number of the last address of bank 0 (0x4f800)
static void __init arm_bootmem_init(unsigned long start_pfn,
    unsigned long end_pfn)
{
    struct memblock_region *reg;
    unsigned int boot_pages;
    phys_addr_t bitmap;
    pg_data_t *pgdat;    // pg_data_t : struct pglist_data

    /*
     * Allocate the bootmem bitmap page. This must be in a region
     * of memory which has already been mapped.
     */
    boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
    // boot_pages : 6
    // returns the number of page frames needed to hold a bitmap covering
    // start_pfn ~ end_pfn

    // boot_pages << PAGE_SHIFT : 0x6000, L1_CACHE_BYTES : 64, __pfn_to_phys(end_pfn) : 0x4F800000
    bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
            __pfn_to_phys(end_pfn));
    // carves a region for the bitmap out of non-reserved memory
    // and returns its (physical) start address

    /*
     * Initialise the bootmem allocator, handing the
     * memory banks over to bootmem.
     */
    node_set_online(0);    // an empty function in this configuration
    pgdat = NODE_DATA(0);
    // *pgdat = contig_page_data
    //
    // .bdata = &bootmem_node_data[0]
    // still zero at this point
    //
    init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
    // registers the node on bdata_list
    // initializes the bitmap to 0xFF (everything reserved)

    /* Free the lowmem regions from memblock into bootmem. */
    for_each_memblock(memory, reg) {
        // for (reg = memblock.memory.regions;
        //      reg < (memblock.memory.regions + memblock.memory.cnt); reg++)
        unsigned long start = memblock_region_memory_base_pfn(reg);
        unsigned long end = memblock_region_memory_end_pfn(reg);
        // start : 0x20000
        // end   : 0xA0000

        if (end >= end_pfn)
            end = end_pfn;
        if (start >= end)
            break;

        // __pfn_to_phys(start) : 0x20000000, (end - start) << PAGE_SHIFT : 0x2F800000
        free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
        // clears the bitmap bits covering start through end,
        // i.e. marks everything FREE for now
    }
// ARM10C 20131207
// min: 0x20000, max_low: 0x4f800
static void __init arm_bootmem_init(unsigned long start_pfn,
    unsigned long end_pfn)
{
    struct memblock_region *reg;
    unsigned int boot_pages;
    phys_addr_t bitmap;
    pg_data_t *pgdat;

    /*
     * Allocate the bootmem bitmap page. This must be in a region
     * of memory which has already been mapped.
     */
    // start_pfn: 0x20000, end_pfn: 0x4f800, end_pfn - start_pfn: 0x2f800
    // boot_pages: 0x6
    boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
    // boot_pages << PAGE_SHIFT: 0x6000, L1_CACHE_BYTES: 64
    // __pfn_to_phys(0x4f800): 0x4f800000
    bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
            __pfn_to_phys(end_pfn));

    /*
     * Initialise the bootmem allocator, handing the
     * memory banks over to bootmem.
     */
    node_set_online(0);

    // pglist_data.bdata is set to the address of bootmem_node_data
    pgdat = NODE_DATA(0);
    // pgdat: ?, __phys_to_pfn(bitmap): ?, start_pfn: 0x20000, end_pfn: 0x4f800
    init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

    /* Free the lowmem regions from memblock into bootmem. */
    for_each_memblock(memory, reg) {
        // start: 0x20000
        unsigned long start = memblock_region_memory_base_pfn(reg);
        // end: 0xA0000
        unsigned long end = memblock_region_memory_end_pfn(reg);

        // end: 0xA0000, end_pfn: 0x4f800
        if (end >= end_pfn)
            // end: 0x4f800
            end = end_pfn;
        // start: 0x20000, end: 0x4f800
        if (start >= end)
            break;

        // __pfn_to_phys(0x20000): 0x20000000, (end - start) << PAGE_SHIFT: 0x2f800000
        free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
    }
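The boot_pages value of 6 asserted in the two annotated examples above comes from sizing a bitmap with one bit per page frame and rounding up to whole pages. The stand-alone sketch below reproduces that arithmetic for the 0x20000-0x4f800 range; it is plain user-space C with a made-up helper name, not the kernel's bootmem_bootmap_pages() implementation.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical helper: pages needed for a one-bit-per-frame bitmap. */
static unsigned long bitmap_pages_needed(unsigned long nr_frames)
{
    unsigned long bytes = (nr_frames + 7) / 8;          /* bits -> bytes */
    return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;       /* bytes -> pages, rounded up */
}

int main(void)
{
    /* 0x4f800 - 0x20000 = 0x2f800 frames -> 0x5f00 bytes -> 6 pages */
    printf("boot_pages = %lu\n", bitmap_pages_needed(0x4f800 - 0x20000));
    return 0;
}

0x2f800 frames need 0x5f00 bytes of bitmap, which rounds up to six 4 KiB pages, matching the "boot_pages : 6" annotation.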
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
    unsigned long bootmap_pages, bootmap_start, bootmap_size;
    unsigned long start_pfn, free_pfn, end_pfn;

    /* Don't allow bogus node assignment */
    BUG_ON(nid > MAX_NUMNODES || nid == 0);

    /*
     * The free pfn starts at the beginning of the range, and is
     * advanced as necessary for pgdat and node map allocations.
     */
    free_pfn = start_pfn = start >> PAGE_SHIFT;
    end_pfn = end >> PAGE_SHIFT;

    __add_active_range(nid, start_pfn, end_pfn);

    /* Node-local pgdat */
    NODE_DATA(nid) = pfn_to_kaddr(free_pfn);
    free_pfn += PFN_UP(sizeof(struct pglist_data));
    memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

    NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
    NODE_DATA(nid)->node_start_pfn = start_pfn;
    NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

    /* Node-local bootmap */
    bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
    bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn);
    bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn,
            end_pfn);

    free_bootmem_with_active_regions(nid, end_pfn);

    /* Reserve the pgdat and bootmap space with the bootmem allocator */
    reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
            sizeof(struct pglist_data), BOOTMEM_DEFAULT);
    reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
            bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

    /* It's up */
    node_set_online(nid);

    /* Kick sparsemem */
    sparse_memory_present_with_active_regions(nid);
}
static void __init bootmem_init(void)
{
    unsigned long start_pfn, bootmap_size;
    unsigned long size = initrd_end - initrd_start;

    start_pfn = PFN_UP(__pa(&_end));
    min_low_pfn = PFN_UP(MEMORY_START);
    max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);

    /* Initialize the boot-time allocator with low memory only. */
    bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
            min_low_pfn, max_low_pfn);
    add_active_range(0, min_low_pfn, max_low_pfn);

    free_bootmem(PFN_PHYS(start_pfn),
            (max_low_pfn - start_pfn) << PAGE_SHIFT);
    memory_present(0, start_pfn, max_low_pfn);

    /* Reserve space for the bootmem bitmap. */
    reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);

    if (size == 0) {
        printk(KERN_INFO "Initrd not found or empty");
        goto disable;
    }

    if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
        printk(KERN_ERR "Initrd extends beyond end of memory");
        goto disable;
    }

    /* Reserve space for the initrd bitmap. */
    reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
    initrd_below_start_ok = 1;

    pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
            initrd_start, size);
    return;

disable:
    printk(KERN_CONT " - disabling initrd\n");
    initrd_start = 0;
    initrd_end = 0;
}
static void __init arm_bootmem_init(struct meminfo *mi,
    unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned int boot_pages;
    phys_addr_t bitmap;
    pg_data_t *pgdat;
    int i;

    /*
     * Allocate the bootmem bitmap page. This must be in a region
     * of memory which has already been mapped.
     */
    boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
    bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
            __pfn_to_phys(end_pfn));

    /*
     * Initialise the bootmem allocator, handing the
     * memory banks over to bootmem.
     */
    node_set_online(0);
    pgdat = NODE_DATA(0);
    init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

    for_each_bank(i, mi) {
        struct membank *bank = &mi->bank[i];
        if (!bank->highmem)
            free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
    }

    /*
     * Reserve the memblock reserved regions in bootmem.
     */
    for (i = 0; i < memblock.reserved.cnt; i++) {
        phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
        if (start >= start_pfn &&
            memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
            reserve_bootmem_node(pgdat, __pfn_to_phys(start),
                    memblock_size_bytes(&memblock.reserved, i),
                    BOOTMEM_DEFAULT);
    }
}
static void __init arm_bootmem_init(unsigned long start_pfn,
    unsigned long end_pfn)
{
    struct memblock_region *reg;
    unsigned int boot_pages;
    phys_addr_t bitmap;
    pg_data_t *pgdat;

    /*
     * Allocate the bootmem bitmap page. This must be in a region
     * of memory which has already been mapped.
     */
    boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
    bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
            __pfn_to_phys(end_pfn));

    /*
     * Initialise the bootmem allocator, handing the
     * memory banks over to bootmem.
     */
    node_set_online(0);
    pgdat = NODE_DATA(0);
    init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

    /* Free the lowmem regions from memblock into bootmem. */
    for_each_memblock(memory, reg) {
        unsigned long start = memblock_region_memory_base_pfn(reg);
        unsigned long end = memblock_region_memory_end_pfn(reg);

        if (end >= end_pfn)
            end = end_pfn;
        if (start >= end)
            break;

        free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
    }
static void __init bootmem_init(void)
{
    unsigned long reserved_end;
    unsigned long mapstart = ~0UL;
    unsigned long bootmap_size;
    int i;

    /*
     * Init any data related to initrd. It's a nop if INITRD is
     * not selected. Once that done we can determine the low bound
     * of usable memory.
     */
    reserved_end = max(init_initrd(),
            (unsigned long) PFN_UP(__pa_symbol(&_end)));

    /*
     * max_low_pfn is not a number of pages. The number of pages
     * of the system is given by 'max_low_pfn - min_low_pfn'.
     */
    min_low_pfn = ~0UL;
    max_low_pfn = 0;

    /*
     * Find the highest page frame number we have available.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size);

        if (end > max_low_pfn)
            max_low_pfn = end;
        if (start < min_low_pfn)
            min_low_pfn = start;
        if (end <= reserved_end)
            continue;
        if (start >= mapstart)
            continue;
        mapstart = max(reserved_end, start);
    }

    if (min_low_pfn >= max_low_pfn)
        panic("Incorrect memory mapping !!!");
    if (min_low_pfn > ARCH_PFN_OFFSET) {
        pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
                min_low_pfn - ARCH_PFN_OFFSET);
    } else if (min_low_pfn < ARCH_PFN_OFFSET) {
        pr_info("%lu free pages won't be used\n",
                ARCH_PFN_OFFSET - min_low_pfn);
    }
    min_low_pfn = ARCH_PFN_OFFSET;

    /*
     * Determine low and high memory ranges
     */
    max_pfn = max_low_pfn;
    if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
        highstart_pfn = PFN_DOWN(HIGHMEM_START);
        highend_pfn = max_low_pfn;
#endif
        max_low_pfn = PFN_DOWN(HIGHMEM_START);
    }

    /*
     * Initialize the boot-time allocator with low memory only.
     */
    bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
            min_low_pfn, max_low_pfn);

    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size);

        if (start <= min_low_pfn)
            start = min_low_pfn;
        if (start >= end)
            continue;

#ifndef CONFIG_HIGHMEM
        if (end > max_low_pfn)
            end = max_low_pfn;

        /*
         * ... finally, is the area going away?
         */
        if (end <= start)
            continue;
#endif

        add_active_range(0, start, end);
    }

    /*
     * Register fully available low RAM pages with the bootmem allocator.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end, size;

        /*
         * Reserve usable memory.
         */
        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size);

        /*
         * We are rounding up the start address of usable memory
         * and at the end of the usable range downwards.
         */
        if (start >= max_low_pfn)
            continue;
        if (start < reserved_end)
            start = reserved_end;
        if (end > max_low_pfn)
            end = max_low_pfn;

        /*
         * ... finally, is the area going away?
         */
        if (end <= start)
            continue;
        size = end - start;

        /* Register lowmem ranges */
#ifdef CONFIG_BRCMSTB
        /* carve out space for bmem */
        brcm_free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
#else
        free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
#endif
    }

    /*
     * Reserve the bootmap memory.
     */
    reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

    /*
     * Reserve initrd memory if needed.
     */
    finalize_initrd();

    /*
     * Call memory_present() on all valid ranges, for SPARSEMEM.
     * This must be done after setting up bootmem, since memory_present()
     * may allocate bootmem.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size);
        memory_present(0, start, end);
    }
}
void __init setup_arch(char **cmdline_p)
{
#if defined(CONFIG_SH_GENERIC) || defined(CONFIG_SH_UNKNOWN)
    extern struct sh_machine_vector mv_unknown;
#endif
    struct sh_machine_vector *mv = NULL;
    char mv_name[MV_NAME_SIZE] = "";
    unsigned long mv_io_base = 0;
    int mv_mmio_enable = 0;
    unsigned long bootmap_size;
    unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_SH_EARLY_PRINTK
    sh_console_init();
#endif

    ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
    rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
    rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

    if (!MOUNT_ROOT_RDONLY)
        root_mountflags &= ~MS_RDONLY;
    init_mm.start_code = (unsigned long)&_text;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) &_end;

    code_resource.start = virt_to_bus(&_text);
    code_resource.end = virt_to_bus(&_etext)-1;
    data_resource.start = virt_to_bus(&_etext);
    data_resource.end = virt_to_bus(&_edata)-1;

    parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base, &mv_mmio_enable);

#ifdef CONFIG_CMDLINE_BOOL
    sprintf(*cmdline_p, CONFIG_CMDLINE);
#endif

#ifdef CONFIG_SH_GENERIC
    if (mv == NULL) {
        mv = &mv_unknown;
        if (*mv_name != '\0') {
            printk("Warning: Unsupported machine %s, using unknown\n",
                    mv_name);
        }
    }
    sh_mv = *mv;
#endif
#ifdef CONFIG_SH_UNKNOWN
    sh_mv = mv_unknown;
#endif

#if defined(CONFIG_SH_GENERIC) || defined(CONFIG_SH_UNKNOWN)
    if (mv_io_base != 0) {
        sh_mv.mv_inb = generic_inb;
        sh_mv.mv_inw = generic_inw;
        sh_mv.mv_inl = generic_inl;
        sh_mv.mv_outb = generic_outb;
        sh_mv.mv_outw = generic_outw;
        sh_mv.mv_outl = generic_outl;
        sh_mv.mv_inb_p = generic_inb_p;
        sh_mv.mv_inw_p = generic_inw_p;
        sh_mv.mv_inl_p = generic_inl_p;
        sh_mv.mv_outb_p = generic_outb_p;
        sh_mv.mv_outw_p = generic_outw_p;
        sh_mv.mv_outl_p = generic_outl_p;
        sh_mv.mv_insb = generic_insb;
        sh_mv.mv_insw = generic_insw;
        sh_mv.mv_insl = generic_insl;
        sh_mv.mv_outsb = generic_outsb;
        sh_mv.mv_outsw = generic_outsw;
        sh_mv.mv_outsl = generic_outsl;
        sh_mv.mv_isa_port2addr = generic_isa_port2addr;
        generic_io_base = mv_io_base;
    }
    if (mv_mmio_enable != 0) {
        sh_mv.mv_readb = generic_readb;
        sh_mv.mv_readw = generic_readw;
        sh_mv.mv_readl = generic_readl;
        sh_mv.mv_writeb = generic_writeb;
        sh_mv.mv_writew = generic_writew;
        sh_mv.mv_writel = generic_writel;
    }
#endif

#define PFN_UP(x)   (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

#ifdef CONFIG_DISCONTIGMEM
    NODE_DATA(0)->bdata = &discontig_node_bdata[0];
    NODE_DATA(1)->bdata = &discontig_node_bdata[1];

    bootmap_size = init_bootmem_node(NODE_DATA(1),
            PFN_UP(__MEMORY_START_2ND),
            PFN_UP(__MEMORY_START_2ND),
            PFN_DOWN(__MEMORY_START_2ND+__MEMORY_SIZE_2ND));
    free_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, __MEMORY_SIZE_2ND);
    reserve_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, bootmap_size);
#endif

    /*
     * Find the highest page frame number we have available
     */
    max_pfn = PFN_DOWN(__pa(memory_end));

    /*
     * Determine low and high memory ranges:
     */
    max_low_pfn = max_pfn;

    /*
     * Partially used pages are not usable - thus
     * we are rounding upwards:
     */
    start_pfn = PFN_UP(__pa(&_end));

    /*
     * Find a proper area for the bootmem bitmap. After this
     * bootstrap step all allocations (until the page allocator
     * is intact) must be done via bootmem_alloc().
     */
    bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
            __MEMORY_START>>PAGE_SHIFT, max_low_pfn);

    /*
     * Register fully available low RAM pages with the bootmem allocator.
     */
    {
        unsigned long curr_pfn, last_pfn, pages;

        /*
         * We are rounding up the start address of usable memory:
         */
        curr_pfn = PFN_UP(__MEMORY_START);

        /*
         * ... and at the end of the usable range downwards:
         */
        last_pfn = PFN_DOWN(__pa(memory_end));

        if (last_pfn > max_low_pfn)
            last_pfn = max_low_pfn;

        pages = last_pfn - curr_pfn;
        free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
                PFN_PHYS(pages));
    }

    /*
     * Reserve the kernel text and
     * Reserve the bootmem bitmap. We do this in two steps (first step
     * was init_bootmem()), because this catches the (definitely buggy)
     * case of us accidentally initializing the bootmem allocator with
     * an invalid RAM area.
     */
    reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
            (PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

    /*
     * reserve physical page 0 - it's a special BIOS page on many boxes,
     * enabling clean reboots, SMP operation, laptop functions.
     */
    reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
    if (LOADER_TYPE && INITRD_START) {
        if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
            reserve_bootmem_node(NODE_DATA(0),
                    INITRD_START+__MEMORY_START, INITRD_SIZE);
            initrd_start = INITRD_START ?
                    INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
            initrd_end = initrd_start + INITRD_SIZE;
        } else {
            printk("initrd extends beyond end of memory "
                    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                    INITRD_START + INITRD_SIZE,
                    max_low_pfn << PAGE_SHIFT);
            initrd_start = 0;
        }
    }
#endif

#if 0
    /*
     * Request the standard RAM and ROM resources -
     * they eat up PCI memory space
     */
    request_resource(&iomem_resource, ram_resources+0);
    request_resource(&iomem_resource, ram_resources+1);
    request_resource(&iomem_resource, ram_resources+2);
    request_resource(ram_resources+1, &code_resource);
    request_resource(ram_resources+1, &data_resource);
    probe_roms();

    /* request I/O space for devices used on all i[345]86 PCs */
    for (i = 0; i < STANDARD_IO_RESOURCES; i++)
        request_resource(&ioport_resource, standard_io_resources+i);
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
    conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
#endif
#endif

    /* Perform the machine specific initialisation */
    if (sh_mv.mv_init_arch != NULL) {
        sh_mv.mv_init_arch();
    }

#if defined(__SH4__)
    init_task.used_math = 0;
    init_task.flags &= ~PF_USEDFPU;
#endif

#ifdef CONFIG_UBC_WAKEUP
    /*
     * Some brain-damaged loaders decided it would be a good idea to put
     * the UBC to sleep. This causes some issues when it comes to things
     * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
     * we wake it up and hope that all is well.
     */
    ubc_wakeup();
#endif

    paging_init();
}
void __init setup_arch(char **cmdline_p)
{
    int bootmap_size;

    memory_start = PAGE_ALIGN(_ramstart);
    memory_end = _ramend;

    init_mm.start_code = (unsigned long) &_stext;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) 0;

    config_BSP(&command_line[0], sizeof(command_line));

#if defined(CONFIG_BOOTPARAM)
    strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line));
    command_line[sizeof(command_line) - 1] = 0;
#endif /* CONFIG_BOOTPARAM */

    process_uboot_commandline(&command_line[0], sizeof(command_line));

    pr_info("uClinux with CPU " CPU_NAME "\n");

#ifdef CONFIG_UCDIMM
    pr_info("uCdimm by Lineo, Inc. <www.lineo.com>\n");
#endif
#ifdef CONFIG_M68VZ328
    pr_info("M68VZ328 support by Evan Stawnyczy <*****@*****.**>\n");
#endif
#ifdef CONFIG_COLDFIRE
    pr_info("COLDFIRE port done by Greg Ungerer, [email protected]\n");
#ifdef CONFIG_M5307
    pr_info("Modified for M5307 by Dave Miller, [email protected]\n");
#endif
#ifdef CONFIG_ELITE
    pr_info("Modified for M5206eLITE by Rob Scott, [email protected]\n");
#endif
#endif
    pr_info("Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");

#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 )
    pr_info("TRG SuperPilot FLASH card support <*****@*****.**>\n");
#endif
#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 )
    pr_info("PalmV support by Lineo Inc. <*****@*****.**>\n");
#endif
#ifdef CONFIG_DRAGEN2
    pr_info("DragonEngine II board support by Georges Menie\n");
#endif
#ifdef CONFIG_M5235EVB
    pr_info("Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif

    pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
            _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
    pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
            __bss_stop, memory_start, memory_start, memory_end);

    /* Keep a copy of command line */
    *cmdline_p = &command_line[0];
    memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
    boot_command_line[COMMAND_LINE_SIZE-1] = 0;

#if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
#endif

    /*
     * Give all the memory to the bootmap allocator, tell it to put the
     * boot mem_map at the start of memory.
     */
    min_low_pfn = PFN_DOWN(memory_start);
    max_pfn = max_low_pfn = PFN_DOWN(memory_end);

    bootmap_size = init_bootmem_node(
            NODE_DATA(0),
            min_low_pfn,        /* map goes here */
            PFN_DOWN(PAGE_OFFSET),
            max_pfn);
    /*
     * Free the usable memory, we have to make sure we do not free
     * the bootmem bitmap so we then reserve it after freeing it :-)
     */
    free_bootmem(memory_start, memory_end - memory_start);
    reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);

#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
    if ((initrd_start > 0) && (initrd_start < initrd_end) &&
            (initrd_end < memory_end))
        reserve_bootmem(initrd_start, initrd_end - initrd_start,
                BOOTMEM_DEFAULT);
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */

    /*
     * Get kmalloc into gear.
     */
    paging_init();
}
void __init setup_arch(char **cmdline_p)
{
    extern void init_etrax_debug(void);
    unsigned long bootmap_size;
    unsigned long start_pfn, max_pfn;
    unsigned long memory_start;

    /* register an initial console printing routine for printk's */
    init_etrax_debug();

    /* we should really poll for DRAM size! */
    high_memory = &dram_end;

    if (romfs_in_flash || !romfs_length) {
        /* if we have the romfs in flash, or if there is no rom filesystem,
         * our free area starts directly after the BSS
         */
        memory_start = (unsigned long) &_end;
    } else {
        /* otherwise the free area starts after the ROM filesystem */
        printk("ROM fs in RAM, size %lu bytes\n", romfs_length);
        memory_start = romfs_start + romfs_length;
    }

    /* process 1's initial memory region is the kernel code/data */
    init_mm.start_code = (unsigned long) &text_start;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) &_end;

    /* min_low_pfn points to the start of DRAM, start_pfn points
     * to the first DRAM pages after the kernel, and max_low_pfn
     * to the end of DRAM.
     */

    /*
     * partially used pages are not usable - thus
     * we are rounding upwards:
     */
    start_pfn = PFN_UP(memory_start);   /* usually c0000000 + kernel + romfs */
    max_pfn = PFN_DOWN((unsigned long)high_memory); /* usually c0000000 + dram size */

    /*
     * Initialize the boot-time allocator (start, end)
     *
     * We give it access to all our DRAM, but we could as well just have
     * given it a small slice. No point in doing that though, unless we
     * have non-contiguous memory and want the boot-stuff to be in, say,
     * the smallest area.
     *
     * It will put a bitmap of the allocated pages in the beginning
     * of the range we give it, but it won't mark the bitmaps pages
     * as reserved. We have to do that ourselves below.
     *
     * We need to use init_bootmem_node instead of init_bootmem
     * because our map starts at a quite high address (min_low_pfn).
     */
    max_low_pfn = max_pfn;
    min_low_pfn = PAGE_OFFSET >> PAGE_SHIFT;
    bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
            min_low_pfn, max_low_pfn);

    /* And free all memory not belonging to the kernel (addr, size) */
    free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn - start_pfn));

    /*
     * Reserve the bootmem bitmap itself as well. We do this in two
     * steps (first step was init_bootmem()) because this catches
     * the (very unlikely) case of us accidentally initializing the
     * bootmem allocator with an invalid RAM area.
     *
     * Arguments are start, size
     */
    reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);

    /* paging_init() sets up the MMU and marks all pages as reserved */
    paging_init();

    *cmdline_p = cris_command_line;

#ifdef CONFIG_ETRAX_CMDLINE
    if (!strcmp(cris_command_line, "")) {
        strlcpy(cris_command_line, CONFIG_ETRAX_CMDLINE,
                COMMAND_LINE_SIZE);
        cris_command_line[COMMAND_LINE_SIZE - 1] = '\0';
    }
#endif

    /* Save command line for future references. */
    memcpy(boot_command_line, cris_command_line, COMMAND_LINE_SIZE);
    boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

    /* give credit for the CRIS port */
    show_etrax_copyright();

    /* Setup utsname */
    strcpy(init_utsname()->machine, cris_machine_name);
}
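The comment block in the CRIS example above spells out the ordering constraint that most snippets in this collection follow: init_bootmem_node() places the bitmap but does not reserve it, so the caller must first free its usable RAM and then reserve the bitmap (and anything else still in use) itself. The sketch below distills that three-step sequence using the same bootmem calls; wrapping it as a separate helper and the helper's name are my own illustration, and real callers substitute their platform's PFN bounds.

/* Illustrative helper (hypothetical name): the canonical bootmem bring-up order. */
static void __init bootmem_bringup_sketch(unsigned long start_pfn,
        unsigned long min_pfn, unsigned long max_pfn)
{
    unsigned long bootmap_size;

    /* 1. Place the bitmap at start_pfn; every page starts out reserved. */
    bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
            min_pfn, max_pfn);

    /* 2. Hand the RAM after the kernel to the allocator (marks it free). */
    free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn - start_pfn));

    /* 3. Take the bitmap pages back out, since step 1 did not reserve them. */
    reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);
}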
void __init setup_memory(void)
{
    int i;
    unsigned long map_size;
    u32 kernel_align_start, kernel_align_size;

    /* Find main memory where is the kernel */
    for (i = 0; i < lmb.memory.cnt; i++) {
        memory_start = (u32) lmb.memory.region[i].base;
        memory_end = (u32) lmb.memory.region[i].base +
                (u32) lmb.memory.region[i].size;
        if ((memory_start <= (u32)_text) &&
                ((u32)_text <= memory_end)) {
            memory_size = memory_end - memory_start;
            PAGE_OFFSET = memory_start;
            printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, "
                    "size 0x%08x\n", __func__, memory_start,
                    memory_end, memory_size);
            break;
        }
    }

    if (!memory_start || !memory_end) {
        panic("%s: Missing memory setting 0x%08x-0x%08x\n", __func__,
                memory_start, memory_end);
    }

    /* reservation of region where is the kernel */
    kernel_align_start = PAGE_DOWN((u32)_text);
    /* ALIGN can be remove because _end in vmlinux.lds.S is align */
    kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
    lmb_reserve(kernel_align_start, kernel_align_size);
    printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
            __func__, kernel_align_start,
            kernel_align_start + kernel_align_size, kernel_align_size);

    /*
     * Kernel:
     * start: base phys address of kernel - page align
     * end: base phys address of kernel - page align
     *
     * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
     * max_low_pfn
     * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
     * num_physpages - number of all pages
     */

    /* memory start is from the kernel end (aligned) to higher addr */
    min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
    /* RAM is assumed contiguous */
    num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
    max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT;

    printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
    printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
    printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);

    /*
     * Find an area to use for the bootmem bitmap.
     * We look for the first area which is at least
     * 128kB in length (128kB is enough for a bitmap
     * for 4GB of memory, using 4kB pages), plus 1 page
     * (in case the address isn't page-aligned).
     */
    map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)),
            min_low_pfn, max_low_pfn);
    lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size);

    /* free bootmem is whole main memory */
    free_bootmem(memory_start, memory_size);

    /* reserve allocate blocks */
    for (i = 0; i < lmb.reserved.cnt; i++) {
        pr_debug("reserved %d - 0x%08x-0x%08x\n", i,
                (u32) lmb.reserved.region[i].base,
                (u32) lmb_size_bytes(&lmb.reserved, i));
        reserve_bootmem(lmb.reserved.region[i].base,
                lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
    }

    paging_init();
}
void __init setup_bootmem_allocator(unsigned long free_pfn)
{
    unsigned long bootmap_size;

    /*
     * Find a proper area for the bootmem bitmap. After this
     * bootstrap step all allocations (until the page allocator
     * is intact) must be done via bootmem_alloc().
     */
    bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
            min_low_pfn, max_low_pfn);
    add_active_range(0, min_low_pfn, max_low_pfn);
    register_bootmem_low_pages();

    node_set_online(0);

    /*
     * Reserve the kernel text and
     * Reserve the bootmem bitmap. We do this in two steps (first step
     * was init_bootmem()), because this catches the (definitely buggy)
     * case of us accidentally initializing the bootmem allocator with
     * an invalid RAM area.
     */
    reserve_bootmem(__MEMORY_START+PAGE_SIZE,
            (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

    /*
     * reserve physical page 0 - it's a special BIOS page on many boxes,
     * enabling clean reboots, SMP operation, laptop functions.
     */
    reserve_bootmem(__MEMORY_START, PAGE_SIZE);

    sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_BLK_DEV_INITRD
    ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
    if (&__rd_start != &__rd_end) {
        LOADER_TYPE = 1;
        INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
                __MEMORY_START;
        INITRD_SIZE = (unsigned long)&__rd_end -
                (unsigned long)&__rd_start;
    }

    if (LOADER_TYPE && INITRD_START) {
        if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
            reserve_bootmem(INITRD_START + __MEMORY_START,
                    INITRD_SIZE);
            initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START;
            initrd_end = initrd_start + INITRD_SIZE;
        } else {
            printk("initrd extends beyond end of memory "
                    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                    INITRD_START + INITRD_SIZE,
                    max_low_pfn << PAGE_SHIFT);
            initrd_start = 0;
        }
    }
#endif

#ifdef CONFIG_KEXEC
    if (crashk_res.start != crashk_res.end)
        reserve_bootmem(crashk_res.start,
                crashk_res.end - crashk_res.start + 1);
#endif
}
static void __init bootmem_init(void)
{
    unsigned long reserved_end;
    unsigned long mapstart = ~0UL;
    unsigned long bootmap_size;
    int i;

    /*
     * Sanity check any INITRD first. We don't take it into account
     * for bootmem setup initially, rely on the end-of-kernel-code
     * as our memory range starting point. Once bootmem is inited we
     * will reserve the area used for the initrd.
     */
    init_initrd();
    reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

    /*
     * max_low_pfn is not a number of pages. The number of pages
     * of the system is given by 'max_low_pfn - min_low_pfn'.
     */
    min_low_pfn = ~0UL;
    max_low_pfn = 0;

    /*
     * Find the highest page frame number we have available.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size);

        if (end > max_low_pfn)
            max_low_pfn = end;
        if (start < min_low_pfn)
            min_low_pfn = start;
        if (end <= reserved_end)
            continue;
        if (start >= mapstart)
            continue;
        mapstart = max(reserved_end, start);
    }

    if (min_low_pfn >= max_low_pfn)
        panic("Incorrect memory mapping !!!");
    if (min_low_pfn > ARCH_PFN_OFFSET) {
        printk(KERN_INFO
                "Wasting %lu bytes for tracking %lu unused pages\n",
                (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
                min_low_pfn - ARCH_PFN_OFFSET);
    } else if (min_low_pfn < ARCH_PFN_OFFSET) {
        printk(KERN_INFO
                "%lu free pages won't be used\n",
                ARCH_PFN_OFFSET - min_low_pfn);
    }
    min_low_pfn = ARCH_PFN_OFFSET;

    /*
     * Determine low and high memory ranges
     */
    max_pfn = max_low_pfn;
    if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
        highstart_pfn = PFN_DOWN(HIGHMEM_START);
        highend_pfn = max_low_pfn;
#endif
        max_low_pfn = PFN_DOWN(HIGHMEM_START);
    }

    /*
     * Initialize the boot-time allocator with low memory only.
     */
    bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
            min_low_pfn, max_low_pfn);

    /*
     * Register fully available low RAM pages with the bootmem allocator.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end, size;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size);

        /*
         * Reserve usable memory.
         */
        switch (boot_mem_map.map[i].type) {
        case BOOT_MEM_RAM:
            break;
        case BOOT_MEM_INIT_RAM:
            memory_present(0, start, end);
            continue;
        default:
            /* Not usable memory */
            continue;
        }

        /*
         * We are rounding up the start address of usable memory
         * and at the end of the usable range downwards.
         */
        if (start >= max_low_pfn)
            continue;
        if (start < reserved_end)
            start = reserved_end;
        if (end > max_low_pfn)
            end = max_low_pfn;

        /*
         * ... finally, is the area going away?
         */
        if (end <= start)
            continue;
        size = end - start;

        /* Register lowmem ranges */
        free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
        memory_present(0, start, end);
    }

    /*
     * Reserve the bootmap memory.
     */
    reserve_bootmem(PFN_PHYS(mapstart), bootmap_size);

    /*
     * Reserve initrd memory if needed.
     */
    finalize_initrd();
}
void __init setup_arch(char **cmdline_p)
{
    unsigned long bootmap_size;
    unsigned long start_pfn, max_pfn, max_low_pfn;

#ifdef CONFIG_EARLY_PRINTK
    extern void enable_early_printk(void);

    enable_early_printk();
#endif
#ifdef CONFIG_CMDLINE_BOOL
    strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif

    ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
    rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
    rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

    if (!MOUNT_ROOT_RDONLY)
        root_mountflags &= ~MS_RDONLY;
    init_mm.start_code = (unsigned long) _text;
    init_mm.end_code = (unsigned long) _etext;
    init_mm.end_data = (unsigned long) _edata;
    init_mm.brk = (unsigned long) _end;

    code_resource.start = virt_to_bus(_text);
    code_resource.end = virt_to_bus(_etext)-1;
    data_resource.start = virt_to_bus(_etext);
    data_resource.end = virt_to_bus(_edata)-1;

    sh_mv_setup(cmdline_p);

#define PFN_UP(x)   (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

#ifdef CONFIG_DISCONTIGMEM
    NODE_DATA(0)->bdata = &discontig_node_bdata[0];
    NODE_DATA(1)->bdata = &discontig_node_bdata[1];

    bootmap_size = init_bootmem_node(NODE_DATA(1),
            PFN_UP(__MEMORY_START_2ND),
            PFN_UP(__MEMORY_START_2ND),
            PFN_DOWN(__MEMORY_START_2ND+__MEMORY_SIZE_2ND));
    free_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, __MEMORY_SIZE_2ND);
    reserve_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, bootmap_size);
#endif

    /*
     * Find the highest page frame number we have available
     */
    max_pfn = PFN_DOWN(__pa(memory_end));

    /*
     * Determine low and high memory ranges:
     */
    max_low_pfn = max_pfn;

    /*
     * Partially used pages are not usable - thus
     * we are rounding upwards:
     */
    start_pfn = PFN_UP(__pa(_end));

    /*
     * Find a proper area for the bootmem bitmap. After this
     * bootstrap step all allocations (until the page allocator
     * is intact) must be done via bootmem_alloc().
     */
    bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
            __MEMORY_START>>PAGE_SHIFT, max_low_pfn);

    /*
     * Register fully available low RAM pages with the bootmem allocator.
     */
    {
        unsigned long curr_pfn, last_pfn, pages;

        /*
         * We are rounding up the start address of usable memory:
         */
        curr_pfn = PFN_UP(__MEMORY_START);

        /*
         * ... and at the end of the usable range downwards:
         */
        last_pfn = PFN_DOWN(__pa(memory_end));

        if (last_pfn > max_low_pfn)
            last_pfn = max_low_pfn;

        pages = last_pfn - curr_pfn;
        free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
                PFN_PHYS(pages));
    }

    /*
     * Reserve the kernel text and
     * Reserve the bootmem bitmap. We do this in two steps (first step
     * was init_bootmem()), because this catches the (definitely buggy)
     * case of us accidentally initializing the bootmem allocator with
     * an invalid RAM area.
     */
    reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
            (PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

    /*
     * reserve physical page 0 - it's a special BIOS page on many boxes,
     * enabling clean reboots, SMP operation, laptop functions.
     */
    reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);

#ifdef CONFIG_BLK_DEV_INITRD
    ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
    if (&__rd_start != &__rd_end) {
        LOADER_TYPE = 1;
        INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
                __MEMORY_START;
        INITRD_SIZE = (unsigned long)&__rd_end -
                (unsigned long)&__rd_start;
    }

    if (LOADER_TYPE && INITRD_START) {
        if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
            reserve_bootmem_node(NODE_DATA(0),
                    INITRD_START+__MEMORY_START, INITRD_SIZE);
            initrd_start = INITRD_START ?
                    INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
            initrd_end = initrd_start + INITRD_SIZE;
        } else {
            printk("initrd extends beyond end of memory "
                    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                    INITRD_START + INITRD_SIZE,
                    max_low_pfn << PAGE_SHIFT);
            initrd_start = 0;
        }
    }
#endif

#ifdef CONFIG_DUMMY_CONSOLE
    conswitchp = &dummy_con;
#endif

    /* Perform the machine specific initialisation */
    platform_setup();

    paging_init();
}
static void __init bootmem_init(void)
{
    unsigned long reserved_end;
    unsigned long mapstart = ~0UL;
    unsigned long bootmap_size;
    int i;

    /*
     * Init any data related to initrd. It's a nop if INITRD is
     * not selected. Once that done we can determine the low bound
     * of usable memory.
     */
#ifdef CONFIG_XEN
    reserved_end = max(init_initrd(),
            (unsigned long) PFN_UP(__pa_symbol(&_end) + PAGE_SIZE));
#else
    reserved_end = max(init_initrd(),
            (unsigned long) PFN_UP(__pa_symbol(&_end)));
#endif

    /*
     * max_low_pfn is not a number of pages. The number of pages
     * of the system is given by 'max_low_pfn - min_low_pfn'.
     */
    min_low_pfn = ~0UL;
    max_low_pfn = 0;

    /*
     * Find the highest page frame number we have available.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        /* Changing the math in several places below to add -1. If a region
         * ends exactly on a boundary where there is no DRAM, the last PFN
         * will be incorrect (one page of memory will have no DRAM backing).
         * By subtracting one here, this problem is eliminated.
         */
        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size - 1);

        if (end > max_low_pfn)
            max_low_pfn = end;
        if (start < min_low_pfn)
            min_low_pfn = start;
        if (end <= reserved_end)
            continue;
        if (start >= mapstart)
            continue;
        mapstart = max(reserved_end, start);
    }

    if (min_low_pfn >= max_low_pfn)
        panic("Incorrect memory mapping !!!");
    if (min_low_pfn > ARCH_PFN_OFFSET) {
        pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
                min_low_pfn - ARCH_PFN_OFFSET);
    } else if (min_low_pfn < ARCH_PFN_OFFSET) {
        pr_info("%lu free pages won't be used\n",
                ARCH_PFN_OFFSET - min_low_pfn);
    }
    min_low_pfn = ARCH_PFN_OFFSET;

    /*
     * Determine low and high memory ranges
     */
    max_pfn = max_low_pfn;
    if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
        highstart_pfn = PFN_DOWN(HIGHMEM_START);
        highend_pfn = max_low_pfn;
#endif
        max_low_pfn = PFN_DOWN(HIGHMEM_START);
    }
    max_low_pfn = recalculate_max_low_pfn(max_low_pfn);

#ifdef DEBUG_MAPPED_KERNEL
    printk("max_low_pfn = 0x%lx\n", max_low_pfn);
#endif

    /*
     * Initialize the boot-time allocator with low memory only.
     */
    bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
            min_low_pfn, max_low_pfn);

    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size - 1);

        if (start <= min_low_pfn)
            start = min_low_pfn;
        if (start >= end)
            continue;

#ifndef CONFIG_HIGHMEM
        if (end > max_low_pfn)
            end = max_low_pfn;

        /*
         * ... finally, is the area going away?
         */
        if (end <= start)
            continue;
#endif

        add_active_range(0, start, end);
    }

    /*
     * Register fully available low RAM pages with the bootmem allocator.
     */
    for (i = 0; i < boot_mem_map.nr_map; i++) {
        unsigned long start, end, size;

        /*
         * Reserve usable memory.
         */
        if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
            continue;

        start = PFN_UP(boot_mem_map.map[i].addr);
        end = PFN_DOWN(boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size - 1);

        /*
         * We are rounding up the start address of usable memory
         * and at the end of the usable range downwards.
         */
        if (start >= max_low_pfn)
            continue;
        if (start < reserved_end)
            start = reserved_end;
        if (end > max_low_pfn)
            end = max_low_pfn;

        /*
         * ... finally, is the area going away?
         */
        if (end <= start)
            continue;
        size = end - start;

        /* Register lowmem ranges */
        free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
        memory_present(0, start, end);
    }

    /*
     * Reserve the bootmap memory.
     */
    reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

    /*
     * Reserve initrd memory if needed.
     */
    finalize_initrd();
}