/*
 * mem_init - hand all bootmem pages to the buddy allocator and print the
 * boot-time memory banner (s390).
 *
 * Called once during early boot.  The call order is significant:
 * cmma_init() sets up guest page hinting before any pages reach the
 * allocator, free_all_bootmem() populates the freelists, and
 * setup_zero_pages() allocates its zeroed pages from the now-live
 * allocator.
 */
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	/* All of low memory is directly mapped; record the top of it. */
	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	/* NOTE(review): reservedpages is never incremented here, so the
	 * "reserved" figure below always prints as 0k. */
	reservedpages = 0;

	/* Section sizes derived from linker-script symbols. */
	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >>10,
	       initsize >> 10);
	/* Report the range covered by write protection: from _stext up to
	 * the page-aligned end of the shared read-only section. */
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
/*
 * mem_init - release bootmem to the buddy allocator and print memory
 * info (s390, later kernels).
 *
 * Runs once at boot on the boot CPU.  Ordering matters: cmma_init()
 * must run before pages are freed to the allocator, and
 * cmma_init_nodat() runs after setup_zero_pages() — presumably it
 * needs the allocator fully populated first (TODO confirm against
 * the cmma implementation).
 */
void __init mem_init(void)
{
	/* Mark CPU 0 as attached to / using the initial mm. */
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */
	cmma_init_nodat();

	/* Let the core print the standard "Memory: ..." banner. */
	mem_init_print_info(NULL);
}
/*
 * mem_init - release bootmem to the buddy allocator and print memory
 * info (s390).
 *
 * Runs once at boot on the boot CPU.  Call order is significant:
 * cmma_init() configures guest page hinting before any page reaches
 * the allocator, and setup_zero_pages() allocates from the freshly
 * populated freelists.
 */
void __init mem_init(void)
{
	/* Only track the attach mask when local TLB clearing is available. */
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));
	/* The boot CPU counts as the first user of init_mm. */
	atomic_set(&init_mm.context.attach_count, 1);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	mem_init_print_info(NULL);
	/* Report the range covered by write protection: from _stext up to
	 * the page-aligned end of the shared read-only section. */
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
/*
 * mem_init - mark usable pages, free them to the page allocator, and
 * print the boot memory banner (MIPS, __initfunc-era kernel).
 *
 * @start_mem: first free address after the kernel image and boot data
 * @end_mem:   upper bound of usable memory
 *
 * Walks every page between PAGE_OFFSET and end_mem: reserved pages are
 * tallied as code or data, everything else is counted in num_physpages
 * and released with free_page() (except pages occupied by an initrd).
 */
__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
{
	int codepages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int _etext, _ftext;

#ifdef CONFIG_MIPS_JAZZ
	/* Jazz boards carve their DMA translation tables out of start_mem
	 * before the rest of memory is handed to the allocator. */
	if (mips_machgroup == MACH_GROUP_JAZZ)
		start_mem = vdma_init(start_mem, end_mem);
#endif

	end_mem &= PAGE_MASK;
	max_mapnr = MAP_NR(end_mem);
	high_memory = (void *)end_mem;
	num_physpages = 0;

	/* mark usable pages in the mem_map[] */
	start_mem = PAGE_ALIGN(start_mem);

	/* Clear PG_reserved on everything above start_mem, then let the
	 * firmware/PROM layer re-reserve regions it knows about. */
	for(tmp = MAP_NR(start_mem);tmp < max_mapnr;tmp++)
		clear_bit(PG_reserved, &mem_map[tmp].flags);
	prom_fixup_mem_map(start_mem, (unsigned long)high_memory);

	for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) {
		/*
		 * This is only for PC-style DMA.  The onboard DMA
		 * of Jazz and Tyne machines is completely different and
		 * not handled via a flag in mem_map_t.
		 */
		if (tmp >= MAX_DMA_ADDRESS)
			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
		if (PageReserved(mem_map+MAP_NR(tmp))) {
			/* Attribute reserved pages to kernel text or data
			 * by comparing against the linker symbols. */
			if ((tmp < (unsigned long) &_etext) && (tmp >= (unsigned long) &_ftext))
				codepages++;
			else if ((tmp < start_mem) && (tmp > (unsigned long) &_etext))
				datapages++;
			continue;
		}
		num_physpages++;
		atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
		/* Keep initrd pages out of the free pool; this condition
		 * guards the free_page() call below. */
		if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
#endif
			free_page(tmp);
	}
	/* Free memory in bytes, minus what setup_zero_pages() consumes. */
	tmp = nr_free_pages << PAGE_SHIFT;
	/* Setup zeroed pages. */
	tmp -= setup_zero_pages();
	printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
	       tmp >> 10,
	       max_mapnr << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10));
}