/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	/*
	 * old: we can DMA to/from any address, so put all pages into ZONE_DMA.
	 * We use only ZONE_NORMAL.
	 */
	zones_size[ZONE_NORMAL] = max_mapnr;

	/* every other zone is empty (ZONE_NORMAL is zone 0 here) */
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	free_area_init(zones_size);
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
#ifdef DEBUG
	unsigned long start_mem = PAGE_ALIGN(memory_start);
#endif
	unsigned long end_mem = memory_end & PAGE_MASK;

#ifdef DEBUG
	printk (KERN_DEBUG "start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * Initialize the bad page table and bad page to point
	 * to a couple of allocated pages.
	 */
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs (USER_DS);

#ifdef DEBUG
	printk (KERN_DEBUG "before free_area_init\n");

	printk (KERN_DEBUG "free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	{
		unsigned long zones_size[MAX_NR_ZONES] = {0, };

		zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
		free_area_init(zones_size);
	}
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};

	/* allocate some pages for kernel housekeeping tasks */
	empty_bad_page_table	= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page		= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page		= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);

	memset((void *) empty_zero_page, 0, PAGE_SIZE);

#ifdef CONFIG_HIGHMEM
	if (num_physpages - num_mappedpages) {
		pgd_t *pge;
		pud_t *pue;
		pmd_t *pme;

		pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);

		memset(pkmap_page_table, 0, PAGE_SIZE);

		pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
		pue = pud_offset(pge, PKMAP_BASE);
		pme = pmd_offset(pue, PKMAP_BASE);
		__set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
	}
#endif

	/*
	 * distribute the allocatable pages across the various zones and pass
	 * them to the allocator
	 */
	zones_size[ZONE_DMA]     = max_low_pfn - min_low_pfn;
	zones_size[ZONE_NORMAL]  = 0;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif

	free_area_init(zones_size);

#ifdef CONFIG_MMU
	/* initialise init's MMU context */
	init_new_context(&init_task, &init_mm);
#endif

} /* end paging_init() */
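An aside on the CONFIG_HIGHMEM split in the variant above: the zone sizing is plain bookkeeping. Everything between min_low_pfn and max_low_pfn goes into the low zone (ZONE_DMA here), and whatever physical pages exist beyond the kernel's permanent mapping become ZONE_HIGHMEM. A self-contained sketch of that arithmetic with hypothetical page-frame counts (none of the numbers come from the code above):

#include <stdio.h>

int main(void)
{
	/* Hypothetical figures for illustration only. */
	unsigned long min_low_pfn     = 0x100;		/* first allocatable low page frame */
	unsigned long max_low_pfn     = 0x20000;	/* end of the permanently mapped region */
	unsigned long num_physpages   = 0x30000;	/* all physical page frames */
	unsigned long num_mappedpages = 0x20000;	/* frames covered by the kernel mapping */

	unsigned long low_zone  = max_low_pfn - min_low_pfn;		/* -> zones_size[ZONE_DMA] */
	unsigned long high_zone = num_physpages - num_mappedpages;	/* -> zones_size[ZONE_HIGHMEM] */

	printf("low zone : %lu pages\n", low_zone);
	printf("high zone: %lu pages\n", high_zone);
	return 0;
}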
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
	unsigned long end_mem = memory_end & PAGE_MASK;
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs (USER_DS);

	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
	free_area_init(zones_size);
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up user data space
	 */
	set_fs(KERNEL_DS);

	/*
	 * Define zones
	 */
	zones_size[ZONE_NORMAL] = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
	pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
		__pa(PAGE_OFFSET) >> PAGE_SHIFT;

	free_area_init(zones_size);
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
	unsigned long start_mem = PAGE_ALIGN(memory_start);
	unsigned long end_mem = memory_end & PAGE_MASK;

	pr_debug("start_mem is %#lx\nvirtual_end is %#lx\n", start_mem, end_mem);

	/*
	 * Initialize the bad page table and bad page to point
	 * to a couple of allocated pages.
	 */
	empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs(USER_DS);

	pr_debug("before free_area_init\n");

	pr_debug("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
		 start_mem, end_mem);

	{
		unsigned long zones_size[MAX_NR_ZONES] = {0, };

		zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
		free_area_init(zones_size);
	}
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
#ifdef DEBUG
	unsigned long start_mem = PAGE_ALIGN(memory_start);
#endif
	unsigned long end_mem = memory_end & PAGE_MASK;

#ifdef DEBUG
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");

	printk ("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	{
		unsigned i;
		unsigned long zones_size[MAX_NR_ZONES];

		for (i = 0; i < MAX_NR_ZONES; i++)
			zones_size[i] = 0;

		zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
		free_area_init(zones_size);
	}

	printk ("after free_area_init\n");
}
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	return free_area_init(start_mem, end_mem);
}
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	int i;
	unsigned long address;
	unsigned long next_pgtable;
	unsigned long bootmem_end;
	unsigned long zones_size[3] = {0, 0, 0};
	unsigned long size;

#ifdef TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	address = PAGE_OFFSET;
	pg_dir = swapper_pg_dir;
	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
	memset (kernel_pg_dir, 0, sizeof (kernel_pg_dir));

	/* one PTE per physical page, rounded up to a whole number of pages */
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);

	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;

	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) __pa (next_pgtable);
		next_pgtable += PTRS_PER_PTE * sizeof (pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		pg_table = (pte_t *) __va ((unsigned long) pg_table);
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = __mk_pte(address, PAGE_INIT);
			if (address >= (unsigned long)high_memory)
				pte_val (pte) = 0;
			set_pte (pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	mmu_emu_init(bootmem_end);

	current->mm = NULL;

	/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
	zones_size[1] = 0;

	free_area_init(zones_size);
}
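Nearly every variant above sizes its single populated zone with the same arithmetic: the page count is (end_mem - PAGE_OFFSET) >> PAGE_SHIFT, i.e. the span of directly mapped kernel virtual addresses divided by the page size. A minimal, self-contained userspace sketch of that calculation follows; the values of memory_end, PAGE_SHIFT and PAGE_OFFSET are hypothetical (only the 0x0E000000 base echoes the sun3 comment above) and are not taken from any particular port:

#include <stdio.h>

/* Hypothetical values for illustration only; real ports define these
 * per architecture. */
#define PAGE_SHIFT	12			/* 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_OFFSET	0x0E000000UL		/* start of the kernel direct map */

int main(void)
{
	unsigned long memory_end = 0x0E800000UL;	/* hypothetical: 8 MiB of RAM */
	unsigned long end_mem = memory_end & PAGE_MASK;	/* page-align the end */

	/* Same expression the paging_init() variants feed into zones_size[]:
	 * the number of page frames between PAGE_OFFSET and the end of memory. */
	unsigned long nr_pages = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;

	printf("end_mem  = %#lx\n", end_mem);
	printf("nr_pages = %lu (%lu KiB)\n", nr_pages, nr_pages * PAGE_SIZE / 1024);
	return 0;
}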