Example #1
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
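For orientation: every example on this page hinges on the same call. The following is a from-memory sketch of what sparse_memory_present_with_active_regions() did in kernels of this era (its body varied across versions, and the helper was folded into sparse_init() around Linux 5.8, so treat this late, memblock-based form as an approximation, not a verbatim copy): it replays each PFN range recorded for a node, or for all nodes when passed MAX_NUMNODES, into sparsemem via memory_present().

/* Approximate body of the helper all these examples call; it lived in
 * mm/page_alloc.c before being folded into sparse_init(). */
void __init sparse_memory_present_with_active_regions(int nid)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	/* Walk every recorded memory range belonging to @nid (a nid of
	 * MAX_NUMNODES matches all nodes) and mark its sections present. */
	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
		memory_present(this_nid, start_pfn, end_pfn);
}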
Example #2
static void __init arch_mem_init(char **cmdline_p)
{
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	printk("Determined physical RAM map:\n");
	print_memory_map();

	strlcpy(command_line, arcs_cmdline, sizeof(command_line));
	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		printk("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
#ifdef CONFIG_SPARSEMEM
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
#endif
	sparse_init();
	paging_init();
}
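Example #2 shows the required ordering most plainly: bring up the boot-time allocator, register present memory, let sparse_init() build its section tables, and only then size the zones in paging_init(). A condensed, hypothetical sketch of that skeleton, using only names that appear in the examples on this page:

static void __init example_arch_mem_init(void)
{
	bootmem_init();		/* 1. boot-time allocator usable */
#ifdef CONFIG_SPARSEMEM
	/* 2. record which PFN ranges exist, before sparse_init() runs */
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
#endif
	sparse_init();		/* 3. allocate section/mem_map data */
	paging_init();		/* 4. zone limits to the core allocator */
}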
Example #3
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__arch_local_irq_stosm(0x04);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
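The VMALLOC_END test above, spelled _REGION2_SIZE in Example #1 and 1UL << 42 here, falls out of the s390 page-table geometry. A short worked sketch of that arithmetic, assuming the standard 64-bit s390 layout:

/*
 * Why 1UL << 42: with 4 KB pages and 256-entry page tables, one segment
 * entry maps 1 MB; a 2048-entry segment table gives 2 GB per region-3
 * entry; a 2048-entry region-3 table therefore spans 2048 * 2 GB =
 * 4 TB = 1UL << 42. A single region-3 pgd is only enough while the
 * kernel address space, vmalloc included, fits below 4 TB; past that,
 * a region-2 table is required.
 */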
Example #4
void __init paging_init(void)
{
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
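The clear_table() calls in Examples #3 and #4 fill all 2048 eight-byte top-level entries with the chosen "empty" pattern: sizeof(unsigned long) * 2048 is 16 KB, the architectural size of an s390 region/segment (crst) table. The crst_table_init() used in Example #1 appears to be a later wrapper for the same idiom; a sketch of that assumption:

/* Assumed equivalence (sketch, not verified against any one tree):
 * 4 * PAGE_SIZE == 16 KB == 2048 entries of 8 bytes each. */
void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, 4 * PAGE_SIZE);
}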
Example #5
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
#else
	free_bootmem_with_active_regions(0, max_pfn);
#endif

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
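The comment in Example #5 states the bitmap sizing rule; here is a worked instance of it, assuming 4 KB pages:

/*
 * bootmem bitmap sizing, worked example (4 KB pages assumed):
 *   4 GB of RAM   -> 4 GB / 4 KB = 1,048,576 page frames
 *   one bit/frame -> 1,048,576 bits = 128 KB of bitmap
 *   128 KB / 4 KB -> 32 bitmap pages
 * bootmem_bootmap_pages() adds one extra page of slack in case the
 * bitmap's placement is not page-aligned, as the comment above notes.
 */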
Example #6
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages;
	unsigned long start_pfn, end_pfn;
	unsigned long bootmem_paddr;

	/* Don't allow bogus node assignment */
	BUG_ON(nid > MAX_NUMNODES || nid <= 0);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
			 PAGE_KERNEL);

	lmb_add(start, end - start);

	__add_active_range(nid, start_pfn, end_pfn);

	/* Node-local pgdat */
	NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES, end));
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
				       PAGE_SIZE, end);
	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
			  start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
Example #7
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages, bootmap_start, bootmap_size;
	unsigned long start_pfn, free_pfn, end_pfn;

	/* Don't allow bogus node assignment */
	BUG_ON(nid > MAX_NUMNODES || nid == 0);

	/*
	 * The free pfn starts at the beginning of the range, and is
	 * advanced as necessary for pgdat and node map allocations.
	 */
	free_pfn = start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	__add_active_range(nid, start_pfn, end_pfn);

	/* Node-local pgdat */
	NODE_DATA(nid) = pfn_to_kaddr(free_pfn);
	free_pfn += PFN_UP(sizeof(struct pglist_data));
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn);
	bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn,
				    end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
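Unlike Example #6, which allocates the pgdat through lmb_alloc_base(), Example #7 carves it out of the start of the node's own range and advances free_pfn with PFN_UP(), which rounds a byte count up to whole page frames. A small sketch of that rounding, quoting PFN_UP's standard include/linux/pfn.h definition from memory:

#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* e.g. with 4 KB pages, a hypothetical 6000-byte struct pglist_data
 * advances free_pfn by PFN_UP(6000) == 2 frames; the later
 * reserve_bootmem_node() calls keep those frames out of circulation. */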
Example #8
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
#else
	asce_bits = _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
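Across the s390 variants on this page, the step after loading control registers 1, 7 and 13 is always the same: switch on DAT (dynamic address translation) in the PSW. Assuming the documented PSW layout, where DAT is bit 5 of the system mask, i.e. 0x04 in the leading byte, the four spellings are equivalent:

/*
 * Equivalent ways of setting the PSW DAT bit, as seen above (sketch):
 *   Example #1: psw_bits(psw).dat = 1; __load_psw_mask(psw.mask);
 *   Example #3: __arch_local_irq_stosm(0x04);      STOSM or-mask
 *   Example #4: __raw_local_irq_ssm(0x04000000L);  32-bit mask word
 *   Example #8: arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)),
 *               i.e. 4UL << 56 on 64-bit: 0x04 in the top mask byte
 */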
Example #9
void __init setup_bootmem_allocator(unsigned long free_pfn)
{
	unsigned long bootmap_size;

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
					 min_low_pfn, max_low_pfn);

	add_active_range(0, min_low_pfn, max_low_pfn);
	register_bootmem_low_pages();

	node_set_online(0);

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem(__MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(__MEMORY_START, PAGE_SIZE);

	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_BLK_DEV_INITRD
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
					__MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end -
			      (unsigned long)&__rd_start;
	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START + __MEMORY_START,
					INITRD_SIZE);
			initrd_start = INITRD_START + PAGE_OFFSET +
					__MEMORY_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				    INITRD_START + INITRD_SIZE,
				    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
#endif
}
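One detail in the CONFIG_KEXEC block is easy to misread: struct resource bounds are inclusive, so the reserved length is end - start + 1. For illustration, with hypothetical numbers:

/* A crash-kernel region of 0x1000000..0x1ffffff (inclusive) reserves
 * 0x1ffffff - 0x1000000 + 1 = 0x1000000 bytes, i.e. exactly 16 MB. */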