Example #1
/*
 * set up paging
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0,};
	pte_t *ppte;
	int loop;

	/* main kernel space -> RAM mapping is handled as 1:1 transparent by
	 * the MMU */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
	memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));

	/* load the VMALLOC area PTE table addresses into the kernel PGD */
	ppte = kernel_vmalloc_ptes;
	for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
	     loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
	     loop++
	     ) {
		set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
		ppte += PAGE_SIZE / sizeof(pte_t);
	}

	/* declare the sizes of the RAM zones (only use the normal zone) */
	zones_size[ZONE_NORMAL] =
		contig_page_data.bdata->node_low_pfn -
		contig_page_data.bdata->node_min_pfn;

	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(zones_size);

#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	/* The Atomic Operation Unit registers need to be mapped to userspace
	 * for all processes.  The following uses vm_area_register_early() to
	 * reserve the first page of the vmalloc area and sets the pte for that
	 * page.
	 *
	 * glibc hardcodes this virtual mapping, so we're pretty much stuck with
	 * it from now on.
	 */
	user_iomap_vm.flags = VM_USERMAP;
	user_iomap_vm.size = 1 << PAGE_SHIFT;
	vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
	ppte = kernel_vmalloc_ptes;
	set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
			      PAGE_USERIO));
#endif

	local_flush_tlb_all();
}
Example #2
void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;

	/*
	 * Reserve a virtual area for the linear p2m list: one unsigned long
	 * entry per pfn, rounded up so the area can be mapped with whole PMDs.
	 */
	vm.flags = VM_ALLOC;
	vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	/* the rounded-up area may cover more pfns than originally requested */
	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	/* rebuild the p2m list inside the newly reserved virtual area */
	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();

	m2p_override_init();
}
Example #3
void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;
	unsigned long p2m_limit;

	/* P2M_LIMIT is given in GB; convert it to a number of pfns */
	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;

	/* reserve for whichever is larger: the current maximum pfn or the limit */
	vm.flags = VM_ALLOC;
	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();
}
Example #4
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 */
	if (!cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}
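
All four examples follow the same early-boot pattern: fill in a statically allocated struct vm_struct, register it with vm_area_register_early() before the vmalloc allocator has been initialised, and then populate the page tables for the address the call stores in vm.addr. The sketch below distills that pattern; it is illustrative only, and every name prefixed with example_ is hypothetical rather than taken from the examples above.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>

/* hypothetical early-init helper, for illustration only */
static struct vm_struct example_early_vm;

static void __init example_reserve_early_page(void)
{
	/* describe the area: flags and size, as in the examples above */
	example_early_vm.flags = VM_ALLOC;
	example_early_vm.size = PAGE_SIZE;

	/*
	 * vm_area_register_early() picks an address in the vmalloc range
	 * (honouring the requested alignment), stores it in .addr and
	 * records the area for vmalloc_init(); it does not touch page
	 * tables, so the caller must map .addr itself afterwards, e.g.
	 * with set_pte()/set_pmd() as the examples above do.
	 */
	vm_area_register_early(&example_early_vm, PAGE_SIZE);

	pr_info("early vm area at %p, size %lx\n",
		example_early_vm.addr, example_early_vm.size);
}

As in the examples, both the reserved area and the vm_struct describing it must live for the lifetime of the kernel, which is why every caller above uses a static (or file-scope) struct vm_struct rather than a stack variable.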