Example #1
File: p2m.c  Project: DenisLug/mptcp
static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
	unsigned int i, chunk;
	unsigned long pfn;
	unsigned long *mfns;
	pte_t *ptep;
	pmd_t *pmdp;
	int type;

	p2m_missing = alloc_p2m_page();
	p2m_init(p2m_missing);
	p2m_identity = alloc_p2m_page();
	p2m_init(p2m_identity);

	p2m_missing_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
	p2m_identity_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
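	/*
	 * Point every entry of the shared missing/identity PTE pages at the
	 * read-only p2m_missing/p2m_identity pages; whole PMDs can then be
	 * mapped with these PTE pages further below.
	 */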
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(p2m_missing_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
		set_pte(p2m_identity_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
		/*
		 * Try to map missing/identity PMDs or p2m-pages if possible.
		 * We have to respect the structure of the mfn_list_list
		 * which will be built just afterwards.
		 * Chunk size to test is one p2m page if we are in the middle
		 * of a mfn_list_list mid page and the complete mid page area
		 * if we are at index 0 of the mid page. Please note that a
		 * mid page might cover more than one PMD, e.g. on 32 bit PAE
		 * kernels.
		 */
		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
			P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

		type = xen_p2m_elem_type(pfn);
		i = 0;
		if (type != P2M_TYPE_PFN)
			for (i = 1; i < chunk; i++)
				if (xen_p2m_elem_type(pfn + i) != type)
					break;
		if (i < chunk)
			/* Reset to minimal chunk size. */
			chunk = P2M_PER_PAGE;

		if (type == P2M_TYPE_PFN || i < chunk) {
			/* Use initial p2m page contents. */
#ifdef CONFIG_X86_64
			mfns = alloc_p2m_page();
			copy_page(mfns, xen_p2m_addr + pfn);
#else
			mfns = xen_p2m_addr + pfn;
#endif
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
			continue;
		}

		if (chunk == P2M_PER_PAGE) {
			/* Map complete missing or identity p2m-page. */
			mfns = (type == P2M_TYPE_MISSING) ?
				p2m_missing : p2m_identity;
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
			continue;
		}

		/* Complete missing or identity PMD(s) can be mapped. */
		ptep = (type == P2M_TYPE_MISSING) ?
			p2m_missing_pte : p2m_identity_pte;
		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
			pmdp = populate_extra_pmd(
				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
		}
	}
}
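
The subtle part of the rebuild loop above is the chunk-size selection: a whole mid-page area is only attempted when pfn sits at a mid-page boundary, and it is shrunk back to a single p2m page unless every entry in the candidate range shares one non-PFN type. The standalone sketch below reproduces just that decision; it is not part of the kernel source, the constants assume the usual x86-64 layout (4 KiB pages, 8-byte entries, so P2M_PER_PAGE and P2M_MID_PER_PAGE are both 512), and elem_type() is a made-up stand-in for xen_p2m_elem_type().

#include <stdio.h>

#define P2M_PER_PAGE      512UL   /* assumed: PAGE_SIZE / sizeof(unsigned long) */
#define P2M_MID_PER_PAGE  512UL   /* assumed: PAGE_SIZE / sizeof(unsigned long *) */

enum p2m_type { P2M_TYPE_PFN, P2M_TYPE_MISSING, P2M_TYPE_IDENTITY };

/* Made-up stand-in for xen_p2m_elem_type(): pretend pfns below 1M are normal RAM. */
static int elem_type(unsigned long pfn)
{
	return pfn < 0x100000 ? P2M_TYPE_PFN : P2M_TYPE_MISSING;
}

/*
 * Same decision the kernel loop makes: try a whole mid-page area only when
 * pfn is at a mid-page boundary, and fall back to one p2m page unless every
 * entry in the candidate chunk shares a single non-PFN type.
 */
static unsigned long pick_chunk(unsigned long pfn)
{
	unsigned long chunk, i;
	int type = elem_type(pfn);

	chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
		P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

	if (type == P2M_TYPE_PFN)
		return P2M_PER_PAGE;

	for (i = 1; i < chunk; i++)
		if (elem_type(pfn + i) != type)
			return P2M_PER_PAGE;

	return chunk;
}

int main(void)
{
	/* Boundary pfn inside a fully-missing area: the whole mid-page area. */
	printf("chunk at 0x200000: %lu\n", pick_chunk(0x200000));
	/* Pfn in the middle of a mid-page area: always a single p2m page. */
	printf("chunk at 0x200200: %lu\n", pick_chunk(0x200200));
	return 0;
}

Compiled with gcc, the first call should report 262144 (a full mid-page area, 512 * 512 entries) and the second 512 (one p2m page).
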
Example #2
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 */
	if (!cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);

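	/*
	 * Point each CPU's PMD_SIZE slot in the vm area at its bootmem
	 * block with a single large-page PMD.
	 */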
	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}
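
A similarly small sketch of the sizing and layout arithmetic in setup_pcpu_remap(): each CPU's unit is rounded up to a page, must fit inside one large page, and ends up mapped at vm.addr + cpu * PMD_SIZE. This is illustrative only; the PERCPU_* reserve constants and the base address below are placeholders rather than the kernel's real values, and PMD_SIZE is taken as 2 MiB as in the function's own comment.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE                  4096UL
#define PMD_SIZE                   (2UL << 20)     /* 2 MiB large page */
#define PFN_ALIGN(x)               (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PERCPU_MODULE_RESERVE      (8UL << 10)     /* placeholder value */
#define PERCPU_DYNAMIC_RESERVE     (20UL << 10)    /* placeholder value */
#define PERCPU_FIRST_CHUNK_RESERVE PERCPU_MODULE_RESERVE

int main(void)
{
	size_t static_size = 600 * 1024;   /* pretend size of the static per-cpu data */
	unsigned int nr_cpus = 4, cpu;

	/* Round the per-cpu unit up to a page, as setup_pcpu_remap() does. */
	size_t pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
				      PERCPU_DYNAMIC_RESERVE);
	size_t dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
	size_t freed = PMD_SIZE - pcpur_size;

	/* The whole unit must fit in one large page, or remapping is refused. */
	if (pcpur_size > PMD_SIZE) {
		fprintf(stderr, "static data is larger than a large page\n");
		return 1;
	}

	printf("unit size %zu, dynamic area %zu, %zu bytes of each 2 MiB block freed\n",
	       pcpur_size, dyn_size, freed);

	/* Each CPU gets one PMD-sized slot in the early vm area. */
	unsigned long vm_addr = 0xffffe00000000000UL;   /* made-up base address */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu%u unit mapped at %#lx\n", cpu, vm_addr + cpu * PMD_SIZE);
	return 0;
}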