Example #1
0
/*
 * setup_pcpu_4k - populate the first percpu chunk from individual 4k pages
 * @static_size: size of the static percpu area to copy into each unit
 *
 * Allocates pcpu4k_nr_static_pages pages for each possible CPU, one
 * PAGE_SIZE bootmem allocation at a time (presumably so each page can be
 * placed near its CPU's node - see pcpu_alloc_bootmem), copies the static
 * percpu image (__per_cpu_load) into them and hands control to
 * pcpu_setup_first_chunk() with pcpu4k_get_page as the page lookup
 * callback.
 *
 * Returns pcpu_setup_first_chunk()'s result on success, -ENOMEM if any
 * page allocation fails.  The pcpu4k_pages pointer array is only needed
 * during setup and is freed on both the success and failure paths.
 */
static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;	/* counts total pages allocated so far, across all cpus */
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			/* seed the page with this cpu's slice of the
			 * static percpu image */
			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	/* unwind: give back every page handed out before the failure */
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	/* page array no longer needed once the first chunk is set up */
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}
Example #2
0
/*
 * Helpers for first chunk memory allocation
 */
/*
 * Allocation callback used while setting up the first percpu chunk;
 * it simply forwards the request to pcpu_alloc_bootmem().
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	void *ptr;

	ptr = pcpu_alloc_bootmem(cpu, size, align);
	return ptr;
}
Example #3
0
/*
 * setup_pcpu_remap - set up the first percpu chunk with large-page remapping
 * @static_size: size of the static percpu area to copy into each unit
 *
 * Gives each possible CPU its own PMD_SIZE chunk of node-local bootmem,
 * copies the static percpu image (__per_cpu_load) into it, then remaps
 * all the chunks into a contiguous PMD_SIZE-aligned virtual area with
 * large-page (PAGE_KERNEL_LARGE) mappings before handing control to
 * pcpu_setup_first_chunk().
 *
 * Returns pcpu_setup_first_chunk()'s result on success, -EINVAL when the
 * method does not apply (no PSE, non-NUMA, or a too-large static area),
 * -ENOMEM if a per-cpu allocation fails.
 */
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
	static struct vm_struct vm;
	size_t ptrs_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 */
	if (!cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpur_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	/* whatever remains after static area and reserve is dynamic space */
	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
	pcpur_ptrs = alloc_bootmem(ptrs_size);

	for_each_possible_cpu(cpu) {
		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
		if (!pcpur_ptrs[cpu])
			goto enomem;

		/*
		 * Only use pcpur_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
			     PMD_SIZE - pcpur_size);

		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
	}

	/* allocate address and map */
	/* reserve one PMD_SIZE slot of virtual space per possible cpu */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd;

		/* map each cpu's chunk at its slot with a large-page PMD */
		pmd = populate_extra_pmd((unsigned long)vm.addr
					 + cpu * PMD_SIZE);
		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
				     PAGE_KERNEL_LARGE));
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, vm.addr, NULL);
	goto out_free_ar;

enomem:
	/*
	 * NOTE(review): this unwind relies on slots for cpus that were
	 * never reached being NULL - presumably alloc_bootmem() returns
	 * zeroed memory; confirm against the bootmem allocator.
	 */
	for_each_possible_cpu(cpu)
		if (pcpur_ptrs[cpu])
			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
	ret = -ENOMEM;
out_free_ar:
	/* pointer array no longer needed once the first chunk is set up */
	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
	return ret;
}