Example 1
int init_module(void)
{
	int i, ret = -ENOMEM;
	struct vm_struct *vm;
	struct page *pages[2] = { NULL, NULL };
	unsigned char *buf;

	pr_info("MyMapTest Begin\n");

	vm = alloc_vm_area(2 * PAGE_SIZE);
	if (!vm) {
		pr_info("Failed to allocate vm area\n");
		goto out;
	}

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1]) {
		pr_info("Page allocation failed\n");
		goto out;
	}

	/* Fill pages with test pattern */
	buf = kmap_atomic(pages[0]);
	for (i = 0; i < PAGE_SIZE; i++)
		buf[i] = 'a';
	kunmap_atomic(buf);

	buf = kmap_atomic(pages[1]);
	for (i = 0; i < PAGE_SIZE; i++)
		buf[i] = 'z';
	kunmap_atomic(buf);

	buf = NULL;

	/*
	 * Now, map both pages *contiguously* using a different method
	 * and verify contents of each page.
	 */
	ret = map_kernel_range_noflush((unsigned long)vm->addr, 2 * PAGE_SIZE,
				PAGE_KERNEL, pages);
	/* Returns the number of pages mapped on success, -errno on failure */
	pr_info("map_kernel_range_noflush returned: %d\n", ret);
	if (ret < 0)
		goto out;

	buf = vm->addr;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (buf[i] != 'a')
			pr_info("mismatch in page-0 at location %d\n", i);
	}

	for (i = PAGE_SIZE; i < 2 * PAGE_SIZE; i++) {
		if (buf[i] != 'z')
			pr_info("mismatch in page-1 at location %d\n", i);
	}

	unmap_kernel_range_noflush((unsigned long)vm->addr, 2 * PAGE_SIZE);

	__flush_tlb_one((unsigned long)buf);
	__flush_tlb_one((unsigned long)buf + PAGE_SIZE);

	ret = 0;	/* Success */
out:
	if (vm)
		free_vm_area(vm);
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);

	/*
	 * A non-zero return means init_module failed; the module can't be loaded.
	 */
	return ret;
}
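
Because init_module() above unmaps the range and releases the vm area and both pages on its own exit path, the matching legacy exit hook has nothing left to undo. A minimal sketch of the rest of such a module (the log string and the MODULE_LICENSE tag are assumptions, not part of the listed example):

void cleanup_module(void)
{
	/* Everything allocated in init_module() was already released there */
	pr_info("MyMapTest End\n");
}

MODULE_LICENSE("GPL");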
Example 2
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}
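
This helper is a thin wrapper: it converts a page count into a byte length and maps the page array at the given kernel virtual address, deferring cache/TLB flushes to the caller. A rough sketch of how such a caller might batch several units and flush once at the end (the function name, the unit layout, and the rollback policy are assumptions for illustration, not the actual percpu allocator code):

/* Hypothetical caller: map nr_pages pages for each of nr_units units laid
 * out back to back at 'base', undoing earlier units if one mapping fails. */
static int map_all_units(unsigned long base, struct page **pages,
			 int nr_units, int nr_pages)
{
	unsigned long unit_size = (unsigned long)nr_pages << PAGE_SHIFT;
	int unit, done, ret;

	for (unit = 0; unit < nr_units; unit++) {
		ret = __pcpu_map_pages(base + unit * unit_size,
				       &pages[unit * nr_pages], nr_pages);
		if (ret < 0)
			goto fail;
	}
	/* The noflush variant defers flushing, so do it once for the range */
	flush_cache_vmap(base, base + nr_units * unit_size);
	return 0;

fail:
	for (done = 0; done < unit; done++)
		unmap_kernel_range_noflush(base + done * unit_size, unit_size);
	return ret;
}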
Example 3
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
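
For context, this function is driven in two directions: with allocate set to 1 it allocates backing pages and maps each one into both the kernel range and the matching userspace vma, and with allocate set to 0 it parks the pages on the binder LRU so the shrinker can reclaim them later. A hedged sketch of a caller (the function name is hypothetical; start and end must be page-aligned addresses within alloc->buffer):

/* Illustrative only: populate one page-aligned range, then release it */
static int example_buffer_cycle(struct binder_alloc *alloc,
				void *start, void *end)
{
	int ret;

	ret = binder_update_page_range(alloc, 1, start, end);	/* allocate */
	if (ret)
		return ret;

	/* ... use the mapped buffer range ... */

	binder_update_page_range(alloc, 0, start, end);		/* free */
	return 0;
}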