Example #1
File: nommu.c  Project: rochecr/linux
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
					 force, pages, 0);
}
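For context, a minimal sketch of how a caller might use the wrapper above. read_one_user_page() is a hypothetical helper, not part of the example; it assumes the pre-4.6 signature shown here and the usual <linux/mm.h>, <linux/highmem.h>, and <linux/string.h> includes.

/* Hypothetical helper: pin one page of the current task's own address
 * space, copy its contents out, and drop the pin. */
static int read_one_user_page(unsigned long uaddr, void *dst)
{
	struct page *page;
	long pinned;
	void *kaddr;

	/* Read-only, non-forced pin of a single page. */
	pinned = get_user_pages_unlocked(current, current->mm,
					 uaddr & PAGE_MASK, 1,
					 0 /* write */, 0 /* force */, &page);
	if (pinned != 1)
		return pinned < 0 ? (int)pinned : -EFAULT;

	kaddr = kmap(page);
	memcpy(dst, kaddr, PAGE_SIZE);
	kunmap(page);
	put_page(page);
	return 0;
}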
Example #2
/**
 * process_vm_rw_single_vec - read/write pages from the specified task
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: array of struct page pointers with room for at least
 *  nr_pages_to_copy entries
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pages = min(nr_pages, max_pages_per_loop);
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * add FOLL_REMOTE because task/mm might not be
		 * current/current->mm
		 */
		pages = __get_user_pages_unlocked(task, mm, pa, pages,
						  vm_write, 0, process_pages,
						  FOLL_REMOTE);
		if (pages <= 0)
			return -EFAULT;

		bytes = pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pages;
		pa += pages * PAGE_SIZE;
		while (pages)
			put_page(process_pages[--pages]);
	}

	return rc;
}
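A hedged sketch of the call pattern around this helper: in the kernel it is driven from process_vm_rw_core(), but a simplified, hypothetical caller could walk the remote iovec like this. process_vm_rw_all_vecs is an invented name, and the buffer size follows the same PVM_MAX_KMALLOC_PAGES assumption the helper itself makes.

/* Hypothetical driver loop: one process_vm_rw_single_vec() call per
 * remote iovec entry, sharing a single pinned-page scratch array. */
static int process_vm_rw_all_vecs(const struct iovec *rvec,
				  unsigned long riovcnt,
				  struct iov_iter *iter,
				  struct mm_struct *mm,
				  struct task_struct *task,
				  int vm_write)
{
	struct page **process_pages;
	unsigned long i;
	int rc = 0;

	/* Large enough for max_pages_per_loop page pointers. */
	process_pages = kmalloc(PVM_MAX_KMALLOC_PAGES, GFP_KERNEL);
	if (!process_pages)
		return -ENOMEM;

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	kfree(process_pages);
	return rc;
}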
Example #3
int iterate_phdr(int (*cb) (struct phdr_info *info,
			    struct task_struct *task,
			    void *data),
		 struct task_struct *task, void *data)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = task->mm;
	struct phdr_info pi;
	char buf[NAME_BUFLEN];
	int res = 0, err = 0;
	struct page *page; // FIXME Is one page enough for all phdrs?
	Elf64_Ehdr *ehdr;
	bool first = true;

	if (!mm) return -EINVAL;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_pgoff)
			// Only the first page contains the elf
			// headers, normally.
			continue;

		err = __get_user_pages_unlocked(
			task, task->mm, vma->vm_start,
			1, 0, 0, &page, FOLL_TOUCH);
		if (err <= 0)
			continue;

		ehdr = vmap(&page, 1, vma->vm_flags, vma->vm_page_prot);
		if (!ehdr) goto PUT;

		// Test magic bytes to check that it is an ehdr
		err = 0;
		err |= (ehdr->e_ident[0] != ELFMAG0);
		err |= (ehdr->e_ident[1] != ELFMAG1);
		err |= (ehdr->e_ident[2] != ELFMAG2);
		err |= (ehdr->e_ident[3] != ELFMAG3);
		if (err) goto UNMAP;

		// Set addresses
		pi.addr = first ? 0 : vma->vm_start;
		pi.phdr = (void *) ehdr + ehdr->e_phoff;
		pi.phnum = ehdr->e_phnum;

		// Find path
		pi.name = vma_file_path(vma, buf, NAME_BUFLEN);

		// Call the callback
		res = cb(&pi, task, data);

		// Free resources
	UNMAP:
		vunmap(ehdr);
	PUT:
		put_page(page);

		if (res) break;

		first = false;
	}
	return res;
}
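A sketch of how this iterator could be consumed. count_phdrs_cb and count_task_phdrs are hypothetical names, and they only touch the struct phdr_info fields the example itself fills in (addr, phnum, name).

/* Hypothetical callback: log each ELF object mapped into the task and
 * tally its program headers; 'data' points at the caller's counter. */
static int count_phdrs_cb(struct phdr_info *info,
			  struct task_struct *task, void *data)
{
	unsigned long *total = data;

	pr_info("%s[%d]: %s at %#lx, %u program headers\n",
		task->comm, task->pid,
		info->name ? info->name : "[anon]",
		info->addr, (unsigned int)info->phnum);

	*total += info->phnum;
	return 0;	/* non-zero would stop the walk early */
}

/* Hypothetical wrapper around iterate_phdr() using the callback above. */
static unsigned long count_task_phdrs(struct task_struct *task)
{
	unsigned long total = 0;

	if (iterate_phdr(count_phdrs_cb, task, &total) < 0)
		return 0;
	return total;
}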