Example #1
0
static int fimg2d_check_address_range(unsigned long addr, size_t size)
{
	struct vm_area_struct *vma;
	struct vm_area_struct *nvma;
	int ret = 0;

	if (addr + size <= addr) {
		fimg2d_err("address overflow. addr:0x%lx, size:%d\n",
				addr, size);
		return -EINVAL;
	}

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);

	if (vma == NULL) {
		fimg2d_err("vma is NULL\n");
		ret = -EINVAL;
	} else {
		nvma = vma->vm_next;

		while ((vma->vm_end < (addr + size)) &&
				(vma != NULL) && (nvma != NULL) &&
				(vma->vm_end == nvma->vm_start)) {
			vma = vma->vm_next;
			nvma = nvma->vm_next;
		}

		if (vma->vm_end < (addr + size)) {
			fimg2d_err("addr : %#lx, size : %#x - out of vma[%#lx, %#lx] range\n",
					addr, size, vma->vm_start, vma->vm_end);
			ret =  -EFAULT;
		}
	}

	up_read(&current->mm->mmap_sem);

	if (!ret) {
		/*
		 * Invoke COW against the first and the last pages if they
		 * can be accessed by the CPU and G2D concurrently.
		 * Checking the return value of get_user_pages_fast() is not
		 * required: these calls only exist to trigger COW now, so
		 * that COW is not invoked against the first and the last
		 * pages while G2D is working.
		 */
		if (!IS_ALIGNED(addr, PAGE_SIZE))
			get_user_pages_fast(addr, 1, 1, NULL);

		if (!IS_ALIGNED(addr + size, PAGE_SIZE))
			get_user_pages_fast(addr + size, 1, 1, NULL);
	}

	return ret;
}
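
A minimal sketch of the same COW-priming idea with the reference handled explicitly (hypothetical helper, not part of the driver above): a single write-mode GUP call forces the copy-on-write fault, after which the page reference can be dropped immediately.

static void fimg2d_prime_cow_page(unsigned long addr)
{
	struct page *page;

	/* Only the write-fault side effect of GUP is wanted here. */
	if (get_user_pages_fast(addr, 1, 1, &page) == 1)
		put_page(page);
}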
Example #2
0
/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we are
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok((void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock physical page backing a given user VA.
	 */
	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

	/*
	 * Map the locked page and set up notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}
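
A hedged sketch of the matching teardown (assumed counterpart; not shown in the source): both the kmap() and the GUP pin taken above must be undone.

static void vmci_host_teardown_notify(struct vmci_ctx *context)
{
	if (context->notify_page) {
		kunmap(context->notify_page);	/* undo kmap() */
		put_page(context->notify_page);	/* drop the GUP reference */
		context->notify_page = NULL;
	}
}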
Example #3
0
/*
 * Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context.
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
#ifndef CONFIG_L4
		ret = __get_user_pages_fast(addr, 1, 0, &page);
#else
		ret = get_user_pages_fast(addr, 1, 0, &page);
#endif
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
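
An illustrative caller (hypothetical, not from the source), e.g. for peeking at a user stack word while unwinding from NMI context; note that this version of copy_from_user_nmi() returns the number of bytes copied.

static u64 peek_user_word_nmi(const void __user *p)
{
	u64 val = 0;

	if (copy_from_user_nmi(&val, p, sizeof(val)) != sizeof(val))
		return 0;	/* page not present, or copy was partial */
	return val;
}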
Example #4
0
static ssize_t get_pages_iovec(struct iov_iter *i,
		   struct page **pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
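
A worked example of the arithmetic (illustrative numbers): with PAGE_SIZE = 0x1000, addr = 0x1ff0 and len = 0x20, *start becomes 0xff0, len grows to 0x1010, addr rounds down to 0x1000, and n = 2, so both pages covering [0x1ff0, 0x2010) are requested; if both are pinned, the function returns 0x1010 - 0xff0 = 0x20 usable bytes.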
Example #5
0
static int pin_user_pages(unsigned long first_page,
			  unsigned long last_page,
			  unsigned int last_page_size,
			  int is_write,
			  struct page *pages[MAX_BUFFERS_PER_COMMAND],
			  unsigned int *iter_last_page_size)
{
	int ret;
	int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;

	if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
		requested_pages = MAX_BUFFERS_PER_COMMAND;
		*iter_last_page_size = PAGE_SIZE;
	} else {
		*iter_last_page_size = last_page_size;
	}

	ret = get_user_pages_fast(first_page, requested_pages, !is_write,
				  pages);
	if (ret <= 0)
		return -EFAULT;
	if (ret < requested_pages)
		*iter_last_page_size = PAGE_SIZE;

	return ret;
}
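
For illustration (hypothetical values): with PAGE_SHIFT = 12, first_page = 0x10000 and last_page = 0x12000, requested_pages = ((0x12000 - 0x10000) >> 12) + 1 = 3; both arguments are page-aligned addresses and the range is inclusive of the last page.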
Example #6
0
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
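
For reference, DIV_ROUND_UP() computes the same page count as the open-coded (len + PAGE_SIZE - 1) / PAGE_SIZE in the per-iovec helpers above; its definition in include/linux/kernel.h is:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))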
Example #7
0
/*H:350
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}
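
A hedged sketch of the release side that the comment above alludes to (hypothetical helper; the actual release_pte() is not shown here): the reference taken by get_pfn() must be dropped once the guest mapping goes away.

static void put_pfn(unsigned long pfn)
{
	/* get_pfn() returns -1UL on failure, so only drop real pages. */
	if (pfn != -1UL)
		put_page(pfn_to_page(pfn));
}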
Example #8
0
int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
{
	int nr_mapped_pages;

	nr_mapped_pages = get_user_pages_fast((unsigned long)data,
					      *nr_pages, write, pages);
	if (nr_mapped_pages <= 0)
		return nr_mapped_pages;

	*nr_pages = nr_mapped_pages;
	return 0;
}
Example #9
0
static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write,
			unsigned long gpa, unsigned long hva)
{
	int ret;
	epte_t *epte, flags;
	struct page *page;

	ret = get_user_pages_fast(hva, 1, make_write, &page);
	if (ret != 1) {
		printk(KERN_ERR "ept: failed to get user page %lx\n", hva);
		return ret;
	}

	spin_lock(&vcpu->ept_lock);

	ret = ept_lookup_gpa(vcpu, (void *) gpa,
			     PageHuge(page) ? 1 : 0, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		put_page(page);	/* drop the reference taken by GUP above */
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	if (epte_present(*epte)) {
		if (!epte_big(*epte) && PageHuge(page))
			ept_clear_l1_epte(epte);
		else
			ept_clear_epte(epte);
	}

	flags = __EPTE_READ | __EPTE_EXEC |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (PageHuge(page)) {
		flags |= __EPTE_SZ;
		*epte = epte_addr(page_to_phys(page) & ~((1 << 21) - 1)) |
			flags;
	} else
		*epte = epte_addr(page_to_phys(page)) | flags;

	spin_unlock(&vcpu->ept_lock);

	return 0;
}
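
In the huge-page branch, __EPTE_SZ marks a large mapping, and the ~((1 << 21) - 1) mask aligns the physical address down to a 2 MiB boundary, the size covered by one EPT large-page entry.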
Example #10
0
/* return number of pages pinned... */
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
				     struct ipath_user_sdma_pkt *pkt,
				     unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages_fast(addr, npages, 0, pages);
	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen =
			ipath_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					  pages[j], kmap(pages[j]),
					  dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}
Example #11
0
/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;
	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page, KM_USER0);
	set_bit(bit, base);
	kunmap_atomic(base, KM_USER0);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
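
The kmap_atomic()/kunmap_atomic() calls above use the pre-2.6.37 two-argument signature with an explicit KM_* slot. A hedged sketch of the same three lines against the later single-argument API:

	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);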
Example #12
0
/**
 * lttng_logger_write - write a userspace string into the trace system
 * @file: file pointer
 * @user_buf: user string
 * @count: length to copy
 * @ppos: file position
 *
 * Copy a userspace string into a trace event named "lttng:logger".
 * Copies at most @count bytes into the event "msg" dynamic array.
 * Truncates the count at LTTNG_LOGGER_COUNT_MAX. Returns the number of
 * bytes copied from the source.
 * Returns -EFAULT if the userspace pages cannot be pinned.
 */
static
ssize_t lttng_logger_write(struct file *file, const char __user *user_buf,
		    size_t count, loff_t *ppos)
{
	unsigned int nr_pages = 1, i;
	unsigned long uaddr = (unsigned long) user_buf;
	struct page *pages[2];
	ssize_t written;
	int ret;

	/* Truncate count */
	if (unlikely(count > LTTNG_LOGGER_COUNT_MAX))
		count = LTTNG_LOGGER_COUNT_MAX;

	/* How many pages are we dealing with? */
	if (unlikely((uaddr & PAGE_MASK) != ((uaddr + count) & PAGE_MASK)))
		nr_pages = 2;

	/* Pin userspace pages */
	ret = get_user_pages_fast(uaddr, nr_pages, 0, pages);
	if (unlikely(ret < nr_pages)) {
		if (ret > 0) {
			BUG_ON(ret != 1);
			put_page(pages[0]);
		}
		written = -EFAULT;
		goto end;
	}

	/* Trace the event */
	trace_lttng_logger(user_buf, count);
	written = count;
	*ppos += written;

	for (i = 0; i < nr_pages; i++)
		put_page(pages[i]);
end:
	return written;
}
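
For illustration: with PAGE_SIZE = 0x1000, uaddr = 0xff8 and count = 0x10, uaddr & PAGE_MASK is 0x0 while (uaddr + count) & PAGE_MASK is 0x1000, so the buffer straddles a page boundary and nr_pages becomes 2.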
Example #13
0
static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	
	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
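
The kmalloc()-then-vmalloc() fallback above is the pattern later codified by the kvmalloc() helpers; a hedged equivalent, assuming a kernel that provides kvmalloc_array() (v4.12+):

	p = kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;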
Example #14
0
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->teedev = teedev;
	shm->ctx = ctx;
	shm->id = -1;
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err:
	if (shm) {
		size_t n;

		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		if (shm->pages) {
			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
			kfree(shm->pages);
		}
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
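
A worked example of the page accounting (illustrative values): with PAGE_SIZE = 0x1000, addr = 0x1234 and length = 0x2000, start = 0x1000, shm->offset = 0x234, and num_pages = (roundup(0x3234, 0x1000) - 0x1000) / 0x1000 = 3, so the partially used first and last pages are both pinned.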
Example #15
0
/*
 * @optval points to the userspace buffer that the information snapshot
 * will be copied into.
 *
 * @optlen on input is the size of the buffer in userspace.  @optlen
 * on output is the size of the requested snapshot in bytes.
 *
 * This function returns -errno if there is a failure, particularly -ENOSPC
 * if the given userspace buffer was not large enough to fit the snapshot.
 * On success it returns the positive number of bytes of each array element
 * in the snapshot.
 */
int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
			int __user *optlen)
{
	struct rds_info_iterator iter;
	struct rds_info_lengths lens;
	unsigned long nr_pages = 0;
	unsigned long start;
	unsigned long i;
	rds_info_func func;
	struct page **pages = NULL;
	int ret;
	int len;
	int total;

	if (get_user(len, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	/* check for all kinds of wrapping and the like */
	start = (unsigned long)optval;
	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
		ret = -EINVAL;
		goto out;
	}

	/* a 0 len call is just trying to probe its length */
	if (len == 0)
		goto call_func;

	nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
			>> PAGE_SHIFT;

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = get_user_pages_fast(start, nr_pages, 1, pages);
	if (ret != nr_pages) {
		if (ret > 0)
			nr_pages = ret;
		else
			nr_pages = 0;
		ret = -EAGAIN; /* XXX ? */
		goto out;
	}

	rdsdebug("len %d nr_pages %lu\n", len, nr_pages);

call_func:
	func = rds_info_funcs[optname - RDS_INFO_FIRST];
	if (func == NULL) {
		ret = -ENOPROTOOPT;
		goto out;
	}

	iter.pages = pages;
	iter.addr = NULL;
	iter.offset = start & (PAGE_SIZE - 1);

	func(sock, len, &iter, &lens);
	BUG_ON(lens.each == 0);

	total = lens.nr * lens.each;

	rds_info_iter_unmap(&iter);

	if (total > len) {
		len = total;
		ret = -ENOSPC;
	} else {
		len = total;
		ret = lens.each;
	}

	if (put_user(len, optlen))
		ret = -EFAULT;

out:
	for (i = 0; pages != NULL && i < nr_pages; i++)
		put_page(pages[i]);
	kfree(pages);

	return ret;
}
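
For illustration: with PAGE_SIZE = 0x1000, optval = 0x1ff0 and len = 0x20, nr_pages = (PAGE_ALIGN(0x2010) - 0x1000) >> PAGE_SHIFT = 2, covering both pages that the snapshot copy touches.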
Example #16
0
static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write,
			unsigned long gpa, unsigned long hva)
{
	int ret;
	epte_t *epte, flags;
	struct page *page;
	unsigned huge_shift;
	int level;

	ret = get_user_pages_fast(hva, 1, make_write, &page);
	if (ret != 1) {
		ret = ept_set_pfnmap_epte(vcpu, make_write, gpa, hva);
		if (ret)
			printk(KERN_ERR "ept: failed to get user page %lx\n", hva);
		return ret;
	}

	spin_lock(&vcpu->ept_lock);

	huge_shift = compound_order(compound_head(page)) + PAGE_SHIFT;
	level = 0;
	if (huge_shift == 30)
		level = 2;
	else if (huge_shift == 21)
		level = 1;

	ret = ept_lookup_gpa(vcpu, (void *) gpa,
			     level, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		put_page(page);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	if (epte_present(*epte)) {
		if (!epte_big(*epte) && level == 2)
			ept_clear_l2_epte(epte);
		else if (!epte_big(*epte) && level == 1)
			ept_clear_l1_epte(epte);
		else
			ept_clear_epte(epte);
	}

	flags = __EPTE_READ | __EPTE_EXEC |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (level) {
		struct page *tmp = page;
		page = compound_head(page);
		get_page(page);
		put_page(tmp);

		flags |= __EPTE_SZ;
	}

	*epte = epte_addr(page_to_phys(page)) | flags;

	spin_unlock(&vcpu->ept_lock);

	return 0;
}
Example #17
0
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
{
	int buffers = 0, error = 0;

	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		if (unlikely(!access_ok(VERIFY_READ, base, len)))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages_fast((unsigned long)base, npages,
					0, &pages[buffers]);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	if (buffers)
		return buffers;

	return error;
}
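
Note that the GUP write flag is 0 here, presumably because the pinned pages are only read as the splice source; the partial[] entries then record the usable (offset, length) slice of each mapped page.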
Example #18
0
static int __gup_benchmark_ioctl(unsigned int cmd,
		struct gup_benchmark *gup)
{
	ktime_t start_time, end_time;
	unsigned long i, nr_pages, addr, next;
	int nr;
	struct page **pages;

	if (gup->size > ULONG_MAX)
		return -EINVAL;

	nr_pages = gup->size / PAGE_SIZE;
	pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	nr = gup->nr_pages_per_call;
	start_time = ktime_get();
	for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {
		if (nr != gup->nr_pages_per_call)
			break;

		next = addr + nr * PAGE_SIZE;
		if (next > gup->addr + gup->size) {
			next = gup->addr + gup->size;
			nr = (next - addr) / PAGE_SIZE;
		}

		switch (cmd) {
		case GUP_FAST_BENCHMARK:
			nr = get_user_pages_fast(addr, nr, gup->flags & 1,
						 pages + i);
			break;
		case GUP_LONGTERM_BENCHMARK:
			nr = get_user_pages_longterm(addr, nr, gup->flags & 1,
						     pages + i, NULL);
			break;
		case GUP_BENCHMARK:
			nr = get_user_pages(addr, nr, gup->flags & 1, pages + i,
					    NULL);
			break;
		default:
			kvfree(pages);	/* nothing pinned yet on the first pass */
			return -EINVAL;
		}

		if (nr <= 0)
			break;
		i += nr;
	}
	end_time = ktime_get();

	gup->get_delta_usec = ktime_us_delta(end_time, start_time);
	gup->size = addr - gup->addr;

	start_time = ktime_get();
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		put_page(pages[i]);
	}
	end_time = ktime_get();
	gup->put_delta_usec = ktime_us_delta(end_time, start_time);

	kvfree(pages);
	return 0;
}
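
One detail worth noting: pages is zero-initialised by kvcalloc(), which is what allows the put loop above to stop at the first NULL entry when a GUP call pinned fewer pages than requested.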