Example #1
/* map an outside memory to an inside memory by task */
static int memory_map_task(const byte *addr, word *size, void **map, byte **new_addr, int write, struct task_struct *task)
{
	word start;
	word offset;
	word end_offset;
	word npages;
	struct page **pages = NULL;
	int ret;

	if (*size == 0) {
		return 0;
	}

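	/*
	 * Split the request into a page-aligned start address, the offset
	 * within the first page, and the offset of the end within the last
	 * page.
	 */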
	start = ROUNDDOWN((word)addr, PAGE_SIZE);
	offset = ((word)addr) & (PAGE_SIZE - 1);
	end_offset = (((word)addr) + *size) & (PAGE_SIZE - 1);

	npages = ROUNDUP((word)addr + *size, PAGE_SIZE) - start;
	npages /= PAGE_SIZE;

	if (npages == 0) {
		/* integer overflow when rounding up */
		ERROR(-ERROR_MEM);
	}

	pages = memory_alloc(npages * sizeof(struct page *));
	if (NULL == pages) {
		ERROR(-ERROR_MEM);
	}

	ret = get_user_pages_remote(task, task->mm, start, npages, write ? FOLL_WRITE : 0, pages, NULL, NULL);
	if (ret <= 0) {
		memory_free(pages);
		ERROR(-ERROR_POINT);
	}

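	/*
	 * If fewer pages were pinned than requested, shrink *size so that it
	 * covers only the pinned pages; the last page of the original request
	 * may be partial, hence the end_offset adjustment.
	 */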
	if (ret != npages) {
		BUG_ON(ret > npages);

		*size -= ((npages - ret) - 1) * PAGE_SIZE;
		*size -= (end_offset ? end_offset : PAGE_SIZE);
		npages = ret;
	}

	BUG_ON((int)*size < 0);

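	/*
	 * Map the pinned pages contiguously into kernel virtual address
	 * space; where the arch provides PAGE_KERNEL_RO, read-only requests
	 * get a read-only mapping.
	 */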
#ifndef PAGE_KERNEL_RO
	*map = vmap(pages, npages, 0, PAGE_KERNEL);
#else
	*map = vmap(pages, npages, 0, write ? PAGE_KERNEL : PAGE_KERNEL_RO);
#endif
	memory_free(pages);
	if (NULL == *map) {
		ERROR(-ERROR_POINT);
	}

	*new_addr = (byte *)(((word)(*map)) + offset);
	return 0;
}
Example #2
long gupr_wrapper(struct task_struct *tsk, struct mm_struct *mm,
		  unsigned long start, unsigned long nr_pages,
		  unsigned int gup_flags, struct page **pages,
		  struct vm_area_struct **vmas)
{
	return get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
				     pages, vmas);
}
Example #3
/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest-size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not the smallest-length instruction supported
 * by that architecture, then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
			&vma);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	ret = __replace_page(vma, vaddr, old_page, new_page);
	put_page(new_page);
put_old:
	put_page(old_page);

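	/*
	 * __replace_page() returns -EAGAIN when the old page is no longer
	 * mapped at vaddr (e.g. it raced with reclaim); restart from a fresh
	 * page lookup.
	 */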
	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
Example #4
/**
 * crystalhd_map_dio - Map user address for DMA
 * @adp:	Adapter instance
 * @ubuff:	User buffer to map.
 * @ubuff_sz:	User buffer size.
 * @uv_offset:	UV buffer offset.
 * @en_422mode: TRUE:422 FALSE:420 Capture mode.
 * @dir_tx:	TRUE for Tx (To device from host)
 * @dio_hnd:	Handle to mapped DIO request.
 *
 * Return:
 *	Status.
 *
 * This routine maps user address and lock pages for DMA.
 *
 */
BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
			  uint32_t ubuff_sz, uint32_t uv_offset,
			  bool en_422mode, bool dir_tx,
			  struct crystalhd_dio_req **dio_hnd)
{
	struct device *dev;
	struct crystalhd_dio_req	*dio;
	uint32_t start = 0, end = 0, count = 0;
	uint32_t spsz = 0;
	unsigned long uaddr = 0, uv_start = 0;
	int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;

	if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return BC_STS_INV_ARG;
	}

	dev = &adp->pdev->dev;

	/* Compute pages */
	uaddr = (unsigned long)ubuff;
	count = ubuff_sz;
	end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;

	if (!count || ((uaddr + count) < uaddr)) {
		dev_err(dev, "User addr overflow!!\n");
		return BC_STS_INV_ARG;
	}

	dio = crystalhd_alloc_dio(adp);
	if (!dio) {
		dev_err(dev, "dio pool empty..\n");
		return BC_STS_INSUFF_RES;
	}

	if (dir_tx) {
		rw = WRITE;
		dio->direction = DMA_TO_DEVICE;
	} else {
		rw = READ;
		dio->direction = DMA_FROM_DEVICE;
	}

	if (nr_pages > dio->max_pages) {
		dev_err(dev, "max_pages(%d) exceeded(%d)!!\n",
			dio->max_pages, nr_pages);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_INSUFF_RES;
	}

	if (uv_offset) {
		uv_start = (uaddr + uv_offset) >> PAGE_SHIFT;
		dio->uinfo.uv_sg_ix = uv_start - start;
		dio->uinfo.uv_sg_off = ((uaddr + uv_offset) & ~PAGE_MASK);
	}

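	/*
	 * The DMA path works in 4-byte units; copy any unaligned trailing
	 * "fill bytes" into fb_va so they can be handled separately from the
	 * scatterlist.
	 */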
	dio->fb_size = ubuff_sz & 0x03;
	if (dio->fb_size) {
		res = copy_from_user(dio->fb_va,
				     (void *)(uaddr + count - dio->fb_size),
				     dio->fb_size);
		if (res) {
			dev_err(dev, "failed %d to copy %u fill bytes from %p\n",
				res, dio->fb_size,
				(void *)(uaddr + count - dio->fb_size));
			crystalhd_unmap_dio(adp, dio);
			return BC_STS_INSUFF_RES;
		}
	}

	down_read(&current->mm->mmap_sem);

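	/*
	 * The GUP API changed over time: 4.9+ takes gup_flags, 4.6-4.8 uses
	 * get_user_pages_remote() with separate write/force arguments, and
	 * older kernels pass the task/mm pair to get_user_pages() directly.
	 */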
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)
	res = get_user_pages(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
			     dio->pages, NULL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
	res = get_user_pages_remote(current, current->mm, uaddr, nr_pages, rw == READ,
			     0, dio->pages, NULL);
#else
	res = get_user_pages(current, current->mm, uaddr, nr_pages, rw == READ,
			     0, dio->pages, NULL);
#endif

	up_read(&current->mm->mmap_sem);

	/* Save for release..*/
	dio->sig = crystalhd_dio_locked;
	if (res < nr_pages) {
		dev_err(dev, "get pages failed: %d-%d\n", nr_pages, res);
		dio->page_cnt = res;
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}

	dio->page_cnt = nr_pages;
	/* Get scatter/gather */
	crystalhd_init_sg(dio->sg, dio->page_cnt);
	crystalhd_set_sg(&dio->sg[0], dio->pages[0], 0, uaddr & ~PAGE_MASK);
	if (nr_pages > 1) {
		dio->sg[0].length = PAGE_SIZE - dio->sg[0].offset;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
		count -= dio->sg[0].length;
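		/*
		 * One sg entry per remaining page; a short final page is
		 * trimmed down to a 4-byte multiple, and a sub-4-byte tail
		 * gets its own entry that is dropped later via skip_fb_sg.
		 */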
		for (i = 1; i < nr_pages; i++) {
			if (count < 4) {
				spsz = count;
				skip_fb_sg = 1;
			} else {
				spsz = (count < PAGE_SIZE) ?
					(count & ~0x03) : PAGE_SIZE;
			}
			crystalhd_set_sg(&dio->sg[i], dio->pages[i], spsz, 0);
			count -= spsz;
		}
	} else {
		if (count < 4) {
			dio->sg[0].length = count;
			skip_fb_sg = 1;
		} else {
			dio->sg[0].length = count - dio->fb_size;
		}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 23)
#ifdef CONFIG_X86_64
		dio->sg[0].dma_length = dio->sg[0].length;
#endif
#endif
	}
	dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
				 dio->page_cnt, dio->direction);
	if (dio->sg_cnt <= 0) {
		dev_err(dev, "sg map %d-%d\n", dio->sg_cnt, dio->page_cnt);
		crystalhd_unmap_dio(adp, dio);
		return BC_STS_ERROR;
	}
	if (dio->sg_cnt && skip_fb_sg)
		dio->sg_cnt -= 1;
	dio->sig = crystalhd_dio_sg_mapped;
	/* Fill in User info.. */
	dio->uinfo.xfr_len   = ubuff_sz;
	dio->uinfo.xfr_buff  = ubuff;
	dio->uinfo.uv_offset = uv_offset;
	dio->uinfo.b422mode  = en_422mode;
	dio->uinfo.dir_tx    = dir_tx;

	*dio_hnd = dio;

	return BC_STS_SUCCESS;
}
Example #5
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
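			/*
			 * Pin the whole userptr range; get_user_pages_remote()
			 * may return fewer pages than requested, so loop until
			 * everything is pinned or an error comes back.
			 */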
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

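	/*
	 * Publish the result only if this worker is still the one the object
	 * is waiting on; otherwise the request was cancelled or superseded.
	 */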
	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}