Example #1
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}
Example #2
static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;

	BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
	write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
	dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		put_page(page);
	}
}
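
For context, these examples show only the teardown half of the pattern. Below is a minimal, hypothetical sketch of the pinning side that such release helpers pair with: user pages are pinned with write intent, written to through a kernel mapping, and must therefore be flagged with set_page_dirty_lock() before the references are dropped. The helper name fill_user_pages(), the zero-fill payload, and the old four-argument get_user_pages_fast() call are assumptions for illustration, not code taken from any of the projects listed here.

/* Hypothetical sketch: pin user pages, write them, mark them dirty, release. */
static int fill_user_pages(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int i, pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Old-style GUP: the third argument ('1') means the pages will be written. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}

	for (i = 0; i < pinned; i++) {
		void *kaddr = kmap(pages[i]);

		memset(kaddr, 0, PAGE_SIZE);	/* write through the kernel mapping */
		kunmap(pages[i]);
		/*
		 * The modification bypassed the user PTE, so its dirty bit
		 * may not be set; tell the VM explicitly before dropping
		 * the page reference.
		 */
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}

	kfree(pages);
	return pinned;
}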
Example #3
/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write) {
			copied = copy_page_from_iter(page, offset, copy, iter);
			set_page_dirty_lock(page);
		} else {
			copied = copy_page_to_iter(page, offset, copy, iter);
		}
		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}
Example #4
unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
	BUG_ON((long) n < 0);
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long )to) < TASK_SIZE) {
		/* 
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <*****@*****.**>
		 */
		while (n) {
		      	unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;
			
			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long )to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && current->pid == 1) {
				up_read(&current->mm->mmap_sem);
				blk_congestion_wait(WRITE, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
		       		break;
		       	}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
Example #5
File: ept.c Project: jyizheng/dune
static void free_ept_page(epte_t epte)
{
	struct page *page = pfn_to_page(epte_addr(epte) >> PAGE_SHIFT);

	if (epte & __EPTE_WRITE)
		set_page_dirty_lock(page);
	put_page(page);
}
Example #6
unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		
		/*
		 * Cannot take mmap_sem or fault in user pages while atomic;
		 * return the full (uncopied) length instead.
		 */
		if (in_atomic())
			return n;

		
		/*
		 * The CPU does not honor the WP bit when writing from
		 * supervisor mode; copy by hand via get_user_pages().
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
Example #7
/**
 * nfs_free_user_pages - tear down page struct array
 * @pages: array of page struct pointers underlying target buffer
 */
static void
nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;
	for (i = 0; i < npages; i++) {
		if (do_dirty)
			set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
	kfree(pages);
}
Example #8
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kvfree(pages);
}
Example #9
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
Example #10
static void __qib_release_user_pages(struct page **p, size_t num_pages,
				     int dirty)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(p[i]);
		put_page(p[i]);
	}
}
Example #11
static void free_ept_page(epte_t epte)
{
	struct page *page = pfn_to_page(epte_addr(epte) >> PAGE_SHIFT);

	/* PFN mappings are not backed by pages. */
	if (epte & __EPTE_PFNMAP)
		return;

	if (epte & __EPTE_WRITE)
		set_page_dirty_lock(page);
	put_page(page);
}
Example #12
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
Example #13
static int cfs_access_process_vm(struct task_struct *tsk,
				 struct mm_struct *mm,
				 unsigned long addr,
				 void *buf, int len, int write)
{
	/* Just copied from the kernel, for kernels that don't
	 * have access_process_vm() exported */
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
	 * which is already holding mmap_sem for writes.  If some other
	 * thread gets the write lock in the meantime, this thread will
	 * block, but at least it won't deadlock on itself.  LU-1735 */
	if (down_read_trylock(&mm->mmap_sem) == 0)
		return -EDEADLK;

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	return buf - old_buf;
}
Example #14
static void __qib_release_user_pages(struct page **p, size_t num_pages,
				     int dirty)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		qib_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
			 (unsigned long) num_pages, p[i]);
		if (dirty)
			set_page_dirty_lock(p[i]);
		put_page(p[i]);
	}
}
Example #15
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
				 void *buf, int len, int write)
{
	/* Just copied from the kernel, for kernels that don't
	 * have access_process_vm() exported */
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
Example #16
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
{
	int i, j;

	if (!pinned_list)
		return;

	for (i = 0; i < pinned_list->nr_iovecs; i++) {
		struct dma_page_list *page_list = &pinned_list->page_list[i];
		for (j = 0; j < page_list->nr_pages; j++) {
			set_page_dirty_lock(page_list->pages[j]);
			page_cache_release(page_list->pages[j]);
		}
	}

	kfree(pinned_list);
}
Example #17
File: rw26.c Project: rread/lustre
/*  ll_free_user_pages - tear down page struct array
 *  @pages: array of page struct pointers underlying target buffer */
static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
{
    int i;

    for (i = 0; i < npages; i++) {
        if (pages[i] == NULL)
            break;
        if (do_dirty)
            set_page_dirty_lock(pages[i]);
        put_page(pages[i]);
    }

#if defined(HAVE_DIRECTIO_ITER) || defined(HAVE_IOV_ITER_RW)
    kvfree(pages);
#else
    OBD_FREE_LARGE(pages, npages * sizeof(*pages));
#endif
}
Example #18
/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;
	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page, KM_USER0);
	set_bit(bit, base);
	kunmap_atomic(base, KM_USER0);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
Example #19
static void EplApiProcessImagePutUserPages(
    struct page** ppPage_p, BOOL fDirty_p)
{
    unsigned int    nIndex;

    for (nIndex = 0; nIndex < EPL_API_PI_PAGE_COUNT; nIndex++)
    {
        if (ppPage_p[nIndex] == NULL)
        {
            break;
        }
        if (fDirty_p != FALSE)
        {
            set_page_dirty_lock(ppPage_p[nIndex]);
        }
        put_page(ppPage_p[nIndex]);
        ppPage_p[nIndex] = NULL;
    }
}
Example #20
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}
Example #21
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->dma_dir == DMA_FROM_DEVICE)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		vb2_put_vma(buf->vma);
		iounmap((__force void __iomem *)buf->vaddr);
	}
	kfree(buf);
}
Example #22
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
Example #23
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->write)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		if (buf->vma)
			vb2_put_vma(buf->vma);
		iounmap(buf->vaddr);
	}
	kfree(buf);
}
Example #24
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
Example #25
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
Example #26
/* pvfs_bufmap_copy_to_user_task_iovec()
 *
 * copies data out of a mapped buffer to a vector of user space address
 * of a given task specified by the task structure argument (tsk)
 * This is used by the client-daemon for completing an aio
 * operation that was issued by an arbitrary user program.
 * Unfortunately, we cannot use a copy_to_user
 * in that case and need to map in the user pages before
 * attempting the copy!
 *
 * NOTE: There is no need for an analogous copy from user task since
 * the data buffers get copied in the context of the process initiating
 * the write system call!
 *
 * Returns number of bytes copied on success, -errno on failure.
 */
size_t pvfs_bufmap_copy_to_user_task_iovec(
        struct task_struct *tsk,
        struct iovec *iovec, unsigned long nr_segs,
        int buffer_index,
        size_t size_to_be_copied)
{
    size_t ret = 0, amt_copied = 0, cur_copy_size = 0;
    int from_page_index = 0;
    void *from_kaddr = NULL;
    struct iovec *copied_iovec = NULL;
    struct pvfs_bufmap_desc *from = &desc_array[buffer_index];

    struct mm_struct *mm = NULL;
    struct vm_area_struct *vma = NULL;
    struct page *page = NULL;
    unsigned long to_addr = 0;
    void *maddr = NULL;
    unsigned int to_offset = 0;
    unsigned int seg, from_page_offset = 0;

    gossip_debug(GOSSIP_BUFMAP_DEBUG, "pvfs_bufmap_copy_to_user_task_iovec: "
            " PID: %d, iovec %p, from %p, index %d, "
            " size %zd\n", tsk->pid, iovec, from, buffer_index, size_to_be_copied);

    down_read(&bufmap_init_sem);
    if (bufmap_init == 0)
    {
        gossip_err("pvfs2_bufmap_copy_to_user: not yet "
                    "initialized.\n");
        gossip_err("pvfs2: please confirm that pvfs2-client "
                "daemon is running.\n");
        up_read(&bufmap_init_sem);
        return -EIO;
    }
    /*
     * copy the passed in iovec so that we can change some of its fields
     */
    copied_iovec = kmalloc(nr_segs * sizeof(*copied_iovec),
                           PVFS2_BUFMAP_GFP_FLAGS);
    if (copied_iovec == NULL)
    {
        gossip_err("pvfs_bufmap_copy_to_user_iovec: failed allocating memory\n");
        up_read(&bufmap_init_sem);
        return -ENOMEM;
    }
    memcpy(copied_iovec, iovec, nr_segs * sizeof(*copied_iovec));
    /*
     * Go through each segment in the iovec and make sure that
     * the sum of the iov_len fields is at least the given size.
     */
    for (seg = 0, amt_copied = 0; seg < nr_segs; seg++)
    {
        amt_copied += copied_iovec[seg].iov_len;
    }
    if (amt_copied < size_to_be_copied)
    {
        gossip_err("pvfs_bufmap_copy_to_user_task_iovec: computed total (%zd) "
                "is less than (%zd)\n", amt_copied, size_to_be_copied);
        kfree(copied_iovec);
        up_read(&bufmap_init_sem);
        return -EINVAL;
    }
    mm = get_task_mm(tsk);
    if (!mm) 
    {
        kfree(copied_iovec);
        up_read(&bufmap_init_sem);
        return -EIO;
    }
    from_page_index = 0;
    amt_copied = 0;
    seg = 0;
    from_page_offset = 0;
    /*
     * Go through each page of the specified process's address
     * space and copy from the mapped buffer, making sure to do
     * this one page at a time!
     */
    down_read(&mm->mmap_sem);
    while (amt_copied < size_to_be_copied)
    {
        int inc_from_page_index = 0;
	struct iovec *iv = &copied_iovec[seg];

        if (iv->iov_len < (PAGE_SIZE - from_page_offset))
        {
            cur_copy_size = PVFS_util_min(iv->iov_len, size_to_be_copied - amt_copied);
            seg++;
            to_addr = (unsigned long) iv->iov_base;
            inc_from_page_index = 0;
        }
        else if (iv->iov_len == (PAGE_SIZE - from_page_offset))
        {
            cur_copy_size = PVFS_util_min(iv->iov_len, size_to_be_copied - amt_copied);
            seg++;
            to_addr = (unsigned long) iv->iov_base;
            inc_from_page_index = 1;
        }
        else 
        {
            cur_copy_size = PVFS_util_min(PAGE_SIZE - from_page_offset, size_to_be_copied - amt_copied);
            to_addr = (unsigned long) iv->iov_base;
            iv->iov_base += cur_copy_size;
            iv->iov_len  -= cur_copy_size;
            inc_from_page_index = 1;
        }
        ret = get_user_pages(tsk, mm, to_addr, 
                1,/* count */
                1,/* write */
                1,/* force */
                &page, &vma);
        if (ret <= 0)
            break;
        to_offset = to_addr & (PAGE_SIZE - 1);
        maddr = pvfs2_kmap(page);
        from_kaddr = pvfs2_kmap(from->page_array[from_page_index]);
        copy_to_user_page(vma, page, to_addr,
             maddr + to_offset /* dst */, 
             from_kaddr + from_page_offset, /* src */
             cur_copy_size /* len */);
        set_page_dirty_lock(page);
        pvfs2_kunmap(from->page_array[from_page_index]);
        pvfs2_kunmap(page);
        page_cache_release(page);

        amt_copied += cur_copy_size;
        if (inc_from_page_index)
        {
            from_page_offset = 0;
            from_page_index++;
        }
        else 
        {
            from_page_offset += cur_copy_size;
        }
    }
    up_read(&mm->mmap_sem);
    mmput(mm);
    up_read(&bufmap_init_sem);
    kfree(copied_iovec);
    return (amt_copied < size_to_be_copied) ? -EFAULT: amt_copied;
}
Example #27
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		unsigned long paddr;
		int xip = 0;
#ifdef CONFIG_CRAMFS_XIP_DEBUGGABLE
		if (xip_enable_debug && !write) {
			vma = find_extend_vma(mm, addr);
			if (vma && (vma->vm_flags & VM_XIP))
				xip = find_xip_untouched_entry(mm, addr, &paddr);
		}
#endif
		if (xip) {
			maddr = ioremap(paddr, PAGE_SIZE);
			if (!maddr) 
				break;
			page = NULL;
		} else {
			ret = get_user_pages(tsk, mm, addr, 1,
					     write, 1, &page, &vma);
			if (ret <= 0)
				break;
			maddr = kmap(page);
		}
		
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		
		if (xip) 
			iounmap(maddr);
		else {
			kunmap(page);
			page_cache_release(page);
		}

		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);
	
	return buf - old_buf;
}
Example #28
/**
 * process_vm_rw_pages - read/write pages from task specified
 * @task: task to read/write from
 * @mm: mm for task
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @pa: address of page in task to start copying from/to
 * @start_offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @lvec: iovec array specifying where to copy to/from
 * @lvec_cnt: number of elements in iovec array
 * @lvec_current: index in iovec array we are up to
 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
 * @vm_write: 0 means copy from, 1 means copy to
 * @nr_pages_to_copy: number of pages to copy
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct task_struct *task,
			       struct mm_struct *mm,
			       struct page **process_pages,
			       unsigned long pa,
			       unsigned long start_offset,
			       unsigned long len,
			       const struct iovec *lvec,
			       unsigned long lvec_cnt,
			       unsigned long *lvec_current,
			       size_t *lvec_offset,
			       int vm_write,
			       unsigned int nr_pages_to_copy,
			       ssize_t *bytes_copied)
{
	int pages_pinned;
	void *target_kaddr;
	int pgs_copied = 0;
	int j;
	int ret;
	ssize_t bytes_to_copy;
	ssize_t rc = 0;

	*bytes_copied = 0;

	/* Get the pages we're interested in */
	down_read(&mm->mmap_sem);
	pages_pinned = get_user_pages(task, mm, pa,
				      nr_pages_to_copy,
				      vm_write, 0, process_pages, NULL);
	up_read(&mm->mmap_sem);

	if (pages_pinned != nr_pages_to_copy) {
		rc = -EFAULT;
		goto end;
	}

	/* Do the copy for each page */
	for (pgs_copied = 0;
	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
	     pgs_copied++) {
		/* Make sure we have a non zero length iovec */
		while (*lvec_current < lvec_cnt
		       && lvec[*lvec_current].iov_len == 0)
			(*lvec_current)++;
		if (*lvec_current == lvec_cnt)
			break;

		/*
		 * Will copy smallest of:
		 * - bytes remaining in page
		 * - bytes remaining in destination iovec
		 */
		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
				      len - *bytes_copied);
		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
				      lvec[*lvec_current].iov_len
				      - *lvec_offset);

		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;

		if (vm_write)
			ret = copy_from_user(target_kaddr,
					     lvec[*lvec_current].iov_base
					     + *lvec_offset,
					     bytes_to_copy);
		else
			ret = copy_to_user(lvec[*lvec_current].iov_base
					   + *lvec_offset,
					   target_kaddr, bytes_to_copy);
		kunmap(process_pages[pgs_copied]);
		if (ret) {
			*bytes_copied += bytes_to_copy - ret;
			pgs_copied++;
			rc = -EFAULT;
			goto end;
		}
		*bytes_copied += bytes_to_copy;
		*lvec_offset += bytes_to_copy;
		if (*lvec_offset == lvec[*lvec_current].iov_len) {
			/*
			 * Need to copy remaining part of page into the
			 * next iovec if there are any bytes left in page
			 */
			(*lvec_current)++;
			*lvec_offset = 0;
			start_offset = (start_offset + bytes_to_copy)
				% PAGE_SIZE;
			if (start_offset)
				pgs_copied--;
		} else {
			start_offset = 0;
		}
	}

end:
	if (vm_write) {
		for (j = 0; j < pages_pinned; j++) {
			if (j < pgs_copied)
				set_page_dirty_lock(process_pages[j]);
			put_page(process_pages[j]);
		}
	} else {
		for (j = 0; j < pages_pinned; j++)
			put_page(process_pages[j]);
	}

	return rc;
}