Example #1
/**
    \param cmd ioctl command (checked against U2MFN_MAGIC)
    \param data the user-specified address
    \return mfn corresponding to "data" argument, or -1 on error
*/
static long u2mfn_ioctl(struct file *f, unsigned int cmd,
		       unsigned long data)
{
	struct page *user_page;
	void *kaddr;
	long ret;

	if (_IOC_TYPE(cmd) != U2MFN_MAGIC) {
		printk("Qubes u2mfn: wrong IOCTL magic");
		return -ENOTTY;
	}

	switch (cmd) {
	case U2MFN_GET_MFN_FOR_PAGE:
		down_read(&current->mm->mmap_sem);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
		ret = get_user_pages
			(data, 1, (FOLL_WRITE | FOLL_FORCE), &user_page, 0);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
		ret = get_user_pages
		    (data, 1, 1, 0, &user_page, 0);
#else
		ret = get_user_pages
		    (current, current->mm, data, 1, 1, 0, &user_page, 0);
#endif
		up_read(&current->mm->mmap_sem);
		if (ret != 1) {
			printk("U2MFN_GET_MFN_FOR_PAGE: get_user_pages failed, ret=0x%lx\n", ret);
			return -1;
		}
		kaddr = kmap(user_page);
		ret = VIRT_TO_MFN(kaddr);
		kunmap(user_page);
		put_page(user_page);
		break;

	case U2MFN_GET_LAST_MFN:
		if (f->private_data)
			ret = VIRT_TO_MFN(f->private_data);
		else
			ret = 0;
		break;

	default:
		printk("Qubes u2mfn: wrong ioctl passed!\n");
		return -ENOTTY;
	}


	return ret;
}
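For context, a user-space caller is expected to pass the address of the buffer whose MFN it wants as the ioctl argument. A rough sketch of such a caller follows; the device path and header name are assumptions, not taken from the example above.

/* Hypothetical user-space caller for the ioctl above. The device path
 * and the header providing U2MFN_GET_MFN_FOR_PAGE are assumptions. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "u2mfn-kernel.h"	/* assumed to define U2MFN_GET_MFN_FOR_PAGE */

static long buf_to_mfn(void *buf)
{
	int fd = open("/dev/u2mfn", O_RDONLY);	/* assumed device node */
	long mfn;

	if (fd < 0)
		return -1;
	mfn = ioctl(fd, U2MFN_GET_MFN_FOR_PAGE, (unsigned long)buf);
	close(fd);
	return mfn;
}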
Example #2
static void async_pf_execute(struct work_struct *work)
{
	struct page *page = NULL;
	struct vmmr0_async_pf *apf =
		container_of(work, struct vmmr0_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct vmmr0_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;

	might_sleep();

	vmmr0_use_mm(mm);
	down_read(&mm->mmap_sem);
	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
	up_read(&mm->mmap_sem);
	vmmr0_unuse_mm(mm);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->page = page;
	apf->done = true;
	spin_unlock(&vcpu->async_pf.lock);

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	mmdrop(mm);
	vmmr0_put_vm(vcpu->pvm);
}
Example #3
/**
 * nfs_get_user_pages - find and set up pages underlying user's buffer
 * @rw: direction (read or write)
 * @user_addr: starting address of this segment of user's buffer
 * @size: size of this segment
 * @pages: returned array of page struct pointers underlying user's buffer
 */
static inline int
nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
		struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	/* set an arbitrary limit to prevent arithmetic overflow */
	if (size > MAX_DIRECTIO_SIZE)
		return -EFBIG;

	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
	}
	return result;
}
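The references taken by get_user_pages() above must be dropped once the I/O completes. A minimal sketch of the matching release path follows, assuming pages the kernel wrote into (rw == READ) should be marked dirty; this is not the actual NFS helper.

/* Sketch of the matching teardown for the pages pinned above
 * (hypothetical helper, not taken from the NFS source). */
static void nfs_release_user_pages_sketch(struct page **pages,
					  unsigned long page_count,
					  int do_dirty)
{
	unsigned long i;

	for (i = 0; i < page_count; i++) {
		if (do_dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);	/* drop the reference from get_user_pages() */
	}
	kfree(pages);	/* matches the kmalloc() in nfs_get_user_pages() */
}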
Example #4
/*
 * vpif_uservirt_to_phys: This function is used to convert user
 * space virtual address to physical address.
 */
static u32 vpif_uservirt_to_phys(u32 virtp)
{
	struct mm_struct *mm = current->mm;
	unsigned long physp = 0;
	struct vm_area_struct *vma;

	vma = find_vma(mm, virtp);

	/* For kernel direct-mapped memory, take the easy way */
	if (virtp >= PAGE_OFFSET) {
		physp = virt_to_phys((void *)virtp);
	} else if (vma && (vma->vm_flags & VM_IO) && (vma->vm_pgoff)) {
		/* this will catch, kernel-allocated, mmaped-to-usermode addr */
		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
	} else {
		/* otherwise, use get_user_pages() for general userland pages */
		int res, nr_pages = 1;
		struct page *pages;
		down_read(&current->mm->mmap_sem);

		res = get_user_pages(current, current->mm,
				     virtp, nr_pages, 1, 0, &pages, NULL);
		up_read(&current->mm->mmap_sem);

		if (res == nr_pages) {
			physp = __pa(page_address(&pages[0]) +
							(virtp & ~PAGE_MASK));
		} else {
			vpif_err("get_user_pages failed\n");
			return 0;
		}
	}

	return physp;
}
Example #5
static inline u32 vpif_uservirt_to_phys(u32 virtp)
{
	unsigned long physp = 0;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	vma = find_vma(mm, virtp);

	
	if (virtp >= PAGE_OFFSET)
		physp = virt_to_phys((void *)virtp);
	else if (vma && (vma->vm_flags & VM_IO) && (vma->vm_pgoff))
		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
	else {
		
		int res, nr_pages = 1;
		struct page *pages;

		down_read(&current->mm->mmap_sem);

		res = get_user_pages(current, current->mm,
				     virtp, nr_pages, 1, 0, &pages, NULL);
		up_read(&current->mm->mmap_sem);

		if (res == nr_pages)
			physp = __pa(page_address(&pages[0]) +
				     (virtp & ~PAGE_MASK));
		else {
			vpif_err("get_user_pages failed\n");
			return 0;
		}
	}
	return physp;
}
Example #6
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;

	might_sleep();

	use_mm(mm);
	down_read(&mm->mmap_sem);
	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	unuse_mm(mm);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, gva);

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}
Example #7
static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - 
		first_pfn + 1;
	
	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return DRM_ERR(ENOMEM);
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
			     vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE), 
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0) 
			return ret;
		vsg->state = dr_via_pages_locked;
		return DRM_ERR(EINVAL);
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}
Example #8
int drm_ttm_set_user(struct drm_ttm *ttm,
		     struct task_struct *tsk,
		     int write,
		     unsigned long start,
		     unsigned long num_pages,
		     struct page *dummy_read_page)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int i;

	BUG_ON(num_pages != ttm->num_pages);

	ttm->dummy_read_page = dummy_read_page;
	ttm->page_flags |= DRM_TTM_PAGE_USER |
		((write) ? DRM_TTM_PAGE_USER_WRITE : 0);


	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		drm_ttm_free_user_pages(ttm);
		return -ENOMEM;
	}

	for (i = 0; i < num_pages; ++i) {
		if (ttm->pages[i] == NULL)
			ttm->pages[i] = ttm->dummy_read_page;
	}

	return 0;
}
Example #9
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		pr_devel("  slow path ! nr = %d\n", nr);

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
				     nr_pages - nr, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
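The slow-path call above uses the old eight-argument prototype. On kernels in roughly the v5.8 to v6.4 range the same locked call would be written against the flags-based prototype and the renamed mmap lock; the following is a hypothetical sketch, not taken from any particular tree.

/* Sketch: the equivalent locked GUP call on a ~v5.8..v6.4 kernel, where
 * the task/mm parameters are gone, write/force became gup_flags, and
 * mmap_sem is taken via mmap_read_lock()/mmap_read_unlock(). */
static long gup_locked_sketch(unsigned long start, unsigned long nr_pages,
			      bool write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages(start, nr_pages,
			     write ? FOLL_WRITE : 0, pages, NULL);
	mmap_read_unlock(mm);
	return ret;
}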
Example #10
File: rw26.c Project: rread/lustre
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                                    size_t size, struct page ***pages,
                                    int *max_pages)
{
    int result = -ENOMEM;

    /* set an arbitrary limit to prevent arithmetic overflow */
    if (size > MAX_DIRECTIO_SIZE) {
        *pages = NULL;
        return -EFBIG;
    }

    *max_pages = (user_addr + size + PAGE_SIZE - 1) >>
                 PAGE_SHIFT;
    *max_pages -= user_addr >> PAGE_SHIFT;

    OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
    if (*pages) {
        down_read(&current->mm->mmap_sem);
        result = get_user_pages(current, current->mm, user_addr,
                                *max_pages, (rw == READ), 0, *pages,
                                NULL);
        up_read(&current->mm->mmap_sem);
        if (unlikely(result <= 0))
            OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
    }

    return result;
}
Example #11
int
mic_pin_user_pages (void *data, struct page **pages, uint32_t len, int32_t *nf_pages, int32_t nr_pages)
{

	int32_t status = 0;


	if (!(pages)) {
		printk("%s Failed to allocate memory for pages\n", __func__);
		status = -ENOMEM;
		return status;

	}

	// pin the user pages; use semaphores on linux for doing the same
	down_read(&current->mm->mmap_sem);
	*nf_pages = (int32_t)get_user_pages(current, current->mm, (uint64_t)data,
			  nr_pages, PROT_WRITE, 1, pages, NULL);
	up_read(&current->mm->mmap_sem);

	// compare if the no of final pages is equal to no of requested pages
	if ((*nf_pages) < nr_pages) {
		printk("%s failed to do _get_user_pages\n", __func__);
		status = -EFAULT;
		mic_unpin_user_pages(pages, *nf_pages);
		return status;
	}


	return status;

}
Example #12
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}
Example #13
/* fetch the pages addr resides in into pg and initialise sg with them */
int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
		unsigned int pgcount, struct page **pg, struct scatterlist *sg,
		struct task_struct *task, struct mm_struct *mm)
{
	int ret, pglen, i = 0;
	struct scatterlist *sgp;

	if (unlikely(!pgcount || !len || !addr)) {
		sg_mark_end(sg);
		return 0;
	}

	down_read(&mm->mmap_sem);
	ret = get_user_pages(task, mm,
			(unsigned long)addr, pgcount, write, 0, pg, NULL);
	up_read(&mm->mmap_sem);
	if (ret != pgcount)
		return -EINVAL;

	sg_init_table(sg, pgcount);

	pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
	sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));

	len -= pglen;
	for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
		pglen = min((uint32_t)PAGE_SIZE, len);
		sg_set_page(sgp, pg[i++], pglen, 0);
		len -= pglen;
	}
	sg_mark_end(sg_last(sg, pgcount));
	return 0;
}
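A caller of __get_userbuf() has to size the page and scatterlist arrays from the span of the buffer first. A hypothetical caller sketch follows; the helper name and allocation strategy are assumptions, not code from cryptodev.

/* Hypothetical caller: size pg[]/sg[] from the span of [addr, addr+len)
 * and hand them to __get_userbuf(); error handling kept minimal. */
static int map_userbuf_sketch(uint8_t __user *addr, uint32_t len, int write,
			      struct page ***pgp, struct scatterlist **sgp)
{
	unsigned long first, last;
	unsigned int pgcount;
	struct page **pg;
	struct scatterlist *sg;
	int rc;

	if (!len)
		return -EINVAL;
	first = (unsigned long)addr >> PAGE_SHIFT;
	last = ((unsigned long)addr + len - 1) >> PAGE_SHIFT;
	pgcount = last - first + 1;

	pg = kcalloc(pgcount, sizeof(*pg), GFP_KERNEL);
	sg = kcalloc(pgcount, sizeof(*sg), GFP_KERNEL);
	if (!pg || !sg) {
		kfree(pg);
		kfree(sg);
		return -ENOMEM;
	}
	rc = __get_userbuf(addr, len, write, pgcount, pg, sg,
			   current, current->mm);
	if (rc) {
		kfree(pg);
		kfree(sg);
		return rc;
	}
	*pgp = pg;
	*sgp = sg;
	return 0;
}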
Example #14
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov ++;
		cs->nr_segs --;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}
Example #15
static struct page * ocfs2_get_write_source(struct ocfs2_buffered_write_priv *bp,
					    const struct iovec *cur_iov,
					    size_t iov_offset)
{
	int ret;
	char *buf;
	struct page *src_page = NULL;

	buf = cur_iov->iov_base + iov_offset;

	if (!segment_eq(get_fs(), KERNEL_DS)) {
		/*
		 * Pull in the user page. We want to do this outside
		 * of the meta data locks in order to preserve locking
		 * order in case of page fault.
		 */
		ret = get_user_pages(current, current->mm,
				     (unsigned long)buf & PAGE_CACHE_MASK, 1,
				     0, 0, &src_page, NULL);
		if (ret == 1)
			bp->b_src_buf = kmap(src_page);
		else
			src_page = ERR_PTR(-EFAULT);
	} else {
		bp->b_src_buf = (char *)((unsigned long)buf & PAGE_CACHE_MASK);
	}

	return src_page;
}
Example #16
unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
	BUG_ON((long) n < 0);
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long )to) < TASK_SIZE) {
		/* 
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <*****@*****.**>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;
			
			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long )to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && current->pid == 1) {
				up_read(&current->mm->mmap_sem);
				blk_congestion_wait(WRITE, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
Example #17
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    struct page *page;
    void *old_buf = buf;

    /* Worry about races with exit() */
    task_lock(tsk);
    mm = tsk->mm;
    if (!tsk->task_dumpable || (&init_mm == mm))
        mm = NULL;
    if (mm)
        atomic_inc(&mm->mm_users);
    task_unlock(tsk);
    if (!mm)
        return 0;

    down_read(&mm->mmap_sem);
    /* ignore errors, just check how much was successfully transferred */
    while (len) {
        int bytes, ret, offset;
        void *maddr;

        ret = get_user_pages(current, mm, addr, 1,
                             write, 1, &page, &vma);
        if (ret <= 0)
            break;

        bytes = len;
        offset = addr & (PAGE_SIZE-1);
        if (bytes > PAGE_SIZE-offset)
            bytes = PAGE_SIZE-offset;

        flush_cache_page(vma, addr);

        maddr = kmap(page);
        if (write) {
            memcpy(maddr + offset, buf, bytes);
#ifdef CONFIG_SUPERH
            flush_dcache_page(page);
#endif
            flush_page_to_ram(page);
            flush_icache_user_range(vma, page, addr, len);
        } else {
            memcpy(buf, maddr + offset, bytes);
            flush_page_to_ram(page);
        }
        kunmap(page);
        put_page(page);
        len -= bytes;
        buf += bytes;
        addr += bytes;
    }
    up_read(&mm->mmap_sem);
    mmput(mm);

    return buf - old_buf;
}
Example #18
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		       void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int i, err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
			   dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
			   user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Get user pages for DMA Xfer */
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm,
			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
	up_read(&current->mm->mmap_sem);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
			   err, user_dma.page_count);
		return -EINVAL;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
Example #19
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return get_user_pages(tsk, mm, start, nr_pages, write, force,
			      pages, NULL);
}
Example #20
unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		
		if (in_atomic())
			return n;

		
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
Example #21
unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <*****@*****.**>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
Example #22
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags)
{
	long ret;
	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
			     pages, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}
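Compatibility wrappers like this one and Example #19 exist because the get_user_pages() prototype changed several times. Out-of-tree code often selects the calling convention at build time instead, as Example #1 does; below is a sketch of that pattern for trees up to roughly v6.4 (newer kernels also dropped the final vmas argument), with an assumed helper name.

/* Build-time selection between get_user_pages() calling conventions,
 * mirroring Example #1 (sketch for an out-of-tree module; tsk/mm are
 * only used by the pre-4.6 variant). */
static long gup_compat_sketch(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long start, unsigned long nr_pages,
			      int write, struct page **pages)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
	return get_user_pages(start, nr_pages, write ? FOLL_WRITE : 0,
			      pages, NULL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
	return get_user_pages(start, nr_pages, write, 0, pages, NULL);
#else
	return get_user_pages(tsk, mm, start, nr_pages, write, 0, pages, NULL);
#endif
}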
Example #23
int pindown_pages(struct page **pg_list, 
                        struct iovec *iovec, 
                        int iovec_count, int flushDcache, int rw){
	int count,err=0;
	struct iovec *tiovec;	
        int x=0,i;
        struct page **page_list=pg_list;
        int pg_count=0;
        char *iov_base;

        /* Acquire the mm page semaphore. */
        down_read(&current->mm->mmap_sem);
	for ( count = 0, tiovec = iovec; count < iovec_count; count++, tiovec++){
                int nr_pages = (((u_long)tiovec->iov_base + 
                                tiovec->iov_len + PAGE_SIZE - 1)/PAGE_SIZE)-
                                ((u_long)tiovec->iov_base/PAGE_SIZE);
                if ( rw == READ ){
                        iov_base = (u_long) tiovec->iov_base - 
                                ((u_long) tiovec->iov_base & ~PAGE_MASK); 
                        for ( i = 0; i < nr_pages; i++, iov_base += PAGE_SIZE){
                                if ( __get_user(x, iov_base) || __put_user(x, iov_base))
                                        BUG();
                        }
                }
                err = get_user_pages(current,
                                     current->mm,
                                     (unsigned long int)tiovec->iov_base,
                                     nr_pages,
                                     rw==READ,  /* read access only for in data */
                                     0, /* no force */
                                     &page_list[pg_count],
                                     NULL);
                if (err < 0 || err < nr_pages )
                        goto err_out;
                pg_count += err;
        }
        page_list[pg_count]=NULL;
        if ( flushDcache ) {
                flush_dcache_all();
        }
err_out:	
        if (err < 0) {
                unlock_pages(pg_list);
                if (flushDcache ) {
                        flush_dcache_all();
                }
                up_read(&current->mm->mmap_sem);
                return err;
        }
	up_read(&current->mm->mmap_sem);

	return 0;
}
Example #24
static int cfs_access_process_vm(struct task_struct *tsk,
				 struct mm_struct *mm,
				 unsigned long addr,
				 void *buf, int len, int write)
{
	/* Just copied from kernel for the kernels which doesn't
	 * have access_process_vm() exported */
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
	 * which is already holding mmap_sem for writes.  If some other
	 * thread gets the write lock in the meantime, this thread will
	 * block, but at least it won't deadlock on itself.  LU-1735 */
	if (down_read_trylock(&mm->mmap_sem) == 0)
		return -EDEADLK;

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	return buf - old_buf;
}
Example #25
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
Example #26
static int sgl_fill_user_pages(struct page **pages, unsigned long uaddr,
			const unsigned int nr_pages, int rw)
{
	int ret;

	/* Get user pages for the DMA transfer */
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, nr_pages, rw, 0,
			    pages, NULL);
	up_read(&current->mm->mmap_sem);

	return ret;
}
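Because these pages are destined for DMA, newer kernels would pin them with the FOLL_PIN API instead. The following is a hypothetical rewrite against a current tree (v6.5+ dropped the vmas argument), not code from the driver; the release side would then use unpin_user_pages() rather than put_page().

/* Sketch: the same helper on a current (~v6.5+) kernel using the
 * FOLL_PIN API; pages pinned this way are released with
 * unpin_user_pages(), not put_page(). */
static int sgl_pin_user_pages_sketch(struct page **pages, unsigned long uaddr,
				     unsigned int nr_pages, int rw)
{
	int ret;

	mmap_read_lock(current->mm);
	ret = pin_user_pages(uaddr, nr_pages, rw ? FOLL_WRITE : 0, pages);
	mmap_read_unlock(current->mm);
	return ret;
}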
Example #27
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
				 void *buf, int len, int write)
{
	/* Just copied from kernel for the kernels which doesn't
	 * have access_process_vm() exported */
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
Example #28
static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
				1, 0, 0, &page, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_putc(m, ']');
}
Example #29
/**
 * \<\<private\>\> Writes a specified memory area into the checkpoint
 * file - heavy version.  It fills out the rest of the header, sets
 * pathname size to 0 (not used), and writes the header into the
 * checkpoint file.
 *
 * It is necessary to align the file to page size. This is required
 * otherwise we wouldn't be able to mmap pages from the file upon
 * restoring the checkpoint.
 *
 * The following algorithm dumps the pages; the idea comes from a regular
 * core dump handler (e.g. in fs/binfmt_elf.c)
 *
 \verbatim
  For each page in the region do:
    - get the page - might trigger pagefaults to load the pages
    - map the page descriptor to a valid linear address (user pages might
    be in high memory)
    - if the page is zero or marked untouched, advance the offset in the
    checkpoint only - there is no need to write 0's to disk, just create
    the gap for them.
    - otherwise write the entire page into the checkpoint file.
 \endverbatim
 *
 * @param *ckpt - checkpoint file where the area is to be stored
 * @param *vma - the actual VM area that is being processed
 * @param *hdr - partially filled VM area header
 * @return 0 upon success.
 */
static int tcmi_ckpt_vm_area_write_h(struct tcmi_ckpt *ckpt, 
				     struct vm_area_struct *vma, 
				     struct tcmi_ckpt_vm_area_hdr *hdr)
{
	/* page for the filepathname */
	unsigned long addr;

	/* finish the header */
	hdr->type = TCMI_CKPT_VM_AREA_HEAVY;
	hdr->pathname_size = 0;
	/* write the header into the checkpoint */
	if (tcmi_ckpt_write(ckpt, hdr, sizeof(*hdr)) < 0) {
		mdbg(ERR3, "Error writing VM area header chunk");
		goto exit0;
	}
	/* align the current file position to page size boundary */
	tcmi_ckpt_page_align(ckpt);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		struct page *page;
		struct vm_area_struct *vma;
		void *kaddr;
		if (get_user_pages(current, current->mm, addr, 1, 0, 1,
				   &page, &vma) <= 0) {
			mdbg(INFO4, "Skipping untouched page at %08lx", addr);
			tcmi_ckpt_seek(ckpt, PAGE_SIZE, 1);
			continue;
		} 
		/*if (page == ZERO_PAGE(addr)) {
			mdbg(INFO4, "Skipping zero page at %08lx", addr);
			tcmi_ckpt_seek(ckpt, PAGE_SIZE, 1);
			continue;
		}
		*/
		/* actual page, that needs to be written. */
		flush_cache_page(vma, addr,  page_to_pfn(page));  /* TODO: check this fix is correct */
		kaddr = tcmi_ckpt_vm_area_kmap(page);
		/* write the page into the checkpoint */
		if (tcmi_ckpt_write(ckpt, kaddr, PAGE_SIZE) < 0) {
			mdbg(ERR3, "Error writing page at %08lx", addr);
			goto exit0;
		}
		tcmi_ckpt_vm_area_kunmap(page);
	}
	return 0;

	/* error handling */
 exit0:
	return -EINVAL;
}
Example #30
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
				     struct ipath_user_sdma_pkt *pkt,
				     unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages(current, current->mm, addr,
			     npages, 0, 1, pages, NULL);

	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		
		const int flen =
			ipath_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					  pages[j], kmap(pages[j]),
					  dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}