Example #1
/*
 * Copy a binary buffer from kernel space to user space.
 *
 * Returns 0 on success, EFAULT on failure.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct vmspace *vm = curproc->p_vmspace;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	vm_page_t m;
	int error;
	size_t n;

	error = 0;
	while (len) {
		/*
		 * Fault in and hold the user page backing udaddr.  On
		 * failure vm_fault_page() sets error (typically EFAULT).
		 */
		m = vm_fault_page(&vm->vm_map, trunc_page((vm_offset_t)udaddr),
				  VM_PROT_READ|VM_PROT_WRITE,
				  VM_FAULT_NORMAL, &error);
		if (error)
			break;
		/*
		 * Copy at most up to the end of the current page.
		 */
		n = PAGE_SIZE - ((vm_offset_t)udaddr & PAGE_MASK);
		if (n > len)
			n = len;
		/*
		 * Map the held page into KVA via an lwbuf and copy the
		 * kernel data in at the correct offset within the page.
		 */
		lwb = lwbuf_alloc(m, &lwb_cache);
		bcopy(kaddr, (char *)lwbuf_kva(lwb) +
			     ((vm_offset_t)udaddr & PAGE_MASK), n);
		len -= n;
		udaddr = (char *)udaddr + n;
		kaddr = (const char *)kaddr + n;
		/*
		 * Mark the page dirty, release the temporary mapping,
		 * and drop the hold acquired by vm_fault_page().
		 */
		vm_page_dirty(m);
		lwbuf_free(lwb);
		vm_page_unhold(m);
	}
	return (error);
}
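
For context, a minimal sketch of how copyout() might be called, assuming a
kernel helper that hands a structure back to a user-supplied buffer.  The
struct kstats type and the helper name are illustrative assumptions, not part
of the original source.

/*
 * Hypothetical usage sketch (not from the original source): return a
 * kernel-resident structure to a user buffer.  Assumes a made-up
 * struct kstats; only the copyout() call reflects the code above.
 */
static int
return_stats_to_user(const struct kstats *ks, void *udata, size_t ulen)
{
	if (ulen < sizeof(*ks))
		return (EINVAL);

	/* copyout() returns 0 on success or EFAULT on a bad address */
	return (copyout(ks, udata, sizeof(*ks)));
}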
Example #2
/*
 * p->p_token is held on entry.
 */
static int
procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio)
{
	int error;
	int writing;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t pageno = 0;		/* page-aligned user address */
	vm_prot_t reqprot;
	vm_offset_t kva;

	/*
	 * If the vmspace is in the midst of being allocated or
	 * deallocated, or the process is exiting, don't try to grab
	 * anything.  The page tables in that process may be in an
	 * inconsistent state.
	 */
	vm = p->p_vmspace;
	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return (EFAULT);
	if ((p->p_flags & (P_WEXIT | P_INEXEC)) ||
	    sysref_isinactive(&vm->vm_sysref))
		return (EFAULT);

	/*
	 * The map we want...
	 */
	vmspace_hold(vm);
	map = &vm->vm_map;

	writing = (uio->uio_rw == UIO_WRITE);
	reqprot = VM_PROT_READ;
	if (writing)
		reqprot |= VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE;

	kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);

	/*
	 * Map in only one page at a time.  We don't have to, but it
	 * keeps the loop simple.
	 */
	do {
		vm_offset_t uva;
		vm_offset_t page_offset;	/* offset into page */
		size_t len;
		vm_page_t m;

		uva = (vm_offset_t) uio->uio_offset;

		/*
		 * Page-align the user address and compute the offset
		 * within the page.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = szmin(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		m = vm_fault_page(map, pageno, reqprot,
				  VM_FAULT_NORMAL, &error);
		if (error) {
			KKASSERT(m == NULL);
			error = EFAULT;
			break;
		}

		/*
		 * Enter the page into the temporary KVA mapping and do
		 * the I/O.  We can switch between cpus, so don't bother
		 * synchronizing the mapping across all cores.
		 */
		pmap_kenter_quick(kva, VM_PAGE_TO_PHYS(m));
		error = uiomove((caddr_t)(kva + page_offset), len, uio);
		pmap_kremove_quick(kva);

		/*
		 * Release the hold on the page; we are done with it.
		 */
		vm_page_unhold(m);
	} while (error == 0 && uio->uio_resid > 0);

	vmspace_drop(vm);
	kmem_free(&kernel_map, kva, PAGE_SIZE);

	return (error);
}
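
The two examples differ mainly in how the faulted page is mapped: copyout()
takes a per-call lwbuf mapping, while procfs_rwmem() enters each page into a
single preallocated pageable KVA slot with pmap_kenter_quick().  As a hedged
sketch of the calling side, a procfs wrapper would acquire p->p_token before
calling in, since the function's header comment requires it to be held on
entry.  The wrapper name below is an assumption, not from the original source.

/*
 * Hypothetical caller sketch: forward a uio prepared by the VFS
 * layer, honoring procfs_rwmem()'s contract that p->p_token is
 * held on entry.  The function name is illustrative only.
 */
static int
procfs_domem_sketch(struct proc *curp, struct proc *p, struct uio *uio)
{
	int error;

	if (uio->uio_resid == 0)
		return (0);

	lwkt_gettoken(&p->p_token);
	error = procfs_rwmem(curp, p, uio);
	lwkt_reltoken(&p->p_token);

	return (error);
}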