Code Example #1
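/* sys_munmap: unmap pages from the calling process's address space; note the
 * range-allocated check compiled out under #if 0 below. */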
int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * get syscall args.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if (size == 0)
		return (0);

	error = range_test(addr, size, false);
	if (error)
		return error;

	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */

	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}
Code Example #2
File: mem.c Project: lacombar/netbsd-alc
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	register vaddr_t o, v;
	register int c;
	register struct iovec *iov;
	int error = 0;
	vm_prot_t prot;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

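		/* DEV_MEM: physical memory, accessed through a transient mapping */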
		case DEV_MEM:
			mutex_enter(&mm_lock);
			v = uio->uio_offset;
			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			error = check_pa_acc(uio->uio_offset, prot);
			if (error) {
				mutex_exit(&mm_lock);
				break;
			}
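			/*
			 * map the target physical page at the scratch
			 * address vmmap, copy through it, then tear the
			 * mapping down again.
			 */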
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v), prot, PMAP_WIRED|prot);
			o = uio->uio_offset & PGOFSET;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove((char *)vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			mutex_exit(&mm_lock);
			break;

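		/* DEV_KMEM: kernel virtual memory; validate the range before copying */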
		case DEV_KMEM:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if (v >= (vaddr_t)&start && v <
			    (vaddr_t)kern_end) {
				if (v < (vaddr_t)&__data_start &&
				    uio->uio_rw == UIO_WRITE)
					return EFAULT;
			} else if (v >= lkm_start && v < lkm_end) {
				if (!uvm_map_checkprot(lkm_map, v, v + c,
				    uio->uio_rw == UIO_READ ?
				    VM_PROT_READ: VM_PROT_WRITE))
					return EFAULT;
			} else {
				if (!uvm_kernacc((void *)v, c,
				    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
					return EFAULT;
			}
			error = uiomove((void *)v, c, uio);
			break;

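		/* DEV_NULL: EOF on read, rathole on write */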
		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

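		/* DEV_ZERO: zeroes on read, rathole on write */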
		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
				return (0);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			break;

		default:
			return (ENXIO);
		}
	}
	return (error);
}
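
The DEV_NULL and DEV_ZERO arms above are the paths taken by ordinary reads and writes on /dev/null and /dev/zero. A minimal user-space sketch (a hypothetical test program, not part of the netbsd-alc tree) that exercises both:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[16];
	int zfd = open("/dev/zero", O_RDONLY);
	int nfd = open("/dev/null", O_WRONLY);

	if (zfd == -1 || nfd == -1)
		return 1;
	/* read() lands in the DEV_ZERO case: uiomove() copies from zeropage */
	ssize_t nread = read(zfd, buf, sizeof(buf));
	/* write() lands in the DEV_NULL case: uio_resid is simply zeroed */
	ssize_t nwritten = write(nfd, buf, sizeof(buf));

	printf("read %zd bytes of zeroes, wrote %zd bytes to null\n",
	    nread, nwritten);
	close(zfd);
	close(nfd);
	return 0;
}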
Code Example #3
File: mem.c Project: SylvestreG/bitrig
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	extern vaddr_t kern_end;
	vaddr_t v;
	int c;
	struct iovec *iov;
	int error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
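			/* physical memory is reached through the pmap direct map */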
			v = PMAP_DIRECT_MAP(uio->uio_offset);
			error = uiomove((caddr_t)v, uio->uio_resid, uio);
			continue;

/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if (v >= (vaddr_t)&start && v < kern_end) {
				if (v < (vaddr_t)&etext &&
				    uio->uio_rw == UIO_WRITE)
					return EFAULT;
#ifdef LKM
			} else if (v >= lkm_start && v < lkm_end) {
				if (!uvm_map_checkprot(lkm_map, v, v + c,
				    uio->uio_rw == UIO_READ ?
				    UVM_PROT_READ: UVM_PROT_WRITE))
					return (EFAULT);
#endif
			} else if ((!uvm_kernacc((caddr_t)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) &&
			    (v < PMAP_DIRECT_BASE || v > PMAP_DIRECT_END))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL)
				zeropage = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK|M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENXIO);
		}
		iov->iov_base = (int8_t *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}

	return (error);
}
Code Example #4
File: uvm_mmap.c Project: genua/anoubis_os
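/* sys_munmap: unmap pages from the calling process's address space (older
 * proc-based entry point); here the range-allocated check is active. */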
int
sys_munmap(struct proc *p, void *v, register_t *retval)
{
	struct sys_munmap_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_map_t map;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct vm_map_entry *dead_entries;

	/*
	 * get syscall args...
	 */

	addr = (vaddr_t) SCARG(uap, addr);
	size = (vsize_t) SCARG(uap, len);
	
	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */
	ALIGN_ADDR(addr, size, pageoff);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (addr > SIZE_MAX - size)
		return (EINVAL);
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (vm_min_address > 0 && addr < vm_min_address)
		return (EINVAL);
	map = &p->p_vmspace->vm_map;

	vm_map_lock(map);	/* lock map so we can checkprot */

	/*
	 * interesting system call semantic: make sure entire range is 
	 * allocated before allowing an unmap.
	 */

	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}

	/*
	 * doit!
	 */
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, p);

	vm_map_unlock(map);	/* and unlock */

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);

	return (0);
}
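
The uvm_map_checkprot() pass here is the same one Code Example #1 compiles out under #if 0: with it active, munmap() over a range that is not entirely mapped fails with EINVAL; without it, the call simply succeeds. A minimal user-space sketch (a hypothetical test, not from either project) that makes the difference visible:

#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *p;

	/* map three pages, then punch a hole through the middle one */
	p = mmap(NULL, 3 * pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	if (munmap(p + pagesz, pagesz) == -1)
		return 1;
	/*
	 * unmapping the whole range now crosses the hole: EINVAL where
	 * the checkprot pass is active, success where it is compiled out
	 */
	if (munmap(p, 3 * pagesz) == -1)
		printf("munmap over hole: %s\n", strerror(errno));
	else
		printf("munmap over hole: ok\n");
	return 0;
}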