Example #1
0
/*
 * uvm_mmap_dev: establish a shared, read/write mapping of device
 * "dev" at offset "off" into the address space of process "p".
 *
 * A non-NULL *addrp requests a fixed mapping address; otherwise a
 * default address is chosen by the emulation and written back to
 * *addrp.  Returns 0 on success or an errno value.
 */
int
uvm_mmap_dev(struct proc *p, void **addrp, size_t len, dev_t dev,
    off_t off)
{
	struct uvm_object *uobj;
	int error, mapflags, protection;

	mapflags = MAP_SHARED;
	protection = VM_PROT_READ | VM_PROT_WRITE;

	if (*addrp != NULL) {
		/* caller supplied an address: map exactly there */
		mapflags |= MAP_FIXED;
	} else {
		/* otherwise ask the emulation for its default hint */
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
	}

	/* attach a device pager object; NULL means the device is unmappable */
	uobj = udv_attach(dev, protection, off, len);
	if (uobj == NULL)
		return EINVAL;

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, protection, protection, mapflags, UVM_ADV_RANDOM,
	    uobj, off, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}
Example #2
0
/*
 * uvm_mmap_anon: create a private anonymous read/write mapping of
 * "len" bytes in the address space of process "p".
 *
 * A non-NULL *addrp requests a fixed mapping address; otherwise a
 * default address is chosen by the emulation and returned through
 * *addrp.  Returns 0 on success or an errno value.
 */
int
uvm_mmap_anon(struct proc *p, void **addrp, size_t len)
{
	int error, mapflags, protection;

	mapflags = MAP_PRIVATE | MAP_ANON;
	protection = VM_PROT_READ | VM_PROT_WRITE;

	if (*addrp != NULL) {
		/* caller picked the address: map exactly there */
		mapflags |= MAP_FIXED;
	} else {
		/* otherwise ask the emulation for its default hint */
		*addrp = (void *)p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, len,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
	}

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, protection, protection, mapflags, UVM_ADV_NORMAL,
	    NULL, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
	return error;
}
Example #3
0
/*
 * grfmap: map the macfb frame buffer into user process "p".
 * On return *addrp points at the pixel data: the mapping base plus
 * the hardware offset into the first page.
 */
int
grfmap(dev_t dev, struct macfb_softc *sc, void **addrp, struct proc *p)
{
	struct vnode vn;
	u_long len;
	int error, flags;

	flags = MAP_SHARED | MAP_FIXED;

	/* fake up a character-device vnode for uvm_mmap to chew on */
	vn.v_type = VCHR;		/* XXX */
	vn.v_rdev = dev;		/* XXX */

	/* map the whole device aperture, rounded out to a page boundary */
	len = m68k_round_page(sc->sc_dc->dc_offset + sc->sc_dc->dc_size);
	*addrp = (void *)VM_DEFAULT_ADDRESS(p->p_vmspace->vm_daddr, len);

	error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
	    (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
	    flags, (void *)&vn, 0, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	/*
	 * Advance past the start-of-page slack to the actual frame buffer.
	 * NOTE(review): this adjustment happens even when uvm_mmap() failed;
	 * presumably callers ignore *addrp on error -- confirm.
	 */
	*addrp = (char *)*addrp + sc->sc_dc->dc_offset;

	return (error);
}
Example #4
0
/*
 * sys_mmap: the mmap system call.
 *
 * Pulls the user arguments out of "uap", normalizes the deprecated
 * MAP_COPY flag, page-aligns the file offset, picks (or validates) a
 * mapping address, resolves the backing uvm_object through the file's
 * fo_mmap operation (none for anonymous mappings), and finally calls
 * uvm_mmap() to install the mapping.  On success the mapped address
 * (plus the sub-page offset) is returned through *retval.
 */
int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	off_t pos;
	vsize_t size, pageoff, newsize;
	vm_prot_t prot, maxprot;
	int flags, fd, advice;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct uvm_object *uobj;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;	/* silently drop unknown prot bits */
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	/* keep the caller's original hint for the ASLR policy call below */
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY) {
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
#if defined(COMPAT_10) && defined(__i386__)
		/*
		 * Ancient kernel on x86 did not obey PROT_EXEC on i386 at least
		 * and ld.so did not turn it on. We take care of this on amd64
		 * in compat32.
		 */
		prot |= PROT_EXEC;
#endif
	}
	/* MAP_SHARED and MAP_PRIVATE are mutually exclusive */
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return EINVAL;

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos    -= pageoff;
	newsize = size + pageoff;		/* add offset */
	newsize = (vsize_t)round_page(newsize);	/* round up */

	/* rounding can wrap around zero; treat that overflow as ENOMEM */
	if (newsize < size)
		return ENOMEM;
	size = newsize;

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return EINVAL;

		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
		if (error) {
			return error;
		}
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

		/*
		 * bottom-up maps never go below the default hint;
		 * top-down maps never go above it.
		 */
		if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	advice = UVM_ADV_NORMAL;
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		if (fp->f_ops->fo_mmap == NULL) {
			error = ENODEV;
			goto out;
		}
		/* the file type decides object, advice, flags and maxprot */
		error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
		    &advice, &uobj, &maxprot);
		if (error) {
			goto out;
		}
		if (uobj == NULL) {
			/* fo_mmap supplied no object: treat as anonymous memory */
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}
	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return EINVAL;

 is_anon:		/* label for SunOS style /dev/zero */
		uobj = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	PAX_MPROTECT_ADJUST(l, &prot, &maxprot);

	pax_aslr_mmap(l, &addr, orig_addr, flags);

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, advice, uobj, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	/*
	 * remember to add offset
	 * NOTE(review): *retval is written even when uvm_mmap() failed;
	 * presumably callers ignore retval on error -- confirm.
	 */
	*retval = (register_t)(addr + pageoff);

 out:
	/* drop the file reference taken by fd_getfile(), if any */
	if (fp != NULL)
		fd_putfile(fd);

	return error;
}
Example #5
0
/*
 * sys_mmap: the mmap system call (vnode-backed or anonymous mappings).
 *
 * Validates the user-supplied protection and flags, aligns the file
 * offset, chooses or checks the mapping address, performs the
 * per-vnode permission checks that derive "maxprot", and then calls
 * uvm_mmap() to establish the mapping.  On success *retval holds the
 * mapped address plus the sub-page offset.
 */
int
sys_mmap(struct proc *p, void *v, register_t *retval)
{
	struct sys_mmap_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = NULL;
	struct vnode *vp;
	caddr_t handle;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t) SCARG(uap, addr);
	size = (vsize_t) SCARG(uap, len);
	prot = SCARG(uap, prot);
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if ((prot & VM_PROT_ALL) != prot)
		return (EINVAL);	/* unknown protection bits set */
	if ((flags & MAP_FLAGMASK) != flags)
		return (EINVAL);	/* unknown flag bits set */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);
	if (flags & MAP_DENYWRITE)
		return (EINVAL);	/* userland may not request DENYWRITE */

	/*
	 * align file position and save offset.  adjust size.
	 * NOTE(review): ALIGN_ADDR presumably splits pos into a
	 * page-aligned position plus pageoff and grows size to match --
	 * macro body not visible here, confirm.
	 */
	ALIGN_ADDR(pos, size, pageoff);

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr" 
	 */

	if (flags & MAP_FIXED) {

		/* adjust address by the same amount as we did the offset */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);		/* not page aligned */

		if (addr > SIZE_MAX - size)
			return (EINVAL);		/* no wrapping! */
		if (VM_MAXUSER_ADDRESS > 0 &&
		    (addr + size) > VM_MAXUSER_ADDRESS)
			return (EINVAL);
		if (vm_min_address > 0 && addr < vm_min_address)
			return (EINVAL);

	} else {

		/*
		 * not fixed: make sure we skip over the largest possible heap.
		 * we will refine our guess later (e.g. to account for VAC, etc)
		 */
		if (addr == 0)
			addr = uvm_map_hint(p, prot);
		else if (!(flags & MAP_TRYFIXED) &&
		    addr < (vaddr_t)p->p_vmspace->vm_daddr)
			addr = uvm_map_hint(p, prot);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */
	if ((flags & MAP_ANON) == 0) {

		if ((fp = fd_getfile(fdp, fd)) == NULL)
			return (EBADF);

		/* hold our own reference on fp while we use it */
		FREF(fp);

		if (fp->f_type != DTYPE_VNODE) {
			error = ENODEV;		/* only mmap vnodes! */
			goto out;
		}
		vp = (struct vnode *)fp->f_data;	/* convert to vnode */

		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			error = ENODEV; /* only REG/CHR/BLK support mmap */
			goto out;
		}

		if (vp->v_type == VREG && (pos + size) < pos) {
			error = EINVAL;		/* no offset wrapping */
			goto out;
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			flags |= MAP_ANON;
			FRELE(fp);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d comm %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/* 
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

#ifdef ANOUBIS
		/* Force DENYWRITE mappings if file->denywrite is set. */
		if (fp->denywrite)
			flags |= MAP_DENYWRITE;
#endif

		/*
		 * now check protection
		 */

		/*
		 * Don't allow the file to be mapped into executable memory if
		 * the underlying file system is marked as 'noexec'.
		 */
		if (prot & PROT_EXEC && vp->v_mount->mnt_flag & MNT_NOEXEC) {
			error = EACCES;
			goto out;
		}

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			error = EACCES;
			goto out;
		}

		/* PROT_EXEC only makes sense if the descriptor is readable. */
		if (!(fp->f_flag & FREAD) && prot & PROT_EXEC) {
			error = EACCES;
			goto out;
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable, append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, p->p_ucred, p)))
					goto out;
				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					error = EPERM;
					goto out;
				}
			} else if (prot & PROT_WRITE) {
				error = EACCES;
				goto out;
			}
		} else {
			/* MAP_PRIVATE mappings can always write to */
			maxprot |= VM_PROT_WRITE;
		}

#ifdef MAC
		error = mac_vnode_check_mmap(p->p_ucred, vp, prot, flags);
		if (error)
			goto out;
#endif

		vfs_mark_atime(vp, p->p_ucred);

		/*
		 * set handle to vnode
		 */

		handle = (caddr_t)vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1) {
			error = EINVAL;
			goto out;
		}

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	/*
	 * anonymous or writable-private mappings are charged against the
	 * process's data-size resource limit.
	 */
	if ((flags & MAP_ANON) != 0 ||
	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
		if (size >
		    (p->p_rlimit[RLIMIT_DATA].rlim_cur - ptoa(p->p_vmspace->vm_dused))) {
			error = ENOMEM;
			goto out;
		}
	}

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur, p);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

out:
	/* drop our file reference, if we still hold one */
	if (fp)
		FRELE(fp);	
	return (error);
}
Example #6
0
/*
 * sys_mmap: the mmap system call (vnode-backed or anonymous mappings).
 *
 * Extracts and validates the user arguments, aligns the file offset,
 * chooses or checks the mapping address, derives "maxprot" from the
 * descriptor's access mode and the vnode attributes, applies the
 * Veriexec/PAX policies where configured, and finally calls uvm_mmap().
 * On success *retval holds the mapped address plus the sub-page offset.
 *
 * NOTE(review): every early-return error path below drops its own
 * fd_getfile() reference with fd_putfile(); the final fd_putfile()
 * covers only the fall-through paths.
 */
int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct vnode *vp;
	void *handle;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;	/* silently drop unknown prot bits */
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	/* keep the caller's original hint for the ASLR policy call below */
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos  -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {

		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		error = range_test(addr, size, true);
		if (error)
			return error;
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {

		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		/*
		 * bottom-up maps never go below the default hint;
		 * top-down maps never go above it.
		 */
		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			fd_putfile(fd);
			return (ENODEV);		/* only mmap vnodes! */
		}
		vp = fp->f_data;		/* convert to vnode */
		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			fd_putfile(fd);
			return (ENODEV);  /* only REG/CHR/BLK support mmap */
		}
		/* character devices may use negative offsets; files may not */
		if (vp->v_type != VCHR && pos < 0) {
			fd_putfile(fd);
			return (EINVAL);
		}
		/*
		 * NOTE(review): (pos + size) < pos relies on signed
		 * wraparound of off_t, which is technically UB in C --
		 * kept as-is to preserve the original behavior.
		 */
		if (vp->v_type != VCHR && (pos + size) < pos) {
			fd_putfile(fd);
			return (EOVERFLOW);		/* no offset wrapping */
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR
		    && (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d command %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			fd_putfile(fd);
			return (EACCES);
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable, append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, l->l_cred))) {
					fd_putfile(fd);
					return (error);
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					fd_putfile(fd);
					return (EPERM);
				}
			}
			else if (prot & PROT_WRITE) {
				fd_putfile(fd);
				return (EACCES);
			}
		} else {
			/* MAP_PRIVATE mappings can always write to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

#if NVERIEXEC > 0
	if (handle != NULL) {
		/*
		 * Check if the file can be executed indirectly.
		 *
		 * XXX: This gives false warnings about "Incorrect access type"
		 * XXX: if the mapping is not executable. Harmless, but will be
		 * XXX: fixed as part of other changes.
		 */
		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
		    NULL)) {
			/*
			 * Don't allow executable mappings if we can't
			 * indirectly execute the file.
			 */
			if (prot & VM_PROT_EXECUTE) {
			     	if (fp != NULL)
					fd_putfile(fd);
				return (EPERM);
			}

			/*
			 * Strip the executable bit from 'maxprot' to make sure
			 * it can't be made executable later.
			 */
			maxprot &= ~VM_PROT_EXECUTE;
		}
	}
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PAX_ASLR
	pax_aslr(l, &addr, orig_addr, flags);
#endif /* PAX_ASLR */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	/* drop the file reference taken by fd_getfile(), if any */
     	if (fp != NULL)
		fd_putfile(fd);

	return (error);
}