Example #1
vmcmd_map_zero() from the NetBSD kernel (sys/kern/exec_subr.c) handles an exec vmcmd that requests a zero-filled region: it page-aligns the request, trims the protections through the PaX hook, and maps anonymous memory with uvm_map().
int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
	PAX_MPROTECT_ADJUST(l, &prot, &maxprot);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		p->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}
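
All four examples funnel the requested and maximum protections through PAX_MPROTECT_ADJUST before mapping. Below is a minimal sketch of what such a hook can do under a W^X policy; wx_adjust() is a hypothetical name for illustration only, and NetBSD's real implementation is per-process, policy-driven, and considerably more involved.

/*
 * Hedged sketch of a PAX_MPROTECT_ADJUST-style hook under W^X:
 * a mapping may be writable or executable, but not both, and the
 * maximum protection is trimmed so a later mprotect() cannot add
 * the forbidden bit.
 */
static void
wx_adjust(vm_prot_t *prot, vm_prot_t *maxprot)
{

	if (*prot & VM_PROT_EXECUTE) {
		/* executable now, so never writable later */
		*maxprot &= ~VM_PROT_WRITE;
	} else {
		/* writable (or neither), so never executable later */
		*maxprot &= ~VM_PROT_EXECUTE;
	}
	/* the current protection must stay within the maximum */
	*prot &= *maxprot;
}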
Example #2
vmcmd_map_pagedvn() maps a vnode (typically the executable's text or data segment) demand-paged into the address space, after checking page alignment and asking the file system's opinion via VOP_MMAP().
int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_addr & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_len & PAGE_MASK)
		return EINVAL;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
	PAX_MPROTECT_ADJUST(l, &prot, &maxprot);

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
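
For context, this is roughly how a binary-format loader queues work for vmcmd_map_pagedvn(). NEW_VMCMD is the real helper macro from <sys/exec.h>; the eh_* values are placeholders standing in for whatever the loader parsed out of the executable's header.

/*
 * Hedged sketch: queueing a demand-paged text segment for the
 * function above.  The eh_* names are illustrative, not real fields.
 */
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn,
    eh_textsize,			/* ev_len: must be page-aligned */
    eh_textaddr,			/* ev_addr: must be page-aligned */
    epp->ep_vp,				/* ev_vp: the executable's vnode */
    eh_textoff,				/* ev_offset: must be page-aligned */
    VM_PROT_READ | VM_PROT_EXECUTE);	/* ev_prot */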
Example #3
vmcmd_readvn() loads a segment the slow way: it reads the data from the vnode with vn_rdwr() into an already-mapped, writable region, then tightens the protection with uvm_map_protect().
int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
	PAX_MPROTECT_ADJUST(l, &prot, &maxprot);

#ifdef PMAP_NEED_PROCWR
	/*
	 * we wrote into the process's address space, so make sure the
	 * pages are synched with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.   however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				maxprot, true);
		if (error)
			return error;
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				prot, false);
		if (error)
			return error;
	}

	return 0;
}
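
The trunc_page()/round_page() pair above widens the byte range to whole pages, since protection is a per-page property. A standalone illustration, assuming a 4 KiB page and re-deriving the kernel macros in userland:

/*
 * Illustration only: these macros mirror the kernel's definitions
 * for a 4 KiB page.  Compile and run as an ordinary program.
 */
#include <assert.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define trunc_page(x)	((x) & ~PAGE_MASK)
#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	unsigned long addr = 0x1234, len = 0x100;

	/* the byte range [0x1234, 0x1334) widens to pages [0x1000, 0x2000) */
	assert(trunc_page(addr) == 0x1000);
	assert(round_page(addr + len) == 0x2000);
	return 0;
}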
Example #4
sys_mmap() (sys/uvm/uvm_mmap.c) is the kernel entry point for the mmap(2) system call; note that for MAP_FIXED the address and the file offset must be congruent modulo the page size.
int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	off_t pos;
	vsize_t size, pageoff, newsize;
	vm_prot_t prot, maxprot;
	int flags, fd, advice;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct uvm_object *uobj;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY) {
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
#if defined(COMPAT_10) && defined(__i386__)
		/*
		 * Ancient kernels, on i386 at least, did not obey
		 * PROT_EXEC, and ld.so did not turn it on.  We take
		 * care of this on amd64 in compat32.
		 */
		prot |= PROT_EXEC;
#endif
	}
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return EINVAL;

	/*
	 * align file position and save offset.  adjust size.
	 */

	pageoff = (pos & PAGE_MASK);
	pos    -= pageoff;
	newsize = size + pageoff;		/* add offset */
	newsize = (vsize_t)round_page(newsize);	/* round up */

	if (newsize < size)
		return ENOMEM;
	size = newsize;

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return EINVAL;

		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
		if (error) {
			return error;
		}
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */

		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

		if (addr == 0 || !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */

	advice = UVM_ADV_NORMAL;
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		if (fp->f_ops->fo_mmap == NULL) {
			error = ENODEV;
			goto out;
		}
		error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
		    &advice, &uobj, &maxprot);
		if (error) {
			goto out;
		}
		if (uobj == NULL) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}
	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return EINVAL;

 is_anon:		/* label for SunOS style /dev/zero */
		uobj = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	PAX_MPROTECT_ADJUST(l, &prot, &maxprot);

	pax_aslr_mmap(l, &addr, orig_addr, flags);

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, advice, uobj, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	/* remember to add offset */
	*retval = (register_t)(addr + pageoff);

 out:
	if (fp != NULL)
		fd_putfile(fd);

	return error;
}
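
Seen from userland, the MAP_ANON path above corresponds to the classic anonymous-mapping idiom. A minimal sketch: fd must be -1 and the offset 0, exactly what sys_mmap() validates before taking the is_anon branch.

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	size_t len = 4096;
	char *p;

	/* anonymous, private, read/write mapping at a kernel-chosen address */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	memset(p, 'x', len);	/* fault the pages in and write to them */
	if (munmap(p, len) == -1) {
		perror("munmap");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}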