Example 1
int
exec_sigcode_map(struct proc *p, struct emul *e)
{
	vsize_t sz;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple:
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (e->e_sigobject == NULL) {
		vaddr_t va;
		int r;

		e->e_sigobject = uao_create(sz, 0);
		uao_reference(e->e_sigobject);	/* permanent reference */

		va = vm_map_min(kernel_map);	/* hint */
		if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
		    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
		    UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
			uao_detach(e->e_sigobject);
			return (ENOMEM);
		}
		memcpy((void *)va, e->e_sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

	/* Just a hint to uvm_map() where to put it. */
	p->p_sigcode = uvm_map_hint(p, VM_PROT_READ|VM_PROT_EXECUTE);
	uao_reference(e->e_sigobject);
	if (uvm_map(&p->p_vmspace->vm_map, &p->p_sigcode, round_page(sz),
	    e->e_sigobject, 0, 0, UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0))) {
		uao_detach(e->e_sigobject);
		return (ENOMEM);
	}

	return (0);
}
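The sigobject pattern above (create an anonymous object once, fill it through a temporary writable mapping, then hand out shared mappings to every process that needs it) has a close userland analogue. A minimal sketch using POSIX shm_open(); the object name is made up for the demo:

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	const char payload[] = "sigcode bytes";
	size_t sz = sizeof(payload);
	char *rw;
	const char *ro;
	int fd;

	/* the object: one long-lived handle, like e->e_sigobject */
	if ((fd = shm_open("/sigobj.demo", O_RDWR | O_CREAT, 0600)) == -1)
		return (1);
	if (ftruncate(fd, sz) == -1)
		return (1);

	/* temporary RW mapping: the "map in kernel space, copy, unmap" step */
	rw = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (rw == MAP_FAILED)
		return (1);
	memcpy(rw, payload, sz);
	munmap(rw, sz);

	/* long-lived read-only mapping: the per-process mapping step */
	ro = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
	if (ro == MAP_FAILED)
		return (1);
	printf("%s\n", ro);

	munmap((void *)ro, sz);
	close(fd);
	shm_unlink("/sigobj.demo");
	return (0);
}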
Example 2
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND) 
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else {
		/* This is just a hint to uvm_map() about where to put it. */
		attach_va = uvm_map_hint(p, prot);
	}
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(shm_handle->shm_object);
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_p->ps_mainproc->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}
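Seen from userland, the hint path in sys_shmat() is the default: a NULL shmaddr leaves attach_va to uvm_map_hint(). A small demo using the standard SysV API:

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int
main(void)
{
	int shmid;
	void *va;

	if ((shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600)) == -1)
		return (1);

	/* shmaddr == NULL: the kernel picks attach_va via uvm_map_hint() */
	if ((va = shmat(shmid, NULL, 0)) == (void *)-1)
		return (1);
	printf("attached at %p\n", va);

	shmdt(va);
	shmctl(shmid, IPC_RMID, NULL);
	return (0);
}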
Example 3
int
sys_mmap(struct proc *p, void *v, register_t *retval)
{
	struct sys_mmap_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t vm_min_address = VM_MIN_ADDRESS;
	struct filedesc *fdp = p->p_fd;
	struct file *fp = NULL;
	struct vnode *vp;
	caddr_t handle;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (vaddr_t) SCARG(uap, addr);
	size = (vsize_t) SCARG(uap, len);
	prot = SCARG(uap, prot);
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if ((prot & VM_PROT_ALL) != prot)
		return (EINVAL);
	if ((flags & MAP_FLAGMASK) != flags)
		return (EINVAL);
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);
	if (flags & MAP_DENYWRITE)
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */
	ALIGN_ADDR(pos, size, pageoff);

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr" 
	 */

	if (flags & MAP_FIXED) {

		/* adjust address by the same amount as we did the offset */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);		/* not page aligned */

		if (addr > SIZE_MAX - size)
			return (EINVAL);		/* no wrapping! */
		if (VM_MAXUSER_ADDRESS > 0 &&
		    (addr + size) > VM_MAXUSER_ADDRESS)
			return (EINVAL);
		if (vm_min_address > 0 && addr < vm_min_address)
			return (EINVAL);

	} else {

		/*
		 * not fixed: make sure we skip over the largest possible heap.
		 * we will refine our guess later (e.g. to account for VAC, etc)
		 */
		if (addr == 0)
			addr = uvm_map_hint(p, prot);
		else if (!(flags & MAP_TRYFIXED) &&
		    addr < (vaddr_t)p->p_vmspace->vm_daddr)
			addr = uvm_map_hint(p, prot);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */
	if ((flags & MAP_ANON) == 0) {

		if ((fp = fd_getfile(fdp, fd)) == NULL)
			return (EBADF);

		FREF(fp);

		if (fp->f_type != DTYPE_VNODE) {
			error = ENODEV;		/* only mmap vnodes! */
			goto out;
		}
		vp = (struct vnode *)fp->f_data;	/* convert to vnode */

		if (vp->v_type != VREG && vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			error = ENODEV; /* only REG/CHR/BLK support mmap */
			goto out;
		}

		if (vp->v_type == VREG && (pos + size) < pos) {
			error = EINVAL;		/* no offset wrapping */
			goto out;
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			flags |= MAP_ANON;
			FRELE(fp);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			   "%s (pid %d comm %s)\n", vp->v_type == VCHR ?
			   "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
			    p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/* 
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

#ifdef ANOUBIS
		/* Force DENYWRITE mappings if file->denywrite is set. */
		if (fp->denywrite)
			flags |= MAP_DENYWRITE;
#endif

		/*
		 * now check protection
		 */

		/*
		 * Don't allow the file to be mapped into executable memory if
		 * the underlying file system is marked as 'noexec'.
		 */
		if (prot & PROT_EXEC && vp->v_mount->mnt_flag & MNT_NOEXEC) {
			error = EACCES;
			goto out;
		}

		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			error = EACCES;
			goto out;
		}

		/* PROT_EXEC only makes sense if the descriptor is readable. */
		if (!(fp->f_flag & FREAD) && prot & PROT_EXEC) {
			error = EACCES;
			goto out;
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * If the file is writable, add PROT_WRITE to
			 * maxprot only if the file is not immutable or
			 * append-only.  Otherwise, if PROT_WRITE was
			 * asked for, return EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, p->p_ucred, p)))
					goto out;
				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					error = EPERM;
					goto out;
				}
			} else if (prot & PROT_WRITE) {
				error = EACCES;
				goto out;
			}
		} else {
			/* MAP_PRIVATE mappings can always be written to */
			maxprot |= VM_PROT_WRITE;
		}

#ifdef MAC
		error = mac_vnode_check_mmap(p->p_ucred, vp, prot, flags);
		if (error)
			goto out;
#endif

		vfs_mark_atime(vp, p->p_ucred);

		/*
		 * set handle to vnode
		 */

		handle = (caddr_t)vp;

	} else {		/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1) {
			error = EINVAL;
			goto out;
		}

 is_anon:		/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	if ((flags & MAP_ANON) != 0 ||
	    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
		if (size >
		    (p->p_rlimit[RLIMIT_DATA].rlim_cur - ptoa(p->p_vmspace->vm_dused))) {
			error = ENOMEM;
			goto out;
		}
	}

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */

	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur, p);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

out:
	if (fp)
		FRELE(fp);	
	return (error);
}
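The non-fixed branch above is what an ordinary anonymous mmap() exercises: with addr == NULL and no MAP_FIXED, the kernel chooses the start address via uvm_map_hint(). For example:

#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	void *p;

	/* addr == NULL, no MAP_FIXED: sys_mmap() takes the "not fixed"
	 * branch and asks uvm_map_hint() for an address above the heap */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return (1);
	printf("mapped at %p\n", p);
	munmap(p, 4096);
	return (0);
}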
Example 4
/*
 * Load a file (interpreter/library) pointed to by path [stolen from
 * coff_load_shlib()]. Made slightly generic so it might be used externally.
 */
int
ELFNAME(load_file)(struct proc *p, char *path, struct exec_package *epp,
	struct elf_args *ap, Elf_Addr *last)
{
	int error, i;
	struct nameidata nd;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	u_long phsize;
	char *bp = NULL;
	Elf_Addr addr;
	struct vnode *vp;
	u_int8_t os;			/* Just a dummy in this routine */
	Elf_Phdr *base_ph = NULL;
	struct interp_ld_sec {
		Elf_Addr vaddr;
		u_long memsz;
	} loadmap[ELF_MAX_VALID_PHDR];
	int nload, idx = 0;
	Elf_Addr pos = *last;
	int file_align;

	bp = path;
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	if ((error = namei(&nd)) != 0) {
		return (error);
	}
	vp = nd.ni_vp;
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad;
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)
		goto bad1;
	if ((error = ELFNAME(read_from)(p, nd.ni_vp, 0,
				    (caddr_t)&eh, sizeof(eh))) != 0)
		goto bad1;

	if (ELFNAME(check_header)(&eh, ET_DYN) &&
	    ELFNAME(olf_check_header)(&eh, ET_DYN, &os)) {
		error = ENOEXEC;
		goto bad1;
	}

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = ELFNAME(read_from)(p, nd.ni_vp, eh.e_phoff, (caddr_t)ph,
	    phsize)) != 0)
		goto bad1;

	for (i = 0; i < eh.e_phnum; i++) {
		if (ph[i].p_type == PT_LOAD) {
			loadmap[idx].vaddr = trunc_page(ph[i].p_vaddr);
			loadmap[idx].memsz = round_page (ph[i].p_vaddr +
			    ph[i].p_memsz - loadmap[idx].vaddr);
			file_align = ph[i].p_align;
			idx++;
		}
	}
	nload = idx;

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (pos == ELFDEFNNAME(NO_ADDR)) {
		pos = uvm_map_hint(p, VM_PROT_EXECUTE);
	}

	pos = ELF_ROUND(pos, file_align);
	*last = epp->ep_interp_pos = pos;
	/* i is advanced at the bottom; a base change restarts the scan */
	for (i = 0; i < nload; /**/) {
		vaddr_t	addr;
		struct	uvm_object *uobj;
		off_t	uoff;
		size_t	size;

#ifdef this_needs_fixing
		if (i == 0) {
			uobj = &vp->v_uvm.u_obj;
			/* need to fix uoff */
		} else {
#endif
			uobj = NULL;
			uoff = 0;
#ifdef this_needs_fixing
		}
#endif

		addr = trunc_page(pos + loadmap[i].vaddr);
		size =  round_page(addr + loadmap[i].memsz) - addr;

		/* CRAP - map_findspace does not avoid daddr+MAXDSIZ */
		if ((addr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
		    (addr < (vaddr_t)p->p_vmspace->vm_daddr + MAXDSIZ))
			addr = round_page((vaddr_t)p->p_vmspace->vm_daddr +
			    MAXDSIZ);

		if (uvm_map_findspace(&p->p_vmspace->vm_map, addr, size,
		    &addr, uobj, uoff, 0, UVM_FLAG_FIXED) == NULL) {
			if (uvm_map_findspace(&p->p_vmspace->vm_map, addr, size,
			    &addr, uobj, uoff, 0, 0) == NULL) {
				error = ENOMEM; /* XXX */
				goto bad1;
			}
		} 
		if (addr != pos + loadmap[i].vaddr) {
			/* base changed. */
			pos = addr - trunc_page(loadmap[i].vaddr);
			pos = ELF_ROUND(pos,file_align);
			epp->ep_interp_pos = *last = pos;
			i = 0;
			continue;
		}

		i++;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		Elf_Addr size = 0;
		int prot = 0;
		int flags;

		switch (ph[i].p_type) {
		case PT_LOAD:
			if (base_ph == NULL) {
				flags = VMCMD_BASE;
				addr = *last;
				base_ph = &ph[i];
			} else {
				flags = VMCMD_RELATIVE;
				addr = ph[i].p_vaddr - base_ph->p_vaddr;
			}
			ELFNAME(load_psection)(&epp->ep_vmcmds, nd.ni_vp,
			    &ph[i], &addr, &size, &prot, flags);
			/* If entry is within this section it must be text */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
 				epp->ep_entry = addr + eh.e_entry -
				    ELF_TRUNC(ph[i].p_vaddr,ph[i].p_align);
				ap->arg_interp = addr;
			}
			addr += size;
			break;

		case PT_DYNAMIC:
		case PT_PHDR:
		case PT_NOTE:
			break;

		default:
			break;
		}
	}

	vn_marktext(nd.ni_vp);

bad1:
	VOP_CLOSE(nd.ni_vp, FREAD, p->p_ucred, p);
bad:
	if (ph != NULL)
		free((char *)ph, M_TEMP);

	*last = addr;
	vput(nd.ni_vp);
	return (error);
}
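The interpreter placement above leans on ELF_ROUND()/ELF_TRUNC() for power-of-two alignment. A standalone sketch with the usual definitions (assumed here; the authoritative macros live in the kernel's ELF headers):

#include <stdio.h>

/* assumed definitions: round up / truncate to a power-of-two boundary */
#define ELF_ROUND(a, b)	(((a) + (b) - 1) & ~((unsigned long)(b) - 1))
#define ELF_TRUNC(a, b)	((a) & ~((unsigned long)(b) - 1))

int
main(void)
{
	unsigned long pos = 0x12345678UL, align = 0x1000UL;

	printf("round: %#lx -> %#lx\n", pos, ELF_ROUND(pos, align));
	printf("trunc: %#lx -> %#lx\n", pos, ELF_TRUNC(pos, align));
	return (0);
}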
Example 5
int
sys_mquery(struct proc *p, void *v, register_t *retval)
{
	struct sys_mquery_args /* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */ *uap = v;
	struct file *fp;
	struct uvm_object *uobj;
	voff_t uoff;
	int error;
	vaddr_t vaddr;
	int flags = 0;
	vsize_t size;
	vm_prot_t prot;
	int fd;

	vaddr = (vaddr_t) SCARG(uap, addr);
	prot = SCARG(uap, prot);
	size = (vsize_t) SCARG(uap, len);
	fd = SCARG(uap, fd);

	if ((prot & VM_PROT_ALL) != prot)
		return (EINVAL);

	if (SCARG(uap, flags) & MAP_FIXED)
		flags |= UVM_FLAG_FIXED;

	if (fd >= 0) {
		if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
			return (error);
		uobj = &((struct vnode *)fp->f_data)->v_uvm.u_obj;
		uoff = SCARG(uap, pos);
	} else {
		fp = NULL;
		uobj = NULL;
		uoff = 0;
	}

	if (vaddr == 0)
		vaddr = uvm_map_hint(p, prot);

	/* prevent a user requested address from falling in heap space */
	if ((vaddr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
	    (vaddr < (vaddr_t)p->p_vmspace->vm_daddr + MAXDSIZ)) {
		if (flags & UVM_FLAG_FIXED) {
			error = EINVAL;
			goto done;
		}
		vaddr = round_page((vaddr_t)p->p_vmspace->vm_daddr + MAXDSIZ);
	}
	vm_map_lock(&p->p_vmspace->vm_map);

again:
	if (uvm_map_findspace(&p->p_vmspace->vm_map, vaddr, size,
	    &vaddr, uobj, uoff, 0, flags) == NULL) {
		if (flags & UVM_FLAG_FIXED)
			error = EINVAL;
		else
			error = ENOMEM;
	} else {
		/* prevent a returned address from falling in heap space */
		if ((vaddr + size > (vaddr_t)p->p_vmspace->vm_daddr)
		    && (vaddr < (vaddr_t)p->p_vmspace->vm_daddr + MAXDSIZ)) {
			vaddr = round_page((vaddr_t)p->p_vmspace->vm_daddr +
			    MAXDSIZ);
			goto again;
		}
		error = 0;
		*retval = (register_t)(vaddr);
	}
	vm_map_unlock(&p->p_vmspace->vm_map);
done:
	if (fp != NULL)
		FRELE(fp);
	return (error);
}
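sys_mquery() backs OpenBSD's mquery(2), which ld.so uses to probe for free address space without creating a mapping. A hypothetical probe: the requested address is arbitrary, and with fd == -1 the vnode path above is skipped entirely:

#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	void *want = (void *)0x20000000UL;
	void *got;

	/* no MAP_FIXED: if the region is busy (or falls in heap space),
	 * the kernel proposes a different free address instead of failing */
	got = mquery(want, 4096, PROT_READ, 0, -1, 0);
	if (got == MAP_FAILED)
		return (1);
	printf("free region at %p\n", got);
	return (0);
}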