Example #1
/*
 * Grow the stack to include sp.  Return 1 if successful, 0 otherwise.
 * This routine assumes that the stack grows downward.
 */
int
grow(caddr_t sp)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t oldsize = p->p_stksize;
	size_t newsize;
	int err;

	/*
	 * Serialize grow operations on an address space.
	 * This also serves as the lock protecting p_stksize
	 * and p_stkpageszc.
	 */
	as_rangelock(as);
	if (use_stk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		err = grow_lpg(sp);
	} else {
		err = grow_internal(sp, p->p_stkpageszc);
	}
	as_rangeunlock(as);

	if (err == 0 && (newsize = p->p_stksize) > oldsize) {
		ASSERT(IS_P2ALIGNED(oldsize, PAGESIZE));
		ASSERT(IS_P2ALIGNED(newsize, PAGESIZE));
		/*
		 * Set up translations so the process doesn't have to fault in
		 * the stack pages we just gave it.
		 */
		(void) as_fault(as->a_hat, as, p->p_usrstack - newsize,
		    newsize - oldsize, F_INVAL, S_WRITE);
	}
	return ((err == 0 ? 1 : 0));
}
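
All of the examples below share one skeleton: take the address-space range lock, pick or validate a user address, create the segment, then drop the lock. A minimal sketch of that shared pattern, assuming the standard illumos <vm/as.h> interfaces; the function name and error handling are illustrative, not taken from any of the sources:

/*
 * Minimal sketch of the pattern shared by these examples (illustrative).
 */
static int
example_zfod_map(struct as *as, caddr_t *addrp, size_t len)
{
	int error;

	as_rangelock(as);			/* serialize range changes */
	map_addr(addrp, len, 0, 1, 0);		/* choose a user address */
	if (*addrp == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}
	/* create an anonymous zero-fill segment while the range is locked */
	error = as_map(as, *addrp, len, segvn_create, zfod_argsp);
	as_rangeunlock(as);
	return (error);
}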
Example #2
File: smbios.c Project: andreiw/polaris
/*ARGSUSED*/
static int
smb_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred)
{
	smb_clone_t *cp = &smb_clones[getminor(dev)];

	size_t alen = P2ROUNDUP(len, PAGESIZE);
	caddr_t addr;

	iovec_t iov;
	uio_t uio;
	int err;

	if (len <= 0 || (flags & MAP_FIXED))
		return (EINVAL);

	if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
		return (EACCES);

	if (off < 0 || off + len < off || off + len > cp->c_eplen + cp->c_stlen)
		return (ENXIO);

	as_rangelock(as);
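	/* let the system pick an unmapped address for the rounded length */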
	map_addr(&addr, alen, 0, 1, 0);

	if (addr != NULL)
		err = as_map(as, addr, alen, segvn_create, zfod_argsp);
	else
		err = ENOMEM;

	as_rangeunlock(as);
	*addrp = addr;

	if (err != 0)
		return (err);

	iov.iov_base = addr;
	iov.iov_len = len;

	bzero(&uio, sizeof (uio_t));
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = off;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_extflg = UIO_COPY_DEFAULT;
	uio.uio_resid = len;

	if ((err = smb_uiomove(cp, &uio)) != 0)
		(void) as_unmap(as, addr, alen);

	return (err);
}
Example #3
File: ksyms.c Project: bahamas10/openzfs
/*
 * rlen is in multiples of PAGESIZE
 */
static char *
ksyms_asmap(struct as *as, size_t rlen)
{
	char *addr = NULL;

	as_rangelock(as);
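	/* have map_addr pick a page-aligned hole big enough for rlen */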
	map_addr(&addr, rlen, 0, 1, 0);
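	/* back the chosen range with an anonymous zero-fill (ZFOD) segment */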
	if (addr == NULL || as_map(as, addr, rlen, segvn_create, zfod_argsp)) {
		as_rangeunlock(as);
		return (NULL);
	}
	as_rangeunlock(as);
	return (addr);
}
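
ksyms_asmap leaves cleanup to its caller: once the range is mapped, any later failure has to be unwound with as_unmap. A hypothetical caller, with the name, snapshot buffer, and error codes all assumed rather than taken from ksyms.c:

/*
 * Hypothetical caller of ksyms_asmap (illustrative only): reserve the
 * user range, copy a kernel snapshot into it, and unmap on failure.
 */
static int
ksyms_copyout_snapshot(struct as *as, const void *kbuf, size_t rlen,
    char **uaddrp)
{
	char *addr = ksyms_asmap(as, rlen);

	if (addr == NULL)
		return (EOVERFLOW);
	if (copyout(kbuf, addr, rlen) != 0) {
		(void) as_unmap(as, addr, rlen);
		return (EFAULT);
	}
	*uaddrp = addr;
	return (0);
}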
Example #4
/*ARGSUSED*/
static int
bootfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	int ret;
	segvn_crargs_t vn_a;

#ifdef	_ILP32
	if (len > MAXOFF_T)
		return (ENOMEM);
#endif

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0 || off > MAXOFFSET_T - off)
		return (ENXIO);

	if (vp->v_type != VREG)
		return (ENODEV);

	if (prot & PROT_WRITE)
		return (ENOTSUP);

	as_rangelock(as);
	ret = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (ret != 0) {
		as_rangeunlock(as);
		return (ret);
	}

	vn_a.vp = vp;
	vn_a.offset = (u_offset_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	ret = as_map(as, *addrp, len, segvn_create, &vn_a);

	as_rangeunlock(as);
	return (ret);
}
Example #5
/*ARGSUSED8*/
static int
privcmd_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
{
	struct segmf_crargs a;
	int error;

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, (offset_t)off, 0, flags);
		if (*addrp == NULL) {
			error = ENOMEM;
			goto rangeunlock;
		}
	} else {
		/*
		 * User specified address
		 */
		(void) as_unmap(as, *addrp, len);
	}

	/*
	 * The mapping *must* be MAP_SHARED at offset 0.
	 *
	 * (Foreign pages are treated like device memory; the
	 * ioctl interface allows the backing objects to be
	 * arbitrarily redefined to point at any machine frame.)
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED || off != 0) {
		error = EINVAL;
		goto rangeunlock;
	}

	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	error = as_map(as, *addrp, len, segmf_create, &a);

rangeunlock:
	as_rangeunlock(as);
	return (error);
}
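
Seen from user level, the constraints privcmd_segmap enforces leave exactly one valid call shape: the mapping must be MAP_SHARED and the file offset must be 0. A hypothetical fragment (privcmd_fd and len are assumptions):

	/*
	 * Hypothetical user-level call; anything other than MAP_SHARED
	 * at offset 0 gets EINVAL from privcmd_segmap.
	 */
	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_SHARED, privcmd_fd, 0);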
Example #6
int
brk(caddr_t nva)
{
	int error;
	proc_t *p = curproc;

	/*
	 * Serialize brk operations on an address space.
	 * This also serves as the lock protecting p_brksize
	 * and p_brkpageszc.
	 */
	as_rangelock(p->p_as);
	if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
		error = brk_lpg(nva);
	} else {
		error = brk_internal(nva, p->p_brkpageszc);
	}
	as_rangeunlock(p->p_as);
	return ((error != 0 ? set_errno(error) : 0));
}
Example #7
/*
 * This function is called when a page needs to be mapped into a
 * process's address space.  Allocate the user address space and
 * set up the mapping to the page.  Assumes the page has already
 * been allocated and locked in memory via schedctl_getpage.
 */
static int
schedctl_map(struct anon_map *amp, caddr_t *uaddrp, caddr_t kaddr)
{
	caddr_t addr = NULL;
	struct as *as = curproc->p_as;
	struct segvn_crargs vn_a;
	int error;

	as_rangelock(as);
	/* pass address of kernel mapping as offset to avoid VAC conflicts */
	map_addr(&addr, PAGESIZE, (offset_t)(uintptr_t)kaddr, 1, 0);
	if (addr == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	/*
	 * Use segvn to set up the mapping to the page.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.cred = NULL;
	vn_a.type = MAP_SHARED;
	vn_a.prot = vn_a.maxprot = PROT_ALL;
	vn_a.flags = 0;
	vn_a.amp = amp;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;
	error = as_map(as, addr, PAGESIZE, segvn_create, &vn_a);
	as_rangeunlock(as);

	if (error)
		return (error);

	*uaddrp = addr;
	return (0);
}
Example #8
static int
xmem_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	struct cred *cred)
{
	struct seg		*seg;
	struct segxmem_crargs	xmem_a;
	struct xmemnode 	*xp = (struct xmemnode *)VTOXN(vp);
	struct xmount 		*xm = (struct xmount *)VTOXM(vp);
	uint_t			blocknumber;
	int 			error;

#ifdef lint
	maxprot = maxprot;
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0)
		return (EINVAL);

	/* offset, length and address all have to be block aligned */

	if (off & (xm->xm_bsize - 1) || len & (xm->xm_bsize - 1) ||
	    ((ulong_t)*addrp) & (xm->xm_bsize - 1)) {
		return (EINVAL);
	}

	if (vp->v_type != VREG)
		return (ENODEV);

	if (flags & MAP_PRIVATE)
		return (EINVAL);	/* XXX need to be handled */

	/*
	 * Don't allow mapping to locked file
	 */
	if (vn_has_mandatory_locks(vp, xp->xn_mode)) {
		return (EAGAIN);
	}

	if (error = xmem_fillpages(xp, vp, off, len, 1)) {
		return (error);
	}

	blocknumber = off >> xm->xm_bshift;

	if (flags & MAP_FIXED) {
		/*
		 * User specified address - blow away any previous mappings
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		seg = as_findseg(as, *addrp, 0);

		/*
		 * Fast path. segxmem_remap will fail if this is the wrong
		 * segment or if the len is beyond end of seg. If it fails,
		 * we do the regular stuff thru as_* routines.
		 */

		if (seg && (segxmem_remap(seg, vp, *addrp, len,
				&xp->xn_ppa[blocknumber], prot) == 0)) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (0);
		}
		AS_LOCK_EXIT(as, &as->a_lock);
		if (seg)
			(void) as_unmap(as, *addrp, len);

		as_rangelock(as);

		error = valid_usr_range(*addrp, len, prot, as, as->a_userlimit);

		if (error != RANGE_OKAY ||
			as_gap(as, len, addrp, &len, AH_CONTAIN, *addrp)) {
			as_rangeunlock(as);
			return (EINVAL);
		}

	} else {
		as_rangelock(as);
		map_addr(addrp, len, (offset_t)off, 1, flags);
	}

	if (*addrp == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	xmem_a.xma_vp = vp;
	xmem_a.xma_offset = (u_offset_t)off;
	xmem_a.xma_prot = prot;
	xmem_a.xma_cred = cred;
	xmem_a.xma_ppa = &xp->xn_ppa[blocknumber];
	xmem_a.xma_bshift = xm->xm_bshift;

	error = as_map(as, *addrp, len, segxmem_create, &xmem_a);

	as_rangeunlock(as);
	return (error);
}
Example #9
/*
 * This function is called when a memory device is mmap'ed.
 * Set up the mapping to the correct device driver.
 */
static int
mmsegmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
{
	struct segvn_crargs vn_a;
	struct segdev_crargs dev_a;
	int error;
	minor_t minor;
	off_t i;

	minor = getminor(dev);

	as_rangelock(as);
	/*
	 * No need to worry about vac alignment on /dev/zero
	 * since this is a "clone" object that doesn't yet exist.
	 */
	error = choose_addr(as, addrp, len, off,
	    (minor == M_MEM) || (minor == M_KMEM), flags);
	if (error != 0) {
		as_rangeunlock(as);
		return (error);
	}

	switch (minor) {
	case M_MEM:
		/* /dev/mem cannot be mmap'ed with MAP_PRIVATE */
		if ((flags & MAP_TYPE) != MAP_SHARED) {
			as_rangeunlock(as);
			return (EINVAL);
		}

		/*
		 * Check to ensure that the entire range is
		 * legal and we are not trying to map in
		 * more than the device will let us.
		 */
		for (i = 0; i < len; i += PAGESIZE) {
			if (mmmmap(dev, off + i, maxprot) == -1) {
				as_rangeunlock(as);
				return (ENXIO);
			}
		}

		/*
		 * Use seg_dev segment driver for /dev/mem mapping.
		 */
		dev_a.mapfunc = mmmmap;
		dev_a.dev = dev;
		dev_a.offset = off;
		dev_a.type = (flags & MAP_TYPE);
		dev_a.prot = (uchar_t)prot;
		dev_a.maxprot = (uchar_t)maxprot;
		dev_a.hat_attr = 0;

		/*
		 * Make /dev/mem mappings non-consistent since we can't
		 * alias pages that don't have page structs behind them,
		 * such as kernel stack pages. If someone mmap()s a kernel
		 * stack page and if we give him a tte with cv, a line from
		 * that page can get into both pages of the spitfire d$.
		 * But snoop from another processor will only invalidate
		 * the first page. This later caused kernel (xc_attention)
		 * to go into an infinite loop at pil 13 and no interrupts
		 * could come in. See 1203630.
		 */
		dev_a.hat_flags = HAT_LOAD_NOCONSIST;
		dev_a.devmap_data = NULL;

		error = as_map(as, *addrp, len, segdev_create, &dev_a);
		break;

	case M_ZERO:
		/*
		 * Use seg_vn segment driver for /dev/zero mapping.
		 * Passing in a NULL amp gives us the "cloning" effect.
		 */
		vn_a.vp = NULL;
		vn_a.offset = 0;
		vn_a.type = (flags & MAP_TYPE);
		vn_a.prot = prot;
		vn_a.maxprot = maxprot;
		vn_a.flags = flags & ~MAP_TYPE;
		vn_a.cred = cred;
		vn_a.amp = NULL;
		vn_a.szc = 0;
		vn_a.lgrp_mem_policy_flags = 0;
		error = as_map(as, *addrp, len, segvn_create, &vn_a);
		break;

	case M_KMEM:
	case M_ALLKMEM:
		/* No longer supported with KPR. */
		error = ENXIO;
		break;

	case M_NULL:
		/*
		 * Use seg_dev segment driver for /dev/null mapping.
		 */
		dev_a.mapfunc = mmmmap;
		dev_a.dev = dev;
		dev_a.offset = off;
		dev_a.type = 0;		/* neither PRIVATE nor SHARED */
		dev_a.prot = dev_a.maxprot = (uchar_t)PROT_NONE;
		dev_a.hat_attr = 0;
		dev_a.hat_flags = 0;
		error = as_map(as, *addrp, len, segdev_create, &dev_a);
		break;

	default:
		error = ENXIO;
	}

	as_rangeunlock(as);
	return (error);
}
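
The M_ZERO arm above is what services an ordinary user-level mapping of /dev/zero; passing a NULL amp makes each mapping an independent, demand-paged, zero-filled "clone". A small user-level illustration (the helper name is an assumption):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

/* Map len bytes of private zero-filled memory via the M_ZERO path. */
static void *
map_zero_pages(size_t len)
{
	int fd = open("/dev/zero", O_RDWR);
	void *p;

	if (fd == -1)
		return (NULL);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	(void) close(fd);	/* the mapping survives the close */
	return (p == MAP_FAILED ? NULL : p);
}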
Example #10
File: gfs.c Project: pcd1193182/openzfs
/* ARGSUSED */
int
gfs_vop_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cred,
    caller_context_t *ct)
{
	int rv;
	ssize_t resid = len;

	/*
	 * Check for bad parameters
	 */
#ifdef _ILP32
	if (len > MAXOFF_T)
		return (ENOMEM);
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOTSUP);
	if (off > MAXOFF_T)
		return (EFBIG);
	if ((long)off < 0 || (long)(off + len) < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (ENODEV);
	if ((prot & (PROT_EXEC | PROT_WRITE)) != 0)
		return (EACCES);

	/*
	 * Find appropriate address if needed, otherwise clear address range.
	 */
	as_rangelock(as);
	rv = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (rv != 0) {
		as_rangeunlock(as);
		return (rv);
	}

	/*
	 * Create mapping
	 */
	rv = as_map(as, *addrp, len, segvn_create, zfod_argsp);
	as_rangeunlock(as);
	if (rv != 0)
		return (rv);

	/*
	 * Fill with data from read()
	 */
	rv = vn_rdwr(UIO_READ, vp, *addrp, len, off, UIO_USERSPACE,
	    0, (rlim64_t)0, cred, &resid);

	if (rv == 0 && resid != 0)
		rv = ENXIO;

	if (rv != 0) {
		as_rangelock(as);
		(void) as_unmap(as, *addrp, len);
		as_rangeunlock(as);
	}

	return (rv);
}
Example #11
File: xpvtap.c Project: pcd1193182/openzfs
/*ARGSUSED*/
static int
xpvtap_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp,
    off_t len, unsigned int prot, unsigned int maxprot, unsigned int flags,
    cred_t *cred_p)
{
	struct segmf_crargs a;
	xpvtap_state_t *state;
	int instance;
	int e;

	if (secpolicy_xvm_control(cred_p)) {
		return (EPERM);
	}

	instance = getminor(dev);
	state = ddi_get_soft_state(xpvtap_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	/* the user app should be doing a MAP_SHARED mapping */
	if ((flags & MAP_TYPE) != MAP_SHARED) {
		return (EINVAL);
	}

	/*
	 * if this is the user ring (offset = 0), devmap it (which ends up in
	 * xpvtap_devmap). devmap will alloc and map the ring into the
	 * app's VA space.
	 */
	if (off == 0) {
		e = devmap_setup(dev, (offset_t)off, asp, addrp, (size_t)len,
		    prot, maxprot, flags, cred_p);
		return (e);
	}

	/* this should be the mmap for the gref pages (offset = PAGESIZE) */
	if (off != PAGESIZE) {
		return (EINVAL);
	}

	/* make sure we get the size we're expecting */
	if (len != XPVTAP_GREF_BUFSIZE) {
		return (EINVAL);
	}

	/*
	 * reserve user app VA space for the gref pages and use segmf to
	 * manage the backing store for the physical memory. segmf will
	 * map in/out the grefs and fault them in/out.
	 */
	ASSERT(asp == state->bt_map.um_as);
	as_rangelock(asp);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, 0, 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(asp);
			return (ENOMEM);
		}
	} else {
		/* User specified address */
		(void) as_unmap(asp, *addrp, len);
	}
	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	e = as_map(asp, *addrp, len, segmf_create, &a);
	if (e != 0) {
		as_rangeunlock(asp);
		return (e);
	}
	as_rangeunlock(asp);

	/*
	 * Stash the user base address and size of the gref mapping.
	 */
	state->bt_map.um_guest_pages = (caddr_t)*addrp;
	state->bt_map.um_guest_size = (size_t)len;

	/* register an as callback so we can clean up when the app goes away */
	e = as_add_callback(asp, xpvtap_segmf_unregister, state,
	    AS_UNMAP_EVENT, *addrp, len, KM_SLEEP);
	if (e != 0) {
		(void) as_unmap(asp, *addrp, len);
		return (EINVAL);
	}

	/* wake thread to see if there are requests already queued up */
	mutex_enter(&state->bt_thread.ut_mutex);
	state->bt_thread.ut_wake = B_TRUE;
	cv_signal(&state->bt_thread.ut_wake_cv);
	mutex_exit(&state->bt_thread.ut_mutex);

	return (0);
}
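
xpvtap_segmap implies a two-step protocol for the user app: mmap offset 0 to get the shared ring (routed through devmap_setup), then offset PAGESIZE with length XPVTAP_GREF_BUFSIZE for the gref pages. A hypothetical user-level counterpart (tap_fd is an assumption):

	void *ring = mmap(NULL, PAGESIZE, PROT_READ | PROT_WRITE,
	    MAP_SHARED, tap_fd, 0);		/* the user ring */
	void *grefs = mmap(NULL, XPVTAP_GREF_BUFSIZE, PROT_READ | PROT_WRITE,
	    MAP_SHARED, tap_fd, PAGESIZE);	/* the gref pages */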
Example #12
/*ARGSUSED*/
int
gfxp_ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
    ddi_device_acc_attr_t *accattrp, uint_t rnumber)
{
	struct segdev_crargs dev_a;
	int (*mapfunc)(dev_t dev, off_t off, int prot);
	uint_t hat_attr;
	pfn_t pfn;
	int error, i;

	if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
		return (ENODEV);

	/*
	 * Character devices that support the d_mmap
	 * interface can only be mmap'ed shared.
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED)
		return (EINVAL);

	/*
	 * Check that this region is indeed mappable on this platform.
	 * Use the mapping function.
	 */
	if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
		return (ENXIO);

	if (accattrp != NULL) {
		switch (accattrp->devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			/* Want UC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_STRICTORDER | HAT_PLAT_NOCACHE);
			break;
		case DDI_MERGING_OK_ACC:
			/* Want WC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_MERGING_OK | HAT_PLAT_NOCACHE);
			break;
		}
	}

	/*
	 * Check to ensure that the entire range is
	 * legal and we are not trying to map in
	 * more than the device will let us.
	 */
	for (i = 0; i < len; i += PAGESIZE) {
		if (i == 0) {
			/*
			 * Save the pfn at offset here. This pfn will be
			 * used later to get user address.
			 */
			if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
					maxprot)) == PFN_INVALID)
				return (ENXIO);
		} else {
			if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
				PFN_INVALID)
				return (ENXIO);
		}
	}

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		/*
		 * Pick an address w/o worrying about
		 * any vac alignment constraints.
		 */
		map_addr(addrp, len, ptob(pfn), 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/*
		 * User-specified address; blow away any previous mappings.
		 */
		(void) as_unmap(as, *addrp, len);
	}

	dev_a.mapfunc = mapfunc;
	dev_a.dev = dev;
	dev_a.offset = (offset_t)offset;
	dev_a.type = flags & MAP_TYPE;
	dev_a.prot = (uchar_t)prot;
	dev_a.maxprot = (uchar_t)maxprot;
	dev_a.hat_attr = hat_attr;
#if DEBUG
	dev_a.hat_flags = 0;
#else
	dev_a.hat_flags = HAT_LOAD_LOCK;
#endif
	dev_a.devmap_data = NULL;

	error = as_map(as, *addrp, len, segdev_create, &dev_a);
	as_rangeunlock(as);

	return (error);
}
Example #13
static int
smmap_common(caddr_t *addrp, size_t len,
    int prot, int flags, struct file *fp, offset_t pos)
{
	struct vnode *vp;
	struct as *as = curproc->p_as;
	uint_t uprot, maxprot, type;
	int error;
	int in_crit = 0;

	if ((flags & ~(MAP_SHARED | MAP_PRIVATE | MAP_FIXED | _MAP_NEW |
	    _MAP_LOW32 | MAP_NORESERVE | MAP_ANON | MAP_ALIGN |
	    MAP_TEXT | MAP_INITDATA)) != 0) {
		/* | MAP_RENAME */	/* not implemented, let user know */
		return (EINVAL);
	}

	if ((flags & MAP_TEXT) && !(prot & PROT_EXEC)) {
		return (EINVAL);
	}

	if ((flags & (MAP_TEXT | MAP_INITDATA)) == (MAP_TEXT | MAP_INITDATA)) {
		return (EINVAL);
	}

#if defined(__sparc)
	/*
	 * See if this is an "old mmap call".  If so, remember this
	 * fact and convert the flags value given to mmap to indicate
	 * that the address specified in the system call must be used.
	 * _MAP_NEW is set by all new uses of mmap.
	 */
	if ((flags & _MAP_NEW) == 0)
		flags |= MAP_FIXED;
#endif
	flags &= ~_MAP_NEW;

	type = flags & MAP_TYPE;
	if (type != MAP_PRIVATE && type != MAP_SHARED)
		return (EINVAL);

	if (flags & MAP_ALIGN) {
		if (flags & MAP_FIXED)
			return (EINVAL);
			return (EINVAL);

		/* alignment needs to be a power of 2 >= page size */
		if (((uintptr_t)*addrp < PAGESIZE && (uintptr_t)*addrp != 0) ||
		    !ISP2((uintptr_t)*addrp))
			return (EINVAL);
	}
	/*
	 * Check for bad lengths and file position.
	 * We let the VOP_MAP routine check for negative lengths
	 * since on some vnode types this might be appropriate.
	 */
	if (len == 0 || (pos & (u_offset_t)PAGEOFFSET) != 0)
		return (EINVAL);

	maxprot = PROT_ALL;		/* start out allowing all accesses */
	uprot = prot | PROT_USER;

	if (fp == NULL) {
		ASSERT(flags & MAP_ANON);
		/* discard lwpchan mappings, like munmap() */
		if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
			lwpchan_delete_mapping(curproc, *addrp, *addrp + len);
		as_rangelock(as);
		error = zmap(as, addrp, len, uprot, flags, pos);
		as_rangeunlock(as);
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (error == 0 && (flags & MAP_SHARED)) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		return (error);
	} else if ((flags & MAP_ANON) != 0)
		return (EINVAL);

	vp = fp->f_vnode;

	/* Can't execute code from "noexec" mounted filesystem. */
	if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0)
		maxprot &= ~PROT_EXEC;

	/*
	 * These checks were added as part of large file support.
	 *
	 * Return ENXIO if the initial position is negative; return EOVERFLOW
	 * if (offset + len) would overflow the maximum allowed offset for the
	 * type of file descriptor being used.
	 */
	if (vp->v_type == VREG) {
		if (pos < 0)
			return (ENXIO);
		if ((offset_t)len > (OFFSET_MAX(fp) - pos))
			return (EOVERFLOW);
	}

	if (type == MAP_SHARED && (fp->f_flag & FWRITE) == 0) {
		/* no write access allowed */
		maxprot &= ~PROT_WRITE;
	}

	/*
	 * XXX - Do we also adjust maxprot based on protections
	 * of the vnode?  E.g. if no execute permission is given
	 * on the vnode for the current user, maxprot probably
	 * should disallow PROT_EXEC also?  This is different
	 * from the write access as this would be a per vnode
	 * test as opposed to a per fd test for writability.
	 */

	/*
	 * Verify that the specified protections are not greater than
	 * the maximum allowable protections.  Also make sure that the
	 * file descriptor allows read access, since "write only"
	 * mappings are hard to do: we normally read from the file
	 * before the page can be written.
	 */
	if (((maxprot & uprot) != uprot) || (fp->f_flag & FREAD) == 0)
		return (EACCES);

	/*
	 * If the user specified an address, do some simple checks here
	 */
	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}

	if ((prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) &&
	    nbl_need_check(vp)) {
		int svmand;
		nbl_op_t nop;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, fp->f_cred, &svmand);
		if (error != 0)
			goto done;
		if ((prot & PROT_WRITE) && (type == MAP_SHARED)) {
			if (prot & (PROT_READ | PROT_EXEC)) {
				nop = NBL_READWRITE;
			} else {
				nop = NBL_WRITE;
			}
		} else {
			nop = NBL_READ;
		}
		if (nbl_conflict(vp, nop, 0, LONG_MAX, svmand, NULL)) {
			error = EACCES;
			goto done;
		}
	}

	/* discard lwpchan mappings, like munmap() */
	if ((flags & MAP_FIXED) && curproc->p_lcp != NULL)
		lwpchan_delete_mapping(curproc, *addrp, *addrp + len);

	/*
	 * Ok, now let the vnode map routine do its thing to set things up.
	 */
	error = VOP_MAP(vp, pos, as,
	    addrp, len, uprot, maxprot, flags, fp->f_cred, NULL);

	if (error == 0) {
		/*
		 * Tell machine specific code that lwp has mapped shared memory
		 */
		if (flags & MAP_SHARED) {
			/* EMPTY */
			LWP_MMODEL_SHARED_AS(*addrp, len);
		}
		if (vp->v_type == VREG &&
		    (flags & (MAP_TEXT | MAP_INITDATA)) != 0) {
			/*
			 * Mark this as an executable vnode
			 */
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VVMEXEC;
			mutex_exit(&vp->v_lock);
		}
	}

done:
	if (in_crit)
		nbl_end_crit(vp);
	return (error);
}
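
One detail worth calling out from the MAP_ALIGN branch above: with that flag the address argument does not name an address at all, it carries the requested alignment, which must be 0 or a power of two no smaller than PAGESIZE (and MAP_FIXED is rejected). A hypothetical request for 64 KB alignment (fd and len are assumptions):

	/* The "address" is really the alignment; the kernel picks the VA. */
	void *va = mmap((void *)(64 * 1024), len, PROT_READ,
	    MAP_PRIVATE | MAP_ALIGN, fd, 0);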