Example #1
/*
 * Find address for user to map.
 * If MAP_FIXED is not specified, we can pick any address we want, but we will
 * first try the value in *addrp if it is non-NULL.  Thus this implements a
 * way to request a preferred address.
 */
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
    int vacalign, uint_t flags)
{
	caddr_t basep = (caddr_t)((uintptr_t)*addrp & PAGEMASK);
	size_t lenp = len;

	ASSERT(AS_ISCLAIMGAP(as));	/* searches should be serialized */
	if (flags & MAP_FIXED) {
		(void) as_unmap(as, *addrp, len);
		return (0);
	} else if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&
	    !as_gap(as, len, &basep, &lenp, 0, *addrp)) {
		/* User supplied address was available */
		*addrp = basep;
	} else {
		/*
		 * No user supplied address or the address supplied was not
		 * available.
		 */
		map_addr(addrp, len, off, vacalign, flags);
	}
	if (*addrp == NULL)
		return (ENOMEM);
	return (0);
}
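A minimal sketch of the calling convention, mirroring gfs_vop_map() in Example #13 (error handling beyond the basics is trimmed):

	as_rangelock(as);		/* serialize address-space searches */
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		return (error);
	}
	error = as_map(as, *addrp, len, segvn_create, zfod_argsp);
	as_rangeunlock(as);
	return (error);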
Example #2
/*
 * Clean up the list of schedctl shared pages for the process.
 * Called from exec() and exit() system calls.
 */
void
schedctl_proc_cleanup(void)
{
	proc_t *p = curproc;
	sc_page_ctl_t *pagep;
	sc_page_ctl_t *next;

	ASSERT(p->p_lwpcnt == 1);	/* we are single-threaded now */
	ASSERT(curthread->t_schedctl == NULL);

	/*
	 * Since we are single-threaded, we don't have to hold p->p_sc_lock.
	 */
	pagep = p->p_pagep;
	p->p_pagep = NULL;
	while (pagep != NULL) {
		ASSERT(pagep->spc_space == sc_pagesize);
		next = pagep->spc_next;
		/*
		 * Unmap the user space and free the mapping structure.
		 */
		(void) as_unmap(p->p_as, pagep->spc_uaddr, PAGESIZE);
		schedctl_freepage(pagep->spc_amp, (caddr_t)(pagep->spc_base));
		kmem_free(pagep->spc_map, sizeof (ulong_t) * sc_bitmap_words);
		kmem_free(pagep, sizeof (sc_page_ctl_t));
		pagep = next;
	}
}
Example #3
static char *
ksyms_mapin(ksyms_buflist_hdr_t *hdr, size_t size)
{
	size_t sz, rlen = roundup(size, PAGESIZE);
	struct as *as = curproc->p_as;
	char *addr, *raddr;
	ksyms_buflist_t *list = list_head(&hdr->blist);

	if ((addr = ksyms_asmap(as, rlen)) == NULL)
		return (NULL);

	raddr = addr;
	while (size > 0 && list != NULL) {
		sz = MIN(size, BUF_SIZE);

		if (copyout(list->buf, raddr, sz)) {
			(void) as_unmap(as, addr, rlen);
			return (NULL);
		}
		list = list_next(&hdr->blist, list);
		raddr += sz;
		size -= sz;
	}
	return (addr);
}
Example #4
/* ARGSUSED */
static int
ksyms_close(dev_t dev, int flag, int otyp, struct cred *cred)
{
	minor_t clone = getminor(dev);

	(void) as_unmap(curproc->p_as, ksyms_clones[clone].ksyms_base,
	    roundup(ksyms_clones[clone].ksyms_size, PAGESIZE));
	ksyms_clones[clone].ksyms_base = 0;
	modunload_enable();
	(void) ddi_prop_remove(dev, ksyms_devi, "size");
	return (0);
}
Example #5
/*ARGSUSED*/
static int
smb_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred)
{
	smb_clone_t *cp = &smb_clones[getminor(dev)];

	size_t alen = P2ROUNDUP(len, PAGESIZE);
	caddr_t addr;

	iovec_t iov;
	uio_t uio;
	int err;

	if (len <= 0 || (flags & MAP_FIXED))
		return (EINVAL);

	if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
		return (EACCES);

	if (off < 0 || off + len < off || off + len > cp->c_eplen + cp->c_stlen)
		return (ENXIO);

	as_rangelock(as);
	map_addr(&addr, alen, 0, 1, 0);

	if (addr != NULL)
		err = as_map(as, addr, alen, segvn_create, zfod_argsp);
	else
		err = ENOMEM;

	as_rangeunlock(as);
	*addrp = addr;

	if (err != 0)
		return (err);

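	/* Populate the fresh mapping: copy [off, off + len) through a uio. */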
	iov.iov_base = addr;
	iov.iov_len = len;

	bzero(&uio, sizeof (uio_t));
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = off;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_extflg = UIO_COPY_DEFAULT;
	uio.uio_resid = len;

	if ((err = smb_uiomove(cp, &uio)) != 0)
		(void) as_unmap(as, addr, alen);

	return (err);
}
Example #6
/* ARGSUSED */
static int
ksyms_open(dev_t *devp, int flag, int otyp, struct cred *cred)
{
	minor_t clone;
	size_t size = 0;
	size_t realsize;
	char *addr;
	void *hptr = NULL;
	ksyms_buflist_hdr_t hdr;
	bzero(&hdr, sizeof (struct ksyms_buflist_hdr));
	list_create(&hdr.blist, PAGESIZE,
	    offsetof(ksyms_buflist_t, buflist_node));

	if (getminor(*devp) != 0)
		return (ENXIO);

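	/*
	 * Grow the buffer list until an entire snapshot fits; the snapshot
	 * can grow between passes, so retry until realsize <= size.
	 */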
	for (;;) {
		realsize = ksyms_snapshot(ksyms_bcopy, hptr, size);
		if (realsize <= size)
			break;
		size = realsize;
		size = ksyms_buflist_alloc(&hdr, size);
		if (size == 0)
			return (ENOMEM);
		hptr = (void *)&hdr;
	}

	addr = ksyms_mapin(&hdr, realsize);
	ksyms_buflist_free(&hdr);
	if (addr == NULL)
		return (EOVERFLOW);

	/*
	 * Reserve a clone entry.  Note that we don't use clone 0
	 * since that's the "real" minor number.
	 */
	for (clone = 1; clone < nksyms_clones; clone++) {
		if (atomic_cas_ptr(&ksyms_clones[clone].ksyms_base, 0, addr) ==
		    0) {
			ksyms_clones[clone].ksyms_size = realsize;
			*devp = makedevice(getemajor(*devp), clone);
			(void) ddi_prop_update_int(*devp, ksyms_devi,
			    "size", realsize);
			modunload_disable();
			return (0);
		}
	}
	cmn_err(CE_NOTE, "ksyms: too many open references");
	(void) as_unmap(curproc->p_as, addr, roundup(realsize, PAGESIZE));
	return (ENXIO);
}
Example #7
File: elf.c Project: Nakrez/zOS
uintptr_t process_load_elf(struct process *p, uintptr_t elf)
{
    int need_kernel = 0;
    Elf32_Ehdr *hdr = (void *)elf;
    Elf32_Phdr *phdr = (void *)((char *)hdr + hdr->e_phoff);
    struct thread *t = thread_current();

    /*
     * If the process's address space is not the current one, we need an
     * extra mapping in the kernel to write to the pages.
     */
    if (!t || t->parent->as != p->as)
        need_kernel = 1;

    for (uint32_t i = 0; i < hdr->e_phnum; ++i)
    {
        if (phdr[i].p_type != PT_LOAD)
            continue;

        vaddr_t vaddr;
        paddr_t paddr;
        size_t page_size = align(phdr[i].p_memsz, PAGE_SIZE) / PAGE_SIZE;

        /* TODO: Error handling */
        vaddr = region_reserve(p->as, phdr[i].p_vaddr, page_size);

        /* TODO: Error handling */
        paddr = segment_alloc_address(page_size);

        /* TODO: Error handling */
        vaddr = as_map(p->as, vaddr, paddr, phdr[i].p_memsz,
                       AS_MAP_USER | AS_MAP_WRITE);

        /* TODO: Error handling */
        if (need_kernel)
            vaddr = as_map(&kernel_as, 0, paddr, phdr[i].p_memsz,
                           AS_MAP_WRITE);

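        /* Copy the file-backed bytes, then zero the BSS tail (p_memsz >= p_filesz). */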
        memcpy((void *)vaddr, (void *)(elf + phdr[i].p_offset),
               phdr[i].p_filesz);

        memset((char *)vaddr + phdr[i].p_filesz, 0,
               phdr[i].p_memsz - phdr[i].p_filesz);

        if (need_kernel)
            as_unmap(&kernel_as, vaddr, AS_UNMAP_NORELEASE);
    }

    return hdr->e_entry;
}
Example #8
/*ARGSUSED8*/
static int
privcmd_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
{
	struct segmf_crargs a;
	int error;

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, (offset_t)off, 0, flags);
		if (*addrp == NULL) {
			error = ENOMEM;
			goto rangeunlock;
		}
	} else {
		/*
		 * User specified address
		 */
		(void) as_unmap(as, *addrp, len);
	}

	/*
	 * The mapping *must* be MAP_SHARED at offset 0.
	 *
	 * (Foreign pages are treated like device memory; the
	 * ioctl interface allows the backing objects to be
	 * arbitrarily redefined to point at any machine frame.)
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED || off != 0) {
		error = EINVAL;
		goto rangeunlock;
	}

	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	error = as_map(as, *addrp, len, segmf_create, &a);

rangeunlock:
	as_rangeunlock(as);
	return (error);
}
Example #9
/* ARGSUSED */
static int
ksyms_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
{
	ksyms_image_t *kip = &ksyms_clones[getminor(dev)];
	int error = 0;
	char *addr = NULL;
	size_t rlen = 0;
	struct iovec aiov;
	struct uio auio;

	if (flags & MAP_FIXED)
		return (ENOTSUP);

	if (off < 0 || len <= 0 || off > kip->ksyms_size ||
	    len > kip->ksyms_size - off)
		return (EINVAL);

	rlen = roundup(len, PAGESIZE);
	if ((addr = ksyms_asmap(as, rlen)) == NULL)
		return (EOVERFLOW);

	aiov.iov_base = addr;
	aiov.iov_len = len;
	auio.uio_offset = off;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = FREAD;
	auio.uio_extflg = UIO_COPY_CACHED;

	error = ksyms_symtbl_copy(kip, &auio, len);

	if (error)
		(void) as_unmap(as, addr, rlen);
	else
		*addrp = addr;
	return (error);
}
Example #10
/*
 * On fork, remove inherited mappings from the child's address space.
 * The child's threads must call schedctl() to get new shared mappings.
 */
static void
schedctl_fork(kthread_t *pt, kthread_t *ct)
{
	proc_t *pp = ttoproc(pt);
	proc_t *cp = ttoproc(ct);
	sc_page_ctl_t *pagep;

	ASSERT(ct->t_schedctl == NULL);

	/*
	 * Do this only once, whether we are doing fork1() or forkall().
	 * Don't do it at all if the child process is a child of vfork()
	 * because a child of vfork() borrows the parent's address space.
	 */
	if (pt != curthread || (cp->p_flag & SVFORK))
		return;

	mutex_enter(&pp->p_sc_lock);
	for (pagep = pp->p_pagep; pagep != NULL; pagep = pagep->spc_next)
		(void) as_unmap(cp->p_as, pagep->spc_uaddr, PAGESIZE);
	mutex_exit(&pp->p_sc_lock);
}
Example #11
int
munmap(caddr_t addr, size_t len)
{
	struct proc *p = curproc;
	struct as *as = p->p_as;

	if (((uintptr_t)addr & PAGEOFFSET) != 0 || len == 0)
		return (set_errno(EINVAL));

	if (valid_usr_range(addr, len, 0, as, as->a_userlimit) != RANGE_OKAY)
		return (set_errno(EINVAL));

	/*
	 * Discard lwpchan mappings.
	 */
	if (p->p_lcp != NULL)
		lwpchan_delete_mapping(p, addr, addr + len);
	if (as_unmap(as, addr, len) != 0)
		return (set_errno(EINVAL));

	return (0);
}
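For reference, the alignment rule enforced above is visible from userland: munmap() fails with EINVAL when addr is not page-aligned or len is 0. A minimal, hypothetical userland illustration (assumes 4K pages):

	#include <sys/mman.h>

	int
	main(void)
	{
		/*
		 * Map one anonymous page, then unmap it; an unaligned addr
		 * or a zero len would make munmap() fail with EINVAL.
		 */
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANON, -1, 0);
		if (p != MAP_FAILED)
			(void) munmap(p, 4096);
		return (0);
	}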
Example #12
static int
xmem_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	struct cred *cred)
{
	struct seg		*seg;
	struct segxmem_crargs	xmem_a;
	struct xmemnode 	*xp = (struct xmemnode *)VTOXN(vp);
	struct xmount 		*xm = (struct xmount *)VTOXM(vp);
	uint_t			blocknumber;
	int 			error;

#ifdef lint
	maxprot = maxprot;
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0)
		return (EINVAL);

	/* offset, length, and address must all be block aligned */

	if (off & (xm->xm_bsize - 1) || len & (xm->xm_bsize - 1) ||
		((ulong_t)*addrp) & (xm->xm_bsize - 1)) {

		return (EINVAL);
	}

	if (vp->v_type != VREG)
		return (ENODEV);

	if (flags & MAP_PRIVATE)
		return (EINVAL);	/* XXX need to be handled */

	/*
	 * Don't allow mapping a file that has mandatory locks.
	 */
	if (vn_has_mandatory_locks(vp, xp->xn_mode)) {
		return (EAGAIN);
	}

	if ((error = xmem_fillpages(xp, vp, off, len, 1)) != 0) {
		return (error);
	}

	blocknumber = off >> xm->xm_bshift;

	if (flags & MAP_FIXED) {
		/*
		 * User specified address - blow away any previous mappings
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		seg = as_findseg(as, *addrp, 0);

		/*
		 * Fast path: segxmem_remap() will fail if this is the wrong
		 * segment or if len extends beyond the end of the segment.
		 * If it fails, fall back to the regular as_* routines.
		 */

		if (seg && (segxmem_remap(seg, vp, *addrp, len,
				&xp->xn_ppa[blocknumber], prot) == 0)) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (0);
		}
		AS_LOCK_EXIT(as, &as->a_lock);
		if (seg)
			(void) as_unmap(as, *addrp, len);

		as_rangelock(as);

		error = valid_usr_range(*addrp, len, prot, as, as->a_userlimit);

		if (error != RANGE_OKAY ||
			as_gap(as, len, addrp, &len, AH_CONTAIN, *addrp)) {
			as_rangeunlock(as);
			return (EINVAL);
		}

	} else {
		as_rangelock(as);
		map_addr(addrp, len, (offset_t)off, 1, flags);
	}

	if (*addrp == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	xmem_a.xma_vp = vp;
	xmem_a.xma_offset = (u_offset_t)off;
	xmem_a.xma_prot = prot;
	xmem_a.xma_cred = cred;
	xmem_a.xma_ppa = &xp->xn_ppa[blocknumber];
	xmem_a.xma_bshift = xm->xm_bshift;

	error = as_map(as, *addrp, len, segxmem_create, &xmem_a);

	as_rangeunlock(as);
	return (error);
}
Example #13
/* ARGSUSED */
int
gfs_vop_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cred,
    caller_context_t *ct)
{
	int rv;
	ssize_t resid = len;

	/*
	 * Check for bad parameters
	 */
#ifdef _ILP32
	if (len > MAXOFF_T)
		return (ENOMEM);
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOTSUP);
	if (off > MAXOFF_T)
		return (EFBIG);
	if ((long)off < 0 || (long)(off + len) < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (ENODEV);
	if ((prot & (PROT_EXEC | PROT_WRITE)) != 0)
		return (EACCES);

	/*
	 * Find appropriate address if needed, otherwise clear address range.
	 */
	as_rangelock(as);
	rv = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (rv != 0) {
		as_rangeunlock(as);
		return (rv);
	}

	/*
	 * Create mapping
	 */
	rv = as_map(as, *addrp, len, segvn_create, zfod_argsp);
	as_rangeunlock(as);
	if (rv != 0)
		return (rv);

	/*
	 * Fill with data from read()
	 */
	rv = vn_rdwr(UIO_READ, vp, *addrp, len, off, UIO_USERSPACE,
	    0, (rlim64_t)0, cred, &resid);

	if (rv == 0 && resid != 0)
		rv = ENXIO;

	if (rv != 0) {
		as_rangelock(as);
		(void) as_unmap(as, *addrp, len);
		as_rangeunlock(as);
	}

	return (rv);
}
Example #14
/*ARGSUSED*/
static int
xpvtap_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp,
    off_t len, unsigned int prot, unsigned int maxprot, unsigned int flags,
    cred_t *cred_p)
{
	struct segmf_crargs a;
	xpvtap_state_t *state;
	int instance;
	int e;


	if (secpolicy_xvm_control(cred_p)) {
		return (EPERM);
	}

	instance = getminor(dev);
	state = ddi_get_soft_state(xpvtap_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	/* the user app should be doing a MAP_SHARED mapping */
	if ((flags & MAP_TYPE) != MAP_SHARED) {
		return (EINVAL);
	}

	/*
	 * If this is the user ring (offset 0), devmap it (which ends up in
	 * xpvtap_devmap); devmap will allocate and map the ring into the
	 * app's VA space.
	 */
	if (off == 0) {
		e = devmap_setup(dev, (offset_t)off, asp, addrp, (size_t)len,
		    prot, maxprot, flags, cred_p);
		return (e);
	}

	/* this should be the mmap for the gref pages (offset = PAGESIZE) */
	if (off != PAGESIZE) {
		return (EINVAL);
	}

	/* make sure we get the size we're expecting */
	if (len != XPVTAP_GREF_BUFSIZE) {
		return (EINVAL);
	}

	/*
	 * Reserve user app VA space for the gref pages and use segmf to
	 * manage the backing store for the physical memory.  segmf will
	 * map the grefs in and out and fault them in and out as needed.
	 */
	ASSERT(asp == state->bt_map.um_as);
	as_rangelock(asp);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, 0, 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(asp);
			return (ENOMEM);
		}
	} else {
		/* User specified address */
		(void) as_unmap(asp, *addrp, len);
	}
	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	e = as_map(asp, *addrp, len, segmf_create, &a);
	if (e != 0) {
		as_rangeunlock(asp);
		return (e);
	}
	as_rangeunlock(asp);

	/*
	 * Stash the user base address and size of the gref mapping.
	 */
	state->bt_map.um_guest_pages = (caddr_t)*addrp;
	state->bt_map.um_guest_size = (size_t)len;

	/* register an as callback so we can clean up when the app goes away */
	e = as_add_callback(asp, xpvtap_segmf_unregister, state,
	    AS_UNMAP_EVENT, *addrp, len, KM_SLEEP);
	if (e != 0) {
		(void) as_unmap(asp, *addrp, len);
		return (EINVAL);
	}

	/* wake thread to see if there are requests already queued up */
	mutex_enter(&state->bt_thread.ut_mutex);
	state->bt_thread.ut_wake = B_TRUE;
	cv_signal(&state->bt_thread.ut_wake_cv);
	mutex_exit(&state->bt_thread.ut_mutex);

	return (0);
}
Example #15
/*ARGSUSED*/
int
gfxp_ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
    ddi_device_acc_attr_t *accattrp, uint_t rnumber)
{
	struct segdev_crargs dev_a;
	int (*mapfunc)(dev_t dev, off_t off, int prot);
	uint_t hat_attr;
	pfn_t pfn;
	int error, i;

	if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
		return (ENODEV);

	/*
	 * Character devices that support the d_mmap
	 * interface can only be mmap'ed shared.
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED)
		return (EINVAL);

	/*
	 * Check that this region is indeed mappable on this platform.
	 * Use the mapping function.
	 */
	if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
		return (ENXIO);

	if (accattrp != NULL) {
		switch (accattrp->devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			/* Want UC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_STRICTORDER | HAT_PLAT_NOCACHE);
			break;
		case DDI_MERGING_OK_ACC:
			/* Want WC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_MERGING_OK | HAT_PLAT_NOCACHE);
			break;
		}
	}

	/*
	 * Check to ensure that the entire range is
	 * legal and we are not trying to map in
	 * more than the device will let us.
	 */
	for (i = 0; i < len; i += PAGESIZE) {
		if (i == 0) {
			/*
			 * Save the pfn at offset 0 here.  This pfn will be
			 * used later to get the user address.
			 */
			if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
					maxprot)) == PFN_INVALID)
				return (ENXIO);
		} else {
			if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
				PFN_INVALID)
				return (ENXIO);
		}
	}

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		/*
		 * Pick an address without worrying about
		 * any VAC alignment constraints.
		 */
		map_addr(addrp, len, ptob(pfn), 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/*
		 * User-specified address; blow away any previous mappings.
		 */
		(void) as_unmap(as, *addrp, len);
	}

	dev_a.mapfunc = mapfunc;
	dev_a.dev = dev;
	dev_a.offset = (offset_t)offset;
	dev_a.type = flags & MAP_TYPE;
	dev_a.prot = (uchar_t)prot;
	dev_a.maxprot = (uchar_t)maxprot;
	dev_a.hat_attr = hat_attr;
#if DEBUG
	dev_a.hat_flags = 0;
#else
	dev_a.hat_flags = HAT_LOAD_LOCK;
#endif
	dev_a.devmap_data = NULL;

	error = as_map(as, *addrp, len, segdev_create, &dev_a);
	as_rangeunlock(as);

	return (error);
}
Example #16
/*
 * Returns 0 on success.
 */
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int	error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * Extend the heap to brkszc alignment but use the current
	 * p->p_brkpageszc for the newly created segment.  This allows the
	 * new extension segment to be concatenated successfully with the
	 * existing brk segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed, p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for the heap size).  If pgsz is greater than PAGESIZE, calculate
	 * the heap size as the real new heap size by rounding it up to pgsz.
	 * This is useful since we may want to know where the heap ends
	 * without knowing the heap pagesize (e.g. some old code), and also,
	 * if the heap pagesize changes, we can update p_brkpageszc but delay
	 * adding the new mapping yet still know from p_brksize where the
	 * heap really ends.  The user-requested heap end is stored in a libc
	 * variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * Use PAGESIZE to round up ova because we want to know the real value
	 * of the current heap end in case p_brkpageszc has changed since the
	 * last time p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add a new zfod mapping to extend the UNIX data segment.
		 * AS_MAP_NO_LPOOB means use szc 0 and don't reapply OOB
		 * policies via map_pgszcvec().  Use AS_MAP_HEAP to get
		 * intermediate page sizes if ova is not aligned to szc's
		 * pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	return (0);
}
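The grow/shrink decision above hinges on page-rounded break values; P2ROUNDUP(addr, align) rounds addr up to the next multiple of a power-of-two align. A small worked illustration (assuming 4K pages):

	/*
	 * P2ROUNDUP(0x12345, 0x1000) == 0x13000
	 * P2ROUNDUP(0x13000, 0x1000) == 0x13000   (already aligned)
	 *
	 * With nva > ova, the gap [ova, nva) gets a new zfod segment via
	 * as_map(); with nva < ova, the tail [nva, ova) is released via
	 * as_unmap().
	 */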