Example #1
File: elf.c Project: Nakrez/zOS
uintptr_t process_load_elf(struct process *p, uintptr_t elf)
{
    int need_kernel = 0;
    Elf32_Ehdr *hdr = (void *)elf;
    Elf32_Phdr *phdr = (void *)((char *)hdr + hdr->e_phoff);
    struct thread *t = thread_current();

    /*
     * If the process address space is not the current one, we need an
     * extra mapping in the kernel so we can write to the pages.
     */
    if (!t || t->parent->as != p->as)
        need_kernel = 1;

    for (uint32_t i = 0; i < hdr->e_phnum; ++i)
    {
        if (phdr[i].p_type != PT_LOAD)
            continue;

        vaddr_t vaddr;
        paddr_t paddr;
        size_t page_size = align(phdr[i].p_memsz, PAGE_SIZE) / PAGE_SIZE;

        /* TODO: Error handling */
        vaddr = region_reserve(p->as, phdr[i].p_vaddr, page_size);

        /* TODO: Error handling */
        paddr = segment_alloc_address(page_size);

        /* TODO: Error handling */
        vaddr = as_map(p->as, vaddr, paddr, phdr[i].p_memsz,
                       AS_MAP_USER | AS_MAP_WRITE);

        /* TODO: Error handling */
        if (need_kernel)
            vaddr = as_map(&kernel_as, 0, paddr, phdr[i].p_memsz,
                           AS_MAP_WRITE);

        memcpy((void *)vaddr, (void *)(elf + phdr[i].p_offset),
               phdr[i].p_filesz);

        memset((char *)vaddr + phdr[i].p_filesz, 0,
               phdr[i].p_memsz - phdr[i].p_filesz);

        if (need_kernel)
            as_unmap(&kernel_as, vaddr, AS_UNMAP_NORELEASE);
    }

    return hdr->e_entry;
}
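
A note on Example #1: process_load_elf() trusts e_phoff and e_phnum taken straight from the image. A minimal sketch (not part of zOS) of the caller-side validation one would presumably perform first, using only the standard <elf.h> definitions:

#include <elf.h>
#include <string.h>

/* Hypothetical caller-side check before handing the image to
 * process_load_elf(); names below it are standard ELF, not zOS. */
static int elf32_header_ok(const Elf32_Ehdr *hdr)
{
    if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
        return 0;                               /* not an ELF image */
    if (hdr->e_ident[EI_CLASS] != ELFCLASS32)
        return 0;                               /* wrong word size */
    if (hdr->e_type != ET_EXEC)
        return 0;                               /* only plain executables */
    /* Program headers must exist for the PT_LOAD loop to make sense. */
    return hdr->e_phoff != 0 && hdr->e_phnum != 0;
}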
/*
 * Used for MAP_ANON - fast way to get anonymous pages
 */
static int
zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
    offset_t pos)
{
	struct segvn_crargs vn_a;
	int error;

	if (((PROT_ALL & uprot) != uprot))
		return (EACCES);

	if ((flags & MAP_FIXED) != 0) {
		caddr_t userlimit;

		/*
		 * Use the user address.  First verify that
		 * the address to be used is page aligned.
		 * Then make some simple bounds checks.
		 */
		if (((uintptr_t)*addrp & PAGEOFFSET) != 0)
			return (EINVAL);

		userlimit = flags & _MAP_LOW32 ?
		    (caddr_t)USERLIMIT32 : as->a_userlimit;
		switch (valid_usr_range(*addrp, len, uprot, as, userlimit)) {
		case RANGE_OKAY:
			break;
		case RANGE_BADPROT:
			return (ENOTSUP);
		case RANGE_BADADDR:
		default:
			return (ENOMEM);
		}
	}
	/*
	 * No need to worry about vac alignment for anonymous
	 * pages since this is a "clone" object that doesn't
	 * yet exist.
	 */
	error = choose_addr(as, addrp, len, pos, ADDR_NOVACALIGN, flags);
	if (error != 0) {
		return (error);
	}

	/*
	 * Use the seg_vn segment driver; passing in the NULL amp
	 * gives the desired "cloning" effect.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = uprot;
	vn_a.maxprot = PROT_ALL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = CRED();
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	return (as_map(as, *addrp, len, segvn_create, &vn_a));
}
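
For context, a sketch of the user-level request this path serves: an anonymous mmap(2), which the NULL-amp segvn arguments above set up as zero-fill "clone" pages. The helper name is hypothetical; the flags are standard mmap flags.

#include <sys/mman.h>
#include <stddef.h>

/* Hypothetical caller: ask for len bytes of zero-filled anonymous memory. */
static void *
alloc_anon(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	return (p == MAP_FAILED ? NULL : p);
}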
Example #3
/*ARGSUSED*/
static int
smb_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred)
{
	smb_clone_t *cp = &smb_clones[getminor(dev)];

	size_t alen = P2ROUNDUP(len, PAGESIZE);
	caddr_t addr;

	iovec_t iov;
	uio_t uio;
	int err;

	if (len <= 0 || (flags & MAP_FIXED))
		return (EINVAL);

	if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
		return (EACCES);

	if (off < 0 || off + len < off || off + len > cp->c_eplen + cp->c_stlen)
		return (ENXIO);

	as_rangelock(as);
	map_addr(&addr, alen, 0, 1, 0);

	if (addr != NULL)
		err = as_map(as, addr, alen, segvn_create, zfod_argsp);
	else
		err = ENOMEM;

	as_rangeunlock(as);
	*addrp = addr;

	if (err != 0)
		return (err);

	iov.iov_base = addr;
	iov.iov_len = len;

	bzero(&uio, sizeof (uio_t));
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = off;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_extflg = UIO_COPY_DEFAULT;
	uio.uio_resid = len;

	if ((err = smb_uiomove(cp, &uio)) != 0)
		(void) as_unmap(as, addr, alen);

	return (err);
}
Example #4
/*
 * rlen is in multiples of PAGESIZE
 */
static char *
ksyms_asmap(struct as *as, size_t rlen)
{
	char *addr = NULL;

	as_rangelock(as);
	map_addr(&addr, rlen, 0, 1, 0);
	if (addr == NULL || as_map(as, addr, rlen, segvn_create, zfod_argsp)) {
		as_rangeunlock(as);
		return (NULL);
	}
	as_rangeunlock(as);
	return (addr);
}
/*ARGSUSED*/
static int
bootfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	int ret;
	segvn_crargs_t vn_a;

#ifdef	_ILP32
	if (len > MAXOFF_T)
		return (ENOMEM);
#endif

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0 || off > MAXOFFSET_T - off)
		return (ENXIO);

	if (vp->v_type != VREG)
		return (ENODEV);

	if (prot & PROT_WRITE)
		return (ENOTSUP);

	as_rangelock(as);
	ret = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (ret != 0) {
		as_rangeunlock(as);
		return (ret);
	}

	vn_a.vp = vp;
	vn_a.offset = (u_offset_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	ret = as_map(as, *addrp, len, segvn_create, &vn_a);

	as_rangeunlock(as);
	return (ret);
}
/*ARGSUSED8*/
static int
privcmd_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
{
	struct segmf_crargs a;
	int error;

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, (offset_t)off, 0, flags);
		if (*addrp == NULL) {
			error = ENOMEM;
			goto rangeunlock;
		}
	} else {
		/*
		 * User specified address
		 */
		(void) as_unmap(as, *addrp, len);
	}

	/*
	 * The mapping *must* be MAP_SHARED at offset 0.
	 *
	 * (Foreign pages are treated like device memory; the
	 * ioctl interface allows the backing objects to be
	 * arbitrarily redefined to point at any machine frame.)
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED || off != 0) {
		error = EINVAL;
		goto rangeunlock;
	}

	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	error = as_map(as, *addrp, len, segmf_create, &a);

rangeunlock:
	as_rangeunlock(as);
	return (error);
}
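
The MAP_FIXED branch above, like several of the drivers that follow, uses a recurring idiom when the caller supplies an address: blow away whatever is mapped there, then create the new segment, all under the address-space range lock. A standalone kernel-side sketch (helper name hypothetical, creation function and arguments supplied by the caller):

#include <sys/types.h>
#include <vm/as.h>
#include <vm/seg.h>

/* Sketch of the MAP_FIXED idiom: replace whatever lives at addr. */
static int
map_fixed_sketch(struct as *as, caddr_t addr, size_t len,
    int (*crfp)(), void *argsp)
{
	int error;

	as_rangelock(as);
	(void) as_unmap(as, addr, len);		/* discard old mappings */
	error = as_map(as, addr, len, crfp, argsp);
	as_rangeunlock(as);

	return (error);
}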
Example #7
/*
 * This function is called when a page needs to be mapped into a
 * process's address space.  Allocate the user address space and
 * set up the mapping to the page.  Assumes the page has already
 * been allocated and locked in memory via schedctl_getpage.
 */
static int
schedctl_map(struct anon_map *amp, caddr_t *uaddrp, caddr_t kaddr)
{
	caddr_t addr = NULL;
	struct as *as = curproc->p_as;
	struct segvn_crargs vn_a;
	int error;

	as_rangelock(as);
	/* pass address of kernel mapping as offset to avoid VAC conflicts */
	map_addr(&addr, PAGESIZE, (offset_t)(uintptr_t)kaddr, 1, 0);
	if (addr == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	/*
	 * Use segvn to set up the mapping to the page.
	 */
	vn_a.vp = NULL;
	vn_a.offset = 0;
	vn_a.cred = NULL;
	vn_a.type = MAP_SHARED;
	vn_a.prot = vn_a.maxprot = PROT_ALL;
	vn_a.flags = 0;
	vn_a.amp = amp;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;
	error = as_map(as, addr, PAGESIZE, segvn_create, &vn_a);
	as_rangeunlock(as);

	if (error)
		return (error);

	*uaddrp = addr;
	return (0);
}
Example #8
static int
xmem_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	struct cred *cred)
{
	struct seg		*seg;
	struct segxmem_crargs	xmem_a;
	struct xmemnode 	*xp = (struct xmemnode *)VTOXN(vp);
	struct xmount 		*xm = (struct xmount *)VTOXM(vp);
	uint_t			blocknumber;
	int 			error;

#ifdef lint
	maxprot = maxprot;
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0)
		return (EINVAL);

	/* offset, length and address all have to be block aligned */

	if (off & (xm->xm_bsize - 1) || len & (xm->xm_bsize - 1) ||
		((ulong_t)*addrp) & (xm->xm_bsize - 1)) {

		return (EINVAL);
	}

	if (vp->v_type != VREG)
		return (ENODEV);

	if (flags & MAP_PRIVATE)
		return (EINVAL);	/* XXX need to be handled */

	/*
	 * Don't allow mapping to locked file
	 */
	if (vn_has_mandatory_locks(vp, xp->xn_mode)) {
		return (EAGAIN);
	}

	if (error = xmem_fillpages(xp, vp, off, len, 1)) {
		return (error);
	}

	blocknumber = off >> xm->xm_bshift;

	if (flags & MAP_FIXED) {
		/*
		 * User specified address - blow away any previous mappings
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		seg = as_findseg(as, *addrp, 0);

		/*
		 * Fast path. segxmem_remap will fail if this is the wrong
		 * segment or if the len is beyond end of seg. If it fails,
		 * we do the regular stuff thru as_* routines.
		 */

		if (seg && (segxmem_remap(seg, vp, *addrp, len,
				&xp->xn_ppa[blocknumber], prot) == 0)) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (0);
		}
		AS_LOCK_EXIT(as, &as->a_lock);
		if (seg)
			(void) as_unmap(as, *addrp, len);

		as_rangelock(as);

		error = valid_usr_range(*addrp, len, prot, as, as->a_userlimit);

		if (error != RANGE_OKAY ||
			as_gap(as, len, addrp, &len, AH_CONTAIN, *addrp)) {
			as_rangeunlock(as);
			return (EINVAL);
		}

	} else {
		as_rangelock(as);
		map_addr(addrp, len, (offset_t)off, 1, flags);
	}

	if (*addrp == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	xmem_a.xma_vp = vp;
	xmem_a.xma_offset = (u_offset_t)off;
	xmem_a.xma_prot = prot;
	xmem_a.xma_cred = cred;
	xmem_a.xma_ppa = &xp->xn_ppa[blocknumber];
	xmem_a.xma_bshift = xm->xm_bshift;

	error = as_map(as, *addrp, len, segxmem_create, &xmem_a);

	as_rangeunlock(as);
	return (error);
}
Example #9
/*
 * This function is called when a memory device is mmap'ed.
 * Set up the mapping to the correct device driver.
 */
static int
mmsegmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
{
	struct segvn_crargs vn_a;
	struct segdev_crargs dev_a;
	int error;
	minor_t minor;
	off_t i;

	minor = getminor(dev);

	as_rangelock(as);
	/*
	 * No need to worry about vac alignment on /dev/zero
	 * since this is a "clone" object that doesn't yet exist.
	 */
	error = choose_addr(as, addrp, len, off,
	    (minor == M_MEM) || (minor == M_KMEM), flags);
	if (error != 0) {
		as_rangeunlock(as);
		return (error);
	}

	switch (minor) {
	case M_MEM:
		/* /dev/mem cannot be mmap'ed with MAP_PRIVATE */
		if ((flags & MAP_TYPE) != MAP_SHARED) {
			as_rangeunlock(as);
			return (EINVAL);
		}

		/*
		 * Check to ensure that the entire range is
		 * legal and we are not trying to map in
		 * more than the device will let us.
		 */
		for (i = 0; i < len; i += PAGESIZE) {
			if (mmmmap(dev, off + i, maxprot) == -1) {
				as_rangeunlock(as);
				return (ENXIO);
			}
		}

		/*
		 * Use seg_dev segment driver for /dev/mem mapping.
		 */
		dev_a.mapfunc = mmmmap;
		dev_a.dev = dev;
		dev_a.offset = off;
		dev_a.type = (flags & MAP_TYPE);
		dev_a.prot = (uchar_t)prot;
		dev_a.maxprot = (uchar_t)maxprot;
		dev_a.hat_attr = 0;

		/*
		 * Make /dev/mem mappings non-consistent since we can't
		 * alias pages that don't have page structs behind them,
		 * such as kernel stack pages. If someone mmap()s a kernel
		 * stack page and if we give him a tte with cv, a line from
		 * that page can get into both pages of the spitfire d$.
		 * But snoop from another processor will only invalidate
		 * the first page. This later caused kernel (xc_attention)
		 * to go into an infinite loop at pil 13 and no interrupts
		 * could come in. See 1203630.
		 *
		 */
		dev_a.hat_flags = HAT_LOAD_NOCONSIST;
		dev_a.devmap_data = NULL;

		error = as_map(as, *addrp, len, segdev_create, &dev_a);
		break;

	case M_ZERO:
		/*
		 * Use seg_vn segment driver for /dev/zero mapping.
		 * Passing in a NULL amp gives us the "cloning" effect.
		 */
		vn_a.vp = NULL;
		vn_a.offset = 0;
		vn_a.type = (flags & MAP_TYPE);
		vn_a.prot = prot;
		vn_a.maxprot = maxprot;
		vn_a.flags = flags & ~MAP_TYPE;
		vn_a.cred = cred;
		vn_a.amp = NULL;
		vn_a.szc = 0;
		vn_a.lgrp_mem_policy_flags = 0;
		error = as_map(as, *addrp, len, segvn_create, &vn_a);
		break;

	case M_KMEM:
	case M_ALLKMEM:
		/* No longer supported with KPR. */
		error = ENXIO;
		break;

	case M_NULL:
		/*
		 * Use seg_dev segment driver for /dev/null mapping.
		 */
		dev_a.mapfunc = mmmmap;
		dev_a.dev = dev;
		dev_a.offset = off;
		dev_a.type = 0;		/* neither PRIVATE nor SHARED */
		dev_a.prot = dev_a.maxprot = (uchar_t)PROT_NONE;
		dev_a.hat_attr = 0;
		dev_a.hat_flags = 0;
		error = as_map(as, *addrp, len, segdev_create, &dev_a);
		break;

	default:
		error = ENXIO;
	}

	as_rangeunlock(as);
	return (error);
}
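
The M_ZERO case is what backs the classic user-level trick of mapping /dev/zero to get anonymous memory. A hedged sketch of that caller side (helper name hypothetical):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

/* Hypothetical caller: zero-filled pages come from the M_ZERO branch above. */
static void *
map_dev_zero(size_t len)
{
	int fd = open("/dev/zero", O_RDWR);
	void *p;

	if (fd < 0)
		return (NULL);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	(void) close(fd);	/* the mapping survives the close */

	return (p == MAP_FAILED ? NULL : p);
}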
Example #10
/* ARGSUSED */
int
gfs_vop_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cred,
    caller_context_t *ct)
{
	int rv;
	ssize_t resid = len;

	/*
	 * Check for bad parameters
	 */
#ifdef _ILP32
	if (len > MAXOFF_T)
		return (ENOMEM);
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOTSUP);
	if (off > MAXOFF_T)
		return (EFBIG);
	if ((long)off < 0 || (long)(off + len) < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (ENODEV);
	if ((prot & (PROT_EXEC | PROT_WRITE)) != 0)
		return (EACCES);

	/*
	 * Find appropriate address if needed, otherwise clear address range.
	 */
	as_rangelock(as);
	rv = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (rv != 0) {
		as_rangeunlock(as);
		return (rv);
	}

	/*
	 * Create mapping
	 */
	rv = as_map(as, *addrp, len, segvn_create, zfod_argsp);
	as_rangeunlock(as);
	if (rv != 0)
		return (rv);

	/*
	 * Fill with data from read()
	 */
	rv = vn_rdwr(UIO_READ, vp, *addrp, len, off, UIO_USERSPACE,
	    0, (rlim64_t)0, cred, &resid);

	if (rv == 0 && resid != 0)
		rv = ENXIO;

	if (rv != 0) {
		as_rangelock(as);
		(void) as_unmap(as, *addrp, len);
		as_rangeunlock(as);
	}

	return (rv);
}
Example #11
/*ARGSUSED*/
static int
xpvtap_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp,
    off_t len, unsigned int prot, unsigned int maxprot, unsigned int flags,
    cred_t *cred_p)
{
	struct segmf_crargs a;
	xpvtap_state_t *state;
	int instance;
	int e;


	if (secpolicy_xvm_control(cred_p)) {
		return (EPERM);
	}

	instance = getminor(dev);
	state = ddi_get_soft_state(xpvtap_statep, instance);
	if (state == NULL) {
		return (EBADF);
	}

	/* the user app should be doing a MAP_SHARED mapping */
	if ((flags & MAP_TYPE) != MAP_SHARED) {
		return (EINVAL);
	}

	/*
	 * if this is the user ring (offset = 0), devmap it (which ends up in
	 * xpvtap_devmap). devmap will alloc and map the ring into the
	 * app's VA space.
	 */
	if (off == 0) {
		e = devmap_setup(dev, (offset_t)off, asp, addrp, (size_t)len,
		    prot, maxprot, flags, cred_p);
		return (e);
	}

	/* this should be the mmap for the gref pages (offset = PAGESIZE) */
	if (off != PAGESIZE) {
		return (EINVAL);
	}

	/* make sure we get the size we're expecting */
	if (len != XPVTAP_GREF_BUFSIZE) {
		return (EINVAL);
	}

	/*
	 * reserve user app VA space for the gref pages and use segmf to
	 * manage the backing store for the physical memory. segmf will
	 * map in/out the grefs and fault them in/out.
	 */
	ASSERT(asp == state->bt_map.um_as);
	as_rangelock(asp);
	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, 0, 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(asp);
			return (ENOMEM);
		}
	} else {
		/* User specified address */
		(void) as_unmap(asp, *addrp, len);
	}
	a.dev = dev;
	a.prot = (uchar_t)prot;
	a.maxprot = (uchar_t)maxprot;
	e = as_map(asp, *addrp, len, segmf_create, &a);
	if (e != 0) {
		as_rangeunlock(asp);
		return (e);
	}
	as_rangeunlock(asp);

	/*
	 * Stash user base address, and compute address where the request
	 * array will end up.
	 */
	state->bt_map.um_guest_pages = (caddr_t)*addrp;
	state->bt_map.um_guest_size = (size_t)len;

	/* register an as callback so we can cleanup when the app goes away */
	e = as_add_callback(asp, xpvtap_segmf_unregister, state,
	    AS_UNMAP_EVENT, *addrp, len, KM_SLEEP);
	if (e != 0) {
		(void) as_unmap(asp, *addrp, len);
		return (EINVAL);
	}

	/* wake thread to see if there are requests already queued up */
	mutex_enter(&state->bt_thread.ut_mutex);
	state->bt_thread.ut_wake = B_TRUE;
	cv_signal(&state->bt_thread.ut_wake_cv);
	mutex_exit(&state->bt_thread.ut_mutex);

	return (0);
}
/*ARGSUSED*/
int
gfxp_ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
    ddi_device_acc_attr_t *accattrp, uint_t rnumber)
{
	struct segdev_crargs dev_a;
	int (*mapfunc)(dev_t dev, off_t off, int prot);
	uint_t hat_attr;
	pfn_t pfn;
	int error, i;

	if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
		return (ENODEV);

	/*
	 * Character devices that support the d_mmap
	 * interface can only be mmap'ed shared.
	 */
	if ((flags & MAP_TYPE) != MAP_SHARED)
		return (EINVAL);

	/*
	 * Check that this region is indeed mappable on this platform.
	 * Use the mapping function.
	 */
	if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
		return (ENXIO);

	if (accattrp != NULL) {
		switch (accattrp->devacc_attr_dataorder) {
		case DDI_STRICTORDER_ACC:
			/* Want UC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_STRICTORDER | HAT_PLAT_NOCACHE);
			break;
		case DDI_MERGING_OK_ACC:
			/* Want WC */
			hat_attr &= ~HAT_ORDER_MASK;
			hat_attr |= (HAT_MERGING_OK | HAT_PLAT_NOCACHE);
			break;
		}
	}

	/*
	 * Check to ensure that the entire range is
	 * legal and we are not trying to map in
	 * more than the device will let us.
	 */
	for (i = 0; i < len; i += PAGESIZE) {
		if (i == 0) {
			/*
			 * Save the pfn at offset here. This pfn will be
			 * used later to get user address.
			 */
			if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
					maxprot)) == PFN_INVALID)
				return (ENXIO);
		} else {
			if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
				PFN_INVALID)
				return (ENXIO);
		}
	}

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		/*
		 * Pick an address w/o worrying about
		 * any vac alignment constraints.
		 */
		map_addr(addrp, len, ptob(pfn), 0, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/*
		 * User-specified address; blow away any previous mappings.
		 */
		(void) as_unmap(as, *addrp, len);
	}

	dev_a.mapfunc = mapfunc;
	dev_a.dev = dev;
	dev_a.offset = (offset_t)offset;
	dev_a.type = flags & MAP_TYPE;
	dev_a.prot = (uchar_t)prot;
	dev_a.maxprot = (uchar_t)maxprot;
	dev_a.hat_attr = hat_attr;
#if DEBUG
	dev_a.hat_flags = 0;
#else
	dev_a.hat_flags = HAT_LOAD_LOCK;
#endif
	dev_a.devmap_data = NULL;

	error = as_map(as, *addrp, len, segdev_create, &dev_a);
	as_rangeunlock(as);

	return (error);
}
Example #13
/*
 * This routine assumes that the stack grows downward.
 * Returns 0 on success, errno on failure.
 */
int
grow_internal(caddr_t sp, uint_t growszc)
{
	struct proc *p = curproc;
	size_t newsize;
	size_t oldsize;
	int    error;
	size_t pgsz;
	uint_t szc;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

	ASSERT(sp < p->p_usrstack);
	sp = (caddr_t)P2ALIGN((uintptr_t)sp, PAGESIZE);

	/*
	 * grow to growszc alignment but use current p->p_stkpageszc for
	 * the segvn_crargs szc passed to segvn_create. For memcntl to
	 * increase the szc, this allows the new extension segment to be
	 * concatenated successfully with the existing stack segment.
	 */
	if ((szc = growszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
		newsize = p->p_usrstack - (caddr_t)P2ALIGN((uintptr_t)sp, pgsz);
		if (newsize > (size_t)p->p_stk_ctl) {
			szc = 0;
			pgsz = PAGESIZE;
			newsize = p->p_usrstack - sp;
		}
	} else {
		pgsz = PAGESIZE;
		newsize = p->p_usrstack - sp;
	}

	if (newsize > (size_t)p->p_stk_ctl) {
		(void) rctl_action(rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p,
		    RCA_UNSAFE_ALL);

		return (ENOMEM);
	}

	oldsize = p->p_stksize;
	ASSERT(P2PHASE(oldsize, PAGESIZE) == 0);

	if (newsize <= oldsize) {	/* prevent the stack from shrinking */
		return (0);
	}

	if (!(p->p_stkprot & PROT_EXEC)) {
		crargs.prot &= ~PROT_EXEC;
	}
	/*
	 * extend stack with the proposed new growszc, which is different
	 * than p_stkpageszc only on a memcntl to increase the stack pagesize.
	 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies via
	 * map_pgszcvec(). Use AS_MAP_STACK to get intermediate page sizes
	 * if not aligned to szc's pgsz.
	 */
	if (szc > 0) {
		caddr_t oldsp = p->p_usrstack - oldsize;
		caddr_t austk = (caddr_t)P2ALIGN((uintptr_t)p->p_usrstack,
		    pgsz);

		if (IS_P2ALIGNED(p->p_usrstack, pgsz) || oldsp < austk) {
			crargs.szc = p->p_stkpageszc ? p->p_stkpageszc :
			    AS_MAP_NO_LPOOB;
		} else if (oldsp == austk) {
			crargs.szc = szc;
		} else {
			crargs.szc = AS_MAP_STACK;
		}
	} else {
		crargs.szc = AS_MAP_NO_LPOOB;
	}
	crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_DOWN;

	if ((error = as_map(p->p_as, p->p_usrstack - newsize, newsize - oldsize,
	    segvn_create, &crargs)) != 0) {
		if (error == EAGAIN) {
			cmn_err(CE_WARN, "Sorry, no swap space to grow stack "
			    "for pid %d (%s)", p->p_pid, PTOU(p)->u_comm);
		}
		return (error);
	}
	p->p_stksize = newsize;
	return (0);
}
Example #14
/*
 * Returns 0 on success.
 */
int
brk_internal(caddr_t nva, uint_t brkszc)
{
	caddr_t ova;			/* current break address */
	size_t size;
	int	error;
	struct proc *p = curproc;
	struct as *as = p->p_as;
	size_t pgsz;
	uint_t szc;
	rctl_qty_t as_rctl;

	/*
	 * extend heap to brkszc alignment but use current p->p_brkpageszc
	 * for the newly created segment. This allows the new extension
	 * segment to be concatenated successfully with the existing brk
	 * segment.
	 */
	if ((szc = brkszc) != 0) {
		pgsz = page_get_pagesize(szc);
		ASSERT(pgsz > PAGESIZE);
	} else {
		pgsz = PAGESIZE;
	}

	mutex_enter(&p->p_lock);
	as_rctl = rctl_enforced_value(rctlproc_legacy[RLIMIT_DATA],
	    p->p_rctls, p);
	mutex_exit(&p->p_lock);

	/*
	 * If p_brkbase has not yet been set, the first call
	 * to brk() will initialize it.
	 */
	if (p->p_brkbase == 0)
		p->p_brkbase = nva;

	/*
	 * Before multiple page size support existed p_brksize was the value
	 * not rounded to the pagesize (i.e. it stored the exact user request
	 * for heap size). If pgsz is greater than PAGESIZE calculate the
	 * heap size as the real new heap size by rounding it up to pgsz.
	 * This is useful since we may want to know where the heap ends
	 * without knowing heap pagesize (e.g. some old code) and also if
	 * heap pagesize changes we can update p_brkpageszc but delay adding
	 * new mapping yet still know from p_brksize where the heap really
	 * ends. The user requested heap end is stored in libc variable.
	 */
	if (pgsz > PAGESIZE) {
		caddr_t tnva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
		size = tnva - p->p_brkbase;
		if (tnva < p->p_brkbase || (size > p->p_brksize &&
		    size > (size_t)as_rctl)) {
			szc = 0;
			pgsz = PAGESIZE;
			size = nva - p->p_brkbase;
		}
	} else {
		size = nva - p->p_brkbase;
	}

	/*
	 * use PAGESIZE to roundup ova because we want to know the real value
	 * of the current heap end in case p_brkpageszc changes since the last
	 * p_brksize was computed.
	 */
	nva = (caddr_t)P2ROUNDUP((uintptr_t)nva, pgsz);
	ova = (caddr_t)P2ROUNDUP((uintptr_t)(p->p_brkbase + p->p_brksize),
	    PAGESIZE);

	if ((nva < p->p_brkbase) || (size > p->p_brksize &&
	    size > as_rctl)) {
		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_DATA], p->p_rctls, p,
		    RCA_SAFE);
		mutex_exit(&p->p_lock);
		return (ENOMEM);
	}

	if (nva > ova) {
		struct segvn_crargs crargs =
		    SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);

		if (!(p->p_datprot & PROT_EXEC)) {
			crargs.prot &= ~PROT_EXEC;
		}

		/*
		 * Add new zfod mapping to extend UNIX data segment
		 * AS_MAP_NO_LPOOB means use 0, and don't reapply OOB policies
		 * via map_pgszcvec(). Use AS_MAP_HEAP to get intermediate
		 * page sizes if ova is not aligned to szc's pgsz.
		 */
		if (szc > 0) {
			caddr_t rbss;

			rbss = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase,
			    pgsz);
			if (IS_P2ALIGNED(p->p_bssbase, pgsz) || ova > rbss) {
				crargs.szc = p->p_brkpageszc ? p->p_brkpageszc :
				    AS_MAP_NO_LPOOB;
			} else if (ova == rbss) {
				crargs.szc = szc;
			} else {
				crargs.szc = AS_MAP_HEAP;
			}
		} else {
			crargs.szc = AS_MAP_NO_LPOOB;
		}
		crargs.lgrp_mem_policy_flags = LGRP_MP_FLAG_EXTEND_UP;
		error = as_map(as, ova, (size_t)(nva - ova), segvn_create,
		    &crargs);
		if (error) {
			return (error);
		}

	} else if (nva < ova) {
		/*
		 * Release mapping to shrink UNIX data segment.
		 */
		(void) as_unmap(as, nva, (size_t)(ova - nva));
	}
	p->p_brksize = size;
	return (0);
}
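
For reference, a hedged sketch of the user-level call that reaches this code: extending the heap via the break, which brk_internal() satisfies by as_map()ing a zero-fill segvn segment above the old break. The helper name is hypothetical; sbrk(3C) is the standard interface.

#include <unistd.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical caller: grow the heap by len bytes through the break. */
static void *
heap_extend(size_t len)
{
	void *old_brk = sbrk((intptr_t)len);

	return (old_brk == (void *)-1 ? NULL : old_brk);
}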