Example #1
/*
 * xpvtap_segmf_register()
 */
static int
xpvtap_segmf_register(xpvtap_state_t *state)
{
	struct seg *seg;
	uint64_t pte_ma;
	struct as *as;
	caddr_t uaddr;
	uint_t pgcnt;
	int i;


	as = state->bt_map.um_as;
	pgcnt = btopr(state->bt_map.um_guest_size);
	uaddr = state->bt_map.um_guest_pages;

	if (pgcnt == 0) {
		return (DDI_FAILURE);
	}

	AS_LOCK_ENTER(as, RW_READER);

	seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
	if ((seg == NULL) || ((uaddr + state->bt_map.um_guest_size) >
	    (seg->s_base + seg->s_size))) {
		AS_LOCK_EXIT(as);
		return (DDI_FAILURE);
	}

	/*
	 * lock down the htables so the HAT can't steal them. Register the
	 * PTE MA's for each gref page with seg_mf so we can do user space
	 * gref mappings.
	 */
	for (i = 0; i < pgcnt; i++) {
		hat_prepare_mapping(as->a_hat, uaddr, &pte_ma);
		hat_devload(as->a_hat, uaddr, PAGESIZE, (pfn_t)0,
		    PROT_READ | PROT_WRITE | PROT_USER | HAT_UNORDERED_OK,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
		hat_release_mapping(as->a_hat, uaddr);
		segmf_add_gref_pte(seg, uaddr, pte_ma);
		uaddr += PAGESIZE;
	}

	state->bt_map.um_registered = B_TRUE;

	AS_LOCK_EXIT(as);

	return (DDI_SUCCESS);
}
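These examples share the same idiom: take the address-space lock as a reader, look up the segment covering the user range with as_findseg() or as_segat(), verify that the range fits inside that segment, do the work, and drop the lock. The snippets come from different vintages of the code base, which is why some pass &as->a_lock to AS_LOCK_ENTER()/AS_LOCK_EXIT() while others use the newer form that takes only the lock mode. The helper below is a minimal sketch of that bounds-check pattern, not part of the original driver, and it assumes the two-argument AS_LOCK_ENTER() used in this example.

#include <sys/types.h>
#include <vm/as.h>
#include <vm/seg.h>

/*
 * Hypothetical helper (not from the original source): check, under the
 * address-space reader lock, that the range [uaddr, uaddr + len) is
 * covered by a single mapped segment.
 */
static boolean_t
range_in_one_seg(struct as *as, caddr_t uaddr, size_t len)
{
	struct seg *seg;
	boolean_t ok;

	AS_LOCK_ENTER(as, RW_READER);

	/* find the segment containing the start of the range */
	seg = as_findseg(as, uaddr, 0);

	/* the whole range must end within that same segment */
	ok = (seg != NULL &&
	    (uaddr + len) <= (seg->s_base + seg->s_size)) ? B_TRUE : B_FALSE;

	AS_LOCK_EXIT(as);

	return (ok);
}

xpvtap_segmf_register() above is effectively this check followed by the per-page hat_prepare_mapping()/segmf_add_gref_pte() loop.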
Example #2
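/*
 * xpvtap_user_request_unmap()
 *    Unmap the grefs backing a completed request and free its user ring id.
 */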
static void
xpvtap_user_request_unmap(xpvtap_state_t *state, uint_t uid)
{
	blkif_request_t *req;
	struct seg *seg;
	struct as *as;
	caddr_t uaddr;
	int e;


	as = state->bt_map.um_as;
	if (as == NULL) {
		return;
	}

	/* get a copy of the original request */
	req = &state->bt_map.um_outstanding_reqs[uid];

	/* unmap the grefs for this request */
	if ((req->operation != BLKIF_OP_WRITE_BARRIER) &&
	    (req->operation != BLKIF_OP_FLUSH_DISKCACHE) &&
	    (req->nr_segments != 0)) {
		uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, uid);
		AS_LOCK_ENTER(as, RW_READER);
		seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
		if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
		    (seg->s_base + seg->s_size))) {
			AS_LOCK_EXIT(as);
			xpvtap_rs_free(state->bt_map.um_rs, uid);
			return;
		}

		e = segmf_release_grefs(seg, uaddr, req->nr_segments);
		if (e != 0) {
			cmn_err(CE_WARN, "unable to release grefs");
		}

		AS_LOCK_EXIT(as);
	}

	/* free up the user ring id */
	xpvtap_rs_free(state->bt_map.um_rs, uid);
}
Example #3
/*
 * Function called by an lwp after it resumes from stop().
 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	if (p->p_wprot == NULL)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
	retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}
Example #4
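/*
 * Return non-zero if the segment mapping va reports the
 * S_CAPABILITY_NOMINFLT capability, zero if there is no such segment or
 * the capability is not supported.
 */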
static int
mmpagelock(struct as *as, caddr_t va)
{
	struct seg *seg;
	int i;

	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	seg = as_segat(as, va);
	i = (seg != NULL)? SEGOP_CAPABLE(seg, S_CAPABILITY_NOMINFLT) : 0;
	AS_LOCK_EXIT(as, &as->a_lock);

	return (i);
}
Example #5
/*
 * trap() calls here to determine if a fault is in a watched page.
 * We return nonzero if this is true and the load/store would fail.
 */
int
pr_is_watchpage(caddr_t addr, enum seg_rw rw)
{
	struct as *as = curproc->p_as;
	int rv;

	if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/* Grab the lock because of XHAT (see comment in pr_mappage()) */
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	rv = pr_is_watchpage_as(addr, rw, as);
	AS_LOCK_EXIT(as, &as->a_lock);

	return (rv);
}
Example #6
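/*
 * xpvtap_segmf_unregister()
 *    Address-space callback run when the user mapping goes away: unmap any
 *    outstanding requests' grefs, unlock the gref pages, and remove this
 *    callback.
 */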
/*ARGSUSED*/
static void
xpvtap_segmf_unregister(struct as *as, void *arg, uint_t event)
{
	xpvtap_state_t *state;
	caddr_t uaddr;
	uint_t pgcnt;
	int i;


	state = (xpvtap_state_t *)arg;
	if (!state->bt_map.um_registered) {
		/* remove the callback (which is this routine) */
		(void) as_delete_callback(as, arg);
		return;
	}

	pgcnt = btopr(state->bt_map.um_guest_size);
	uaddr = state->bt_map.um_guest_pages;

	/* unmap any outstanding req's grefs */
	xpvtap_rs_flush(state->bt_map.um_rs, xpvtap_user_request_unmap, state);

	/* Unlock the gref pages */
	for (i = 0; i < pgcnt; i++) {
		AS_LOCK_ENTER(as, RW_WRITER);
		hat_prepare_mapping(as->a_hat, uaddr, NULL);
		hat_unload(as->a_hat, uaddr, PAGESIZE, HAT_UNLOAD_UNLOCK);
		hat_release_mapping(as->a_hat, uaddr);
		AS_LOCK_EXIT(as);
		uaddr += PAGESIZE;
	}

	/* remove the callback (which is this routine) */
	(void) as_delete_callback(as, arg);

	state->bt_map.um_registered = B_FALSE;
}
Example #7
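/*
 * Map a block-aligned range of an xmemnode-backed file into the caller's
 * address space.  For MAP_FIXED requests, first try a fast segxmem_remap()
 * of the existing segment before falling back to as_unmap()/as_map() with
 * segxmem_create.
 */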
static int
xmem_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	struct cred *cred)
{
	struct seg		*seg;
	struct segxmem_crargs	xmem_a;
	struct xmemnode 	*xp = (struct xmemnode *)VTOXN(vp);
	struct xmount 		*xm = (struct xmount *)VTOXM(vp);
	uint_t			blocknumber;
	int 			error;

#ifdef lint
	maxprot = maxprot;
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0)
		return (EINVAL);

	/* offset, length and address all have to be block aligned */

	if (off & (xm->xm_bsize - 1) || len & (xm->xm_bsize - 1) ||
		((ulong_t)*addrp) & (xm->xm_bsize - 1)) {

		return (EINVAL);
	}

	if (vp->v_type != VREG)
		return (ENODEV);

	if (flags & MAP_PRIVATE)
		return (EINVAL);	/* XXX need to be handled */

	/*
	 * Don't allow mapping to locked file
	 */
	if (vn_has_mandatory_locks(vp, xp->xn_mode)) {
		return (EAGAIN);
	}

	if (error = xmem_fillpages(xp, vp, off, len, 1)) {
		return (error);
	}

	blocknumber = off >> xm->xm_bshift;

	if (flags & MAP_FIXED) {
		/*
		 * User specified address - blow away any previous mappings
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		seg = as_findseg(as, *addrp, 0);

		/*
		 * Fast path. segxmem_remap will fail if this is the wrong
		 * segment or if the len is beyond end of seg. If it fails,
		 * we do the regular stuff thru as_* routines.
		 */

		if (seg && (segxmem_remap(seg, vp, *addrp, len,
				&xp->xn_ppa[blocknumber], prot) == 0)) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (0);
		}
		AS_LOCK_EXIT(as, &as->a_lock);
		if (seg)
			(void) as_unmap(as, *addrp, len);

		as_rangelock(as);

		error = valid_usr_range(*addrp, len, prot, as, as->a_userlimit);

		if (error != RANGE_OKAY ||
			as_gap(as, len, addrp, &len, AH_CONTAIN, *addrp)) {
			as_rangeunlock(as);
			return (EINVAL);
		}

	} else {
		as_rangelock(as);
		map_addr(addrp, len, (offset_t)off, 1, flags);
	}

	if (*addrp == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	xmem_a.xma_vp = vp;
	xmem_a.xma_offset = (u_offset_t)off;
	xmem_a.xma_prot = prot;
	xmem_a.xma_cred = cred;
	xmem_a.xma_ppa = &xp->xn_ppa[blocknumber];
	xmem_a.xma_bshift = xm->xm_bshift;

	error = as_map(as, *addrp, len, segxmem_create, &xmem_a);

	as_rangeunlock(as);
	return (error);
}
Example #8
/*
 * Common code for pr_mappage() and pr_unmappage().
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here.  Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.  Note that if an
	 * as_fault() is servicing a fault to a watched page on behalf of an
	 * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
	 * will be set to wp_oprot).  Since this is done while holding as writer
	 * lock, we need to grab as lock (reader lock is good enough).
	 *
	 * p_maplock prevents simultaneous execution of this function.  Under
	 * normal circumstances, holdwatch() will stop all other threads, so the
	 * lock isn't really needed.  But there may be multiple threads within
	 * stop() when SWATCHOK is set, so we need to handle multiple threads
	 * at once.  See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
		pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				/*
				 * Release as lock while in holdwatch()
				 * in case other threads need to grab it.
				 */
				AS_LOCK_EXIT(as, &as->a_lock);
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
				AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;

		prot = pwp->wp_prot;
		if (mapin) {
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}


		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			AS_LOCK_EXIT(as, &as->a_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			SEGOP_GETPROT(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else
			AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}

		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	}

	AS_LOCK_EXIT(as, &as->a_lock);
	mutex_exit(&p->p_maplock);

	return (rv);
}
Example #9
/*
 * Private ioctl for libkvm to support kvm_physaddr().
 * Given an address space and a VA, compute the PA.
 */
static int
mmioctl_vtop(intptr_t data)
{
#ifdef _SYSCALL32
	mem_vtop32_t vtop32;
#endif
	mem_vtop_t mem_vtop;
	proc_t *p;
	pfn_t pfn = (pfn_t)PFN_INVALID;
	pid_t pid = 0;
	struct as *as;
	struct seg *seg;

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin((void *)data, &mem_vtop, sizeof (mem_vtop_t)))
			return (EFAULT);
	}
#ifdef _SYSCALL32
	else {
		if (copyin((void *)data, &vtop32, sizeof (mem_vtop32_t)))
			return (EFAULT);
		mem_vtop.m_as = (struct as *)(uintptr_t)vtop32.m_as;
		mem_vtop.m_va = (void *)(uintptr_t)vtop32.m_va;

		if (mem_vtop.m_as != NULL)
			return (EINVAL);
	}
#endif

	if (mem_vtop.m_as == &kas) {
		pfn = hat_getpfnum(kas.a_hat, mem_vtop.m_va);
	} else {
		if (mem_vtop.m_as == NULL) {
			/*
			 * Assume the calling process's address space if the
			 * caller didn't specify one.
			 */
			p = curthread->t_procp;
			if (p == NULL)
				return (EIO);
			mem_vtop.m_as = p->p_as;
		}

		mutex_enter(&pidlock);
		for (p = practive; p != NULL; p = p->p_next) {
			if (p->p_as == mem_vtop.m_as) {
				pid = p->p_pid;
				break;
			}
		}
		mutex_exit(&pidlock);
		if (p == NULL)
			return (EIO);
		p = sprlock(pid);
		if (p == NULL)
			return (EIO);
		as = p->p_as;
		if (as == mem_vtop.m_as) {
			mutex_exit(&p->p_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			for (seg = AS_SEGFIRST(as); seg != NULL;
			    seg = AS_SEGNEXT(as, seg))
				if ((uintptr_t)mem_vtop.m_va -
				    (uintptr_t)seg->s_base < seg->s_size)
					break;
			if (seg != NULL)
				pfn = hat_getpfnum(as->a_hat, mem_vtop.m_va);
			AS_LOCK_EXIT(as, &as->a_lock);
			mutex_enter(&p->p_lock);
		}
		sprunlock(p);
	}
	mem_vtop.m_pfn = pfn;
	if (pfn == PFN_INVALID)
		return (EIO);

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(&mem_vtop, (void *)data, sizeof (mem_vtop_t)))
			return (EFAULT);
	}
#ifdef _SYSCALL32
	else {
		vtop32.m_pfn = mem_vtop.m_pfn;
		if (copyout(&vtop32, (void *)data, sizeof (mem_vtop32_t)))
			return (EFAULT);
	}
#endif

	return (0);
}
Example #10
/* ARGSUSED */
static int64_t
cfork(int isvfork, int isfork1, int flags)
{
	proc_t *p = ttoproc(curthread);
	struct as *as;
	proc_t *cp, **orphpp;
	klwp_t *clone;
	kthread_t *t;
	task_t *tk;
	rval_t	r;
	int error;
	int i;
	rctl_set_t *dup_set;
	rctl_alloc_gp_t *dup_gp;
	rctl_entity_p_t e;
	lwpdir_t *ldp;
	lwpent_t *lep;
	lwpent_t *clep;

	/*
	 * Allow only these two flags.
	 */
	if ((flags & ~(FORK_NOSIGCHLD | FORK_WAITPID)) != 0) {
		error = EINVAL;
		goto forkerr;
	}

	/*
	 * fork is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp) {
		error = ENOTSUP;
		goto forkerr;
	}

	if ((error = secpolicy_basic_fork(CRED())) != 0)
		goto forkerr;

	/*
	 * If the calling lwp is doing a fork1() then the
	 * other lwps in this process are not duplicated and
	 * don't need to be held where their kernel stacks can be
	 * cloned.  If doing forkall(), the process is held with
	 * SHOLDFORK, so that the lwps are at a point where their
	 * stacks can be copied which is on entry or exit from
	 * the kernel.
	 */
	if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) {
		aston(curthread);
		error = EINTR;
		goto forkerr;
	}

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully constructed
	 * before creating the child process structure.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	mutex_enter(&p->p_lock);
	/*
	 * If this is vfork(), cancel any suspend request we might
	 * have gotten from some other thread via lwp_suspend().
	 * Otherwise we could end up with a deadlock on return
	 * from the vfork() in both the parent and the child.
	 */
	if (isvfork)
		curthread->t_proc_flag &= ~TP_HOLDLWP;
	/*
	 * Prevent our resource set associations from being changed during fork.
	 */
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/*
	 * Create a child proc struct. Place a VN_HOLD on appropriate vnodes.
	 */
	if (getproc(&cp, 0) < 0) {
		mutex_enter(&p->p_lock);
		pool_barrier_exit();
		continuelwps(p);
		mutex_exit(&p->p_lock);
		error = EAGAIN;
		goto forkerr;
	}

	TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p);

	/*
	 * Assign an address space to child
	 */
	if (isvfork) {
		/*
		 * Clear any watched areas and remember the
		 * watched pages for restoring in vfwait().
		 */
		as = p->p_as;
		if (avl_numnodes(&as->a_wpage) != 0) {
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
			as_clearwatch(as);
			p->p_wpage = as->a_wpage;
			avl_create(&as->a_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			AS_LOCK_EXIT(as, &as->a_lock);
		}
		cp->p_as = as;
		cp->p_flag |= SVFORK;
	} else {
Example #11
/*
 * Perform I/O to a given process. This will return EIO if we detect
 * corrupt memory and ENXIO if there is no such mapped address in the
 * user process's address space.
 */
static int
urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;
	enum seg_rw rw;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !page_valid(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	SEGOP_GETPROT(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & prot_rw) == 0) {
		protchanged = 1;
		err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);

		if (err == IE_RETRY) {
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}

		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (ENXIO);
		}
	}

	/*
	 * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
	 * sharing to avoid a copy on write of a softlocked page by another
	 * thread. But since we locked the address space as a writer no other
	 * thread can cause a copy on write. S_READ_NOCOW is passed as the
	 * access type to tell segvn that it's ok not to do a copy-on-write
	 * for this SOFTLOCK fault.
	 */
	if (writing)
		rw = S_WRITE;
	else if (seg->s_ops == &segvn_ops)
		rw = S_READ_NOCOW;
	else
		rw = S_READ;

	if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
		if (protchanged)
			(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to read or write off the end of the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy to our local buffer,
	 * then map the page out and unlock it.
	 */
	vaddr = mapin(as, addr, writing);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (writing) {
				if (segdev_copyto(seg, addr, buf, vaddr, len))
					error = ENXIO;
			} else {
				if (segdev_copyfrom(seg, addr, vaddr, buf, len))
					error = ENXIO;
			}
		} else {
			if (writing)
				bcopy(buf, vaddr, len);
			else
				bcopy(vaddr, buf, len);
		}
	} else {
		error = EIO;
	}
	no_trap();

	/*
	 * If we're writing to an executable page, we may need to synchronize
	 * the I$ with the modifications we made through the D$.
	 */
	if (writing && (prot & PROT_EXEC))
		sync_icache(vaddr, (uint_t)len);

	mapout(as, addr, vaddr, writing);

	if (rw == S_READ_NOCOW)
		rw = S_READ;

	(void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);

	if (protchanged)
		(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);

	AS_LOCK_EXIT(as, &as->a_lock);

	return (error);
}
Example #12
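/*
 * Map a batch of foreign MFNs into the calling process: each MFN in the
 * user-supplied array is handed to segmf for the corresponding page of the
 * target range, and MFNs that cannot be added to segmf are flagged back to
 * the caller with XEN_DOMCTL_PFINFO_XTAB.
 */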
/*ARGSUSED2*/
static int
do_privcmd_mmapbatch(void *uarg, int mode, cred_t *cr)
{
	privcmd_mmapbatch_t __mmapbatch, *mmb = &__mmapbatch;
	struct as *as = curproc->p_as;
	struct seg *seg;
	int i, error = 0;
	caddr_t addr;
	ulong_t *ulp;

	if (ddi_copyin(uarg, mmb, sizeof (*mmb), mode))
		return (EFAULT);

	DTRACE_XPV3(mmapbatch__start, domid_t, mmb->dom, int, mmb->num,
	    caddr_t, mmb->addr);

	addr = (caddr_t)mmb->addr;
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	if ((seg = as_findseg(as, addr, 0)) == NULL ||
	    addr + ptob(mmb->num) > seg->s_base + seg->s_size) {
		error = EINVAL;
		goto done;
	}

	for (i = 0, ulp = mmb->arr;
	    i < mmb->num; i++, addr += PAGESIZE, ulp++) {
		mfn_t mfn;

		if (fulword(ulp, &mfn) != 0) {
			error = EFAULT;
			break;
		}

		if (mfn == MFN_INVALID) {
			/*
			 * This mfn is invalid and should not be added to
			 * segmf, as we'd only cause an immediate EFAULT when
			 * we tried to fault it in.
			 */
			mfn |= XEN_DOMCTL_PFINFO_XTAB;
			continue;
		}

		if (segmf_add_mfns(seg, addr, mfn, 1, mmb->dom) == 0)
			continue;

		/*
		 * Tell the process that this MFN could not be mapped, so it
		 * won't later try to access it.
		 */
		mfn |= XEN_DOMCTL_PFINFO_XTAB;
		if (sulword(ulp, mfn) != 0) {
			error = EFAULT;
			break;
		}
	}

done:
	AS_LOCK_EXIT(as, &as->a_lock);

	DTRACE_XPV3(mmapbatch__end, int, error, struct seg *, seg, caddr_t,
	    mmb->addr);

	return (error);
}
Example #13
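/*
 * Map one or more ranges of foreign MFNs into the calling process: for each
 * privcmd_mmap_entry_t, find the segment covering the requested VA range
 * and hand the MFN range to segmf.
 */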
/*ARGSUSED2*/
int
do_privcmd_mmap(void *uarg, int mode, cred_t *cr)
{
	privcmd_mmap_t __mmapcmd, *mmc = &__mmapcmd;
	privcmd_mmap_entry_t *umme;
	struct as *as = curproc->p_as;
	struct seg *seg;
	int i, error = 0;

	if (ddi_copyin(uarg, mmc, sizeof (*mmc), mode))
		return (EFAULT);

	DTRACE_XPV3(mmap__start, domid_t, mmc->dom, int, mmc->num,
	    privcmd_mmap_entry_t *, mmc->entry);

	if (mmc->dom == DOMID_SELF) {
		error = ENOTSUP;	/* Too paranoid? */
		goto done;
	}

	for (umme = mmc->entry, i = 0; i < mmc->num; i++, umme++) {
		privcmd_mmap_entry_t __mmapent, *mme = &__mmapent;
		caddr_t addr;

		if (ddi_copyin(umme, mme, sizeof (*mme), mode)) {
			error = EFAULT;
			break;
		}

		DTRACE_XPV3(mmap__entry, ulong_t, mme->va, ulong_t, mme->mfn,
		    ulong_t, mme->npages);

		if (mme->mfn == MFN_INVALID) {
			error = EINVAL;
			break;
		}

		addr = (caddr_t)mme->va;

		/*
		 * Find the segment we want to mess with, then add
		 * the mfn range to the segment.
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		if ((seg = as_findseg(as, addr, 0)) == NULL ||
		    addr + mmu_ptob(mme->npages) > seg->s_base + seg->s_size)
			error = EINVAL;
		else
			error = segmf_add_mfns(seg, addr,
			    mme->mfn, mme->npages, mmc->dom);
		AS_LOCK_EXIT(as, &as->a_lock);

		if (error != 0)
			break;
	}

done:
	DTRACE_XPV1(mmap__end, int, error);

	return (error);
}
Example #14
/*
 * xpvtap_user_request_map()
 */
static int
xpvtap_user_request_map(xpvtap_state_t *state, blkif_request_t *req,
    uint_t *uid)
{
	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct seg *seg;
	struct as *as;
	domid_t domid;
	caddr_t uaddr;
	uint_t flags;
	int i;
	int e;


	domid = xvdi_get_oeid(state->bt_dip);

	as = state->bt_map.um_as;
	if ((as == NULL) || (state->bt_map.um_guest_pages == NULL)) {
		return (DDI_FAILURE);
	}

	/* has to happen after segmap returns */
	if (!state->bt_map.um_registered) {
		/* register the pte's with segmf */
		e = xpvtap_segmf_register(state);
		if (e != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	/* alloc an ID for the user ring */
	e = xpvtap_rs_alloc(state->bt_map.um_rs, uid);
	if (e != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* if we don't have any segments to map, we're done */
	if ((req->operation == BLKIF_OP_WRITE_BARRIER) ||
	    (req->operation == BLKIF_OP_FLUSH_DISKCACHE) ||
	    (req->nr_segments == 0)) {
		return (DDI_SUCCESS);
	}

	/* get the app's gref address */
	uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, *uid);

	AS_LOCK_ENTER(as, RW_READER);
	seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
	if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
	    (seg->s_base + seg->s_size))) {
		AS_LOCK_EXIT(as);
		return (DDI_FAILURE);
	}

	/* if we are reading from disk, we are writing into memory */
	flags = 0;
	if (req->operation == BLKIF_OP_READ) {
		flags |= SEGMF_GREF_WR;
	}

	/* Load the grefs into seg_mf */
	for (i = 0; i < req->nr_segments; i++) {
		gref[i] = req->seg[i].gref;
	}
	(void) segmf_add_grefs(seg, uaddr, flags, gref, req->nr_segments,
	    domid);

	AS_LOCK_EXIT(as);

	return (DDI_SUCCESS);
}