Example #1
/*
 * xpvtap_segmf_register()
 */
static int
xpvtap_segmf_register(xpvtap_state_t *state)
{
	struct seg *seg;
	uint64_t pte_ma;
	struct as *as;
	caddr_t uaddr;
	uint_t pgcnt;
	int i;


	as = state->bt_map.um_as;
	pgcnt = btopr(state->bt_map.um_guest_size);
	uaddr = state->bt_map.um_guest_pages;

	if (pgcnt == 0) {
		return (DDI_FAILURE);
	}

	AS_LOCK_ENTER(as, RW_READER);

	seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
	if ((seg == NULL) || ((uaddr + state->bt_map.um_guest_size) >
	    (seg->s_base + seg->s_size))) {
		AS_LOCK_EXIT(as);
		return (DDI_FAILURE);
	}

	/*
	 * lock down the htables so the HAT can't steal them. Register the
	 * PTE MA's for each gref page with seg_mf so we can do user space
	 * gref mappings.
	 */
	for (i = 0; i < pgcnt; i++) {
		hat_prepare_mapping(as->a_hat, uaddr, &pte_ma);
		hat_devload(as->a_hat, uaddr, PAGESIZE, (pfn_t)0,
		    PROT_READ | PROT_WRITE | PROT_USER | HAT_UNORDERED_OK,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
		hat_release_mapping(as->a_hat, uaddr);
		segmf_add_gref_pte(seg, uaddr, pte_ma);
		uaddr += PAGESIZE;
	}

	state->bt_map.um_registered = B_TRUE;

	AS_LOCK_EXIT(as);

	return (DDI_SUCCESS);
}
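All of these examples use as_findseg() the same way: take the address-space lock as a reader, look up the segment that contains the starting address, and verify that the whole range fits inside that one segment before doing any per-page work while the lock is still held. A minimal sketch of that pattern follows; the function name and the EINVAL return are illustrative only, and the sketch uses the two-argument AS_LOCK_ENTER() form seen in Examples #1, #2 and #6 (Examples #3 through #5 use the older form that passes &as->a_lock explicitly).
/*
 * Sketch only (not from any of the drivers shown here): validate that
 * [uaddr, uaddr + len) lies entirely within one segment of the address
 * space, then do the per-page work while the AS lock is held.
 */
static int
xx_operate_on_range(struct as *as, caddr_t uaddr, size_t len)
{
	struct seg *seg;

	AS_LOCK_ENTER(as, RW_READER);

	/* segment containing uaddr, or NULL if the address is not mapped */
	seg = as_findseg(as, uaddr, 0);

	/* the whole range must fall inside that one segment */
	if ((seg == NULL) ||
	    ((uaddr + len) > (seg->s_base + seg->s_size))) {
		AS_LOCK_EXIT(as);
		return (EINVAL);
	}

	/* ... per-page work on [uaddr, uaddr + len) goes here ... */

	AS_LOCK_EXIT(as);
	return (0);
}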
Example #2
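/*
 * xpvtap_user_request_unmap() - release the grefs that were mapped for a
 * completed request and free its user ring ID.
 */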
static void
xpvtap_user_request_unmap(xpvtap_state_t *state, uint_t uid)
{
	blkif_request_t *req;
	struct seg *seg;
	struct as *as;
	caddr_t uaddr;
	int e;


	as = state->bt_map.um_as;
	if (as == NULL) {
		return;
	}

	/* get a copy of the original request */
	req = &state->bt_map.um_outstanding_reqs[uid];

	/* unmap the grefs for this request */
	if ((req->operation != BLKIF_OP_WRITE_BARRIER) &&
	    (req->operation != BLKIF_OP_FLUSH_DISKCACHE) &&
	    (req->nr_segments != 0)) {
		uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, uid);
		AS_LOCK_ENTER(as, RW_READER);
		seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
		if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
		    (seg->s_base + seg->s_size))) {
			AS_LOCK_EXIT(as);
			xpvtap_rs_free(state->bt_map.um_rs, uid);
			return;
		}

		e = segmf_release_grefs(seg, uaddr, req->nr_segments);
		if (e != 0) {
			cmn_err(CE_WARN, "unable to release grefs");
		}

		AS_LOCK_EXIT(as);
	}

	/* free up the user ring id */
	xpvtap_rs_free(state->bt_map.um_rs, uid);
}
Example #3
static int
xmem_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	struct cred *cred)
{
	struct seg		*seg;
	struct segxmem_crargs	xmem_a;
	struct xmemnode 	*xp = (struct xmemnode *)VTOXN(vp);
	struct xmount 		*xm = (struct xmount *)VTOXM(vp);
	uint_t			blocknumber;
	int 			error;

#ifdef lint
	maxprot = maxprot;
#endif
	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off < 0)
		return (EINVAL);

	/* offset, length and address all have to be block aligned */

	if (off & (xm->xm_bsize - 1) || len & (xm->xm_bsize - 1) ||
		((ulong_t)*addrp) & (xm->xm_bsize - 1)) {

		return (EINVAL);
	}

	if (vp->v_type != VREG)
		return (ENODEV);

	if (flags & MAP_PRIVATE)
		return (EINVAL);	/* XXX need to be handled */

	/*
	 * Don't allow mapping to a locked file
	 */
	if (vn_has_mandatory_locks(vp, xp->xn_mode)) {
		return (EAGAIN);
	}

	if (error = xmem_fillpages(xp, vp, off, len, 1)) {
		return (error);
	}

	blocknumber = off >> xm->xm_bshift;

	if (flags & MAP_FIXED) {
		/*
		 * User specified address - blow away any previous mappings
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		seg = as_findseg(as, *addrp, 0);

		/*
		 * Fast path. segxmem_remap will fail if this is the wrong
		 * segment or if the len is beyond end of seg. If it fails,
		 * we do the regular stuff thru as_* routines.
		 */

		if (seg && (segxmem_remap(seg, vp, *addrp, len,
				&xp->xn_ppa[blocknumber], prot) == 0)) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (0);
		}
		AS_LOCK_EXIT(as, &as->a_lock);
		if (seg)
			(void) as_unmap(as, *addrp, len);

		as_rangelock(as);

		error = valid_usr_range(*addrp, len, prot, as, as->a_userlimit);

		if (error != RANGE_OKAY ||
			as_gap(as, len, addrp, &len, AH_CONTAIN, *addrp)) {
			as_rangeunlock(as);
			return (EINVAL);
		}

	} else {
		as_rangelock(as);
		map_addr(addrp, len, (offset_t)off, 1, flags);
	}

	if (*addrp == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	xmem_a.xma_vp = vp;
	xmem_a.xma_offset = (u_offset_t)off;
	xmem_a.xma_prot = prot;
	xmem_a.xma_cred = cred;
	xmem_a.xma_ppa = &xp->xn_ppa[blocknumber];
	xmem_a.xma_bshift = xm->xm_bshift;

	error = as_map(as, *addrp, len, segxmem_create, &xmem_a);

	as_rangeunlock(as);
	return (error);
}
Example #4
/*ARGSUSED2*/
static int
do_privcmd_mmapbatch(void *uarg, int mode, cred_t *cr)
{
	privcmd_mmapbatch_t __mmapbatch, *mmb = &__mmapbatch;
	struct as *as = curproc->p_as;
	struct seg *seg;
	int i, error = 0;
	caddr_t addr;
	ulong_t *ulp;

	if (ddi_copyin(uarg, mmb, sizeof (*mmb), mode))
		return (EFAULT);

	DTRACE_XPV3(mmapbatch__start, domid_t, mmb->dom, int, mmb->num,
	    caddr_t, mmb->addr);

	addr = (caddr_t)mmb->addr;
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	if ((seg = as_findseg(as, addr, 0)) == NULL ||
	    addr + ptob(mmb->num) > seg->s_base + seg->s_size) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Walk the caller's array of MFNs a page at a time, mapping each
	 * valid MFN into the segment and flagging any that cannot be mapped.
	 */
	for (i = 0, ulp = mmb->arr;
	    i < mmb->num; i++, addr += PAGESIZE, ulp++) {
		mfn_t mfn;

		if (fulword(ulp, &mfn) != 0) {
			error = EFAULT;
			break;
		}

		if (mfn == MFN_INVALID) {
			/*
			 * This mfn is invalid and should not be added to
			 * segmf, as we'd only cause an immediate EFAULT when
			 * we tried to fault it in.
			 */
			mfn |= XEN_DOMCTL_PFINFO_XTAB;
			continue;
		}

		if (segmf_add_mfns(seg, addr, mfn, 1, mmb->dom) == 0)
			continue;

		/*
		 * Tell the process that this MFN could not be mapped, so it
		 * won't later try to access it.
		 */
		mfn |= XEN_DOMCTL_PFINFO_XTAB;
		if (sulword(ulp, mfn) != 0) {
			error = EFAULT;
			break;
		}
	}

done:
	AS_LOCK_EXIT(as, &as->a_lock);

	DTRACE_XPV3(mmapbatch__end, int, error, struct seg *, seg, caddr_t,
	    mmb->addr);

	return (error);
}
Example #5
/*ARGSUSED2*/
int
do_privcmd_mmap(void *uarg, int mode, cred_t *cr)
{
	privcmd_mmap_t __mmapcmd, *mmc = &__mmapcmd;
	privcmd_mmap_entry_t *umme;
	struct as *as = curproc->p_as;
	struct seg *seg;
	int i, error = 0;

	if (ddi_copyin(uarg, mmc, sizeof (*mmc), mode))
		return (EFAULT);

	DTRACE_XPV3(mmap__start, domid_t, mmc->dom, int, mmc->num,
	    privcmd_mmap_entry_t *, mmc->entry);

	if (mmc->dom == DOMID_SELF) {
		error = ENOTSUP;	/* Too paranoid? */
		goto done;
	}

	for (umme = mmc->entry, i = 0; i < mmc->num; i++, umme++) {
		privcmd_mmap_entry_t __mmapent, *mme = &__mmapent;
		caddr_t addr;

		if (ddi_copyin(umme, mme, sizeof (*mme), mode)) {
			error = EFAULT;
			break;
		}

		DTRACE_XPV3(mmap__entry, ulong_t, mme->va, ulong_t, mme->mfn,
		    ulong_t, mme->npages);

		if (mme->mfn == MFN_INVALID) {
			error = EINVAL;
			break;
		}

		addr = (caddr_t)mme->va;

		/*
		 * Find the segment we want to mess with, then add
		 * the mfn range to the segment.
		 */
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		if ((seg = as_findseg(as, addr, 0)) == NULL ||
		    addr + mmu_ptob(mme->npages) > seg->s_base + seg->s_size)
			error = EINVAL;
		else
			error = segmf_add_mfns(seg, addr,
			    mme->mfn, mme->npages, mmc->dom);
		AS_LOCK_EXIT(as, &as->a_lock);

		if (error != 0)
			break;
	}

done:
	DTRACE_XPV1(mmap__end, int, error);

	return (error);
}
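The two privcmd handlers above only map MFNs into a range that already exists as a segment in the calling process, which is why both start with as_findseg(). In the usual flow the process first mmap()s the privcmd device to reserve that range, then passes its address to the ioctl. A rough user-space sketch follows, assuming the conventional /dev/xen/privcmd device node, the IOCTL_PRIVCMD_MMAPBATCH request name, and a privcmd header path; none of these names appear in the excerpt above, and the struct fields used are only those visible in the kernel code.
/*
 * Hypothetical caller: map npages foreign frames of domain dom, starting
 * at machine frame mfn0, into this process.  fd is an already-open
 * descriptor for the privcmd device (assumed to be /dev/xen/privcmd).
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <stropts.h>			/* ioctl() */
#include <xen/sys/privcmd.h>		/* assumed: privcmd_mmapbatch_t */

void *
map_foreign_pages(int fd, unsigned short dom, unsigned long mfn0, int npages)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	unsigned long arr[npages];
	privcmd_mmapbatch_t mmb;
	void *va;
	int i;

	/* reserve a range backed by the privcmd driver (creates the segment) */
	va = mmap(NULL, npages * pagesize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (va == MAP_FAILED)
		return (NULL);

	for (i = 0; i < npages; i++)
		arr[i] = mfn0 + i;

	mmb.dom = dom;
	mmb.num = npages;
	mmb.addr = (uintptr_t)va;
	mmb.arr = arr;

	/* ask the driver to back the range with the foreign MFNs */
	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &mmb) != 0) {
		(void) munmap(va, npages * pagesize);
		return (NULL);
	}

	return (va);
}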
Example #6
/*
 * xpvtap_user_request_map()
 */
static int
xpvtap_user_request_map(xpvtap_state_t *state, blkif_request_t *req,
    uint_t *uid)
{
	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct seg *seg;
	struct as *as;
	domid_t domid;
	caddr_t uaddr;
	uint_t flags;
	int i;
	int e;


	domid = xvdi_get_oeid(state->bt_dip);

	as = state->bt_map.um_as;
	if ((as == NULL) || (state->bt_map.um_guest_pages == NULL)) {
		return (DDI_FAILURE);
	}

	/* has to happen after segmap returns */
	if (!state->bt_map.um_registered) {
		/* register the pte's with segmf */
		e = xpvtap_segmf_register(state);
		if (e != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	/* alloc an ID for the user ring */
	e = xpvtap_rs_alloc(state->bt_map.um_rs, uid);
	if (e != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* if we don't have any segments to map, we're done */
	if ((req->operation == BLKIF_OP_WRITE_BARRIER) ||
	    (req->operation == BLKIF_OP_FLUSH_DISKCACHE) ||
	    (req->nr_segments == 0)) {
		return (DDI_SUCCESS);
	}

	/* get the app's gref address */
	uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, *uid);

	AS_LOCK_ENTER(as, RW_READER);
	seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
	if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) >
	    (seg->s_base + seg->s_size))) {
		AS_LOCK_EXIT(as);
		return (DDI_FAILURE);
	}

	/* if we are reading from disk, we are writing into memory */
	flags = 0;
	if (req->operation == BLKIF_OP_READ) {
		flags |= SEGMF_GREF_WR;
	}

	/* Load the grefs into seg_mf */
	for (i = 0; i < req->nr_segments; i++) {
		gref[i] = req->seg[i].gref;
	}
	(void) segmf_add_grefs(seg, uaddr, flags, gref, req->nr_segments,
	    domid);

	AS_LOCK_EXIT(as);

	return (DDI_SUCCESS);
}