Example 1
/*
 * Common code for pr_mappage() and pr_unmappage().
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here.  Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.  Note that if
	 * an as_fault() is servicing a fault to a watched page on behalf of
	 * an XHAT provider, the watchpoint will be temporarily cleared (and
	 * wp_prot will be set to wp_oprot).  Since this is done while holding
	 * the as writer lock, we need to grab the as lock here (the reader
	 * lock is good enough).
	 *
	 * p_maplock prevents simultaneous execution of this function.  Under
	 * normal circumstances, holdwatch() will stop all other threads, so
	 * the lock isn't really needed.  But there may be multiple threads
	 * within stop() when SWATCHOK is set, so we need to handle multiple
	 * threads at once.  See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
	    pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				/*
				 * Release as lock while in holdwatch()
				 * in case other threads need to grab it.
				 */
				AS_LOCK_EXIT(as, &as->a_lock);
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
				AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;

		prot = pwp->wp_prot;
		if (mapin) {
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}

		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			AS_LOCK_EXIT(as, &as->a_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			SEGOP_GETPROT(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else
			AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}

		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	}

	AS_LOCK_EXIT(as, &as->a_lock);
	mutex_exit(&p->p_maplock);

	return (rv);
}
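
For context, the header comment says this is common code for pr_mappage() and pr_unmappage(), and the loop relies on the rw_to_prot()/rw_to_index() helpers to turn a seg_rw access type into a protection bit and into an index into the wp_kmap[]/wp_umap[] counter arrays. Below is a minimal sketch of how those pieces might look; the helper bodies and the wrapper signatures are assumptions inferred from the usage above, not part of this listing.

/*
 * Hedged sketch, not from this listing: plausible forms of the helpers
 * and wrappers implied by pr_do_mappage() above.
 */

/* Map a seg_rw access type to the corresponding protection bit. */
static uint_t
rw_to_prot(enum seg_rw rw)
{
	switch (rw) {
	case S_EXEC:
		return (PROT_EXEC);
	case S_READ:
		return (PROT_READ);
	case S_WRITE:
		return (PROT_WRITE);
	default:
		return (0);	/* can't happen */
	}
}

/*
 * Map a seg_rw access type to an index into wp_kmap[]/wp_umap[].
 * Returning the X/W/R constants symbolically keeps this sketch
 * independent of whatever numeric order they are defined in.
 */
static int
rw_to_index(enum seg_rw rw)
{
	switch (rw) {
	default:	/* treat an unknown access type as exec */
	case S_EXEC:
		return (X);
	case S_WRITE:
		return (W);
	case S_READ:
		return (R);
	}
}

/* Map the watched pages in [addr, addr + size) for the given access. */
int
pr_mappage(caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	return (pr_do_mappage(addr, size, 1, rw, kernel));
}

/* Undo a previous pr_mappage(); the page count result is not needed. */
void
pr_unmappage(caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	(void) pr_do_mappage(addr, size, 0, rw, kernel);
}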
Example 2
/*
 * Perform I/O to a given process. This will return EIO if we detect
 * corrupt memory and ENXIO if there is no such mapped address in the
 * user process's address space.
 */
static int
urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;
	enum seg_rw rw;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !page_valid(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	SEGOP_GETPROT(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & prot_rw) == 0) {
		protchanged = 1;
		err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);

		if (err == IE_RETRY) {
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}

		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (ENXIO);
		}
	}

	/*
	 * segvn may do a copy-on-write for the F_SOFTLOCK/S_READ case to break
	 * sharing, to avoid a copy-on-write of a softlocked page by another
	 * thread.  But since we locked the address space as a writer, no other
	 * thread can cause a copy-on-write.  S_READ_NOCOW is passed as the
	 * access type to tell segvn that it's OK not to do a copy-on-write
	 * for this SOFTLOCK fault.
	 */
	if (writing)
		rw = S_WRITE;
	else if (seg->s_ops == &segvn_ops)
		rw = S_READ_NOCOW;
	else
		rw = S_READ;

	if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
		if (protchanged)
			(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to read or write off the end of the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy to our local buffer,
	 * then map the page out and unlock it.
	 */
	vaddr = mapin(as, addr, writing);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (writing) {
				if (segdev_copyto(seg, addr, buf, vaddr, len))
					error = ENXIO;
			} else {
				if (segdev_copyfrom(seg, addr, vaddr, buf, len))
					error = ENXIO;
			}
		} else {
			if (writing)
				bcopy(buf, vaddr, len);
			else
				bcopy(vaddr, buf, len);
		}
	} else {
		error = EIO;
	}
	no_trap();

	/*
	 * If we're writing to an executable page, we may need to synchronize
	 * the I$ with the modifications we made through the D$.
	 */
	if (writing && (prot & PROT_EXEC))
		sync_icache(vaddr, (uint_t)len);

	mapout(as, addr, vaddr, writing);

	if (rw == S_READ_NOCOW)
		rw = S_READ;

	(void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);

	if (protchanged)
		(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);

	AS_LOCK_EXIT(as, &as->a_lock);

	return (error);
}
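
urw() itself only handles a single page, per the ASSERT that the request does not run off the end of the page. A caller would typically reach it through thin read/write wrappers along the following lines; the names uread() and uwrite() are hypothetical, chosen only to illustrate the writing flag.

/*
 * Hedged sketch, not part of this listing: plausible wrappers around
 * urw().  Each call must stay within one page, per the ASSERT in urw().
 */
int
uread(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 0, buf, len, a));	/* writing == 0: read from p */
}

int
uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 1, buf, len, a));	/* writing != 0: write to p */
}

A caller copying an arbitrary range would loop over pages, clamping each chunk to page + PAGESIZE - addr, and map the EIO (corrupt memory) and ENXIO (no such mapping) returns to its own error reporting.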