Example #1
/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => called with VM data structures unlocked (usually, see below)
 * => we get a write lock on the maps and clear needs_copy for a VA
 * => if we are out of RAM we sleep (waiting for more)
 */
static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{

	/* while we haven't done the job */
	while (1) {
		/* no mapping?  give up. */
		if (uvmfault_lookup(ufi, TRUE) == FALSE)
			return;

		/* copy if needed. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
				ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/* didn't work?  must be out of RAM.  unlock and sleep. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, TRUE);
			uvm_wait("fltamapcopy");
			continue;
		}

		/* got it!  unlock and return */
		uvmfault_unlockmaps(ufi, TRUE);
		return;
	}
	/*NOTREACHED*/
}
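
For context, a sketch of the caller side, which mirrors the needs_copy path of uvm_fault() in Example #3 below: the fault handler drops its read lock, lets uvmfault_amapcopy() take the write lock and clear the flag, and then restarts the fault from scratch.

	/* caller-side sketch (compare the ReFault path in Example #3) */
	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		uvmfault_unlockmaps(&ufi, FALSE);	/* drop read lock */
		uvmfault_amapcopy(&ufi);	/* write-lock, clear needs_copy */
		goto ReFault;			/* restart from ground zero */
	}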
Example #2
int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{
	struct uvm_faultinfo ufi;
	void **result, **output;
	int rv, error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * ensure that one and only one of the flags is set
	 */

	KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
		((flags & UVM_LOAN_TOPAGE) == 0));
	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * "output" is a pointer to the current place to put the loaned page.
	 */

	result = v;
	output = &result[0];	/* start at the beginning ... */

	/*
	 * while we've got pages to do
	 */

	while (len > 0) {

		/*
		 * fill in params for a call to uvmfault_lookup
		 */

		ufi.orig_map = map;
		ufi.orig_rvaddr = start;
		ufi.orig_size = len;

		/*
		 * do the lookup; the only time this will fail is if we hit
		 * an unmapped region (an error)
		 */

		if (!uvmfault_lookup(&ufi, false)) {
			error = ENOENT;
			goto fail;
		}

		/*
		 * map now locked.  now do the loanout...
		 */

		rv = uvm_loanentry(&ufi, &output, flags);
		if (rv < 0) {
			/* all unlocked due to error */
			error = EINVAL;
			goto fail;
		}

		/*
		 * done!  the map is unlocked.  advance, if possible.
		 *
		 * XXXCDC: could be recoded to hold the map lock with
		 *	   smarter code (but it only happens on map entry
		 *	   boundaries, so it isn't that bad).
		 */

		if (rv) {
			rv <<= PAGE_SHIFT;
			len -= rv;
			start += rv;
		}
	}
	UVMHIST_LOG(loanhist, "success", 0,0,0,0);
	return 0;

fail:
	/*
	 * failed to complete loans.  drop any loans and return failure code.
	 * map is already unlocked.
	 */

	if (output - result) {
		if (flags & UVM_LOAN_TOANON) {
			uvm_unloananon((struct vm_anon **)result,
			    output - result);
		} else {
			uvm_unloanpage((struct vm_page **)result,
			    output - result);
		}
	}
	UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
	return (error);
}
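
A hypothetical usage sketch (not from the source; loan_example, the 4-page count, and the page-aligned va are assumptions for illustration): loan pages out of a map with UVM_LOAN_TOPAGE, then return them with uvm_unloanpage(), the same cleanup the fail: label above performs for partial loans.

static int
loan_example(struct vm_map *map, vaddr_t va)
{
	struct vm_page *pages[4];	/* one result slot per loaned page */
	int npg = 4;
	int error;

	/* "va" assumed page-aligned; len is given in bytes */
	error = uvm_loan(map, va, (vsize_t)npg << PAGE_SHIFT,
	    pages, UVM_LOAN_TOPAGE);
	if (error)
		return (error);	/* uvm_loan already dropped partial loans */

	/* ... use the loaned pages ... */

	uvm_unloanpage(pages, npg);	/* drop the loans when done */
	return (0);
}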
Example #3
int
uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
    vm_prot_t access_type)
{
	struct uvm_faultinfo ufi;
	vm_prot_t enter_prot;
	boolean_t wired, narrow, promote, locked, shadowed;
	int npages, nback, nforw, centeridx, result, lcv, gotpages;
	vaddr_t startva, currva;
	voff_t uoff;
	paddr_t pa; 
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
	struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;

	anon = NULL;
	pg = NULL;

	uvmexp.faults++;	/* XXX: locking? */

	/*
	 * init the IN parameters in the ufi
	 */

	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
	if (fault_type == VM_FAULT_WIRE)
		narrow = TRUE;		/* don't look for neighborhood
					 * pages on wire */
	else
		narrow = FALSE;		/* normal fault */

	/*
	 * "goto ReFault" means restart the page fault from ground zero.
	 */
ReFault:

	/*
	 * lookup and lock the maps
	 */

	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
		return (EFAULT);
	}
	/* locked: maps(read) */

#ifdef DIAGNOSTIC
	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
		panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
		    ufi.map, vaddr);
#endif

	/*
	 * check protection
	 */

	if ((ufi.entry->protection & access_type) != access_type) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EACCES);
	}

	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
	 * the entry is wired or we are fault-wiring the pg.
	 */

	enter_prot = ufi.entry->protection;
	wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE);
	if (wired)
		access_type = enter_prot; /* full access for wired */

	/*
	 * handle "needs_copy" case.   if we need to copy the amap we will
	 * have to drop our readlock and relock it with a write lock.  (we
	 * need a write lock to change anything in a map entry [e.g.
	 * needs_copy]).
	 */

	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		if ((access_type & VM_PROT_WRITE) ||
		    (ufi.entry->object.uvm_obj == NULL)) {
			/* need to clear */
			uvmfault_unlockmaps(&ufi, FALSE);
			uvmfault_amapcopy(&ufi);
			uvmexp.fltamcopy++;
			goto ReFault;

		} else {

			/*
			 * ensure that we pmap_enter page R/O since
			 * needs_copy is still true
			 */
			enter_prot &= ~VM_PROT_WRITE; 

		}
	}

	/*
	 * identify the players
	 */

	amap = ufi.entry->aref.ar_amap;		/* top layer */
	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */

	/*
	 * check for a case 0 fault.  if nothing backing the entry then
	 * error now.
	 */

	if (amap == NULL && uobj == NULL) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EFAULT);
	}

	/*
	 * establish range of interest based on advice from mapper
	 * and then clip to fit map entry.   note that we only want
	 * to do this the first time through the fault.   if we 
	 * ReFault we will disable this by setting "narrow" to true.
	 */

	if (narrow == FALSE) {

		/* wide fault (!narrow) */
		KASSERT(uvmadvice[ufi.entry->advice].advice ==
			 ufi.entry->advice);
		nback = min(uvmadvice[ufi.entry->advice].nback,
			    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
		startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
		nforw = min(uvmadvice[ufi.entry->advice].nforw,
			    ((ufi.entry->end - ufi.orig_rvaddr) >>
			     PAGE_SHIFT) - 1);
		/*
		 * note: "-1" because we don't want to count the
		 * faulting page as forw
		 */
		npages = nback + nforw + 1;
		centeridx = nback;

		narrow = TRUE;	/* ensure only once per-fault */

	} else {