Example #1
/*
 * uvmfault_amapcopy: clear "needs_copy" in a map.
 *
 * => called with VM data structures unlocked (usually, see below)
 * => we get a write lock on the maps and clear needs_copy for a VA
 * => if we are out of RAM we sleep (waiting for more)
 */
static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{

	/* while we haven't done the job */
	while (1) {
		/* no mapping?  give up. */
		if (uvmfault_lookup(ufi, TRUE) == FALSE)
			return;

		/* copy if needed. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry))
			amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE, 
				ufi->orig_rvaddr, ufi->orig_rvaddr + 1);

		/* didn't work?  must be out of RAM.  unlock and sleep. */
		if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
			uvmfault_unlockmaps(ufi, TRUE);
			uvm_wait("fltamapcopy");
			continue;
		}

		/* got it!  unlock and return */
		uvmfault_unlockmaps(ufi, TRUE);
		return;
	}
	/*NOTREACHED*/
}
Example #2
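/*
 * uvm_loanentry: loan out the pages in the range described by "ufi".
 *
 * => called with the maps locked (i.e. after a successful uvmfault_lookup())
 * => the amap is locked here; every return path drops all locks
 * => returns the number of pages loaned, or -1 on error
 */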
static inline int
uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
{
	vaddr_t curaddr = ufi->orig_rvaddr;
	vsize_t togo = ufi->size;
	struct vm_aref *aref = &ufi->entry->aref;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_anon *anon;
	int rv, result = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * lock us the rest of the way down (we unlock before return)
	 */
	if (aref->ar_amap)
		amap_lock(aref->ar_amap);

	/*
	 * loop until done
	 */
	while (togo) {

		/*
		 * find the page we want.   check the anon layer first.
		 */

		if (aref->ar_amap) {
			anon = amap_lookup(aref, curaddr - ufi->entry->start);
		} else {
			anon = NULL;
		}

		/* locked: map, amap, uobj */
		if (anon) {
			rv = uvm_loananon(ufi, output, flags, anon);
		} else if (uobj) {
			rv = uvm_loanuobj(ufi, output, flags, curaddr);
		} else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
			rv = uvm_loanzero(ufi, output, flags);
		} else {
			uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
			rv = -1;
		}
		/* locked: if (rv > 0) => map, amap, uobj  [o.w. unlocked] */
		KASSERT(rv > 0 || aref->ar_amap == NULL ||
		    !mutex_owned(&aref->ar_amap->am_l));
		KASSERT(rv > 0 || uobj == NULL ||
		    !mutex_owned(&uobj->vmobjlock));

		/* total failure */
		if (rv < 0) {
			UVMHIST_LOG(loanhist, "failure %d", rv, 0,0,0);
			return (-1);
		}

		/* relock failed, need to do another lookup */
		if (rv == 0) {
			UVMHIST_LOG(loanhist, "relock failure %d", result,
			    0,0,0);
			return (result);
		}

		/*
		 * got it... advance to next page
		 */

		result++;
		togo -= PAGE_SIZE;
		curaddr += PAGE_SIZE;
	}

	/*
	 * unlock what we locked, unlock the maps and return
	 */

	if (aref->ar_amap)
		amap_unlock(aref->ar_amap);
	uvmfault_unlockmaps(ufi, false);
	UVMHIST_LOG(loanhist, "done %d", result, 0,0,0);
	return (result);
}
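The return-value contract above (-1 for a hard failure, 0 when a lock was lost and the lookup must be redone, >0 for the number of pages loaned) is what a caller loops on. The sketch below is a hypothetical driver written against that contract only; the function name loan_range_sketch, its parameters, and its EFAULT returns are illustrative and not the actual top-level loan routine.

/*
 * Hypothetical driver (illustration only): repeatedly look up the range
 * and let uvm_loanentry() loan pages into "output", retrying the lookup
 * whenever uvm_loanentry() reports a lost lock (return value 0).
 */
static int
loan_range_sketch(struct vm_map *map, vaddr_t start, vsize_t len,
    void **output, int flags)
{
	struct uvm_faultinfo ufi;
	void **outptr = output;		/* advanced by the loan functions */
	int rv;

	while (len > 0) {
		ufi.orig_map = map;
		ufi.orig_rvaddr = start;
		ufi.orig_size = len;

		/* lock the maps; fills in ufi.map / ufi.entry on success */
		if (uvmfault_lookup(&ufi, false) == false)
			return (EFAULT);

		rv = uvm_loanentry(&ufi, &outptr, flags);
		if (rv < 0)
			return (EFAULT);	/* everything already unlocked */

		/* rv pages were loaned; on rv == 0 we simply retry */
		start += (vsize_t)rv << PAGE_SHIFT;
		len -= (vsize_t)rv << PAGE_SHIFT;
	}
	return (0);
}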
Example #3
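/*
 * uvm_fault: main entry point for page faults.
 *
 * => called by machine-dependent trap code to resolve a fault at "vaddr"
 * => looks up and locks the maps, checks protection, then resolves the
 *	fault from the amap (anon) layer and/or the backing uvm_object
 * => returns 0 on success, or an errno such as EFAULT or EACCES on failure
 */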
int
uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
    vm_prot_t access_type)
{
	struct uvm_faultinfo ufi;
	vm_prot_t enter_prot;
	boolean_t wired, narrow, promote, locked, shadowed;
	int npages, nback, nforw, centeridx, result, lcv, gotpages;
	vaddr_t startva, currva;
	voff_t uoff;
	paddr_t pa; 
	struct vm_amap *amap;
	struct uvm_object *uobj;
	struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
	struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;

	anon = NULL;
	pg = NULL;

	uvmexp.faults++;	/* XXX: locking? */

	/*
	 * init the IN parameters in the ufi
	 */

	ufi.orig_map = orig_map;
	ufi.orig_rvaddr = trunc_page(vaddr);
	ufi.orig_size = PAGE_SIZE;	/* can't get any smaller than this */
	if (fault_type == VM_FAULT_WIRE)
		narrow = TRUE;		/* don't look for neighborhood
					 * pages on wire */
	else
		narrow = FALSE;		/* normal fault */

	/*
	 * "goto ReFault" means restart the page fault from ground zero.
	 */
ReFault:

	/*
	 * lookup and lock the maps
	 */

	if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
		return (EFAULT);
	}
	/* locked: maps(read) */

#ifdef DIAGNOSTIC
	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0)
		panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
		    ufi.map, vaddr);
#endif

	/*
	 * check protection
	 */

	if ((ufi.entry->protection & access_type) != access_type) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EACCES);
	}

	/*
	 * "enter_prot" is the protection we want to enter the page in at.
	 * for certain pages (e.g. copy-on-write pages) this protection can
	 * be more strict than ufi.entry->protection.  "wired" means either
	 * the entry is wired or we are fault-wiring the pg.
	 */

	enter_prot = ufi.entry->protection;
	wired = VM_MAPENT_ISWIRED(ufi.entry) || (fault_type == VM_FAULT_WIRE);
	if (wired)
		access_type = enter_prot; /* full access for wired */

	/*
	 * handle "needs_copy" case.   if we need to copy the amap we will
	 * have to drop our readlock and relock it with a write lock.  (we
	 * need a write lock to change anything in a map entry [e.g.
	 * needs_copy]).
	 */

	if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
		if ((access_type & VM_PROT_WRITE) ||
		    (ufi.entry->object.uvm_obj == NULL)) {
			/* need to clear */
			uvmfault_unlockmaps(&ufi, FALSE);
			uvmfault_amapcopy(&ufi);
			uvmexp.fltamcopy++;
			goto ReFault;

		} else {

			/*
			 * ensure that we pmap_enter page R/O since
			 * needs_copy is still true
			 */
			enter_prot &= ~VM_PROT_WRITE; 

		}
	}

	/*
	 * identify the players
	 */

	amap = ufi.entry->aref.ar_amap;		/* top layer */
	uobj = ufi.entry->object.uvm_obj;	/* bottom layer */

	/*
	 * check for a case 0 fault.  if nothing backing the entry then
	 * error now.
	 */

	if (amap == NULL && uobj == NULL) {
		uvmfault_unlockmaps(&ufi, FALSE);
		return (EFAULT);
	}

	/*
	 * establish range of interest based on advice from mapper
	 * and then clip to fit map entry.   note that we only want
	 * to do this the first time through the fault.   if we 
	 * ReFault we will disable this by setting "narrow" to true.
	 */

	if (narrow == FALSE) {

		/* wide fault (!narrow) */
		KASSERT(uvmadvice[ufi.entry->advice].advice ==
			 ufi.entry->advice);
		nback = min(uvmadvice[ufi.entry->advice].nback,
			    (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
		startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
		nforw = min(uvmadvice[ufi.entry->advice].nforw,
			    ((ufi.entry->end - ufi.orig_rvaddr) >>
			     PAGE_SHIFT) - 1);
		/*
		 * note: "-1" because we don't want to count the
		 * faulting page as forw
		 */
		npages = nback + nforw + 1;
		centeridx = nback;

		narrow = TRUE;	/* ensure only once per-fault */

	} else {