Example No. 1
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;
	
	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		simple_lock(&anons[lcv]->an_lock);
		pg = anons[lcv]->an_page;
		if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
#ifdef UBC
				pmap_clear_reference(pg);
#else
				pmap_page_protect(pg, VM_PROT_NONE);
#endif
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
		simple_unlock(&anons[lcv]->an_lock);
	}
}
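A minimal caller sketch, not part of the excerpt above: in the fault path a sequential-access hint is a natural trigger for flushing the anons behind the faulting address. "ufi", "amap", "anons" and "nback" are assumed locals of that path.

	/* hypothetical fault-path snippet: deactivate anons behind a sequential fault */
	if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {
		if (amap)
			uvmfault_anonflush(anons, nback);
	}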
Example No. 2
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */
void
uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
{
	struct vm_page *pp;
	voff_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page %p, busy=%ld", pp,
		    pp->pg_flags & PG_BUSY, 0, 0);

		if (pp->pg_flags & PG_BUSY) {
			/* owner must check for this when done */
			atomic_setbits_int(&pp->pg_flags, PG_RELEASED);
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
	}
}
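A rough sketch of the kind of caller the comment alludes to ("this gets called from uvm_unmap_..."); the exact call site and the offset arithmetic relative to vm_map_min(kernel_map) are assumptions, with "entry" standing for a kernel-object map entry being torn down.

	/* hypothetical unmap-path snippet: toss the pages of an anonymous kernel mapping */
	pmap_remove(pmap_kernel(), entry->start, entry->end);
	uvm_km_pgremove(entry->object.uvm_obj,
	    entry->start - vm_map_min(kernel_map),
	    entry->end - vm_map_min(kernel_map));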
Example No. 3
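/*
 * uvm_objwire: wire the pages of a uvm_object in [start, end), fetching
 * them in if needed; optionally collects the wired pages on "pageq".
 */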
int
uvm_objwire(struct uvm_object *uobj, voff_t start, voff_t end,
    struct pglist *pageq)
{
	int i, npages, left, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	voff_t offset = start;

	left = (end - start) >> PAGE_SHIFT;

	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (voff_t)npages << PAGE_SHIFT;
	}

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_objunwire(uobj, start, offset);

	return error;
}
Example No. 4
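/*
 * uvm_objunwire: unwire the pages of a uvm_object in [start, end).
 */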
void
uvm_objunwire(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct vm_page *pg;
	off_t offset;

	uvm_lock_pageq();
	for (offset = start; offset < end; offset += PAGE_SIZE) {
		pg = uvm_pagelookup(uobj, offset);

		KASSERT(pg != NULL);
		KASSERT(!(pg->pg_flags & PG_RELEASED));

		uvm_pageunwire(pg);
	}
	uvm_unlock_pageq();
}
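A hypothetical usage sketch pairing the two functions above: wire an object range for the duration of an operation, then release it. "uobj", "off" and "len" are assumed to come from the caller.

	struct pglist pageq;
	int error;

	TAILQ_INIT(&pageq);
	error = uvm_objwire(uobj, off, off + len, &pageq);
	if (error)
		return (error);
	/* ... operate on the wired pages collected on "pageq" ... */
	uvm_objunwire(uobj, off, off + len);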
Example No. 5
/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;
	
	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		mtx_enter(&anons[lcv]->an_lock);
		pg = anons[lcv]->an_page;
		if (pg && (pg->pg_flags & PG_BUSY) == 0 &&
		    pg->loan_count == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0)
				uvm_pagedeactivate(pg);
			uvm_unlock_pageq();
		}
		mtx_leave(&anons[lcv]->an_lock);
	}
}
Example No. 6
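/*
 * uvm_pager_put: high-level pageout routine.  builds a cluster around
 * "pg" when possible, hands it to the pager (or to swap, for swap-backed
 * pageouts) and returns a VM_PAGER_* status code.
 */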
int
uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page ***ppsp_ptr, int *npages, int flags,
    voff_t start, voff_t stop)
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null  if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		/* XXX daddr_t -> int */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the 
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				mtx_enter(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up! the caller only has one page ("pg")
	 * to worry about.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			mtx_enter(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			/* XXX daddr_t -> int */
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pg_flags & PQ_ANON) {
				mtx_enter(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				mtx_leave(&pg->uanon->an_lock);
			} else {
				mtx_enter(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
					       pg->offset >> PAGE_SHIFT,
					       nswblk);
				mtx_leave(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					/* XXX daddr_t -> int */
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					/* XXX daddr_t -> int */
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			/* XXX daddr_t -> int */
			uvm_swap_markbad(swblk, *npages);
		}

		if (uobj)
			mtx_leave(&uobj->vmobjlock);
	}

	return (result);
}
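A hedged sketch, not taken from the tree, of how a pageout path might dispatch on the return value; "flags", "start" and "stop" are assumed to be set up as for the call above.

	result = uvm_pager_put(uobj, pg, &ppsp, &npages, flags, start, stop);
	switch (result) {
	case VM_PAGER_OK:
	case VM_PAGER_PEND:
		/* I/O done or queued; cluster handled according to flags */
		break;
	case VM_PAGER_AGAIN:
		/* transient failure; the caller may retry "pg" later */
		break;
	default:
		/* hard error; only "pg" is left for the caller to clean up */
		break;
	}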
Example No. 7
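/*
 * uvmfault_anonget: make sure the anon has a resident, non-busy,
 * non-released page, bringing it in from swap if needed.
 *
 * => returns VM_PAGER_OK on success (anon still locked),
 *    VM_PAGER_REFAULT if the caller must restart the fault, or
 *    VM_PAGER_ERROR if the swap-in failed.
 */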
int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	boolean_t we_own;	/* we own anon's page? */
	boolean_t locked;	/* did we relock? */
	struct vm_page *pg;
	int result;

	result = 0;		/* XXX shut up gcc */
	uvmexp.fltanget++;
	/* bump rusage counters */
	if (anon->an_page)
		curproc->p_ru.ru_minflt++;
	else
		curproc->p_ru.ru_majflt++;

	/* 
	 * loop until we get it, or fail.
	 */

	while (1) {

		we_own = FALSE;		/* TRUE if we set PG_BUSY on a page */
		pg = anon->an_page;

		/*
		 * if there is a resident page and it is loaned, then anon
		 * may not own it.   call out to uvm_anon_lockloanpg() to ensure
		 * the real owner of the page has been identified and locked.
		 */

		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);

		/*
		 * page there?   make sure it is not busy/released.
		 */

		if (pg) {

			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!   if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */

			if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) {
				return (VM_PAGER_OK);
			}
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of page
			 */
			if (pg->uobject) {	/* owner is uobject ? */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				UVM_UNLOCK_AND_WAIT(pg,
				    &pg->uobject->vmobjlock,
				    FALSE, "anonget1",0);
			} else {
				/* anon owns page */
				uvmfault_unlockall(ufi, amap, NULL, NULL);
				UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
				    "anonget2",0);
			}
			/* ready to relock and try again */

		} else {
		
			/*
			 * no page, we must try and bring it in.
			 */
			pg = uvm_pagealloc(NULL, 0, anon, 0);

			if (pg == NULL) {		/* out of RAM.  */

				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				uvm_wait("flt_noram1");
				/* ready to relock and try again */

			} else {
	
				/* we set the PG_BUSY bit */
				we_own = TRUE;	
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				result = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case
				 */
				/* ready to relock and try again */
			}
		}

		/*
		 * now relock and try again
		 */

		locked = uvmfault_relock(ufi);
		if (locked || we_own)
			simple_lock(&anon->an_lock);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O. there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.   free the page and cause the fault 
		 *       to fail.
		 *   [3] I/O OK!   activate the page and sync with the
		 *       non-we_own case (i.e. drop anon lock if not locked).
		 */
		
		if (we_own) {

			if (pg->pg_flags & PG_WANTED) {
				/* still holding object lock */
				wakeup(pg);	
			}
			/* un-busy! */
			atomic_clearbits_int(&pg->pg_flags,
			    PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);

			/* 
			 * if we were RELEASED during I/O, then our anon is
			 * no longer part of an amap.   we need to free the
			 * anon and try again.
			 */
			if (pg->pg_flags & PG_RELEASED) {
				pmap_page_protect(pg, VM_PROT_NONE);
				simple_unlock(&anon->an_lock);
				uvm_anfree(anon);	/* frees page for us */
				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
							   NULL);
				uvmexp.fltpgrele++;
				return (VM_PAGER_REFAULT);	/* refault! */
			}

			if (result != VM_PAGER_OK) {
				KASSERT(result != VM_PAGER_PEND);

				/* remove page from anon */
				anon->an_page = NULL;

				/*
				 * remove the swap slot from the anon
				 * and mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */
				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				else
					simple_unlock(&anon->an_lock);
				return (VM_PAGER_ERROR);
			}
			
			/*
			 * must be OK, clear modify (already PG_CLEAN)
			 * and activate
			 */
			pmap_clear_modify(pg);
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
			if (!locked)
				simple_unlock(&anon->an_lock);
		}

		/*
		 * we were not able to relock.   restart fault.
		 */

		if (!locked)
			return (VM_PAGER_REFAULT);

		/*
		 * verify no one has touched the amap and moved the anon on us.
		 */

		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref, 
				ufi->orig_rvaddr - ufi->entry->start) != anon) {
			
			uvmfault_unlockall(ufi, amap, NULL, anon);
			return (VM_PAGER_REFAULT);
		}
			
		/*
		 * try it again! 
		 */

		uvmexp.fltanretry++;
		continue;

	} /* while (1) */

	/*NOTREACHED*/
}
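A hedged sketch of how a fault handler might consume the return codes above; the surrounding label and the exact error propagation are assumptions, not an excerpt of uvm_fault().

	result = uvmfault_anonget(&ufi, amap, anon);
	switch (result) {
	case VM_PAGER_OK:
		break;			/* page resident; anon still locked */
	case VM_PAGER_REFAULT:
		goto ReFault;		/* all locks dropped; restart the fault */
	default:
		/* VM_PAGER_ERROR: swap-in failed, fail the fault */
		return (result);
	}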
Example No. 8
int
uvm_objwire(struct uvm_object *uobj, off_t start, off_t end,
    struct pglist *pageq)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mtx_enter(&uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		mtx_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

#if 0
			/*
			 * Loan break
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mtx_leave(&uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mtx_enter(&uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}
#endif

			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (off_t)npages << PAGE_SHIFT;
	}
	mtx_leave(&uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_objunwire(uobj, start, offset);

	return error;
}