Example #1
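/*
 * ulz_put: pager "put" method for the loan-zero object.  there is nothing
 * to flush here; when pages are being freed we keep the single zero page
 * alive instead, reactivating it if an anon still references it and
 * dequeueing it otherwise.
 */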
static int
ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	KDASSERT(uobj == &uvm_loanzero_object);

	/*
	 * Don't need to do any work here if we're not freeing pages.
	 */

	if ((flags & PGO_FREE) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * we don't actually want to ever free the uvm_loanzero_page, so
	 * just reactivate or dequeue it.
	 */

	pg = TAILQ_FIRST(&uobj->memq);
	KASSERT(pg != NULL);
	KASSERT(TAILQ_NEXT(pg, listq.queue) == NULL);

	mutex_enter(&uvm_pageqlock);
	if (pg->uanon)
		uvm_pageactivate(pg);
	else
		uvm_pagedequeue(pg);
	mutex_exit(&uvm_pageqlock);

	mutex_exit(&uobj->vmobjlock);
	return 0;
}
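For orientation, a minimal sketch of how a handler like ulz_put would be hooked up: the loan-zero object's pager table points its "put" hook at this function. The struct uvm_pagerops layout and the ulz_pager initializer below are assumptions inferred from the uobj->pgops->pgo_get calls in Example #5, not copied from the real source.

/*
 * sketch only: register ulz_put as the pgo_put hook of the loan-zero
 * object's pager ops (struct and field names are assumed).
 */
static const struct uvm_pagerops ulz_pager = {
	.pgo_put = ulz_put,
};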
Example #2
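/*
 * uvm_loanbreak_anon: break the loan on an anon's page by giving the anon
 * a private, freshly copied replacement page.
 *
 * => the anon is locked; uobj, if the page is object-owned, is locked too.
 * => on success the uobj lock (if any) is dropped here.
 * => returns 0 on success, ENOMEM if no replacement page could be allocated.
 */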
int
uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));

	/* get new un-owned replacement page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		return ENOMEM;
	}

	/* copy old -> new */
	uvm_pagecopy(anon->an_page, pg);

	/* force reload */
	pmap_page_protect(anon->an_page, VM_PROT_NONE);
	mutex_enter(&uvm_pageqlock);	  /* KILL loan */

	anon->an_page->uanon = NULL;
	/* in case we owned */
	anon->an_page->pqflags &= ~PQ_ANON;

	if (uobj) {
		/* if we were receiver of loan */
		anon->an_page->loan_count--;
	} else {
		/*
		 * we were the lender (A->K); need to remove the page from
		 * pageq's.
		 */
		uvm_pagedequeue(anon->an_page);
	}

	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}

	/* install new page in anon */
	anon->an_page = pg;
	pg->uanon = anon;
	pg->pqflags |= PQ_ANON;

	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	pg->flags &= ~(PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/* done! */

	return 0;
}
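A hedged caller sketch for the routine above (not actual NetBSD source): how a write-fault path might break the loan on a resident anon page before letting the write proceed. The function name, the wait-channel string and the ERESTART-based retry are illustrative assumptions; only uvm_loanbreak_anon(), uvmfault_unlockall() and uvm_wait() come from the examples in this section.

/*
 * hypothetical caller sketch: assumes the anon (and uobj, if the page is
 * object-owned) are already locked, as uvm_loanbreak_anon() requires.
 */
static int
example_prepare_anon_write(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon, struct uvm_object *uobj)
{
	int error;

	if (anon->an_page == NULL || anon->an_page->loan_count == 0)
		return 0;			/* nothing is on loan */

	error = uvm_loanbreak_anon(anon, uobj);
	if (error) {
		/* ENOMEM: drop all locks, wait for free pages, refault */
		uvmfault_unlockall(ufi, amap, uobj, anon);
		uvm_wait("flt_noram");		/* channel name is illustrative */
		return ERESTART;
	}

	/* success: uobj (if any) was unlocked; anon now owns a private copy */
	return 0;
}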
Example #3
/*
 * uvm_loanpage: loan out pages to kernel (->K)
 *
 * => pages should be object-owned and the object should be locked.
 * => in the case of error, the object might be unlocked and relocked.
 * => caller should busy the pages beforehand.
 * => pages will be unbusied.
 * => fails with EBUSY if a wired page is encountered.
 */
static int
uvm_loanpage(struct vm_page **pgpp, int npages)
{
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgpp[i];

		KASSERT(pg->uobject != NULL);
		KASSERT(pg->uobject == pgpp[0]->uobject);
		KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
		KASSERT(mutex_owned(&pg->uobject->vmobjlock));
		KASSERT(pg->flags & PG_BUSY);

		mutex_enter(&uvm_pageqlock);
		if (pg->wire_count > 0) {
			mutex_exit(&uvm_pageqlock);
			UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
			error = EBUSY;
			break;
		}
		if (pg->loan_count == 0) {
			pmap_page_protect(pg, VM_PROT_READ);
		}
		pg->loan_count++;
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
	}

	uvm_page_unbusy(pgpp, npages);

	if (error) {
		/*
		 * backout what we've done
		 */
		kmutex_t *slock = &pgpp[0]->uobject->vmobjlock;

		mutex_exit(slock);
		uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
		mutex_enter(slock);
	}

	UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
	return error;
}
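The bookkeeping that uvm_loanpage() (and the anon/zero variants below) share boils down to three rules: a wired page refuses loans, the first loan write-protects the page's mappings via pmap_page_protect(pg, VM_PROT_READ), and every further loan only bumps loan_count. A standalone userland model of just that rule, purely illustrative and not kernel code:

/* standalone userland model of the loan_count rule; not kernel code */
#include <assert.h>

struct model_page {
	int wire_count;
	int loan_count;
	int writable;	/* models whether any mapping still allows writes */
};

static int
model_loan(struct model_page *pg)
{
	if (pg->wire_count > 0)
		return -1;		/* EBUSY in uvm_loanpage() */
	if (pg->loan_count == 0)
		pg->writable = 0;	/* first loan: write-protect mappings */
	pg->loan_count++;
	return 0;
}

int
main(void)
{
	struct model_page pg = { 0, 0, 1 };

	assert(model_loan(&pg) == 0 && pg.loan_count == 1 && !pg.writable);
	assert(model_loan(&pg) == 0 && pg.loan_count == 2);
	pg.wire_count = 1;
	assert(model_loan(&pg) == -1);	/* wired pages cannot be loaned */
	return 0;
}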
Example #4
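/*
 * uvm_loanzero: loan out a zero-fill page, backed by the single page kept
 * in uvm_loanzero_object, either as a kernel page or via a new/shared anon.
 *
 * => return value: -1 = fatal error (out of anons, everything unlocked),
 *	0 = failed to relock after sleeping for memory (caller refaults),
 *	1 = loan succeeded.
 */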
static int
uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
{
	struct vm_anon *anon;
	struct vm_page *pg;
	struct vm_amap *amap = ufi->entry->aref.ar_amap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
again:
	mutex_enter(&uvm_loanzero_object.vmobjlock);

	/*
	 * first, get ahold of our single zero page.
	 */

	if (__predict_false((pg =
			     TAILQ_FIRST(&uvm_loanzero_object.memq)) == NULL)) {
		while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
					   UVM_PGA_ZERO)) == NULL) {
			mutex_exit(&uvm_loanzero_object.vmobjlock);
			uvmfault_unlockall(ufi, amap, NULL, NULL);
			uvm_wait("loanzero");
			if (!uvmfault_relock(ufi)) {
				return (0);
			}
			if (amap) {
				amap_lock(amap);
			}
			goto again;
		}

		/* got a zero'd page. */
		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
		pg->flags |= PG_RDONLY;
		mutex_enter(&uvm_pageqlock);
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
		UVM_PAGE_OWN(pg, NULL);
	}

	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */
		mutex_enter(&uvm_pageqlock);
		pg->loan_count++;
		mutex_exit(&uvm_pageqlock);
		mutex_exit(&uvm_loanzero_object.vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * loaning to an anon.  check to see if there is already an anon
	 * associated with this page.  if so, then just return a reference
	 * to that anon.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		mutex_enter(&anon->an_lock);
		anon->an_ref++;
		mutex_exit(&anon->an_lock);
		mutex_exit(&uvm_loanzero_object.vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		/* out of swap causes us to fail */
		mutex_exit(&uvm_loanzero_object.vmobjlock);
		uvmfault_unlockall(ufi, amap, NULL, NULL);
		return (-1);
	}
	anon->an_page = pg;
	pg->uanon = anon;
	mutex_enter(&uvm_pageqlock);
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&anon->an_lock);
	mutex_exit(&uvm_loanzero_object.vmobjlock);
	**output = anon;
	(*output)++;
	return (1);
}
Example #5
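/*
 * uvm_loanuobj: loan a page from a uvm_object out (to the kernel or to
 * an anon, depending on UVM_LOAN_TOANON).
 *
 * => return value: -1 = fatal error (everything unlocked),
 *	0 = lost a race or need to refault (everything unlocked),
 *	1 = loan succeeded.
 */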
static int
uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	struct vm_anon *anon;
	int error, npages;
	bool locked;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */

	mutex_enter(&uobj->vmobjlock);
	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
		npages = 1;
		pg = NULL;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		error = EIO;		/* must have pgo_get op */
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */

	if (error && error != EBUSY) {
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return (-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 */

	if (error == EBUSY) {
		uvmfault_unlockall(ufi, amap, NULL, NULL);

		/* locked: uobj */
		npages = 1;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
		/* locked: <nothing> */

		if (error) {
			if (error == EAGAIN) {
				tsleep(&lbolt, PVM, "fltagain2", 0);
				return (0);
			}
			return (-1);
		}

		/*
		 * pgo_get was a success.   attempt to relock everything.
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap)
			amap_lock(amap);
		uobj = pg->uobject;
		mutex_enter(&uobj->vmobjlock);

		/*
		 * verify that the page has not been released and re-verify
		 * that the amap slot is still free.   if there is a problem we
		 * drop our lock (thus forcing a lookup refresh/retry).
		 */

		if ((pg->flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {
			if (locked)
				uvmfault_unlockall(ufi, amap, NULL, NULL);
			locked = false;
		}

		/*
		 * didn't get the lock?   release the page and retry.
		 */

		if (locked == false) {
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_RELEASED) {
				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(pg);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(&uobj->vmobjlock);
				return (0);
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pageactivate(pg);
			mutex_exit(&uvm_pageqlock);
			pg->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(pg, NULL);
			mutex_exit(&uobj->vmobjlock);
			return (0);
		}
	}

	KASSERT(uobj == pg->uobject);

	/*
	 * at this point we have the page we want ("pg") marked PG_BUSY for us
	 * and we have all data structures locked.  do the loanout.  page can
	 * not be PG_RELEASED (we caught this above).
	 */

	if ((flags & UVM_LOAN_TOANON) == 0) {
		if (uvm_loanpage(&pg, 1)) {
			uvmfault_unlockall(ufi, amap, uobj, NULL);
			return (-1);
		}
		mutex_exit(&uobj->vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * must be a loan to an anon.   check to see if there is already
	 * an anon associated with this page.  if so, then just return
	 * a reference to that anon.   the page should already be
	 * mapped read-only because it is already on loan.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		mutex_enter(&anon->an_lock);
		anon->an_ref++;
		mutex_exit(&anon->an_lock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		mutex_exit(&uobj->vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		goto fail;
	}
	anon->an_page = pg;
	pg->uanon = anon;
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
		pg->uanon = NULL;
		anon->an_page = NULL;
		anon->an_ref--;
		mutex_exit(&anon->an_lock);
		uvm_anfree(anon);
		goto fail;
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	mutex_exit(&uobj->vmobjlock);
	mutex_exit(&anon->an_lock);
	**output = anon;
	(*output)++;
	return (1);

fail:
	UVMHIST_LOG(loanhist, "fail", 0,0,0,0);
	/*
	 * unlock everything and bail out.
	 */
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(ufi, amap, uobj, NULL);
	return (-1);
}
Example #6
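/*
 * uvm_loananon: loan a page from an anon out (to the kernel or to
 * another anon).
 *
 * => return value: -1 = fatal error (everything unlocked),
 *	0 = caller must refault, 1 = loan succeeded.
 */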
int
uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
    struct vm_anon *anon)
{
	struct vm_page *pg;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * if we are loaning to "another" anon then it is easy: we just
	 * bump the reference count on the current anon and return a
	 * pointer to it (it becomes copy-on-write shared).
	 */

	if (flags & UVM_LOAN_TOANON) {
		mutex_enter(&anon->an_lock);
		pg = anon->an_page;
		if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
			if (pg->wire_count > 0) {
				UVMHIST_LOG(loanhist, "->A wired %p", pg,0,0,0);
				uvmfault_unlockall(ufi,
				    ufi->entry->aref.ar_amap,
				    ufi->entry->object.uvm_obj, anon);
				return (-1);
			}
			pmap_page_protect(pg, VM_PROT_READ);
		}
		anon->an_ref++;
		**output = anon;
		(*output)++;
		mutex_exit(&anon->an_lock);
		UVMHIST_LOG(loanhist, "->A done", 0,0,0,0);
		return (1);
	}

	/*
	 * we are loaning to a kernel-page.   we need to get the page
	 * resident so we can loan it out.   uvmfault_anonget will handle
	 * this for us.
	 */

	mutex_enter(&anon->an_lock);
	error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */

	if (error) {
		UVMHIST_LOG(loanhist, "error %d", error,0,0,0);

		/* need to refault (i.e. refresh our lookup) ? */
		if (error == ERESTART) {
			return (0);
		}

		/* "try again"?   sleep a bit and retry ... */
		if (error == EAGAIN) {
			tsleep(&lbolt, PVM, "loanagain", 0);
			return (0);
		}

		/* otherwise flag it as an error */
		return (-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */

	pg = anon->an_page;
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "->K wired %p", pg,0,0,0);
		KASSERT(pg->uobject == NULL);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
		    NULL, anon);
		return (-1);
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	**output = pg;
	(*output)++;

	/* unlock anon and return success */
	if (pg->uobject)
		mutex_exit(&pg->uobject->vmobjlock);
	mutex_exit(&anon->an_lock);
	UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
	return (1);
}
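The three routines uvm_loananon(), uvm_loanuobj() and uvm_loanzero() share the same return convention: -1 means a fatal error with every lock already dropped, 0 means the caller lost a lock race and must re-lookup and refault, and 1 means one page or anon was stored through *output (which was advanced). A hypothetical dispatcher sketch that decodes this convention; the function name and dispatch order are illustrative only, not the real per-address loop in the UVM loan code:

/* hypothetical dispatcher sketch; illustrative only */
static int
example_loan_one(struct uvm_faultinfo *ufi, void ***output, int flags,
    struct vm_anon *anon, struct uvm_object *uobj, vaddr_t va)
{
	int rv;

	if (anon != NULL)
		rv = uvm_loananon(ufi, output, flags, anon);
	else if (uobj != NULL)
		rv = uvm_loanuobj(ufi, output, flags, va);
	else
		rv = uvm_loanzero(ufi, output, flags);

	if (rv < 0)
		return -1;	/* fatal: every lock has already been dropped */
	if (rv == 0)
		return 0;	/* lost a race: caller re-looks up and retries */

	/* rv == 1: one entry was stored through *output and it advanced */
	return 1;
}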
Example #7
/*
 * uvm_loanbreak: break loan on a uobj page
 *
 * => called with uobj locked
 * => the page should be busy
 * => return value:
 *	newly allocated page on success, NULL if a page could not be allocated
 */
struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
	struct vm_page *pg;
#ifdef DIAGNOSTIC
	struct uvm_object *uobj = uobjpage->uobject;
#endif

	KASSERT(uobj != NULL);
	KASSERT(mutex_owned(&uobj->vmobjlock));
	KASSERT(uobjpage->flags & PG_BUSY);

	/* alloc new un-owned page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		return NULL;

	/*
	 * copy the data from the old page to the new
	 * one and clear the fake flags on the new page (keep it busy).
	 * force a reload of the old page by clearing it from all
	 * pmaps.
	 * transfer dirtiness of the old page to the new page.
	 * then lock the page queues to rename the pages.
	 */

	uvm_pagecopy(uobjpage, pg);	/* old -> new */
	pg->flags &= ~PG_FAKE;
	pmap_page_protect(uobjpage, VM_PROT_NONE);
	if ((uobjpage->flags & PG_CLEAN) != 0 && !pmap_clear_modify(uobjpage)) {
		pmap_clear_modify(pg);
		pg->flags |= PG_CLEAN;
	} else {
		/* uvm_pagecopy marked it dirty */
		KASSERT((pg->flags & PG_CLEAN) == 0);
		/* an object with a dirty page should be dirty. */
		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
	}
	if (uobjpage->flags & PG_WANTED)
		wakeup(uobjpage);
	/* uobj still locked */
	uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(uobjpage, NULL);

	mutex_enter(&uvm_pageqlock);

	/*
	 * replace uobjpage with new page.
	 */

	uvm_pagereplace(uobjpage, pg);

	/*
	 * if the page is no longer referenced by
	 * an anon (i.e. we are breaking an O->K
	 * loan), then remove it from any pageq's.
	 */
	if (uobjpage->uanon == NULL)
		uvm_pagedequeue(uobjpage);

	/*
	 * at this point we have absolutely no
	 * control over uobjpage
	 */

	/* install new page */
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	/*
	 * done!  loan is broken and "pg" is
	 * PG_BUSY.   it can now replace uobjpage.
	 */

	return pg;
}
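A hedged caller sketch for uvm_loanbreak() (not the actual fault handler): what a write fault on a loaned, busy uobj page might do with the return value. The function name and the wait-channel string are illustrative; the unbusy and unlock steps reuse only flag and helper names that appear in the examples above.

/*
 * hypothetical caller sketch: uobj is locked and *pgp is PG_BUSY, as
 * uvm_loanbreak() requires.
 */
static int
example_write_to_loaned_page(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct uvm_object *uobj, struct vm_page **pgp)
{
	struct vm_page *pg;

	if ((*pgp)->loan_count == 0)
		return 0;			/* not loaned, nothing to do */

	pg = uvm_loanbreak(*pgp);
	if (pg == NULL) {
		/* out of memory: unbusy the old page, unlock, wait, refault */
		if ((*pgp)->flags & PG_WANTED)
			wakeup(*pgp);
		(*pgp)->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(*pgp, NULL);
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		uvm_wait("loanbreak");		/* channel name is illustrative */
		return ERESTART;
	}

	*pgp = pg;	/* continue the fault with the private replacement */
	return 0;
}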
Example #8
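/*
 * uvmfault_anonget: make the page backing an anon resident, paging it in
 * from swap when necessary.
 *
 * => called with the amap and anon locked.
 * => returns VM_PAGER_OK with the anon still locked, VM_PAGER_REFAULT when
 *	the fault must be restarted (everything unlocked), or VM_PAGER_ERROR
 *	if the pagein failed (everything unlocked).
 */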
int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	boolean_t we_own;	/* we own anon's page? */
	boolean_t locked;	/* did we relock? */
	struct vm_page *pg;
	int result;

	result = 0;		/* XXX shut up gcc */
	uvmexp.fltanget++;
        /* bump rusage counters */
	if (anon->an_page)
		curproc->p_ru.ru_minflt++;
	else
		curproc->p_ru.ru_majflt++;

	/* 
	 * loop until we get it, or fail.
	 */

	while (1) {

		we_own = FALSE;		/* TRUE if we set PG_BUSY on a page */
		pg = anon->an_page;

		/*
		 * if there is a resident page and it is loaned, then anon
		 * may not own it.   call out to uvm_anon_lockloanpg() to ensure
		 * the real owner of the page has been identified and locked.
		 */

		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);

		/*
		 * page there?   make sure it is not busy/released.
		 */

		if (pg) {

			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!   if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */

			if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) {
				return (VM_PAGER_OK);
			}
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of page
			 */
			if (pg->uobject) {	/* owner is uobject ? */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				UVM_UNLOCK_AND_WAIT(pg,
				    &pg->uobject->vmobjlock,
				    FALSE, "anonget1",0);
			} else {
				/* anon owns page */
				uvmfault_unlockall(ufi, amap, NULL, NULL);
				UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
				    "anonget2",0);
			}
			/* ready to relock and try again */

		} else {
		
			/*
			 * no page, we must try and bring it in.
			 */
			pg = uvm_pagealloc(NULL, 0, anon, 0);

			if (pg == NULL) {		/* out of RAM.  */

				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				uvm_wait("flt_noram1");
				/* ready to relock and try again */

			} else {
	
				/* we set the PG_BUSY bit */
				we_own = TRUE;	
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				result = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case
				 */
				/* ready to relock and try again */
			}
		}

		/*
		 * now relock and try again
		 */

		locked = uvmfault_relock(ufi);
		if (locked || we_own)
			simple_lock(&anon->an_lock);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O. there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.   free the page and cause the fault 
		 *       to fail.
		 *   [3] I/O OK!   activate the page and sync with the
		 *       non-we_own case (i.e. drop anon lock if not locked).
		 */
		
		if (we_own) {

			if (pg->pg_flags & PG_WANTED) {
				/* still holding object lock */
				wakeup(pg);	
			}
			/* un-busy! */
			atomic_clearbits_int(&pg->pg_flags,
			    PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);

			/* 
			 * if we were RELEASED during I/O, then our anon is
			 * no longer part of an amap.   we need to free the
			 * anon and try again.
			 */
			if (pg->pg_flags & PG_RELEASED) {
				pmap_page_protect(pg, VM_PROT_NONE);
				simple_unlock(&anon->an_lock);
				uvm_anfree(anon);	/* frees page for us */
				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
							   NULL);
				uvmexp.fltpgrele++;
				return (VM_PAGER_REFAULT);	/* refault! */
			}

			if (result != VM_PAGER_OK) {
				KASSERT(result != VM_PAGER_PEND);

				/* remove page from anon */
				anon->an_page = NULL;

				/*
				 * remove the swap slot from the anon
				 * and mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */
				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				else
					simple_unlock(&anon->an_lock);
				return (VM_PAGER_ERROR);
			}
			
			/*
			 * must be OK, clear modify (already PG_CLEAN)
			 * and activate
			 */
			pmap_clear_modify(pg);
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
			if (!locked)
				simple_unlock(&anon->an_lock);
		}

		/*
		 * we were not able to relock.   restart fault.
		 */

		if (!locked)
			return (VM_PAGER_REFAULT);

		/*
		 * verify no one has touched the amap and moved the anon on us.
		 */

		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref, 
				ufi->orig_rvaddr - ufi->entry->start) != anon) {
			
			uvmfault_unlockall(ufi, amap, NULL, anon);
			return (VM_PAGER_REFAULT);
		}
			
		/*
		 * try it again! 
		 */

		uvmexp.fltanretry++;
		continue;

	} /* while (1) */

	/*NOTREACHED*/
}
Example #9
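/*
 * ufs_balloc_range: allocate backing store for a byte range of a vnode.
 * the pages covering the range are read in (or created) and kept busy
 * across the block allocation so that the old contents of the new blocks
 * are never visible to racing threads.
 */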
int
ufs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
    int flags)
{
	off_t neweof;	/* file size after the operation */
	off_t neweob;	/* offset next to the last block after the operation */
	off_t pagestart; /* starting offset of range covered by pgs */
	off_t eob;	/* offset next to allocated blocks */
	struct uvm_object *uobj;
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);
	struct vm_page **pgs;
	size_t pgssize;
	UVMHIST_FUNC("ufs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
		    vp, off, len, vp->v_size);

	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, 0);

	error = 0;
	uobj = &vp->v_uobj;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	pgssize = npages * sizeof(struct vm_page *);
	pgs = kmem_zalloc(pgssize, KM_SLEEP);

	mutex_enter(&uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_WRITE, 0,
	    PGO_SYNCIO|PGO_PASTEOF|PGO_NOBLOCKALLOC|PGO_NOTIMESTAMP);
	if (error) {
		goto out;
	}
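	/*
	 * the synchronous VOP_GETPAGES call above returned with the object
	 * lock dropped, so take it (and the page queue lock) again before
	 * touching the pages.
	 */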
	mutex_enter(&uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		UVMHIST_LOG(ubchist, "got pgs[%d] %p", i, pgs[i],0,0);
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		pgs[i]->flags &= ~PG_CLEAN;
		uvm_pageactivate(pgs[i]);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&uobj->vmobjlock);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	/*
	 * now allocate the range.
	 */

	genfs_node_wrlock(vp);
	error = GOP_ALLOC(vp, off, len, flags, cred);
	genfs_node_unlock(vp);

	/*
	 * clear PG_RDONLY on any pages we are holding
	 * (since they now have backing store) and unbusy them.
	 */

	GOP_SIZE(vp, off + len, &eob, 0);
	mutex_enter(&uobj->vmobjlock);
	for (i = 0; i < npages; i++) {
		if (error) {
			pgs[i]->flags |= PG_RELEASED;
		} else if (off <= pagestart + (i << PAGE_SHIFT) &&
		    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
			pgs[i]->flags &= ~PG_RDONLY;
		}
	}
	if (error) {
		mutex_enter(&uvm_pageqlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
	} else {
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(&uobj->vmobjlock);

 out:
	kmem_free(pgs, pgssize);
	return error;
}
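The block-alignment step in the middle of the routine (delta = off & (bsize - 1), then shrinking off and growing len by delta) keeps the end of the range fixed while rounding its start down to a block boundary. A tiny standalone illustration with made-up numbers:

/* standalone userland illustration of the block-alignment arithmetic */
#include <assert.h>

int
main(void)
{
	long long off = 0x12345, len = 0x100;
	int bshift = 13;			/* example: 8 KB blocks */
	long long bsize = 1LL << bshift;
	long long delta = off & (bsize - 1);	/* offset within its block */

	off -= delta;				/* round down to a block boundary */
	len += delta;				/* grow len so the end stays put */

	assert(off % bsize == 0);
	assert(off + len == 0x12345 + 0x100);
	return 0;
}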
Example #10
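/*
 * ulfs_balloc_range: LFS counterpart of ufs_balloc_range() above; same
 * contract, but the pages are fetched with the genfs node lock already
 * held (PGO_GLOCKHELD) and are only dirtied if the allocation succeeded.
 */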
int
ulfs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
    int flags)
{
	off_t neweof;	/* file size after the operation */
	off_t neweob;	/* offset next to the last block after the operation */
	off_t pagestart; /* starting offset of range covered by pgs */
	off_t eob;	/* offset next to allocated blocks */
	struct uvm_object *uobj;
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);
	struct vm_page **pgs;
	size_t pgssize;
	UVMHIST_FUNC("ulfs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
		    vp, off, len, vp->v_size);

	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, 0);

	error = 0;
	uobj = &vp->v_uobj;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	pgssize = npages * sizeof(struct vm_page *);
	pgs = kmem_zalloc(pgssize, KM_SLEEP);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	genfs_node_wrlock(vp);
	mutex_enter(uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_WRITE, 0, PGO_SYNCIO | PGO_PASTEOF | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP | PGO_GLOCKHELD);
	if (error) {
		goto out;
	}

	/*
	 * now allocate the range.
	 */

	error = GOP_ALLOC(vp, off, len, flags, cred);
	genfs_node_unlock(vp);

	/*
	 * if the allocation succeeded, clear PG_CLEAN on all the pages
	 * and clear PG_RDONLY on any pages that are now fully backed
	 * by disk blocks.  if the allocation failed, we do not invalidate
	 * the pages since they might have already existed and been dirty,
	 * in which case we need to keep them around.  if we created the pages,
	 * they will be clean and read-only, and leaving such pages
	 * in the cache won't cause any problems.
	 */

	GOP_SIZE(vp, off + len, &eob, 0);
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		if (!error) {
			if (off <= pagestart + (i << PAGE_SHIFT) &&
			    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
				pgs[i]->flags &= ~PG_RDONLY;
			}
			pgs[i]->flags &= ~PG_CLEAN;
		}
		uvm_pageactivate(pgs[i]);
	}
	mutex_exit(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);

 out:
	kmem_free(pgs, pgssize);
	return error;
}