Example #1
static int
ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	KDASSERT(uobj == &uvm_loanzero_object);

	/*
	 * Don't need to do any work here if we're not freeing pages.
	 */

	if ((flags & PGO_FREE) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * we don't actually want to ever free the uvm_loanzero_page, so
	 * just reactivate or dequeue it.
	 */

	pg = TAILQ_FIRST(&uobj->memq);
	KASSERT(pg != NULL);
	KASSERT(TAILQ_NEXT(pg, listq.queue) == NULL);

	mutex_enter(&uvm_pageqlock);
	if (pg->uanon)
		uvm_pageactivate(pg);
	else
		uvm_pagedequeue(pg);
	mutex_exit(&uvm_pageqlock);

	mutex_exit(&uobj->vmobjlock);
	return 0;
}
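
For context, ulz_put() is reached through the pager ops of uvm_loanzero_object, so a PGO_FREE flush can never take away the single zero page. A minimal sketch of that wiring is shown below; it assumes the usual struct uvm_pagerops layout and is not the verbatim uvm_loan.c initialization.

/*
 * Sketch only: install ulz_put as the put/flush hook of the loan-zero
 * object's pager.  Other hooks are intentionally left NULL.
 */
static const struct uvm_pagerops ulz_pager = {
	.pgo_put = ulz_put,	/* called when the object's pages are flushed */
};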
Example #2
int
uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));

	/* get new un-owned replacement page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		return ENOMEM;
	}

	/* copy old -> new */
	uvm_pagecopy(anon->an_page, pg);

	/* force reload */
	pmap_page_protect(anon->an_page, VM_PROT_NONE);
	mutex_enter(&uvm_pageqlock);	  /* KILL loan */

	anon->an_page->uanon = NULL;
	/* in case the anon owned the page */
	anon->an_page->pqflags &= ~PQ_ANON;

	if (uobj) {
		/* if we were the receiver of the loan */
		anon->an_page->loan_count--;
	} else {
		/*
		 * we were the lender (A->K); need to remove the page from
		 * pageq's.
		 */
		uvm_pagedequeue(anon->an_page);
	}

	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}

	/* install new page in anon */
	anon->an_page = pg;
	pg->uanon = anon;
	pg->pqflags |= PQ_ANON;

	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	pg->flags &= ~(PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/* done! */

	return 0;
}
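
The expected calling pattern is a write (copy-on-write style) fault on a loaned anon page: break the loan first, and if no page can be allocated, back off and let the pagedaemon run. The sketch below is illustrative only; break_anon_loan_sketch and the "loanbrk" wait message are hypothetical, and only uvm_loanbreak_anon(), uvm_wait() and the locking discipline asserted above are assumed.

/*
 * Illustrative caller (sketch, not actual uvm_fault.c code).
 * anon->an_lock and, if uobj != NULL, uobj->vmobjlock are held on entry,
 * matching the KASSERTs in uvm_loanbreak_anon().
 */
static int
break_anon_loan_sketch(struct vm_anon *anon, struct uvm_object *uobj)
{
	int error;

	if (anon->an_page->loan_count == 0)
		return 0;		/* nothing loaned, nothing to do */

	error = uvm_loanbreak_anon(anon, uobj);
	if (error) {
		/* out of memory: drop our locks, wait, let the caller retry */
		if (uobj != NULL)
			mutex_exit(uobj->vmobjlock);
		mutex_exit(anon->an_lock);
		uvm_wait("loanbrk");	/* hypothetical wait message */
		return ERESTART;	/* caller restarts the fault */
	}

	/* on success uvm_loanbreak_anon() already dropped uobj's lock */
	return 0;
}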
Example #3
/*
 * uvm_loanbreak: break loan on a uobj page
 *
 * => called with uobj locked
 * => the page should be busy
 * => return value:
 *	newly allocated page on success, NULL on failure
 */
struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
	struct vm_page *pg;
#ifdef DIAGNOSTIC
	/* uobj is referenced only by the KASSERTs below */
	struct uvm_object *uobj = uobjpage->uobject;
#endif

	KASSERT(uobj != NULL);
	KASSERT(mutex_owned(&uobj->vmobjlock));
	KASSERT(uobjpage->flags & PG_BUSY);

	/* alloc new un-owned page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		return NULL;

	/*
	 * copy the data from the old page to the new
	 * one and clear the fake flags on the new page (keep it busy).
	 * force a reload of the old page by clearing it from all
	 * pmaps.
	 * transfer dirtiness of the old page to the new page.
	 * then lock the page queues to rename the pages.
	 */

	uvm_pagecopy(uobjpage, pg);	/* old -> new */
	pg->flags &= ~PG_FAKE;
	pmap_page_protect(uobjpage, VM_PROT_NONE);
	if ((uobjpage->flags & PG_CLEAN) != 0 && !pmap_clear_modify(uobjpage)) {
		pmap_clear_modify(pg);
		pg->flags |= PG_CLEAN;
	} else {
		/* uvm_pagecopy marked it dirty */
		KASSERT((pg->flags & PG_CLEAN) == 0);
		/* an object with a dirty page should be dirty. */
		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
	}
	if (uobjpage->flags & PG_WANTED)
		wakeup(uobjpage);
	/* uobj still locked */
	uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(uobjpage, NULL);

	mutex_enter(&uvm_pageqlock);

	/*
	 * replace uobjpage with new page.
	 */

	uvm_pagereplace(uobjpage, pg);

	/*
	 * if the page is no longer referenced by
	 * an anon (i.e. we are breaking an O->K
	 * loan), then remove it from any pageq's.
	 */
	if (uobjpage->uanon == NULL)
		uvm_pagedequeue(uobjpage);

	/*
	 * at this point we have absolutely no
	 * control over uobjpage
	 */

	/* install new page */
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	/*
	 * done!  loan is broken and "pg" is
	 * PG_BUSY.   it can now replace uobjpage.
	 */

	return pg;
}
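
A hedged sketch of the calling side: the caller holds the object locked and the page busy, checks the loan count, and on allocation failure unlocks and waits before retrying. The names break_uobj_loan_sketch and "loanbrk2" are illustrative, not actual kernel identifiers; only uvm_loanbreak(), uvm_wait() and the lock/busy preconditions asserted above are assumed.

/*
 * Illustrative caller (sketch, not actual kernel code): break an
 * object-page loan on a write fault.  uobj is locked and *pgp is
 * PG_BUSY on entry, matching the KASSERTs in uvm_loanbreak().
 */
static int
break_uobj_loan_sketch(struct uvm_object *uobj, struct vm_page **pgp)
{
	struct vm_page *uobjpage = *pgp;
	struct vm_page *pg;

	if (uobjpage->loan_count == 0)
		return 0;			/* not loaned */

	pg = uvm_loanbreak(uobjpage);
	if (pg == NULL) {
		/* out of memory: unlock, wait, and have the caller retry */
		mutex_exit(&uobj->vmobjlock);
		uvm_wait("loanbrk2");		/* hypothetical wait message */
		return ERESTART;
	}

	/* the new page is PG_BUSY and already replaces uobjpage in the object */
	*pgp = pg;
	return 0;
}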