/*
 * amap_share_protect: change protection of anons in a shared amap
 *
 * for shared amaps, given the current data structure layout, it is
 * not possible for us to directly locate all maps referencing the
 * shared anon (to change the protection).  in order to protect data
 * in shared maps we use pmap_page_protect().  [this is useful for IPC
 * mechanisms like map entry passing that may want to write-protect
 * all mappings of a shared amap.]  we traverse am_anon or am_slots
 * depending on the current state of the amap.
 *
 * => entry's map and amap must be locked by the caller
 */
void
amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int slots, lcv, slot, stop;

	KASSERT(mutex_owned(&amap->am_l));

	AMAP_B2SLOT(slots, (entry->end - entry->start));
	stop = entry->aref.ar_pageoff + slots;

	if (slots < amap->am_nused) {
		/* cheaper to traverse am_anon */
		for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
			if (amap->am_anon[lcv] == NULL)
				continue;
			if (amap->am_anon[lcv]->an_page != NULL)
				pmap_page_protect(amap->am_anon[lcv]->an_page,
				    prot);
		}
		return;
	}

	/* cheaper to traverse am_slots */
	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		slot = amap->am_slots[lcv];
		if (slot < entry->aref.ar_pageoff || slot >= stop)
			continue;
		if (amap->am_anon[slot]->an_page != NULL)
			pmap_page_protect(amap->am_anon[slot]->an_page, prot);
	}
}
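/*
 * Sketch of the byte-length to slot-count conversion assumed by
 * amap_share_protect() above; one amap slot covers one page.  This is
 * an assumption about AMAP_B2SLOT, not its authoritative definition:
 * the real macro in uvm_amap.h may additionally assert that "B" is
 * page-aligned.
 */
#define AMAP_B2SLOT(S, B)	((S) = (B) >> PAGE_SHIFT)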
/*
 * uvmfault_anonflush: try to deactivate the pages resident in the
 * given anons.  busy, loaned, and wired pages are left alone.
 */
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	int lcv;
	struct vm_page *pg;

	for (lcv = 0 ; lcv < n ; lcv++) {
		if (anons[lcv] == NULL)
			continue;
		simple_lock(&anons[lcv]->an_lock);
		pg = anons[lcv]->an_page;
		if (pg && (pg->pg_flags & PG_BUSY) == 0 &&
		    pg->loan_count == 0) {
			uvm_lock_pageq();
			if (pg->wire_count == 0) {
#ifdef UBC
				pmap_clear_reference(pg);
#else
				pmap_page_protect(pg, VM_PROT_NONE);
#endif
				uvm_pagedeactivate(pg);
			}
			uvm_unlock_pageq();
		}
		simple_unlock(&anons[lcv]->an_lock);
	}
}
void
i915_driver_lastclose(struct drm_device *dev)
{
	struct inteldrm_softc *dev_priv = dev->dev_private;
	struct vm_page *p;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fb_restore_mode(dev);
		return;
	}

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);

	if (dev_priv->agpdmat != NULL) {
		/*
		 * make sure we nuke everything, we may have mappings that
		 * we've unrefed, but uvm has a reference to them for maps.
		 * Make sure they get unbound and any accesses will segfault.
		 * XXX only do ones in GEM.
		 */
		for (p = dev_priv->pgs; p < dev_priv->pgs +
		    (dev->agp->info.ai_aperture_size / PAGE_SIZE); p++)
			pmap_page_protect(p, VM_PROT_NONE);
		agp_bus_dma_destroy((struct agp_softc *)dev->agp->agpdev,
		    dev_priv->agpdmat);
	}
	dev_priv->agpdmat = NULL;
}
int
uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));

	/* get new un-owned replacement page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		return ENOMEM;
	}

	/* copy old -> new */
	uvm_pagecopy(anon->an_page, pg);

	/* force reload */
	pmap_page_protect(anon->an_page, VM_PROT_NONE);
	mutex_enter(&uvm_pageqlock);		/* KILL loan */

	anon->an_page->uanon = NULL;
	/* in case we owned */
	anon->an_page->pqflags &= ~PQ_ANON;

	if (uobj) {
		/* if we were receiver of loan */
		anon->an_page->loan_count--;
	} else {
		/*
		 * we were the lender (A->K); need to remove the page from
		 * pageq's.
		 */
		uvm_pagedequeue(anon->an_page);
	}

	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}

	/* install new page in anon */
	anon->an_page = pg;
	pg->uanon = anon;
	pg->pqflags |= PQ_ANON;

	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	pg->flags &= ~(PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/* done! */
	return 0;
}
/* XXX */
void
cdev_pager_free_page(vm_object_t object, vm_page_t m)
{
	if (object->type == OBJT_MGTDEVICE) {
		KKASSERT((m->flags & PG_FICTITIOUS) != 0);
		pmap_page_protect(m, VM_PROT_NONE);
		vm_page_remove(m);
		vm_page_wakeup(m);
	} else if (object->type == OBJT_DEVICE) {
		TAILQ_REMOVE(&object->un_pager.devp.devp_pglist, m, pageq);
		dev_pager_putfake(m);
	}
}
/*
 * nfs_gop_write: write-protect the pages under the vnode interlock
 * before handing them to the generic write path, so that any later
 * modification faults and the pages can be re-marked dirty.
 */
int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int i;

	mutex_enter(vp->v_interlock);
	for (i = 0; i < npages; i++) {
		pmap_page_protect(pgs[i], VM_PROT_READ);
	}
	mutex_exit(vp->v_interlock);
	return genfs_gop_write(vp, pgs, npages, flags);
}
/*
 * uvm_loanpage: loan out pages to kernel (->K)
 *
 * => pages should be object-owned and the object should be locked.
 * => in the case of error, the object might be unlocked and relocked.
 * => caller should busy the pages beforehand.
 * => pages will be unbusied.
 * => fail with EBUSY if we meet a wired page.
 */
static int
uvm_loanpage(struct vm_page **pgpp, int npages)
{
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgpp[i];

		KASSERT(pg->uobject != NULL);
		KASSERT(pg->uobject == pgpp[0]->uobject);
		KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
		KASSERT(mutex_owned(&pg->uobject->vmobjlock));
		KASSERT(pg->flags & PG_BUSY);

		mutex_enter(&uvm_pageqlock);
		if (pg->wire_count > 0) {
			mutex_exit(&uvm_pageqlock);
			UVMHIST_LOG(loanhist, "wired %p", pg, 0, 0, 0);
			error = EBUSY;
			break;
		}
		if (pg->loan_count == 0) {
			pmap_page_protect(pg, VM_PROT_READ);
		}
		pg->loan_count++;
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
	}

	uvm_page_unbusy(pgpp, npages);

	if (error) {
		/*
		 * backout what we've done
		 */
		kmutex_t *slock = &pgpp[0]->uobject->vmobjlock;

		mutex_exit(slock);
		uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
		mutex_enter(slock);
	}

	UVMHIST_LOG(loanhist, "done %d", error, 0, 0, 0);
	return error;
}
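/*
 * Hypothetical caller sketch for uvm_loanpage(); not from the source.
 * Assumes the pages in pgpp[] belong to uobj and have already been
 * marked PG_BUSY by the caller, per the contract above.
 */
static int
loan_pages_to_kernel(struct uvm_object *uobj, struct vm_page **pgpp,
    int npages)
{
	int error;

	mutex_enter(&uobj->vmobjlock);
	error = uvm_loanpage(pgpp, npages);	/* unbusies the pages */
	mutex_exit(&uobj->vmobjlock);
	/* on EBUSY a wired page was met and the partial loan was undone */
	return error;
}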
static int
uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	struct vm_anon *anon;
	int error, npages;
	bool locked;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */

	mutex_enter(&uobj->vmobjlock);
	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
		npages = 1;
		pg = NULL;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		error = EIO;		/* must have pgo_get op */
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */

	if (error && error != EBUSY) {
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return (-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 */

	if (error == EBUSY) {
		uvmfault_unlockall(ufi, amap, NULL, NULL);

		/* locked: uobj */
		npages = 1;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
		/* locked: <nothing> */

		if (error) {
			if (error == EAGAIN) {
				tsleep(&lbolt, PVM, "fltagain2", 0);
				return (0);
			}
			return (-1);
		}

		/*
		 * pgo_get was a success.  attempt to relock everything.
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap)
			amap_lock(amap);
		uobj = pg->uobject;
		mutex_enter(&uobj->vmobjlock);

		/*
		 * verify that the page has not been released and re-verify
		 * that the amap slot is still free.  if there is a problem,
		 * we drop our lock (thus forcing a lookup refresh/retry).
		 */

		if ((pg->flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {
			if (locked)
				uvmfault_unlockall(ufi, amap, NULL, NULL);
			locked = false;
		}

		/*
		 * didn't get the lock?  release the page and retry.
		 */

		if (locked == false) {
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_RELEASED) {
				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(pg);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(&uobj->vmobjlock);
				return (0);
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pageactivate(pg);
			mutex_exit(&uvm_pageqlock);
			pg->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(pg, NULL);
			mutex_exit(&uobj->vmobjlock);
			return (0);
		}
	}

	KASSERT(uobj == pg->uobject);

	/*
	 * at this point we have the page we want ("pg") marked PG_BUSY for
	 * us and we have all data structures locked.  do the loanout.  the
	 * page cannot be PG_RELEASED (we caught that above).
	 */

	if ((flags & UVM_LOAN_TOANON) == 0) {
		if (uvm_loanpage(&pg, 1)) {
			uvmfault_unlockall(ufi, amap, uobj, NULL);
			return (-1);
		}
		mutex_exit(&uobj->vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * must be a loan to an anon.  check to see if there is already
	 * an anon associated with this page.  if so, then just return
	 * a reference to this object.  the page should already be
	 * mapped read-only because it is already on loan.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		mutex_enter(&anon->an_lock);
		anon->an_ref++;
		mutex_exit(&anon->an_lock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		mutex_exit(&uobj->vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		goto fail;
	}
	anon->an_page = pg;
	pg->uanon = anon;
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "wired %p", pg, 0, 0, 0);
		pg->uanon = NULL;
		anon->an_page = NULL;
		anon->an_ref--;
		mutex_exit(&anon->an_lock);
		uvm_anfree(anon);
		goto fail;
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	mutex_exit(&uobj->vmobjlock);
	mutex_exit(&anon->an_lock);
	**output = anon;
	(*output)++;
	return (1);

fail:
	UVMHIST_LOG(loanhist, "fail", 0, 0, 0, 0);
	/*
	 * unlock everything and bail out.
	 */
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(ufi, amap, uobj, NULL);
	return (-1);
}
int
uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
    struct vm_anon *anon)
{
	struct vm_page *pg;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * if we are loaning to "another" anon then it is easy, we just
	 * bump the reference count on the current anon and return a
	 * pointer to it (it becomes copy-on-write shared).
	 */

	if (flags & UVM_LOAN_TOANON) {
		mutex_enter(&anon->an_lock);
		pg = anon->an_page;
		if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
			if (pg->wire_count > 0) {
				UVMHIST_LOG(loanhist, "->A wired %p",
				    pg, 0, 0, 0);
				uvmfault_unlockall(ufi,
				    ufi->entry->aref.ar_amap,
				    ufi->entry->object.uvm_obj, anon);
				return (-1);
			}
			pmap_page_protect(pg, VM_PROT_READ);
		}
		anon->an_ref++;
		**output = anon;
		(*output)++;
		mutex_exit(&anon->an_lock);
		UVMHIST_LOG(loanhist, "->A done", 0, 0, 0, 0);
		return (1);
	}

	/*
	 * we are loaning to a kernel-page.  we need to get the page
	 * resident so we can wire it.  uvmfault_anonget will handle
	 * this for us.
	 */

	mutex_enter(&anon->an_lock);
	error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */

	if (error) {
		UVMHIST_LOG(loanhist, "error %d", error, 0, 0, 0);

		/* need to refault (i.e. refresh our lookup) ? */
		if (error == ERESTART) {
			return (0);
		}

		/* "try again"?  sleep a bit and retry ... */
		if (error == EAGAIN) {
			tsleep(&lbolt, PVM, "loanagain", 0);
			return (0);
		}

		/* otherwise flag it as an error */
		return (-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */

	pg = anon->an_page;
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "->K wired %p", pg, 0, 0, 0);
		KASSERT(pg->uobject == NULL);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL, anon);
		return (-1);
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	**output = pg;
	(*output)++;

	/* unlock anon and return success */
	if (pg->uobject)
		mutex_exit(&pg->uobject->vmobjlock);
	mutex_exit(&anon->an_lock);
	UVMHIST_LOG(loanhist, "->K done", 0, 0, 0, 0);
	return (1);
}
/*
 * uvm_loanbreak: break loan on a uobj page
 *
 * => called with uobj locked
 * => the page should be busy
 * => return value:
 *	the newly allocated page if successful, or NULL if the
 *	replacement page could not be allocated
 */
struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
	struct vm_page *pg;
#ifdef DIAGNOSTIC
	struct uvm_object *uobj = uobjpage->uobject;
#endif

	KASSERT(uobj != NULL);
	KASSERT(mutex_owned(&uobj->vmobjlock));
	KASSERT(uobjpage->flags & PG_BUSY);

	/* alloc new un-owned page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		return NULL;

	/*
	 * copy the data from the old page to the new
	 * one and clear the fake flags on the new page (keep it busy).
	 * force a reload of the old page by clearing it from all
	 * pmaps.
	 * transfer dirtiness of the old page to the new page.
	 * then lock the page queues to rename the pages.
	 */

	uvm_pagecopy(uobjpage, pg);	/* old -> new */
	pg->flags &= ~PG_FAKE;
	pmap_page_protect(uobjpage, VM_PROT_NONE);
	if ((uobjpage->flags & PG_CLEAN) != 0 &&
	    !pmap_clear_modify(uobjpage)) {
		pmap_clear_modify(pg);
		pg->flags |= PG_CLEAN;
	} else {
		/* uvm_pagecopy marked it dirty */
		KASSERT((pg->flags & PG_CLEAN) == 0);
		/* an object with a dirty page should itself be dirty */
		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
	}
	if (uobjpage->flags & PG_WANTED)
		wakeup(uobjpage);
	/* uobj still locked */
	uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(uobjpage, NULL);

	mutex_enter(&uvm_pageqlock);

	/*
	 * replace uobjpage with new page.
	 */

	uvm_pagereplace(uobjpage, pg);

	/*
	 * if the page is no longer referenced by
	 * an anon (i.e. we are breaking an O->K
	 * loan), then remove it from any pageq's.
	 */
	if (uobjpage->uanon == NULL)
		uvm_pagedequeue(uobjpage);

	/*
	 * at this point we have absolutely no
	 * control over uobjpage
	 */

	/* install new page */
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	/*
	 * done!  loan is broken and "pg" is
	 * PG_BUSY.  it can now replace uobjpage.
	 */

	return pg;
}
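/*
 * Hypothetical call-site sketch for uvm_loanbreak(); not from the
 * source.  In a fault handler this is typically reached when a write
 * fault hits a loaned uobj page: the loan is broken so the faulting
 * process gets a private, writable copy.  Returns NULL when the
 * caller should unlock, wait for memory, and refault.
 */
static struct vm_page *
break_loan_for_write(struct vm_page *uobjpage, vm_prot_t access_type)
{
	struct vm_page *pg;

	if (uobjpage->loan_count == 0 ||
	    (access_type & VM_PROT_WRITE) == 0)
		return uobjpage;	/* nothing to do */
	pg = uvm_loanbreak(uobjpage);
	if (pg == NULL) {
		/* out of memory: caller unlocks, uvm_wait()s, refaults */
		return NULL;
	}
	return pg;	/* the PG_BUSY replacement page */
}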
int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	boolean_t we_own;	/* we own anon's page? */
	boolean_t locked;	/* did we relock? */
	struct vm_page *pg;
	int result;

	result = 0;		/* XXX shut up gcc */
	uvmexp.fltanget++;
	/* bump rusage counters */
	if (anon->an_page)
		curproc->p_ru.ru_minflt++;
	else
		curproc->p_ru.ru_majflt++;

	/*
	 * loop until we get it, or fail.
	 */

	while (1) {
		we_own = FALSE;		/* TRUE if we set PG_BUSY on a page */
		pg = anon->an_page;

		/*
		 * if there is a resident page and it is loaned, then anon
		 * may not own it.  call out to uvm_anon_lockloanpg() to
		 * ensure the real owner of the page has been identified
		 * and locked.
		 */

		if (pg && pg->loan_count)
			pg = uvm_anon_lockloanpg(anon);

		/*
		 * page there?  make sure it is not busy/released.
		 */

		if (pg) {
			/*
			 * at this point, if the page has a uobject [meaning
			 * we have it on loan], then that uobject is locked
			 * by us!  if the page is busy, we drop all the
			 * locks (including uobject) and try again.
			 */

			if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0) {
				return (VM_PAGER_OK);
			}
			atomic_setbits_int(&pg->pg_flags, PG_WANTED);
			uvmexp.fltpgwait++;

			/*
			 * the last unlock must be an atomic unlock+wait on
			 * the owner of the page
			 */
			if (pg->uobject) {	/* owner is uobject ? */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				UVM_UNLOCK_AND_WAIT(pg,
				    &pg->uobject->vmobjlock,
				    FALSE, "anonget1", 0);
			} else {
				/* anon owns page */
				uvmfault_unlockall(ufi, amap, NULL, NULL);
				UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, 0,
				    "anonget2", 0);
			}
			/* ready to relock and try again */
		} else {
			/*
			 * no page, we must try and bring it in.
			 */
			pg = uvm_pagealloc(NULL, 0, anon, 0);

			if (pg == NULL) {	/* out of RAM.  */
				uvmfault_unlockall(ufi, amap, NULL, anon);
				uvmexp.fltnoram++;
				uvm_wait("flt_noram1");
				/* ready to relock and try again */
			} else {
				/* we set the PG_BUSY bit */
				we_own = TRUE;
				uvmfault_unlockall(ufi, amap, NULL, anon);

				/*
				 * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
				 * page into the uvm_swap_get function with
				 * all data structures unlocked.  note that
				 * it is ok to read an_swslot here because
				 * we hold PG_BUSY on the page.
				 */
				uvmexp.pageins++;
				result = uvm_swap_get(pg, anon->an_swslot,
				    PGO_SYNCIO);

				/*
				 * we clean up after the i/o below in the
				 * "we_own" case
				 */
				/* ready to relock and try again */
			}
		}

		/*
		 * now relock and try again
		 */

		locked = uvmfault_relock(ufi);
		if (locked || we_own)
			simple_lock(&anon->an_lock);

		/*
		 * if we own the page (i.e. we set PG_BUSY), then we need
		 * to clean up after the I/O.  there are three cases to
		 * consider:
		 *   [1] page released during I/O: free anon and ReFault.
		 *   [2] I/O not OK.  free the page and cause the fault
		 *	 to fail.
		 *   [3] I/O OK!  activate the page and sync with the
		 *	 non-we_own case (i.e. drop anon lock if not locked).
		 */

		if (we_own) {
			if (pg->pg_flags & PG_WANTED) {
				/* still holding object lock */
				wakeup(pg);
			}
			/* un-busy! */
			atomic_clearbits_int(&pg->pg_flags,
			    PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);

			/*
			 * if we were RELEASED during I/O, then our anon is
			 * no longer part of an amap.  we need to free the
			 * anon and try again.
			 */
			if (pg->pg_flags & PG_RELEASED) {
				pmap_page_protect(pg, VM_PROT_NONE);
				simple_unlock(&anon->an_lock);
				uvm_anfree(anon);	/* frees page for us */
				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    NULL);
				uvmexp.fltpgrele++;
				return (VM_PAGER_REFAULT);	/* refault! */
			}

			if (result != VM_PAGER_OK) {
				KASSERT(result != VM_PAGER_PEND);

				/* remove page from anon */
				anon->an_page = NULL;

				/*
				 * remove the swap slot from the anon
				 * and mark the anon as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				uvm_swap_markbad(anon->an_swslot, 1);
				anon->an_swslot = SWSLOT_BAD;

				/*
				 * note: page was never !PG_BUSY, so it
				 * can't be mapped and thus no need to
				 * pmap_page_protect it...
				 */
				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();

				if (locked)
					uvmfault_unlockall(ufi, amap, NULL,
					    anon);
				else
					simple_unlock(&anon->an_lock);
				return (VM_PAGER_ERROR);
			}

			/*
			 * must be OK, clear modify (already PG_CLEAN)
			 * and activate
			 */
			pmap_clear_modify(pg);
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
			if (!locked)
				simple_unlock(&anon->an_lock);
		}

		/*
		 * we were not able to relock.  restart fault.
		 */

		if (!locked)
			return (VM_PAGER_REFAULT);

		/*
		 * verify no one has touched the amap and moved the anon
		 * on us.
		 */

		if (ufi != NULL &&
		    amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start) != anon) {
			uvmfault_unlockall(ufi, amap, NULL, anon);
			return (VM_PAGER_REFAULT);
		}

		/*
		 * try it again!
		 */

		uvmexp.fltanretry++;
		continue;

	}	/* while (1) */
	/*NOTREACHED*/
}
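/*
 * Hypothetical caller sketch for uvmfault_anonget(); not from the
 * source.  The three VM_PAGER_* return codes above form the whole
 * contract with the fault handler: on VM_PAGER_OK everything is
 * still locked, otherwise everything has been unlocked.  The error
 * mapping below is illustrative.
 */
static int
handle_anon_fault(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct vm_anon *anon)
{
	switch (uvmfault_anonget(ufi, amap, anon)) {
	case VM_PAGER_OK:
		return (0);		/* anon's page resident and locked */
	case VM_PAGER_REFAULT:
		return (ERESTART);	/* locks dropped; redo the lookup */
	case VM_PAGER_ERROR:
	default:
		return (EACCES);	/* swap I/O failed */
	}
}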
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
void
vm_pageout_scan()
{
	register vm_page_t	m, next;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;
	vm_object_t		object;

	/*
	 * Only continue when we want more pages to be "free"
	 */

	cnt.v_rev++;

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 * Be sure the pmap system is updated so
		 * we can scan the inactive queue.
		 */
		pmap_update();
	}

	/*
	 * Acquire the resident page system lock,
	 * as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or
	 * we have scanned through the entire queue.  If we
	 * encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	for (m = vm_page_queue_inactive.tqh_first; m != NULL; m = next) {
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		if (free >= cnt.v_free_target)
			break;

		cnt.v_scan++;
		next = m->pageq.tqe_next;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			vm_page_activate(m);
			cnt.v_reactivated++;
			continue;
		}

		/*
		 * If the page is clean, free it up.
		 */
		if (m->flags & PG_CLEAN) {
			object = m->object;
			if (vm_object_lock_try(object)) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
				    VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				cnt.v_dfree++;
				vm_object_unlock(object);
			}
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0)
			continue;

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 */
		object = m->object;
		if (!vm_object_lock_try(object))
			continue;
		cnt.v_pageouts++;
#ifdef CLUSTERED_PAGEOUT
		if (object->pager &&
		    vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
			vm_pageout_cluster(m, object);
		else
#endif
		vm_pageout_page(m, object);
		thread_wakeup((int) object);
		vm_object_unlock(object);
		/*
		 * Former next page may no longer even be on the inactive
		 * queue (due to potential blocking in the pager with the
		 * queues unlocked).  If it isn't, we just start over.
		 */
		if (next && (next->flags & PG_INACTIVE) == 0)
			next = vm_page_queue_inactive.tqh_first;
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 * Move some more pages from active to inactive.
		 */
		if ((m = vm_page_queue_active.tqh_first) == NULL)
			break;
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}
unsigned int
pmap_disconnect(ppnum_t pa)
{
	/* remove all mappings of the page; prot 0 is VM_PROT_NONE */
	pmap_page_protect(pa, 0);
	return 0;
}