static __inline void uvmfault_anonflush(struct vm_anon **anons, int n) { int lcv; struct vm_page *pg; for (lcv = 0 ; lcv < n ; lcv++) { if (anons[lcv] == NULL) continue; simple_lock(&anons[lcv]->an_lock); pg = anons[lcv]->an_page; if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) { uvm_lock_pageq(); if (pg->wire_count == 0) { #ifdef UBC pmap_clear_reference(pg); #else pmap_page_protect(pg, VM_PROT_NONE); #endif uvm_pagedeactivate(pg); } uvm_unlock_pageq(); } simple_unlock(&anons[lcv]->an_lock); } }
/*
 * uvmfault_anonflush: try and deactivate pages in specified anons
 *
 * => does not have to deactivate page if it is busy
 */
static __inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
	struct vm_anon *anon;
	struct vm_page *pg;
	int i;

	for (i = 0; i < n; i++) {
		anon = anons[i];
		if (anon == NULL)
			continue;

		/* an_lock serializes access to an_page and its flags */
		mtx_enter(&anon->an_lock);
		pg = anon->an_page;
		if (pg != NULL && (pg->pg_flags & PG_BUSY) == 0 &&
		    pg->loan_count == 0) {
			/* pageq lock must be held to move pages between queues */
			uvm_lock_pageq();
			if (pg->wire_count == 0)
				uvm_pagedeactivate(pg);
			uvm_unlock_pageq();
		}
		mtx_leave(&anon->an_lock);
	}
}