/*
 * munlockall(void)
 *
 * Unwire all user-wired map entries, cancel MCL_FUTURE.
 *
 * No requirements
 */
int
sys_munlockall(struct munlockall_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	int rc = KERN_SUCCESS;	/* always returned; no failure path below */

	vm_map_lock(map);

	/* Clear MAP_WIREFUTURE to cancel mlockall(MCL_FUTURE) */
	map->flags &= ~MAP_WIREFUTURE;

retry:
	/* Walk every map entry; the header sentinel terminates the list. */
	for (entry = map->header.next;
	     entry != &map->header;
	     entry = entry->next) {
		/* Only entries this process user-wired are touched. */
		if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
			continue;

		/*
		 * If we encounter an in-transition entry, we release the
		 * map lock and retry the scan; we do not decrement any
		 * wired_count more than once because we do not touch
		 * any entries with MAP_ENTRY_USER_WIRED not set.
		 *
		 * There is a potential interleaving with concurrent
		 * mlockall()s here -- if we abort a scan, an mlockall()
		 * could start, wire a number of entries before our
		 * current position in, and then stall itself on this
		 * or any other in-transition entry. If that occurs, when
		 * we resume, we will unwire those entries.
		 */
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/* Ask the transitioning thread to wake us up. */
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			/* Sleeps (dropping the lock) until transition ends. */
			vm_map_transition_wait(map);
			/* Entry pointers may be stale; rescan from the top. */
			goto retry;
		}

		KASSERT(entry->wired_count > 0,
			("wired_count was 0 with USER_WIRED set! %p", entry));

		/* Drop wired count, if it hits zero, unwire the entry */
		entry->eflags &= ~MAP_ENTRY_USER_WIRED;
		entry->wired_count--;
		if (entry->wired_count == 0)
			vm_fault_unwire(map, entry);
	}

	/* Bump the map generation so cached lookups revalidate. */
	map->timestamp++;
	vm_map_unlock(map);

	return (rc);
}
/*
 * vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map by simulating a
 *	fault on each page so it is entered into the physical map.  On
 *	failure, any pages wired so far in [start, addr) are unwired
 *	before the fault status is returned.
 *
 *	For user wiring we only request read access on currently
 *	read-only sections (VM_PROT_NONE + VM_FAULT_CHANGE_WIRING).
 *
 * NOTE(review): this calls vm_fault_unwire() with four arguments while
 * sys_munlockall() in this file calls it with two -- one of the two call
 * sites does not match the prototype; confirm against the declaration.
 */
int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
	      boolean_t fictitious)
{
	vm_offset_t addr;
	int rv;

	addr = start;
	while (addr < end) {
		rv = vm_fault(map, addr, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
		if (rv) {
			/* Back out the pages already wired, then fail. */
			if (addr != start)
				vm_fault_unwire(map, start, addr, fictitious);
			return (rv);
		}
		addr += PAGE_SIZE;
	}

	return (KERN_SUCCESS);
}