int
memory_object_control_uiomove(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    void                    *uio,
    int                     start_offset,
    int                     io_requested,
    int                     mark_dirty,
    int                     take_reference)
{
    vm_object_t     object;
    vm_page_t       dst_page;
    int             xsize;
    int             retval = 0;
    int             cur_run;
    int             cur_needed;
    int             i;
    int             orig_offset;
    boolean_t       make_lru = FALSE;
    vm_page_t       page_run[MAX_RUN];

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL) {
        return (0);
    }
    assert(!object->internal);

    vm_object_lock(object);

    if (mark_dirty && object->copy != VM_OBJECT_NULL) {
        /*
         * We can't modify the pages without honoring
         * copy-on-write obligations first, so fall off
         * this optimized path and fall back to the regular
         * path.
         */
        vm_object_unlock(object);
        return 0;
    }
    orig_offset = start_offset;

    while (io_requested && retval == 0) {

        cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

        if (cur_needed > MAX_RUN)
            cur_needed = MAX_RUN;

        for (cur_run = 0; cur_run < cur_needed; ) {

            if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
                break;
            /*
             * Sync up on getting the busy bit
             */
            if ((dst_page->busy || dst_page->cleaning)) {
                /*
                 * someone else is playing with the page... if we've
                 * already collected pages into this run, go ahead
                 * and process now, we can't block on this
                 * page while holding other pages in the BUSY state
                 * otherwise we will wait
                 */
                if (cur_run)
                    break;
                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                continue;
            }
            /*
             * this routine is only called when copying
             * to/from real files... no need to consider
             * encrypted swap pages
             */
            assert(!dst_page->encrypted);

            if (mark_dirty) {
                dst_page->dirty = TRUE;
                if (dst_page->cs_validated) {
                    /*
                     * CODE SIGNING:
                     * We're modifying a code-signed
                     * page: assume that it is now tainted.
                     */
                    dst_page->cs_tainted = TRUE;
                    vm_cs_tainted_forces++;
                }
            }
            dst_page->busy = TRUE;

            page_run[cur_run++] = dst_page;

            offset += PAGE_SIZE_64;
        }
        if (cur_run == 0)
            /*
             * we hit a 'hole' in the cache
             * we bail at this point
             * we'll unlock the object below
             */
            break;
        vm_object_unlock(object);

        for (i = 0; i < cur_run; i++) {

            dst_page = page_run[i];

            if ((xsize = PAGE_SIZE - start_offset) > io_requested)
                xsize = io_requested;

            if ((retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset),
                                    xsize, uio)))
                break;

            io_requested -= xsize;
            start_offset = 0;
        }
        vm_object_lock(object);

        /*
         * if we have more than 1 page to work on
         * in the current run, or the original request
         * started at offset 0 of the page, or we're
         * processing multiple batches, we will move
         * the pages to the tail of the inactive queue
         * to implement an LRU for read/write accesses
         *
         * the check for orig_offset == 0 is there to
         * mitigate the cost of small (< page_size) requests
         * to the same page (this way we only move it once)
         */
        if (take_reference && (cur_run > 1 || orig_offset == 0)) {
            vm_page_lockspin_queues();
            make_lru = TRUE;
        }
        for (i = 0; i < cur_run; i++) {
            dst_page = page_run[i];

            /*
             * someone is explicitly referencing this page...
             * update clustered and speculative state
             */
            VM_PAGE_CONSUME_CLUSTERED(dst_page);

            if (make_lru == TRUE)
                vm_page_lru(dst_page);

            PAGE_WAKEUP_DONE(dst_page);
        }
        if (make_lru == TRUE) {
            vm_page_unlock_queues();
            make_lru = FALSE;
        }
        orig_offset = 0;
    }
    vm_object_unlock(object);

    return (retval);
}
int
memory_object_control_uiomove(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    void                    *uio,
    int                     start_offset,
    int                     io_requested,
    int                     mark_dirty,
    int                     take_reference)
{
    vm_object_t     object;
    vm_page_t       dst_page;
    int             xsize;
    int             retval = 0;
    int             cur_run;
    int             cur_needed;
    int             i;
    int             orig_offset;
    vm_page_t       page_run[MAX_RUN];

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL) {
        return (0);
    }
    assert(!object->internal);

    vm_object_lock(object);

    if (mark_dirty && object->copy != VM_OBJECT_NULL) {
        /*
         * We can't modify the pages without honoring
         * copy-on-write obligations first, so fall off
         * this optimized path and fall back to the regular
         * path.
         */
        vm_object_unlock(object);
        return 0;
    }
    orig_offset = start_offset;

    while (io_requested && retval == 0) {

        cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

        if (cur_needed > MAX_RUN)
            cur_needed = MAX_RUN;

        for (cur_run = 0; cur_run < cur_needed; ) {

            if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
                break;
            /*
             * if we're in this routine, we are inside a filesystem's
             * locking model, so we don't ever want to wait for pages that have
             * list_req_pending == TRUE since it means that the
             * page is a candidate for some type of I/O operation,
             * but that it has not yet been gathered into a UPL...
             * this implies that it is still outside the domain
             * of the filesystem and that whoever is responsible for
             * grabbing it into a UPL may be stuck behind the filesystem
             * lock this thread owns, or trying to take a lock exclusively
             * and waiting for the readers to drain from a rw lock...
             * if we block in those cases, we will deadlock
             */
            if (dst_page->list_req_pending) {

                if (dst_page->absent) {
                    /*
                     * this is the list_req_pending | absent | busy case
                     * which originates from vm_fault_page... we want
                     * to fall out of the fast path and go back
                     * to the caller which will gather this page
                     * into a UPL and issue the I/O if no one
                     * else beats us to it
                     */
                    break;
                }
                if (dst_page->pageout || dst_page->cleaning) {
                    /*
                     * this is the list_req_pending | pageout | busy case
                     * or the list_req_pending | cleaning case...
                     * which originate from the pageout_scan and
                     * msync worlds for the pageout case and the hibernate
                     * pre-cleaning world for the cleaning case...
                     * we need to reset the state of this page to indicate
                     * it should stay in the cache marked dirty... nothing else we
                     * can do at this point... we can't block on it, we can't busy
                     * it and we can't clean it from this routine.
                     */
                    vm_page_lockspin_queues();

                    vm_pageout_queue_steal(dst_page, TRUE);
                    vm_page_deactivate(dst_page);

                    vm_page_unlock_queues();
                }
                /*
                 * this is the list_req_pending | cleaning case...
                 * we can go ahead and deal with this page since
                 * its ok for us to mark this page busy... if a UPL
                 * tries to gather this page, it will block until the
                 * busy is cleared, thus allowing us safe use of the page
                 * when we're done with it, we will clear busy and wake
                 * up anyone waiting on it, thus allowing the UPL creation
                 * to finish
                 */

            } else if (dst_page->busy || dst_page->cleaning) {
                /*
                 * someone else is playing with the page... if we've
                 * already collected pages into this run, go ahead
                 * and process now, we can't block on this
                 * page while holding other pages in the BUSY state
                 * otherwise we will wait
                 */
                if (cur_run)
                    break;
                PAGE_SLEEP(object, dst_page, THREAD_UNINT);
                continue;
            }
            /*
             * this routine is only called when copying
             * to/from real files... no need to consider
             * encrypted swap pages
             */
            assert(!dst_page->encrypted);

            if (mark_dirty) {
                dst_page->dirty = TRUE;
                if (dst_page->cs_validated && !dst_page->cs_tainted) {
                    /*
                     * CODE SIGNING:
                     * We're modifying a code-signed
                     * page: force revalidate
                     */
                    dst_page->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
                    vm_cs_validated_resets++;
#endif
                    pmap_disconnect(dst_page->phys_page);
                }
            }
            dst_page->busy = TRUE;

            page_run[cur_run++] = dst_page;

            offset += PAGE_SIZE_64;
        }
        if (cur_run == 0)
            /*
             * we hit a 'hole' in the cache or
             * a page we don't want to try to handle,
             * so bail at this point
             * we'll unlock the object below
             */
            break;
        vm_object_unlock(object);

        for (i = 0; i < cur_run; i++) {

            dst_page = page_run[i];

            if ((xsize = PAGE_SIZE - start_offset) > io_requested)
                xsize = io_requested;

            if ((retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset),
                                    xsize, uio)))
                break;

            io_requested -= xsize;
            start_offset = 0;
        }
        vm_object_lock(object);

        /*
         * if we have more than 1 page to work on
         * in the current run, or the original request
         * started at offset 0 of the page, or we're
         * processing multiple batches, we will move
         * the pages to the tail of the inactive queue
         * to implement an LRU for read/write accesses
         *
         * the check for orig_offset == 0 is there to
         * mitigate the cost of small (< page_size) requests
         * to the same page (this way we only move it once)
         */
        if (take_reference && (cur_run > 1 || orig_offset == 0)) {
            vm_page_lockspin_queues();

            for (i = 0; i < cur_run; i++)
                vm_page_lru(page_run[i]);

            vm_page_unlock_queues();
        }
        for (i = 0; i < cur_run; i++) {
            dst_page = page_run[i];

            /*
             * someone is explicitly referencing this page...
             * update clustered and speculative state
             */
            VM_PAGE_CONSUME_CLUSTERED(dst_page);

            PAGE_WAKEUP_DONE(dst_page);
        }
        orig_offset = 0;
    }
    vm_object_unlock(object);

    return (retval);
}
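/*
 * Illustrative sketch (not part of the listings above): one way a
 * filesystem's cached-read/write path could invoke this fast path.
 * The function name example_copy_cached_data and the exact offset/size
 * bookkeeping below are assumptions for illustration only; in XNU the
 * analogous caller lives in the cluster I/O layer (cluster_copy_ubc_data),
 * which falls back to the regular UPL-based path whenever
 * memory_object_control_uiomove() cannot satisfy the whole request from
 * resident pages.  A caller can compare uio_resid() before and after the
 * call to see how much was satisfied from the cache.
 */
static int
example_copy_cached_data(vnode_t vp, uio_t uio, int io_size, int mark_dirty)
{
    memory_object_control_t control;
    int                     start_offset;

    /* memory object backing this vnode's unified buffer cache */
    control = ubc_getobject(vp, UBC_FLAGS_NONE);
    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return (0);     /* nothing cached; caller uses the regular I/O path */

    /* byte offset of the request within its first page */
    start_offset = (int)(uio_offset(uio) & PAGE_MASK_64);

    /*
     * copy directly between the uio and any resident pages;
     * take_reference == 1 asks for LRU maintenance on the pages touched
     */
    return (memory_object_control_uiomove(control,
                                          uio_offset(uio) - start_offset,
                                          uio, start_offset, io_size,
                                          mark_dirty, 1));
}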