static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
    vm_page_t m;
    vm_pindex_t idx;
    size_t tlen;
    int error, offset, rv;

    idx = OFF_TO_IDX(uio->uio_offset);
    offset = uio->uio_offset & PAGE_MASK;
    tlen = MIN(PAGE_SIZE - offset, len);

    VM_OBJECT_WLOCK(obj);

    /*
     * Parallel reads of the page content from disk are prevented
     * by exclusive busy.
     *
     * Although the tmpfs vnode lock is held here, it is
     * nonetheless safe to sleep waiting for a free page.  The
     * pageout daemon does not need to acquire the tmpfs vnode
     * lock to page out tobj's pages because tobj is a OBJT_SWAP
     * type object.
     */
    m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
    if (m->valid != VM_PAGE_BITS_ALL) {
        if (vm_pager_has_page(obj, idx, NULL, NULL)) {
            rv = vm_pager_get_pages(obj, &m, 1, 0);
            m = vm_page_lookup(obj, idx);
            if (m == NULL) {
                printf(
                    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
                    obj, idx, rv);
                VM_OBJECT_WUNLOCK(obj);
                return (EIO);
            }
            if (rv != VM_PAGER_OK) {
                printf(
                    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
                    obj, idx, m->valid, rv);
                vm_page_lock(m);
                vm_page_free(m);
                vm_page_unlock(m);
                VM_OBJECT_WUNLOCK(obj);
                return (EIO);
            }
        } else
            vm_page_zero_invalid(m, TRUE);
    }
    vm_page_xunbusy(m);
    vm_page_lock(m);
    vm_page_hold(m);
    vm_page_unlock(m);
    VM_OBJECT_WUNLOCK(obj);
    error = uiomove_fromphys(&m, offset, tlen, uio);
    if (uio->uio_rw == UIO_WRITE && error == 0) {
        VM_OBJECT_WLOCK(obj);
        vm_page_dirty(m);
        VM_OBJECT_WUNLOCK(obj);
    }
    vm_page_lock(m);
    vm_page_unhold(m);
    if (m->queue == PQ_NONE) {
        vm_page_deactivate(m);
    } else {
        /* Requeue to maintain LRU ordering. */
        vm_page_requeue(m);
    }
    vm_page_unlock(m);

    return (error);
}
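/*
 * A minimal caller sketch, not FreeBSD's own wrapper: it illustrates how a
 * routine could drive uiomove_object_page() in a loop, since each call
 * transfers at most the remainder of a single page.  The name
 * uiomove_object_sketch and the obj_size length cap are illustrative
 * assumptions, not part of the code above.
 */
static int
uiomove_object_sketch(vm_object_t obj, off_t obj_size, struct uio *uio)
{
    ssize_t resid;
    size_t len;
    int error;

    error = 0;
    while ((resid = uio->uio_resid) > 0) {
        /* Stop at the end of the backing object. */
        if (obj_size <= uio->uio_offset)
            break;
        len = MIN(obj_size - uio->uio_offset, resid);
        if (len == 0)
            break;
        /* Each call copies at most one page's worth and advances the uio. */
        error = uiomove_object_page(obj, len, uio);
        if (error != 0)
            break;
    }
    return (error);
}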
static status_t
cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
    size_t* _size, bool doWrite)
{
    if (_cacheRef == NULL)
        panic("cache_io() called with NULL ref!\n");

    file_cache_ref* ref = (file_cache_ref*)_cacheRef;
    VMCache* cache = ref->cache;
    off_t fileSize = cache->virtual_end;
    bool useBuffer = buffer != 0;

    TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
        ref, offset, (void*)buffer, *_size, doWrite ? "write" : "read"));

    // out of bounds access?
    if (offset >= fileSize || offset < 0) {
        *_size = 0;
        return B_OK;
    }

    int32 pageOffset = offset & (B_PAGE_SIZE - 1);
    size_t size = *_size;
    offset -= pageOffset;

    if ((off_t)(offset + pageOffset + size) > fileSize) {
        // adapt size to be within the file's offsets
        size = fileSize - pageOffset - offset;
        *_size = size;
    }
    if (size == 0)
        return B_OK;

    // "offset" and "lastOffset" are always aligned to B_PAGE_SIZE,
    // the "last*" variables always point to the end of the last
    // satisfied request part

    const uint32 kMaxChunkSize = MAX_IO_VECS * B_PAGE_SIZE;
    size_t bytesLeft = size, lastLeft = size;
    int32 lastPageOffset = pageOffset;
    addr_t lastBuffer = buffer;
    off_t lastOffset = offset;
    size_t lastReservedPages = min_c(MAX_IO_VECS,
        (pageOffset + bytesLeft + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
    size_t reservePages = 0;
    size_t pagesProcessed = 0;
    cache_func function = NULL;

    vm_page_reservation reservation;
    reserve_pages(ref, &reservation, lastReservedPages, doWrite);
    AutoLocker<VMCache> locker(cache);

    while (bytesLeft > 0) {
        // Periodically reevaluate the low memory situation and select the
        // read/write hook accordingly
        if (pagesProcessed % 32 == 0) {
            if (size >= BYPASS_IO_SIZE
                && low_resource_state(B_KERNEL_RESOURCE_PAGES)
                    != B_NO_LOW_RESOURCE) {
                // In low memory situations we bypass the cache beyond a
                // certain I/O size.
                function = doWrite ? write_to_file : read_from_file;
            } else
                function = doWrite ? write_to_cache : read_into_cache;
        }

        // check if this page is already in memory
        vm_page* page = cache->LookupPage(offset);
        if (page != NULL) {
            // The page may be busy - since we need to unlock the cache
            // sometime in the near future, we need to satisfy the request
            // of the pages we didn't get yet (to make sure no one else
            // interferes in the meantime).
            status_t status = satisfy_cache_io(ref, cookie, function, offset,
                buffer, useBuffer, pageOffset, bytesLeft, reservePages,
                lastOffset, lastBuffer, lastPageOffset, lastLeft,
                lastReservedPages, &reservation);
            if (status != B_OK)
                return status;

            // Since satisfy_cache_io() unlocks the cache, we need to look up
            // the page again.
            page = cache->LookupPage(offset);
            if (page != NULL && page->busy) {
                cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
                continue;
            }
        }

        size_t bytesInPage = min_c(size_t(B_PAGE_SIZE - pageOffset),
            bytesLeft);

        TRACE(("lookup page from offset %Ld: %p, size = %lu, pageOffset "
            "= %lu\n", offset, page, bytesLeft, pageOffset));

        if (page != NULL) {
            if (doWrite || useBuffer) {
                // Since the following user_mem{cpy,set}() might cause a page
                // fault, which in turn might cause pages to be reserved, we
                // need to unlock the cache temporarily to avoid a potential
                // deadlock. To make sure that our page doesn't go away, we
                // mark it busy for the time.
                page->busy = true;
                locker.Unlock();

                // copy the contents of the page already in memory
                phys_addr_t pageAddress
                    = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
                        + pageOffset;
                bool userBuffer = IS_USER_ADDRESS(buffer);
                if (doWrite) {
                    if (useBuffer) {
                        vm_memcpy_to_physical(pageAddress, (void*)buffer,
                            bytesInPage, userBuffer);
                    } else {
                        vm_memset_physical(pageAddress, 0, bytesInPage);
                    }
                } else if (useBuffer) {
                    vm_memcpy_from_physical((void*)buffer, pageAddress,
                        bytesInPage, userBuffer);
                }

                locker.Lock();

                if (doWrite) {
                    DEBUG_PAGE_ACCESS_START(page);

                    page->modified = true;

                    if (page->State() != PAGE_STATE_MODIFIED)
                        vm_page_set_state(page, PAGE_STATE_MODIFIED);

                    DEBUG_PAGE_ACCESS_END(page);
                }

                cache->MarkPageUnbusy(page);
            }

            // If it is cached only, requeue the page, so the respective
            // queue roughly remains LRU first sorted.
            if (page->State() == PAGE_STATE_CACHED
                    || page->State() == PAGE_STATE_MODIFIED) {
                DEBUG_PAGE_ACCESS_START(page);
                vm_page_requeue(page, true);
                DEBUG_PAGE_ACCESS_END(page);
            }

            if (bytesLeft <= bytesInPage) {
                // we've read the last page, so we're done!
                locker.Unlock();
                vm_page_unreserve_pages(&reservation);
                return B_OK;
            }

            // prepare a potential gap request
            lastBuffer = buffer + bytesInPage;
            lastLeft = bytesLeft - bytesInPage;
            lastOffset = offset + B_PAGE_SIZE;
            lastPageOffset = 0;
        }

        if (bytesLeft <= bytesInPage)
            break;

        buffer += bytesInPage;
        bytesLeft -= bytesInPage;
        pageOffset = 0;
        offset += B_PAGE_SIZE;
        pagesProcessed++;

        if (buffer - lastBuffer + lastPageOffset >= kMaxChunkSize) {
            status_t status = satisfy_cache_io(ref, cookie, function, offset,
                buffer, useBuffer, pageOffset, bytesLeft, reservePages,
                lastOffset, lastBuffer, lastPageOffset, lastLeft,
                lastReservedPages, &reservation);
            if (status != B_OK)
                return status;
        }
    }

    // fill the last remaining bytes of the request (either write or read)
    return function(ref, cookie, lastOffset, lastPageOffset, lastBuffer,
        lastLeft, useBuffer, &reservation, 0);
}
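/*
 * A minimal caller sketch, not Haiku's actual file_cache_read(): it shows
 * how a read entry point might forward to cache_io().  Passing
 * doWrite == false selects the read hooks (read_into_cache /
 * read_from_file); with doWrite == true and a zero buffer, cache_io()
 * would zero-fill the cached range instead.  The function name below is
 * an illustrative assumption.
 */
static status_t
file_cache_read_sketch(void* cacheRef, void* cookie, off_t offset,
    void* buffer, size_t* _size)
{
    // cache_io() clamps the request to the file size and updates *_size
    // to the number of bytes it will actually transfer.
    return cache_io(cacheRef, cookie, offset, (addr_t)buffer, _size, false);
}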