extern "C" status_t file_cache_write(void* _cacheRef, void* cookie, off_t offset, const void* buffer, size_t* _size) { file_cache_ref* ref = (file_cache_ref*)_cacheRef; if (ref->disabled_count > 0) { // Caching is disabled -- write directly to the file. if (buffer != NULL) { generic_io_vec vec; vec.base = (addr_t)buffer; generic_size_t size = vec.length = *_size; status_t error = vfs_write_pages(ref->vnode, cookie, offset, &vec, 1, 0, &size); *_size = size; return error; } // NULL buffer -- use a dummy buffer to write zeroes size_t size = *_size; while (size > 0) { size_t toWrite = min_c(size, kZeroVecSize); generic_size_t written = toWrite; status_t error = vfs_write_pages(ref->vnode, cookie, offset, sZeroVecs, kZeroVecCount, B_PHYSICAL_IO_REQUEST, &written); if (error != B_OK) return error; if (written == 0) break; offset += written; size -= written; } *_size -= size; return B_OK; } status_t status = cache_io(ref, cookie, offset, (addr_t)const_cast<void*>(buffer), _size, true); TRACE(("file_cache_write(ref = %p, offset = %Ld, buffer = %p, size = %lu)" " = %ld\n", ref, offset, buffer, *_size, status)); return status; }
static status_t
write_to_file(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	push_access(ref, offset, bufferSize, true);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	status_t status = B_OK;

	if (!useBuffer) {
		// zero-fill the range using the shared zero vecs
		while (bufferSize > 0) {
			generic_size_t written = min_c(bufferSize, kZeroVecSize);
			status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset,
				sZeroVecs, kZeroVecCount, B_PHYSICAL_IO_REQUEST, &written);
			if (status != B_OK)
				break;
			if (written == 0) {
				// nothing was written, but no error was reported
				status = B_ERROR;
				break;
			}
			// fall through below on error, so the cache lock is reacquired
			// before we return

			bufferSize -= written;
			pageOffset += written;
		}
	} else {
		generic_io_vec vec;
		vec.base = buffer;
		vec.length = bufferSize;
		generic_size_t toWrite = bufferSize;
		status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset,
			&vec, 1, 0, &toWrite);
	}

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, true);

	ref->cache->Lock();

	return status;
}
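// Sketch of the zero-fill machinery the !useBuffer path relies on (an
// assumption about this file's initialization code, shown only for
// illustration): every entry of sZeroVecs can alias the same zero-filled
// physical page, so a single vfs_write_pages() call pushes up to
// kZeroVecSize zero bytes to disk without needing a dedicated source buffer.
static void
init_zero_vecs_sketch(phys_addr_t zeroPage)
{
	for (uint32 i = 0; i < kZeroVecCount; i++) {
		sZeroVecs[i].base = zeroPage;
		sZeroVecs[i].length = B_PAGE_SIZE;
	}
	// with kZeroVecSize == kZeroVecCount * B_PAGE_SIZE, one call covers
	// the whole vector
}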
// fs_shell variant: unlike the kernel version above, it performs a plain
// iovec write through the host VFS, with no physical-page vecs, page
// reservations, or zero-fill path.
static fssh_status_t
write_to_file(file_cache_ref *ref, void *cookie, fssh_off_t offset,
	int32_t pageOffset, fssh_addr_t buffer, fssh_size_t bufferSize)
{
	fssh_iovec vec;
	vec.iov_base = (void *)buffer;
	vec.iov_len = bufferSize;

	// drop the cache lock while doing the (potentially blocking) I/O
	fssh_mutex_unlock(&ref->lock);
	fssh_status_t status = vfs_write_pages(ref->node, cookie,
		offset + pageOffset, &vec, 1, &bufferSize);
	fssh_mutex_lock(&ref->lock);

	return status;
}
/*!	Like read_into_cache() but writes data into the cache.
	To preserve data consistency, it might also read pages into the cache
	if only a partial page gets written. The same restrictions apply.
*/
static status_t
write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;
	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;
	status_t status = B_OK;

	// TODO: this should be settable somewhere
	bool writeThrough = false;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		// TODO: if space is becoming tight, and this cache is already grown
		// big - shouldn't we better steal the pages directly in that case?
		// (a working set like approach for the file cache)
		// TODO: the pages we allocate here should have been reserved upfront
		// in cache_io()
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation,
			(writeThrough ? PAGE_STATE_CACHED : PAGE_STATE_MODIFIED)
				| VM_PAGE_ALLOC_BUSY);

		page->modified = !writeThrough;

		ref->cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
	}

	push_access(ref, offset, bufferSize, true);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// copy contents (and read in partially written pages first)

	if (pageOffset != 0) {
		// This is only a partial write, so we have to read the rest of the
		// page from the file to have consistent data in the cache
		generic_io_vec readVec = { vecs[0].base, B_PAGE_SIZE };
		generic_size_t bytesRead = B_PAGE_SIZE;

		status = vfs_read_pages(ref->vnode, cookie, offset, &readVec, 1,
			B_PHYSICAL_IO_REQUEST, &bytesRead);
		// TODO: handle errors for real!
		if (status < B_OK)
			panic("1. vfs_read_pages() failed: %s!\n", strerror(status));
	}

	size_t lastPageOffset = (pageOffset + bufferSize) % B_PAGE_SIZE;
	if (lastPageOffset != 0) {
		// get the last page in the I/O vectors
		generic_addr_t last = vecs[vecCount - 1].base
			+ vecs[vecCount - 1].length - B_PAGE_SIZE;

		if ((off_t)(offset + pageOffset + bufferSize)
				== ref->cache->virtual_end) {
			// the space in the page after this write action needs to be
			// cleaned
			vm_memset_physical(last + lastPageOffset, 0,
				B_PAGE_SIZE - lastPageOffset);
		} else {
			// the end of this write does not happen on a page boundary, so we
			// need to fetch the last page before we can update it
			generic_io_vec readVec = { last, B_PAGE_SIZE };
			generic_size_t bytesRead = B_PAGE_SIZE;

			status = vfs_read_pages(ref->vnode, cookie,
				PAGE_ALIGN(offset + pageOffset + bufferSize) - B_PAGE_SIZE,
				&readVec, 1, B_PHYSICAL_IO_REQUEST, &bytesRead);
			// TODO: handle errors for real!
			if (status < B_OK)
				panic("vfs_read_pages() failed: %s!\n", strerror(status));

			if (bytesRead < B_PAGE_SIZE) {
				// the space beyond the file size needs to be cleaned
				vm_memset_physical(last + bytesRead, 0,
					B_PAGE_SIZE - bytesRead);
			}
		}
	}

	for (uint32 i = 0; i < vecCount; i++) {
		generic_addr_t base = vecs[i].base;
		generic_size_t bytes = min_c((generic_size_t)bufferSize,
			generic_size_t(vecs[i].length - pageOffset));

		if (useBuffer) {
			// copy data from user buffer
			vm_memcpy_to_physical(base + pageOffset, (void*)buffer, bytes,
				IS_USER_ADDRESS(buffer));
		} else {
			// clear buffer instead
			vm_memset_physical(base + pageOffset, 0, bytes);
		}

		bufferSize -= bytes;
		if (bufferSize == 0)
			break;

		buffer += bytes;
		pageOffset = 0;
	}

	if (writeThrough) {
		// write cached pages back to the file if we were asked to do that
		status_t status = vfs_write_pages(ref->vnode, cookie, offset, vecs,
			vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
		if (status < B_OK) {
			// TODO: remove allocated pages, ...?
			panic("file_cache: remove allocated pages! write pages failed: "
				"%s\n", strerror(status));
		}
	}

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, true);

	ref->cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		ref->cache->MarkPageUnbusy(pages[i]);

		DEBUG_PAGE_ACCESS_END(pages[i]);
	}

	return status;
}
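// Worked example (hypothetical helper, not part of this file), mirroring the
// extent math in write_to_cache() above: for a write with pageOffset == 904
// and bufferSize == 100 on a system with B_PAGE_SIZE == 4096, numBytes is
// PAGE_ALIGN(1004) == 4096 (one cache page) and lastPageOffset is 1004, so
// both the head of the page (pageOffset != 0) and its tail
// (lastPageOffset != 0) must be filled by a read or by zeroing before the
// user data is copied in.
static void
partial_write_extents_sketch(int32 pageOffset, size_t bufferSize,
	generic_size_t* _numBytes, size_t* _lastPageOffset)
{
	// number of whole cache pages backing the write
	*_numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	// 0 if and only if the write ends exactly on a page boundary
	*_lastPageOffset = (pageOffset + bufferSize) % B_PAGE_SIZE;
}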