/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}
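/*
 * For context, a helper like this typically sits behind the filesystem's
 * ->launder_page() hook, which must drain any cache write in flight before
 * the page is cleaned.  A minimal sketch of such a hook follows; the
 * v9fs_launder_page name and its wiring are assumptions for illustration
 * and are not part of this excerpt.
 */
static int v9fs_launder_page(struct page *page)
{
	struct inode *inode = page->mapping->host;

	/* wait out any write to the cache backend before laundering */
	__v9fs_fscache_wait_on_page_write(inode, page);
	return 0;
}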
/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned long offset)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);

	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0) {
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
			ClearPageFsCache(page);
		}
#endif

		if (PagePrivate(page)) {
			if (wb && !PageWriteback(page)) {
				set_page_private(page, 0);
				afs_put_writeback(wb);
			}

			if (!page_private(page))
				ClearPagePrivate(page);
		}
	}

	_leave("");
}
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}
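/*
 * For context, this helper is meant to back the 9p ->invalidatepage()
 * operation, mirroring the AFS version above: cache state is only torn
 * down when the entire page is being invalidated.  A minimal sketch,
 * assuming the usual wiring; the v9fs_invalidate_page name is an
 * assumption and is not part of this excerpt.
 */
static void v9fs_invalidate_page(struct page *page, unsigned long offset)
{
	/* only tear down fscache state for whole-page invalidation */
	if (offset == 0)
		__v9fs_fscache_invalidate_page(page);
}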
int cifs_fscache_release_page(struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		struct inode *inode = page->mapping->host;
		struct cifsInodeInfo *cifsi = CIFS_I(inode);

		cFYI(1, "CIFS: fscache release page (0x%p/0x%p)",
		     page, cifsi->fscache);
		if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
			return 0;
	}

	return 1;
}
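/*
 * For context, a helper like this backs the filesystem's ->releasepage()
 * address_space operation: the VM asks whether the page can be freed, and
 * the fscache layer gets a veto while the page is busy with the cache.  A
 * minimal sketch of such a caller; the cifs_release_page name and its
 * placement are assumptions for illustration, not part of this excerpt.
 */
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	/* pages carrying private state cannot be released */
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}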
/*
 * Release the caching state associated with a page, if the page isn't busy
 * interacting with the cache.
 * - Returns true (can release page) or false (page busy).
 */
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);

		BUG_ON(!cookie);
		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
			 cookie, page, NFS_I(page->mapping->host));

		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;

		nfs_inc_fscache_stats(page->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED);
	}

	return 1;
}
/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		if (fscache_check_page_write(vnode->cache, page)) {
			if (!(gfp_flags & __GFP_WAIT)) {
				_leave(" = F [cache busy]");
				return 0;
			}
			fscache_wait_on_page_write(vnode->cache, page);
		}
		fscache_uncache_page(vnode->cache, page);
		ClearPageFsCache(page);
	}
#endif

	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}
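/*
 * For context, these AFS hooks plug into the address_space_operations
 * table for regular files.  A partial sketch of that table, using the
 * standard aops field names of this kernel era; any entries beyond the
 * three functions shown in this excerpt are omitted, and afs_readpage is
 * defined just below.
 */
static const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
};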
/*
 * AFS read page from file, directory or symlink
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct afs_vnode *vnode;
	struct inode *inode;
	struct key *key;
	size_t len;
	off_t offset;
	int ret;

	inode = page->mapping->host;

	if (file) {
		key = file->private_data;
		ASSERT(key != NULL);
	} else {
		key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
			goto error_nokey;
		}
	}

	_enter("{%x},{%lu},{%lu}",
	       key_serial(key), inode->i_ino, page->index);

	vnode = AFS_FS_I(inode);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
	default:
	go_on:
		offset = page->index << PAGE_CACHE_SHIFT;
		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));
			goto error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	if (!file)
		key_put(key);
	_leave(" = 0");
	return 0;

error:
	SetPageError(page);
	unlock_page(page);
	if (!file)
		key_put(key);
error_nokey:
	_leave(" = %d", ret);
	return ret;
}
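/*
 * The completion callback handed to fscache_read_or_alloc_page() above is
 * not part of this excerpt.  A minimal sketch, assuming the standard
 * fscache read-completion contract of this era (page, context, error): on
 * success the page data came from the cache and can be marked up to date;
 * either way the page is unlocked so the VM can retry a failed read.
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* on error, just unlock and let the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}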
/* Flush the whole mapping to the server, using WB_SYNC_ALL */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_CLEAN, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	do {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		} else if (!PagePrivate(page))
			break;
		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
		if (ret < 0)
			goto out_error;
	} while (PagePrivate(page));
	return 0;

out_error:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		     struct page *page)
{
	struct nfs_page *req;
	int ret;

	if (PageFsCache(page))
		nfs_fscache_release_page(page, GFP_KERNEL);

	req = nfs_find_and_lock_request(page);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = migrate_page(mapping, newpage, page);
	if (!req)
		goto out;
	if (ret)
		goto out_unlock;
	page_cache_get(newpage);
	req->wb_page = newpage;
	SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
out_unlock:
	nfs_clear_page_tag_locked(req);
	nfs_release_request(req);
out:
	return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}
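/*
 * A quick sanity check of the congestion formula above, as a standalone
 * user-space sketch assuming 4K pages (PAGE_SHIFT == 12).  For 1GB,
 * totalram_pages = 262144, sqrt(262144) = 512, and (16 * 512) << 2 =
 * 32768k, matching the table.  Note the kernel's int_sqrt() truncates,
 * so odd-power-of-two sizes come out a kB or so below the table values
 * (e.g. 11584k rather than 11585k for 128MB).
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	const int page_shift = 12;	/* assumed 4K pages */
	long sizes_mb[] = { 64, 128, 256, 512, 1024, 2048,
			    4096, 8192, 16384 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes_mb) / sizeof(sizes_mb[0]); i++) {
		long pages = sizes_mb[i] << (20 - page_shift);
		long kb = (long)(16.0 * sqrt((double)pages) *
				 (1 << (page_shift - 10)));

		if (kb > 256 * 1024)	/* same 256M cap as the kernel */
			kb = 256 * 1024;
		printf("%6ldMB: %7ldk\n", sizes_mb[i], kb);
	}
	return 0;
}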