/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(&inode->i_lock);
			return 0;
		}
		if (nfs_set_page_tag_locked(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_set_page_tag_locked() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(&inode->i_lock);
	}
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	if (nfs_set_page_writeback(page) != 0) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	spin_unlock(&inode->i_lock);
	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		return pgio->pg_error;
	}
	return 0;
}
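The version below refactors the find-and-lock loop out of nfs_page_async_flush() into a separate helper, nfs_find_and_lock_request(), which returns NULL when the page has no associated request and an ERR_PTR-encoded error when the wait was interrupted. Note the PTR_ERR()/IS_ERR() ordering: once req is known to be non-NULL, ret is loaded unconditionally and simply overwritten on the success path.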
/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_find_and_lock_request(page);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = nfs_set_page_writeback(page);
	BUG_ON(ret != 0);
	BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));

	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		ret = pgio->pg_error;
	}
out:
	return ret;
}
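The body of nfs_find_and_lock_request() is not shown here. A plausible sketch, assuming it simply hoists the original spin_lock/wait/retry loop and reports an interrupted wait as ERR_PTR(ret) rather than a bare error code:

/*
 * A plausible sketch of the helper assumed above: find the request
 * attached to the page and tag-lock it, returning NULL if there is no
 * request, the locked request on success, or an ERR_PTR if
 * nfs_wait_on_request() was interrupted by a signal.
 */
struct nfs_page *nfs_find_and_lock_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			break;
		if (nfs_set_page_tag_locked(req))
			break;
		/* As in the original: drop the lock, wait for whoever
		 * holds the request to release it, then retry.
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ERR_PTR(ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
	return req;
}

With this contract, the caller can distinguish the three outcomes (no request, locked request, signalled wait) without holding inode->i_lock itself, which is what lets the refactored nfs_page_async_flush() collapse its error paths into the two goto out branches.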