int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx;
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
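/*
 * Illustration only: a minimal userspace model of the flush-and-recheck
 * loop above. All names here (demo_req, find_request, flush_page) are
 * hypothetical stand-ins, not kernel APIs. The point is why this is a
 * do/while: after a successful flush another request may have been
 * attached to the page, so the check must be repeated until either no
 * incompatible request is found or the flush itself fails.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_req { void *page; void *ctx; };

static struct demo_req stale_req;		/* owned by another context */
static struct demo_req *attached = &stale_req;

static struct demo_req *find_request(void *page)
{
	(void)page;
	return attached;
}

static int flush_page(void)
{
	attached = NULL;			/* flush detaches the request */
	return 0;
}

static int flush_incompatible(void *page, void *ctx)
{
	struct demo_req *req;
	int do_flush, status;

	do {
		req = find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->page != page || req->ctx != ctx;
		if (!do_flush)
			return 0;
		status = flush_page();
	} while (status == 0);
	return status;
}

int main(void)
{
	int page, ctx;

	printf("%d\n", flush_incompatible(&page, &ctx));	/* prints 0 */
	return 0;
}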
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page->mapping->host;

	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
		inode->i_ino, (long long)page_offset(page));

	return nfs_wb_page(inode, page);
}
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	if (gfp & __GFP_FS)
		return !nfs_wb_page(page->mapping->host, page);
	else
		/*
		 * Avoid deadlock on nfs_wait_on_request().
		 */
		return 0;
}
/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
		inode->i_ino, (long long)page_offset(page));

	nfs_fscache_wait_on_page_write(nfsi, page);
	return nfs_wb_page(inode, page);
}
/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	if (gfp & __GFP_WAIT)
		nfs_wb_page(page->mapping->host, page);
	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}
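/*
 * Illustration only: why nfs_release_page() looks at the gfp mask.
 * Writeback may sleep, so it is attempted only when the allocation
 * context permits waiting; the page then stays unfreeable while
 * private state remains. DEMO_GFP_WAIT models __GFP_WAIT; nothing
 * below is a kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_GFP_WAIT 0x01

static bool page_private = true;	/* models PagePrivate(page) */

static void writeback_page(void)
{
	page_private = false;		/* flush clears the private state */
}

static int release_page(unsigned gfp)
{
	if (gfp & DEMO_GFP_WAIT)
		writeback_page();
	if (page_private)
		return 0;		/* may not release the page */
	return 1;			/* may release the page */
}

int main(void)
{
	printf("%d\n", release_page(0));		/* 0: atomic context */
	printf("%d\n", release_page(DEMO_GFP_WAIT));	/* 1: flushed first */
	return 0;
}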
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page)
{
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	int inode_referenced = 0;
	int err;

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page(inode, page);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
	/* OK, are we completely out? */
	err = -EIO;
	if (page->index >= end_index + 1 || !offset)
		goto out;
do_it:
	lock_kernel();
	if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode) &&
	    inode_referenced) {
		err = nfs_writepage_async(NULL, inode, page, 0, offset);
		if (err >= 0)
			err = 0;
	} else {
		err = nfs_writepage_sync(NULL, inode, page, 0, offset);
		if (err == offset)
			err = 0;
	}
	unlock_kernel();
out:
	UnlockPage(page);
	if (inode_referenced)
		iput(inode);
	return err;
}
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page)
{
	struct inode *inode;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	int err;
	struct address_space *mapping = page->mapping;

	if (!mapping)
		BUG();
	inode = mapping->host;
	if (!inode)
		BUG();
	end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page(inode, page);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
	/* OK, are we completely out? */
	err = -EIO;
	if (page->index >= end_index + 1 || !offset)
		goto out;
do_it:
	lock_kernel();
	if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode)) {
		err = nfs_writepage_async(NULL, inode, page, 0, offset);
		if (err >= 0)
			err = 0;
	} else {
		err = nfs_writepage_sync(NULL, inode, page, 0, offset);
		if (err == offset)
			err = 0;
	}
	unlock_kernel();
out:
	UnlockPage(page);
	return err;
}
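/*
 * Illustration only: the end_index/offset arithmetic shared by both
 * nfs_writepage() variants above, extracted into a standalone helper
 * (hypothetical, with a fixed 4K page size).
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

static long bytes_to_write(unsigned long i_size, unsigned long index)
{
	unsigned long end_index = i_size >> DEMO_PAGE_SHIFT;
	unsigned long offset;

	if (index < end_index)
		return DEMO_PAGE_SIZE;		/* page wholly inside file */
	offset = i_size & (DEMO_PAGE_SIZE - 1);
	if (index >= end_index + 1 || !offset)
		return -1;			/* past EOF: the -EIO case */
	return offset;				/* partial final page */
}

int main(void)
{
	/* 10000-byte file: pages 0-1 are full, page 2 holds 1808 bytes,
	 * page 3 is entirely beyond EOF. */
	printf("%ld %ld %ld %ld\n",
	       bytes_to_write(10000, 0), bytes_to_write(10000, 1),
	       bytes_to_write(10000, 2), bytes_to_write(10000, 3));
	return 0;
}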
/*
 * Read a page over NFS.
 * We read the page synchronously in the following cases:
 *  -	The file is a swap file. Swap-ins are always sync operations,
 *	so there's no need bothering to make async reads 100% fail-safe.
 *  -	The NFS rsize is smaller than PAGE_SIZE. We could kludge our way
 *	around this by creating several consecutive read requests, but
 *	that's hardly worth it.
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 *  -	The server is congested.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%ld)\n",
		page, PAGE_SIZE, page->offset);
	atomic_inc(&page->count);
	set_bit(PG_locked, &page->flags);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_error;

	error = -1;
	if (!IS_SWAPFILE(inode) && !PageError(page) &&
	    NFS_SERVER(inode)->rsize >= PAGE_SIZE)
		error = nfs_readpage_async(dentry, inode, page);
	if (error >= 0)
		goto out;

	error = nfs_readpage_sync(dentry, inode, page);
	if (error < 0 && IS_SWAPFILE(inode))
		printk("Aiee.. nfs swap-in of page failed!\n");
	goto out_free;

out_error:
	clear_bit(PG_locked, &page->flags);
out_free:
	free_page(page_address(page));
out:
	return error;
}
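/*
 * Illustration only: the sync-vs-async read decision from nfs_readpage()
 * above as a pure predicate (hypothetical helper, 4K pages assumed).
 * An async read is attempted only when none of the conditions listed in
 * the comment block forces a synchronous read.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

static bool can_read_async(bool swapfile, bool page_error, unsigned rsize)
{
	return !swapfile && !page_error && rsize >= DEMO_PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", can_read_async(false, false, 8192));	/* 1 */
	printf("%d\n", can_read_async(false, false, 1024));	/* 0: small rsize */
	printf("%d\n", can_read_async(true, false, 8192));	/* 0: swap file */
	return 0;
}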
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			goto out_unlock;

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend || end < req->wb_offset)
			goto out_flushme;

		if (nfs_set_page_tag_locked(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	if (nfs_clear_request_commit(req))
		radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
				req->wb_index, NFS_PAGE_TAG_COMMIT);

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	spin_unlock(&inode->i_lock);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}
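/*
 * Illustration only: the region-merge arithmetic from
 * nfs_try_to_update_request() in isolation. A request covering
 * [offset, offset+bytes) can absorb a new dirty range only if the two
 * ranges touch or overlap; otherwise the caller flushes instead.
 * Types and names below are hypothetical.
 */
#include <stdio.h>

struct region { unsigned int offset, bytes; };

static int try_update(struct region *req, unsigned int offset,
		      unsigned int bytes)
{
	unsigned int rqend = req->offset + req->bytes;
	unsigned int end = offset + bytes;

	if (offset > rqend || end < req->offset)
		return -1;	/* non-contiguous: must flush instead */
	if (offset < req->offset)
		req->offset = offset;
	req->bytes = (end > rqend ? end : rqend) - req->offset;
	return 0;
}

int main(void)
{
	struct region req = { .offset = 100, .bytes = 50 };	/* [100,150) */

	try_update(&req, 120, 80);		/* grows to [100,200) */
	printf("[%u,%u)\n", req.offset, req.offset + req.bytes);
	printf("%d\n", try_update(&req, 500, 10));	/* -1: disjoint */
	return 0;
}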
static int nfs_launder_page(struct page *page)
{
	return nfs_wb_page(page->mapping->host, page);
}