static struct nfs_page *nfs_find_and_lock_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			break;
		if (nfs_set_page_tag_locked(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_set_page_tag_locked() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ERR_PTR(ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
	return req;
}
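
/*
 * Illustrative sketch (not part of the file above): a caller that pins
 * the write request for a page it already holds locked, in the style of
 * the page-migration path. It assumes nfs_clear_page_tag_locked() from
 * this era's fs/nfs/pagelist.c, which drops both the tag lock and the
 * reference that nfs_find_and_lock_request() returned.
 */
static int example_with_locked_request(struct page *page)
{
	struct nfs_page *req;

	req = nfs_find_and_lock_request(page);
	if (IS_ERR(req))
		return PTR_ERR(req);	/* interrupted in nfs_wait_on_request() */
	if (req == NULL)
		return 0;		/* no request attached to this page */
	/* ... operate on the tag-locked, referenced request ... */
	nfs_clear_page_tag_locked(req);	/* unlock and release our reference */
	return 0;
}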
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If @npages is 0, the entire address_space, starting at index
 * @idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages, int tag)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;
	struct list_head *list;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES, tag);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (nfs_set_page_tag_locked(req)) {
				kref_get(&req->wb_kref);
				radix_tree_tag_clear(&nfsi->nfs_page_tree,
						req->wb_index, tag);
				list = pnfs_choose_commit_list(req, dst);
				nfs_list_add_request(req, list);
				res++;
				if (res == INT_MAX)
					goto out;
			}
		}
		/* for latency reduction */
		cond_resched_lock(&nfsi->vfs_inode.i_lock);
	}
out:
	return res;
}
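
/*
 * Illustrative sketch (not from the file above): gathering every request
 * tagged for commit on an inode. NFS_PAGE_TAG_COMMIT is the tag used by
 * the commit path in this era, and i_lock is held across the call as the
 * kernel-doc above requires.
 */
static int example_scan_commits(struct inode *inode, struct list_head *dst)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int found;

	spin_lock(&inode->i_lock);
	/* idx_start == 0 and npages == 0: scan the whole address_space */
	found = nfs_scan_list(nfsi, dst, 0, 0, NFS_PAGE_TAG_COMMIT);
	spin_unlock(&inode->i_lock);
	return found;
}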
/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(&inode->i_lock);
			return 0;
		}
		if (nfs_set_page_tag_locked(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_set_page_tag_locked() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(&inode->i_lock);
	}
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	if (nfs_set_page_writeback(page) != 0) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	spin_unlock(&inode->i_lock);
	if (!nfs_pageio_add_request(pgio, req)) {
		nfs_redirty_request(req);
		return pgio->pg_error;
	}
	return 0;
}
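
/*
 * Illustrative sketch modelled on nfs_writepage_locked() from this era;
 * nfs_pageio_init_write() and wb_priority() are assumed from the
 * surrounding file and may differ in a pNFS-patched tree. It flushes a
 * single locked page through a freshly initialised pageio descriptor.
 */
static int example_flush_locked_page(struct page *page,
		struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
	err = nfs_page_async_flush(&pgio, page);
	nfs_pageio_complete(&pgio);	/* submit whatever was coalesced */
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}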
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	if (!PagePrivate(page))
		return NULL;

	end = offset + bytes;
	spin_lock(&inode->i_lock);

	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL)
			goto out_unlock;

		rqend = req->wb_offset + req->wb_bytes;
		/*
		 * Tell the caller to flush out the request if
		 * the offsets are non-contiguous.
		 * Note: nfs_flush_incompatible() will already
		 * have flushed out requests having wrong owners.
		 */
		if (offset > rqend || end < req->wb_offset)
			goto out_flushme;

		if (nfs_set_page_tag_locked(req))
			break;

		/* The request is locked, so wait and then retry */
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (error != 0)
			goto out_err;
		spin_lock(&inode->i_lock);
	}

	if (nfs_clear_request_commit(req))
		radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
				req->wb_index, NFS_PAGE_TAG_COMMIT);

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
out_unlock:
	spin_unlock(&inode->i_lock);
	return req;
out_flushme:
	spin_unlock(&inode->i_lock);
	nfs_release_request(req);
	error = nfs_wb_page(inode, page);
out_err:
	return ERR_PTR(error);
}
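
/*
 * Illustrative sketch of the typical caller, modelled on
 * nfs_setup_write_request() from this era: try to extend an existing
 * request first, and only allocate and insert a new one when no
 * matching request exists.
 */
static struct nfs_page *example_setup_request(struct nfs_open_context *ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int error;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		return req;	/* updated in place, or an ERR_PTR */
	req = nfs_create_request(ctx, inode, page, offset, bytes);
	if (IS_ERR(req))
		return req;
	error = nfs_inode_add_request(inode, req);
	if (error != 0) {
		nfs_release_request(req);
		req = ERR_PTR(error);
	}
	return req;
}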