/**
 * nfs_scan_list - Scan a list for matching requests
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space
 * starting at index idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function
 *
 * Returns the number of requests moved to @dst.
 */
int nfs_scan_list(struct list_head *head, struct list_head *dst,
		unsigned long idx_start, unsigned int npages)
{
	struct list_head *pos, *tmp;
	struct nfs_page *req;
	unsigned long idx_end;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;	/* npages == 0: scan to the end of the space */
	else
		idx_end = idx_start + npages - 1;

	/* _safe variant: entries may be unlinked while walking */
	list_for_each_safe(pos, tmp, head) {
		req = nfs_list_entry(pos);

		if (req->wb_index < idx_start)
			continue;
		if (req->wb_index > idx_end)
			break;
		/* skip requests that are already locked by someone else */
		if (!nfs_lock_request(req))
			continue;
		nfs_list_remove_request(req);
		nfs_list_add_request(req, dst);
		res++;
	}
	/* BUG FIX: original fell off the end of a non-void function
	 * (the "return res;" and closing brace were missing). */
	return res;
}
/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: request to lock
 *
 * Tries to take the per-request lock via nfs_lock_request(); on success,
 * tags the request's index in the owning inode's nfs_page_tree radix tree
 * with NFS_PAGE_TAG_WRITEBACK so the request can later be found by
 * tag-based lookups.
 *
 * Returns 1 if the lock was obtained and the tag set, 0 if the request
 * was already locked (in which case the tree is left untouched).
 *
 * NOTE(review): this helper reaches the inode via wb_context->dentry,
 * while the sibling nfs_set_page_tag_locked() in this file uses
 * wb_context->path.dentry — these come from different struct layouts;
 * confirm which matches the nfs_open_context definition in this tree.
 * NOTE(review): radix_tree_tag_set() is not visibly serialized here —
 * presumably the caller holds the appropriate inode lock; verify.
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			NFS_PAGE_TAG_WRITEBACK);
	return 1;
}
/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: request to lock
 *
 * Attempts to take the per-request lock; if successful, marks the
 * request's index in the owning inode's page radix tree with
 * NFS_PAGE_TAG_LOCKED so tag-based lookups can find it.
 *
 * Returns 1 if the lock was obtained (and the tag set), 0 if the
 * request was already locked.
 */
static int nfs_set_page_tag_locked(struct nfs_page *req)
{
	struct nfs_inode *inode_info =
		NFS_I(req->wb_context->path.dentry->d_inode);

	if (nfs_lock_request(req)) {
		radix_tree_tag_set(&inode_info->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
		return 1;
	}
	return 0;
}
/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			struct nfs_page *req)
{
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	/* Hold the page group lock for the whole splitting loop; it is
	 * dropped (and re-taken) around nfs_pageio_doio() below. */
	nfs_page_group_lock(req, false);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			/* Could not coalesce: flush what we have, then
			 * retry this subrequest against a fresh list. */
			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (desc->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req, false);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		/* Advance past the portion that was accepted. */
		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			/* Only part of the request fit: create a new
			 * subrequest (same page group) for the remainder. */
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	/* nfs_create_request() failed; propagate the error code. */
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}