/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->dentry->d_inode;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);

        if (!NFS_WBACK_BUSY(req))
                return 0;
        return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req));
}
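/*
 * Illustrative sketch, not part of the original source: callers are
 * expected to hold their own reference on the request across the wait,
 * as nfs_wait_on_requests_locked() below does.  The helper name is
 * hypothetical, and it assumes the kref-based wb_kref variant used
 * elsewhere in this file.
 */
static int
nfs_wait_on_one_request_sketch(struct nfs_page *req)
{
        int error;

        kref_get(&req->wb_kref);        /* pin the request while we sleep */
        error = nfs_wait_on_request(req);
        nfs_release_request(req);       /* drop our reference again */
        return error;
}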
/*
 * Wait for a request to complete.
 *
 * Interruptible by fatal signals only.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start,
                                unsigned int npages)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *req;
        pgoff_t idx_end, next;
        unsigned int res = 0;
        int error;

        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        next = idx_start;
        while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req,
                                next, 1, NFS_PAGE_TAG_LOCKED)) {
                if (req->wb_index > idx_end)
                        break;

                next = req->wb_index + 1;
                BUG_ON(!NFS_WBACK_BUSY(req));

                kref_get(&req->wb_kref);
                spin_unlock(&inode->i_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                spin_lock(&inode->i_lock);
                if (error < 0)
                        return error;
                res++;
        }
        return res;
}
/*
 * Remove a write request from an inode
 */
static inline void
nfs_inode_remove_request(struct nfs_page *req)
{
        struct inode *inode;

        spin_lock(&nfs_wreq_lock);
        if (list_empty(&req->wb_hash)) {
                spin_unlock(&nfs_wreq_lock);
                return;
        }
        if (!NFS_WBACK_BUSY(req))
                printk(KERN_ERR "NFS: unlocked request attempted unhashed!\n");
        inode = req->wb_inode;
        list_del(&req->wb_hash);
        INIT_LIST_HEAD(&req->wb_hash);
        inode->u.nfs_i.npages--;
        if ((inode->u.nfs_i.npages == 0) != list_empty(&inode->u.nfs_i.writeback))
                printk(KERN_ERR "NFS: desynchronized value of nfs_i.npages.\n");
        if (list_empty(&inode->u.nfs_i.writeback)) {
                spin_unlock(&nfs_wreq_lock);
                iput(inode);
        } else
                spin_unlock(&nfs_wreq_lock);
        nfs_clear_request(req);
        nfs_release_request(req);
}
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
}
/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
        if (!atomic_dec_and_test(&req->wb_count))
                return;

#ifdef NFS_PARANOIA
        BUG_ON (!list_empty(&req->wb_list));
        BUG_ON (NFS_WBACK_BUSY(req));
#endif

        /* Release struct file or cached credential */
        nfs_clear_request(req);
        put_nfs_open_context(req->wb_context);
        nfs_page_free(req);
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        BUG_ON (!NFS_WBACK_BUSY(req));

        spin_lock(&inode->i_lock);
        set_page_private(req->wb_page, 0);
        ClearPagePrivate(req->wb_page);
        radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
        nfsi->npages--;
        if (!nfsi->npages) {
                spin_unlock(&inode->i_lock);
                iput(inode);
        } else
                spin_unlock(&inode->i_lock);
        nfs_clear_request(req);
        nfs_release_request(req);
}
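/*
 * Illustrative sketch, not part of the original source: a rough
 * counterpart to the radix-tree based nfs_inode_remove_request()
 * above, showing how a request might be attached to the inode.  The
 * helper name is hypothetical and details of the real insert path
 * (radix-tree preloading, request locking, delegation handling) are
 * omitted or simplified.
 */
static int nfs_inode_add_request_sketch(struct inode *inode,
                                struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int error;

        spin_lock(&inode->i_lock);
        error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
        if (error == 0) {
                if (!nfsi->npages)
                        igrab(inode);   /* first page pins the inode */
                SetPagePrivate(req->wb_page);
                set_page_private(req->wb_page, (unsigned long)req);
                nfsi->npages++;
                kref_get(&req->wb_kref);
        }
        spin_unlock(&inode->i_lock);
        return error;
}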
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int
nfs_wait_on_requests(struct inode *inode, struct file *file,
                unsigned long idx_start, unsigned int npages)
{
        struct list_head *p, *head;
        unsigned long idx_end;
        unsigned int res = 0;
        int error;

        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        head = &inode->u.nfs_i.writeback;
 restart:
        spin_lock(&nfs_wreq_lock);
        list_for_each_prev(p, head) {
                unsigned long pg_idx;
                struct nfs_page *req = nfs_inode_wb_entry(p);

                if (file && req->wb_file != file)
                        continue;

                pg_idx = page_index(req->wb_page);
                if (pg_idx < idx_start)
                        break;
                if (pg_idx > idx_end)
                        continue;

                if (!NFS_WBACK_BUSY(req))
                        continue;
                req->wb_count++;
                spin_unlock(&nfs_wreq_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (error < 0)
                        return error;
                res++;
                goto restart;
        }
        spin_unlock(&nfs_wreq_lock);
        return res;
}
/*
 * Insert a write request into an inode
 * Note: we sort the list in order to be able to optimize nfs_find_request()
 * & co. for the 'write append' case. For 2.5 we may want to consider
 * some form of hashing so as to perform well on random writes.
 */
static inline void
nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
        struct list_head *pos, *head;
        unsigned long pg_idx = page_index(req->wb_page);

        if (!list_empty(&req->wb_hash))
                return;

        if (!NFS_WBACK_BUSY(req))
                printk(KERN_ERR "NFS: unlocked request attempted hashed!\n");

        head = &inode->u.nfs_i.writeback;
        if (list_empty(head))
                igrab(inode);
        list_for_each_prev(pos, head) {
                struct nfs_page *entry = nfs_inode_wb_entry(pos);
                if (page_index(entry->wb_page) < pg_idx)
                        break;
        }
        inode->u.nfs_i.npages++;
        list_add(&req->wb_hash, pos);
        req->wb_count++;
}
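/*
 * Illustrative sketch, not part of the original source: why the sorted
 * writeback list helps the 'write append' case mentioned above.  A
 * lookup that walks backwards from the tail meets the highest page
 * indices first, so sequential appenders find their request almost
 * immediately, and the sorted order lets the walk stop early when the
 * index has been passed.  The helper name is hypothetical.
 */
static struct nfs_page *
nfs_find_request_sketch(struct inode *inode, unsigned long pg_idx)
{
        struct list_head *p;

        list_for_each_prev(p, &inode->u.nfs_i.writeback) {
                struct nfs_page *req = nfs_inode_wb_entry(p);

                if (page_index(req->wb_page) == pg_idx)
                        return req;
                /* Sorted list: once we drop below pg_idx it cannot be here. */
                if (page_index(req->wb_page) < pg_idx)
                        break;
        }
        return NULL;
}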