/*
 * Hand the per-transport send lock (XPRT_LOCKED in ->sockstate) to the
 * next queued task, if any, preferring the resend queue over the
 * sending queue.  Called without the lock held; returns with the lock
 * either granted to a woken task or released again.
 */
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	/* Try to take the lock; if another task already holds it, the
	 * current holder will hand it off when it releases. */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
		return;
	/* Congestion-controlled transports stop handing out the lock
	 * while the congestion window is full. */
	if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	/* Retransmissions take priority over first-time sends. */
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			/* Fresh transmission attempt: restart the byte
			 * count and bump the transmission counter. */
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	/* Barrier pair ensures prior stores are visible before the lock
	 * bit clears, and the clear is visible before later loads —
	 * the classic smp_mb__{before,after}_clear_bit() idiom. */
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->sockstate);
	smp_mb__after_clear_bit();
}
/*
 * Congestion-window variant of the send-lock handoff: grant XPRT_LOCKED
 * (in ->state) to the next queued task, but only if a congestion slot
 * can be obtained for it.  Resends are preferred over first sends.
 * If no eligible task is found, the lock is released via
 * xprt_clear_locked().
 */
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	/* Try to take the lock; bail out if another task holds it. */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	/* Don't hand out the lock while the congestion window is full. */
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	/* Retransmissions take priority over first-time sends. */
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			/* Fresh transmission attempt: restart the byte
			 * count and bump the transmission counter. */
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}
/**
 * xprt_rdma_free_slot - release an rpc_rqst
 * @xprt: controlling RPC transport
 * @rqst: rpc_rqst to release
 *
 * Scrubs the request, returns its containing rpcrdma_req to the buffer
 * pool, and wakes the next task waiting on the backlog queue.
 */
static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
	/* rpcr_to_rdmar() is pure address arithmetic (container_of), so
	 * computing it before the memset is safe. */
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	memset(rqst, 0, sizeof(*rqst));
	rpcrdma_buffer_put(req);
	rpc_wake_up_next(&xprt->backlog);
}
static void __xprt_lock_write_next(struct rpc_xprt *xprt) { struct rpc_task *task; if (xprt->snd_task) return; task = rpc_wake_up_next(&xprt->resend); if (!task) { if (!xprt->nocong && RPCXPRT_CONGESTED(xprt)) return; task = rpc_wake_up_next(&xprt->sending); if (!task) return; } if (xprt->nocong || __xprt_get_cong(xprt, task)) xprt->snd_task = task; }
static void __xprt_lock_write_next(struct rpc_xprt *xprt) { struct rpc_task *task; if (xprt->snd_task) return; task = rpc_wake_up_next(&xprt->resend); if (!task) { if (!xprt->nocong && RPCXPRT_CONGESTED(xprt)) return; task = rpc_wake_up_next(&xprt->sending); if (!task) return; } if (xprt->nocong || __xprt_get_cong(xprt, task)) { struct rpc_rqst *req = task->tk_rqstp; xprt->snd_task = task; if (req) { req->rq_bytes_sent = 0; req->rq_ntrans++; } } }
static void nfsd4_cb_done(struct rpc_task *task, void *calldata) { struct nfs4_delegation *dp = calldata; struct nfs4_client *clp = dp->dl_client; dprintk("%s: minorversion=%d\n", __func__, clp->cl_cb_conn.cb_minorversion); if (clp->cl_cb_conn.cb_minorversion) { /* No need for lock, access serialized in nfsd4_cb_prepare */ ++clp->cl_cb_seq_nr; clear_bit(0, &clp->cl_cb_slot_busy); rpc_wake_up_next(&clp->cl_cb_waitq); dprintk("%s: freed slot, new seqid=%d\n", __func__, clp->cl_cb_seq_nr); /* We're done looking into the sequence information */ task->tk_msg.rpc_resp = NULL; } }
static void nfs4_end_drain_session(struct nfs_client *clp) { struct nfs4_session *ses = clp->cl_session; int max_slots; if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) { spin_lock(&ses->fc_slot_table.slot_tbl_lock); max_slots = ses->fc_slot_table.max_slots; while (max_slots--) { struct rpc_task *task; task = rpc_wake_up_next(&ses->fc_slot_table. slot_tbl_waitq); if (!task) break; rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); } spin_unlock(&ses->fc_slot_table.slot_tbl_lock); } }