/*
 * Serialize write access to the transport: grab XPRT_LOCKED and hand
 * the transport to the next task waiting to resend or send, provided
 * the congestion window has room for it.
 */
static void
__xprt_lock_write_next(struct rpc_xprt *xprt)
{
        struct rpc_task *task;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
                return;
        if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
                goto out_unlock;
        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
                task = rpc_wake_up_next(&xprt->sending);
                if (!task)
                        goto out_unlock;
        }
        if (xprt->nocong || __xprt_get_cong(xprt, task)) {
                struct rpc_rqst *req = task->tk_rqstp;

                xprt->snd_task = task;
                if (req) {
                        req->rq_bytes_sent = 0;
                        req->rq_ntrans++;
                }
                return;
        }
out_unlock:
        smp_mb__before_clear_bit();
        clear_bit(XPRT_LOCKED, &xprt->sockstate);
        smp_mb__after_clear_bit();
}
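For context, the matching release path in SUNRPC of this vintage clears XPRT_LOCKED under the transport lock and then immediately tries to promote the next waiter. The sketch below is an approximation of ~2.6-era net/sunrpc/xprt.c, reconstructed from memory; the helper and field names are assumed from that tree, not quoted verbatim.

/* Sketch of the matching release path (approximate, ~2.6-era
 * net/sunrpc/xprt.c; names assumed from that tree). */
static void
__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt->snd_task = NULL;
                smp_mb__before_clear_bit();
                clear_bit(XPRT_LOCKED, &xprt->sockstate);
                smp_mb__after_clear_bit();
                /* The lock is free again: pick the next sender, if any. */
                __xprt_lock_write_next(xprt);
        }
}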
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_cong)
                return 1;
        dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
                        task->tk_pid, xprt->cong, xprt->cwnd);
        if (RPCXPRT_CONGESTED(xprt))
                return 0;
        req->rq_cong = 1;
        xprt->cong += RPC_CWNDSCALE;
        return 1;
}
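The accounting here relies on a handful of SUNRPC constants: each in-flight request holds RPC_CWNDSCALE units of xprt->cong, so cwnd / RPC_CWNDSCALE is the number of requests allowed on the wire at once, and RPCXPRT_CONGESTED fires when that budget is exhausted. The definitions below are reproduced from memory of the era's include/linux/sunrpc/xprt.h, so treat the exact values as assumptions.

/* Approximate definitions from ~2.6-era include/linux/sunrpc/xprt.h
 * (from memory; exact values are assumptions, not verified). */
#define RPC_CWNDSHIFT           (8U)
#define RPC_CWNDSCALE           (1U << RPC_CWNDSHIFT)   /* 256 units per request */
#define RPC_INITCWND            RPC_CWNDSCALE           /* start at 1 request */
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)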
static void
__xprt_lock_write_next(struct rpc_xprt *xprt)
{
        struct rpc_task *task;

        if (xprt->snd_task)
                return;
        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
                if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
                        return;
                task = rpc_wake_up_next(&xprt->sending);
                if (!task)
                        return;
        }
        if (xprt->nocong || __xprt_get_cong(xprt, task))
                xprt->snd_task = task;
}
static void
__xprt_lock_write_next(struct rpc_xprt *xprt)
{
        struct rpc_task *task;

        if (xprt->snd_task)
                return;
        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
                if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
                        return;
                task = rpc_wake_up_next(&xprt->sending);
                if (!task)
                        return;
        }
        if (xprt->nocong || __xprt_get_cong(xprt, task)) {
                struct rpc_rqst *req = task->tk_rqstp;

                xprt->snd_task = task;
                if (req) {
                        /* Reset per-transmission bookkeeping: the request
                         * is starting a fresh (re)transmission. */
                        req->rq_bytes_sent = 0;
                        req->rq_ntrans++;
                }
        }
}