void sdp_do_posts(struct sdp_sock *ssk) { struct socket *sk = ssk->socket; int xmit_poll_force; struct mbuf *mb; SDP_WLOCK_ASSERT(ssk); if (!ssk->qp_active) { sdp_dbg(sk, "QP is deactivated\n"); return; } while ((mb = ssk->rx_ctl_q)) { ssk->rx_ctl_q = mb->m_nextpkt; mb->m_nextpkt = NULL; sdp_process_rx_ctl_mb(ssk, mb); } if (ssk->state == TCPS_TIME_WAIT) return; if (!ssk->rx_ring.cq || !ssk->tx_ring.cq) return; sdp_post_recvs(ssk); if (tx_ring_posted(ssk)) sdp_xmit_poll(ssk, 1); sdp_post_sends(ssk, M_NOWAIT); xmit_poll_force = tx_credits(ssk) < SDP_MIN_TX_CREDITS; if (credit_update_needed(ssk) || xmit_poll_force) { /* if has pending tx because run out of tx_credits - xmit it */ sdp_prf(sk, NULL, "Processing to free pending sends"); sdp_xmit_poll(ssk, xmit_poll_force); sdp_prf(sk, NULL, "Sending credit update"); sdp_post_sends(ssk, M_NOWAIT); } }
/*
 * Flush everything that can currently be posted to the SDP transmit
 * ring: pending SrcAvail-abort messages (RdmaRdCompl / SendSm), a
 * buffer-size change ack, queued data from the socket write queue, a
 * standalone credit update, and finally a DisConn if one is pending.
 *
 * Each post is gated on having more than the reserved minimum of TX
 * credits (SDP_MIN_TX_CREDITS) and on free TX ring slots.  Returns the
 * number of messages posted (also returned from the no_mem path when an
 * allocation fails partway), or -ECONNRESET when the CM id is gone.
 *
 * NOTE(review): gfp is the allocation context passed through to the
 * sdp_alloc_skb_* helpers — presumably callers pass GFP_ATOMIC from
 * softirq context; confirm against callers.
 */
int sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
{
	/* TODO: nonagle? */
	struct sk_buff *skb;
	int post_count = 0;
	struct sock *sk = sk_ssk(ssk);

	/* No CM id: the connection is dead; reset if data was queued. */
	if (unlikely(!ssk->id)) {
		if (sk->sk_send_head) {
			sdp_dbg(sk, "Send on socket without cmid ECONNRESET\n");
			/* TODO: flush send queue? */
			sdp_reset(sk);
		}
		return -ECONNRESET;
	}
again:
	/* Ring more than half full: reap completions before posting. */
	if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
		sdp_xmit_poll(ssk, 1);

	/* Run out of credits, check if got a credit update */
	if (unlikely(tx_credits(ssk) <= SDP_MIN_TX_CREDITS)) {
		sdp_poll_rx_cq(ssk);
		if (unlikely(sdp_should_rearm(sk) || !posts_handler(ssk)))
			sdp_arm_rx_cq(sk);
	}

	/* Diagnostic only: we want to abort a SrcAvail but cannot send. */
	if (unlikely((ssk->sa_post_rdma_rd_compl || ssk->sa_post_sendsm) &&
	    tx_credits(ssk) < SDP_MIN_TX_CREDITS)) {
		sdp_dbg_data(sk, "Run out of credits, can't abort SrcAvail. "
		    "RdmaRdCompl: %d SendSm: %d\n",
		    ssk->sa_post_rdma_rd_compl, ssk->sa_post_sendsm);
	}

	/* Report completed RDMA reads back to the peer. */
	if (ssk->sa_post_rdma_rd_compl && tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		int unreported = ssk->sa_post_rdma_rd_compl;
		skb = sdp_alloc_skb_rdmardcompl(sk, unreported, gfp);
		if (!skb)
			goto no_mem;
		sdp_post_send(ssk, skb);
		post_count++;
		/* Cleared only after the post succeeds. */
		ssk->sa_post_rdma_rd_compl = 0;
	}

	/* Pending SendSm (switch peer back to send mode). */
	if (ssk->sa_post_sendsm && tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		skb = sdp_alloc_skb_sendsm(sk, gfp);
		if (unlikely(!skb))
			goto no_mem;
		sdp_post_send(ssk, skb);
		ssk->sa_post_sendsm = 0;
		post_count++;
	}

	/* Acknowledge a peer request to change the receive buffer size. */
	if (ssk->recv_request &&
	    ring_tail(ssk->rx_ring) >= SDP_MIN_TX_CREDITS &&
	    tx_credits(ssk) >= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk)) {
		skb = sdp_alloc_skb_chrcvbuf_ack(sk,
		    ssk->recv_frags * PAGE_SIZE, gfp);
		if (!skb)
			goto no_mem;
		ssk->recv_request = 0;
		sdp_post_send(ssk, skb);
		post_count++;
	}

	/* Stats: data is ready to go but we are out of credits. */
	if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk) &&
	    sk->sk_send_head &&
	    sdp_nagle_off(ssk, sk->sk_send_head)) {
		SDPSTATS_COUNTER_INC(send_miss_no_credits);
	}

	/* Post queued data while credits, ring slots and Nagle allow. */
	while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk) &&
	    (skb = sk->sk_send_head) &&
	    sdp_nagle_off(ssk, skb)) {
		update_send_head(sk, skb);
		__skb_dequeue(&sk->sk_write_queue);
		sdp_post_send(ssk, skb);
		post_count++;
	}

	/* Standalone credit update, only in ESTABLISHED/FIN_WAIT1. */
	if (credit_update_needed(ssk) &&
	    likely((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
		skb = sdp_alloc_skb_data(sk, 0, gfp);
		if (!skb)
			goto no_mem;
		/* Account the zero-payload skb against the socket. */
		sk->sk_wmem_queued += skb->truesize;
		sk_mem_charge(sk, skb->truesize);
		sdp_post_send(ssk, skb);
		SDPSTATS_COUNTER_INC(post_send_credits);
		post_count++;
	}

	/* send DisConn if needed
	 * Do not send DisConn if there is only 1 credit. Compliance with CA4-82
	 * If one credit is available, an implementation shall only send SDP
	 * messages that provide additional credits and also do not contain ULP
	 * payload.
	 */
	if (unlikely(ssk->sdp_disconnect) &&
	    !sk->sk_send_head &&
	    tx_credits(ssk) >= SDP_MIN_TX_CREDITS) {
		skb = sdp_alloc_skb_disconnect(sk, gfp);
		if (!skb)
			goto no_mem;
		ssk->sdp_disconnect = 0;
		sdp_post_send(ssk, skb);
		post_count++;
	}

	/*
	 * If we posted anything, or the ring is full, poll for
	 * completions and retry — more credits/slots may have freed up.
	 */
	if (!sdp_tx_ring_slots_left(ssk) || post_count) {
		if (sdp_xmit_poll(ssk, 1))
			goto again;
	}

no_mem:
	return post_count;
}
/*
 * Post everything currently sendable on an SDP socket: an ack for a
 * peer receive-buffer change request, queued data mbufs from so_snd, a
 * standalone credit update, and a DisConn when SDP_NEEDFIN is set.
 *
 * Each post is gated on transmit credits (SDP_MIN_TX_CREDITS reserved)
 * and free TX ring slots.  On allocation failure the nagle timer is
 * armed to retry later (allocfail path).
 *
 * NOTE(review): 'wait' is passed through to the sdp_alloc_mb_* helpers;
 * presumably an M_NOWAIT/M_WAITOK mbuf wait flag (sdp_do_posts above
 * passes M_NOWAIT) — confirm against the allocators.
 */
void sdp_post_sends(struct sdp_sock *ssk, int wait)
{
	struct mbuf *mb;
	int post_count = 0;
	struct socket *sk;
	int low;

	sk = ssk->socket;
	/* No CM id: connection is dead; notify if data was queued. */
	if (unlikely(!ssk->id)) {
		if (sk->so_snd.sb_sndptr) {
			sdp_dbg(ssk->socket,
			    "Send on socket without cmid ECONNRESET.\n");
			sdp_notify(ssk, ECONNRESET);
		}
		return;
	}
again:
	/* Ring more than half full: reap completions before posting. */
	if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
		sdp_xmit_poll(ssk, 1);

	/* Acknowledge a peer request to change the receive buffer size. */
	if (ssk->recv_request &&
	    ring_tail(ssk->rx_ring) >= ssk->recv_request_head &&
	    tx_credits(ssk) >= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk)) {
		mb = sdp_alloc_mb_chrcvbuf_ack(sk,
		    ssk->recv_bytes - SDP_HEAD_SIZE, wait);
		if (mb == NULL)
			goto allocfail;
		ssk->recv_request = 0;
		sdp_post_send(ssk, mb);
		post_count++;
	}

	/* Stats: data is ready to go but we are out of credits. */
	if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk) &&
	    sk->so_snd.sb_sndptr &&
	    sdp_nagle_off(ssk, sk->so_snd.sb_sndptr)) {
		SDPSTATS_COUNTER_INC(send_miss_no_credits);
	}

	/* Post queued data while credits, ring slots and Nagle allow. */
	while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
	    sdp_tx_ring_slots_left(ssk) &&
	    (mb = sk->so_snd.sb_sndptr) &&
	    sdp_nagle_off(ssk, mb)) {
		struct mbuf *n;

		/*
		 * Unlink the packet from the send sockbuf and release
		 * its byte accounting under the sockbuf lock; the mbuf
		 * itself is handed to the TX ring below.
		 */
		SOCKBUF_LOCK(&sk->so_snd);
		sk->so_snd.sb_sndptr = mb->m_nextpkt;
		sk->so_snd.sb_mb = mb->m_nextpkt;
		mb->m_nextpkt = NULL;
		SB_EMPTY_FIXUP(&sk->so_snd);
		for (n = mb; n != NULL; n = n->m_next)
			sbfree(&sk->so_snd, n);
		SOCKBUF_UNLOCK(&sk->so_snd);
		sdp_post_send(ssk, mb);
		post_count++;
	}

	/* Standalone credit update, only in established states. */
	if (credit_update_needed(ssk) &&
	    ssk->state >= TCPS_ESTABLISHED &&
	    ssk->state < TCPS_FIN_WAIT_2) {
		mb = sdp_alloc_mb_data(ssk->socket, wait);
		if (mb == NULL)
			goto allocfail;
		sdp_post_send(ssk, mb);
		SDPSTATS_COUNTER_INC(post_send_credits);
		post_count++;
	}

	/* send DisConn if needed
	 * Do not send DisConn if there is only 1 credit. Compliance with CA4-82
	 * If one credit is available, an implementation shall only send SDP
	 * messages that provide additional credits and also do not contain ULP
	 * payload.
	 */
	if ((ssk->flags & SDP_NEEDFIN) &&
	    !sk->so_snd.sb_sndptr &&
	    tx_credits(ssk) > 1) {
		mb = sdp_alloc_mb_disconnect(sk, wait);
		if (mb == NULL)
			goto allocfail;
		/* Cleared only after the allocation succeeds. */
		ssk->flags &= ~SDP_NEEDFIN;
		sdp_post_send(ssk, mb);
		post_count++;
	}

	/*
	 * If we posted anything or the ring is nearly full, arm the TX
	 * CQ when low and poll for completions; retry if any completed.
	 */
	low = (sdp_tx_ring_slots_left(ssk) <= SDP_MIN_TX_CREDITS);
	if (post_count || low) {
		if (low)
			sdp_arm_tx_cq(ssk);
		if (sdp_xmit_poll(ssk, low))
			goto again;
	}
	return;

allocfail:
	/* Could not allocate an mbuf: arm the nagle timer to retry. */
	ssk->nagle_last_unacked = -1;
	callout_reset(&ssk->nagle_timer, 1, sdp_nagle_timeout, ssk);
	return;
}