/*
 * Handle a single TX (send) completion from the completion queue.
 *
 * On an error status (other than a flush, which is expected during
 * teardown) the error is logged and the socket is notified with
 * ECONNRESET.  In all cases the mbuf associated with the work request
 * is looked up, logged, and freed.
 *
 * Returns 0 on success, -1 if no mbuf could be found for wc->wr_id.
 */
static int
sdp_handle_send_comp(struct sdp_sock *ssk, struct ib_wc *wc)
{
	struct mbuf *mb = NULL;
	struct sdp_bsdh *h;

	if (unlikely(wc->status)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			/* NOTE(review): mb is still NULL here — sdp_prf is
			 * presumably tolerant of a NULL mbuf; confirm against
			 * the macro definition. */
			sdp_prf(ssk->socket, mb, "Send completion with error. "
			    "Status %d", wc->status);
			sdp_dbg_data(ssk->socket, "Send completion with error. "
			    "Status %d\n", wc->status);
			/* Hard error on the send path: reset the connection. */
			sdp_notify(ssk, ECONNRESET);
		}
		/* Flush errors fall through: the mbuf is still reclaimed. */
	}

	/* Reclaim the mbuf posted for this work request id. */
	mb = sdp_send_completion(ssk, wc->wr_id);
	if (unlikely(!mb))
		return -1;

	/* The BSDH header sits at the front of the mbuf data. */
	h = mtod(mb, struct sdp_bsdh *);
	sdp_prf1(ssk->socket, mb, "tx completion. mseq:%d", ntohl(h->mseq));
	sdp_dbg(ssk->socket, "tx completion. %p %d mseq:%d",
	    mb, mb->m_pkthdr.len, ntohl(h->mseq));
	m_freem(mb);

	return 0;
}
static void sdp_tx_ring_purge(struct sdp_sock *ssk) { while (ring_posted(ssk->tx_ring)) { struct sk_buff *skb; skb = sdp_send_completion(ssk, ring_tail(ssk->tx_ring)); if (!skb) break; sk_wmem_free_skb(sk_ssk(ssk), skb); } }
static void sdp_tx_ring_purge(struct sdp_sock *ssk) { while (tx_ring_posted(ssk)) { struct mbuf *mb; mb = sdp_send_completion(ssk, ring_tail(ssk->tx_ring)); if (!mb) break; m_freem(mb); } }
/*
 * Dispatch a single TX-side work completion by the operation encoded
 * in wc->wr_id: ordinary sends free their sk_buff, RDMA-read
 * completions clear the in-flight marker and wake waiters, and
 * anything else is treated as a keepalive probe completion.
 * Any non-flush error status resets the socket with ECONNRESET.
 */
static inline void
sdp_process_tx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
{
	struct sock *sk = sk_ssk(ssk);

	if (likely(wc->wr_id & SDP_OP_SEND)) {
		/* Regular send: release the buffer charged to the socket. */
		struct sk_buff *skb;

		skb = sdp_send_completion(ssk, wc->wr_id);
		if (likely(skb))
			sk_wmem_free_skb(sk, skb);
	} else if (wc->wr_id & SDP_OP_RDMA) {
		if (ssk->tx_ring.rdma_inflight &&
		    ssk->tx_ring.rdma_inflight->busy) {
			/* Only the last RDMA read WR is signalled.  Ordering
			 * is guaranteed, so completion of the last WR implies
			 * all earlier RDMA reads have completed too. */
			ssk->tx_ring.rdma_inflight->busy = 0;
		} else {
			sdp_warn(sk, "Unexpected RDMA read completion, "
					"probably was canceled already\n");
		}

		/* Wake anyone blocked waiting for the RDMA read to finish. */
		wake_up(sdp_sk_sleep(sk));
	} else {
		/* Keepalive probe sent cleanup */
		sdp_cnt(sdp_keepalive_probes_sent);
	}

	/* Flush errors are expected during teardown; only real errors
	 * tear the connection down. */
	if (likely(!wc->status) || wc->status == IB_WC_WR_FLUSH_ERR)
		return;

	sdp_warn(sk, "Send completion with error. wr_id 0x%llx Status %d\n",
			wc->wr_id, wc->status);

	sdp_set_error(sk, -ECONNRESET);
}