/* Drain the interface's queue of pending control operations, posting each
 * one until the transport runs out of resources */
static void uct_ud_verbs_iface_progress_pending(uct_ud_verbs_iface_t *iface)
{
    uct_ud_ep_t *ep;
    ucs_status_t status;
    uct_ud_neth_t neth;
    uct_ud_send_skb_t *skb;

    while (!ucs_queue_is_empty(&iface->super.tx.pending_ops)) {
        status = uct_ud_iface_get_next_pending(&iface->super, &ep, &neth, &skb);
        if (status == UCS_ERR_NO_RESOURCE) {
            return;
        }
        if (status == UCS_INPROGRESS) {
            continue;
        }

        if (ucs_unlikely(skb != NULL)) {
            /* TODO: not every skb is inline */
            iface->tx.sge[0].addr   = (uintptr_t)skb->neth;
            iface->tx.sge[0].length = skb->len;
            uct_ud_verbs_iface_tx_ctl(iface,
                                      ucs_derived_of(ep, uct_ud_verbs_ep_t));
            uct_ud_ep_log_tx_tag("PENDING_TX: (skb)", ep, skb->neth, skb->len);
        } else {
            iface->tx.sge[0].addr   = (uintptr_t)&neth;
            iface->tx.sge[0].length = sizeof(neth);
            UCT_UD_EP_HOOK_CALL_TX(ep, &neth);
            uct_ud_verbs_iface_tx_ctl(iface,
                                      ucs_derived_of(ep, uct_ud_verbs_ep_t));
            uct_ud_ep_log_tx_tag("PENDING_TX: (neth)", ep, &neth, sizeof(neth));
        }
    }
}
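For reference, a minimal, self-contained sketch of the ucs_queue drain pattern used above: ucs_queue_is_empty() guards a loop that pulls elements until the queue is exhausted. The toy elem_t type and drain() helper are illustrative, not part of UCX; the queue macros themselves are the same ones the snippet uses.

#include <ucs/datastruct/queue.h>
#include <stdio.h>

typedef struct {
    int              value;
    ucs_queue_elem_t queue;  /* intrusive queue link */
} elem_t;

static void drain(ucs_queue_head_t *q)
{
    elem_t *e;

    /* Same idiom as the pending-ops loop above: test for emptiness, then
     * pull with the non-empty variant */
    while (!ucs_queue_is_empty(q)) {
        e = ucs_queue_pull_elem_non_empty(q, elem_t, queue);
        printf("value=%d\n", e->value);
    }
}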
static ucs_status_t uct_cuda_copy_iface_flush(uct_iface_h tl_iface,
                                              unsigned flags,
                                              uct_completion_t *comp)
{
    uct_cuda_copy_iface_t *iface = ucs_derived_of(tl_iface,
                                                  uct_cuda_copy_iface_t);

    if (comp != NULL) {
        return UCS_ERR_UNSUPPORTED;
    }

    /* The flush completes only when no cuda events are outstanding in
     * either direction */
    if (ucs_queue_is_empty(&iface->outstanding_d2h_cuda_event_q) &&
        ucs_queue_is_empty(&iface->outstanding_h2d_cuda_event_q)) {
        UCT_TL_IFACE_STAT_FLUSH(ucs_derived_of(tl_iface, uct_base_iface_t));
        return UCS_OK;
    }

    UCT_TL_IFACE_STAT_FLUSH_WAIT(ucs_derived_of(tl_iface, uct_base_iface_t));
    return UCS_INPROGRESS;
}
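Because this flush returns UCS_INPROGRESS while cuda events are still outstanding, a caller typically polls it while progressing the worker so the events can retire. A minimal sketch of that loop, assuming an iface and worker created elsewhere and the flags/completion flavor of the UCT flush API used by this code; flush_blocking is an illustrative helper, not a UCT function:

#include <uct/api/uct.h>

static ucs_status_t flush_blocking(uct_iface_h iface, uct_worker_h worker)
{
    ucs_status_t status;

    do {
        uct_worker_progress(worker);     /* let outstanding events retire */
        status = uct_iface_flush(iface, 0, NULL);
    } while (status == UCS_INPROGRESS);

    return status;  /* UCS_OK, or an error */
}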
ucs_status_t uct_tcp_ep_flush(uct_ep_h tl_ep, unsigned flags,
                              uct_completion_t *comp)
{
    uct_tcp_ep_t *ep = ucs_derived_of(tl_ep, uct_tcp_ep_t);

    if (!uct_tcp_ep_can_send(ep)) {
        return UCS_ERR_NO_RESOURCE;
    }

    /* If the endpoint can send, nothing may be left on the pending queue */
    ucs_assert(ucs_queue_is_empty(&ep->pending_q));

    UCT_TL_EP_STAT_FLUSH(&ep->super);
    return UCS_OK;
}
unsigned uct_tcp_ep_progress_tx(uct_tcp_ep_t *ep)
{
    uct_pending_req_priv_t *priv;
    unsigned count = 0;

    ucs_trace_func("ep=%p", ep);

    if (ep->length > 0) {
        count += uct_tcp_ep_send(ep);
    }

    /* Replay queued pending requests while the socket can still send */
    uct_pending_queue_dispatch(priv, &ep->pending_q, uct_tcp_ep_can_send(ep));

    if (uct_tcp_ep_can_send(ep)) {
        /* Everything was sent - stop polling for EPOLLOUT */
        ucs_assert(ucs_queue_is_empty(&ep->pending_q));
        uct_tcp_ep_mod_events(ep, 0, EPOLLOUT);
    }

    return count;
}
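Together these two functions implement the usual UCT back-pressure contract: flush (and sends) return UCS_ERR_NO_RESOURCE while the socket is busy, and uct_tcp_ep_progress_tx() replays the pending queue once sending is possible again. A hedged sketch of the caller side, assuming the two-argument uct_ep_pending_add() of this UCX era; my_req_t, my_pending_cb, and send_or_queue are illustrative names:

#include <uct/api/uct.h>

typedef struct {
    uct_pending_req_t super;  /* UCT request header, queued by the transport */
    /* user data needed to retry the operation would go here */
} my_req_t;

static ucs_status_t my_pending_cb(uct_pending_req_t *self)
{
    /* Retry the send; returning UCS_OK removes the request from the queue */
    return UCS_OK;
}

static void send_or_queue(uct_ep_h ep, my_req_t *req)
{
    if (uct_ep_am_short(ep, 0, 0, NULL, 0) == UCS_ERR_NO_RESOURCE) {
        req->super.func = my_pending_cb;
        uct_ep_pending_add(ep, &req->super);
    }
}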
static UCS_F_ALWAYS_INLINE ucs_status_t
uct_rc_mlx5_iface_poll_rx(uct_rc_mlx5_iface_t *iface)
{
    struct mlx5_wqe_srq_next_seg *seg;
    uct_rc_mlx5_recv_desc_t *desc;
    uct_rc_hdr_t *hdr;
    struct mlx5_cqe64 *cqe;
    unsigned byte_len;
    uint16_t wqe_ctr_be;
    uint16_t max_batch;
    ucs_status_t status;

    cqe = uct_ib_mlx5_get_cqe(&iface->rx.cq, iface->rx.cq.cqe_size_log);
    if (cqe == NULL) {
        /* If no CQE - post receives */
        status = UCS_ERR_NO_PROGRESS;
        goto done;
    }

    UCS_STATS_UPDATE_COUNTER(iface->super.stats,
                             UCT_RC_IFACE_STAT_RX_COMPLETION, 1);

    ucs_assert(!ucs_queue_is_empty(&iface->rx.desc_q));
    ucs_memory_cpu_load_fence();

    desc     = ucs_queue_pull_elem_non_empty(&iface->rx.desc_q,
                                             uct_rc_mlx5_recv_desc_t, queue);
    byte_len = ntohl(cqe->byte_cnt);

    uct_ib_iface_desc_received(&iface->super.super, &desc->super, byte_len,
                               !(cqe->op_own & (MLX5_INLINE_SCATTER_32 |
                                                MLX5_INLINE_SCATTER_64)));

    /* Get a pointer to the AM header (after which comes the payload).
     * Support cases of inline scatter by pointing directly to the CQE. */
    if (cqe->op_own & MLX5_INLINE_SCATTER_32) {
        hdr = (uct_rc_hdr_t*)cqe;
        UCS_STATS_UPDATE_COUNTER(iface->stats,
                                 UCT_RC_MLX5_IFACE_STAT_RX_INL_32, 1);
    } else if (cqe->op_own & MLX5_INLINE_SCATTER_64) {
        hdr = (uct_rc_hdr_t*)(cqe - 1);
        UCS_STATS_UPDATE_COUNTER(iface->stats,
                                 UCT_RC_MLX5_IFACE_STAT_RX_INL_64, 1);
    } else {
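The inline-scatter branches above follow a fixed mlx5 layout rule: with MLX5_INLINE_SCATTER_32 the received data sits inside the CQE itself, and with MLX5_INLINE_SCATTER_64 it starts one 64-byte CQE slot earlier. A sketch of just that decode, assuming the same mlx5 CQE definitions visible to the original file; cqe_inline_data is an illustrative helper:

static void *cqe_inline_data(struct mlx5_cqe64 *cqe)
{
    if (cqe->op_own & MLX5_INLINE_SCATTER_32) {
        return cqe;          /* up to 32 bytes, inside this CQE */
    } else if (cqe->op_own & MLX5_INLINE_SCATTER_64) {
        return cqe - 1;      /* 64 bytes, starting one CQE slot back */
    }
    return NULL;             /* data was scattered to the receive buffer */
}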
static void ucp_stub_pending_purge(uct_ep_h uct_ep, uct_pending_callback_t cb)
{
    ucp_stub_ep_t *stub_ep = ucs_derived_of(uct_ep, ucp_stub_ep_t);

    /* Nothing is expected to be queued on a stub endpoint when it is purged,
     * so there are no requests to hand to the callback */
    ucs_assert_always(ucs_queue_is_empty(&stub_ep->pending_q));
}