static UCS_CLASS_CLEANUP_FUNC(uct_dc_ep_t) { uct_dc_iface_t *iface = ucs_derived_of(self->super.super.iface, uct_dc_iface_t); uct_dc_ep_pending_purge(&self->super.super, NULL, NULL); ucs_arbiter_group_cleanup(&self->arb_group); uct_rc_fc_cleanup(&self->fc); ucs_assert_always(self->state != UCT_DC_EP_INVALID); if (self->dci == UCT_DC_EP_NO_DCI) { return; } /* TODO: this is good for dcs policy only. * Need to change if eps share dci */ ucs_assertv_always(uct_dc_iface_dci_has_outstanding(iface, self->dci), "iface (%p) ep (%p) dci leak detected: dci=%d", iface, self, self->dci); /* we can handle it but well behaving app should not do this */ ucs_warn("ep (%p) is destroyed with %d outstanding ops", self, (int16_t)iface->super.config.tx_qp_len - uct_rc_txqp_available(&iface->tx.dcis[self->dci].txqp)); uct_rc_txqp_purge_outstanding(&iface->tx.dcis[self->dci].txqp, UCS_ERR_CANCELED, 1); iface->tx.dcis[self->dci].ep = NULL; }
/* Record a lane selection: either merge the given usage flags into an
 * existing descriptor for the same (resource, remote address) pair, or
 * append a fresh descriptor. RMA/AMO scores are (re)assigned according to
 * the usage bits being added. */
static UCS_F_NOINLINE void
ucp_wireup_add_lane_desc(ucp_wireup_lane_desc_t *lane_descs,
                         ucp_lane_index_t *num_lanes_p,
                         ucp_rsc_index_t rsc_index, unsigned addr_index,
                         ucp_rsc_index_t dst_md_index, double score,
                         uint32_t usage)
{
    ucp_wireup_lane_desc_t *desc = NULL;
    ucp_lane_index_t idx;

    /* Look for a lane already created for this resource/address pair */
    for (idx = 0; idx < *num_lanes_p; ++idx) {
        if ((lane_descs[idx].rsc_index != rsc_index) ||
            (lane_descs[idx].addr_index != addr_index)) {
            continue;
        }

        desc = &lane_descs[idx];

        /* The same pair must always resolve to the same remote MD */
        ucs_assertv_always(dst_md_index == desc->dst_md_index,
                           "lane[%d].dst_md_index=%d, dst_md_index=%d",
                           (int)idx, desc->dst_md_index, dst_md_index);
        /* Each usage bit may be assigned to a lane only once */
        ucs_assertv_always(!(desc->usage & usage), "lane[%d]=0x%x |= 0x%x",
                           (int)idx, desc->usage, usage);
        desc->usage |= usage;
        break;
    }

    if (desc == NULL) {
        /* No match - initialize a new descriptor at the end of the array */
        desc               = &lane_descs[(*num_lanes_p)++];
        desc->rsc_index    = rsc_index;
        desc->addr_index   = addr_index;
        desc->dst_md_index = dst_md_index;
        desc->usage        = usage;
        desc->rma_score    = 0.0;
        desc->amo_score    = 0.0;
    }

    /* Apply the score to whichever capabilities this usage covers */
    if (usage & UCP_WIREUP_LANE_USAGE_RMA) {
        desc->rma_score = score;
    }
    if (usage & UCP_WIREUP_LANE_USAGE_AMO) {
        desc->amo_score = score;
    }
}
/* Resolve an IB address into an mlx5 UD address vector. Fills the base AV
 * (and, for global routes, the GRH section) from a temporary verbs AH.
 * Returns UCS_ERR_INVALID_ADDR if the address handle cannot be created. */
ucs_status_t uct_ud_mlx5_iface_get_av(uct_ib_iface_t *iface,
                                      uct_ud_mlx5_iface_common_t *ud_common_iface,
                                      const uct_ib_address_t *ib_addr,
                                      uint8_t path_bits,
                                      uct_ib_mlx5_base_av_t *base_av,
                                      struct mlx5_grh_av *grh_av,
                                      int *is_global)
{
    struct mlx5_wqe_av av;
    struct ibv_ah *ah;
    ucs_status_t status;

    status = uct_ib_iface_create_ah(iface, ib_addr, path_bits, &ah, is_global);
    if (status != UCS_OK) {
        return UCS_ERR_INVALID_ADDR;
    }

    /* Snapshot the driver's AV layout, then release the temporary AH */
    uct_ib_mlx5_get_av(ah, &av);
    ibv_destroy_ah(ah);

    base_av->stat_rate_sl = mlx5_av_base(&av)->stat_rate_sl;
    base_av->fl_mlid      = mlx5_av_base(&av)->fl_mlid;
    base_av->rlid         = mlx5_av_base(&av)->rlid;

    /* copy MLX5_EXTENDED_UD_AV from the driver, if the flag is not present then
     * the device supports compact address vector. */
    base_av->dqp_dct = ud_common_iface->config.compact_av ?
                       (mlx5_av_base(&av)->dqp_dct & UCT_IB_MLX5_EXTENDED_UD_AV) :
                       UCT_IB_MLX5_EXTENDED_UD_AV;

    ucs_assertv_always((UCT_IB_MLX5_AV_FULL_SIZE > UCT_IB_MLX5_AV_BASE_SIZE) ||
                       (base_av->dqp_dct & UCT_IB_MLX5_EXTENDED_UD_AV),
                       "compact address vector not supported, and EXTENDED_AV flag is missing");

    if (*is_global) {
        /* Global route: the caller must have supplied space for the GRH */
        ucs_assert_always(grh_av != NULL);
        memcpy(grh_av, mlx5_av_grh(&av), sizeof(*grh_av));
    }

    return UCS_OK;
}