static ucs_status_t uct_rc_verbs_ep_tag_qp_create(uct_rc_verbs_iface_t *iface,
                                                  uct_rc_verbs_ep_t *ep)
{
#if HAVE_IBV_EX_HW_TM
    struct ibv_qp_cap cap;
    ucs_status_t status;
    int err;

    /* Nothing to do unless HW tag matching is enabled on this iface. */
    if (!UCT_RC_VERBS_TM_ENABLED(iface)) {
        return UCS_OK;
    }

    /* The send queue of this QP is used by FW for HW RNDV, and the driver
     * requires it to be created with a zero-length send queue. */
    status = uct_rc_iface_qp_create(&iface->super, IBV_QPT_RC, &ep->tm_qp,
                                    &cap, iface->tm.xrq.srq, 0);
    if (status != UCS_OK) {
        return status;
    }

    status = uct_rc_iface_qp_init(&iface->super, ep->tm_qp);
    if (status != UCS_OK) {
        /* Roll back the QP created above; only warn if destroy fails. */
        err = ibv_destroy_qp(ep->tm_qp);
        if (err) {
            ucs_warn("ibv_destroy_qp() returned %d: %m", err);
        }
        return status;
    }

    /* Register the endpoint under the new QP number. */
    uct_rc_iface_add_ep(&iface->super, &ep->super, ep->tm_qp->qp_num);
#endif
    return UCS_OK;
}
    /* Tail of a stats-class counter-name table whose start is above this chunk. */
    [UCT_RC_TXQP_STAT_SIGNAL] = "signal"
    }
};
#endif

/*
 * Initialize a TX queue-pair wrapper: reset its bookkeeping counters, create
 * the underlying verbs QP, and attach a stats node named after the QP number.
 *
 * On QP creation failure the stats node is never allocated; on stats
 * allocation failure the QP created here is destroyed (goto-based cleanup).
 * NOTE(review): the definition is truncated in this chunk — the code after
 * the `err:` label is not visible here.
 */
ucs_status_t uct_rc_txqp_init(uct_rc_txqp_t *txqp, uct_rc_iface_t *iface,
                              int qp_type, struct ibv_qp_cap *cap
                              UCS_STATS_ARG(ucs_stats_node_t* stats_parent))
{
    ucs_status_t status;

    /* No sends posted yet: no unsignaled work requests, no TX credits. */
    txqp->unsignaled = 0;
    txqp->available = 0;
    ucs_queue_head_init(&txqp->outstanding);

    status = uct_rc_iface_qp_create(iface, qp_type, &txqp->qp, cap);
    if (status != UCS_OK) {
        goto err;
    }

    /* Stats node keyed by QP number so counters can be tied to this QP. */
    status = UCS_STATS_NODE_ALLOC(&txqp->stats, &uct_rc_txqp_stats_class,
                                  stats_parent, "-0x%x", txqp->qp->qp_num);
    if (status != UCS_OK) {
        goto err_destroy_qp;
    }

    return UCS_OK;

err_destroy_qp:
    /* Undo the QP creation above; status from the failed stats alloc is kept. */
    ibv_destroy_qp(txqp->qp);
err:
/* Closing of a definition whose start is above this chunk. */
};
#endif

/*
 * Initialize a TX queue-pair wrapper: reset its bookkeeping counters, create
 * the underlying verbs QP sized by the configured TX queue length, and attach
 * a stats node named after the QP number.
 *
 * On QP creation failure the stats node is never allocated; on stats
 * allocation failure the QP created here is destroyed (goto-based cleanup).
 * NOTE(review): the definition is truncated in this chunk — the code after
 * the `err:` label is not visible here.
 */
ucs_status_t uct_rc_txqp_init(uct_rc_txqp_t *txqp, uct_rc_iface_t *iface,
                              int qp_type, struct ibv_qp_cap *cap
                              UCS_STATS_ARG(ucs_stats_node_t* stats_parent))
{
    ucs_status_t status;

    /* No sends posted yet: clear unsignaled counters (including the saved
     * store used elsewhere for moderation) and TX credits. */
    txqp->unsignaled = 0;
    txqp->unsignaled_store = 0;
    txqp->unsignaled_store_count = 0;
    txqp->available = 0;
    ucs_queue_head_init(&txqp->outstanding);

    status = uct_rc_iface_qp_create(iface, qp_type, &txqp->qp, cap,
                                    iface->config.tx_qp_len);
    if (status != UCS_OK) {
        goto err;
    }

    /* Stats node keyed by QP number so counters can be tied to this QP. */
    status = UCS_STATS_NODE_ALLOC(&txqp->stats, &uct_rc_txqp_stats_class,
                                  stats_parent, "-0x%x", txqp->qp->qp_num);
    if (status != UCS_OK) {
        goto err_destroy_qp;
    }

    return UCS_OK;

err_destroy_qp:
    /* Undo the QP creation above; status from the failed stats alloc is kept. */
    ibv_destroy_qp(txqp->qp);
err:
/*
 * Class-init function for the RC verbs iface.
 *
 * Steps: chain up to the RC iface super-init, pre-build the inline AM and
 * inline RDMA-write work requests, size the short descriptor, select atomic
 * reply handlers from device capabilities, probe max_inline via a dummy QP,
 * create the short-descriptor mempool, pre-post all available receives, and
 * register the progress callback on the worker's notifier chain.
 *
 * Fix vs. original: the return value of ibv_destroy_qp() on the dummy QP was
 * silently ignored; it is now checked and warned about, consistent with
 * uct_rc_verbs_ep_tag_qp_create() in this file.
 */
static UCS_CLASS_INIT_FUNC(uct_rc_verbs_iface_t, uct_pd_h pd, uct_worker_h worker,
                           const char *dev_name, size_t rx_headroom,
                           const uct_iface_config_t *tl_config)
{
    uct_rc_verbs_iface_config_t *config =
        ucs_derived_of(tl_config, uct_rc_verbs_iface_config_t);
    struct ibv_exp_device_attr *dev_attr;
    size_t am_hdr_size;
    ucs_status_t status;
    struct ibv_qp_cap cap;
    struct ibv_qp *qp;
    int ret;
    extern uct_iface_ops_t uct_rc_verbs_iface_ops;

    UCS_CLASS_CALL_SUPER_INIT(uct_rc_iface_t, &uct_rc_verbs_iface_ops, pd,
                              worker, dev_name, rx_headroom, 0, &config->super);

    /* Initialize inline work request templates (filled per-send later).
     * AM: 2 SGEs (header + payload), inline send. */
    memset(&self->inl_am_wr, 0, sizeof(self->inl_am_wr));
    self->inl_am_wr.sg_list    = self->inl_sge;
    self->inl_am_wr.num_sge    = 2;
    self->inl_am_wr.opcode     = IBV_WR_SEND;
    self->inl_am_wr.send_flags = IBV_SEND_INLINE;

    /* RDMA write: 1 SGE, inline + signaled so a completion is generated. */
    memset(&self->inl_rwrite_wr, 0, sizeof(self->inl_rwrite_wr));
    self->inl_rwrite_wr.sg_list    = self->inl_sge;
    self->inl_rwrite_wr.num_sge    = 1;
    self->inl_rwrite_wr.opcode     = IBV_WR_RDMA_WRITE;
    self->inl_rwrite_wr.send_flags = IBV_SEND_SIGNALED | IBV_SEND_INLINE;

    memset(self->inl_sge, 0, sizeof(self->inl_sge));

    /* Configuration: short descriptor must hold the larger of the configured
     * AM header (at least an rc header) and the atomic payload. */
    am_hdr_size = ucs_max(config->max_am_hdr, sizeof(uct_rc_hdr_t));
    self->config.short_desc_size = ucs_max(UCT_RC_MAX_ATOMIC_SIZE, am_hdr_size);

    /* Pick atomic-reply byte-order handlers from the device capabilities;
     * if none of the flags is set, the handlers stay unset. */
    dev_attr = &uct_ib_iface_device(&self->super.super)->dev_attr;
    if (IBV_EXP_HAVE_ATOMIC_HCA(dev_attr) || IBV_EXP_HAVE_ATOMIC_GLOB(dev_attr)) {
        self->config.atomic32_handler = uct_rc_ep_atomic_handler_32_be0;
        self->config.atomic64_handler = uct_rc_ep_atomic_handler_64_be0;
    } else if (IBV_EXP_HAVE_ATOMIC_HCA_REPLY_BE(dev_attr)) {
        self->config.atomic32_handler = uct_rc_ep_atomic_handler_32_be1;
        self->config.atomic64_handler = uct_rc_ep_atomic_handler_64_be1;
    }

    /* Create a dummy QP in order to find out max_inline.
     * NOTE(review): this 3-argument uct_rc_iface_qp_create() call differs in
     * arity from other call sites in this source — confirm against the
     * prototype in the corresponding header. */
    status = uct_rc_iface_qp_create(&self->super, &qp, &cap);
    if (status != UCS_OK) {
        goto err;
    }
    ret = ibv_destroy_qp(qp);
    if (ret) {
        /* Not fatal: the QP was only needed to read cap.max_inline_data. */
        ucs_warn("ibv_destroy_qp() returned %d: %m", ret);
    }
    self->config.max_inline = cap.max_inline_data;

    /* Create AH headers and Atomic mempool */
    status = uct_iface_mpool_create(&self->super.super.super.super,
                                    sizeof(uct_rc_iface_send_desc_t) +
                                        self->config.short_desc_size,
                                    sizeof(uct_rc_iface_send_desc_t),
                                    UCS_SYS_CACHE_LINE_SIZE,
                                    &config->super.super.tx.mp,
                                    self->super.config.tx_qp_len,
                                    uct_rc_iface_send_desc_init,
                                    "rc_verbs_short_desc",
                                    &self->short_desc_mp);
    if (status != UCS_OK) {
        goto err;
    }

    /* Pre-post receives until the RX credit pool is drained. */
    while (self->super.rx.available > 0) {
        if (uct_rc_verbs_iface_post_recv(self, 1) == 0) {
            ucs_error("failed to post receives");
            status = UCS_ERR_NO_MEMORY;
            goto err_destroy_short_desc_mp;
        }
    }

    ucs_notifier_chain_add(&worker->progress_chain, uct_rc_verbs_iface_progress,
                           self);
    return UCS_OK;

err_destroy_short_desc_mp:
    ucs_mpool_destroy(self->short_desc_mp);
err:
    return status;
}