/**
 * Fill in mlx5 direct-verbs information for the objects selected by
 * @a obj_type (bitmask of MLX5DV_OBJ_QP / MLX5DV_OBJ_CQ / MLX5DV_OBJ_SRQ).
 *
 * Each query is attempted only if all previous queries succeeded; the
 * first failing status is returned unchanged.
 *
 * @param obj       Direct-verbs object container holding in/out pointers.
 * @param obj_type  Bitmask selecting which sub-objects to query.
 *
 * @return UCS_OK on success, otherwise the status of the first failed query.
 */
ucs_status_t uct_ib_mlx5dv_init_obj(uct_ib_mlx5dv_t *obj, uint64_t obj_type)
{
    ucs_status_t status = UCS_OK;

    if (obj_type & MLX5DV_OBJ_QP) {
        status = uct_ib_mlx5_get_qp_info(obj->dv.qp.in,
                                         ucs_container_of(obj->dv.qp.out,
                                                          uct_ib_mlx5dv_qp_t,
                                                          dv));
    }

    if (!status && (obj_type & MLX5DV_OBJ_CQ)) {
        status = uct_ib_mlx5_get_cq(obj->dv.cq.in,
                                    ucs_container_of(obj->dv.cq.out,
                                                     uct_ib_mlx5dv_cq_t, dv));
    }

    if (!status && (obj_type & MLX5DV_OBJ_SRQ)) {
        status = uct_ib_mlx5_get_srq_info(obj->dv.srq.in,
                                          ucs_container_of(obj->dv.srq.out,
                                                           uct_ib_mlx5dv_srq_t,
                                                           dv));
    }

    return status;
}
/**
 * Initialize a UD mlx5 interface instance.
 *
 * Runs the generic UD super-class init, then extracts the mlx5-specific
 * CQ/WQ layouts for the TX and RX paths, validates CQE sizes, pre-writes
 * the receive WQE byte counts, and pre-posts receive buffers.
 *
 * Fix: the original returned a hard-coded UCS_ERR_IO_ERROR on the RX-CQ,
 * TX-WQ and RX-WQ failure paths, discarding the real error code; every
 * failure path now propagates the underlying status, consistent with the
 * TX-CQ path.
 */
static UCS_CLASS_INIT_FUNC(uct_ud_mlx5_iface_t, uct_pd_h pd,
                           uct_worker_h worker, const char *dev_name,
                           size_t rx_headroom,
                           const uct_iface_config_t *tl_config)
{
    uct_ud_iface_config_t *config = ucs_derived_of(tl_config,
                                                   uct_ud_iface_config_t);
    ucs_status_t status;
    int i;

    ucs_trace_func("");

    UCS_CLASS_CALL_SUPER_INIT(uct_ud_iface_t, &uct_ud_mlx5_iface_ops, pd,
                              worker, dev_name, rx_headroom, 0, config);

    /* Override generic UD hooks with mlx5-specific implementations */
    self->super.ops.async_progress = uct_ud_mlx5_iface_async_progress;
    self->super.ops.tx_skb         = uct_ud_mlx5_ep_tx_ctl_skb;

    status = uct_ib_mlx5_get_cq(self->super.super.send_cq, &self->tx.cq);
    if (status != UCS_OK) {
        return status;
    }
    if (uct_ib_mlx5_cqe_size(&self->tx.cq) != sizeof(struct mlx5_cqe64)) {
        ucs_error("TX CQE size (%d) is not %d",
                  uct_ib_mlx5_cqe_size(&self->tx.cq),
                  (int)sizeof(struct mlx5_cqe64));
        return UCS_ERR_IO_ERROR;
    }

    status = uct_ib_mlx5_get_cq(self->super.super.recv_cq, &self->rx.cq);
    if (status != UCS_OK) {
        /* was: return UCS_ERR_IO_ERROR — masked the real failure reason */
        return status;
    }
    if (uct_ib_mlx5_cqe_size(&self->rx.cq) != sizeof(struct mlx5_cqe64)) {
        ucs_error("RX CQE size (%d) is not %d",
                  uct_ib_mlx5_cqe_size(&self->rx.cq),
                  (int)sizeof(struct mlx5_cqe64));
        return UCS_ERR_IO_ERROR;
    }

    status = uct_ib_mlx5_get_txwq(self->super.super.super.worker,
                                  self->super.qp, &self->tx.wq);
    if (status != UCS_OK) {
        return status;
    }
    self->super.tx.available = self->tx.wq.bb_max;

    status = uct_ib_mlx5_get_rxwq(self->super.qp, &self->rx.wq);
    if (status != UCS_OK) {
        return status;
    }

    /* Write buffer sizes: each receive WQE advertises headroom + segment */
    for (i = 0; i <= self->rx.wq.mask; i++) {
        self->rx.wq.wqes[i].byte_count =
            htonl(self->super.super.config.rx_payload_offset +
                  self->super.super.config.seg_size);
    }

    /* Pre-post receives until fewer than one batch remains available */
    while (self->super.rx.available >= self->super.config.rx_max_batch) {
        uct_ud_mlx5_iface_post_recv(self);
    }

    /* TODO: add progress on first ep creation */
    uct_worker_progress_register(worker, uct_ud_mlx5_iface_progress, self);

    /* NOTE(review): no matching uct_ud_enter() is visible in this chunk;
     * presumably the super-class init acquires the lock — confirm. */
    uct_ud_leave(&self->super);
    return UCS_OK;
}