/* Extract the low-level CQ layout (CQE buffer, CQE count, CQ number, CQE size)
 * from a verbs CQ into mlx5_cq->dv, so the CQ can be accessed directly.
 * Returns UCS_ERR_NO_DEVICE if the CQ internals cannot be obtained or the CQ
 * has already been consumed from.
 */
static ucs_status_t uct_ib_mlx5_get_cq(struct ibv_cq *cq, uct_ib_mlx5dv_cq_t *mlx5_cq)
{
#if HAVE_DECL_IBV_MLX5_EXP_GET_CQ_INFO
    struct ibv_mlx5_cq_info cq_info;

    if (ibv_mlx5_exp_get_cq_info(cq, &cq_info) != 0) {
        uct_ib_mlx5_obj_error("cq");
        return UCS_ERR_NO_DEVICE;
    }

    mlx5_cq->dv.buf      = cq_info.buf;
    mlx5_cq->dv.cqe_cnt  = cq_info.cqe_cnt;
    mlx5_cq->dv.cqn      = cq_info.cqn;
    mlx5_cq->dv.cqe_size = cq_info.cqe_size;
#else
    struct mlx5_cq *legacy_cq = ucs_container_of(cq, struct mlx5_cq, ibv_cq);

    /* Only a freshly-created CQ can be adopted for direct access - direct
     * polling assumes the consumer index starts at 0.
     */
    if (legacy_cq->cons_index != 0) {
        ucs_error("CQ consumer index is not 0 (%d)", legacy_cq->cons_index);
        return UCS_ERR_NO_DEVICE;
    }

    mlx5_cq->dv.buf      = legacy_cq->active_buf->buf;
    mlx5_cq->dv.cqe_cnt  = legacy_cq->ibv_cq.cqe + 1;
    mlx5_cq->dv.cqn      = legacy_cq->cqn;
    mlx5_cq->dv.cqe_size = legacy_cq->cqe_sz;
#endif
    return UCS_OK;
}
/**
 * Initialize a uct_ib_mlx5_cq_t from a verbs CQ: capture the CQE buffer,
 * CQ length and CQE size, reset the software consumer index, and disable
 * the provider's overrun checking so the CQ can be polled directly.
 *
 * @param cq       Verbs CQ to take over.
 * @param mlx5_cq  Filled with the direct-access CQ descriptor.
 *
 * @return UCS_OK on success, UCS_ERR_NO_DEVICE if the CQ internals cannot be
 *         obtained (or the CQ was already consumed from), UCS_ERR_UNSUPPORTED
 *         if overrun-ignore cannot be enabled.
 */
ucs_status_t uct_ib_mlx5_get_cq(struct ibv_cq *cq, uct_ib_mlx5_cq_t *mlx5_cq)
{
    unsigned cqe_size;
    int ret;

#if HAVE_DECL_IBV_MLX5_EXP_GET_CQ_INFO
    struct ibv_mlx5_cq_info ibv_cq_info;

    ret = ibv_mlx5_exp_get_cq_info(cq, &ibv_cq_info);
    if (ret != 0) {
        /* Log the failure, consistently with the dv variant of this getter */
        uct_ib_mlx5_obj_error("cq");
        return UCS_ERR_NO_DEVICE;
    }

    mlx5_cq->cq_buf    = ibv_cq_info.buf;
    mlx5_cq->cq_ci     = 0;
    mlx5_cq->cq_length = ibv_cq_info.cqe_cnt;
    cqe_size           = ibv_cq_info.cqe_size;
#else
    struct mlx5_cq *mcq = ucs_container_of(cq, struct mlx5_cq, ibv_cq);

    /* Direct polling starts from index 0, so the CQ must be unused */
    if (mcq->cons_index != 0) {
        ucs_error("CQ consumer index is not 0 (%d)", mcq->cons_index);
        return UCS_ERR_NO_DEVICE;
    }

    mlx5_cq->cq_buf    = mcq->active_buf->buf;
    mlx5_cq->cq_ci     = 0;
    mlx5_cq->cq_length = mcq->ibv_cq.cqe + 1;
    cqe_size           = mcq->cqe_sz;
#endif

    /* Move buffer forward for 128b CQE, so we would get pointer to the 2nd
     * 64b when polling.
     */
    mlx5_cq->cq_buf += cqe_size - sizeof(struct mlx5_cqe64);

    /* We poll past the hardware producer index, so the provider must not
     * flag an overrun when the consumer index lags behind.
     */
    ret = ibv_exp_cq_ignore_overrun(cq);
    if (ret != 0) {
        ucs_error("Failed to modify send CQ to ignore overrun: %s", strerror(ret));
        return UCS_ERR_UNSUPPORTED;
    }

    mlx5_cq->cqe_size_log = ucs_ilog2(cqe_size);
    ucs_assert_always((1 << mlx5_cq->cqe_size_log) == cqe_size);

    return UCS_OK;
}