/*
 * Thin compile-time glue over mlx5dv_create_qp().
 *
 * When the provider library exposes tunnel/DC support
 * (HAVE_IBV_DEVICE_TUNNEL_SUPPORT), forward the call to the direct-verbs
 * API; otherwise return NULL so callers can treat the feature as absent
 * without sprinkling #ifdefs at every call site.
 */
static struct ibv_qp *
mlx5_glue_dv_create_qp(struct ibv_context *context,
                       struct ibv_qp_init_attr_ex *qp_init_attr_ex,
                       struct mlx5dv_qp_init_attr *dv_qp_init_attr)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
    return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);
#else
    /* Silence unused-parameter warnings in the stubbed-out build. */
    (void)context;
    (void)qp_init_attr_ex;
    (void)dv_qp_init_attr;
    return NULL;
#endif
}
/*
 * Probe whether @dev supports the Dynamically Connected (DC) transport.
 *
 * The check allocates a minimal PD + CQ and attempts to create a DCI QP
 * through the mlx5 direct-verbs interface. If QP creation succeeds, DC is
 * supported and UCT_IB_DEVICE_FLAG_DC is set in dev->flags; failure to
 * create the QP is NOT an error — it simply means the device/driver has
 * no DC support, and UCS_OK is still returned.
 *
 * @param dev  IB device to probe (dev->ibv_context must be valid).
 * @return UCS_OK on a completed probe (with or without DC support), or
 *         UCS_ERR_IO_ERROR if the scaffolding resources (PD/CQ) could not
 *         be created.
 */
static ucs_status_t uct_ib_mlx5_check_dc(uct_ib_device_t *dev)
{
    ucs_status_t status              = UCS_OK;
    struct ibv_context *ctx          = dev->ibv_context;
    struct ibv_qp_init_attr_ex qp_attr = {};
    struct mlx5dv_qp_init_attr dv_attr = {};
    struct ibv_pd *pd;
    struct ibv_cq *cq;
    struct ibv_qp *qp;

    pd = ibv_alloc_pd(ctx);
    if (pd == NULL) {
        ucs_error("ibv_alloc_pd() failed: %m");
        return UCS_ERR_IO_ERROR;
    }

    cq = ibv_create_cq(ctx, 1, NULL, NULL, 0);
    if (cq == NULL) {
        ucs_error("ibv_create_cq() failed: %m");
        status = UCS_ERR_IO_ERROR;
        goto err_dealloc_pd;
    }

    qp_attr.send_cq          = cq;
    qp_attr.recv_cq          = cq;
    qp_attr.cap.max_send_wr  = 1;
    qp_attr.cap.max_send_sge = 1;
    qp_attr.qp_type          = IBV_QPT_DRIVER;
    qp_attr.comp_mask        = IBV_QP_INIT_ATTR_PD;
    qp_attr.pd               = pd;

    dv_attr.comp_mask            = MLX5DV_QP_INIT_ATTR_MASK_DC;
    dv_attr.dc_init_attr.dc_type = MLX5DV_DCTYPE_DCI;

    /* Successfully creating a DCI QP means DC is supported. Go through the
     * glue wrapper (not mlx5dv_create_qp() directly) so this translation
     * unit still compiles when HAVE_IBV_DEVICE_TUNNEL_SUPPORT is undefined;
     * the wrapper then returns NULL and DC is reported as unsupported. */
    qp = mlx5_glue_dv_create_qp(ctx, &qp_attr, &dv_attr);
    if (qp != NULL) {
        ibv_destroy_qp(qp);
        dev->flags |= UCT_IB_DEVICE_FLAG_DC;
    }

    ibv_destroy_cq(cq);
err_dealloc_pd:
    ibv_dealloc_pd(pd);
    return status;
}