/* Thin glue shim: forward an extended device-attribute query straight to
 * libibverbs. Returns whatever ibv_query_device_ex() returns (0 on success,
 * non-zero errno-style code on failure). */
static int mlx5_glue_query_device_ex(struct ibv_context *context,
                                     const struct ibv_query_device_ex_input *input,
                                     struct ibv_device_attr_ex *attr)
{
    int ret;

    ret = ibv_query_device_ex(context, input, attr);
    return ret;
}
/**
 * Open a mlx5 device through the mlx5dv/DEVX interface and create its memory
 * domain.
 *
 * Opens the device with DEVX enabled, queries the general and atomic HCA
 * capabilities via QUERY_HCA_CAP commands, and translates them into device/md
 * capability flags (DC support, compact address vector, KSM, atomic argument
 * sizes and their big-endian subsets).
 *
 * @param [in]  ibv_device  Verbs device to open.
 * @param [out] p_md        Filled with the created memory domain on success.
 *
 * @return UCS_OK on success; UCS_ERR_UNSUPPORTED when the device or kernel
 *         does not support mlx5dv/DEVX; another error code on failure.
 *         On any failure all resources acquired here are released.
 */
static ucs_status_t uct_ib_mlx5dv_md_open(struct ibv_device *ibv_device,
                                          uct_ib_md_t **p_md)
{
    uint32_t out[UCT_IB_MLX5DV_ST_SZ_DW(query_hca_cap_out)] = {};
    uint32_t in[UCT_IB_MLX5DV_ST_SZ_DW(query_hca_cap_in)]   = {};
    struct mlx5dv_context_attr dv_attr                      = {};
    ucs_status_t status                                     = UCS_OK;
    int atomic = 0, has_dc = 1;
    struct ibv_context *ctx;
    uct_ib_device_t *dev;
    uct_ib_mlx5_md_t *md;
    void *cap;
    int ret;

#if HAVE_DECL_MLX5DV_IS_SUPPORTED
    if (!mlx5dv_is_supported(ibv_device)) {
        return UCS_ERR_UNSUPPORTED;
    }
#endif

    /* DEVX access is required for the QUERY_HCA_CAP commands issued below */
    dv_attr.flags |= MLX5DV_CONTEXT_FLAGS_DEVX;
    ctx = mlx5dv_open_device(ibv_device, &dv_attr);
    if (ctx == NULL) {
        ucs_debug("mlx5dv_open_device(%s) failed: %m",
                  ibv_get_device_name(ibv_device));
        status = UCS_ERR_UNSUPPORTED;
        goto err;
    }

    md = ucs_calloc(1, sizeof(*md), "ib_mlx5_md");
    if (md == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err_free_context;
    }

    md->super.ops    = &uct_ib_mlx5dv_md_ops;
    dev              = &md->super.dev;
    dev->ibv_context = ctx;

    IBV_EXP_DEVICE_ATTR_SET_COMP_MASK(&dev->dev_attr);
    ret = ibv_query_device_ex(dev->ibv_context, NULL, &dev->dev_attr);
    if (ret != 0) {
        /* message fixed: the call above is ibv_query_device_ex() */
        ucs_error("ibv_query_device_ex() returned %d: %m", ret);
        status = UCS_ERR_IO_ERROR;
        goto err_free;
    }

    cap = UCT_IB_MLX5DV_ADDR_OF(query_hca_cap_out, out, capability);
    UCT_IB_MLX5DV_SET(query_hca_cap_in, in, opcode,
                      UCT_IB_MLX5_CMD_OP_QUERY_HCA_CAP);
    UCT_IB_MLX5DV_SET(query_hca_cap_in, in, op_mod,
                      UCT_IB_MLX5_HCA_CAP_OPMOD_GET_MAX |
                      (UCT_IB_MLX5_CAP_GENERAL << 1));
    ret = mlx5dv_devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
    if (ret == 0) {
        if (!UCT_IB_MLX5DV_GET(cmd_hca_cap, cap, dct)) {
            has_dc = 0;
        }
        if (UCT_IB_MLX5DV_GET(cmd_hca_cap, cap, compact_address_vector)) {
            dev->flags |= UCT_IB_DEVICE_FLAG_AV;
        }
        if (UCT_IB_MLX5DV_GET(cmd_hca_cap, cap, fixed_buffer_size)) {
            md->flags |= UCT_IB_MLX5_MD_FLAG_KSM;
        }
        if (UCT_IB_MLX5DV_GET(cmd_hca_cap, cap, atomic)) {
            atomic = 1;
        }
    } else if ((errno != EPERM) && (errno != EPROTONOSUPPORT) &&
               (errno != EOPNOTSUPP)) {
        ucs_error("MLX5_CMD_OP_QUERY_HCA_CAP failed: %m");
        status = UCS_ERR_IO_ERROR;
        goto err_free;
    } else {
        /* DEVX command not permitted/supported - let a fallback md try */
        status = UCS_ERR_UNSUPPORTED;
        goto err_free;
    }

    if (atomic) {
        int ops = UCT_IB_MLX5_ATOMIC_OPS_CMP_SWAP |
                  UCT_IB_MLX5_ATOMIC_OPS_FETCH_ADD;
        uint8_t arg_size;
        int cap_ops, mode8b;

        UCT_IB_MLX5DV_SET(query_hca_cap_in, in, op_mod,
                          UCT_IB_MLX5_HCA_CAP_OPMOD_GET_MAX |
                          (UCT_IB_MLX5_CAP_ATOMIC << 1));
        ret = mlx5dv_devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
        if (ret != 0) {
            ucs_error("MLX5_CMD_OP_QUERY_HCA_CAP failed: %m");
            /* leak fix: was a bare "return" which leaked md and ctx */
            status = UCS_ERR_IO_ERROR;
            goto err_free;
        }

        arg_size = UCT_IB_MLX5DV_GET(atomic_caps, cap, atomic_size_qp);
        cap_ops  = UCT_IB_MLX5DV_GET(atomic_caps, cap, atomic_operations);
        mode8b   = UCT_IB_MLX5DV_GET(atomic_caps, cap,
                                     atomic_req_8B_endianness_mode);

        /* Plain 64-bit CSWAP/FADD supported for QPs */
        if ((cap_ops & ops) == ops) {
            dev->atomic_arg_sizes = sizeof(uint64_t);
            if (!mode8b) {
                dev->atomic_arg_sizes_be = sizeof(uint64_t);
            }
        }

        /* Extended (masked) atomics: intersect with DC sizes if DC is used */
        ops |= UCT_IB_MLX5_ATOMIC_OPS_MASKED_CMP_SWAP |
               UCT_IB_MLX5_ATOMIC_OPS_MASKED_FETCH_ADD;
        if (has_dc) {
            arg_size &= UCT_IB_MLX5DV_GET(query_hca_cap_out, out,
                                          capability.atomic_caps.atomic_size_dc);
        }

        if ((cap_ops & ops) == ops) {
            dev->ext_atomic_arg_sizes = arg_size;
            if (mode8b) {
                /* in 8B-endianness mode, 8-byte args are not big-endian */
                arg_size &= ~(sizeof(uint64_t));
            }
            dev->ext_atomic_arg_sizes_be = arg_size;
        }
    }

    if (has_dc) {
        status = uct_ib_mlx5_check_dc(dev);
        if (status != UCS_OK) {
            /* leak fix: do not publish a half-constructed md on failure */
            goto err_free;
        }
    }

    dev->flags |= UCT_IB_DEVICE_FLAG_MLX5_PRM;
    *p_md = &md->super;
    return status;

err_free:
    ucs_free(md);
err_free_context:
    ibv_close_device(ctx);
err:
    return status;
}