/*
 * libibverbs 1.0-ABI compatibility shim for ibv_query_srq():
 * unwrap the legacy SRQ handle and forward to the current entry point.
 * Returns whatever ibv_query_srq() returns (0 on success, errno value
 * on failure, per the libibverbs convention).
 */
int __ibv_query_srq_1_0(struct ibv_srq_1_0 *srq,
                        struct ibv_srq_attr *srq_attr)
{
    return ibv_query_srq(srq->real_srq, srq_attr);
}
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, uint8_t qp_type, uint8_t sgid_idx, union ibv_gid *dgid, uint32_t dqpn, uint32_t rq_psn, uint32_t qkey, bool use_qkey) { struct ibv_qp_attr attr = {}; union ibv_gid ibv_gid = { .global.interface_id = dgid->global.interface_id, .global.subnet_prefix = dgid->global.subnet_prefix }; int rc, attr_mask; attr.qp_state = IBV_QPS_RTR; attr_mask = IBV_QP_STATE; qp->sgid_idx = sgid_idx; switch (qp_type) { case IBV_QPT_RC: attr.path_mtu = IBV_MTU_1024; attr.dest_qp_num = dqpn; attr.max_dest_rd_atomic = 1; attr.min_rnr_timer = 12; attr.ah_attr.port_num = backend_dev->port_num; attr.ah_attr.is_global = 1; attr.ah_attr.grh.hop_limit = 1; attr.ah_attr.grh.dgid = ibv_gid; attr.ah_attr.grh.sgid_index = qp->sgid_idx; attr.rq_psn = rq_psn; attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER; trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num, be64_to_cpu(ibv_gid.global. subnet_prefix), be64_to_cpu(ibv_gid.global. interface_id), qp->sgid_idx, dqpn, rq_psn); break; case IBV_QPT_UD: if (use_qkey) { attr.qkey = qkey; attr_mask |= IBV_QP_QKEY; } trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? 
qkey : 0); break; } rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); if (rc) { rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); return -EIO; } return 0; } int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type, uint32_t sq_psn, uint32_t qkey, bool use_qkey) { struct ibv_qp_attr attr = {}; int rc, attr_mask; attr.qp_state = IBV_QPS_RTS; attr.sq_psn = sq_psn; attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN; switch (qp_type) { case IBV_QPT_RC: attr.timeout = 14; attr.retry_cnt = 7; attr.rnr_retry = 7; attr.max_rd_atomic = 1; attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_MAX_QP_RD_ATOMIC; trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn); break; case IBV_QPT_UD: if (use_qkey) { attr.qkey = qkey; attr_mask |= IBV_QP_QKEY; } trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn, use_qkey ? qkey : 0); break; } rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); if (rc) { rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); return -EIO; } return 0; } int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr, int attr_mask, struct ibv_qp_init_attr *init_attr) { if (!qp->ibqp) { attr->qp_state = IBV_QPS_RTS; return 0; } return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr); } void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res) { if (qp->ibqp) { ibv_destroy_qp(qp->ibqp); } g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res); rdma_protected_gslist_destroy(&qp->cqe_ctx_list); } int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd, uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit) { struct ibv_srq_init_attr srq_init_attr = {}; srq_init_attr.attr.max_wr = max_wr; srq_init_attr.attr.max_sge = max_sge; srq_init_attr.attr.srq_limit = srq_limit; srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr); if (!srq->ibsrq) { rdma_error_report("ibv_create_srq failed, errno=%d", errno); return -EIO; } 
rdma_protected_gslist_init(&srq->cqe_ctx_list); return 0; } int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr) { if (!srq->ibsrq) { return -EINVAL; } return ibv_query_srq(srq->ibsrq, srq_attr); } int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr, int srq_attr_mask) { if (!srq->ibsrq) { return -EINVAL; } return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask); } void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res) { if (srq->ibsrq) { ibv_destroy_srq(srq->ibsrq); } g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res); rdma_protected_gslist_destroy(&srq->cqe_ctx_list); } #define CHK_ATTR(req, dev, member, fmt) ({ \ trace_rdma_check_dev_attr(#member, dev.member, req->member); \ if (req->member > dev.member) { \ rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \ #member, req->member, dev.member); \ req->member = dev.member; \ } \ }) static int init_device_caps(RdmaBackendDev *backend_dev, struct ibv_device_attr *dev_attr) { struct ibv_device_attr bk_dev_attr; int rc; rc = ibv_query_device(backend_dev->context, &bk_dev_attr); if (rc) { rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno); return -EIO; } dev_attr->max_sge = MAX_SGE; dev_attr->max_srq_sge = MAX_SGE; CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64); CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d"); CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d"); return 0; } static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid, union ibv_gid *my_gid, int paylen) { grh->paylen = htons(paylen); grh->sgid = *sgid; 
grh->dgid = *my_gid; }
/*
 * libibverbs 1.0-ABI compatibility shim for ibv_query_srq():
 * unwrap the legacy SRQ handle and forward to the current entry point.
 *
 * Fix: removed a leftover debug fprintf() that printed
 * function/file/line to stderr on every call.
 *
 * NOTE(review): this appears to duplicate an identical
 * __ibv_query_srq_1_0 definition earlier in this file; confirm and
 * drop one of the two copies, as both cannot link in one translation
 * unit.
 */
int __ibv_query_srq_1_0(struct ibv_srq_1_0 *srq,
                        struct ibv_srq_attr *srq_attr)
{
    return ibv_query_srq(srq->real_srq, srq_attr);
}