Beispiel #1
0
/*
 * Build an address handle for replying to the sender of a received UD
 * message, and record the sender's QPN and QKEY on the node.
 * Previously the AH creation and the QP query were unchecked; a failed
 * AH creation left node->ah NULL and a failed query left attr.qkey
 * indeterminate.
 */
static void create_reply_ah(struct cmatest_node *node, struct ibv_wc *wc)
{
	struct ibv_qp_attr attr;
	struct ibv_qp_init_attr init_attr;

	node->ah = ibv_create_ah_from_wc(node->pd, wc, node->mem,
					 node->cma_id->port_num);
	if (!node->ah) {
		fprintf(stderr, "failed to create AH from work completion\n");
		return;
	}

	/* Remote QPN travels in the immediate data, network byte order.
	 * NOTE(review): assumes the peer sent with immediate data
	 * (IBV_WC_WITH_IMM) -- confirm against the sender. */
	node->remote_qpn = ntohl(wc->imm_data);

	if (ibv_query_qp(node->cma_id->qp, &attr, IBV_QP_QKEY, &init_attr)) {
		fprintf(stderr, "failed to query QP for qkey\n");
		return;
	}
	node->remote_qkey = attr.qkey;
}
Beispiel #2
0
/**
 * Fetch current parameters of the connection's QP. Only the capability
 * block is requested; max_inline_data and a derived request limit are
 * stored on the connection.
 *
 * @param[in] conn  connection whose QP is queried
 */
static void get_qp_param(conn_t *conn)
{
    struct ibv_qp_init_attr init_attr;
    struct ibv_qp_attr attr;
    int rc = ibv_query_qp(conn->rdma.cm_id->qp, &attr, IBV_QP_CAP, &init_attr);

    assert(rc == 0);
    if (rc != 0)
        return;

    conn->rdma.max_inline_data = init_attr.cap.max_inline_data;

    /* Limit the send buffer operations from the initiator to 1/4th
     * of the work requests. */
    conn->rdma.max_req_avail = init_attr.cap.max_send_wr / 4;
}
Beispiel #3
0
/*
 * Compat shim: query a QP through the modern verbs API and translate the
 * init attributes back into the 1.0 ABI layout (context pointers instead
 * of object pointers).
 *
 * Returns 0 on success, or the ibv_query_qp error code.
 *
 * Fix: a QP is not required to be attached to an SRQ, so
 * real_init_attr.srq may be NULL -- dereferencing it unconditionally was
 * a NULL-pointer crash for non-SRQ QPs.
 */
int __ibv_query_qp_1_0(struct ibv_qp_1_0 *qp, struct ibv_qp_attr *attr,
		       int attr_mask,
		       struct ibv_qp_init_attr_1_0 *init_attr)
{
	struct ibv_qp_init_attr real_init_attr;
	int ret;

	ret = ibv_query_qp(qp->real_qp, attr, attr_mask, &real_init_attr);
	if (ret)
		return ret;

	init_attr->qp_context = qp->qp_context;
	init_attr->send_cq    = real_init_attr.send_cq->cq_context;
	init_attr->recv_cq    = real_init_attr.recv_cq->cq_context;
	/* Guard: not every QP has an SRQ attached */
	init_attr->srq        = real_init_attr.srq ?
				real_init_attr.srq->srq_context : NULL;
	init_attr->qp_type    = real_init_attr.qp_type;
	init_attr->cap        = real_init_attr.cap;
	init_attr->sq_sig_all = real_init_attr.sq_sig_all;

	return 0;
}
Beispiel #4
0
/*
 * Compat shim: query a QP through the modern verbs API and translate the
 * init attributes back into the 1.0 ABI layout.
 *
 * Returns 0 on success, or the ibv_query_qp error code.
 *
 * Fixes: removed a leftover debug fprintf on function entry, and guarded
 * the SRQ translation -- real_init_attr.srq is NULL for QPs without an
 * SRQ, so the unconditional dereference was a crash.
 */
int __ibv_query_qp_1_0(struct ibv_qp_1_0 *qp, struct ibv_qp_attr *attr,
		       int attr_mask,
		       struct ibv_qp_init_attr_1_0 *init_attr)
{
	struct ibv_qp_init_attr real_init_attr;
	int ret;

	ret = ibv_query_qp(qp->real_qp, attr, attr_mask, &real_init_attr);
	if (ret)
		return ret;

	init_attr->qp_context = qp->qp_context;
	init_attr->send_cq    = real_init_attr.send_cq->cq_context;
	init_attr->recv_cq    = real_init_attr.recv_cq->cq_context;
	/* Guard: not every QP has an SRQ attached */
	init_attr->srq        = real_init_attr.srq ?
				real_init_attr.srq->srq_context : NULL;
	init_attr->qp_type    = real_init_attr.qp_type;
	init_attr->cap        = real_init_attr.cap;
	init_attr->sq_sig_all = real_init_attr.sq_sig_all;

	return 0;
}
/*
 * Load an alternative path (new dlid / path bits or next port) into the
 * QP for Automatic Path Migration.
 *
 * Fixes: (1) on ibv_query_qp failure, attr holds indeterminate data, so
 * reading attr.ah_attr below was undefined behavior -- now returns early;
 * (2) the ibv_modify_qp error message wrongly named ibv_query_qp.
 */
void mca_btl_openib_load_apm(struct ibv_qp *qp, mca_btl_openib_endpoint_t *ep)
{
    struct ibv_qp_init_attr qp_init_attr;
    struct ibv_qp_attr attr;
    enum ibv_qp_attr_mask mask = 0;
    struct mca_btl_openib_module_t *btl;

    BTL_VERBOSE(("APM: Loading alternative path"));
    assert (NULL != ep);
    btl = ep->endpoint_btl;

    if (ibv_query_qp(qp, &attr, mask, &qp_init_attr)) {
        BTL_ERROR(("Failed to ibv_query_qp, qp num: %d", qp->qp_num));
        return;
    }

    if (mca_btl_openib_component.apm_lmc &&
            attr.ah_attr.src_path_bits - btl->src_path_bits < mca_btl_openib_component.apm_lmc) {
        BTL_VERBOSE(("APM LMC: src: %d btl_src: %d lmc_max: %d",
                    attr.ah_attr.src_path_bits,
                    btl->src_path_bits,
                    mca_btl_openib_component.apm_lmc));
        apm_update_attr(&attr, &mask);
    } else {
        if (mca_btl_openib_component.apm_ports) {
            /* Try to migrate to next port */
            if (OPAL_SUCCESS != apm_update_port(ep, &attr, &mask))
                return;
        } else {
            BTL_ERROR(("Failed to load alternative path, all %d were used",
                        attr.ah_attr.src_path_bits - btl->src_path_bits));
        }
    }

    if (ibv_modify_qp(qp, &attr, mask))
        BTL_ERROR(("Failed to ibv_modify_qp, qp num: %d, errno says: %s (%d)",
                   qp->qp_num, strerror(errno), errno));
}
/*
 * Busy-poll the receive CQ for the first completion and build an address
 * handle toward its sender; record the sender's UD QPN and QKEY in
 * user_param. Returns 0 on success, 1 on any failure.
 *
 * Fixes: ibv_poll_cq can return a negative error, which escaped the loop
 * and left wc uninitialized (UB when read below); the AH creation and the
 * qkey query results were also unchecked.
 */
static int create_ah_from_wc_recv(struct pingpong_context *ctx,
		struct perftest_parameters *user_param)
{
	struct ibv_qp_attr attr;
	struct ibv_qp_init_attr init_attr;
	struct ibv_wc wc;
	int ne;

	do {
		ne = ibv_poll_cq(ctx->recv_cq,1,&wc);
	} while (ne == 0);

	if (ne < 0) {
		fprintf(stderr, "Failed to poll CQ when trying to create AH\n");
		return 1;
	}

	if (wc.status || !(wc.opcode & IBV_WC_RECV) || wc.wr_id != 0) {
		fprintf(stderr, "Bad wc status when trying to create AH -- %d -- %d \n",(int)wc.status,(int)wc.wr_id);
		return 1;
	}

	ctx->ah[0] = ibv_create_ah_from_wc(ctx->pd,&wc,(struct ibv_grh*)ctx->buf,ctx->cm_id->port_num);
	if (!ctx->ah[0]) {
		fprintf(stderr, "Failed to create AH from WC\n");
		return 1;
	}

	/* remote QPN is carried in the immediate data, network byte order */
	user_param->rem_ud_qpn = ntohl(wc.imm_data);
	if (ibv_query_qp(ctx->qp[0],&attr, IBV_QP_QKEY,&init_attr)) {
		fprintf(stderr, "Failed to query QP for qkey\n");
		return 1;
	}
	user_param->rem_ud_qkey = attr.qkey;

	return 0;
}
Beispiel #7
0
/*
 * Create one data CQ per HCA and (num_hcas * num_qps) UD QPs, wiring each
 * QP to its HCA's CQ and receive pool. Returns 1 on success; hard verbs
 * failures abort via error_abort_all.
 *
 * Fixes: both mallocs were unchecked (NULL deref on exhaustion), and the
 * final debug print indexed ud_qp[i] instead of ud_qp[index], printing
 * the wrong QP number whenever num_qps > 1.
 */
int MV_Setup_QPs() {
    int i = 0;
    int port = 0;

    D_PRINT("Num HCAs: %d\n", mvdev.num_hcas);

    mvdev.cq = (struct ibv_cq **) malloc(sizeof(struct ibv_cq *) * mvdev.num_hcas);
    mvdev.ud_qp = (mv_qp *) malloc(sizeof(mv_qp) * mvdev.num_hcas * mvparams.num_qps);

    /* Allocation can fail; abort before dereferencing NULL below */
    if (NULL == mvdev.cq || NULL == mvdev.ud_qp) {
        error_abort_all(IBV_RETURN_ERR, "Couldn't allocate CQ/QP arrays");
        return 0;
    }

    mvdev.num_cqs = mvdev.num_hcas;
    mvdev.num_ud_qps = mvdev.num_hcas * mvparams.num_qps;

    /* create one data cq for each HCA */
    for(i = 0; i < mvdev.num_hcas; i++) {
        mvdev.cq[i] =
            ibv_create_cq(mvdev.hca[i].context, mvparams.cq_size, NULL,
                    NULL, 0);
        if (!mvdev.cq[i]) {
            error_abort_all(IBV_RETURN_ERR, "Couldn't create Data CQ");
            return 0;
        }
    }

    for(port = 0; port < mvparams.num_qps; port++) {
    for(i = 0; i < mvdev.num_hcas; i++) {
        int index = (port * mvdev.num_hcas) + i;
        D_PRINT("index is %d\n", index);

        /* Setup the UD QP for normal data transfer */
        mv_qp_setup_information si;

        si.send_cq = si.recv_cq = mvdev.cq[i];
        si.sq_psn = mvparams.psn;
        si.pd = mvdev.hca[i].pd;
        si.cap.max_send_wr = mvparams.ud_sq_size;
        si.cap.max_recv_wr = mvparams.ud_rq_size;
        si.cap.max_send_sge = 1;
        si.cap.max_recv_sge = 1;

        if(mvparams.ud_max_inline != -1) {
            si.cap.max_inline_data = mvparams.ud_max_inline;
        } else {
            si.cap.max_inline_data = 0;
        }

        mvdev.ud_qp[index].qp = MV_Setup_UD_QP(&si);

        if(!mvdev.ud_qp[index].qp) {
            error_abort_all(IBV_RETURN_ERR, "Couldn't create data QP");
        }

        /* 50 WQEs are held in reserve -- NOTE(review): the reason for
         * the reserve size is not visible here; confirm before tuning */
        mvdev.ud_qp[index].send_wqes_avail = mvparams.ud_sq_size - 50;
        mvdev.ud_qp[index].send_wqes_total = mvparams.ud_sq_size - 50;
        mvdev.ud_qp[index].ext_sendq_head = mvdev.ud_qp[index].ext_sendq_tail = NULL;
        mvdev.ud_qp[index].hca = &(mvdev.hca[i]);
        mvdev.ud_qp[index].ext_sendq_size = 0;
        mvdev.ud_qp[index].unsignaled_count = 0;
        mvdev.ud_qp[index].type = MVDEV_CH_UD_RQ;

        /* Re-read the actual inline threshold granted by the driver,
         * which may exceed what was requested */
        {
            struct ibv_qp_attr attr;
            struct ibv_qp_init_attr init_attr;

            if (ibv_query_qp(mvdev.ud_qp[index].qp, &attr, 0, &init_attr)) {
                mvdev.ud_qp[index].max_inline = 0;
            } else {
                mvdev.ud_qp[index].max_inline = init_attr.cap.max_inline_data;
            }
        }

        /* get a receive pool setup for this qp */
        mvdev.ud_qp[index].rpool = MV_Create_RPool(mvparams.recvq_size, 100, mvparams.mtu, NULL, &(mvdev.ud_qp[index]));

        D_PRINT("Finished setting up UD QP %d, num: %u\n", index,
                mvdev.ud_qp[index].qp->qp_num);
    }
    }


    return 1;
}
Beispiel #8
0
/*
 * Set up the verbs resources for one connection, in order: completion
 * channel, fd event, PD, MR, CQ (with notification armed), and the RC QP
 * via rdma_create_qp. Finally pre-fills the CQ with receives.
 *
 * Returns 0 on success; on failure writes a message into ibw_lasterr and
 * returns -1 (or the failing verbs rc). Resources created before a
 * failure are left for the caller/talloc teardown to release.
 */
static int ibw_setup_cq_qp(struct ibw_conn *conn)
{
	struct ibw_ctx_priv *pctx = talloc_get_type(conn->ctx->internal, struct ibw_ctx_priv);
	struct ibw_conn_priv *pconn = talloc_get_type(conn->internal, struct ibw_conn_priv);
	struct ibv_qp_init_attr init_attr;
	struct ibv_qp_attr attr;
	int rc;

	DEBUG(DEBUG_DEBUG, ("ibw_setup_cq_qp(cmid: %p)\n", pconn->cm_id));

	/* init verbs */
	pconn->verbs_channel = ibv_create_comp_channel(pconn->cm_id->verbs);
	if (!pconn->verbs_channel) {
		sprintf(ibw_lasterr, "ibv_create_comp_channel failed %d\n", errno);
		return -1;
	}
	DEBUG(DEBUG_DEBUG, ("created channel %p\n", pconn->verbs_channel));

	/* NOTE(review): tevent_add_fd's return value is not checked --
	 * confirm a NULL event is tolerable here */
	pconn->verbs_channel_event = tevent_add_fd(pctx->ectx, NULL, /* not pconn or conn */
		pconn->verbs_channel->fd, TEVENT_FD_READ, ibw_event_handler_verbs, conn);

	pconn->pd = ibv_alloc_pd(pconn->cm_id->verbs);
	if (!pconn->pd) {
		sprintf(ibw_lasterr, "ibv_alloc_pd failed %d\n", errno);
		return -1;
	}
	DEBUG(DEBUG_DEBUG, ("created pd %p\n", pconn->pd));

	/* init mr */
	if (ibw_init_memory(conn))
		return -1;

	/* init cq: sized to hold every send and receive WR at once */
	pconn->cq = ibv_create_cq(pconn->cm_id->verbs,
		pctx->opts.max_recv_wr + pctx->opts.max_send_wr,
		conn, pconn->verbs_channel, 0);
	if (pconn->cq==NULL) {
		sprintf(ibw_lasterr, "ibv_create_cq failed\n");
		return -1;
	}

	rc = ibv_req_notify_cq(pconn->cq, 0);
	if (rc) {
		sprintf(ibw_lasterr, "ibv_req_notify_cq failed with %d\n", rc);
		return rc;
	}

	/* init qp: single RC QP sharing one CQ for both directions */
	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.cap.max_send_wr = pctx->opts.max_send_wr;
	init_attr.cap.max_recv_wr = pctx->opts.max_recv_wr;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1;
	init_attr.qp_type = IBV_QPT_RC;
	init_attr.send_cq = pconn->cq;
	init_attr.recv_cq = pconn->cq;

	rc = rdma_create_qp(pconn->cm_id, pconn->pd, &init_attr);
	if (rc) {
		sprintf(ibw_lasterr, "rdma_create_qp failed with %d\n", rc);
		return rc;
	}
	/* else the result is in pconn->cm_id->qp */

	/* query the new QP; the returned attrs are not read afterwards,
	 * only the rc is checked */
	rc = ibv_query_qp(pconn->cm_id->qp, &attr, IBV_QP_PATH_MTU, &init_attr);
	if (rc) {
		sprintf(ibw_lasterr, "ibv_query_qp failed with %d\n", rc);
		return rc;
	}

	return ibw_fill_cq(conn);
}
Beispiel #9
0
/*
 * Move a backend QP to the RTR state. RC QPs additionally get the full
 * path/AV setup toward dgid/dqpn; UD QPs optionally get the qkey.
 * Returns 0 on success, -EIO if ibv_modify_qp fails.
 */
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    union ibv_gid dest_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    struct ibv_qp_attr attr = {};
    int attr_mask = IBV_QP_STATE;
    int rc;

    attr.qp_state = IBV_QPS_RTR;
    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        attr.path_mtu               = IBV_MTU_1024;
        attr.dest_qp_num            = dqpn;
        attr.rq_psn                 = rq_psn;
        attr.max_dest_rd_atomic     = 1;
        attr.min_rnr_timer          = 12;
        attr.ah_attr.port_num       = backend_dev->port_num;
        attr.ah_attr.is_global      = 1;
        attr.ah_attr.grh.hop_limit  = 1;
        attr.ah_attr.grh.dgid       = dest_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(dest_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(dest_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

/*
 * Move a backend QP to the RTS state. RC QPs also get the timeout/retry
 * parameters; UD QPs optionally get the qkey.
 * Returns 0 on success, -EIO if ibv_modify_qp fails.
 */
int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    int attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;
    int rc;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;

        attr.timeout       = 14;
        attr.retry_cnt     = 7;
        attr.rnr_retry     = 7;
        attr.max_rd_atomic = 1;

        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

/*
 * Query a backend QP's attributes. When there is no backing verbs QP,
 * reports the state as RTS and succeeds.
 */
int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (qp->ibqp) {
        return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
    }

    attr->qp_state = IBV_QPS_RTS;
    return 0;
}

/*
 * Destroy the verbs QP (if one exists) and release every completion
 * context still tracked on the QP's list.
 */
void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}

/*
 * Create a shared receive queue on the given PD and initialize its
 * completion-context list.
 * Returns 0 on success, -EIO if ibv_create_srq fails.
 */
int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit)
{
    struct ibv_srq_init_attr srq_init_attr = {
        .attr = {
            .max_wr = max_wr,
            .max_sge = max_sge,
            .srq_limit = srq_limit,
        },
    };

    srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
    if (!srq->ibsrq) {
        rdma_error_report("ibv_create_srq failed, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&srq->cqe_ctx_list);

    return 0;
}

/*
 * Query an SRQ's attributes; -EINVAL when no verbs SRQ is present.
 */
int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
{
    if (srq->ibsrq) {
        return ibv_query_srq(srq->ibsrq, srq_attr);
    }

    return -EINVAL;
}

/*
 * Modify an SRQ's attributes; -EINVAL when no verbs SRQ is present.
 */
int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                int srq_attr_mask)
{
    if (srq->ibsrq) {
        return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
    }

    return -EINVAL;
}

/*
 * Destroy the verbs SRQ (if one exists) and release every completion
 * context still tracked on the SRQ's list.
 */
void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
{
    if (srq->ibsrq) {
        ibv_destroy_srq(srq->ibsrq);
    }
    g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
}

/*
 * Clamp one requested device attribute against the host capability:
 * traces the pair, warns if the request exceeds the host value, and
 * lowers the request to the host value. `req` is a pointer to the
 * requested ibv_device_attr, `dev` the host's struct by value, `member`
 * the field name (token-pasted), `fmt` its printf format.
 */
#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})

/*
 * Query the host device's capabilities and clamp each requested
 * attribute to what the host supports; SGE limits are forced to MAX_SGE.
 * Returns 0 on success, -EIO if the host device cannot be queried.
 */
static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr host_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &host_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;
    dev_attr->max_srq_sge = MAX_SGE;

    CHK_ATTR(dev_attr, host_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, host_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, host_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, host_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, host_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, host_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, host_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, host_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, host_attr, max_ah, "%d");
    CHK_ATTR(dev_attr, host_attr, max_srq, "%d");

    return 0;
}

/*
 * Fill the GRH header fields: source GID, destination GID (the local
 * device's GID), and the payload length in network byte order.
 */
static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
    grh->paylen = htons(paylen);
}
Beispiel #10
0
/*
 * Attach the IBV-backed netif: install flow-steering rules matching the
 * local MAC / destination IP / TCP dst port (per-CPU source IP), move
 * the raw QP through RTR and RTS, query the QP, post initial receives,
 * and mark the interface up.
 *
 * Fix: attr_mask was passed to ibv_query_qp without ever being
 * initialized -- reading an uninitialized automatic variable is
 * undefined behavior. It is now set explicitly and the query's return
 * value is checked.
 */
void ibv_attach_device (struct netif *netif)
{
  struct ibvif *ibvif;
  uint8_t mac[6] = {0x00, 0x02, 0xc9, 0xa4, 0x59, 0x41};
  struct ibv_qp_attr qp_attr, query_attr;
  struct ibv_qp_init_attr init_attr; 
  int    qp_flags, attr_mask;
  uint8_t port_num = 1;
  fr_attr fr;
  /* start/end/index/i are only used by the commented-out per-port
   * flow-rule loop below */
  int start, end, index, i; 

  ibvif = (struct ibvif *)netif->state;
  
  /* Attaching the qp to the spec */
  memset(&fr.attr_info, 0 , sizeof(struct ibv_exp_flow_attr));
  fr.attr_info.type = IBV_EXP_FLOW_ATTR_NORMAL;
  fr.attr_info.size = sizeof(struct flow_rules);
  fr.attr_info.priority = 0;
  fr.attr_info.num_of_specs = 3;
  fr.attr_info.port = port_num;
  fr.attr_info.flags = 0;

  memset(&fr.spec_info, 0 , sizeof(struct ibv_exp_flow_spec_eth));
  fr.spec_info.type = IBV_EXP_FLOW_SPEC_ETH;
  fr.spec_info.size = sizeof(struct ibv_exp_flow_spec_eth);
  fr.spec_info.val.ether_type = IP_ETHER_TYPE;
  fr.spec_info.mask.ether_type = 0xffff;
  memcpy(fr.spec_info.val.dst_mac, mac, sizeof(fr.spec_info.mask.dst_mac));
  memset(fr.spec_info.mask.dst_mac, 0xff, sizeof(fr.spec_info.mask.dst_mac));

  /* Source IP is selected by the protocol thread's CPU number */
  memset(&fr.ip_spec_info, 0 , sizeof(struct ibv_exp_flow_spec_ipv4));
  fr.ip_spec_info.type = IBV_EXP_FLOW_SPEC_IPV4;
  fr.ip_spec_info.size = sizeof(struct ibv_exp_flow_spec_ipv4);
  fr.ip_spec_info.val.dst_ip = inet_addr("10.0.0.1");
  fr.ip_spec_info.mask.dst_ip = 0xffffffff;
  if (netif->prot_thread->cpu == 6) {
    fr.ip_spec_info.val.src_ip = inet_addr("10.0.0.3");
  } else if (netif->prot_thread->cpu == 7) {
    fr.ip_spec_info.val.src_ip = inet_addr("10.0.0.5");
  } else if (netif->prot_thread->cpu == 8) {
    fr.ip_spec_info.val.src_ip = inet_addr("10.0.0.6");
  } else if (netif->prot_thread->cpu == 9) {
    fr.ip_spec_info.val.src_ip = inet_addr("10.0.0.7");
  } else if (netif->prot_thread->cpu == 10) {
    fr.ip_spec_info.val.src_ip = inet_addr("10.0.0.8");
  } else if (netif->prot_thread->cpu == 11) {
    fr.ip_spec_info.val.src_ip = inet_addr("10.0.0.9");
  }


  fr.ip_spec_info.mask.src_ip = 0xffffffff;

  memset(&fr.tcp_spec_info, 0 , sizeof(struct ibv_exp_flow_spec_tcp_udp));
  fr.tcp_spec_info.type = IBV_EXP_FLOW_SPEC_TCP;
  fr.tcp_spec_info.size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
  fr.tcp_spec_info.val.dst_port = bswap_16(90);
  fr.tcp_spec_info.mask.dst_port = 0xffff;

  ibvif->flow = ibv_exp_create_flow(ibvif->qp, &fr.attr_info);
  if (!ibvif->flow) {
    perror("IBV can't create flow\n");
    exit(1);
  }

  /*start =  netif->prot_thread->cpu * 100;
  end = start + 100;
  index = 0;

  for (i=start; i<end; i++) { 
    fr.tcp_spec_info.val.src_port = htons(i);
    fr.tcp_spec_info.mask.src_port = 0xffff;

    ibvif->flow[index] = ibv_exp_create_flow(ibvif->qp, &fr.attr_info);
    if (!ibvif->flow[index]) {
      perror("IBV can't create flow\n");
      exit(1);
    }
    index++;
  }*/

  /* modify QP to send and receive */

  qp_flags = IBV_QP_STATE;
  memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
  qp_attr.qp_state = IBV_QPS_RTR;
  qp_attr.ah_attr.src_path_bits = 0;
  qp_attr.ah_attr.port_num = 1;
  //qp_attr.ah_attr.is_global  = 0;
  //qp_attr.ah_attr.sl = 1;
  if (ibv_modify_qp(ibvif->qp, &qp_attr, qp_flags)) {
    perror("IBV can't set state to RTR\n");
    exit(1);
  }

  qp_flags = IBV_QP_STATE;
  memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
  qp_attr.qp_state = IBV_QPS_RTS;
  qp_attr.ah_attr.src_path_bits = 0;
  qp_attr.ah_attr.port_num = 1;
  //qp_attr.ah_attr.is_global  = 0;
  //qp_attr.ah_attr.sl = 1;
  if (ibv_modify_qp(ibvif->qp, &qp_attr, qp_flags)) {
    perror("IBV can't set state to RTS\n");
    exit(1);
  }

  /* attr_mask was previously uninitialized here (UB); query the state
   * explicitly and check the result */
  attr_mask = IBV_QP_STATE;
  if (ibv_query_qp(ibvif->qp, &query_attr, attr_mask, &init_attr)) {
    perror("IBV can't query qp\n");
    exit(1);
  }

  infini_post_recv(ibvif);

  netif->flags =  NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP | NETIF_FLAG_UP;

}
Beispiel #11
0
/*
 * Create an RC channel for a connection: allocate the channel record,
 * configure the QP setup info (SRQ-backed or private RQ depending on
 * `type`), create the RC QP, and read back the granted inline limit.
 * Returns the new channel, or NULL if the record cannot be allocated;
 * QP creation failure aborts via error_abort_all (consistent with
 * MV_Setup_QPs).
 *
 * Fixes: malloc result was unchecked (NULL deref on exhaustion), and a
 * failed MV_Create_RC_QP left qp->qp NULL before it was dereferenced by
 * ibv_query_qp.
 */
mvdev_channel_rc * MV_Setup_RC(mvdev_connection_t *c, int buf_size, int type, int rq_size) {
    int hca = 0;
    mv_qp_setup_information si;

    /* TODO: use buf_size */

    mvdev_channel_rc *ch = (mvdev_channel_rc *) malloc(sizeof(mvdev_channel_rc));
    if (NULL == ch) {
        return NULL;
    }

    ch->buffer_size = buf_size;
    ch->type = type;
    ch->next = NULL;

    mv_qp * qp = &(ch->qp[0]);

    si.send_cq = si.recv_cq = mvdev.cq[hca];

    if(MVDEV_CH_RC_SRQ == type) {
        /* receives come from the shared receive queue */
        mv_srq * srq = MV_Get_SRQ(buf_size);
        si.srq = srq->srq;
        si.cap.max_recv_wr = 0;
    } else {
        si.cap.max_recv_wr = rq_size + 15;
        si.srq = NULL;
    }

    si.pd = mvdev.hca[hca].pd;
    si.sq_psn = mvparams.psn;
    si.cap.max_send_sge = 1;
    si.cap.max_recv_sge = 1;
    si.cap.max_send_wr = mvparams.rc_sq_size;

    if(-1 != mvparams.rc_max_inline) {
        si.cap.max_inline_data = mvparams.rc_max_inline;
    } else {
        si.cap.max_inline_data = 0;
    }

    qp->qp = MV_Create_RC_QP(&si);
    if(!qp->qp) {
        error_abort_all(IBV_RETURN_ERR, "Couldn't create RC QP");
    }

    qp->send_wqes_avail = mvparams.rc_sq_size - 5;
    qp->send_wqes_total = mvparams.rc_sq_size - 5;
    qp->ext_sendq_head = qp->ext_sendq_tail = NULL;
    qp->ext_backlogq_head = qp->ext_backlogq_tail = NULL;
    qp->hca = &(mvdev.hca[hca]);
    qp->ext_sendq_size = 0;
    qp->unsignaled_count = 0;
    qp->max_send_size = buf_size;
    qp->type = type;

    /* Re-read the actual inline threshold granted by the driver */
    {
        struct ibv_qp_attr attr;
        struct ibv_qp_init_attr init_attr;

        if (ibv_query_qp(qp->qp, &attr, 0, &init_attr)) {
            qp->max_inline = 0;
        } else {
            qp->max_inline = init_attr.cap.max_inline_data;
        }
    }

    if(MVDEV_CH_RC_RQ == type) {
        qp->send_credits_remaining = rq_size;
    }
     
    return ch;
}