static int hns_roce_u_v1_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
				   int attr_mask)
{
	int ret;
	struct ibv_modify_qp cmd = {};
	struct hns_roce_qp *hr_qp = to_hr_qp(qp);

	ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof(cmd));

	if (!ret && (attr_mask & IBV_QP_STATE) &&
	    attr->qp_state == IBV_QPS_RESET) {
		hns_roce_v1_cq_clean(to_hr_cq(qp->recv_cq), qp->qp_num,
				     qp->srq ? to_hr_srq(qp->srq) : NULL);
		if (qp->send_cq != qp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(qp->send_cq), qp->qp_num,
					     NULL);

		hns_roce_init_qp_indices(hr_qp);
	}

	if (!ret && (attr_mask & IBV_QP_PORT))
		hr_qp->port_num = attr->port_num;

	if (!ret && (attr_mask & IBV_QP_AV))
		hr_qp->sl = attr->ah_attr.sl;

	return ret;
}
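
/*
 * Usage sketch (illustrative, not part of the provider): driving a QP back
 * to RESET through the standard verbs call is what reaches the CQ-clean and
 * index-reset path in hns_roce_u_v1_modify_qp() above. example_reset_qp()
 * is a hypothetical helper, not a driver entry point.
 */
static int example_reset_qp(struct ibv_qp *qp)
{
	struct ibv_qp_attr attr = {
		.qp_state = IBV_QPS_RESET,
	};

	/* Only IBV_QP_STATE is required for the transition to RESET. */
	return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
}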
static int hns_roce_u_v1_destroy_qp(struct ibv_qp *ibqp)
{
	int ret;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);

	pthread_mutex_lock(&to_hr_ctx(ibqp->context)->qp_table_mutex);
	ret = ibv_cmd_destroy_qp(ibqp);
	if (ret) {
		pthread_mutex_unlock(&to_hr_ctx(ibqp->context)->qp_table_mutex);
		return ret;
	}

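	/*
	 * Both CQ locks are taken (with qp_table_mutex already held) so that
	 * no concurrent poller can observe CQEs for a QP that is about to be
	 * removed from the qp table.
	 */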
	hns_roce_lock_cqs(ibqp);

	__hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), ibqp->qp_num,
			       ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);

	if (ibqp->send_cq != ibqp->recv_cq)
		__hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), ibqp->qp_num,
				       NULL);

	hns_roce_clear_qp(to_hr_ctx(ibqp->context), ibqp->qp_num);

	hns_roce_unlock_cqs(ibqp);
	pthread_mutex_unlock(&to_hr_ctx(ibqp->context)->qp_table_mutex);

	free(qp->sq.wrid);
	if (qp->rq.wqe_cnt)
		free(qp->rq.wrid);

	hns_roce_free_buf(&qp->buf);
	free(qp);

	return ret;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
				attr->path_mtu);
			goto out;
		}
	}
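	/*
	 * E.g. with a 1500-byte Ethernet MTU, iboe_get_mtu() yields
	 * IB_MTU_1024, so any requested path_mtu above 1024 fails the
	 * check above.
	 */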

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

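	/*
	 * A RESET-to-RESET "transition" is a no-op: report success without
	 * invoking the hardware-specific handler.
	 */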
	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}
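
/*
 * Usage sketch (illustrative): a minimal RESET-to-INIT transition as a ULP
 * might request it. The IB core dispatches into hns_roce_modify_qp() above,
 * which bounds-checks port_num and pkey_index before handing off to
 * hr_dev->hw->modify_qp. example_qp_to_init() is a hypothetical helper.
 */
static int example_qp_to_init(struct ib_qp *qp, u8 port)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
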
static int hns_roce_u_v1_post_recv(struct ibv_qp *ibvqp, struct ibv_recv_wr *wr,
				   struct ibv_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq;
	int ind;
	struct ibv_sge *sg;
	struct hns_roce_rc_rq_wqe *rq_wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibvqp);
	struct hns_roce_context *ctx = to_hr_ctx(ibvqp->context);

	pthread_spin_lock(&qp->rq.lock);

	/* check that state is OK to post receive */
	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->rq, nreq,
					 to_hr_cq(qp->ibv_qp.recv_cq))) {
			ret = -1;
			*bad_wr = wr;
			goto out;
		}

		if (wr->num_sge > qp->rq.max_gs) {
			ret = -1;
			*bad_wr = wr;
			goto out;
		}

		rq_wqe = get_recv_wqe(qp, ind);
		if (wr->num_sge > HNS_ROCE_RC_RQ_WQE_MAX_SGE_NUM) {
			ret = -1;
			*bad_wr = wr;
			goto out;
		}

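		/*
		 * The v1 RC RQ WQE has two fixed data-segment slots
		 * (va0/va1): each branch below fills the slots it needs and
		 * records the segment count in u32_2.
		 */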
		if (wr->num_sge == HNS_ROCE_RC_RQ_WQE_MAX_SGE_NUM) {
			roce_set_field(rq_wqe->u32_2,
				       RC_RQ_WQE_NUMBER_OF_DATA_SEG_M,
				       RC_RQ_WQE_NUMBER_OF_DATA_SEG_S,
				       HNS_ROCE_RC_RQ_WQE_MAX_SGE_NUM);
			sg = wr->sg_list;

			rq_wqe->va0 = htole64(sg->addr);
			rq_wqe->l_key0 = htole32(sg->lkey);
			rq_wqe->length0 = htole32(sg->length);

			sg = wr->sg_list + 1;

			rq_wqe->va1 = htole64(sg->addr);
			rq_wqe->l_key1 = htole32(sg->lkey);
			rq_wqe->length1 = htole32(sg->length);
		} else if (wr->num_sge == HNS_ROCE_RC_RQ_WQE_MAX_SGE_NUM - 1) {
			roce_set_field(rq_wqe->u32_2,
				       RC_RQ_WQE_NUMBER_OF_DATA_SEG_M,
				       RC_RQ_WQE_NUMBER_OF_DATA_SEG_S,
				       HNS_ROCE_RC_RQ_WQE_MAX_SGE_NUM - 1);
			sg = wr->sg_list;

			rq_wqe->va0 = htole64(sg->addr);
			rq_wqe->l_key0 = htole32(sg->lkey);
			rq_wqe->length0 = htole32(sg->length);

		} else if (wr->num_sge == HNS_ROCE_RC_RQ_WQE_MAX_SGE_NUM - 2) {
			roce_set_field(rq_wqe->u32_2,
				       RC_RQ_WQE_NUMBER_OF_DATA_SEG_M,
				       RC_RQ_WQE_NUMBER_OF_DATA_SEG_S,
				       HNS_ROCE_RC_RQ_WQE_MAX_SGE_NUM - 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (nreq) {
		qp->rq.head += nreq;

		hns_roce_update_rq_head(ctx, qp->ibv_qp.qp_num,
				    qp->rq.head & ((qp->rq.wqe_cnt << 1) - 1));
	}

	pthread_spin_unlock(&qp->rq.lock);

	return ret;
}
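
/*
 * Usage sketch (illustrative): posting a single-SGE receive through the
 * standard verbs entry point, which dispatches to hns_roce_u_v1_post_recv()
 * above. `mr` and `buf` are assumed to come from a prior ibv_reg_mr();
 * example_post_recv() is a hypothetical helper.
 */
static int example_post_recv(struct ibv_qp *qp, struct ibv_mr *mr,
			     void *buf, uint32_t len, uint64_t wr_id)
{
	struct ibv_sge sge = {
		.addr	= (uintptr_t)buf,
		.length	= len,
		.lkey	= mr->lkey,
	};
	struct ibv_recv_wr wr = {
		.wr_id	 = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr;

	return ibv_post_recv(qp, &wr, &bad_wr);
}
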
static int hns_roce_u_v1_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
				   struct ibv_send_wr **bad_wr)
{
	unsigned int ind;
	void *wqe;
	int nreq;
	int ps_opcode, i;
	int ret = 0;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibvqp);
	struct hns_roce_context *ctx = to_hr_ctx(ibvqp->context);

	pthread_spin_lock(&qp->sq.lock);

	/* check that state is OK to post send */
	ind = qp->sq.head;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq,
					 to_hr_cq(qp->ibv_qp.send_cq))) {
			ret = -1;
			*bad_wr = wr;
			goto out;
		}
		if (wr->num_sge > qp->sq.max_gs) {
			ret = -1;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));

		qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
		for (i = 0; i < wr->num_sge; i++)
			ctrl->msg_length = htole32(le32toh(ctrl->msg_length) +
						   wr->sg_list[i].length);

		ctrl->flag |= htole32(((wr->send_flags & IBV_SEND_SIGNALED) ?
				HNS_ROCE_WQE_CQ_NOTIFY : 0) |
			      (wr->send_flags & IBV_SEND_SOLICITED ?
				HNS_ROCE_WQE_SE : 0) |
			      ((wr->opcode == IBV_WR_SEND_WITH_IMM ||
			       wr->opcode == IBV_WR_RDMA_WRITE_WITH_IMM) ?
				HNS_ROCE_WQE_IMM : 0) |
			      (wr->send_flags & IBV_SEND_FENCE ?
				HNS_ROCE_WQE_FENCE : 0));

		if (wr->opcode == IBV_WR_SEND_WITH_IMM ||
		    wr->opcode == IBV_WR_RDMA_WRITE_WITH_IMM)
			ctrl->imm_data = htole32(be32toh(wr->imm_data));

		wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

		/* set remote addr segment */
		switch (ibvqp->qp_type) {
		case IBV_QPT_RC:
			switch (wr->opcode) {
			case IBV_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				break;
			case IBV_WR_RDMA_WRITE:
			case IBV_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				break;
			case IBV_WR_SEND:
			case IBV_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IBV_WR_ATOMIC_CMP_AND_SWP:
			case IBV_WR_ATOMIC_FETCH_AND_ADD:
			default:
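				/*
				 * Atomic opcodes are not handled by this
				 * path; the mask value marks the WQE
				 * opcode invalid.
				 */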
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= htole32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);
			break;
		case IBV_QPT_UC:
		case IBV_QPT_UD:
		default:
			break;
		}

		dseg = wqe;

		/* Inline */
		if ((wr->send_flags & IBV_SEND_INLINE) && wr->num_sge) {
			if (le32toh(ctrl->msg_length) > qp->max_inline_data) {
				ret = -1;
				*bad_wr = wr;
				/* Bail out through the common exit so the
				 * SQ lock is released and already-written
				 * WRs still get doorbelled. */
				goto out;
			}

			for (i = 0; i < wr->num_sge; i++) {
				memcpy(wqe,
				     ((void *) (uintptr_t) wr->sg_list[i].addr),
				     wr->sg_list[i].length);
				wqe = wqe + wr->sg_list[i].length;
			}

			ctrl->flag |= htole32(HNS_ROCE_WQE_INLINE);
		} else {
			/* set sge */
			for (i = 0; i < wr->num_sge; i++)
				set_data_seg(dseg+i, wr->sg_list + i);

			ctrl->flag |=
			       htole32(wr->num_sge << HNS_ROCE_WQE_SGE_NUM_BIT);
		}

		ind++;
	}

out:
	/* Set DB return */
	if (likely(nreq)) {
		qp->sq.head += nreq;

		hns_roce_update_sq_head(ctx, qp->ibv_qp.qp_num,
				qp->port_num - 1, qp->sl,
				qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
	}

	pthread_spin_unlock(&qp->sq.lock);

	return ret;
}
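
/*
 * Usage sketch (illustrative): a signaled RDMA WRITE posted through the
 * standard verbs API; hns_roce_u_v1_post_send() above builds the ctrl,
 * raddr and data segments for it. remote_addr/rkey are assumed to come from
 * the peer's advertised MR; example_post_rdma_write() is a hypothetical
 * helper.
 */
static int example_post_rdma_write(struct ibv_qp *qp, struct ibv_mr *mr,
				   void *buf, uint32_t len,
				   uint64_t remote_addr, uint32_t rkey)
{
	struct ibv_sge sge = {
		.addr	= (uintptr_t)buf,
		.length	= len,
		.lkey	= mr->lkey,
	};
	struct ibv_send_wr wr = {
		.opcode			= IBV_WR_RDMA_WRITE,
		.send_flags		= IBV_SEND_SIGNALED,
		.sg_list		= &sge,
		.num_sge		= 1,
		.wr.rdma.remote_addr	= remote_addr,
		.wr.rdma.rkey		= rkey,
	};
	struct ibv_send_wr *bad_wr;

	return ibv_post_send(qp, &wr, &bad_wr);
}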