Example #1
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;
	qp->udata		= udata;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}
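
rxe_qp_from_init() above takes a reference on every object the new QP will point at (PD, both CQs, optional SRQ) before doing anything that can fail, and the err2/err1 labels release them in reverse order. A minimal, self-contained sketch of that acquire-then-unwind shape, using made-up names rather than the real rxe helpers:

/* Standalone illustration (not rxe code): take references up front,
 * then unwind in reverse order on any failure, the way
 * rxe_qp_from_init() pairs rxe_add_ref()/rxe_drop_ref() around its
 * err1/err2 labels.
 */
#include <stdio.h>

struct obj { int refcnt; };

static void obj_get(struct obj *o) { o->refcnt++; }
static void obj_put(struct obj *o) { o->refcnt--; }

static int step_one(void) { return 0; }		/* pretend init steps */
static int step_two(void) { return -1; }	/* force the error path */
static void undo_step_one(void) { }

static int create(struct obj *a, struct obj *b)
{
	int err;

	obj_get(a);
	obj_get(b);

	err = step_one();
	if (err)
		goto err_refs;

	err = step_two();
	if (err)
		goto err_step_one;

	return 0;

err_step_one:
	undo_step_one();
err_refs:
	obj_put(b);		/* drop in reverse order of acquisition */
	obj_put(a);
	return err;
}

int main(void)
{
	struct obj a = { 0 }, b = { 0 };
	int err = create(&a, &b);

	printf("create() = %d, refs left: a=%d b=%d\n", err, a.refcnt, b.refcnt);
	return 0;
}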
Example #2
File: rxe_net.c Project: avagin/linux
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;

	skb->destructor = rxe_skb_tx_dtor;
	skb->sk = pkt->qp->sk->sk;

	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	if (skb->protocol == htons(ETH_P_IP)) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
		atomic_dec(&pkt->qp->skb_out);
		rxe_drop_ref(pkt->qp);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	return 0;
}
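
Note the reference lifetime in rxe_send(): the QP is pinned with rxe_add_ref() and skb_out is incremented before the skb is handed to ip_local_out()/ip6_local_out(); the destructor installed at the top is what later balances both once the skb is consumed, and only the unknown-protocol branch undoes them explicitly before freeing the skb. A rough sketch of what such a destructor does, assuming sk_user_data points back at the owning QP; this is an approximation, not necessarily the exact rxe_skb_tx_dtor() in rxe_net.c:

/* Sketch of a tx destructor balancing the accounting in rxe_send().
 * Assumption: skb->sk->sk_user_data was set to the owning QP when the
 * QP's socket was created.
 */
static void example_skb_tx_dtor(struct sk_buff *skb)
{
	struct rxe_qp *qp = skb->sk->sk_user_data;

	atomic_dec(&qp->skb_out);	/* undo atomic_inc() in rxe_send() */
	rxe_drop_ref(qp);		/* undo rxe_add_ref() in rxe_send() */
}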
Example #3
File: rxe_verbs.c Project: avagin/linux
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}
Example #4
File: rxe_verbs.c Project: avagin/linux
static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}
Example #5
File: rxe_verbs.c Project: avagin/linux
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}
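
Examples #3, #4 and #5 all follow the same sequence: rxe_alloc() from mr_pool, rxe_add_index(), rxe_add_ref(pd), then a type-specific rxe_mem_init_*() call, unwinding with rxe_drop_ref(pd) / rxe_drop_index(mr) / rxe_drop_ref(mr) if that last step fails. A hypothetical teardown that balances those three on the destroy side might look like the following; the real dereg path lives in rxe_verbs.c and may differ in detail:

/* Hypothetical teardown balancing the references taken in the three
 * MR creation paths above (not the actual rxe dereg function).
 */
static int example_dereg_mr(struct rxe_mem *mr)
{
	/* assumption: mr->pd holds the reference taken via rxe_add_ref(pd) */
	rxe_drop_ref(mr->pd);	/* balances rxe_add_ref(pd) */
	rxe_drop_index(mr);	/* balances rxe_add_index(mr) */
	rxe_drop_ref(mr);	/* balances the rxe_alloc() reference */
	return 0;
}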
Example #6
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		goto err1;

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err)
		goto err2;

	return &ah->ibah;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(ah);
err1:
	return ERR_PTR(err);
}
Example #7
File: rxe_verbs.c Project: avagin/linux
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}
Example #8
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}
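
Examples #7 and #8 appear to be the same rxe_create_srq() from different kernel versions: in #7 the ib_srq object is allocated by the IB core and the driver only attaches it to its pool with rxe_add_to_pool(), while in #8 the driver allocates the whole rxe_srq from srq_pool itself and also assigns it an index. The rxe_add_ref(pd) call and the err2/err1 unwind are the same in both.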
Example #9
File: rxe_req.c Project: lumag/linux
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	int payload;
	int mtu;
	int opcode;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;

	rxe_add_ref(qp);

next_wqe:
	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = consumer_index(qp->sq.queue);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		goto exit;
	}

	if (unlikely(qp->req.need_retry)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
			if (!rmr) {
				pr_err("No mr for key %#x\n",
				       wqe->wr.ex.invalidate_rkey);
				wqe->state = wqe_state_error;
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->lkey = wqe->wr.wr.reg.key;
			rmr->rkey = wqe->wr.wr.reg.key;
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else {
			goto exit;
		}
		qp->req.wqe_index = next_index(qp->sq.queue,
						qp->req.wqe_index);
		goto next_wqe;
	}
Example #10
int rxe_completer(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_send_wqe *wqe = wqe;	/* self-assignment only silences a "may be used uninitialized" warning */
	struct sk_buff *skb = NULL;
	struct rxe_pkt_info *pkt = NULL;
	enum comp_state state;

	rxe_add_ref(qp);

	if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
	    qp->req.state == QP_STATE_RESET) {
		rxe_drain_resp_pkts(qp, qp->valid &&
				    qp->req.state == QP_STATE_ERROR);
		goto exit;
	}

	if (qp->comp.timeout) {
		qp->comp.timeout_retry = 1;
		qp->comp.timeout = 0;
	} else {
		qp->comp.timeout_retry = 0;
	}

	if (qp->req.need_retry)
		goto exit;

	state = COMPST_GET_ACK;

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 comp_state_name[state]);
		switch (state) {
		case COMPST_GET_ACK:
			skb = skb_dequeue(&qp->resp_pkts);
			if (skb) {
				pkt = SKB_TO_PKT(skb);
				qp->comp.timeout_retry = 0;
			}
			state = COMPST_GET_WQE;
			break;

		case COMPST_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;

		case COMPST_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;

		case COMPST_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;

		case COMPST_READ:
			state = do_read(qp, pkt, wqe);
			break;

		case COMPST_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;

		case COMPST_WRITE_SEND:
			if (wqe->state == wqe_state_pending &&
			    wqe->last_psn == pkt->psn)
				state = COMPST_COMP_ACK;
			else
				state = COMPST_UPDATE_COMP;
			break;

		case COMPST_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;

		case COMPST_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;

		case COMPST_UPDATE_COMP:
			if (pkt->mask & RXE_END_MASK)
				qp->comp.opcode = -1;
			else
				qp->comp.opcode = pkt->opcode;

			if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
				qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
			}

			state = COMPST_DONE;
			break;

		case COMPST_DONE:
			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}
			goto done;

		case COMPST_EXIT:
			if (qp->comp.timeout_retry && wqe) {
				state = COMPST_ERROR_RETRY;
				break;
			}

			/* re reset the timeout counter if
			 * (1) QP is type RC
			 * (2) the QP is alive
			 * (3) there is a packet sent by the requester that
			 *     might be acked (we still might get spurious
			 *     timeouts but try to keep them as few as possible)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
			    (qp->req.state == QP_STATE_READY) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
					  jiffies + qp->qp_timeout_jiffies);
			WARN_ON_ONCE(skb);
			goto exit;

		case COMPST_ERROR_RETRY:
			/* we come here if the retry timer fired and we did
			 * not receive a response packet. try to retry the send
			 * queue if that makes sense and the limits have not
			 * been exceeded. remember that some timeouts are
			 * spurious since we do not reset the timer but kick
			 * it down the road or let it expire
			 */

			/* there is nothing to retry in this case */
			if (!wqe || (wqe->state == wqe_state_posted)) {
				WARN_ON_ONCE(skb);
				goto exit;
			}

			if (qp->comp.retry_cnt > 0) {
				if (qp->comp.retry_cnt != 7)
					qp->comp.retry_cnt--;

				/* no point in retrying if we have already
				 * seen the last ack that the requester could
				 * have caused
				 */
				if (psn_compare(qp->req.psn,
						qp->comp.psn) > 0) {
					/* tell the requester to retry the
					 * send queue next time around
					 */
					rxe_counter_inc(rxe,
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					rxe_run_task(&qp->req.task, 1);
				}

				if (pkt) {
					rxe_drop_ref(pkt->qp);
					kfree_skb(skb);
					skb = NULL;
				}

				WARN_ON_ONCE(skb);
				goto exit;

			} else {
				rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
				wqe->status = IB_WC_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_RNR_RETRY:
			if (qp->comp.rnr_retry > 0) {
				if (qp->comp.rnr_retry != 7)
					qp->comp.rnr_retry--;

				qp->req.need_retry = 1;
				pr_debug("qp#%d set rnr nak timer\n",
					 qp_num(qp));
				mod_timer(&qp->rnr_nak_timer,
					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
						& ~AETH_TYPE_MASK));
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
				goto exit;
			} else {
				rxe_counter_inc(rxe,
						RXE_CNT_RNR_RETRY_EXCEEDED);
				wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
				state = COMPST_ERROR;
			}
			break;

		case COMPST_ERROR:
			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
			do_complete(qp, wqe);
			rxe_qp_error(qp);

			if (pkt) {
				rxe_drop_ref(pkt->qp);
				kfree_skb(skb);
				skb = NULL;
			}

			WARN_ON_ONCE(skb);
			goto exit;
		}
	}

exit:
	/* we come here if we are done with processing and want the task to
	 * exit from the loop calling us
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return -EAGAIN;

done:
	/* we come here if we have processed a packet we want the task to call
	 * us again to see if there is anything else to do
	 */
	WARN_ON_ONCE(skb);
	rxe_drop_ref(qp);
	return 0;
}
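
Examples #9 and #10 show the other half of the pattern: the requester and completer task functions take a reference on the QP as their first action and drop it on every exit path, so the QP cannot disappear while a task iteration is in flight. Per the comments above, returning 0 asks the task framework to call the function again, while -EAGAIN tells it to stop. A minimal sketch of that entry/exit discipline, with a made-up work helper:

/* Hypothetical task body (not rxe code): hold a reference for the whole
 * iteration and drop it on every way out, as rxe_requester() and
 * rxe_completer() do.
 */
static int example_task(void *arg)
{
	struct rxe_qp *qp = arg;
	int ret;

	rxe_add_ref(qp);		/* pin the QP for this iteration */

	ret = process_one_item(qp);	/* made-up work function */

	rxe_drop_ref(qp);		/* always balanced before returning */
	return ret;			/* 0: run again, -EAGAIN: stop */
}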