Example #1
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	struct c4iw_cq *chp;
	struct c4iw_qp *qhp;
	u32 cqid;

	spin_lock(&dev->lock);
	qhp = get_qhp(dev, CQE_QPID(err_cqe));
	if (!qhp) {
		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock(&dev->lock);
		goto out;
	}

	if (SQ_TYPE(err_cqe))
		cqid = qhp->attr.scq;
	else
		cqid = qhp->attr.rcq;
	chp = get_chp(dev, cqid);
	if (!chp) {
		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock(&dev->lock);
		goto out;
	}

	c4iw_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock(&dev->lock);

	/* Bad incoming write */
	if (RQ_TYPE(err_cqe) &&
	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
		goto done;
	}

	switch (CQE_STATUS(err_cqe)) {

	/* Completion Events */
	case T4_ERR_SUCCESS:
		printk(KERN_ERR MOD "AE with status 0!\n");
		break;

	case T4_ERR_STAG:
	case T4_ERR_PDID:
	case T4_ERR_QPID:
	case T4_ERR_ACCESS:
	case T4_ERR_WRAP:
	case T4_ERR_BOUND:
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
		break;

	/* Device Fatal Errors */
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
		break;

	/* QP Fatal Errors */
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_PBL_ADDR_BOUND:
	case T4_ERR_CRC:
	case T4_ERR_MARKER:
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_OPCODE:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN:
	case T4_ERR_TBIT:
	case T4_ERR_MO:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_RQE_ADDR_BOUND:
	case T4_ERR_IRD_OVERFLOW:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;

	default:
		printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	c4iw_qp_rem_ref(&qhp->ibqp);
out:
	return;
}
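
Both dispatchers follow the same lifetime rule: the QP and CQ are looked up under the device lock, references are taken before the lock is dropped, and the destroy path sleeps on chp->wait until the last reference is released. Below is a minimal sketch of that pattern; every name in it (demo_cq, demo_dev, demo_get_cq, demo_put_cq, demo_destroy_cq) is hypothetical, and only the lock/refcount/wait shape mirrors the drivers above.

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_cq {
	atomic_t refcnt;		/* starts at 1 when the CQ is created */
	wait_queue_head_t wait;
};

struct demo_dev {
	spinlock_t lock;
	struct demo_cq *cq;		/* the real drivers look this up by cqid */
};

/* Look up and reference the CQ under the device lock, as the dispatchers do. */
static struct demo_cq *demo_get_cq(struct demo_dev *dev)
{
	struct demo_cq *cq;

	spin_lock(&dev->lock);
	cq = dev->cq;
	if (cq)
		atomic_inc(&cq->refcnt);
	spin_unlock(&dev->lock);
	return cq;
}

/* Mirrors the "done:" label: drop the reference, wake a waiting destroyer. */
static void demo_put_cq(struct demo_cq *cq)
{
	if (atomic_dec_and_test(&cq->refcnt))
		wake_up(&cq->wait);
}

/* Destroy path: drop the initial reference, then wait out in-flight users. */
static void demo_destroy_cq(struct demo_cq *cq)
{
	demo_put_cq(cq);
	wait_event(cq->wait, !atomic_read(&cq->refcnt));
	/* now safe to free the CQ */
}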
Example #2
File: iwch_ev.c Project: 710leo/LVS
void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
{
	struct iwch_dev *rnicp;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
	struct iwch_cq *chp;
	struct iwch_qp *qhp;
	u32 cqid = RSPQ_CQID(rsp_msg);

	rnicp = (struct iwch_dev *) rdev_p->ulp;
	spin_lock(&rnicp->lock);
	chp = get_chp(rnicp, cqid);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
	if (!chp || !qhp) {
		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
		       cqid, CQE_QPID(rsp_msg->cqe),
		       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
		       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
		       CQE_WRID_LOW(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		goto out;
	}
	iwch_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock(&rnicp->lock);

	/*
	 * 1) completion of our sending a TERMINATE.
	 * 2) incoming TERMINATE message.
	 */
	if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
		if (SQ_TYPE(rsp_msg->cqe)) {
			PDBG("%s QPID 0x%x ep %p disconnecting\n",
			     __func__, qhp->wq.qpid, qhp->ep);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		} else {
			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
			     qhp->wq.qpid);
			post_qp_event(rnicp, chp, rsp_msg,
				      IB_EVENT_QP_REQ_ERR, 0);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		}
		goto done;
	}

	/* Bad incoming Read request */
	if (SQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	/* Bad incoming write */
	if (RQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	switch (CQE_STATUS(rsp_msg->cqe)) {

	/* Completion Events */
	case TPT_ERR_SUCCESS:

		/*
		 * Confirm the destination entry on a send completion: a
		 * completed send implies the peer is reachable.
		 */
		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
			dst_confirm(qhp->ep->dst);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		break;

	case TPT_ERR_STAG:
	case TPT_ERR_PDID:
	case TPT_ERR_QPID:
	case TPT_ERR_ACCESS:
	case TPT_ERR_WRAP:
	case TPT_ERR_BOUND:
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
		break;

	/* Device Fatal Errors */
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
		break;

	/* QP Fatal Errors */
	case TPT_ERR_OUT_OF_RQE:
	case TPT_ERR_PBL_ADDR_BOUND:
	case TPT_ERR_CRC:
	case TPT_ERR_MARKER:
	case TPT_ERR_PDU_LEN_ERR:
	case TPT_ERR_DDP_VERSION:
	case TPT_ERR_RDMA_VERSION:
	case TPT_ERR_OPCODE:
	case TPT_ERR_DDP_QUEUE_NUM:
	case TPT_ERR_MSN:
	case TPT_ERR_TBIT:
	case TPT_ERR_MO:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_RQE_ADDR_BOUND:
	case TPT_ERR_IRD_OVERFLOW:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;

	default:
		printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	iwch_qp_rem_ref(&qhp->ibqp);
out:
	dev_kfree_skb_irq(skb);
}
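
In both drivers, post_qp_event() ultimately hands the consumer an ib_event through the handler registered when the QP was created. The sketch below shows only that delivery step, under the assumption of a standard struct ib_qp from <rdma/ib_verbs.h>; demo_post_qp_event is a hypothetical name, and the drivers' real helpers do additional driver-specific work before dispatching.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper: build an ib_event for a QP and deliver it through
 * the consumer's registered event handler. This is only the dispatch
 * step, not either driver's actual post_qp_event() implementation.
 */
static void demo_post_qp_event(struct ib_qp *ibqp, enum ib_event_type ev_type)
{
	struct ib_event event;

	memset(&event, 0, sizeof(event));
	event.event = ev_type;		/* e.g. IB_EVENT_QP_FATAL, as above */
	event.device = ibqp->device;
	event.element.qp = ibqp;

	if (ibqp->event_handler)
		(*ibqp->event_handler)(&event, ibqp->qp_context);
}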