/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_srq *srq = NULL;
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe *rd_cqe;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (qhp) {
		spin_lock(&qhp->lock);
		srq = qhp->srq;
		if (srq)
			spin_lock(&srq->lock);
		ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
		spin_unlock(&qhp->lock);
		if (srq)
			spin_unlock(&srq->lock);
	} else {
		ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
	}
	return ret;
}
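/*
 * A minimal caller-side sketch (not part of this file) of how a poll loop
 * might consume the return contract above: retry on -EAGAIN, stop quietly
 * on -ENODATA, and propagate any other error.  The wrapper name
 * c4iw_poll_cq, the to_c4iw_cq() helper, and the chp->lock spinlock are
 * assumed from context, not confirmed by this source.
 */
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);	/* assumed container helper */
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		/* -EAGAIN means the CQE was consumed internally; try again */
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	/* -ENODATA just means the CQ is empty; report entries polled so far */
	return !err || err == -ENODATA ? npolled : err;
}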
static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
			  struct respQ_msg_t *rsp_msg,
			  enum ib_event_type ib_event,
			  int send_term)
{
	struct ib_event event;
	struct iwch_qp_attributes attrs;
	struct iwch_qp *qhp;

	spin_lock(&rnicp->lock);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));

	if (!qhp) {
		printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
		       __func__, CQE_STATUS(rsp_msg->cqe),
		       CQE_QPID(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		return;
	}

	if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
	    (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
		PDBG("%s AE received after RTS - "
		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
		     qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		return;
	}

	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));

	atomic_inc(&qhp->refcnt);
	spin_unlock(&rnicp->lock);

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;

	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
		attrs.next_state = IWCH_QP_STATE_TERMINATE;
		iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		if (send_term)
			iwch_post_terminate(qhp, rsp_msg);
	}

	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);
}
/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
	     (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	struct c4iw_cq *chp;
	struct c4iw_qp *qhp;
	u32 cqid;

	spin_lock(&dev->lock);
	qhp = get_qhp(dev, CQE_QPID(err_cqe));
	if (!qhp) {
		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock(&dev->lock);
		goto out;
	}

	if (SQ_TYPE(err_cqe))
		cqid = qhp->attr.scq;
	else
		cqid = qhp->attr.rcq;
	chp = get_chp(dev, cqid);
	if (!chp) {
		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock(&dev->lock);
		goto out;
	}

	c4iw_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock(&dev->lock);

	/* Bad incoming write */
	if (RQ_TYPE(err_cqe) &&
	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
		goto done;
	}

	switch (CQE_STATUS(err_cqe)) {

	/* Completion Events */
	case T4_ERR_SUCCESS:
		printk(KERN_ERR MOD "AE with status 0!\n");
		break;

	case T4_ERR_STAG:
	case T4_ERR_PDID:
	case T4_ERR_QPID:
	case T4_ERR_ACCESS:
	case T4_ERR_WRAP:
	case T4_ERR_BOUND:
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
		break;

	/* Device Fatal Errors */
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
		break;

	/* QP Fatal Errors */
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_PBL_ADDR_BOUND:
	case T4_ERR_CRC:
	case T4_ERR_MARKER:
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_OPCODE:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN:
	case T4_ERR_TBIT:
	case T4_ERR_MO:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_RQE_ADDR_BOUND:
	case T4_ERR_IRD_OVERFLOW:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;

	default:
		printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	c4iw_qp_rem_ref(&qhp->ibqp);
out:
	return;
}
/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *	0			EMPTY
 *	1			cqe returned
 *	-EAGAIN			caller must try again
 *	any other neg errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		mtx_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
	if (t3a_device(chp->rhp) && credit) {
		CTR3(KTR_IW_CXGB, "%s updating %d cq credits on id %d",
		     __FUNCTION__, credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);

	CTR4(KTR_IW_CXGB,
	     "iwch_poll_cq_one qpid 0x%x type %d opcode %d status 0x%x",
	     CQE_QPID(cqe), CQE_TYPE(cqe), CQE_OPCODE(cqe), CQE_STATUS(cqe));
	CTR3(KTR_IW_CXGB, "wrid hi 0x%x lo 0x%x cookie 0x%llx",
	     CQE_WRID_HI(cqe), CQE_WRID_LOW(cqe), (unsigned long long) cookie);

	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		/* these aren't supported yet */
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
		case T3_LOCAL_INV:
		case T3_FAST_REGISTER:
		default:
			log(LOG_ERR, "Unexpected opcode %d "
			    "in the CQE received for QPID=0x%0x\n",
			    CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			log(LOG_ERR, "Unexpected cqe_status 0x%x for "
			    "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		mtx_unlock(&qhp->lock);
	return ret;
}
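/*
 * A minimal caller-side sketch (not part of this file), assuming a
 * hypothetical iwch_poll_cq() wrapper, a to_iwch_cq() container helper,
 * and a chp->lock mutex: it keeps polling while iwch_poll_cq_one() asks
 * for a retry (-EAGAIN), counts entries while it returns 1, and stops on
 * 0 (empty) or a negative fatal error.
 */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_cq *chp = to_iwch_cq(ibcq);	/* assumed container helper */
	struct iwch_dev *rhp = chp->rhp;
	int npolled;
	int err = 0;

	mtx_lock(&chp->lock);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		/* -EAGAIN means the CQE was consumed internally; try again */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	mtx_unlock(&chp->lock);

	/* 0 (empty) is not an error; report the number of entries polled */
	return (err < 0) ? err : npolled;
}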
/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	pr_debug("cqid 0x%x\n", chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (flush_qhp != qhp) {
			spin_lock(&qhp->lock);

			if (qhp->wq.flushed == 1)
				goto next_cqe;
		}

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads. */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if its a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
		if (qhp && flush_qhp != qhp)
			spin_unlock(&qhp->lock);
	}
}
void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
{
	struct iwch_dev *rnicp;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
	struct iwch_cq *chp;
	struct iwch_qp *qhp;
	u32 cqid = RSPQ_CQID(rsp_msg);

	rnicp = (struct iwch_dev *) rdev_p->ulp;
	spin_lock(&rnicp->lock);
	chp = get_chp(rnicp, cqid);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
	if (!chp || !qhp) {
		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
		       cqid, CQE_QPID(rsp_msg->cqe),
		       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
		       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
		       CQE_WRID_LOW(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		goto out;
	}
	iwch_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock(&rnicp->lock);

	/*
	 * 1) completion of our sending a TERMINATE.
	 * 2) incoming TERMINATE message.
	 */
	if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
		if (SQ_TYPE(rsp_msg->cqe)) {
			PDBG("%s QPID 0x%x ep %p disconnecting\n",
			     __func__, qhp->wq.qpid, qhp->ep);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		} else {
			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
			     qhp->wq.qpid);
			post_qp_event(rnicp, chp, rsp_msg,
				      IB_EVENT_QP_REQ_ERR, 0);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		}
		goto done;
	}

	/* Bad incoming Read request */
	if (SQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	/* Bad incoming write */
	if (RQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	switch (CQE_STATUS(rsp_msg->cqe)) {

	/* Completion Events */
	case TPT_ERR_SUCCESS:
		/*
		 * Confirm the destination entry if this is a RECV completion.
		 */
		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
			dst_confirm(qhp->ep->dst);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		break;

	case TPT_ERR_STAG:
	case TPT_ERR_PDID:
	case TPT_ERR_QPID:
	case TPT_ERR_ACCESS:
	case TPT_ERR_WRAP:
	case TPT_ERR_BOUND:
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
		break;

	/* Device Fatal Errors */
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
		break;

	/* QP Fatal Errors */
	case TPT_ERR_OUT_OF_RQE:
	case TPT_ERR_PBL_ADDR_BOUND:
	case TPT_ERR_CRC:
	case TPT_ERR_MARKER:
	case TPT_ERR_PDU_LEN_ERR:
	case TPT_ERR_DDP_VERSION:
	case TPT_ERR_RDMA_VERSION:
	case TPT_ERR_OPCODE:
	case TPT_ERR_DDP_QUEUE_NUM:
	case TPT_ERR_MSN:
	case TPT_ERR_TBIT:
	case TPT_ERR_MO:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_RQE_ADDR_BOUND:
	case TPT_ERR_IRD_OVERFLOW:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;

	default:
		printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	iwch_qp_rem_ref(&qhp->ibqp);
out:
	dev_kfree_skb_irq(skb);
}