/**
 * clear_mr_refs - drop held memory region references
 * @qp: the QP whose MR references should be released
 * @clr_sends: if non-zero, also release send work queue references
 */
static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
		hfi1_put_ss(&qp->s_rdma_read_sge);

	hfi1_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct hfi1_sge *sge = &wqe->sg_list[i];

				hfi1_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct hfi1_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			hfi1_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
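/*
 * Illustrative sketch only (example_release_qp_mrs is not part of the
 * driver): clr_sends selects the scope of the cleanup.  A full QP reset
 * would drop the send work queue references as well, whereas the error
 * path in hfi1_error_qp() below passes 0 and leaves the send queue to
 * be drained by the sending tasklet.
 */
static inline void example_release_qp_mrs(struct hfi1_qp *qp, int resetting)
{
	/* clr_sends = 1 on reset, 0 when merely entering the error state */
	clear_mr_refs(qp, resetting ? 1 : 0);
}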
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct hfi1_lkey_table *rkt;
	struct hfi1_pd *pd;
	struct hfi1_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!hfi1_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	/* Unwind the references taken so far, newest first. */
	while (j) {
		struct hfi1_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		hfi1_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
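/*
 * Illustrative sketch only (example_consume_rwqe is not part of the
 * driver; real receive-queue handling also manages the head/tail
 * indices and locking): a dequeued receive WQE is usable only if
 * init_sge() returns 1.  On failure the WQE has already been retired
 * to the completion queue with IB_WC_LOC_PROT_ERR, so the caller just
 * moves on to the next entry.
 */
static int example_consume_rwqe(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
{
	if (!init_sge(qp, wqe))
		return 0;		/* SGEs rejected; CQE already posted */
	qp->r_wr_id = wqe->wr_id;	/* remember the WR ID for completion */
	return 1;
}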
/**
 * hfi1_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
		qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
		qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
		list_del_init(&qp->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & HFI1_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		hfi1_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct hfi1_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
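/*
 * Illustrative sketch only (example_move_to_error is not part of the
 * driver): per the kernel-doc above, hfi1_error_qp() expects r_lock
 * and s_lock to be held with interrupts disabled, and a non-zero
 * return means the caller should generate the last WQE reached event
 * once the locks are dropped.
 */
static void example_move_to_error(struct hfi1_qp *qp)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);

	if (lastwqe && qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}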