void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}
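/*
 * Illustrative standalone sketch (not driver code; names are made up):
 * the flush loop above walks the circular send queue from s_last to
 * s_head, wrapping by comparison against s_size instead of a modulo.
 */
#include <stdio.h>

struct ring {
	unsigned int last;	/* oldest unretired entry */
	unsigned int head;	/* next free slot */
	unsigned int size;	/* number of slots in the ring */
};

/* Visit every pending entry from last up to (but excluding) head. */
static void flush_ring(struct ring *r)
{
	while (r->last != r->head) {
		printf("flush entry %u\n", r->last);
		if (++r->last >= r->size)
			r->last = 0;	/* wrap, the driver's idiom */
	}
}

int main(void)
{
	struct ring r = { .last = 6, .head = 2, .size = 8 };

	flush_ring(&r);		/* visits 6, 7, 0, 1 */
	return 0;
}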
static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
		hfi1_put_ss(&qp->s_rdma_read_sge);

	hfi1_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct hfi1_sge *sge = &wqe->sg_list[i];

				hfi1_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			hfi1_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct hfi1_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			hfi1_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
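/*
 * Sketch (C11 atomics, illustrative names): clear_mr_refs() pairs each
 * reference taken at post time with exactly one put and NULLs cached
 * pointers so that a second cleanup pass cannot put the same reference
 * twice. A minimal model of that contract:
 */
#include <stdatomic.h>
#include <stdlib.h>

struct mr_model {
	atomic_int refcount;
};

/* Drop one reference; free on the last one (assumed put semantics). */
static void put_mr_model(struct mr_model *mr)
{
	if (atomic_fetch_sub(&mr->refcount, 1) == 1)
		free(mr);
}

/* Release a cached reference at most once, as the s_rdma_mr path does. */
static void clear_cached_mr(struct mr_model **slot)
{
	if (*slot) {
		put_mr_model(*slot);
		*slot = NULL;	/* guards against a double put */
	}
}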
/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from ipath_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
	    !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= IPATH_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		dev->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.ex.imm_data;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.atomic.remote_addr,
					    wqe->wr.wr.atomic.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	ipath_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= IPATH_S_WAITING;
	dev->n_rnr_naks++;
	sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
	ipath_insert_rnr_queue(sqp);
	goto clr_busy;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	ipath_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ipath_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~IPATH_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~IPATH_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
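/*
 * Standalone model (C11 atomics, illustrative names) of the atomic case
 * above: the requester must receive the *old* value of the target.
 * Kernel atomic64_add_return() returns the new value, which is why the
 * driver subtracts sdata afterwards; kernel cmpxchg() returns the prior
 * contents whether or not the swap took place.
 */
#include <stdatomic.h>
#include <stdint.h>

/* Mimic atomic64_add_return(): returns the value *after* the add. */
static uint64_t add_return64(_Atomic uint64_t *addr, uint64_t val)
{
	return atomic_fetch_add(addr, val) + val;
}

/* Mimic cmpxchg(): returns the value found at addr before the attempt. */
static uint64_t cmpxchg64(_Atomic uint64_t *addr, uint64_t expect,
			  uint64_t newval)
{
	/* on failure, expect is updated to the observed value */
	atomic_compare_exchange_strong(addr, &expect, newval);
	return expect;
}

/* Both paths hand the pre-operation value back to the requester. */
static uint64_t loopback_atomic(_Atomic uint64_t *target, int fetch_add,
				uint64_t compare_add, uint64_t swap)
{
	return fetch_add ?
		add_return64(target, compare_add) - compare_add :
		cmpxchg64(target, compare_add, swap);
}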
/**
 * ipath_post_rc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
				   &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (qp->ibqp.qp_type == IB_QPT_UC)
		ipath_do_uc_send((unsigned long) qp);
	else
		ipath_do_rc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}
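/*
 * Caller-side sketch (hypothetical helper; struct ib_sge/ib_send_wr
 * fields as in the ib_verbs API of this era): the checks above require
 * num_sge <= s_max_sge, a nonzero lkey for user PDs, and for atomics an
 * 8-byte-aligned first SGE of at least sizeof(u64). A minimal signaled
 * SEND might be posted like this:
 */
static int post_one_send(struct ipath_qp *qp, u64 wr_id,
			 u64 addr, u32 length, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= addr,		/* registered buffer address */
		.length	= length,
		.lkey	= lkey,		/* must be nonzero for user PDs */
	};
	struct ib_send_wr wr = {
		.wr_id		= wr_id,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};

	return ipath_post_rc_send(qp, &wr);
}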
/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 * @wc: the work completion entry
 *
 * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	u64 sdata;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc->wc_flags = 0;
	wc->imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc->status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[sqp->s_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->imm_data = wqe->wr.imm_data;
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc->status = IB_WC_REM_ACCESS_ERR;
		err:
			wc->wr_id = wqe->wr.wr_id;
			wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc->vendor_err = 0;
			wc->byte_len = 0;
			wc->qp_num = sqp->ibqp.qp_num;
			wc->src_qp = sqp->remote_qpn;
			wc->pkey_index = 0;
			wc->slid = sqp->remote_ah_attr.dlid;
			wc->sl = sqp->remote_ah_attr.sl;
			wc->dlid_path_bits = 0;
			wc->port_num = 0;
			ipath_sqerror_qp(sqp, wc);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		sdata = wqe->wr.wr.atomic.swap;
		spin_lock_irqsave(&dev->pending_lock, flags);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
		goto send_comp;

	default:
		goto done;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc->opcode = IB_WC_RECV;
	wc->wr_id = qp->r_wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->vendor_err = 0;
	wc->byte_len = wqe->length;
	wc->qp_num = qp->ibqp.qp_num;
	wc->src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc->pkey_index = 0;
	wc->slid = qp->remote_ah_attr.dlid;
	wc->sl = qp->remote_ah_attr.sl;
	wc->dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_SUCCESS;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = wqe->length;
		wc->qp_num = sqp->ibqp.qp_num;
		wc->src_qp = 0;
		wc->pkey_index = 0;
		wc->slid = 0;
		wc->sl = 0;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
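/*
 * Simplified standalone model of the copy loops above (illustrative
 * names, no MR segment remapping): bytes are consumed from the gather
 * list one SGE at a time, advancing to the next entry when one drains.
 */
#include <stdint.h>
#include <string.h>

struct simple_sge {
	uint8_t		*vaddr;
	uint32_t	length;
};

/* Copy total bytes from the gather list into dst, draining SGEs in order. */
static void gather_copy(uint8_t *dst, struct simple_sge *sge,
			unsigned int num_sge, uint32_t total)
{
	while (total && num_sge) {
		uint32_t len = total < sge->length ? total : sge->length;

		memcpy(dst, sge->vaddr, len);
		dst += len;
		sge->vaddr += len;
		sge->length -= len;
		if (sge->length == 0) {
			/* this SGE is exhausted; move to the next one */
			sge++;
			num_sge--;
		}
		total -= len;
	}
}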
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
	    !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= IPATH_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
			goto clr_busy;
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
		dev->n_pkt_drops++;
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.atomic.remote_addr,
					    wqe->wr.wr.atomic.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	ipath_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= IPATH_S_WAITING;
	dev->n_rnr_naks++;
	sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
	ipath_insert_rnr_queue(sqp);
	goto clr_busy;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	ipath_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ipath_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~IPATH_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~IPATH_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	ipath_dbg("QP%d/%d in error state (%d)\n",
		  qp->ibqp.qp_num, qp->remote_qpn, err);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;

	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.opcode = IB_WC_RECV;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

	return ret;
}
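/*
 * Standalone sketch of the r_rq flush above (illustrative names): the
 * receive queue header may be mapped into userspace, so head and tail
 * are clamped into range before being trusted to terminate the walk.
 */
#include <stdint.h>

struct rq_model {
	uint32_t head;	/* producer index, possibly user-writable */
	uint32_t tail;	/* consumer index */
	uint32_t size;	/* number of ring slots */
};

/* Drain the ring; returns how many entries were flushed. */
static uint32_t flush_rq(struct rq_model *rq)
{
	uint32_t head = rq->head >= rq->size ? 0 : rq->head;
	uint32_t tail = rq->tail >= rq->size ? 0 : rq->tail;
	uint32_t n = 0;

	while (tail != head) {
		/* the driver generates a flush completion per entry here */
		if (++tail >= rq->size)
			tail = 0;
		n++;
	}
	rq->tail = tail;
	return n;
}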