/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		atomic_dec(&sge->mr->refcount);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
{
	int user = to_ipd(qp->ibqp.pd)->user;
	int i, j, ret;
	struct ib_wc wc;

	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if ((user && wqe->sg_list[i].lkey == 0) ||
		    !ipath_lkey_ok(qp, &qp->r_sg_list[j], &wqe->sg_list[i],
				   IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	qp->r_sge.sge = qp->r_sg_list[0];
	qp->r_sge.sg_list = qp->r_sg_list + 1;
	qp->r_sge.num_sge = j;
	ret = 1;
	goto bail;

bad_lkey:
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	/* Signal solicited completion event. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
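/*
 * Hedged illustration (not part of the driver): when the bad_lkey path
 * in init_sge() or qib_init_sge() above fires, the receive is completed
 * with status IB_WC_LOC_PROT_ERR and no data is placed.  The sketch
 * below shows how a hypothetical kernel consumer polling the receive CQ
 * with the generic ib_poll_cq() verb would observe that error; the
 * function name and the log message are assumptions for illustration.
 */
#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

static void example_drain_recv_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	/* Poll one completion at a time until the CQ is empty. */
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status == IB_WC_LOC_PROT_ERR)
			/*
			 * The RWQE posted with wr_id == wc.wr_id failed
			 * its LKEY check; no data was received.
			 */
			printk(KERN_WARNING
			       "recv wr_id %llu failed LKEY validation\n",
			       (unsigned long long) wc.wr_id);
	}
}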
/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i, j;

		if (wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		next = srq->rq.head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == srq->rq.tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
		wqe->wr_id = wr->wr_id;
		wqe->sg_list[0].mr = NULL;
		wqe->sg_list[0].vaddr = NULL;
		wqe->sg_list[0].length = 0;
		wqe->sg_list[0].sge_length = 0;
		wqe->length = 0;
		for (i = 0, j = 0; i < wr->num_sge; i++) {
			/* Check LKEY */
			if (to_ipd(srq->ibsrq.pd)->user &&
			    wr->sg_list[i].lkey == 0) {
				spin_unlock_irqrestore(&srq->rq.lock, flags);
				*bad_wr = wr;
				ret = -EINVAL;
				goto bail;
			}
			if (wr->sg_list[i].length == 0)
				continue;
			if (!ipath_lkey_ok(&dev->lk_table, &wqe->sg_list[j],
					   &wr->sg_list[i],
					   IB_ACCESS_LOCAL_WRITE)) {
				spin_unlock_irqrestore(&srq->rq.lock, flags);
				*bad_wr = wr;
				ret = -EINVAL;
				goto bail;
			}
			wqe->length += wr->sg_list[i].length;
			j++;
		}
		wqe->num_sge = j;
		srq->rq.head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
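/*
 * Hedged usage sketch (not part of the driver): a kernel consumer does
 * not call ipath_post_srq_receive() directly; it posts through the core
 * verbs layer with ib_post_srq_recv(), which dispatches to this routine
 * for an ipath device.  The buffer address, length, lkey, and wr_id
 * values below are placeholders; a real caller would use a DMA-mapped
 * buffer registered with the HCA.
 */
#include <rdma/ib_verbs.h>

static int example_post_one_srq_recv(struct ib_srq *srq, u64 dma_addr,
				     u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,	/* DMA address of the receive buffer */
		.length	= len,		/* number of bytes available */
		.lkey	= lkey,		/* local key from memory registration */
	};
	struct ib_recv_wr wr = {
		.next	 = NULL,
		.wr_id	 = 1,		/* returned in the work completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	/* On error, bad_wr points at the first WR that could not be queued. */
	return ib_post_srq_recv(srq, &wr, &bad_wr);
}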
/**
 * ipath_post_rc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}

	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
				   &wqe->sg_list[j], &wr->sg_list[i], acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (qp->ibqp.qp_type == IB_QPT_UC)
		ipath_do_uc_send((unsigned long) qp);
	else
		ipath_do_rc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}
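/*
 * Hedged usage sketch (not part of the driver): a consumer posts sends
 * through the core verbs layer with ib_post_send().  For RC and UC QPs
 * on this device the request ends up in ipath_post_rc_send() above
 * (possibly via an intermediate dispatch routine, depending on the
 * kernel version).  The address, length, lkey, and wr_id below are
 * placeholders for a registered, DMA-mapped buffer.
 */
#include <rdma/ib_verbs.h>

static int example_post_one_send(struct ib_qp *qp, u64 dma_addr,
				 u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,	/* DMA address of the send buffer */
		.length	= len,		/* number of bytes to send */
		.lkey	= lkey,		/* local key from memory registration */
	};
	struct ib_send_wr wr = {
		.wr_id	    = 2,		/* returned in the completion */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,	/* plain two-sided send */
		.send_flags = IB_SEND_SIGNALED,	/* request a completion */
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}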