static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}
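/*
 * For reference, the user-space counterpart of this path: arming the SRQ
 * limit through libibverbs. An ibv_modify_srq() call with IBV_SRQ_LIMIT
 * reaches rxe_modify_srq() above via the uverbs layer. A minimal sketch
 * with a hypothetical helper, not part of the driver:
 */
#include <infiniband/verbs.h>
#include <stdint.h>

/* Ask for an IBV_EVENT_SRQ_LIMIT_REACHED async event once the number of
 * outstanding receives in the SRQ drops below 'limit'. */
static int arm_srq_limit(struct ibv_srq *srq, uint32_t limit)
{
	struct ibv_srq_attr attr = {
		.srq_limit = limit,
	};

	return ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
}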
static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
}
static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}
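/*
 * The same attributes can be read back from user space; the max_wr value
 * reported here is taken from the queue's index_mask, as shown in
 * rxe_query_srq() above. A hypothetical illustrative helper:
 */
#include <infiniband/verbs.h>
#include <stdio.h>

static void print_srq_attrs(struct ibv_srq *srq)
{
	struct ibv_srq_attr attr;

	if (ibv_query_srq(srq, &attr))
		return;

	printf("max_wr=%u max_sge=%u srq_limit=%u\n",
	       attr.max_wr, attr.max_sge, attr.srq_limit);
}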
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct ib_udata *udata, struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	/* hold a reference on every object the QP will point at */
	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;
	qp->udata = udata;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	/* unwind the references in reverse order of acquisition */
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}
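/*
 * From user space, the SRQ is attached at QP creation time: setting .srq in
 * ibv_qp_init_attr is what makes init->srq non-NULL in rxe_qp_from_init()
 * above. A minimal sketch (hypothetical helper, illustrative values):
 */
#include <infiniband/verbs.h>

/* Create an RC QP whose receive side is fed by a shared receive queue.
 * With an SRQ attached, max_recv_wr/max_recv_sge are ignored; receives
 * are posted to the SRQ instead of to the QP. */
static struct ibv_qp *create_qp_on_srq(struct ibv_pd *pd, struct ibv_cq *cq,
				       struct ibv_srq *srq)
{
	struct ibv_qp_init_attr init = {
		.send_cq = cq,
		.recv_cq = cq,
		.srq = srq,
		.cap = {
			.max_send_wr = 64,
			.max_send_sge = 1,
		},
		.qp_type = IBV_QPT_RC,
	};

	return ibv_create_qp(pd, &init);
}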
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}
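/*
 * User-space counterpart: an ibv_create_srq() call is what ends up in
 * rxe_create_srq() above. A minimal sketch (hypothetical helper,
 * illustrative sizes):
 */
#include <infiniband/verbs.h>

static struct ibv_srq *create_srq(struct ibv_pd *pd)
{
	struct ibv_srq_init_attr init = {
		.attr = {
			.max_wr = 256,	/* depth of the shared queue */
			.max_sge = 1,	/* scatter entries per receive */
		},
	};

	return ibv_create_srq(pd, &init);
}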
static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	/* the producer lock serializes all posters to the shared queue */
	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}
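/*
 * And the user-space producer that drives this path: each
 * ibv_post_srq_recv() call lands in rxe_post_srq_recv() above.
 * A minimal sketch (hypothetical helper; buf must lie inside the
 * registered region mr):
 */
#include <infiniband/verbs.h>
#include <stdint.h>

/* Post one receive buffer to the shared queue. */
static int post_srq_buffer(struct ibv_srq *srq, struct ibv_mr *mr,
			   void *buf, uint32_t len, uint64_t wr_id)
{
	struct ibv_sge sge = {
		.addr = (uintptr_t)buf,
		.length = len,
		.lkey = mr->lkey,
	};
	struct ibv_recv_wr wr = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr;

	return ibv_post_srq_recv(srq, &wr, &bad_wr);
}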