/*
 * rxe_srq_from_attr - apply modify_srq attribute changes to an SRQ.
 *
 * @rxe:   rxe device owning the SRQ
 * @srq:   SRQ being modified
 * @attr:  new attribute values (max_wr and/or srq_limit)
 * @mask:  which fields of @attr are valid (IB_SRQ_MAX_WR, IB_SRQ_LIMIT)
 * @udata: user-space buffer descriptor; when present, carries the address
 *         of a user-space struct mminfo to receive new mmap coordinates
 *         after a queue resize
 *
 * Returns 0 on success or a negative errno. On resize failure the old
 * queue is destroyed and srq->rq.queue is set to NULL.
 */
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct ib_udata *udata)
{
	int err;
	struct rxe_queue *q = srq->rq.queue;
	/* only used for its size below; contents are never copied out here */
	struct mminfo mi = { .offset = 1, .size = 0};

	if (mask & IB_SRQ_MAX_WR) {
		/* Check that we can write the mminfo struct to user space */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 mi_addr;

			/* Get address of user space mminfo struct */
			err = ib_copy_from_udata(&mi_addr, udata,
						 sizeof(mi_addr));
			if (err)
				goto err1;

			/*
			 * Redirect udata's output buffer to the user-supplied
			 * mminfo address so the resize path can publish the
			 * new queue's mmap info directly to user space.
			 */
			udata->outbuf = (void __user *)(unsigned long)mi_addr;
			udata->outlen = sizeof(mi);

			/* old two-argument access_ok() form (pre-v5.0) */
			if (!access_ok(VERIFY_WRITE,
				       (void __user *)udata->outbuf,
				       udata->outlen)) {
				err = -EFAULT;
				goto err1;
			}
		}

		/*
		 * Resize the receive queue; on success attr->max_wr is
		 * updated to the actual depth obtained.
		 */
		err = rxe_queue_resize(q, &attr->max_wr,
				       rcv_wqe_size(srq->rq.max_sge),
				       srq->rq.queue->ip ?
						srq->rq.queue->ip->context :
						NULL,
				       udata, &srq->rq.producer_lock,
				       &srq->rq.consumer_lock);
		if (err)
			goto err2;
	}

	if (mask & IB_SRQ_LIMIT)
		srq->limit = attr->srq_limit;

	return 0;

err2:
	/*
	 * NOTE(review): q is the pre-resize queue pointer; this assumes
	 * rxe_queue_resize() leaves the original queue intact on failure —
	 * confirm against rxe_queue_resize()'s error semantics.
	 */
	rxe_queue_cleanup(q);
	srq->rq.queue = NULL;
err1:
	return err;
}
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata) { int err; int wqe_size; if (!qp->srq) { qp->rq.max_wr = init->cap.max_recv_wr; qp->rq.max_sge = init->cap.max_recv_sge; wqe_size = rcv_wqe_size(qp->rq.max_sge); pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n", qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size); qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size); if (!qp->rq.queue) return -ENOMEM; err = do_mmap_info(rxe, udata, false, context, qp->rq.queue->buf, qp->rq.queue->buf_size, &qp->rq.queue->ip); if (err) { kvfree(qp->rq.queue->buf); kfree(qp->rq.queue); return err; } } spin_lock_init(&qp->rq.producer_lock); spin_lock_init(&qp->rq.consumer_lock); skb_queue_head_init(&qp->resp_pkts); rxe_init_task(rxe, &qp->resp.task, qp, rxe_responder, "resp"); qp->resp.opcode = OPCODE_NONE; qp->resp.msn = 0; qp->resp.state = QP_STATE_RESET; return 0; }
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_init_attr *init, struct ib_ucontext *context, struct ib_udata *udata) { int err; int srq_wqe_size; struct rxe_queue *q; srq->ibsrq.event_handler = init->event_handler; srq->ibsrq.srq_context = init->srq_context; srq->limit = init->attr.srq_limit; srq->srq_num = srq->pelem.index; srq->rq.max_wr = init->attr.max_wr; srq->rq.max_sge = init->attr.max_sge; srq_wqe_size = rcv_wqe_size(srq->rq.max_sge); spin_lock_init(&srq->rq.producer_lock); spin_lock_init(&srq->rq.consumer_lock); q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size); if (!q) { pr_warn("unable to allocate queue for srq\n"); return -ENOMEM; } srq->rq.queue = q; err = do_mmap_info(rxe, udata, false, context, q->buf, q->buf_size, &q->ip); if (err) return err; if (udata && udata->outlen >= sizeof(struct mminfo) + sizeof(u32)) { if (copy_to_user(udata->outbuf + sizeof(struct mminfo), &srq->srq_num, sizeof(u32))) return -EFAULT; } return 0; }