static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

	/* a send WQE must be able to hold either the SGE array or the
	 * inline data, whichever is larger
	 */
	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true, context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);
	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp, rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp, rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */

	/* only RC uses the RNR NAK and retransmit timers */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}

	return 0;
}
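/*
 * For reference, the timer_setup() calls above expect callbacks with the
 * post-4.15 timer API shape, which recover the owning QP via from_timer().
 * A minimal sketch of the rnr_nak_timer callback, assuming it only needs
 * to reschedule the requester task; the body shown here (the pr_debug and
 * the rxe_run_task() kick) is illustrative, not verbatim upstream code.
 */
void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));

	/* let the requester retry the WQE that drew the RNR NAK */
	rxe_run_task(&qp->req.task, 1);
}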
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr	= init->cap.max_recv_wr;
		qp->rq.max_sge	= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp, rxe_responder, "resp");

	qp->resp.opcode	= OPCODE_NONE;
	qp->resp.msn	= 0;
	qp->resp.state	= QP_STATE_RESET;

	return 0;
}
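/*
 * rcv_wqe_size(), used above and again in rxe_srq_from_init() further
 * below, sizes a receive WQE as the fixed header plus one struct ib_sge
 * per scatter/gather entry. A minimal sketch of that helper, assuming
 * struct rxe_recv_wqe ends in a trailing SGE array:
 */
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}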
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_ucontext *context,
		     struct ib_udata *udata, spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, udata, false, context, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	/* a queue with no producer lock (e.g. a CQ) only needs the
	 * consumer side held while the contents migrate
	 */
	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	/* resize_finish() swaps the queue headers on success, so this
	 * frees the old buffer on success and the unused new one on error
	 */
	rxe_queue_cleanup(new_q);
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}
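/*
 * resize_finish(), called under the queue locks in rxe_queue_resize()
 * above, is where the actual migration happens. A simplified sketch,
 * assuming the helper copies pending elements one at a time and then
 * swaps the queue headers so the caller's cleanup frees whichever
 * buffer is no longer in use; treat this as illustrative rather than
 * the verbatim upstream implementation.
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	/* refuse to shrink below the number of entries still queued */
	if (!queue_empty(q) && (num_elem < queue_count(q)))
		return -EINVAL;

	while (!queue_empty(q)) {
		memcpy(producer_addr(new_q), consumer_addr(q),
		       new_q->elem_size);
		advance_producer(new_q);
		advance_consumer(q);
	}

	/* publish the new ring under the old queue header */
	swap(*q, *new_q);

	return 0;
}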
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int srq_wqe_size;
	struct rxe_queue *q;

	srq->ibsrq.event_handler	= init->event_handler;
	srq->ibsrq.srq_context		= init->srq_context;
	srq->limit	= init->attr.srq_limit;
	srq->srq_num	= srq->pelem.index;
	srq->rq.max_wr	= init->attr.max_wr;
	srq->rq.max_sge	= init->attr.max_sge;

	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);

	spin_lock_init(&srq->rq.producer_lock);
	spin_lock_init(&srq->rq.consumer_lock);

	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size);
	if (!q) {
		pr_warn("unable to allocate queue for srq\n");
		return -ENOMEM;
	}

	srq->rq.queue = q;

	err = do_mmap_info(rxe, udata, false, context, q->buf, q->buf_size,
			   &q->ip);
	if (err)
		return err;

	if (udata && udata->outlen >= sizeof(struct mminfo) + sizeof(u32)) {
		if (copy_to_user(udata->outbuf + sizeof(struct mminfo),
				 &srq->srq_num, sizeof(u32)))
			return -EFAULT;
	}

	return 0;
}
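/*
 * The copy_to_user() above writes the SRQ number immediately after the
 * struct mminfo that do_mmap_info() placed at the start of the response
 * buffer, which is why the outlen check requires room for both. A
 * hypothetical struct showing that layout as a userspace provider would
 * read it (the struct name is illustrative, not from the source):
 */
struct rxe_srq_resp_layout {			/* hypothetical */
	struct mminfo	mi;	/* mmap info, filled by do_mmap_info() */
	u32		srq_num;	/* copied by rxe_srq_from_init() */
};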