static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context,
			   struct ib_udata *udata)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr = init->cap.max_send_wr;
	qp->sq.max_sge = init->cap.max_send_sge;
	qp->sq.max_inline = init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true, context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);
	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}
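/*
 * Illustrative sketch only, not part of the driver: the wqe_size
 * computation above picks the larger of two candidate layouts for a
 * send WQE slot.  The helper name rxe_send_wqe_size() is hypothetical;
 * struct rxe_send_wqe, struct ib_sge and max_t() are the same symbols
 * used above.
 */
static inline int rxe_send_wqe_size(unsigned int max_sge,
				    unsigned int max_inline)
{
	/* layout 1: WQE header followed by an array of scatter/gather
	 * entries
	 */
	int sge_layout = sizeof(struct rxe_send_wqe) +
			 max_sge * sizeof(struct ib_sge);
	/* layout 2: WQE header followed by the inline data buffer */
	int inline_layout = sizeof(struct rxe_send_wqe) + max_inline;

	/* a slot must be able to hold whichever layout is larger */
	return max_t(int, sge_layout, inline_layout);
}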
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}
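/*
 * Hedged sketch of how the two halves might be driven from the QP
 * create path.  The wrapper name rxe_qp_init_both() is hypothetical,
 * and unwinding of the requester-side state on a responder-side
 * failure is left to the caller here, unlike the real driver.
 */
static int rxe_qp_init_both(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	int err;

	/* requester side: send queue, UDP socket, req/comp tasks */
	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		return err;

	/* responder side: receive queue (skipped when an SRQ is used) */
	return rxe_qp_init_resp(rxe, qp, init, context, udata);
}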
/* initialize rxe device state */
static int rxe_init(struct rxe_dev *rxe)
{
	int err;

	/* init default device parameters */
	rxe_init_device_param(rxe);

	err = rxe_init_ports(rxe);
	if (err)
		goto err1;

	err = rxe_init_pools(rxe);
	if (err)
		goto err2;

	/* init packet counters */
	atomic_set(&rxe->req_skb_in, 0);
	atomic_set(&rxe->resp_skb_in, 0);
	atomic_set(&rxe->req_skb_out, 0);
	atomic_set(&rxe->resp_skb_out, 0);

	/* init pending mmap list */
	spin_lock_init(&rxe->mmap_offset_lock);
	spin_lock_init(&rxe->pending_lock);
	INIT_LIST_HEAD(&rxe->pending_mmaps);

	/* init arbiter */
	spin_lock_init(&rxe->arbiter.list_lock);
	INIT_LIST_HEAD(&rxe->arbiter.qp_list);
	rxe_init_task(rxe, &rxe->arbiter.task, rxe,
		      rxe_arbiter, "arb");

	return 0;

err2:
	rxe_cleanup_ports(rxe);
err1:
	return err;
}
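/*
 * Hedged sketch of the matching teardown, mirroring rxe_init() in
 * reverse order.  rxe_cleanup_ports() appears in the error path above;
 * rxe_cleanup_task() and rxe_cleanup_pools() are assumed counterparts
 * of rxe_init_task() and rxe_init_pools(), and the wrapper name
 * rxe_deinit() is hypothetical.
 */
static void rxe_deinit(struct rxe_dev *rxe)
{
	rxe_cleanup_task(&rxe->arbiter.task);	/* stop the arbiter first */
	rxe_cleanup_pools(rxe);
	rxe_cleanup_ports(rxe);
}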