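/*
 * c2_poll_cq - poll a completion queue.
 *
 * Reaps up to num_entries completions from the CQ into the caller's
 * ib_wc array under the CQ spinlock and returns the number actually
 * polled.  c2_poll_one() is assumed to return nonzero once the CQ is
 * empty, terminating the loop early.
 */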
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct c2_dev *c2dev = to_c2dev(ibcq->device);
        struct c2_cq *cq = to_c2cq(ibcq);
        unsigned long flags;
        int npolled, err;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = c2_poll_one(c2dev, cq, entry + npolled);
                if (err)
                        break;
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}
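/*
 * c2_post_receive - post a chain of receive work requests.
 *
 * Builds a host copy of each WR, converts the SGL via move_sgl(), and
 * posts it to the RQ message queue under the QP lock.  Posting stops at
 * the first failure and the offending WR is returned through *bad_wr.
 */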
int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                    struct ib_recv_wr **bad_wr)
{
        struct c2_dev *c2dev = to_c2dev(ibqp->device);
        struct c2_qp *qp = to_c2qp(ibqp);
        union c2wr wr;
        unsigned long lock_flags;
        int err = 0;

        if (qp->state > IB_QPS_RTS)
                return -EINVAL;

        /*
         * Try and post each work request.
         */
        while (ib_wr) {
                u32 tot_len;
                u8 actual_sge_count;

                if (ib_wr->num_sge > qp->recv_sgl_depth) {
                        err = -EINVAL;
                        break;
                }

                /*
                 * Create local host-copy of the WR.
                 */
                wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
                c2_wr_set_id(&wr, CCWR_RECV);
                c2_wr_set_flags(&wr, 0);

                /* sge_count is limited to eight bits. */
                BUG_ON(ib_wr->num_sge >= 256);
                err = move_sgl((struct c2_data_addr *) &(wr.rqwr.data),
                               ib_wr->sg_list, ib_wr->num_sge,
                               &tot_len, &actual_sge_count);
                c2_wr_set_sge_count(&wr, actual_sge_count);

                /*
                 * If we had an error on the last wr build, then
                 * break out.  Possible errors include bogus WR
                 * type, and a bogus SGL length...
                 */
                if (err)
                        break;

                spin_lock_irqsave(&qp->lock, lock_flags);
                err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
                if (err) {
                        spin_unlock_irqrestore(&qp->lock, lock_flags);
                        break;
                }

                /*
                 * Enqueue mq index to activity FIFO.
                 */
                c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);

                spin_unlock_irqrestore(&qp->lock, lock_flags);

                ib_wr = ib_wr->next;
        }

        if (err)
                *bad_wr = ib_wr;

        return err;
}
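/*
 * c2_post_send - post a chain of send work requests.
 *
 * Handles SEND, SEND_WITH_INV, RDMA WRITE, and RDMA READ opcodes.  Each
 * ib_send_wr is translated into the adapter's c2wr format and posted to
 * the SQ message queue; posting stops at the first failure and the
 * offending WR is returned through *bad_wr.
 */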
int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                 struct ib_send_wr **bad_wr)
{
        struct c2_dev *c2dev = to_c2dev(ibqp->device);
        struct c2_qp *qp = to_c2qp(ibqp);
        union c2wr wr;
        unsigned long lock_flags;
        int err = 0;

        u32 flags;
        u32 tot_len;
        u8 actual_sge_count;
        u32 msg_size;

        if (qp->state > IB_QPS_RTS)
                return -EINVAL;

        while (ib_wr) {

                flags = 0;
                wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
                if (ib_wr->send_flags & IB_SEND_SIGNALED)
                        flags |= SQ_SIGNALED;

                switch (ib_wr->opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_INV:
                        if (ib_wr->opcode == IB_WR_SEND) {
                                if (ib_wr->send_flags & IB_SEND_SOLICITED)
                                        c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
                                else
                                        c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
                                wr.sqwr.send.remote_stag = 0;
                        } else {
                                if (ib_wr->send_flags & IB_SEND_SOLICITED)
                                        c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
                                else
                                        c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
                                wr.sqwr.send.remote_stag =
                                        cpu_to_be32(ib_wr->ex.invalidate_rkey);
                        }

                        msg_size = sizeof(struct c2wr_send_req) +
                                sizeof(struct c2_data_addr) * ib_wr->num_sge;
                        if (ib_wr->num_sge > qp->send_sgl_depth) {
                                err = -EINVAL;
                                break;
                        }
                        if (ib_wr->send_flags & IB_SEND_FENCE)
                                flags |= SQ_READ_FENCE;
                        err = move_sgl((struct c2_data_addr *)
                                       &(wr.sqwr.send.data),
                                       ib_wr->sg_list,
                                       ib_wr->num_sge,
                                       &tot_len, &actual_sge_count);
                        wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
                        c2_wr_set_sge_count(&wr, actual_sge_count);
                        break;
                case IB_WR_RDMA_WRITE:
                        c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
                        msg_size = sizeof(struct c2wr_rdma_write_req) +
                                (sizeof(struct c2_data_addr) * ib_wr->num_sge);
                        if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
                                err = -EINVAL;
                                break;
                        }
                        if (ib_wr->send_flags & IB_SEND_FENCE)
                                flags |= SQ_READ_FENCE;
                        wr.sqwr.rdma_write.remote_stag =
                                cpu_to_be32(ib_wr->wr.rdma.rkey);
                        wr.sqwr.rdma_write.remote_to =
                                cpu_to_be64(ib_wr->wr.rdma.remote_addr);
                        err = move_sgl((struct c2_data_addr *)
                                       &(wr.sqwr.rdma_write.data),
                                       ib_wr->sg_list,
                                       ib_wr->num_sge,
                                       &tot_len, &actual_sge_count);
                        wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
                        c2_wr_set_sge_count(&wr, actual_sge_count);
                        break;
                case IB_WR_RDMA_READ:
                        c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
                        msg_size = sizeof(struct c2wr_rdma_read_req);

                        /* iWARP only supports one SGE for RDMA reads. */
                        if (ib_wr->num_sge > 1) {
                                err = -EINVAL;
                                break;
                        }

                        /*
                         * Move the local and remote stag/to/len into the WR.
                         */
                        wr.sqwr.rdma_read.local_stag =
                                cpu_to_be32(ib_wr->sg_list->lkey);
                        wr.sqwr.rdma_read.local_to =
                                cpu_to_be64(ib_wr->sg_list->addr);
                        wr.sqwr.rdma_read.remote_stag =
                                cpu_to_be32(ib_wr->wr.rdma.rkey);
                        wr.sqwr.rdma_read.remote_to =
                                cpu_to_be64(ib_wr->wr.rdma.remote_addr);
                        wr.sqwr.rdma_read.length =
                                cpu_to_be32(ib_wr->sg_list->length);
                        break;
                default:
                        /* error */
                        msg_size = 0;
                        err = -EINVAL;
                        break;
                }

                /*
                 * If we had an error on the last wr build, then
                 * break out.  Possible errors include bogus WR
                 * type, and a bogus SGL length...
                 */
                if (err)
                        break;

                /*
                 * Store flags.
                 */
                c2_wr_set_flags(&wr, flags);

                /*
                 * Post the puppy!
                 */
                spin_lock_irqsave(&qp->lock, lock_flags);
                err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
                if (err) {
                        spin_unlock_irqrestore(&qp->lock, lock_flags);
                        break;
                }

                /*
                 * Enqueue mq index to activity FIFO.
                 */
                c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);

                spin_unlock_irqrestore(&qp->lock, lock_flags);

                ib_wr = ib_wr->next;
        }

        if (err)
                *bad_wr = ib_wr;

        return err;
}
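/*
 * The connection-management verbs below share a common pattern:
 * allocate a verbs request (vq_req), build a work request (WR), take a
 * reference that the interrupt handler drops, send the WR to the
 * adapter, and (except for connect, which completes asynchronously)
 * wait for and process the adapter's reply.
 */

/*
 * c2_llp_connect - issue an active connect request (IPv4 only).
 *
 * Associates the QP with the CM_ID and fires off a CCWR_QP_CONNECT WR.
 * No synchronous reply is expected; the outcome is presumably delivered
 * later as a CM event.
 */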
int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct c2_dev *c2dev = to_c2dev(cm_id->device);
        struct ib_qp *ibqp;
        struct c2_qp *qp;
        struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
        struct c2_vq_req *vq_req;
        int err;
        struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

        if (cm_id->remote_addr.ss_family != AF_INET)
                return -ENOSYS;

        ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
        if (!ibqp)
                return -EINVAL;
        qp = to_c2qp(ibqp);

        /* Associate QP <--> CM_ID */
        cm_id->provider_data = qp;
        cm_id->add_ref(cm_id);
        qp->cm_id = cm_id;

        /*
         * Only support the maximum private_data length.
         */
        if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
                err = -EINVAL;
                goto bail0;
        }

        /*
         * Set the RDMA read limits.
         */
        err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
        if (err)
                goto bail0;

        /*
         * Create and send a WR_QP_CONNECT...
         */
        wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
        if (!wr) {
                err = -ENOMEM;
                goto bail0;
        }

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                err = -ENOMEM;
                goto bail1;
        }

        c2_wr_set_id(wr, CCWR_QP_CONNECT);
        wr->hdr.context = 0;
        wr->rnic_handle = c2dev->adapter_handle;
        wr->qp_handle = qp->adapter_handle;

        wr->remote_addr = raddr->sin_addr.s_addr;
        wr->remote_port = raddr->sin_port;

        /*
         * Move any private data from the caller's buffer into
         * the WR.
         */
        if (iw_param->private_data) {
                wr->private_data_length =
                        cpu_to_be32(iw_param->private_data_len);
                memcpy(&wr->private_data[0], iw_param->private_data,
                       iw_param->private_data_len);
        } else
                wr->private_data_length = 0;

        /*
         * Send WR to adapter.  NOTE: There is no synchronous reply from
         * the adapter.
         */
        err = vq_send_wr(c2dev, (union c2wr *) wr);
        vq_req_free(c2dev, vq_req);

bail1:
        kfree(wr);
bail0:
        if (err) {
                /*
                 * If we fail, release reference on QP and
                 * disassociate QP from CM_ID.
                 */
                cm_id->provider_data = NULL;
                qp->cm_id = NULL;
                cm_id->rem_ref(cm_id);
        }
        return err;
}
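/*
 * c2_llp_reject - reject an incoming connection request.
 *
 * The pdata/pdata_len arguments are accepted for interface
 * compatibility but are not passed to the adapter here.
 */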
int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
        struct c2_dev *c2dev;
        struct c2wr_cr_reject_req wr;
        struct c2_vq_req *vq_req;
        struct c2wr_cr_reject_rep *reply;
        int err;

        c2dev = to_c2dev(cm_id->device);

        /*
         * Allocate verbs request.
         */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        /*
         * Build the WR.
         */
        c2_wr_set_id(&wr, CCWR_CR_REJECT);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;

        /*
         * Reference the request struct.  Dereferenced in the int handler.
         */
        vq_req_get(c2dev, vq_req);

        /*
         * Send WR to adapter.
         */
        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        /*
         * Wait for reply from adapter.
         */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        /*
         * Process reply.
         */
        reply = (struct c2wr_cr_reject_rep *) (unsigned long)
                vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail0;
        }
        err = c2_errno(reply);

        /*
         * Free vq stuff.
         */
        vq_repbuf_free(c2dev, reply);

bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}
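/*
 * c2_llp_accept - accept an incoming connection request.
 *
 * Sends a CCWR_CR_ACCEPT WR carrying any private data, waits for the
 * adapter's reply, and on success moves the QP to RTS.
 */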
int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct c2_dev *c2dev = to_c2dev(cm_id->device);
        struct c2_qp *qp;
        struct ib_qp *ibqp;
        struct c2wr_cr_accept_req *wr; /* variable length WR */
        struct c2_vq_req *vq_req;
        struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
        int err;

        ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
        if (!ibqp)
                return -EINVAL;
        qp = to_c2qp(ibqp);

        /* Set the RDMA read limits */
        err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
        if (err)
                goto bail0;

        /* Allocate verbs request. */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                err = -ENOMEM;
                goto bail0;
        }
        vq_req->qp = qp;
        vq_req->cm_id = cm_id;
        vq_req->event = IW_CM_EVENT_ESTABLISHED;

        wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
        if (!wr) {
                err = -ENOMEM;
                goto bail1;
        }

        /* Build the WR */
        c2_wr_set_id(wr, CCWR_CR_ACCEPT);
        wr->hdr.context = (unsigned long) vq_req;
        wr->rnic_handle = c2dev->adapter_handle;
        wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
        wr->qp_handle = qp->adapter_handle;

        /* Replace the cr_handle with the QP after accept */
        cm_id->provider_data = qp;
        cm_id->add_ref(cm_id);
        qp->cm_id = cm_id;

        /* Validate private_data length */
        if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
                err = -EINVAL;
                goto bail1;
        }

        if (iw_param->private_data) {
                wr->private_data_length =
                        cpu_to_be32(iw_param->private_data_len);
                memcpy(&wr->private_data[0], iw_param->private_data,
                       iw_param->private_data_len);
        } else
                wr->private_data_length = 0;

        /* Reference the request struct.  Dereferenced in the int handler. */
        vq_req_get(c2dev, vq_req);

        /* Send WR to adapter */
        err = vq_send_wr(c2dev, (union c2wr *) wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail1;
        }

        /* Wait for reply from adapter */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail1;

        /* Check that reply is present */
        reply = (struct c2wr_cr_accept_rep *) (unsigned long)
                vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail1;
        }

        err = c2_errno(reply);
        vq_repbuf_free(c2dev, reply);

        if (!err)
                c2_set_qp_state(qp, C2_QP_STATE_RTS);
bail1:
        kfree(wr);
        vq_req_free(c2dev, vq_req);
bail0:
        if (err) {
                /*
                 * If we fail, release reference on QP and
                 * disassociate QP from CM_ID.
                 */
                cm_id->provider_data = NULL;
                qp->cm_id = NULL;
                cm_id->rem_ref(cm_id);
        }
        return err;
}
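/*
 * c2_llp_service_destroy - tear down a listening endpoint.
 *
 * The endpoint handle stashed in cm_id->provider_data by
 * c2_llp_service_create() is handed back to the adapter.
 */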
int c2_llp_service_destroy(struct iw_cm_id *cm_id)
{
        struct c2_dev *c2dev;
        struct c2wr_ep_listen_destroy_req wr;
        struct c2wr_ep_listen_destroy_rep *reply;
        struct c2_vq_req *vq_req;
        int err;

        c2dev = to_c2dev(cm_id->device);
        if (c2dev == NULL)
                return -EINVAL;

        /*
         * Allocate verbs request.
         */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        /*
         * Build the WR.
         */
        c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;

        /*
         * Reference the request struct.  Dereferenced in the int handler.
         */
        vq_req_get(c2dev, vq_req);

        /*
         * Send WR to adapter.
         */
        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        /*
         * Wait for reply from adapter.
         */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        /*
         * Process reply and pick up the adapter's status.
         */
        reply = (struct c2wr_ep_listen_destroy_rep *) (unsigned long)
                vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail0;
        }

        err = c2_errno(reply);

        vq_repbuf_free(c2dev, reply);
bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}
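/*
 * c2_llp_service_create - create a listening endpoint (IPv4 only).
 *
 * On success the adapter's endpoint handle is stored in
 * cm_id->provider_data for use by c2_llp_service_destroy().
 */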
int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
{
        struct c2_dev *c2dev;
        struct c2wr_ep_listen_create_req wr;
        struct c2wr_ep_listen_create_rep *reply;
        struct c2_vq_req *vq_req;
        int err;
        struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;

        if (cm_id->local_addr.ss_family != AF_INET)
                return -ENOSYS;

        c2dev = to_c2dev(cm_id->device);
        if (c2dev == NULL)
                return -EINVAL;

        /*
         * Allocate verbs request.
         */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        /*
         * Build the WR.
         */
        c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
        wr.hdr.context = (u64) (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.local_addr = laddr->sin_addr.s_addr;
        wr.local_port = laddr->sin_port;
        wr.backlog = cpu_to_be32(backlog);
        wr.user_context = (u64) (unsigned long) cm_id;

        /*
         * Reference the request struct.  Dereferenced in the int handler.
         */
        vq_req_get(c2dev, vq_req);

        /*
         * Send WR to adapter.
         */
        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        /*
         * Wait for reply from adapter.
         */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        /*
         * Process reply.  A NULL reply must not reach vq_repbuf_free(),
         * so bail out before the repbuf is freed.
         */
        reply = (struct c2wr_ep_listen_create_rep *) (unsigned long)
                vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail0;
        }

        err = c2_errno(reply);
        if (err)
                goto bail1;

        /*
         * Keep the adapter handle.  Used in subsequent destroy.
         */
        cm_id->provider_data = (void *)(unsigned long) reply->ep_handle;

        /*
         * Free vq stuff.
         */
        vq_repbuf_free(c2dev, reply);
        vq_req_free(c2dev, vq_req);

        return 0;

bail1:
        vq_repbuf_free(c2dev, reply);
bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}