static void roq_eth_cleanup_ofa(struct roq_eth_priv *vdev) { int i; if (vdev->send_cq) ib_destroy_cq(vdev->send_cq); if (vdev->recv_cq && vdev->recv_cq != vdev->send_cq) ib_destroy_cq(vdev->recv_cq); if (vdev->qps) { for (i = 0; i < vdev->part_size; i++) if (vdev->qps[i]) ib_destroy_qp(vdev->qps[i]); kfree(vdev->qps); } if (vdev->qps_rem) { for (i = 0; i < vdev->rem_part_size; i++) if (vdev->qps_rem[i]) ib_destroy_qp(vdev->qps_rem[i]); kfree(vdev->qps_rem); } if (vdev->kpd) ib_dealloc_pd(vdev->kpd); vdev->qps = vdev->qps_rem = NULL; vdev->recv_cq = vdev->send_cq = NULL; vdev->kpd = NULL; return; }
/*
 * ib_uverbs_destroy_qp() - handle the uverbs DESTROY_QP command.
 *
 * Looks up the user-supplied QP handle, verifies it belongs to the
 * caller's context, destroys the QP, unlinks its user object and
 * reports the number of async events that were delivered for it.
 *
 * Returns @in_len on success, a negative errno on failure.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_qp *qp;
	struct ib_uqp_object *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	/* Serializes against concurrent create/destroy of uverbs objects. */
	mutex_lock(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	/* Handle must exist and be owned by this user context. */
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* A QP still attached to multicast groups cannot be destroyed. */
	if (!list_empty(&uobj->mcast_list)) {
		ret = -EBUSY;
		goto out;
	}

	ret = ib_destroy_qp(qp);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

	mutex_lock(&file->mutex);
	list_del(&uobj->uevent.uobject.list);
	mutex_unlock(&file->mutex);

	/* Flush pending async events before reporting the final count. */
	ib_uverbs_release_uevent(file, &uobj->uevent);

	resp.events_reported = uobj->uevent.events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	mutex_unlock(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
/**
 * Shut down CMRC connection gracefully
 *
 * @v cmrc		Communication-Managed Reliable Connection
 *
 * The Infiniband data structures are not reference-counted or
 * guarded.  It is therefore unsafe to shut them down while we may be
 * in the middle of a callback from the Infiniband stack (e.g. in a
 * receive completion handler).
 *
 * This shutdown process will run some time after the call to
 * ib_cmrc_close(), after control has returned out of the Infiniband
 * core, and will shut down the Infiniband interfaces cleanly.
 *
 * The shutdown process holds an implicit reference on the CMRC
 * connection, ensuring that the structure is not freed before the
 * shutdown process has run.
 */
static void ib_cmrc_shutdown ( struct ib_cmrc_connection *cmrc ) {

	DBGC ( cmrc, "CMRC %p shutting down\n", cmrc );

	/* Shut down Infiniband interface.  Order matters: the
	 * connection is torn down before the QP it used, the QP before
	 * the CQ it referenced, and the device is closed last.
	 */
	ib_destroy_conn ( cmrc->ibdev, cmrc->qp, cmrc->conn );
	ib_destroy_qp ( cmrc->ibdev, cmrc->qp );
	ib_destroy_cq ( cmrc->ibdev, cmrc->cq );
	ib_close ( cmrc->ibdev );

	/* Drop the remaining reference; this may free the structure */
	ref_put ( &cmrc->refcnt );
}
/*
 * verbs_remove_device() - ib_client remove callback: release every
 * verbs object this module created for the device.
 *
 * NOTE(review): ah/qp/send_cq/recv_cq/mr/pd appear to be file-scope
 * globals populated by the matching add callback — confirm elsewhere
 * in the file.  Teardown runs in dependency order: AH and QP before
 * the CQs they reference, MR before the PD, PD last.
 */
static void verbs_remove_device (struct ib_device *dev)
{
	printk (KERN_INFO "IB remove device called. Name = %s\n", dev->name);

	if (ah)
		ib_destroy_ah (ah);
	if (qp)
		ib_destroy_qp (qp);
	if (send_cq)
		ib_destroy_cq (send_cq);
	if (recv_cq)
		ib_destroy_cq (recv_cq);
	if (mr)
		ib_dereg_mr (mr);
	if (pd)
		ib_dealloc_pd (pd);
}
/*
 * rdma_destroy_trans() - release all RDMA resources of a 9P transport.
 * @rdma: transport state; NULL is tolerated as a no-op.
 *
 * Any member may be NULL or an ERR_PTR when setup failed part-way
 * through, so each one is checked individually.  Teardown order: the
 * QP first (it references the PD and CQ), then PD, CQ and finally the
 * CM id, before freeing the structure itself.
 */
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_free_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
void IBInterface::close() { #ifdef EQ_MEASURE_TIME LBINFO << " Time total to ReadNB : " << _floatTimeReadNB << std::endl; LBINFO << " Time total to ReadSync : " << _floatTimeReadSync << std::endl; LBINFO << " Time total to WaitPoll : " << _timeTotalWaitPoll << std::endl; LBINFO << " Time total to copyBuffer : " << _timeCopyBufferRead << std::endl; LBINFO << " Time total to WaitObj : " << _timeTotalWaitobj << std::endl; LBINFO << " Time total to Write : " << _timeTotalWrite << std::endl; LBINFO << " Time total to WaitWrite : " << _timeTotalWriteWait << std::endl; LBINFO << " Time total to copyBuffer : " << _timeCopyBufferWrite << std::endl; #endif ib_api_status_t status; if( _queuePair ) { status = ib_destroy_qp( _queuePair, 0 ); _queuePair = 0; } for (int i = 0; i < EQ_NUMBLOCKMEMORY ; i++) { _writeBlocks[i]->close(); _readBlocks[i]->close(); } _dlid = 0; }
/*
 * ib_uverbs_create_qp() - handle the uverbs CREATE_QP command.
 *
 * Validates the user-supplied PD/CQ/SRQ handles against the caller's
 * context, creates the QP through the device driver, fills in the
 * kernel-side bookkeeping, installs the QP in the uverbs idr and
 * copies the resulting handle and capabilities back to user space.
 *
 * Returns @in_len on success, a negative errno on failure.  The whole
 * body runs under ib_uverbs_idr_mutex, which serializes lookups here
 * against destroy paths (see ib_uverbs_destroy_qp).
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *uobj;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Driver-private data follows the fixed command/response structs. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	mutex_lock(&ib_uverbs_idr_mutex);

	/* Resolve all handles; each must belong to this user context. */
	pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
	rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
	srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

	if (!pd || pd->uobject->context != file->ucontext ||
	    !scq || scq->uobject->context != file->ucontext ||
	    !rcq || rcq->uobject->context != file->ucontext ||
	    (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	uobj->uevent.uobject.user_handle = cmd.user_handle;
	uobj->uevent.uobject.context = file->ucontext;
	uobj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&uobj->uevent.event_list);
	INIT_LIST_HEAD(&uobj->mcast_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_up;
	}

	/* Fill in the fields the core relies on before publishing the QP. */
	qp->device = pd->device;
	qp->pd = pd;
	qp->send_cq = attr.send_cq;
	qp->recv_cq = attr.recv_cq;
	qp->srq = attr.srq;
	qp->uobject = &uobj->uevent.uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context = attr.qp_context;
	qp->qp_type = attr.qp_type;

	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;

retry:
	/* Old-style idr API: preload, then allocate, retrying on -EAGAIN. */
	if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.qp_handle = uobj->uevent.uobject.id;
	/* Report the caps actually granted (driver may have rounded up). */
	resp.max_recv_sge = attr.cap.max_recv_sge;
	resp.max_send_sge = attr.cap.max_send_sge;
	resp.max_recv_wr = attr.cap.max_recv_wr;
	resp.max_send_wr = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	mutex_unlock(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id);

err_destroy:
	ib_destroy_qp(qp);
	/* Undo the reference counts taken above. */
	atomic_dec(&pd->usecnt);
	atomic_dec(&attr.send_cq->usecnt);
	atomic_dec(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_dec(&attr.srq->usecnt);

err_up:
	mutex_unlock(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
/*
 * krping_free_qp() - release @cb's verbs resources in reverse order of
 * creation: the QP (which references the CQ and PD) first, then the
 * CQ, then the PD.  Callers must guarantee all three were created.
 */
static void krping_free_qp(struct krping_cb *cb)
{
	ib_destroy_qp(cb->qp);
	ib_destroy_cq(cb->cq);
	ib_dealloc_pd(cb->pd);
}
int ipoib_qp_create(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; u16 pkey_index; struct ib_qp_attr qp_attr; int attr_mask; /* * Search through the port P_Key table for the requested pkey value. * The port has to be assigned to the respective IB partition in * advance. */ ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index); if (ret) { clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); return ret; } set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); qp_attr.qp_state = IB_QPS_INIT; qp_attr.qkey = 0; qp_attr.port_num = priv->port; qp_attr.pkey_index = pkey_index; attr_mask = IB_QP_QKEY | IB_QP_PORT | IB_QP_PKEY_INDEX | IB_QP_STATE; ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret); goto out_fail; } qp_attr.qp_state = IB_QPS_RTR; /* Can't set this in a INIT->RTR transition */ attr_mask &= ~IB_QP_PORT; ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret); goto out_fail; } qp_attr.qp_state = IB_QPS_RTS; qp_attr.sq_psn = 0; attr_mask |= IB_QP_SQ_PSN; attr_mask &= ~IB_QP_PKEY_INDEX; ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret); goto out_fail; } return 0; out_fail: ib_destroy_qp(priv->qp); priv->qp = NULL; return -EINVAL; }
int roq_eth_rem_init_qp(struct net_device *ndev) { struct roq_eth_priv *vdev = netdev_priv(ndev); struct ib_qp_init_attr create_qp_attrs; struct ib_qp_attr qp_attr; enum ib_qp_attr_mask qp_attr_mask; char *argv[] = {"/etc/init.d/post_discovery", NULL}; char *env[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", "LD_LIBRARY_PATH=/lib:/usr/lib", NULL}; int i, rank, size, ret = 0; if (vdev->send_cq == NULL || vdev->recv_cq == NULL || vdev->srq == NULL || vdev->kpd == NULL) { vdev->fix_rem = 1; pr_warn("roq_eth_rem_init: return w/o discovery\n"); return 0; } /* clean old remote qps */ if (vdev->rem_part_size) { for (i = 0; i < vdev->rem_part_size; i++) ib_destroy_qp(vdev->qps_rem[i]); kfree(vdev->qps_rem); } vdev->rem_part_size = RoQ_NetworkSize(vdev->netdesc_rem); rank = roq_tcoords_to_rank(vdev->netdesc, vdev->personality.Network_Config.Acoord, vdev->personality.Network_Config.Bcoord, vdev->personality.Network_Config.Ccoord, vdev->personality.Network_Config.Dcoord, vdev->personality.Network_Config.Ecoord); if (IS_ERR_VALUE(rank)) { ret = -EINVAL; pr_warn("roq_eth_rem_init: invalid rank\n"); goto out; } memset(&create_qp_attrs, 0, sizeof(struct ib_qp_init_attr)); create_qp_attrs.send_cq = vdev->send_cq; create_qp_attrs.recv_cq = vdev->recv_cq; /* set some more parameters */ create_qp_attrs.qp_type = IB_QPT_UD; create_qp_attrs.event_handler = NULL; create_qp_attrs.qp_context = NULL; create_qp_attrs.srq = vdev->srq; create_qp_attrs.cap.max_send_wr = MAX_TX_SKBS; create_qp_attrs.cap.max_recv_wr = 1; create_qp_attrs.cap.max_send_sge = 1; create_qp_attrs.cap.max_recv_sge = 1; create_qp_attrs.cap.max_inline_data = 0; size = sizeof(struct ib_qp *) * vdev->rem_part_size; vdev->qps_rem = (struct ib_qp **)kmalloc(size, GFP_KERNEL); if (!vdev->qps_rem) { pr_warn("roq_eth_rem_init_qp: remote QP alloc failed"); ret = -ENOMEM; goto out; } for (i = 0; i < vdev->rem_part_size; i++) { vdev->qps_rem[i] = ib_create_qp(vdev->kpd, &create_qp_attrs); if 
(IS_ERR(vdev->qps_rem[i])) { pr_warn("roq_eth_rem_init_qp: error creating qp %p\n", vdev->qps_rem[i]); ret = PTR_ERR(vdev->qps_rem[i]); goto out; } } for (i = 0; i < vdev->rem_part_size; i++) { qp_attr_mask = 0; qp_attr_mask |= IB_QP_STATE; qp_attr.qp_state = IB_QPS_RTS; qp_attr_mask |= IB_QP_AV; /* this QP will send to peer rank i (zero based) */ qp_attr.ah_attr.dlid = 0x8000 | i; qp_attr_mask |= IB_QP_DEST_QPN; /* * this QP will send to peer qp num rank + 1 * (QP zero is reserved) */ qp_attr.dest_qp_num = rank + 1; ib_modify_qp(vdev->qps_rem[i], &qp_attr, qp_attr_mask); } ret = call_usermodehelper(argv[0], argv, env, UMH_WAIT_EXEC); out: return ret; }