int main(int argc, char **argv)
{
    struct sockaddr_in addr;
    struct rdma_cm_event *event = NULL;
    struct rdma_cm_id *listener = NULL;
    struct rdma_event_channel *ec = NULL;
    uint16_t port = 0;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;

    TEST_Z(ec = rdma_create_event_channel());
    TEST_NZ(rdma_create_id(ec, &listener, NULL, RDMA_PS_TCP));
    TEST_NZ(rdma_bind_addr(listener, (struct sockaddr *)&addr));
    TEST_NZ(rdma_listen(listener, 10)); /* backlog=10 is arbitrary */

    port = ntohs(rdma_get_src_port(listener));
    printf("listening on port %d.\n", port);

    while (rdma_get_cm_event(ec, &event) == 0) {
        struct rdma_cm_event event_copy;

        memcpy(&event_copy, event, sizeof(*event));
        rdma_ack_cm_event(event);

        if (on_event(&event_copy))
            break;
    }

    rdma_destroy_id(listener);
    rdma_destroy_event_channel(ec);

    return 0;
}

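/*
 * Note: the listener above relies on TEST_Z()/TEST_NZ() assertion macros and an
 * on_event() dispatcher that are not part of the snippet. The definitions below
 * are a minimal, hypothetical sketch of such helpers, consistent only with how
 * they are used above (TEST_Z fails on a zero/NULL result, TEST_NZ fails on a
 * non-zero result); the original project may define them differently.
 */
#include <stdio.h>
#include <stdlib.h>

static void die(const char *reason)
{
    fprintf(stderr, "%s\n", reason);
    exit(EXIT_FAILURE);
}

#define TEST_NZ(x) do { if ( (x)) die("error: " #x " failed (returned non-zero)."); } while (0)
#define TEST_Z(x)  do { if (!(x)) die("error: " #x " failed (returned zero/null)."); } while (0)
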
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
    struct rdma_cm_id *id, *old;
    int rc = 0;
    int retry_count = 0;

    if (ep->rep_connected != 0) {
        struct rpcrdma_xprt *xprt;
retry:
        dprintk("RPC: %s: reconnecting...\n", __func__);

        rpcrdma_ep_disconnect(ep, ia);
        rpcrdma_flush_cqs(ep);

        xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
        ia->ri_ops->ro_reset(xprt);

        id = rpcrdma_create_id(xprt, ia,
                               (struct sockaddr *)&xprt->rx_data.addr);
        if (IS_ERR(id)) {
            rc = -EHOSTUNREACH;
            goto out;
        }
        /* TEMP TEMP TEMP - fail if new device:
         * Deregister/remarshal *all* requests!
         * Close and recreate adapter, pd, etc!
         * Re-determine all attributes still sane!
         * More stuff I haven't thought of!
         * Rrrgh!
         */
        if (ia->ri_id->device != id->device) {
            printk("RPC: %s: can't reconnect on "
                   "different device!\n", __func__);
            rdma_destroy_id(id);
            rc = -ENETUNREACH;
            goto out;
        }
        /* END TEMP */
        rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
        if (rc) {
            dprintk("RPC: %s: rdma_create_qp failed %i\n",
                    __func__, rc);
            rdma_destroy_id(id);
            rc = -ENETUNREACH;
            goto out;
        }

        write_lock(&ia->ri_qplock);
        old = ia->ri_id;
        ia->ri_id = id;
        write_unlock(&ia->ri_qplock);

        rdma_destroy_qp(old);
        rdma_destroy_id(old);
    } else {

static int fio_rdmaio_close_file(struct thread_data *td, struct fio_file *f)
{
    struct rdmaio_data *rd = td->io_ops->data;
    struct ibv_send_wr *bad_wr;

    /* unregister rdma buffer */

    /*
     * Client sends notification to the server side
     */
    /* refer to: http://linux.die.net/man/7/rdma_cm */
    if ((rd->is_client == 1) && ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE)
                                 || (rd->rdma_protocol == FIO_RDMA_MEM_READ))) {
        if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
            log_err("fio: ibv_post_send fail");
            return 1;
        }

        dprint(FD_IO, "fio: close information sent success\n");
        rdma_poll_wait(td, IBV_WC_SEND);
    }

    if (rd->is_client == 1)
        rdma_disconnect(rd->cm_id);
    else {
        rdma_disconnect(rd->child_cm_id);
#if 0
        rdma_disconnect(rd->cm_id);
#endif
    }

#if 0
    if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) {
        log_err("fio: wait for RDMA_CM_EVENT_DISCONNECTED\n");
        return 1;
    }
#endif

    ibv_destroy_cq(rd->cq);
    ibv_destroy_qp(rd->qp);

    if (rd->is_client == 1)
        rdma_destroy_id(rd->cm_id);
    else {
        rdma_destroy_id(rd->child_cm_id);
        rdma_destroy_id(rd->cm_id);
    }

    ibv_destroy_comp_channel(rd->channel);
    ibv_dealloc_pd(rd->pd);

    return 0;
}

static int
connect_client (struct rdma_cm_id *client)
{
    if (!client)
        return -1;

    if ( -1 == kiro_attach_qp (client)) {
        g_critical ("Could not create a QP for the new connection");
        rdma_destroy_id (client);
        return -1;
    }

    struct kiro_connection_context *ctx =
        (struct kiro_connection_context *)g_try_malloc0 (sizeof (struct kiro_connection_context));

    if (!ctx) {
        g_critical ("Failed to create connection context");
        rdma_destroy_id (client);
        return -1;
    }

    ctx->cf_mr_recv = kiro_create_rdma_memory (client->pd, sizeof (struct kiro_ctrl_msg),
                                               IBV_ACCESS_LOCAL_WRITE);
    ctx->cf_mr_send = kiro_create_rdma_memory (client->pd, sizeof (struct kiro_ctrl_msg),
                                               IBV_ACCESS_LOCAL_WRITE);

    if (!ctx->cf_mr_recv || !ctx->cf_mr_send) {
        g_critical ("Failed to register control message memory");
        goto error;
    }

    ctx->cf_mr_recv->size = ctx->cf_mr_send->size = sizeof (struct kiro_ctrl_msg);
    client->context = ctx;

    if (rdma_post_recv (client, client, ctx->cf_mr_recv->mem, ctx->cf_mr_recv->size,
                        ctx->cf_mr_recv->mr)) {
        g_critical ("Posting preemptive receive for connection failed: %s", strerror (errno));
        goto error;
    }

    if (rdma_accept (client, NULL)) {
        g_warning ("Failed to establish connection to the client: %s", strerror (errno));
        goto error;
    }

    g_debug ("Client connection setup successful");
    return 0;

error:
    rdma_reject (client, NULL, 0);
    kiro_destroy_connection_context (&ctx);
    rdma_destroy_id (client);
    return -1;
}

int xfer_rdma_finalize(struct xfer_data *data)
{
    struct rdma_cm_event *event;
    int rc;

    if (data->servername) {
        rc = rdma_disconnect(data->cm_id);
        if (rc) {
            perror("rdma_disconnect");
            fprintf(stderr, "%d:%s: rdma disconnect error\n", pid, __func__);
            return -1;
        }
    }

    rdma_get_cm_event(data->cm_channel, &event);
    if (event->event != RDMA_CM_EVENT_DISCONNECTED)
        fprintf(stderr, "%d:%s: unexpected event during disconnect %d\n",
                pid, __func__, event->event);

    rdma_ack_cm_event(event);
    rdma_destroy_id(data->cm_id);
    rdma_destroy_event_channel(data->cm_channel);

    return 0;
}

/// The IBConnectionGroup default destructor.
virtual ~IBConnectionGroup()
{
    for (auto& c : conn_)
        c = nullptr;

    if (listen_id_) {
        int err = rdma_destroy_id(listen_id_);
        if (err) {
            L_(error) << "rdma_destroy_id() failed";
        }
        listen_id_ = nullptr;
    }

    if (cq_) {
        int err = ibv_destroy_cq(cq_);
        if (err) {
            L_(error) << "ibv_destroy_cq() failed";
        }
        cq_ = nullptr;
    }

    if (pd_) {
        int err = ibv_dealloc_pd(pd_);
        if (err) {
            L_(error) << "ibv_dealloc_pd() failed";
        }
        pd_ = nullptr;
    }

    rdma_destroy_event_channel(ec_);
}

void destroy_connection(rdma_conn_t *conn)
{
    rdma_destroy_qp(conn->id);

    ibv_dereg_mr(conn->send_mr);
    ibv_dereg_mr(conn->recv_mr);
    ibv_dereg_mr(conn->data_mr);
    ibv_dereg_mr(conn->addr_mr);

    if (conn->send_msg) {
        free(conn->send_msg);
        conn->send_msg = NULL;
    }
    if (conn->recv_msg) {
        free(conn->recv_msg);
        conn->recv_msg = NULL;
    }

    rdma_destroy_id(conn->id);

    if (conn) {
        free(conn);
        conn = NULL;
    }
}

//static int run(int argc, char **argv)
int ibrdma_send(char* host, char* port, void* data, uint64_t size)
{
    struct addrinfo *addr;
    struct rdma_cm_id *cmid = NULL;
    struct rdma_event_channel *ec = NULL;
    struct rdma_conn_param cm_params;

    TEST_NZ(getaddrinfo(host, port, NULL, &addr));
    TEST_Z(ec = rdma_create_event_channel());
    TEST_NZ(rdma_create_id(ec, &cmid, NULL, RDMA_PS_TCP));
    TEST_NZ(rdma_resolve_addr(cmid, NULL, addr->ai_addr, TIMEOUT_IN_MS));
    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_ADDR_RESOLVED));
    freeaddrinfo(addr);

    build_connection(cmid);
    TEST_NZ(rdma_resolve_route(cmid, TIMEOUT_IN_MS));
    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_ROUTE_RESOLVED));
    build_params(&cm_params);
    TEST_NZ(rdma_connect(cmid, &cm_params));
    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_ESTABLISHED));
    on_connect(cmid->context);

    /* Init MSG send to start RDMA */
    init_tfile(data, size);
    send_init(cmid->context);
    /*----------------------------*/

    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_DISCONNECTED));

    rdma_destroy_id(cmid);
    rdma_destroy_event_channel(ec);

    return 0;
}

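/*
 * Note: ibrdma_send() above (and ibrdma_transfer() further below) call a
 * wait_for_event() helper that is not included in these snippets. The sketch
 * below is a hypothetical implementation, assuming the helper simply blocks on
 * the event channel, acknowledges the event, and returns 0 only when the
 * expected CM event type was received; the original implementation may differ.
 */
#include <rdma/rdma_cma.h>

static int wait_for_event(struct rdma_event_channel *ec,
                          enum rdma_cm_event_type expected)
{
    struct rdma_cm_event *event = NULL;
    enum rdma_cm_event_type received;

    if (rdma_get_cm_event(ec, &event))
        return -1;                /* event channel error */

    received = event->event;
    rdma_ack_cm_event(event);     /* always ack before acting on the result */

    return (received == expected) ? 0 : -1;
}
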
static int ucma_process_conn_req(struct cma_event *evt, uint32_t handle)
{
    struct cma_id_private *id_priv;
    int ret;

    id_priv = ucma_alloc_id(evt->id_priv->id.channel,
                            evt->id_priv->id.context, evt->id_priv->id.ps);
    if (!id_priv) {
        ucma_destroy_kern_id(evt->id_priv->id.channel->fd, handle);
        ret = ERR(ENOMEM);
        goto err;
    }

    evt->event.listen_id = &evt->id_priv->id;
    evt->event.id = &id_priv->id;
    id_priv->handle = handle;

    ret = ucma_query_route(&id_priv->id);
    if (ret) {
        rdma_destroy_id(&id_priv->id);
        goto err;
    }

    return 0;
err:
    ucma_complete_event(evt->id_priv);
    return ret;
}

void
diod_rdma_shutdown (diod_rdma_t rdma)
{
    if (rdma->listen_id)
        rdma_destroy_id(rdma->listen_id);
    rdma->listen_id = NULL;
}

/**
 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
    BUG_ON(ib_conn == NULL);

    iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
             ib_conn, ib_conn->cma_id,
             ib_conn->fmr_pool, ib_conn->qp);

    /* qp is created only once both addr & route are resolved */
    if (ib_conn->fmr_pool != NULL)
        ib_destroy_fmr_pool(ib_conn->fmr_pool);

    if (ib_conn->qp != NULL)
        rdma_destroy_qp(ib_conn->cma_id);

    /* if cma handler context, the caller acts s.t the cma destroy the id */
    if (ib_conn->cma_id != NULL && can_destroy_id)
        rdma_destroy_id(ib_conn->cma_id);

    ib_conn->fmr_pool = NULL;
    ib_conn->qp = NULL;
    ib_conn->cma_id = NULL;

    kfree(ib_conn->page_vec);

    return 0;
}

static int fi_ibv_pep_setname(fid_t pep_fid, void *addr, size_t addrlen)
{
    struct fi_ibv_pep *pep;
    int ret;

    pep = container_of(pep_fid, struct fi_ibv_pep, pep_fid);

    if (pep->src_addrlen && (addrlen != pep->src_addrlen)) {
        FI_INFO(&fi_ibv_prov, FI_LOG_FABRIC,
                "addrlen expected: %d, got: %d.\n",
                pep->src_addrlen, addrlen);
        return -FI_EINVAL;
    }

    /* Re-create id if already bound */
    if (pep->bound) {
        ret = rdma_destroy_id(pep->id);
        if (ret) {
            FI_INFO(&fi_ibv_prov, FI_LOG_FABRIC,
                    "Unable to destroy previous rdma_cm_id\n");
            return -errno;
        }
        ret = rdma_create_id(NULL, &pep->id, NULL, RDMA_PS_TCP);
        if (ret) {
            FI_INFO(&fi_ibv_prov, FI_LOG_FABRIC,
                    "Unable to create rdma_cm_id\n");
            return -errno;
        }
    }

    ret = rdma_bind_addr(pep->id, (struct sockaddr *)addr);
    if (ret) {
        FI_INFO(&fi_ibv_prov, FI_LOG_FABRIC,
                "Unable to bind address to rdma_cm_id\n");
        return -errno;
    }

    return 0;
}

static void rdma_trans_destroy(void *a)
{
    Rdmatrans *rdma;
    struct ibv_qp_attr attr;

    rdma = a;
    if (rdma->connected)
        rdma_disconnect(rdma->cm_id);
    if (rdma->qp) {
        /* Move the QP to the error state so outstanding work requests are
         * flushed before the QP is destroyed. */
        attr.qp_state = IBV_QPS_ERR;
        ibv_modify_qp(rdma->qp, &attr, IBV_QP_STATE);
        ibv_destroy_qp(rdma->qp);
    }
    if (rdma->cq)
        ibv_destroy_cq(rdma->cq);
    if (rdma->ch)
        ibv_destroy_comp_channel(rdma->ch);
    if (rdma->snd_mr)
        ibv_dereg_mr(rdma->snd_mr);
    if (rdma->snd_buf)
        free(rdma->snd_buf);
    if (rdma->rcv_mr)
        ibv_dereg_mr(rdma->rcv_mr);
    if (rdma->rcv_buf)
        free(rdma->rcv_buf);
    if (rdma->pd)
        ibv_dealloc_pd(rdma->pd);
    if (rdma->cm_id)
        rdma_destroy_id(rdma->cm_id);
}

static void isert_kref_free(struct kref *kref)
{
    struct isert_connection *isert_conn = container_of(kref,
                                                       struct isert_connection,
                                                       kref);
    struct isert_device *isert_dev = isert_conn->isert_dev;
    struct isert_cq *cq = isert_conn->qp->recv_cq->cq_context;

    TRACE_ENTRY();

    pr_info("isert_conn_free conn:%p\n", isert_conn);

    isert_free_conn_resources(isert_conn);

    isert_conn_qp_destroy(isert_conn);
    mutex_lock(&dev_list_mutex);
    isert_dev->cq_qps[cq->idx]--;
    list_del(&isert_conn->portal_node);
    isert_deref_device(isert_dev);
    if (unlikely(isert_conn->portal->state == ISERT_PORTAL_INACTIVE))
        isert_portal_free(isert_conn->portal);
    mutex_unlock(&dev_list_mutex);

    rdma_destroy_id(isert_conn->cm_id);

    isert_conn_kfree(isert_conn);

    module_put(THIS_MODULE);

    TRACE_EXIT();
}

void on_disconnect(struct rdma_cm_id *id)
{
    struct timeval start, end, dt;

    gettimeofday(&start, NULL);

    struct connection *conn = (struct connection *)id->context;

    printf("disconnected.\n");

    rdma_destroy_qp(id);

    ibv_dereg_mr(conn->send_region_mr);
    ibv_dereg_mr(conn->recv_region_mr);
    ibv_dereg_mr(conn->send_msg_mr);
    ibv_dereg_mr(conn->recv_msg_mr);

    free(conn->send_region);
    free(conn->recv_region);
    free(conn->recv_msg);
    free(conn->send_msg);

    rdma_destroy_id(id);

    free(conn);

    gettimeofday(&end, NULL);
    timersub(&end, &start, &dt);
    long usec = dt.tv_usec + 1000000 * dt.tv_sec;
    printf("[Deregister] takes %ld micro_secs.\n", usec);

    return; /* exit event loop */
}

static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
    BUG_ON(ib_conn == NULL);

    iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
             ib_conn, ib_conn->cma_id,
             ib_conn->fmr_pool, ib_conn->qp);

    if (ib_conn->fmr_pool != NULL)
        ib_destroy_fmr_pool(ib_conn->fmr_pool);

    if (ib_conn->qp != NULL)
        rdma_destroy_qp(ib_conn->cma_id);

    if (ib_conn->cma_id != NULL)
        rdma_destroy_id(ib_conn->cma_id);

    ib_conn->fmr_pool = NULL;
    ib_conn->qp = NULL;
    ib_conn->cma_id = NULL;

    kfree(ib_conn->page_vec);

    return 0;
}

static void destroy_node(struct cmatest_node *node)
{
    if (!node->cma_id)
        return;

    if (node->cma_id->qp)
        rdma_destroy_qp(node->cma_id);

    if (node->cq[SEND_CQ_INDEX])
        ibv_destroy_cq(node->cq[SEND_CQ_INDEX]);

    if (node->cq[RECV_CQ_INDEX])
        ibv_destroy_cq(node->cq[RECV_CQ_INDEX]);

    if (node->mem) {
        ibv_dereg_mr(node->mr);
        free(node->mem);
    }

    if (node->pd)
        ibv_dealloc_pd(node->pd);

    /* Destroy the RDMA ID after all device resources */
    rdma_destroy_id(node->cma_id);
}

/**
 * Cleanup a conn_t struct.
 *
 * @param[in] conn
 */
void conn_fini(void *arg)
{
    conn_t *conn = arg;

#if WITH_TRANSPORT_IB
    if (conn->transport.type == CONN_TYPE_RDMA) {
        if (conn->rdma.cm_id) {
            if (conn->rdma.cm_id->qp)
                rdma_destroy_qp(conn->rdma.cm_id);
            rdma_destroy_id(conn->rdma.cm_id);
            conn->rdma.cm_id = NULL;
        }
    }
#endif

#if WITH_TRANSPORT_UDP && WITH_RUDP
    if (conn->transport.type == CONN_TYPE_UDP) {
        atomic_set(&conn->udp.send_seq_num, 0);
        atomic_set(&conn->udp.recv_seq_num, 0);
    }
#endif

    pthread_mutex_destroy(&conn->mutex);

#if WITH_TRANSPORT_IB || WITH_TRANSPORT_UDP
    pthread_cond_destroy(&conn->move_wait);
#endif
}

static void
rpcrdma_destroy_id(struct rdma_cm_id *id)
{
    if (id) {
        module_put(id->device->owner);
        rdma_destroy_id(id);
    }
}

static int rping_run_server(struct rping_cb *cb)
{
    struct ibv_recv_wr *bad_wr;
    int ret;

    ret = rping_bind_server(cb);
    if (ret)
        return ret;

    sem_wait(&cb->sem);
    if (cb->state != CONNECT_REQUEST) {
        fprintf(stderr, "wait for CONNECT_REQUEST state %d\n", cb->state);
        return -1;
    }

    ret = rping_setup_qp(cb, cb->child_cm_id);
    if (ret) {
        fprintf(stderr, "setup_qp failed: %d\n", ret);
        return ret;
    }

    ret = rping_setup_buffers(cb);
    if (ret) {
        fprintf(stderr, "rping_setup_buffers failed: %d\n", ret);
        goto err1;
    }

    ret = ibv_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
    if (ret) {
        fprintf(stderr, "ibv_post_recv failed: %d\n", ret);
        goto err2;
    }

    pthread_create(&cb->cqthread, NULL, cq_thread, cb);

    ret = rping_accept(cb);
    if (ret) {
        fprintf(stderr, "connect error %d\n", ret);
        goto err2;
    }

    ret = rping_test_server(cb);
    if (ret) {
        fprintf(stderr, "rping server failed: %d\n", ret);
        goto err3;
    }

    ret = 0;
err3:
    rdma_disconnect(cb->child_cm_id);
    pthread_join(cb->cqthread, NULL);
    rdma_destroy_id(cb->child_cm_id);
err2:
    rping_free_buffers(cb);
err1:
    rping_free_qp(cb);

    return ret;
}

int ibrdma_transfer(struct transfer_info *tfi, int num_tfi)
{
    struct addrinfo *addr;
    struct rdma_cm_id *cmid = NULL;
    struct rdma_event_channel *ec = NULL;
    struct rdma_conn_param cm_params;

    TEST_NZ(getaddrinfo(host, port, NULL, &addr));
    TEST_Z(ec = rdma_create_event_channel());
    TEST_NZ(rdma_create_id(ec, &cmid, NULL, RDMA_PS_TCP));
    TEST_NZ(rdma_resolve_addr(cmid, NULL, addr->ai_addr, TIMEOUT_IN_MS));
    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_ADDR_RESOLVED));
    freeaddrinfo(addr);

    build_connection(cmid);
    TEST_NZ(rdma_resolve_route(cmid, TIMEOUT_IN_MS));
    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_ROUTE_RESOLVED));
    build_params(&cm_params);
    TEST_NZ(rdma_connect(cmid, &cm_params));
    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_ESTABLISHED));
    on_connect(cmid->context);

    TEST_NZ(wait_for_event(ec, RDMA_CM_EVENT_DISCONNECTED));

    rdma_destroy_id(cmid);
    rdma_destroy_event_channel(ec);

    return 0;
}

int RDMA_Active_Finalize(struct RDMA_communicator *comm)
{
    TEST_NZ(wait_for_event(comm->ec, RDMA_CM_EVENT_DISCONNECTED));

    rdma_destroy_id(comm->cm_id);
    rdma_destroy_event_channel(comm->ec);

    return 0;
}

static int alloc_nodes(void)
{
    int ret, i;

    test.nodes = malloc(sizeof *test.nodes * connections);
    if (!test.nodes) {
        printf("cmatose: unable to allocate memory for test nodes\n");
        return -ENOMEM;
    }
    memset(test.nodes, 0, sizeof *test.nodes * connections);

    for (i = 0; i < connections; i++) {
        test.nodes[i].id = i;
        if (dst_addr) {
            ret = rdma_create_id(test.channel,
                                 &test.nodes[i].cma_id,
                                 &test.nodes[i],
                                 hints.ai_port_space);
            if (ret)
                goto err;
        }
    }
    return 0;
err:
    while (--i >= 0)
        rdma_destroy_id(test.nodes[i].cma_id);
    free(test.nodes);
    return ret;
}

static ssize_t
fi_ibv_eq_cm_process_event(struct fi_ibv_eq *eq, struct rdma_cm_event *cma_event,
                           uint32_t *event, struct fi_eq_cm_entry *entry, size_t len)
{
    fid_t fid;
    size_t datalen;

    fid = cma_event->id->context;
    switch (cma_event->event) {
//  case RDMA_CM_EVENT_ADDR_RESOLVED:
//      return 0;
//  case RDMA_CM_EVENT_ROUTE_RESOLVED:
//      return 0;
    case RDMA_CM_EVENT_CONNECT_REQUEST:
        *event = FI_CONNREQ;
        entry->info = fi_ibv_eq_cm_getinfo(eq->fab, cma_event);
        if (!entry->info) {
            rdma_destroy_id(cma_event->id);
            return 0;
        }
        break;
    case RDMA_CM_EVENT_ESTABLISHED:
        *event = FI_CONNECTED;
        entry->info = NULL;
        break;
    case RDMA_CM_EVENT_DISCONNECTED:
        *event = FI_SHUTDOWN;
        entry->info = NULL;
        break;
    case RDMA_CM_EVENT_ADDR_ERROR:
    case RDMA_CM_EVENT_ROUTE_ERROR:
    case RDMA_CM_EVENT_CONNECT_ERROR:
    case RDMA_CM_EVENT_UNREACHABLE:
        eq->err.fid = fid;
        eq->err.err = cma_event->status;
        return -FI_EAVAIL;
    case RDMA_CM_EVENT_REJECTED:
        eq->err.fid = fid;
        eq->err.err = ECONNREFUSED;
        eq->err.prov_errno = cma_event->status;
        return -FI_EAVAIL;
    case RDMA_CM_EVENT_DEVICE_REMOVAL:
        eq->err.fid = fid;
        eq->err.err = ENODEV;
        return -FI_EAVAIL;
    case RDMA_CM_EVENT_ADDR_CHANGE:
        eq->err.fid = fid;
        eq->err.err = EADDRNOTAVAIL;
        return -FI_EAVAIL;
    default:
        return 0;
    }

    entry->fid = fid;
    datalen = MIN(len - sizeof(*entry), cma_event->param.conn.private_data_len);
    if (datalen)
        memcpy(entry->data, cma_event->param.conn.private_data, datalen);
    return sizeof(*entry) + datalen;
}

static void rds_rdma_listen_stop(void)
{
    if (rds_rdma_listen_id) {
        rdsdebug("cm %p\n", rds_rdma_listen_id);
        rdma_destroy_id(rds_rdma_listen_id);
        rds_rdma_listen_id = NULL;
    }
}

void network_release()
{
    ibv_dereg_mr(mr_data);
    rdma_destroy_qp(cm_id);
    ibv_destroy_cq(cq);
    ibv_destroy_comp_channel(comp_chan);
    rdma_destroy_id(cm_id);
    rdma_destroy_event_channel(cm_channel);
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
    struct rdma_cm_id *listen_id;
    struct svcxprt_rdma *cma_xprt;
    struct svc_xprt *xprt;
    int ret;

    dprintk("svcrdma: Creating RDMA socket\n");
    if (sa->sa_family != AF_INET) {
        dprintk("svcrdma: Address family %d is not supported.\n",
                sa->sa_family);
        return ERR_PTR(-EAFNOSUPPORT);
    }
    cma_xprt = rdma_create_xprt(serv, 1);
    if (!cma_xprt)
        return ERR_PTR(-ENOMEM);
    xprt = &cma_xprt->sc_xprt;

    listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
                               IB_QPT_RC);
    if (IS_ERR(listen_id)) {
        ret = PTR_ERR(listen_id);
        dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
        goto err0;
    }

    ret = rdma_bind_addr(listen_id, sa);
    if (ret) {
        dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
        goto err1;
    }
    cma_xprt->sc_cm_id = listen_id;

    ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
    if (ret) {
        dprintk("svcrdma: rdma_listen failed = %d\n", ret);
        goto err1;
    }

    /*
     * We need to use the address from the cm_id in case the
     * caller specified 0 for the port number.
     */
    sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
    svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

    return &cma_xprt->sc_xprt;

err1:
    rdma_destroy_id(listen_id);
err0:
    kfree(cma_xprt);
    return ERR_PTR(ret);
}

static int run_server(void)
{
    struct rdma_cm_id *listen_id;
    int i, ret;

    printf("udaddy: starting server\n");
    ret = rdma_create_id(test.channel, &listen_id, &test, port_space);
    if (ret) {
        perror("udaddy: listen request failed");
        return ret;
    }

    if (src_addr) {
        ret = get_addr(src_addr, &test.src_in);
        if (ret)
            goto out;
    } else
        test.src_in.sin_family = PF_INET;

    test.src_in.sin_port = port;
    ret = rdma_bind_addr(listen_id, test.src_addr);
    if (ret) {
        perror("udaddy: bind address failed");
        goto out;
    }

    ret = rdma_listen(listen_id, 0);
    if (ret) {
        perror("udaddy: failure trying to listen");
        goto out;
    }

    connect_events();

    if (message_count) {
        printf("receiving data transfers\n");
        ret = poll_cqs();
        if (ret)
            goto out;

        printf("sending replies\n");
        for (i = 0; i < connections; i++) {
            ret = post_sends(&test.nodes[i], IBV_SEND_SIGNALED);
            if (ret)
                goto out;
        }

        ret = poll_cqs();
        if (ret)
            goto out;
        printf("data transfers complete\n");
    }
out:
    rdma_destroy_id(listen_id);
    return ret;
}

IBConnection::~IBConnection()
{
    if (cm_id_) {
        int err = rdma_destroy_id(cm_id_);
        if (err) {
            L_(error) << "rdma_destroy_id() failed";
        }
        cm_id_ = nullptr;
    }
}

static void *rping_persistent_server_thread(void *arg)
{
    struct rping_cb *cb = arg;
    struct ibv_recv_wr *bad_wr;
    int ret;

    ret = rping_setup_qp(cb, cb->child_cm_id);
    if (ret) {
        fprintf(stderr, "setup_qp failed: %d\n", ret);
        goto err0;
    }

    ret = rping_setup_buffers(cb);
    if (ret) {
        fprintf(stderr, "rping_setup_buffers failed: %d\n", ret);
        goto err1;
    }

    ret = ibv_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
    if (ret) {
        fprintf(stderr, "ibv_post_recv failed: %d\n", ret);
        goto err2;
    }

    ret = pthread_create(&cb->cqthread, NULL, cq_thread, cb);
    if (ret) {
        perror("pthread_create");
        goto err2;
    }

    ret = rping_accept(cb);
    if (ret) {
        fprintf(stderr, "connect error %d\n", ret);
        goto err3;
    }

    rping_test_server(cb);
    rdma_disconnect(cb->child_cm_id);
    pthread_join(cb->cqthread, NULL);
    rping_free_buffers(cb);
    rping_free_qp(cb);
    rdma_destroy_id(cb->child_cm_id);
    free_cb(cb);
    return NULL;

err3:
    pthread_cancel(cb->cqthread);
    pthread_join(cb->cqthread, NULL);
err2:
    rping_free_buffers(cb);
err1:
    rping_free_qp(cb);
err0:
    free_cb(cb);
    return NULL;
}