// Accept a pending RDMA connection request or finalize an established one.
//
// Pulls one event from the rdma_cm event channel:
//  * RDMA_CM_EVENT_CONNECT_REQUEST: build a receiver, pre-connect it, call
//    rdma_accept() and park the receiver on pending_recv_list until the
//    connection is established. Returns an empty pointer.
//  * RDMA_CM_EVENT_ESTABLISHED: locate the matching pending receiver,
//    complete the connection and return it.
//
// Returns an empty shared_ptr when no receiver is ready or on error
// (with ec set).
boost::shared_ptr<receiver> accept(
    Parcelport& parcelport, util::memory_chunk_pool& pool,
    boost::system::error_code& ec)
{
    boost::shared_ptr<receiver> rcv;
    rdma_cm_event event;
    if (!get_next_event(event_channel_, event, this, ec))
    {
        return rcv;
    }

    if (event.event == RDMA_CM_EVENT_CONNECT_REQUEST)
    {
        rdma_conn_param cm_params;
        std::memset(&cm_params, 0, sizeof(rdma_conn_param));
        cm_params.initiator_depth = cm_params.responder_resources = 1;
        cm_params.rnr_retry_count = 7; // infinite retry

        rcv.reset(new receiver(parcelport, pool));
        rcv->context().build_connection(parcelport, event.id, ec);
        if (ec)
        {
            rcv.reset();
            return rcv;
        }
        rcv->context().on_preconnect(event.id, ec);
        if (ec)
        {
            rcv.reset();
            return rcv;
        }
        // BUG FIX: the return value of rdma_accept() was previously
        // ignored; a failed accept left a dead receiver parked on
        // pending_recv_list forever. Report the failure through ec
        // and drop the receiver instead.
        if (rdma_accept(event.id, &cm_params))
        {
            ec = boost::system::error_code(
                errno, boost::system::system_category());
            rcv.reset();
            return rcv;
        }
        pending_recv_list.push_back(std::make_pair(event, rcv));
        rcv.reset();
        return rcv;
    }

    if (event.event == RDMA_CM_EVENT_ESTABLISHED)
    {
        // Match the established connection against the receivers parked
        // during the CONNECT_REQUEST phase (keyed by cm_id).
        for (pending_recv_list_type::iterator it = pending_recv_list.begin();
            it != pending_recv_list.end();)
        {
            if (it->first.id == event.id)
            {
                rcv = it->second;
                rcv->context().on_connection(event.id, ec);
                it = pending_recv_list.erase(it);
                break;
            }
            else
            {
                ++it;
            }
        }
        HPX_ASSERT(rcv);
    }
    return rcv;
}
/*
 * fi_ibv_msg_ep_accept() - accept an incoming connection on a MSG endpoint.
 *
 * Enables the endpoint's QP on demand, copies the caller-supplied private
 * data into the rdma_cm connection parameters and calls rdma_accept().
 * The stashed connection-request handle is freed on success.
 *
 * Returns 0 on success or a negative errno / fabric error on failure.
 */
static int fi_ibv_msg_ep_accept(struct fid_ep *ep, const void *param,
				size_t paramlen)
{
	struct fi_ibv_msg_ep *vep =
		container_of(ep, struct fi_ibv_msg_ep, ep_fid);
	struct rdma_conn_param conn_param;
	struct fi_ibv_connreq *connreq;
	int ret;

	/* Lazily enable the endpoint if its QP has not been created yet. */
	if (!vep->id->qp) {
		ret = ep->fid.ops->control(&ep->fid, FI_ENABLE, NULL);
		if (ret)
			return ret;
	}

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = param;
	conn_param.private_data_len = paramlen;
	conn_param.responder_resources = RDMA_MAX_RESP_RES;
	conn_param.initiator_depth = RDMA_MAX_INIT_DEPTH;
	conn_param.flow_control = 1;
	conn_param.rnr_retry_count = 7;
	if (vep->srq_ep)
		conn_param.srq = 1;

	ret = rdma_accept(vep->id, &conn_param);
	if (ret)
		return -errno;

	/* Accept succeeded: release the pended connection request. */
	connreq = container_of(vep->info->handle,
			       struct fi_ibv_connreq, handle);
	free(connreq);
	return 0;
}
/*
 * krping_accept() - accept the pending client connection on the child cm_id
 * and, unless one of the wlat/rlat/bw tests is selected, block until the
 * connection state machine reaches CONNECTED.
 *
 * Returns 0 on success, the rdma_accept() error or -1 on failure.
 */
static int krping_accept(struct krping_cb *cb)
{
	struct rdma_conn_param conn_param;
	int rc;

	DEBUG_LOG(PFX "accepting client connection request\n");

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;

	rc = rdma_accept(cb->child_cm_id, &conn_param);
	if (rc) {
		log(LOG_ERR, "rdma_accept error: %d\n", rc);
		return rc;
	}

	/* The wlat/rlat/bw test modes synchronize elsewhere. */
	if (!cb->wlat && !cb->rlat && !cb->bw) {
		krping_wait(cb, CONNECTED);
		if (cb->state == ERROR) {
			log(LOG_ERR, "wait for CONNECTED state %d\n",
			    cb->state);
			return -1;
		}
	}
	return 0;
}
/// Accept a pending client connection.
///
/// Creates an RC queue pair bound to the server's completion queue and
/// shared receive queue (the QP itself posts no receives), accepts the
/// rdma_cm request with default parameters, then inserts the new client
/// into the client list, which is kept sorted by QP number.
void RDMAServerSocket::accept(client_t client_id) const {
  ibv_qp_init_attr attr = {};
  attr.qp_type = IBV_QPT_RC;
  attr.cap.max_send_wr = 256;
  attr.cap.max_recv_wr = 0; // receives are handled via the SRQ
  attr.cap.max_send_sge = 1;
  attr.cap.max_recv_sge = 0;
  attr.cap.max_inline_data = 72;
  attr.recv_cq = cq;
  attr.send_cq = cq;
  attr.srq = id->srq;
  attr.sq_sig_all = 1;

  check_zero(rdma_create_qp(client_id.get(), NULL, &attr));
  check_zero(rdma_accept(client_id.get(), nullptr));

  clients([client_id = std::move(client_id)](auto&& clients) mutable {
    const auto new_qp_num = client_id->qp->qp_num;
    auto pos = std::lower_bound(
        std::begin(clients), std::end(clients), new_qp_num,
        [](const auto& client, const qp_t& qp_num) {
          return client->qp->qp_num < qp_num;
        });
    clients.insert(pos, std::move(client_id));
  });
}
/*
 * ibw_accept() - accept a pending incoming connection.
 *
 * Stores the caller's per-connection user data, accepts the rdma_cm
 * connection request with depth-1 parameters and marks the connection as
 * accepted; completion is handled later at RDMA_CM_EVENT_ESTABLISHED.
 *
 * Returns 0 on success, -1 on failure (error text left in ibw_lasterr).
 */
int ibw_accept(struct ibw_ctx *ctx, struct ibw_conn *conn, void *conn_userdata)
{
	struct ibw_conn_priv *pconn = talloc_get_type(conn->internal,
		struct ibw_conn_priv);
	struct rdma_conn_param conn_param;
	int rc;

	DEBUG(DEBUG_DEBUG, ("ibw_accept: cmid=%p\n", pconn->cm_id));

	conn->conn_userdata = conn_userdata;

	memset(&conn_param, 0, sizeof(struct rdma_conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;

	rc = rdma_accept(pconn->cm_id, &conn_param);
	if (rc) {
		sprintf(ibw_lasterr, "rdma_accept failed %d\n", rc);
		DEBUG(DEBUG_ERR, (ibw_lasterr));
		return -1; /* FIX: was "return -1;;" (stray empty statement) */
	}

	pconn->is_accepted = 1;

	/* continued at RDMA_CM_EVENT_ESTABLISHED */

	return 0;
}
std::unique_ptr<Socket> Acceptor::accept() { rdma_cm_id* new_cm_id; if (rdma_get_request(m_cm_id, &new_cm_id)) { throw exception::acceptor::generic_error( "Error on rdma_get_request: " + std::string(strerror(errno))); } rdma_conn_param conn_param; memset(&conn_param, 0, sizeof(rdma_conn_param)); conn_param.rnr_retry_count = m_rnr_retry_count; if (rdma_accept(new_cm_id, &conn_param)) { rdma_destroy_ep(new_cm_id); throw exception::acceptor::generic_error( "Error on rdma_accept: " + std::string(strerror(errno))); } ibv_qp_attr attr; memset(&attr, 0, sizeof(ibv_qp_attr)); attr.min_rnr_timer = m_min_rtr_timer; int flags = IBV_QP_MIN_RNR_TIMER; if (ibv_modify_qp(new_cm_id->qp, &attr, flags)) { rdma_destroy_ep(new_cm_id); throw exception::acceptor::generic_error( "Error on ibv_modify_qp: " + std::string(strerror(errno))); } std::unique_ptr<Socket> socket_ptr(new Socket(new_cm_id, m_credits)); return socket_ptr; }
/*
 * fi_ibv_accept_xrc() - accept an XRC connection request on the target-side
 * cm_id of an XRC endpoint.
 *
 * Creates the target QP for the pended request, embeds the XRC CM data
 * (connection tag, port, SRQ number) into the accept's private data, and
 * advances the endpoint's XRC connection state machine before calling
 * rdma_accept(); the state is rolled back if the accept fails.
 *
 * Note: the 'reciprocal' parameter is not read here — the body relies on
 * connreq->xrc.is_reciprocal instead (presumably equivalent; verify at the
 * call sites).
 *
 * Returns 0 on success or a negative error code.
 */
int fi_ibv_accept_xrc(struct fi_ibv_xrc_ep *ep, int reciprocal,
		      void *param, size_t paramlen)
{
	struct sockaddr *addr;
	struct fi_ibv_connreq *connreq;
	struct rdma_conn_param conn_param = { 0 };
	struct fi_ibv_xrc_cm_data *cm_data = param;
	int ret;

	/* Debug-log the local and peer addresses of the target cm_id. */
	addr = rdma_get_local_addr(ep->tgt_id);
	if (addr)
		ofi_straddr_dbg(&fi_ibv_prov, FI_LOG_CORE, "src_addr", addr);
	addr = rdma_get_peer_addr(ep->tgt_id);
	if (addr)
		ofi_straddr_dbg(&fi_ibv_prov, FI_LOG_CORE, "dest_addr", addr);

	connreq = container_of(ep->base_ep.info->handle,
			       struct fi_ibv_connreq, handle);
	ret = fi_ibv_ep_create_tgt_qp(ep, connreq->xrc.conn_data);
	if (ret)
		return ret;

	/* Echo the request's tag/port plus our SRQ number back to the peer
	 * as CM private data. */
	fi_ibv_set_xrc_cm_data(cm_data, connreq->xrc.is_reciprocal,
			       connreq->xrc.conn_tag, connreq->xrc.port,
			       ep->srqn);
	conn_param.private_data = cm_data;
	conn_param.private_data_len = paramlen;
	conn_param.responder_resources = RDMA_MAX_RESP_RES;
	conn_param.initiator_depth = RDMA_MAX_INIT_DEPTH;
	conn_param.flow_control = 1;
	conn_param.rnr_retry_count = 7;
	if (ep->base_ep.srq_ep)
		conn_param.srq = 1;

	/* Shared INI/TGT QP connection use a temporarily reserved QP number
	 * avoid the appearance of being a stale/duplicate IB CM message */
	if (!ep->tgt_id->qp)
		conn_param.qp_num = ep->conn_setup->rsvd_tgt_qpn->qp_num;

	/* Reciprocal (second-direction) connections release the pended tag;
	 * initial connections record it for the reciprocal phase. */
	if (connreq->xrc.is_reciprocal)
		fi_ibv_eq_clear_xrc_conn_tag(ep);
	else
		ep->conn_setup->conn_tag = connreq->xrc.conn_tag;

	/* Advance the XRC connection state machine; roll back on failure. */
	assert(ep->conn_state == FI_IBV_XRC_UNCONNECTED ||
	       ep->conn_state == FI_IBV_XRC_ORIG_CONNECTED);
	fi_ibv_next_xrc_conn_state(ep);

	ret = rdma_accept(ep->tgt_id, &conn_param);
	if (ret) {
		ret = -errno;
		VERBS_INFO_ERRNO(FI_LOG_EP_CTRL, "XRC TGT, ibv_open_qp",
				 errno);
		fi_ibv_prev_xrc_conn_state(ep);
	}
	/* The pended connection request is consumed either way. */
	free(connreq);
	return ret;
}
/**
 * Accept an RC connection request from a remote peer.
 *
 * Creates a QP on the request's cm_id (sharing the NI's CQ and SRQ),
 * takes ownership of the cm_id — cancelling any connect attempt of our
 * own that was in flight — and accepts the connection.
 *
 * @param[in] ni
 * @param[in] conn
 * @param[in] event
 *
 * @return status
 *
 * conn is locked
 */
static int accept_connection_request(ni_t *ni, conn_t *conn,
				     struct rdma_cm_event *event)
{
	struct rdma_conn_param conn_param;
	struct ibv_qp_init_attr init_attr;
	struct cm_priv_accept priv;

	conn->state = CONN_STATE_CONNECTING;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.qp_type = IBV_QPT_RC;
	init_attr.cap.max_send_wr = ni->iface->cap.max_send_wr;
	init_attr.send_cq = ni->rdma.cq;
	init_attr.recv_cq = ni->rdma.cq;
	init_attr.srq = ni->rdma.srq;
	init_attr.cap.max_send_sge = ni->iface->cap.max_send_sge;

	if (rdma_create_qp(event->id, ni->iface->pd, &init_attr)) {
		conn->state = CONN_STATE_DISCONNECTED;
		pthread_cond_broadcast(&conn->move_wait);
		return PTL_FAIL;
	}

	/* If we were already trying to connect ourselves, cancel it. */
	if (conn->rdma.cm_id != NULL) {
		assert(conn->rdma.cm_id->context == conn);
		conn->rdma.cm_id->context = NULL;
	}
	event->id->context = conn;
	conn->rdma.cm_id = event->id;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 7;

	if (ni->options & PTL_NI_LOGICAL) {
		/* BUG FIX: priv was previously sent completely
		 * uninitialized, leaking stack contents to the peer as CM
		 * private data. Zero it before use.
		 * TODO(review): confirm whether any cm_priv_accept members
		 * must carry real values for logical NIs. */
		memset(&priv, 0, sizeof(priv));
		conn_param.private_data = &priv;
		conn_param.private_data_len = sizeof(priv);
	}

	if (rdma_accept(event->id, &conn_param)) {
		rdma_destroy_qp(event->id);
		conn->rdma.cm_id = NULL;
		conn->state = CONN_STATE_DISCONNECTED;
		pthread_cond_broadcast(&conn->move_wait);
		return PTL_FAIL;
	}

	return PTL_OK;
}
/*
 * Passive-side handler for a connection request: set up the connection
 * resources, write a greeting containing our pid into the local message
 * region, and accept the connection (aborting via TEST_NZ on failure).
 * Always returns 0.
 */
int on_connect_request(struct rdma_cm_id *id)
{
	struct rdma_conn_param params;

	printf("received connection request.\n");

	build_connection(id);
	build_params(&params);

	sprintf(get_local_message_region(id->context),
		"message from passive/server side with pid %d", getpid());

	TEST_NZ(rdma_accept(id, &params));

	return 0;
}
/*
 * connect_client() - finish setting up an incoming client connection.
 *
 * Attaches a QP to the client's cm_id, allocates a per-connection context
 * with registered send/recv buffers for control messages, posts one
 * receive so the first control message cannot be lost, then accepts the
 * connection. On any failure after the context exists, the request is
 * rejected and all resources (context, memory regions, cm_id) are torn
 * down.
 *
 * Returns 0 on success, -1 on failure.
 */
static int connect_client (struct rdma_cm_id *client)
{
    if (!client)
        return -1;

    if (-1 == kiro_attach_qp (client)) {
        g_critical ("Could not create a QP for the new connection");
        rdma_destroy_id (client);
        return -1;
    }

    struct kiro_connection_context *conn_ctx =
        (struct kiro_connection_context *)g_try_malloc0 (sizeof (struct kiro_connection_context));
    if (!conn_ctx) {
        g_critical ("Failed to create connection context");
        rdma_destroy_id (client);
        return -1;
    }

    /* Register one receive and one send buffer for control messages. */
    conn_ctx->cf_mr_recv = kiro_create_rdma_memory (client->pd, sizeof (struct kiro_ctrl_msg), IBV_ACCESS_LOCAL_WRITE);
    conn_ctx->cf_mr_send = kiro_create_rdma_memory (client->pd, sizeof (struct kiro_ctrl_msg), IBV_ACCESS_LOCAL_WRITE);
    if (!conn_ctx->cf_mr_recv || !conn_ctx->cf_mr_send) {
        g_critical ("Failed to register control message memory");
        goto error;
    }
    conn_ctx->cf_mr_recv->size = conn_ctx->cf_mr_send->size = sizeof (struct kiro_ctrl_msg);
    client->context = conn_ctx;

    /* Post a receive before accepting so no early message is dropped. */
    if (rdma_post_recv (client, client, conn_ctx->cf_mr_recv->mem, conn_ctx->cf_mr_recv->size, conn_ctx->cf_mr_recv->mr)) {
        g_critical ("Posting preemtive receive for connection failed: %s", strerror (errno));
        goto error;
    }

    if (rdma_accept (client, NULL)) {
        g_warning ("Failed to establish connection to the client: %s", strerror (errno));
        goto error;
    }

    g_debug ("Client connection setup successfull");
    return 0;

error:
    rdma_reject (client, NULL, 0);
    kiro_destroy_connection_context (&conn_ctx);
    rdma_destroy_id (client);
    return -1;
}
void IBConnection::accept_connect_request() { L_(debug) << "accepting connection"; // Accept rdma connection request auto private_data = get_private_data(); assert(private_data->size() <= 255); struct rdma_conn_param conn_param = rdma_conn_param(); conn_param.responder_resources = 1; conn_param.private_data = private_data->data(); conn_param.private_data_len = static_cast<uint8_t>(private_data->size()); int err = rdma_accept(cm_id_, &conn_param); if (err) throw InfinibandException("RDMA accept failed"); }
/*
 * accept_connection() - accept an incoming connection request.
 *
 * Builds the connection resources for the given cm_id
 * (build_connection_s), fills the connection parameters (build_params)
 * and accepts via rdma_accept(), aborting through TEST_NZ on failure.
 * Also bumps and reports the global connection counter.
 *
 * (An earlier per-connection context / resource-allocation path was left
 * here commented out; it has been removed as dead code.)
 */
static void accept_connection(struct rdma_cm_id *id)
{
	struct rdma_conn_param conn_param;

	printf("Accepting connection on id == %p (total connections %d)\n",
	       id, ++connections);

	build_connection_s(id);
	/* conn_param is populated by build_params(). */
	build_params(&conn_param);

	TEST_NZ(rdma_accept(id, &conn_param));
}
/*
 * Server-side handler for an incoming connection request (udaddy).
 *
 * Claims the next free test node, binds it to the cm_id, verifies the
 * negotiated parameters, initializes the node and posts its receives,
 * then accepts the connection advertising the local QP number in the
 * accept parameters. On any failure the request is rejected.
 *
 * Returns 0 on success or a negative error code.
 */
static int connect_handler(struct rdma_cm_id *cma_id)
{
	struct cmatest_node *node;
	struct rdma_conn_param conn_param;
	int status;

	/* All connection slots in use? */
	if (test.conn_index == connections) {
		status = -ENOMEM;
		goto err1;
	}
	node = &test.nodes[test.conn_index++];

	node->cma_id = cma_id;
	cma_id->context = node;

	status = verify_test_params(node);
	if (status)
		goto err2;

	status = init_node(node);
	if (status)
		goto err2;

	status = post_recvs(node);
	if (status)
		goto err2;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.qp_num = node->cma_id->qp->qp_num;
	status = rdma_accept(node->cma_id, &conn_param);
	if (status) {
		perror("udaddy: failure accepting");
		goto err2;
	}
	node->connected = 1;
	test.connects_left--;
	return 0;

err2:
	node->cma_id = NULL;
	connect_error();
err1:
	printf("udaddy: failing connection request\n");
	rdma_reject(cma_id, NULL, 0);
	return status;
}
/**
 * Accept an RC connection request to self.
 *
 * Creates the QP for the loopback connection on the event's cm_id,
 * records it as the NI's self cm_id, and accepts with depth-1 parameters.
 *
 * called while holding connect->mutex
 * only used for physical NIs
 *
 * @param[in] ni
 * @param[in] conn
 * @param[in] event
 *
 * @return status
 */
static int accept_connection_self(ni_t *ni, conn_t *conn,
				  struct rdma_cm_event *event)
{
	struct rdma_conn_param params;
	struct ibv_qp_init_attr qp_init;

	conn->state = CONN_STATE_CONNECTING;

	memset(&qp_init, 0, sizeof(qp_init));
	qp_init.qp_type = IBV_QPT_RC;
	qp_init.send_cq = ni->rdma.cq;
	qp_init.recv_cq = ni->rdma.cq;
	qp_init.srq = ni->rdma.srq;
	qp_init.cap.max_send_wr = ni->iface->cap.max_send_wr;
	qp_init.cap.max_send_sge = ni->iface->cap.max_send_sge;

	if (rdma_create_qp(event->id, ni->iface->pd, &qp_init)) {
		conn->state = CONN_STATE_DISCONNECTED;
		pthread_cond_broadcast(&conn->move_wait);
		return PTL_FAIL;
	}

	ni->rdma.self_cm_id = event->id;

	/* The lower 2 bits (on 32 bits hosts), or 3 bits (on 64 bits
	 * hosts) of a pointer is always 0. Use it to store the type of
	 * context. 0=conn; 1=NI. */
	event->id->context = (void *)((uintptr_t)ni | 1);

	memset(&params, 0, sizeof(params));
	params.responder_resources = 1;
	params.initiator_depth = 1;
	params.rnr_retry_count = 7;

	if (rdma_accept(event->id, &params)) {
		rdma_destroy_qp(event->id);
		conn->state = CONN_STATE_DISCONNECTED;
		pthread_cond_broadcast(&conn->move_wait);
		return PTL_FAIL;
	}

	return PTL_OK;
}
/*
 * rping_accept() - accept the client's connection request with default
 * parameters, then wait on cb->sem until the connection state is updated
 * (presumably by the CM event handler — see the state check below).
 *
 * Returns 0 on success, the rdma_accept() error or -1 on failure.
 */
static int rping_accept(struct rping_cb *cb)
{
	int rc;

	DEBUG_LOG("accepting client connection request\n");

	rc = rdma_accept(cb->child_cm_id, NULL);
	if (rc) {
		perror("rdma_accept");
		return rc;
	}

	sem_wait(&cb->sem);
	if (cb->state == ERROR) {
		fprintf(stderr, "wait for CONNECTED state %d\n", cb->state);
		return -1;
	}
	return 0;
}
/*
 * Server-side handler for an incoming connection request (cmatose).
 *
 * Claims the next free test node, binds it to the cm_id, initializes the
 * node and posts its receives, then accepts the connection with default
 * parameters. On any failure the request is rejected.
 *
 * Returns 0 on success or a negative error code.
 */
static int connect_handler(struct rdma_cm_id *cma_id)
{
	struct cmatest_node *node;
	int status;

	/* All connection slots in use? */
	if (test.conn_index == connections) {
		status = -ENOMEM;
		goto err1;
	}
	node = &test.nodes[test.conn_index++];

	node->cma_id = cma_id;
	cma_id->context = node;

	status = init_node(node);
	if (status)
		goto err2;

	status = post_recvs(node);
	if (status)
		goto err2;

	status = rdma_accept(node->cma_id, NULL);
	if (status) {
		perror("cmatose: failure accepting");
		goto err2;
	}
	return 0;

err2:
	node->cma_id = NULL;
	connect_error();
err1:
	printf("cmatose: failing connection request\n");
	rdma_reject(cma_id, NULL, 0);
	return status;
}
/*
 * Server-side accept for the fio rdma ioengine.
 *
 * Accepts the child connection, waits for RDMA_CM_EVENT_ESTABLISHED, then
 * waits for the client's initial request (IBV_WC_RECV) and answers it with
 * a send. Note the RECV poll's failure flag is carried in the return value
 * rather than aborting immediately — preserved from the original flow.
 *
 * Returns 0 on success, 1 on failure.
 */
static int fio_rdmaio_accept(struct thread_data *td, struct fio_file *f)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct rdma_conn_param conn_param;
	struct ibv_send_wr *bad_wr;
	int recv_failed = 0;

	/* rdma_accept() - then wait for accept success */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;

	if (rdma_accept(rd->child_cm_id, &conn_param) != 0) {
		log_err("fio: rdma_accept\n");
		return 1;
	}

	if (get_next_channel_event(td, rd->cm_channel,
				   RDMA_CM_EVENT_ESTABLISHED) != 0) {
		log_err("fio: wait for RDMA_CM_EVENT_ESTABLISHED\n");
		return 1;
	}

	/* wait for request */
	recv_failed = rdma_poll_wait(td, IBV_WC_RECV) < 0;

	if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
		log_err("fio: ibv_post_send fail");
		return 1;
	}

	if (rdma_poll_wait(td, IBV_WC_SEND) < 0)
		return 1;

	return recv_failed;
}
/*
 * rping_accept() - accept the client's connection request with depth-1
 * parameters, then wait on cb->sem until the connection state is updated
 * (presumably by the CM event handler — see the state check below).
 *
 * Returns 0 on success, the rdma_accept() error or -1 on failure.
 */
static int rping_accept(struct rping_cb *cb)
{
	struct rdma_conn_param params;
	int rc;

	DEBUG_LOG("accepting client connection request\n");

	memset(&params, 0, sizeof(params));
	params.responder_resources = 1;
	params.initiator_depth = 1;

	rc = rdma_accept(cb->child_cm_id, &params);
	if (rc) {
		perror("rdma_accept");
		return rc;
	}

	sem_wait(&cb->sem);
	if (cb->state == ERROR) {
		fprintf(stderr, "wait for CONNECTED state %d\n", cb->state);
		return -1;
	}
	return 0;
}
/*
 * Passive-side handler for a connection request: build the shared context
 * and QP, allocate a per-connection structure, register its memory and
 * post receives, then accept with zeroed parameters (TEST_NZ aborts on
 * failure). Always returns 0.
 *
 * NOTE(review): the malloc() result is used unchecked — an allocation
 * failure would dereference NULL; confirm whether that is acceptable for
 * this example code.
 */
int on_connect_request(struct rdma_cm_id *id)
{
	struct ibv_qp_init_attr qp_attr;
	struct rdma_conn_param accept_params;
	struct connection *new_conn;

	printf("received connection request.\n");

	build_context(id->verbs);
	build_qp_attr(&qp_attr);
	TEST_NZ(rdma_create_qp(id, s_ctx->pd, &qp_attr));

	id->context = new_conn =
		(struct connection *)malloc(sizeof(struct connection));
	new_conn->qp = id->qp;

	register_memory(new_conn);
	post_receives(new_conn);

	memset(&accept_params, 0, sizeof(accept_params));
	TEST_NZ(rdma_accept(id, &accept_params));

	return 0;
}