int fi_ibv_process_xrc_connreq(struct fi_ibv_ep *ep, struct fi_ibv_connreq *connreq) { struct fi_ibv_xrc_ep *xrc_ep = container_of(ep, struct fi_ibv_xrc_ep, base_ep); int ret; assert(ep->info->src_addr); assert(ep->info->dest_addr); xrc_ep->conn_setup = calloc(1, sizeof(*xrc_ep->conn_setup)); if (!xrc_ep->conn_setup) return -FI_ENOMEM; /* This endpoint was created on the passive side of a connection * request. The reciprocal connection request will go back to the * passive port indicated by the active side */ ofi_addr_set_port(ep->info->src_addr, 0); ofi_addr_set_port(ep->info->dest_addr, connreq->xrc.port); ret = fi_ibv_create_ep(NULL, NULL, 0, ep->info, NULL, &ep->id); if (ret) { VERBS_WARN(FI_LOG_EP_CTRL, "Creation of INI cm_id failed %d\n", ret); goto create_err; } xrc_ep->tgt_id = connreq->id; xrc_ep->tgt_id->context = &ep->util_ep.ep_fid.fid; return FI_SUCCESS; create_err: free(xrc_ep->conn_setup); return ret; }
static int fi_ibv_get_param_int(char *param_name, char *param_str, size_t *param_default) { char *param_help; size_t len, ret; int param; len = strlen(param_str) + 50; param_help = malloc(len); ret = snprintf(param_help, len, "%s (default: %zu)", param_str, *param_default); if (ret >= len) { VERBS_WARN(FI_LOG_EP_DATA, "param_help string size insufficient!\n"); free(param_help); assert(0); return -FI_ETOOSMALL; } fi_param_define(&fi_ibv_prov, param_name, FI_PARAM_INT, param_help); if (!fi_param_get_int(&fi_ibv_prov, param_name, ¶m)) *param_default = param; free(param_help); return 0; }
static int fi_ibv_rai_to_fi(struct rdma_addrinfo *rai, struct fi_info *fi) { fi->addr_format = ofi_translate_addr_format(rai->ai_family); if (fi->addr_format == FI_FORMAT_UNSPEC) { VERBS_WARN(FI_LOG_FABRIC, "Unknown address format\n"); return -FI_EINVAL; } if (rai->ai_src_len) { free(fi->src_addr); if (!(fi->src_addr = malloc(rai->ai_src_len))) return -FI_ENOMEM; memcpy(fi->src_addr, rai->ai_src_addr, rai->ai_src_len); fi->src_addrlen = rai->ai_src_len; } if (rai->ai_dst_len) { free(fi->dest_addr); if (!(fi->dest_addr = malloc(rai->ai_dst_len))) return -FI_ENOMEM; memcpy(fi->dest_addr, rai->ai_dst_addr, rai->ai_dst_len); fi->dest_addrlen = rai->ai_dst_len; } return 0; }
/*
 * Validate the XRC CM private data received from the peer: it must be at
 * least as large as our fi_ibv_xrc_cm_data and carry a matching protocol
 * version.
 *
 * Returns FI_SUCCESS or -FI_EINVAL.
 */
int fi_ibv_verify_xrc_cm_data(struct fi_ibv_xrc_cm_data *remote,
			      int private_data_len)
{
	/* BUG FIX: the original test "sizeof(*remote) > private_data_len"
	 * promotes the signed length to size_t, so a negative
	 * private_data_len would wrap to a huge value and pass the check */
	if (private_data_len < (int) sizeof(*remote)) {
		VERBS_WARN(FI_LOG_EP_CTRL,
			   "XRC MSG EP CM data length mismatch\n");
		return -FI_EINVAL;
	}

	if (remote->version != FI_IBV_XRC_VERSION) {
		VERBS_WARN(FI_LOG_EP_CTRL,
			   "XRC MSG EP connection protocol mismatch "
			   "(local %"PRIu8", remote %"PRIu8")\n",
			   FI_IBV_XRC_VERSION, remote->version);
		return -FI_EINVAL;
	}
	return FI_SUCCESS;
}
/*
 * Attach source addresses to each fi_info whose domain matches an active
 * verbs device interface. When a device has several configured addresses,
 * the fi_info is duplicated once per additional address and the duplicates
 * are spliced into the *info list.
 *
 * NOTE(review): this definition is truncated in this chunk — the "out"
 * label targeted by the goto statements (and the function's cleanup/return
 * tail, including any handling of fi_unconf/fi_prev) is not visible here.
 */
static int fi_ibv_get_srcaddr_devs(struct fi_info **info)
{
	struct fi_info *fi, *add_info;
	struct fi_info *fi_unconf = NULL, *fi_prev = NULL;
	struct verbs_dev_info *dev;
	struct verbs_addr *addr;
	int ret = 0;

	DEFINE_LIST(verbs_devs);

	/* Build the list of (device, address) pairs from active interfaces */
	ret = fi_ibv_getifaddrs(&verbs_devs);
	if (ret)
		return ret;

	if (dlist_empty(&verbs_devs)) {
		VERBS_WARN(FI_LOG_CORE, "No interface address found\n");
		return 0;
	}

	for (fi = *info; fi; fi = fi->next) {
		/* Match this fi_info's domain to a verbs device by name
		 * prefix */
		dlist_foreach_container(&verbs_devs, struct verbs_dev_info,
					dev, entry)
			if (!strncmp(fi->domain_attr->name, dev->name,
				     strlen(dev->name))) {
				dlist_foreach_container(&dev->addrs,
							struct verbs_addr,
							addr, entry) {
					/* When a device has multiple interfaces/addresses configured
					 * duplicate fi_info and add the address info. fi->src_addr
					 * would have been set in the previous iteration */
					if (fi->src_addr) {
						if (!(add_info = fi_dupinfo(fi))) {
							ret = -FI_ENOMEM;
							goto out;
						}
						add_info->next = fi->next;
						fi->next = add_info;
						fi = add_info;
					}
					ret = fi_ibv_rai_to_fi(addr->rai, fi);
					if (ret)
						goto out;
				}
				break;
			}
	}
int fi_ibv_connect_xrc(struct fi_ibv_xrc_ep *ep, struct sockaddr *addr, int reciprocal, void *param, size_t paramlen) { struct fi_ibv_domain *domain = fi_ibv_ep_to_domain(&ep->base_ep); struct sockaddr *peer_addr; int ret; assert(ep->base_ep.id && !ep->base_ep.ibv_qp && !ep->ini_conn); peer_addr = rdma_get_local_addr(ep->base_ep.id); if (peer_addr) ofi_straddr_dbg(&fi_ibv_prov, FI_LOG_FABRIC, "XRC connect src_addr", peer_addr); peer_addr = rdma_get_peer_addr(ep->base_ep.id); if (peer_addr) ofi_straddr_dbg(&fi_ibv_prov, FI_LOG_FABRIC, "XRC connect dest_addr", peer_addr); if (!reciprocal) { ep->conn_setup = calloc(1, sizeof(*ep->conn_setup)); if (!ep->conn_setup) return -FI_ENOMEM; } fastlock_acquire(&domain->xrc.ini_mgmt_lock); ret = fi_ibv_get_shared_ini_conn(ep, &ep->ini_conn); if (ret) { VERBS_WARN(FI_LOG_FABRIC, "Get of shared XRC INI connection failed %d\n", ret); fastlock_release(&domain->xrc.ini_mgmt_lock); if (!reciprocal) { free(ep->conn_setup); ep->conn_setup = NULL; } return ret; } fi_ibv_add_pending_ini_conn(ep, reciprocal, param, paramlen); fi_ibv_sched_ini_conn(ep->ini_conn); fastlock_release(&domain->xrc.ini_mgmt_lock); return FI_SUCCESS; }
/*
 * Block (up to 'timeout' ms) on the CQ's completion channel fd and its
 * internal signal pipe.
 *
 * Returns:
 *   0          - a completion-channel event was consumed (counted in
 *                _cq->nevents; the caller is responsible for the matching
 *                ibv_ack_cq_events)
 *   -FI_EAGAIN - poll() timed out, or the signal pipe fired (the pipe is
 *                drained here); note the signal path overrides a
 *                simultaneous CQ event's return value
 *   -errno     - poll() itself failed
 *   -FI_EOTHER - an fd reported revents other than POLLIN
 *   other      - error from ibv_get_cq_event()
 */
static inline int fi_ibv_poll_events(struct fi_ibv_cq *_cq, int timeout)
{
	int ret, rc;
	void *context;
	struct pollfd fds[2];
	char data;

	fds[0].fd = _cq->channel->fd;
	fds[1].fd = _cq->signal_fd[0];
	fds[0].events = fds[1].events = POLLIN;

	rc = poll(fds, 2, timeout);
	if (rc == 0)
		return -FI_EAGAIN;
	else if (rc < 0)
		return -errno;

	if (fds[0].revents & POLLIN) {
		/* Consume exactly one completion-channel event */
		ret = ibv_get_cq_event(_cq->channel, &_cq->cq, &context);
		if (ret)
			return ret;

		ofi_atomic_inc32(&_cq->nevents);
		rc--;
	}
	if (fds[1].revents & POLLIN) {
		/* Drain the nonblocking signal pipe completely */
		do {
			ret = read(fds[1].fd, &data, 1);
		} while (ret > 0);
		ret = -FI_EAGAIN;
		rc--;
	}
	if (rc) {
		/* An fd became ready with something other than POLLIN
		 * (e.g. POLLERR/POLLHUP) */
		VERBS_WARN(FI_LOG_CQ, "Unknown poll error: check revents\n");
		return -FI_EOTHER;
	}

	return ret;
}
/*
 * Drain all pending send completions from the endpoint's send CQ into the
 * CQ's software work-completion queue (wcq), under the CQ lock.
 *
 * A spare wce buffer is allocated lazily and reused until a completion is
 * actually stored in it; any unused buffer is released before returning.
 * If at least one completion was queued and the CQ has a completion
 * channel, the CQ is signaled so a waiter wakes up and sees it.
 *
 * Returns 0, -FI_ENOMEM on buffer-pool exhaustion, or a negative error
 * from fi_ibv_poll_cq()/fi_ibv_cq_signal().
 */
static int fi_ibv_reap_comp(struct fi_ibv_msg_ep *ep)
{
	struct fi_ibv_wce *wce = NULL;
	int got_wc = 0;
	int ret = 0;

	fastlock_acquire(&ep->scq->lock);
	/* comp_pending counts signaled sends whose completions have not yet
	 * been reaped from the CQ */
	while (ofi_atomic_get32(&ep->comp_pending) > 0) {
		if (!wce) {
			wce = util_buf_alloc(ep->scq->wce_pool);
			if (!wce) {
				fastlock_release(&ep->scq->lock);
				return -FI_ENOMEM;
			}
			memset(wce, 0, sizeof(*wce));
		}
		ret = fi_ibv_poll_cq(ep->scq, &wce->wc);
		if (ret < 0) {
			VERBS_WARN(FI_LOG_EP_DATA,
				   "Failed to read completion for signaled send\n");
			util_buf_release(ep->scq->wce_pool, wce);
			fastlock_release(&ep->scq->lock);
			return ret;
		} else if (ret > 0) {
			/* Ownership of wce moves to the wcq; force a fresh
			 * allocation on the next iteration */
			slist_insert_tail(&wce->entry, &ep->scq->wcq);
			got_wc = 1;
			wce = NULL;
		}
	}
	if (wce)
		util_buf_release(ep->scq->wce_pool, wce);

	if (got_wc && ep->scq->channel)
		ret = fi_ibv_cq_signal(&ep->scq->cq_fid);

	fastlock_release(&ep->scq->lock);
	return ret;
}
void fi_ibv_prev_xrc_conn_state(struct fi_ibv_xrc_ep *ep) { switch (ep->conn_state) { case FI_IBV_XRC_UNCONNECTED: break; case FI_IBV_XRC_ORIG_CONNECTING: ep->conn_state = FI_IBV_XRC_UNCONNECTED; break; case FI_IBV_XRC_ORIG_CONNECTED: ep->conn_state = FI_IBV_XRC_ORIG_CONNECTING; break; case FI_IBV_XRC_RECIP_CONNECTING: ep->conn_state = FI_IBV_XRC_ORIG_CONNECTED; break; case FI_IBV_XRC_CONNECTED: ep->conn_state = FI_IBV_XRC_RECIP_CONNECTING; break; default: assert(0); VERBS_WARN(FI_LOG_FABRIC, "Unkown XRC connection state %d\n", ep->conn_state); } }
/*
 * Enable an MSG endpoint: validate its EQ/CQ bindings against its declared
 * capabilities, build the ibv_qp_init_attr from the bound CQs and the
 * tx/rx attributes, and create the RC QP via rdma_create_qp().
 *
 * Binding rules enforced here:
 *   - an EQ must be bound;
 *   - at least one of send/recv CQ must be bound;
 *   - a send CQ is required if the EP has transmit caps (FI_SEND/FI_RMA);
 *   - a recv CQ is required if the EP has receive caps (FI_RECV).
 * A missing CQ on one side is substituted with the other side's CQ for the
 * QP (verbs requires both send_cq and recv_cq), with zero WR capacity on
 * the missing side.
 *
 * Returns 0, -FI_ENOEQ/-FI_ENOCQ for binding violations, or -errno from
 * rdma_create_qp().
 */
static int fi_ibv_msg_ep_enable(struct fid_ep *ep)
{
	struct ibv_qp_init_attr attr;
	struct fi_ibv_msg_ep *_ep;
	struct ibv_pd *pd;

	_ep = container_of(ep, struct fi_ibv_msg_ep, ep_fid);
	if (!_ep->eq) {
		VERBS_WARN(FI_LOG_EP_CTRL,
			   "Endpoint is not bound to an event queue\n");
		return -FI_ENOEQ;
	}

	if (!_ep->scq && !_ep->rcq) {
		VERBS_WARN(FI_LOG_EP_CTRL, "Endpoint is not bound to "
			   "a send or receive completion queue\n");
		return -FI_ENOCQ;
	}

	if (!_ep->scq && (ofi_send_allowed(_ep->info->caps) ||
				ofi_rma_initiate_allowed(_ep->info->caps))) {
		VERBS_WARN(FI_LOG_EP_CTRL, "Endpoint is not bound to "
			   "a send completion queue when it has transmit "
			   "capabilities enabled (FI_SEND | FI_RMA).\n");
		return -FI_ENOCQ;
	}

	if (!_ep->rcq && ofi_recv_allowed(_ep->info->caps)) {
		VERBS_WARN(FI_LOG_EP_CTRL, "Endpoint is not bound to "
			   "a receive completion queue when it has receive "
			   "capabilities enabled. (FI_RECV)\n");
		return -FI_ENOCQ;
	}

	memset(&attr, 0, sizeof attr);
	if (_ep->scq) {
		attr.cap.max_send_wr = _ep->info->tx_attr->size;
		attr.cap.max_send_sge = _ep->info->tx_attr->iov_limit;
		attr.send_cq = _ep->scq->cq;
		pd = _ep->scq->domain->pd;
	} else {
		/* No send CQ bound (receive-only EP): satisfy verbs with the
		 * recv CQ; max_send_wr stays 0 */
		attr.send_cq = _ep->rcq->cq;
		pd = _ep->rcq->domain->pd;
	}

	if (_ep->rcq) {
		attr.cap.max_recv_wr = _ep->info->rx_attr->size;
		attr.cap.max_recv_sge = _ep->info->rx_attr->iov_limit;
		attr.recv_cq = _ep->rcq->cq;
	} else {
		/* No recv CQ bound: the earlier checks guarantee scq is
		 * non-NULL here */
		attr.recv_cq = _ep->scq->cq;
	}

	attr.cap.max_inline_data = _ep->info->tx_attr->inject_size;

	if (_ep->srq_ep) {
		attr.srq =_ep->srq_ep->srq;
		/* Use of SRQ, no need to allocate recv_wr entries in the QP */
		attr.cap.max_recv_wr = 0;

		/* Override the default ops to prevent the user from posting WRs to a
		 * QP where a SRQ is attached to */
		_ep->ep_fid.msg = fi_ibv_msg_srq_ep_ops_msg(_ep);
	}

	attr.qp_type = IBV_QPT_RC;
	attr.sq_sig_all = 0;
	attr.qp_context = _ep;

	return rdma_create_qp(_ep->id, pd, &attr) ? -errno : 0;
}
/*
 * Register a memory region with the domain's protection domain, mapping
 * libfabric access flags (FI_RECV/FI_READ/FI_WRITE/FI_REMOTE_*) to verbs
 * IBV_ACCESS_* bits. On success the MR fid is returned through *mr and,
 * if an EQ is bound with FI_REG_MR, an FI_MR_COMPLETE event is emitted.
 */
static int fi_ibv_mr_reg(struct fid *fid, const void *buf, size_t len,
			 uint64_t access, uint64_t offset,
			 uint64_t requested_key, uint64_t flags,
			 struct fid_mr **mr, void *context)
{
	struct fi_ibv_mem_desc *md;
	int fi_ibv_access = 0;
	struct fid_domain *domain;

	if (flags)
		return -FI_EBADFLAGS;

	if (fid->fclass != FI_CLASS_DOMAIN) {
		return -FI_EINVAL;
	}

	domain = container_of(fid, struct fid_domain, fid);

	md = calloc(1, sizeof *md);
	if (!md)
		return -FI_ENOMEM;

	md->domain = container_of(domain, struct fi_ibv_domain, domain_fid);
	md->mr_fid.fid.fclass = FI_CLASS_MR;
	md->mr_fid.fid.context = context;
	md->mr_fid.fid.ops = &fi_ibv_mr_ops;

	/* Enable local write access by default for FI_EP_RDM which hides local
	 * registration requirements. This allows to avoid buffering or double
	 * registration */
	if (!(md->domain->info->caps & FI_LOCAL_MR))
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;

	/* Local read access to an MR is enabled by default in verbs */
	if (access & FI_RECV)
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;

	/* iWARP spec requires Remote Write access for an MR that is used
	 * as a data sink for a Remote Read */
	if (access & FI_READ) {
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;
		if (md->domain->verbs->device->transport_type ==
		    IBV_TRANSPORT_IWARP)
			fi_ibv_access |= IBV_ACCESS_REMOTE_WRITE;
	}

	if (access & FI_WRITE)
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE;

	if (access & FI_REMOTE_READ)
		fi_ibv_access |= IBV_ACCESS_REMOTE_READ;

	/* Verbs requires Local Write access too for Remote Write access */
	if (access & FI_REMOTE_WRITE)
		fi_ibv_access |= IBV_ACCESS_LOCAL_WRITE |
				 IBV_ACCESS_REMOTE_WRITE |
				 IBV_ACCESS_REMOTE_ATOMIC;

	md->mr = ibv_reg_mr(md->domain->pd, (void *) buf, len, fi_ibv_access);
	if (!md->mr)
		goto err;

	/* The lkey doubles as the local descriptor handed back to callers */
	md->mr_fid.mem_desc = (void *) (uintptr_t) md->mr->lkey;
	md->mr_fid.key = md->mr->rkey;
	*mr = &md->mr_fid;
	if(md->domain->eq && (md->domain->eq_flags & FI_REG_MR)) {
		struct fi_eq_entry entry = {
			.fid = &md->mr_fid.fid,
			.context = context
		};
		fi_ibv_eq_write_event(md->domain->eq, FI_MR_COMPLETE,
				      &entry, sizeof(entry));
	}
	return 0;

err:
	free(md);
	return -errno;
}

/*
 * Vector MR registration: verbs supports only a single IOV, so this just
 * forwards the first (and only) element to fi_ibv_mr_reg().
 */
static int fi_ibv_mr_regv(struct fid *fid, const struct iovec * iov,
			  size_t count, uint64_t access, uint64_t offset,
			  uint64_t requested_key, uint64_t flags,
			  struct fid_mr **mr, void *context)
{
	if (count > VERBS_MR_IOV_LIMIT) {
		VERBS_WARN(FI_LOG_FABRIC,
			   "iov count > %d not supported\n",
			   VERBS_MR_IOV_LIMIT);
		return -FI_EINVAL;
	}
	return fi_ibv_mr_reg(fid, iov->iov_base, iov->iov_len, access,
			     offset, requested_key, flags, mr, context);
}

/* Attribute-based MR registration: unpack fi_mr_attr and forward. */
static int fi_ibv_mr_regattr(struct fid *fid, const struct fi_mr_attr *attr,
			     uint64_t flags, struct fid_mr **mr)
{
	return fi_ibv_mr_regv(fid, attr->mr_iov, attr->iov_count,
			      attr->access, 0, attr->requested_key, flags,
			      mr, attr->context);
}

/* Bind an EQ to the domain (the only supported binding). */
static int fi_ibv_domain_bind(struct fid *fid, struct fid *bfid,
			      uint64_t flags)
{
	struct fi_ibv_domain *domain;
	struct fi_ibv_eq *eq;

	domain = container_of(fid, struct fi_ibv_domain, domain_fid.fid);

	switch (bfid->fclass) {
	case FI_CLASS_EQ:
		eq = container_of(bfid, struct fi_ibv_eq, eq_fid);
		domain->eq = eq;
		domain->eq_flags = flags;
		break;
	default:
		/* NOTE(review): returns -EINVAL, while the rest of this file
		 * uses -FI_EINVAL — confirm whether callers distinguish */
		return -EINVAL;
	}
	return 0;
}

/* Tear down a domain: RDM listener state, the PD, and the cached info. */
static int fi_ibv_domain_close(fid_t fid)
{
	struct fi_ibv_domain *domain;
	int ret;

	domain = container_of(fid, struct fi_ibv_domain, domain_fid.fid);

	if (domain->rdm) {
		rdma_destroy_ep(domain->rdm_cm->listener);
		free(domain->rdm_cm);
	}

	if (domain->pd) {
		ret = ibv_dealloc_pd(domain->pd);
		if (ret)
			return -ret;
		domain->pd = NULL;
	}

	fi_freeinfo(domain->info);
	free(domain);
	return 0;
}

/*
 * Find the verbs device whose name matches 'name' and store its context in
 * domain->verbs. For RDM domains the comparison ignores the RDM domain
 * suffix appended to the name.
 *
 * NOTE(review): on no match this returns the last str(n)cmp() result,
 * which may be a positive value rather than a -FI_* code — and for RDM,
 * strlen(name) - strlen(suffix) would underflow if the name is shorter
 * than the suffix; confirm callers only check for non-zero.
 */
static int fi_ibv_open_device_by_name(struct fi_ibv_domain *domain,
				      const char *name)
{
	struct ibv_context **dev_list;
	int i, ret = -FI_ENODEV;

	if (!name)
		return -FI_EINVAL;

	dev_list = rdma_get_devices(NULL);
	if (!dev_list)
		return -errno;

	for (i = 0; dev_list[i] && ret; i++) {
		if (domain->rdm) {
			ret = strncmp(name,
				      ibv_get_device_name(dev_list[i]->device),
				      strlen(name) -
				      strlen(verbs_rdm_domain.suffix));
		} else {
			ret = strcmp(name,
				     ibv_get_device_name(dev_list[i]->device));
		}
		if (!ret)
			domain->verbs = dev_list[i];
	}
	rdma_free_devices(dev_list);
	return ret;
}

static struct fi_ops fi_ibv_fid_ops = {
	.size = sizeof(struct fi_ops),
	.close = fi_ibv_domain_close,
	.bind = fi_ibv_domain_bind,
	.control = fi_no_control,
	.ops_open = fi_no_ops_open,
};

static struct fi_ops_mr fi_ibv_domain_mr_ops = {
	.size = sizeof(struct fi_ops_mr),
	.reg = fi_ibv_mr_reg,
	.regv = fi_ibv_mr_regv,
	.regattr = fi_ibv_mr_regattr,
};

static struct fi_ops_domain fi_ibv_domain_ops = {
	.size = sizeof(struct fi_ops_domain),
	.av_open = fi_no_av_open,
	.cq_open = fi_ibv_cq_open,
	.endpoint = fi_ibv_open_ep,
	.scalable_ep = fi_no_scalable_ep,
	.cntr_open = fi_no_cntr_open,
	.poll_open = fi_no_poll_open,
	.stx_ctx = fi_no_stx_context,
	.srx_ctx = fi_ibv_srq_context,
};

static struct fi_ops_domain fi_ibv_rdm_domain_ops = {
	.size = sizeof(struct fi_ops_domain),
	.av_open = fi_ibv_rdm_av_open,
	.cq_open = fi_ibv_rdm_cq_open,
	.endpoint = fi_ibv_rdm_open_ep,
	.scalable_ep = fi_no_scalable_ep,
	/* NOTE(review): "fi_rbv_..." looks like a typo but is the symbol's
	 * actual name elsewhere in the project — do not "fix" locally */
	.cntr_open = fi_rbv_rdm_cntr_open,
	.poll_open = fi_no_poll_open,
	.stx_ctx = fi_no_stx_context,
	.srx_ctx = fi_no_srx_context,
};

/*
 * Open a domain: validate the requested domain attributes against the
 * cached verbs info, open the matching device, allocate its PD, and for
 * RDM endpoints create the CM event channel plus listener id.
 */
static int fi_ibv_domain(struct fid_fabric *fabric, struct fi_info *info,
			 struct fid_domain **domain, void *context)
{
	struct fi_ibv_domain *_domain;
	struct fi_ibv_fabric *fab;
	struct fi_info *fi;
	int ret;

	fi = fi_ibv_get_verbs_info(info->domain_attr->name);
	if (!fi)
		return -FI_EINVAL;

	fab = container_of(fabric, struct fi_ibv_fabric,
			   util_fabric.fabric_fid);

	ret = ofi_check_domain_attr(&fi_ibv_prov, fabric->api_version,
				    fi->domain_attr, info->domain_attr);
	if (ret)
		return ret;

	_domain = calloc(1, sizeof *_domain);
	if (!_domain)
		return -FI_ENOMEM;

	_domain->info = fi_dupinfo(info);
	if (!_domain->info)
		/* NOTE(review): ret is still 0 here, so this failure path
		 * returns success — looks like it should set -FI_ENOMEM */
		goto err1;

	_domain->rdm = FI_IBV_EP_TYPE_IS_RDM(info);
	if (_domain->rdm) {
		_domain->rdm_cm = calloc(1, sizeof(*_domain->rdm_cm));
		if (!_domain->rdm_cm) {
			ret = -FI_ENOMEM;
			goto err2;
		}
	}
	ret = fi_ibv_open_device_by_name(_domain, info->domain_attr->name);
	if (ret)
		goto err2;

	_domain->pd = ibv_alloc_pd(_domain->verbs);
	if (!_domain->pd) {
		ret = -errno;
		goto err2;
	}

	_domain->domain_fid.fid.fclass = FI_CLASS_DOMAIN;
	_domain->domain_fid.fid.context = context;
	_domain->domain_fid.fid.ops = &fi_ibv_fid_ops;
	_domain->domain_fid.mr = &fi_ibv_domain_mr_ops;
	if (_domain->rdm) {
		_domain->domain_fid.ops = &fi_ibv_rdm_domain_ops;

		_domain->rdm_cm->ec = rdma_create_event_channel();
		if (!_domain->rdm_cm->ec) {
			VERBS_INFO(FI_LOG_EP_CTRL,
				   "Failed to create listener event channel: %s\n",
				   strerror(errno));
			ret = -FI_EOTHER;
			goto err2;
		}

		if (fi_fd_nonblock(_domain->rdm_cm->ec->fd) != 0) {
			VERBS_INFO_ERRNO(FI_LOG_EP_CTRL, "fcntl", errno);
			ret = -FI_EOTHER;
			goto err3;
		}

		if (rdma_create_id(_domain->rdm_cm->ec,
				   &_domain->rdm_cm->listener, NULL,
				   RDMA_PS_TCP)) {
			VERBS_INFO(FI_LOG_EP_CTRL,
				   "Failed to create cm listener: %s\n",
				   strerror(errno));
			ret = -FI_EOTHER;
			goto err3;
		}
		_domain->rdm_cm->is_bound = 0;
	} else {
		_domain->domain_fid.ops = &fi_ibv_domain_ops;
	}
	_domain->fab = fab;

	*domain = &_domain->domain_fid;
	return 0;
err3:
	if (_domain->rdm)
		rdma_destroy_event_channel(_domain->rdm_cm->ec);
err2:
	if (_domain->rdm)
		free(_domain->rdm_cm);
	fi_freeinfo(_domain->info);
err1:
	free(_domain);
	return ret;
}

/*
 * Check whether all given fids are ready to be waited on. CQs delegate to
 * their trywait handler; EQs are always waitable (fd-backed); counters and
 * wait sets are unsupported.
 */
static int fi_ibv_trywait(struct fid_fabric *fabric, struct fid **fids,
			  int count)
{
	struct fi_ibv_cq *cq;
	int ret, i;

	for (i = 0; i < count; i++) {
		switch (fids[i]->fclass) {
		case FI_CLASS_CQ:
			cq = container_of(fids[i], struct fi_ibv_cq,
					  cq_fid.fid);
			ret = cq->trywait(fids[i]);
			if (ret)
				return ret;
			break;
		case FI_CLASS_EQ:
			/* We are always ready to wait on an EQ since
			 * rdmacm EQ is based on an fd */
			continue;
		case FI_CLASS_CNTR:
		case FI_CLASS_WAIT:
			return -FI_ENOSYS;
		default:
			return -FI_EINVAL;
		}
	}
	return FI_SUCCESS;
}

/* Close the fabric via the util layer, then free the wrapper. */
static int fi_ibv_fabric_close(fid_t fid)
{
	struct fi_ibv_fabric *fab;
	int ret;

	fab = container_of(fid, struct fi_ibv_fabric,
			   util_fabric.fabric_fid.fid);

	ret = ofi_fabric_close(&fab->util_fabric);
	if (ret)
		return ret;

	free(fab);
	return 0;
}

static struct fi_ops fi_ibv_fi_ops = {
	.size = sizeof(struct fi_ops),
	.close = fi_ibv_fabric_close,
	.bind = fi_no_bind,
	.control = fi_no_control,
	.ops_open = fi_no_ops_open,
};

static struct fi_ops_fabric fi_ibv_ops_fabric = {
	.size = sizeof(struct fi_ops_fabric),
	.domain = fi_ibv_domain,
	.passive_ep = fi_ibv_passive_ep,
	.eq_open = fi_ibv_eq_open,
	.wait_open = fi_no_wait_open,
	.trywait = fi_ibv_trywait
};

/*
 * Open the fabric: initialize the cached verbs info, then try to match the
 * requested fabric attributes against each cached entry until
 * ofi_fabric_init() either succeeds or fails with something other than
 * -FI_ENODATA.
 */
int fi_ibv_fabric(struct fi_fabric_attr *attr, struct fid_fabric **fabric,
		  void *context)
{
	struct fi_ibv_fabric *fab;
	struct fi_info *info;
	int ret;

	ret = fi_ibv_init_info();
	if (ret)
		return ret;

	fab = calloc(1, sizeof(*fab));
	if (!fab)
		return -FI_ENOMEM;

	for (info = verbs_info; info; info = info->next) {
		ret = ofi_fabric_init(&fi_ibv_prov, info->fabric_attr, attr,
				      &fab->util_fabric, context);
		if (ret != -FI_ENODATA)
			break;
	}
	if (ret) {
		free(fab);
		return ret;
	}

	*fabric = &fab->util_fabric.fabric_fid;
	(*fabric)->fid.ops = &fi_ibv_fi_ops;
	(*fabric)->ops = &fi_ibv_ops_fabric;

	return 0;
}
/* Builds a list of interfaces that correspond to active verbs devices */ static int fi_ibv_getifaddrs(struct dlist_entry *verbs_devs) { struct ifaddrs *ifaddr, *ifa; char name[INET6_ADDRSTRLEN]; struct rdma_addrinfo *rai; struct rdma_cm_id *id; const char *ret_ptr; int ret, num_verbs_ifs = 0; char *iface = NULL; size_t iface_len = 0; int exact_match = 0; ret = getifaddrs(&ifaddr); if (ret) { VERBS_WARN(FI_LOG_FABRIC, "Unable to get interface addresses\n"); return ret; } /* select best iface name based on user's input */ if (fi_param_get_str(&fi_ibv_prov, "iface", &iface) == FI_SUCCESS) { iface_len = strlen(iface); if (iface_len > IFNAMSIZ) { VERBS_INFO(FI_LOG_EP_CTRL, "Too long iface name: %s, max: %d\n", iface, IFNAMSIZ); return -FI_EINVAL; } for (ifa = ifaddr; ifa && !exact_match; ifa = ifa->ifa_next) exact_match = !strcmp(ifa->ifa_name, iface); } for (ifa = ifaddr; ifa; ifa = ifa->ifa_next) { if (!ifa->ifa_addr || !(ifa->ifa_flags & IFF_UP) || !strcmp(ifa->ifa_name, "lo")) continue; if(iface) { if(exact_match) { if(strcmp(ifa->ifa_name, iface)) continue; } else { if(strncmp(ifa->ifa_name, iface, iface_len)) continue; } } switch (ifa->ifa_addr->sa_family) { case AF_INET: ret_ptr = inet_ntop(AF_INET, &ofi_sin_addr(ifa->ifa_addr), name, INET6_ADDRSTRLEN); break; case AF_INET6: ret_ptr = inet_ntop(AF_INET6, &ofi_sin6_addr(ifa->ifa_addr), name, INET6_ADDRSTRLEN); break; default: continue; } if (!ret_ptr) { VERBS_WARN(FI_LOG_FABRIC, "inet_ntop failed: %s(%d)\n", strerror(errno), errno); goto err1; } ret = fi_ibv_create_ep(name, NULL, FI_NUMERICHOST | FI_SOURCE, NULL, &rai, &id); if (ret) continue; ret = fi_ibv_add_rai(verbs_devs, id, rai); if (ret) goto err2; VERBS_DBG(FI_LOG_FABRIC, "Found active interface for verbs device: " "%s with address: %s\n", ibv_get_device_name(id->verbs->device), name); rdma_destroy_ep(id); num_verbs_ifs++; } freeifaddrs(ifaddr); return num_verbs_ifs ? 
0 : -FI_ENODATA; err2: rdma_destroy_ep(id); err1: fi_ibv_verbs_devs_free(verbs_devs); freeifaddrs(ifaddr); return ret; }