static inline void __api_cq_setup(uint32_t version, int mr_mode) { int ret, i, j; struct fi_av_attr attr; size_t addrlen = 0; for (i = 0; i < NUMEPS; i++) { hints[i] = fi_allocinfo(); cr_assert(hints[i], "fi_allocinfo"); hints[i]->domain_attr->cq_data_size = NUMEPS * 2; hints[i]->domain_attr->data_progress = FI_PROGRESS_AUTO; hints[i]->domain_attr->mr_mode = mr_mode; hints[i]->mode = mode_bits; hints[i]->fabric_attr->prov_name = strdup("gni"); } /* Get info about fabric services with the provided hints */ for (i = 0; i < NUMEPS; i++) { ret = fi_getinfo(version, NULL, 0, 0, hints[i], &fi[i]); cr_assert(!ret, "fi_getinfo"); } memset(&attr, 0, sizeof(attr)); attr.type = FI_AV_MAP; attr.count = NUMEPS; cq_attr.format = FI_CQ_FORMAT_TAGGED; cq_attr.size = 1024; cq_attr.wait_obj = 0; /* 3x BUF_SZ for multi recv testing */ target_base = malloc(GNIT_ALIGN_LEN(BUF_SZ * 3)); assert(target_base); target = GNIT_ALIGN_BUFFER(char *, target_base); source_base = malloc(GNIT_ALIGN_LEN(BUF_SZ)); assert(source_base); source = GNIT_ALIGN_BUFFER(char *, source_base); uc_target = malloc(BUF_SZ); assert(uc_target); uc_source = malloc(BUF_SZ); assert(uc_source); ret = fi_fabric(fi[0]->fabric_attr, &fab, NULL); cr_assert(!ret, "fi_fabric"); for (i = 0; i < NUMEPS; i++) { ret = fi_domain(fab, fi[i], dom + i, NULL); cr_assert(!ret, "fi_domain"); ret = fi_open_ops(&dom[i]->fid, FI_GNI_DOMAIN_OPS_1, 0, (void **) (gni_domain_ops + i), NULL); ret = fi_av_open(dom[i], &attr, av + i, NULL); cr_assert(!ret, "fi_av_open"); ret = fi_endpoint(dom[i], fi[i], ep + i, NULL); cr_assert(!ret, "fi_endpoint"); ret = fi_cq_open(dom[i], &cq_attr, msg_cq + i, 0); cr_assert(!ret, "fi_cq_open"); ret = fi_getname(&ep[i]->fid, NULL, &addrlen); cr_assert(addrlen > 0); ep_name[i] = malloc(addrlen); cr_assert(ep_name[i] != NULL); ret = fi_getname(&ep[i]->fid, ep_name[i], &addrlen); cr_assert(ret == FI_SUCCESS); } for (i = 0; i < NUMEPS; i++) { /* Insert all gni addresses into each av */ for (j = 0; j < NUMEPS; j++) { ret = fi_av_insert(av[i], ep_name[j], 1, &gni_addr[j], 0, NULL); cr_assert(ret == 1); } ret = fi_ep_bind(ep[i], &av[i]->fid, 0); cr_assert(!ret, "fi_ep_bind"); } for (i = 0; i < NUMEPS; i++) { int target_requested_key = USING_SCALABLE(fi[i]) ? (i * 2) : 0; int source_requested_key = USING_SCALABLE(fi[i]) ? (i * 2) + 1 : 0; ret = fi_mr_reg(dom[i], target, 3 * BUF_SZ, FI_REMOTE_WRITE, 0, target_requested_key, 0, rem_mr + i, &target); cr_assert_eq(ret, 0); ret = fi_mr_reg(dom[i], source, BUF_SZ, FI_REMOTE_WRITE, 0, source_requested_key, 0, loc_mr + i, &source); cr_assert_eq(ret, 0); if (USING_SCALABLE(fi[i])) { MR_ENABLE(rem_mr[i], target, 3 * BUF_SZ); MR_ENABLE(loc_mr[i], source, BUF_SZ); } mr_key[i] = fi_mr_key(rem_mr[i]); } }
/* * rpmem_fip_init_cq -- (internal) initialize completion queue(s) */ static int rpmem_fip_init_cq(struct rpmem_fip *fip) { int ret; struct fi_cq_attr cq_attr = { .size = fip->cq_size, .flags = 0, .format = FI_CQ_FORMAT_MSG, .wait_obj = FI_WAIT_UNSPEC, .signaling_vector = 0, .wait_cond = FI_CQ_COND_NONE, .wait_set = NULL, }; ret = fi_cq_open(fip->domain, &cq_attr, &fip->cq, NULL); if (ret) { RPMEM_FI_ERR(ret, "opening completion queue"); goto err_cq_open; } return 0; err_cq_open: return -1; } /* * rpmem_fip_fini_cq -- (internal) deinitialize completion queue(s) */ static int rpmem_fip_fini_cq(struct rpmem_fip *fip) { return RPMEM_FI_CLOSE(fip->cq, "closing completion queue"); } /* * rpmem_fip_init_ep -- (internal) initialize endpoint */ static int rpmem_fip_init_ep(struct rpmem_fip *fip) { int ret; /* create an endpoint */ ret = fi_endpoint(fip->domain, fip->fi, &fip->ep, NULL); if (ret) { RPMEM_FI_ERR(ret, "allocating endpoint"); goto err_endpoint; } /* * Bind an event queue to an endpoint to get * connection-related events for the endpoint. */ ret = fi_ep_bind(fip->ep, &fip->eq->fid, 0); if (ret) { RPMEM_FI_ERR(ret, "binding event queue to endpoint"); goto err_ep_bind_eq; } /* * Bind a completion queue to an endpoint to get completion * events of specified inbound/outbound operations. * * FI_SELECTIVE_COMPLETION means all inbound/outbound operations * must explicitly specify if the completion event should be * generated or not using FI_COMPLETION flag. * * The completion events received are highly related to the * persistency method used and are configured in lanes * initialization specified for persistency method utilized. */ ret = fi_ep_bind(fip->ep, &fip->cq->fid, FI_RECV | FI_TRANSMIT | FI_SELECTIVE_COMPLETION); if (ret) { RPMEM_FI_ERR(ret, "binding completion queue to endpoint"); goto err_ep_bind_cq; } /* * Enable endpoint so it is possible to post inbound/outbound * operations if required. */ ret = fi_enable(fip->ep); if (ret) { RPMEM_FI_ERR(ret, "activating endpoint"); goto err_fi_enable; } return 0; err_fi_enable: err_ep_bind_cq: err_ep_bind_eq: err_endpoint: return ret; }
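/*
 * Hedged usage sketch (not from the original source): the CQ above is bound
 * with FI_SELECTIVE_COMPLETION, so completions arrive only for operations
 * posted with FI_COMPLETION.  Draining it could look roughly like this; it
 * reuses fip->cq and the RPMEM_FI_ERR macro from the surrounding code, and
 * the 1000 ms timeout is an illustrative value.
 */
static int
rpmem_fip_drain_cq_sketch(struct rpmem_fip *fip, size_t nexp)
{
	struct fi_cq_msg_entry entry; /* matches FI_CQ_FORMAT_MSG above */
	struct fi_cq_err_entry err;
	ssize_t ret;

	while (nexp) {
		/* block on the CQ's wait object for up to 1000 ms */
		ret = fi_cq_sread(fip->cq, &entry, 1, NULL, 1000);
		if (ret == 1) {
			nexp--;
			continue;
		}
		if (ret == -FI_EAGAIN)
			continue; /* timed out, retry */
		if (ret == -FI_EAVAIL) {
			/* an error completion is pending; pull it off */
			fi_cq_readerr(fip->cq, &err, 0);
			return -1;
		}
		RPMEM_FI_ERR((int)ret, "reading completion queue");
		return -1;
	}

	return 0;
}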
static mca_mtl_base_module_t* ompi_mtl_ofi_component_init(bool enable_progress_threads, bool enable_mpi_threads) { int ret, fi_version; struct fi_info *hints; struct fi_info *providers = NULL, *prov = NULL; struct fi_cq_attr cq_attr = {0}; struct fi_av_attr av_attr = {0}; char ep_name[FI_NAME_MAX] = {0}; size_t namelen; /** * Hints to filter providers * See man fi_getinfo for a list of all filters * mode: Select capabilities MTL is prepared to support. * In this case, MTL will pass in context into communication calls * ep_type: reliable datagram operation * caps: Capabilities required from the provider. * Tag matching is specified to implement MPI semantics. * msg_order: Guarantee that messages with same tag are ordered. */ hints = fi_allocinfo(); if (!hints) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: Could not allocate fi_info\n", __FILE__, __LINE__); goto error; } hints->mode = FI_CONTEXT; hints->ep_attr->type = FI_EP_RDM; /* Reliable datagram */ hints->caps = FI_TAGGED; /* Tag matching interface */ hints->tx_attr->msg_order = FI_ORDER_SAS; hints->rx_attr->msg_order = FI_ORDER_SAS; hints->domain_attr->threading = FI_THREAD_UNSPEC; if (MTL_OFI_PROG_AUTO == control_progress) { hints->domain_attr->control_progress = FI_PROGRESS_AUTO; } else { hints->domain_attr->control_progress = FI_PROGRESS_MANUAL; } if (MTL_OFI_PROG_MANUAL == data_progress) { hints->domain_attr->data_progress = FI_PROGRESS_MANUAL; } else { hints->domain_attr->data_progress = FI_PROGRESS_AUTO; } if (MTL_OFI_AV_TABLE == av_type) { hints->domain_attr->av_type = FI_AV_TABLE; } else { hints->domain_attr->av_type = FI_AV_MAP; } hints->domain_attr->resource_mgmt = FI_RM_ENABLED; /** * FI_VERSION provides binary backward and forward compatibility support * Specify the version of OFI is coded to, the provider will select struct * layouts that are compatible with this version. */ fi_version = FI_VERSION(1, 0); /** * fi_getinfo: returns information about fabric services for reaching a * remote node or service. this does not necessarily allocate resources. * Pass NULL for name/service because we want a list of providers supported. */ ret = fi_getinfo(fi_version, /* OFI version requested */ NULL, /* Optional name or fabric to resolve */ NULL, /* Optional service name or port to request */ 0ULL, /* Optional flag */ hints, /* In: Hints to filter providers */ &providers); /* Out: List of matching providers */ if (0 != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_getinfo failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * Select a provider from the list returned by fi_getinfo(). */ prov = select_ofi_provider(providers); if (!prov) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: select_ofi_provider: no provider found\n", __FILE__, __LINE__); goto error; } /** * Open fabric * The getinfo struct returns a fabric attribute struct that can be used to * instantiate the virtual or physical network. This opens a "fabric * provider". See man fi_fabric for details. */ ret = fi_fabric(prov->fabric_attr, /* In: Fabric attributes */ &ompi_mtl_ofi.fabric, /* Out: Fabric handle */ NULL); /* Optional context for fabric events */ if (0 != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_fabric failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * Create the access domain, which is the physical or virtual network or * hardware port/collection of ports. 
Returns a domain object that can be * used to create endpoints. See man fi_domain for details. */ ret = fi_domain(ompi_mtl_ofi.fabric, /* In: Fabric object */ prov, /* In: Provider */ &ompi_mtl_ofi.domain, /* Out: Domain object */ NULL); /* Optional context for domain events */ if (0 != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_domain failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * Create a transport-level communication endpoint. To use the endpoint, * it must be bound to the resources it consumes, such as address vectors, * counters, and completion queues, and then enabled. * See man fi_endpoint for more details. */ ret = fi_endpoint(ompi_mtl_ofi.domain, /* In: Domain object */ prov, /* In: Provider */ &ompi_mtl_ofi.ep, /* Out: Endpoint object */ NULL); /* Optional context */ if (0 != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_endpoint failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * Save the maximum inject size. */ ompi_mtl_ofi.max_inject_size = prov->tx_attr->inject_size; /** * Create the objects that will be bound to the endpoint. * The objects include: * - completion queue for events * - address vector of other endpoint addresses * - dynamic memory-spanning memory region */ cq_attr.format = FI_CQ_FORMAT_TAGGED; ret = fi_cq_open(ompi_mtl_ofi.domain, &cq_attr, &ompi_mtl_ofi.cq, NULL); if (ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_cq_open failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * The remote fi_addr will be stored in the ofi_endpoint struct. */ av_attr.type = (MTL_OFI_AV_TABLE == av_type) ? FI_AV_TABLE: FI_AV_MAP; ret = fi_av_open(ompi_mtl_ofi.domain, &av_attr, &ompi_mtl_ofi.av, NULL); if (ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_av_open failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * Bind the CQ and AV to the endpoint object. */ ret = fi_ep_bind(ompi_mtl_ofi.ep, (fid_t)ompi_mtl_ofi.cq, FI_SEND | FI_RECV); if (0 != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_ep_bind CQ-EP failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } ret = fi_ep_bind(ompi_mtl_ofi.ep, (fid_t)ompi_mtl_ofi.av, 0); if (0 != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_ep_bind AV-EP failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * Enable the endpoint for communication. * This commits the bind operations. */ ret = fi_enable(ompi_mtl_ofi.ep); if (0 != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_enable failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } /** * Free the hints and providers info since they are no longer needed. */ fi_freeinfo(hints); hints = NULL; fi_freeinfo(providers); providers = NULL; /** * Get our address and publish it with modex.
*/ namelen = sizeof(ep_name); ret = fi_getname((fid_t)ompi_mtl_ofi.ep, &ep_name[0], &namelen); if (ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: fi_getname failed: %s\n", __FILE__, __LINE__, fi_strerror(-ret)); goto error; } OFI_COMPAT_MODEX_SEND(ret, &mca_mtl_ofi_component.super.mtl_version, &ep_name, namelen); if (OMPI_SUCCESS != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: modex_send failed: %d\n", __FILE__, __LINE__, ret); goto error; } ompi_mtl_ofi.epnamelen = namelen; /** * Set the ANY_SRC address. */ ompi_mtl_ofi.any_addr = FI_ADDR_UNSPEC; /** * Activate progress callback. */ ret = opal_progress_register(ompi_mtl_ofi_progress_no_inline); if (OMPI_SUCCESS != ret) { opal_output_verbose(1, ompi_mtl_base_framework.framework_output, "%s:%d: opal_progress_register failed: %d\n", __FILE__, __LINE__, ret); goto error; } return &ompi_mtl_ofi.base; error: if (providers) { (void) fi_freeinfo(providers); } if (hints) { (void) fi_freeinfo(hints); } if (ompi_mtl_ofi.av) { (void) fi_close((fid_t)ompi_mtl_ofi.av); } if (ompi_mtl_ofi.cq) { (void) fi_close((fid_t)ompi_mtl_ofi.cq); } if (ompi_mtl_ofi.ep) { (void) fi_close((fid_t)ompi_mtl_ofi.ep); } if (ompi_mtl_ofi.domain) { (void) fi_close((fid_t)ompi_mtl_ofi.domain); } if (ompi_mtl_ofi.fabric) { (void) fi_close((fid_t)ompi_mtl_ofi.fabric); } return NULL; }
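/*
 * Illustrative sketch only, not the MTL's actual progress routine: the CQ
 * opened above uses FI_CQ_FORMAT_TAGGED, so each completion carries the tag,
 * length and operation context.  A progress pass could drain it roughly as
 * follows, using the ompi_mtl_ofi.cq handle created above.
 */
static int
ompi_mtl_ofi_cq_drain_sketch(void)
{
    struct fi_cq_tagged_entry wc[8];
    struct fi_cq_err_entry error;
    ssize_t count;
    int events = 0;

    for (;;) {
        count = fi_cq_read(ompi_mtl_ofi.cq, wc, 8);
        if (count > 0) {
            /* each wc[i].op_context / wc[i].tag / wc[i].len describes one
             * completed tagged operation */
            events += (int) count;
        } else if (-FI_EAGAIN == count) {
            break;                       /* CQ empty */
        } else if (-FI_EAVAIL == count) {
            if (fi_cq_readerr(ompi_mtl_ofi.cq, &error, 0) > 0) {
                opal_output_verbose(1, ompi_mtl_base_framework.framework_output,
                                    "%s:%d: completion error: %s\n",
                                    __FILE__, __LINE__, fi_strerror(error.err));
                return -1;
            }
        } else {
            return (int) count;          /* unexpected CQ failure */
        }
    }
    return events;
}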
void rdm_sr_setup_common_eps(void) { int ret = 0, i = 0, j = 0; struct fi_av_attr attr; size_t addrlen = 0; attr.type = FI_AV_MAP; attr.count = NUMEPS; cq_attr.format = FI_CQ_FORMAT_TAGGED; cq_attr.size = 1024; cq_attr.wait_obj = 0; target = malloc(BUF_SZ * 3); /* 3x BUF_SZ for multi recv testing */ assert(target); source = malloc(BUF_SZ); assert(source); uc_target = malloc(BUF_SZ); assert(uc_target); uc_source = malloc(BUF_SZ); assert(uc_source); ret = fi_fabric(fi[0]->fabric_attr, &fab, NULL); cr_assert(!ret, "fi_fabric"); for (; i < NUMEPS; i++) { ret = fi_domain(fab, fi[i], dom + i, NULL); cr_assert(!ret, "fi_domain"); ret = fi_open_ops(&dom[i]->fid, FI_GNI_DOMAIN_OPS_1, 0, (void **) (gni_domain_ops + i), NULL); ret = fi_av_open(dom[i], &attr, av + i, NULL); cr_assert(!ret, "fi_av_open"); ret = fi_endpoint(dom[i], fi[i], ep + i, NULL); cr_assert(!ret, "fi_endpoint"); ret = fi_cq_open(dom[i], &cq_attr, msg_cq + i, 0); cr_assert(!ret, "fi_cq_open"); ret = fi_ep_bind(ep[i], &msg_cq[i]->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ret = fi_getname(&ep[i]->fid, NULL, &addrlen); cr_assert(addrlen > 0); ep_name[i] = malloc(addrlen); cr_assert(ep_name[i] != NULL); ret = fi_getname(&ep[i]->fid, ep_name[i], &addrlen); cr_assert(ret == FI_SUCCESS); } for (i = 0; i < NUMEPS; i++) { /* Insert all gni addresses into each av */ for (j = 0; j < NUMEPS; j++) { ret = fi_av_insert(av[i], ep_name[j], 1, &gni_addr[j], 0, NULL); cr_assert(ret == 1); } ret = fi_ep_bind(ep[i], &av[i]->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(ep[i]); cr_assert(!ret, "fi_ep_enable"); ret = fi_cntr_open(dom[i], &cntr_attr, send_cntr + i, 0); cr_assert(!ret, "fi_cntr_open"); ret = fi_ep_bind(ep[i], &send_cntr[i]->fid, FI_SEND); cr_assert(!ret, "fi_ep_bind"); ret = fi_cntr_open(dom[i], &cntr_attr, recv_cntr + i, 0); cr_assert(!ret, "fi_cntr_open"); ret = fi_ep_bind(ep[i], &recv_cntr[i]->fid, FI_RECV); cr_assert(!ret, "fi_ep_bind"); } }
static int alloc_ep_res(struct fi_info *fi) { struct fi_cq_attr cq_attr; struct fi_av_attr av_attr; int ret; buffer_size = opts.user_options & FT_OPT_SIZE ? opts.transfer_size : test_size[TEST_CNT - 1].size; if (max_msg_size > 0 && buffer_size > max_msg_size) { buffer_size = max_msg_size; } if (buffer_size < fi->src_addrlen) { buffer_size = fi->src_addrlen; } buffer_size += prefix_len; buf = malloc(buffer_size); if (!buf) { perror("malloc"); return -1; } buf_ptr = (char *)buf + prefix_len; memset(&cq_attr, 0, sizeof cq_attr); cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_NONE; cq_attr.size = max_credits << 1; ret = fi_cq_open(dom, &cq_attr, &scq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err1; } ret = fi_cq_open(dom, &cq_attr, &rcq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err2; } ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL); if (ret) { FT_PRINTERR("fi_mr_reg", ret); goto err3; } memset(&av_attr, 0, sizeof(av_attr)); av_attr.type = fi->domain_attr->av_type ? fi->domain_attr->av_type : FI_AV_MAP; av_attr.name = NULL; av_attr.flags = 0; ret = fi_av_open(dom, &av_attr, &av, NULL); if (ret) { FT_PRINTERR("fi_av_open", ret); goto err4; } ret = fi_endpoint(dom, fi, &ep, NULL); if (ret) { FT_PRINTERR("fi_endpoint", ret); goto err5; } return 0; err5: fi_close(&av->fid); err4: fi_close(&mr->fid); err3: fi_close(&rcq->fid); err2: fi_close(&scq->fid); err1: free(buf); return ret; }
static int alloc_ep_res(struct fi_info *fi) { struct fi_cq_attr cq_attr; struct epoll_event event; int ret, fd; buf = malloc(buffer_size); if (!buf) { perror("malloc"); return -1; } memset(&cq_attr, 0, sizeof cq_attr); cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_FD; cq_attr.size = rx_depth; /* Open completion queue for send completions */ ret = fi_cq_open(dom, &cq_attr, &scq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err1; } /* Open completion queue for recv completions */ ret = fi_cq_open(dom, &cq_attr, &rcq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err2; } /* Create epoll set */ epfd = epoll_create1(0); if (epfd < 0) { ret = -errno; FT_PRINTERR("epoll_create1", ret); goto err3; } /* Retrieve receive queue wait object */ ret = fi_control (&rcq->fid, FI_GETWAIT, (void *) &fd); if (ret) { FT_PRINTERR("fi_control(FI_GETWAIT)", ret); goto err4; } /* Add receive queue wait object to epoll set */ memset((void *)&event, 0, sizeof event); event.events = EPOLLIN; event.data.ptr = (void *)&rcq->fid; ret = epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event); if (ret) { ret = -errno; FT_PRINTERR("epoll_ctl", ret); goto err4; } /* Retrieve send queue wait object */ ret = fi_control (&scq->fid, FI_GETWAIT, (void *) &fd); if (ret) { FT_PRINTERR("fi_control(FI_GETWAIT)", ret); goto err4; } /* Add send queue wait object to epoll set */ memset((void *)&event, 0, sizeof event); event.events = EPOLLIN; event.data.ptr = (void *)&scq->fid; ret = epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event); if (ret) { ret = -errno; FT_PRINTERR("epoll_ctl", ret); goto err4; } /* Register memory */ ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL); if (ret) { FT_PRINTERR("fi_mr_reg", ret); goto err4; } ret = fi_endpoint(dom, fi, &ep, NULL); if (ret) { FT_PRINTERR("fi_endpoint", ret); goto err5; } return 0; err5: fi_close(&mr->fid); err4: close(epfd); err3: fi_close(&rcq->fid); err2: fi_close(&scq->fid); err1: free(buf); return ret; }
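/*
 * A hedged sketch of how the epoll set built above might be used: block on
 * epfd until one of the CQs signals its FI_WAIT_FD wait object, then drain
 * that CQ with non-blocking fi_cq_read calls.  epfd, scq and rcq are the
 * handles created in alloc_ep_res(); everything else is illustrative.
 */
static int wait_and_drain_cq_sketch(void)
{
	struct epoll_event events[2];
	struct fi_cq_entry comp; /* matches FI_CQ_FORMAT_CONTEXT above */
	ssize_t ret;
	int i, n;

	n = epoll_wait(epfd, events, 2, -1);
	if (n < 0)
		return -errno;

	for (i = 0; i < n; i++) {
		/* event.data.ptr was set to &rcq->fid or &scq->fid above */
		struct fid_cq *cq = (events[i].data.ptr == &rcq->fid) ? rcq : scq;

		do {
			ret = fi_cq_read(cq, &comp, 1);
			/* comp.op_context identifies the completed operation */
		} while (ret == 1);

		if (ret != -FI_EAGAIN)
			return (int) ret;
	}

	return 0;
}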
void rdm_api_setup_ep(void) { int ret, i, j; struct fi_av_attr attr; size_t addrlen = 0; /* Get info about fabric services with the provided hints */ for (i = 0; i < NUMEPS; i++) { ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hints[i], &fi[i]); cr_assert(!ret, "fi_getinfo"); } attr.type = FI_AV_MAP; attr.count = NUMEPS; cq_attr.format = FI_CQ_FORMAT_TAGGED; cq_attr.size = 1024; cq_attr.wait_obj = 0; target = malloc(BUF_SZ * 3); /* 3x BUF_SZ for multi recv testing */ assert(target); source = malloc(BUF_SZ); assert(source); uc_target = malloc(BUF_SZ); assert(uc_target); uc_source = malloc(BUF_SZ); assert(uc_source); ret = fi_fabric(fi[0]->fabric_attr, &fab, NULL); cr_assert(!ret, "fi_fabric"); for (i = 0; i < NUMEPS; i++) { ret = fi_domain(fab, fi[i], dom + i, NULL); cr_assert(!ret, "fi_domain"); ret = fi_open_ops(&dom[i]->fid, FI_GNI_DOMAIN_OPS_1, 0, (void **) (gni_domain_ops + i), NULL); ret = fi_av_open(dom[i], &attr, av + i, NULL); cr_assert(!ret, "fi_av_open"); ret = fi_endpoint(dom[i], fi[i], ep + i, NULL); cr_assert(!ret, "fi_endpoint"); ret = fi_cq_open(dom[i], &cq_attr, msg_cq + i, 0); cr_assert(!ret, "fi_cq_open"); ret = fi_ep_bind(ep[i], &msg_cq[i]->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ret = fi_getname(&ep[i]->fid, NULL, &addrlen); cr_assert(addrlen > 0); ep_name[i] = malloc(addrlen); cr_assert(ep_name[i] != NULL); ret = fi_getname(&ep[i]->fid, ep_name[i], &addrlen); cr_assert(ret == FI_SUCCESS); } for (i = 0; i < NUMEPS; i++) { /* Insert all gni addresses into each av */ for (j = 0; j < NUMEPS; j++) { ret = fi_av_insert(av[i], ep_name[j], 1, &gni_addr[j], 0, NULL); cr_assert(ret == 1); } ret = fi_ep_bind(ep[i], &av[i]->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(ep[i]); cr_assert(!ret, "fi_ep_enable"); ret = fi_cntr_open(dom[i], &cntr_attr, send_cntr + i, 0); cr_assert(!ret, "fi_cntr_open"); ret = fi_ep_bind(ep[i], &send_cntr[i]->fid, FI_SEND); cr_assert(!ret, "fi_ep_bind"); ret = fi_cntr_open(dom[i], &cntr_attr, recv_cntr + i, 0); cr_assert(!ret, "fi_cntr_open"); ret = fi_ep_bind(ep[i], &recv_cntr[i]->fid, FI_RECV); cr_assert(!ret, "fi_ep_bind"); } for (i = 0; i < NUMEPS; i++) { ret = fi_mr_reg(dom[i], target, 3 * BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, rem_mr + i, &target); cr_assert_eq(ret, 0); ret = fi_mr_reg(dom[i], source, BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, loc_mr + i, &source); cr_assert_eq(ret, 0); mr_key[i] = fi_mr_key(rem_mr[i]); } }
void rdm_sr_setup(void) { int ret = 0; struct fi_av_attr attr; size_t addrlen = 0; hints = fi_allocinfo(); cr_assert(hints, "fi_allocinfo"); hints->domain_attr->cq_data_size = 4; hints->mode = ~0; hints->fabric_attr->name = strdup("gni"); ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hints, &fi); cr_assert(!ret, "fi_getinfo"); ret = fi_fabric(fi->fabric_attr, &fab, NULL); cr_assert(!ret, "fi_fabric"); ret = fi_domain(fab, fi, &dom, NULL); cr_assert(!ret, "fi_domain"); attr.type = FI_AV_MAP; attr.count = 16; ret = fi_av_open(dom, &attr, &av, NULL); cr_assert(!ret, "fi_av_open"); ret = fi_endpoint(dom, fi, &ep[0], NULL); cr_assert(!ret, "fi_endpoint"); cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.size = 1024; cq_attr.wait_obj = 0; ret = fi_cq_open(dom, &cq_attr, &msg_cq[0], 0); cr_assert(!ret, "fi_cq_open"); ret = fi_cq_open(dom, &cq_attr, &msg_cq[1], 0); cr_assert(!ret, "fi_cq_open"); ret = fi_ep_bind(ep[0], &msg_cq[0]->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ret = fi_getname(&ep[0]->fid, NULL, &addrlen); cr_assert(addrlen > 0); ep_name[0] = malloc(addrlen); cr_assert(ep_name[0] != NULL); ret = fi_getname(&ep[0]->fid, ep_name[0], &addrlen); cr_assert(ret == FI_SUCCESS); ret = fi_endpoint(dom, fi, &ep[1], NULL); cr_assert(!ret, "fi_endpoint"); ret = fi_ep_bind(ep[1], &msg_cq[1]->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ep_name[1] = malloc(addrlen); cr_assert(ep_name[1] != NULL); ret = fi_getname(&ep[1]->fid, ep_name[1], &addrlen); cr_assert(ret == FI_SUCCESS); ret = fi_av_insert(av, ep_name[0], 1, &gni_addr[0], 0, NULL); cr_assert(ret == 1); ret = fi_av_insert(av, ep_name[1], 1, &gni_addr[1], 0, NULL); cr_assert(ret == 1); ret = fi_ep_bind(ep[0], &av->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_ep_bind(ep[1], &av->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(ep[0]); cr_assert(!ret, "fi_ep_enable"); ret = fi_enable(ep[1]); cr_assert(!ret, "fi_ep_enable"); target = malloc(BUF_SZ); assert(target); source = malloc(BUF_SZ); assert(source); ret = fi_mr_reg(dom, target, BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, &rem_mr, &target); cr_assert_eq(ret, 0); ret = fi_mr_reg(dom, source, BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, &loc_mr, &source); cr_assert_eq(ret, 0); mr_key = fi_mr_key(rem_mr); }
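/*
 * For symmetry, a hedged sketch of the matching teardown for rdm_sr_setup()
 * above (not taken from the original test suite): children are closed before
 * their parents -- MRs and endpoints before CQs and the AV, those before the
 * domain, and the domain before the fabric -- then the fi_info structures and
 * data buffers are released.
 */
static void rdm_sr_teardown_sketch(void)
{
	int ret, i;

	ret = fi_close(&loc_mr->fid);
	cr_assert(!ret, "fi_close loc_mr");
	ret = fi_close(&rem_mr->fid);
	cr_assert(!ret, "fi_close rem_mr");

	for (i = 0; i < 2; i++) {
		ret = fi_close(&ep[i]->fid);
		cr_assert(!ret, "fi_close ep");
		ret = fi_close(&msg_cq[i]->fid);
		cr_assert(!ret, "fi_close cq");
		free(ep_name[i]);
	}

	ret = fi_close(&av->fid);
	cr_assert(!ret, "fi_close av");
	ret = fi_close(&dom->fid);
	cr_assert(!ret, "fi_close domain");
	ret = fi_close(&fab->fid);
	cr_assert(!ret, "fi_close fabric");

	fi_freeinfo(fi);
	fi_freeinfo(hints);
	free(target);
	free(source);
}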
static int alloc_ep_res(struct fi_info *fi) { struct fi_cq_attr cq_attr; uint64_t access_mode; int ret; buffer_size = opts.user_options & FT_OPT_SIZE ? opts.transfer_size : test_size[TEST_CNT - 1].size; buf = malloc(MAX(buffer_size, sizeof(uint64_t))); if (!buf) { perror("malloc"); return -1; } memset(&cq_attr, 0, sizeof cq_attr); cq_attr.format = FI_CQ_FORMAT_DATA; cq_attr.wait_obj = FI_WAIT_NONE; cq_attr.size = max_credits << 1; ret = fi_cq_open(dom, &cq_attr, &scq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err1; } ret = fi_cq_open(dom, &cq_attr, &rcq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err2; } switch (op_type) { case FT_RMA_READ: access_mode = FI_REMOTE_READ; break; case FT_RMA_WRITE: case FT_RMA_WRITEDATA: access_mode = FI_REMOTE_WRITE; break; default: assert(0); ret = -FI_EINVAL; goto err3; } ret = fi_mr_reg(dom, buf, MAX(buffer_size, sizeof(uint64_t)), access_mode, 0, 0, 0, &mr, NULL); if (ret) { FT_PRINTERR("fi_mr_reg", ret); goto err3; } if (!cmeq) { ret = alloc_cm_res(); if (ret) goto err4; } return 0; err4: fi_close(&mr->fid); err3: fi_close(&rcq->fid); err2: fi_close(&scq->fid); err1: free(buf); return ret; }
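/*
 * Hedged follow-on sketch: once the buffer above is registered with the
 * access mode selected for op_type, the peer's virtual address, MR key and
 * fi_addr must be exchanged out of band before RMA can be issued.  The write
 * side could then look like this; remote_addr, remote_key and remote_fi_addr
 * are assumed to come from that exchange.
 */
static int post_rma_write_sketch(uint64_t remote_addr, uint64_t remote_key,
				 fi_addr_t remote_fi_addr, size_t len)
{
	ssize_t ret;

	ret = fi_write(ep, buf, len, fi_mr_desc(mr),
		       remote_fi_addr, remote_addr, remote_key, NULL);
	if (ret) {
		FT_PRINTERR("fi_write", (int) ret);
		return (int) ret;
	}

	/* the completion is reported on scq, opened above with
	 * FI_CQ_FORMAT_DATA */
	return 0;
}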
/* mca_btl_ofi_context_alloc_scalable() * * This function allocates communication contexts and returns a pointer * to the first BTL context. It also takes care of all the bindings needed. * USE WITH SCALABLE ENDPOINT ONLY */ mca_btl_ofi_context_t *mca_btl_ofi_context_alloc_scalable(struct fi_info *info, struct fid_domain *domain, struct fid_ep *sep, struct fid_av *av, size_t num_contexts) { BTL_VERBOSE(("creating %zu contexts", num_contexts)); int rc; size_t i; char *linux_device_name = info->domain_attr->name; struct fi_cq_attr cq_attr = {0}; struct fi_tx_attr tx_attr = {0}; struct fi_rx_attr rx_attr = {0}; mca_btl_ofi_context_t *contexts; tx_attr.op_flags = FI_DELIVERY_COMPLETE; contexts = (mca_btl_ofi_context_t*) calloc(num_contexts, sizeof(*contexts)); if (NULL == contexts) { BTL_VERBOSE(("cannot allocate communication contexts.")); return NULL; } /* Don't really need to check, just avoiding a compiler warning because * BTL_VERBOSE is a no-op in performance builds and the compiler will * complain about an unused variable. */ if (NULL == linux_device_name) { BTL_VERBOSE(("linux device name is NULL. This shouldn't happen.")); goto scalable_fail; } /* bind AV to endpoint */ rc = fi_scalable_ep_bind(sep, (fid_t)av, 0); if (0 != rc) { BTL_VERBOSE(("%s failed fi_scalable_ep_bind with err=%s", linux_device_name, fi_strerror(-rc) )); goto scalable_fail; } for (i=0; i < num_contexts; i++) { rc = fi_tx_context(sep, i, &tx_attr, &contexts[i].tx_ctx, NULL); if (0 != rc) { BTL_VERBOSE(("%s failed fi_tx_context with err=%s", linux_device_name, fi_strerror(-rc) )); goto scalable_fail; } /* We don't actually need a receiving context as we only do one-sided. * However, the sockets provider will hang if we don't have one. It is * also nice to have an equal number of tx/rx contexts. */ rc = fi_rx_context(sep, i, &rx_attr, &contexts[i].rx_ctx, NULL); if (0 != rc) { BTL_VERBOSE(("%s failed fi_rx_context with err=%s", linux_device_name, fi_strerror(-rc) )); goto scalable_fail; } /* create CQ */ cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_NONE; rc = fi_cq_open(domain, &cq_attr, &contexts[i].cq, NULL); if (0 != rc) { BTL_VERBOSE(("%s failed fi_cq_open with err=%s", linux_device_name, fi_strerror(-rc) )); goto scalable_fail; } /* bind cq to transmit context */ uint32_t cq_flags = (FI_TRANSMIT); rc = fi_ep_bind(contexts[i].tx_ctx, (fid_t)contexts[i].cq, cq_flags); if (0 != rc) { BTL_VERBOSE(("%s failed fi_ep_bind with err=%s", linux_device_name, fi_strerror(-rc) )); goto scalable_fail; } /* enable the context. */ rc = fi_enable(contexts[i].tx_ctx); if (0 != rc) { BTL_VERBOSE(("%s failed fi_enable with err=%s", linux_device_name, fi_strerror(-rc) )); goto scalable_fail; } rc = fi_enable(contexts[i].rx_ctx); if (0 != rc) { BTL_VERBOSE(("%s failed fi_enable with err=%s", linux_device_name, fi_strerror(-rc) )); goto scalable_fail; } /* initialize completion freelist. */ rc = ofi_comp_list_init(&contexts[i].comp_list); if (rc != OPAL_SUCCESS) { goto scalable_fail; } /* assign the id */ contexts[i].context_id = i; } return contexts; scalable_fail: /* close and free */ for(i=0; i < num_contexts; i++) { mca_btl_ofi_context_finalize(&contexts[i], true); } free(contexts); return NULL; }
/* mca_btl_ofi_context_alloc_normal() * * This function allocates an ofi_context, maps the endpoint to the tx/rx * context, binds the CQ and AV to the endpoint, and initializes all the * structures. * USE WITH NORMAL ENDPOINT ONLY */ mca_btl_ofi_context_t *mca_btl_ofi_context_alloc_normal(struct fi_info *info, struct fid_domain *domain, struct fid_ep *ep, struct fid_av *av) { int rc; uint32_t cq_flags = FI_TRANSMIT; char *linux_device_name = info->domain_attr->name; struct fi_cq_attr cq_attr = {0}; mca_btl_ofi_context_t *context; context = (mca_btl_ofi_context_t*) calloc(1, sizeof(*context)); if (NULL == context) { BTL_VERBOSE(("cannot allocate context")); return NULL; } /* Don't really need to check, just avoiding a compiler warning because * BTL_VERBOSE is a no-op in performance builds and the compiler will * complain about an unused variable. */ if (NULL == linux_device_name) { BTL_VERBOSE(("linux device name is NULL. This shouldn't happen.")); goto single_fail; } cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_NONE; rc = fi_cq_open(domain, &cq_attr, &context->cq, NULL); if (0 != rc) { BTL_VERBOSE(("%s failed fi_cq_open with err=%s", linux_device_name, fi_strerror(-rc) )); goto single_fail; } rc = fi_ep_bind(ep, (fid_t)av, 0); if (0 != rc) { BTL_VERBOSE(("%s failed fi_ep_bind with err=%s", linux_device_name, fi_strerror(-rc) )); goto single_fail; } rc = fi_ep_bind(ep, (fid_t)context->cq, cq_flags); if (0 != rc) { BTL_VERBOSE(("%s failed fi_ep_bind with err=%s", linux_device_name, fi_strerror(-rc) )); goto single_fail; } rc = ofi_comp_list_init(&context->comp_list); if (rc != OPAL_SUCCESS) { goto single_fail; } context->tx_ctx = ep; context->rx_ctx = ep; context->context_id = 0; return context; single_fail: mca_btl_ofi_context_finalize(context, false); return NULL; }
int ft_alloc_ep_res(struct fi_info *fi) { int ret; if (hints->caps & FI_RMA) { ret = ft_set_rma_caps(fi, opts.rma_op); if (ret) return ret; } ret = ft_alloc_msgs(); if (ret) return ret; if (cq_attr.format == FI_CQ_FORMAT_UNSPEC) { if (fi->caps & FI_TAGGED) cq_attr.format = FI_CQ_FORMAT_TAGGED; else cq_attr.format = FI_CQ_FORMAT_CONTEXT; } if (opts.options & FT_OPT_TX_CQ) { ft_cq_set_wait_attr(); cq_attr.size = fi->tx_attr->size; ret = fi_cq_open(domain, &cq_attr, &txcq, &txcq); if (ret) { FT_PRINTERR("fi_cq_open", ret); return ret; } } if (opts.options & FT_OPT_TX_CNTR) { ft_cntr_set_wait_attr(); ret = fi_cntr_open(domain, &cntr_attr, &txcntr, &txcntr); if (ret) { FT_PRINTERR("fi_cntr_open", ret); return ret; } } if (opts.options & FT_OPT_RX_CQ) { ft_cq_set_wait_attr(); cq_attr.size = fi->rx_attr->size; ret = fi_cq_open(domain, &cq_attr, &rxcq, &rxcq); if (ret) { FT_PRINTERR("fi_cq_open", ret); return ret; } } if (opts.options & FT_OPT_RX_CNTR) { ft_cntr_set_wait_attr(); ret = fi_cntr_open(domain, &cntr_attr, &rxcntr, &rxcntr); if (ret) { FT_PRINTERR("fi_cntr_open", ret); return ret; } } if (fi->ep_attr->type == FI_EP_RDM || fi->ep_attr->type == FI_EP_DGRAM) { if (fi->domain_attr->av_type != FI_AV_UNSPEC) av_attr.type = fi->domain_attr->av_type; if (opts.av_name) { av_attr.name = opts.av_name; } ret = fi_av_open(domain, &av_attr, &av, NULL); if (ret) { FT_PRINTERR("fi_av_open", ret); return ret; } } return 0; }
void sep_setup_common(int av_type) { int ret, i, j; struct fi_av_attr av_attr = {0}; size_t addrlen = 0; hints = fi_allocinfo(); cr_assert(hints, "fi_allocinfo"); hints->ep_attr->type = FI_EP_RDM; hints->caps = FI_ATOMIC | FI_RMA | FI_MSG | FI_NAMED_RX_CTX; hints->mode = FI_LOCAL_MR; hints->domain_attr->cq_data_size = NUMEPS * 2; hints->domain_attr->data_progress = FI_PROGRESS_AUTO; hints->domain_attr->mr_mode = FI_MR_BASIC; hints->fabric_attr->prov_name = strdup("gni"); hints->ep_attr->tx_ctx_cnt = ctx_cnt; hints->ep_attr->rx_ctx_cnt = ctx_cnt; for (i = 0; i < NUMEPS; i++) { ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hints, &fi[i]); cr_assert(!ret, "fi_getinfo"); tx_cq[i] = calloc(ctx_cnt, sizeof(*tx_cq)); rx_cq[i] = calloc(ctx_cnt, sizeof(*rx_cq)); tx_ep[i] = calloc(ctx_cnt, sizeof(*tx_ep)); rx_ep[i] = calloc(ctx_cnt, sizeof(*rx_ep)); if (!tx_cq[i] || !tx_cq[i] || !tx_ep[i] || !rx_ep[i]) { cr_assert(0, "calloc"); } } ctx_cnt = MIN(ctx_cnt, fi[0]->domain_attr->rx_ctx_cnt); ctx_cnt = MIN(ctx_cnt, fi[0]->domain_attr->tx_ctx_cnt); cr_assert(ctx_cnt, "ctx_cnt is 0"); ret = fi_fabric(fi[0]->fabric_attr, &fab, NULL); cr_assert(!ret, "fi_fabric"); rx_ctx_bits = 0; while (ctx_cnt >> ++rx_ctx_bits); av_attr.rx_ctx_bits = rx_ctx_bits; av_attr.type = av_type; av_attr.count = NUMEPS; cq_attr.format = FI_CQ_FORMAT_TAGGED; cq_attr.size = 1024; cq_attr.wait_obj = FI_WAIT_NONE; rx_addr = calloc(ctx_cnt, sizeof(*rx_addr)); target = calloc(BUF_SZ, 1); source = calloc(BUF_SZ, 1); iov_src_buf = malloc(BUF_SZ * IOV_CNT); iov_dest_buf = malloc(BUF_SZ * IOV_CNT); src_iov = malloc(sizeof(struct iovec) * IOV_CNT); dest_iov = malloc(sizeof(struct iovec) * IOV_CNT); if (!rx_addr || !target || !source || !iov_src_buf || !iov_dest_buf || !src_iov || !dest_iov) { cr_assert(0, "allocation"); } for (i = 0; i < IOV_CNT; i++) { src_iov[i].iov_base = malloc(BUF_SZ); assert(src_iov[i].iov_base != NULL); dest_iov[i].iov_base = malloc(BUF_SZ * 3); assert(dest_iov[i].iov_base != NULL); } for (i = 0; i < NUMEPS; i++) { fi[i]->ep_attr->tx_ctx_cnt = ctx_cnt; fi[i]->ep_attr->rx_ctx_cnt = ctx_cnt; ret = fi_domain(fab, fi[i], &dom[i], NULL); cr_assert(!ret, "fi_domain"); ret = fi_scalable_ep(dom[i], fi[i], &sep[i], NULL); cr_assert(!ret, "fi_scalable_ep"); ret = fi_av_open(dom[i], &av_attr, &av[i], NULL); cr_assert(!ret, "fi_av_open"); ret = fi_cntr_open(dom[i], &cntr_attr, &send_cntr[i], 0); cr_assert(!ret, "fi_cntr_open"); ret = fi_cntr_open(dom[i], &cntr_attr, &recv_cntr[i], 0); cr_assert(!ret, "fi_cntr_open"); for (j = 0; j < ctx_cnt; j++) { ret = fi_tx_context(sep[i], j, NULL, &tx_ep[i][j], NULL); cr_assert(!ret, "fi_tx_context"); ret = fi_cq_open(dom[i], &cq_attr, &tx_cq[i][j], NULL); cr_assert(!ret, "fi_cq_open"); ret = fi_rx_context(sep[i], j, NULL, &rx_ep[i][j], NULL); cr_assert(!ret, "fi_rx_context"); ret = fi_cq_open(dom[i], &cq_attr, &rx_cq[i][j], NULL); cr_assert(!ret, "fi_cq_open"); } ret = fi_scalable_ep_bind(sep[i], &av[i]->fid, 0); cr_assert(!ret, "fi_scalable_ep_bind"); for (j = 0; j < ctx_cnt; j++) { ret = fi_ep_bind(tx_ep[i][j], &tx_cq[i][j]->fid, FI_TRANSMIT); cr_assert(!ret, "fi_ep_bind"); ret = fi_ep_bind(tx_ep[i][j], &send_cntr[i]->fid, FI_SEND | FI_WRITE); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(tx_ep[i][j]); cr_assert(!ret, "fi_enable"); ret = fi_ep_bind(rx_ep[i][j], &rx_cq[i][j]->fid, FI_RECV); cr_assert(!ret, "fi_ep_bind"); ret = fi_ep_bind(rx_ep[i][j], &recv_cntr[i]->fid, FI_RECV | FI_READ); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(rx_ep[i][j]); cr_assert(!ret, "fi_enable"); } } for (i = 
0; i < NUMEPS; i++) { ret = fi_enable(sep[i]); cr_assert(!ret, "fi_enable"); ret = fi_getname(&sep[i]->fid, NULL, &addrlen); cr_assert(addrlen > 0); ep_name[i] = malloc(addrlen); cr_assert(ep_name[i] != NULL); ret = fi_getname(&sep[i]->fid, ep_name[i], &addrlen); cr_assert(ret == FI_SUCCESS); ret = fi_mr_reg(dom[i], target, BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, &rem_mr[i], &target); cr_assert_eq(ret, 0); ret = fi_mr_reg(dom[i], source, BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, &loc_mr[i], &source); cr_assert_eq(ret, 0); mr_key[i] = fi_mr_key(rem_mr[i]); ret = fi_mr_reg(dom[i], iov_dest_buf, IOV_CNT * BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, iov_dest_buf_mr + i, &iov_dest_buf); cr_assert_eq(ret, 0); ret = fi_mr_reg(dom[i], iov_src_buf, IOV_CNT * BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0, iov_src_buf_mr + i, &iov_src_buf); cr_assert_eq(ret, 0); } for (i = 0; i < NUMEPS; i++) { for (j = 0; j < NUMEPS; j++) { ret = fi_av_insert(av[i], ep_name[j], 1, &gni_addr[j], 0, NULL); cr_assert(ret == 1); } } for (i = 0; i < ctx_cnt; i++) { rx_addr[i] = fi_rx_addr(gni_addr[1], i, rx_ctx_bits); } }
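/*
 * Hedged usage sketch: the rx_addr[] table built above encodes "peer 1,
 * receive context i" addresses via fi_rx_addr(), so a send from one of node
 * 0's tx contexts can be steered at a specific rx context on the peer.  The
 * context index is illustrative; a matching fi_recv would be posted on the
 * peer's rx_ep[1][ctx_idx].
 */
static void sep_send_to_ctx_sketch(int ctx_idx)
{
	ssize_t sz;

	sz = fi_send(tx_ep[0][ctx_idx], source, BUF_SZ, fi_mr_desc(loc_mr[0]),
		     rx_addr[ctx_idx], NULL);
	cr_assert(!sz, "fi_send");

	/* the completion shows up on tx_cq[0][ctx_idx] (bound with
	 * FI_TRANSMIT) and is counted by send_cntr[0] (bound with FI_SEND) */
}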
static void vc_setup_common(void) { int ret = 0; struct fi_av_attr attr; size_t addrlen = 0; struct gnix_fid_av *gnix_av; hints->fabric_attr->name = strdup("gni"); ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hints, &fi); cr_assert(!ret, "fi_getinfo"); ret = fi_fabric(fi->fabric_attr, &fab, NULL); cr_assert(!ret, "fi_fabric"); ret = fi_domain(fab, fi, &dom, NULL); cr_assert(!ret, "fi_domain"); attr.type = FI_AV_MAP; attr.count = 16; ret = fi_av_open(dom, &attr, &av, NULL); cr_assert(!ret, "fi_av_open"); gnix_av = container_of(av, struct gnix_fid_av, av_fid); ret = fi_endpoint(dom, fi, &ep[0], NULL); cr_assert(!ret, "fi_endpoint"); ret = fi_getname(&ep[0]->fid, NULL, &addrlen); cr_assert(addrlen > 0); ep_name[0] = malloc(addrlen); cr_assert(ep_name[0] != NULL); ep_name[1] = malloc(addrlen); cr_assert(ep_name[1] != NULL); ret = fi_getname(&ep[0]->fid, ep_name[0], &addrlen); cr_assert(ret == FI_SUCCESS); ret = fi_endpoint(dom, fi, &ep[1], NULL); cr_assert(!ret, "fi_endpoint"); ret = fi_getname(&ep[1]->fid, ep_name[1], &addrlen); cr_assert(ret == FI_SUCCESS); ret = fi_av_insert(av, ep_name[0], 1, &gni_addr[0], 0, NULL); cr_assert(ret == 1); ret = _gnix_av_lookup(gnix_av, gni_addr[0], &gnix_addr[0]); cr_assert(ret == FI_SUCCESS); ret = fi_av_insert(av, ep_name[1], 1, &gni_addr[1], 0, NULL); cr_assert(ret == 1); ret = _gnix_av_lookup(gnix_av, gni_addr[1], &gnix_addr[1]); cr_assert(ret == FI_SUCCESS); ret = fi_ep_bind(ep[0], &av->fid, 0); cr_assert(!ret, "fi_ep_bind"); cq_attr.format = FI_CQ_FORMAT_TAGGED; cq_attr.size = 1024; cq_attr.wait_obj = 0; ret = fi_cq_open(dom, &cq_attr, &cq, 0); cr_assert(!ret, "fi_cq_open"); ret = fi_ep_bind(ep[0], &cq->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(ep[0]); cr_assert(!ret, "fi_enable"); ret = fi_ep_bind(ep[1], &cq->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ret = fi_ep_bind(ep[1], &av->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(ep[1]); cr_assert(!ret, "fi_ep_enable"); }
static void setup(void) { int i, j; int ret = 0; struct fi_av_attr attr; size_t addrlen = 0; struct fi_gni_ops_domain *gni_domain_ops; uint32_t rx_cq_size; hints = fi_allocinfo(); cr_assert(hints, "fi_allocinfo"); hints->domain_attr->cq_data_size = 4; hints->domain_attr->data_progress = FI_PROGRESS_MANUAL; hints->mode = ~0; hints->fabric_attr->name = strdup("gni"); ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hints, &fi); cr_assert(!ret, "fi_getinfo"); ret = fi_fabric(fi->fabric_attr, &fab, NULL); cr_assert(!ret, "fi_fabric"); attr.type = FI_AV_TABLE; attr.count = NUM_EPS; cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.size = 1024; cq_attr.wait_obj = 0; for (i = 0; i < NUM_EPS; i++) { ret = fi_domain(fab, fi, &dom[i], NULL); cr_assert(!ret, "fi_domain"); ret = fi_open_ops(&dom[i]->fid, FI_GNI_DOMAIN_OPS_1, 0, (void **) &gni_domain_ops, NULL); cr_assert(ret == FI_SUCCESS, "fi_open_ops"); rx_cq_size = min_rx_cq_size; ret = gni_domain_ops->set_val(&dom[i]->fid, GNI_RX_CQ_SIZE, &rx_cq_size); cr_assert(ret == FI_SUCCESS, "set_val"); ret = fi_av_open(dom[i], &attr, &av[i], NULL); cr_assert(!ret, "fi_av_open"); ret = fi_endpoint(dom[i], fi, &ep[i], NULL); cr_assert(!ret, "fi_endpoint"); cr_assert(ep[i]); ret = fi_cq_open(dom[i], &cq_attr, &msg_cq[i], 0); cr_assert(!ret, "fi_cq_open"); ret = fi_ep_bind(ep[i], &msg_cq[i]->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); } ret = fi_getname(&ep[0]->fid, NULL, &addrlen); cr_assert_eq(ret, -FI_ETOOSMALL); cr_assert(addrlen > 0); for (i = 0; i < NUM_EPS; i++) { ep_name[i] = malloc(addrlen); cr_assert(ep_name[i] != NULL); ret = fi_getname(&ep[i]->fid, ep_name[i], &addrlen); cr_assert(ret == FI_SUCCESS); for (j = 0; j < NUM_EPS; j++) { ret = fi_av_insert(av[j], ep_name[i], 1, &gni_addr[i], 0, NULL); cr_assert(ret == 1); } } for (i = 0; i < NUM_EPS; i++) { ret = fi_ep_bind(ep[i], &av[i]->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(ep[i]); cr_assert(!ret, "fi_ep_enable"); ret = fi_mr_reg(dom[i], target, NUM_EPS*sizeof(int), FI_RECV, 0, 0, 0, &rem_mr[i], &target); cr_assert_eq(ret, 0); ret = fi_mr_reg(dom[i], source, NUM_EPS*sizeof(int), FI_SEND, 0, 0, 0, &loc_mr[i], &source); cr_assert_eq(ret, 0); mr_key[i] = fi_mr_key(rem_mr[i]); } }
static int alloc_ep_res(struct fi_info *fi) { struct fi_cq_attr cq_attr; struct fi_rx_attr rx_attr; struct fi_tx_attr tx_attr; struct fi_av_attr av_attr; int i, ret = 0; buffer_size = test_size[TEST_CNT - 1].size; buf = malloc(buffer_size); remote_fi_addr = (fi_addr_t *)malloc(sizeof(*remote_fi_addr) * ep_cnt); if (!buf || !remote_fi_addr) { perror("malloc"); goto err1; } memset(&cq_attr, 0, sizeof cq_attr); cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_NONE; cq_attr.size = rx_depth; memset(&tx_attr, 0, sizeof tx_attr); memset(&rx_attr, 0, sizeof rx_attr); ret = fi_stx_context(dom, &tx_attr, &stx_ctx, NULL); if (ret) { FT_PRINTERR("fi_stx_context", ret); goto err1; } ret = fi_cq_open(dom, &cq_attr, &scq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err2; } ret = fi_srx_context(dom, &rx_attr, &srx_ctx, NULL); if (ret) { FT_PRINTERR("fi_srx_context", ret); goto err3; } ret = fi_cq_open(dom, &cq_attr, &rcq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err4; } ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL); if (ret) { FT_PRINTERR("fi_mr_reg", ret); goto err5; } memset(&av_attr, 0, sizeof av_attr); av_attr.type = fi->domain_attr->av_type ? fi->domain_attr->av_type : FI_AV_MAP; av_attr.count = ep_cnt; ret = fi_av_open(dom, &av_attr, &av, NULL); if (ret) { FT_PRINTERR("fi_av_open", ret); goto err6; } ep = calloc(ep_cnt, sizeof(*ep)); if (!ep) { perror("malloc"); goto err7; } for (i = 0; i < ep_cnt; i++) { ret = fi_endpoint(dom, fi, &ep[i], NULL); if (ret) { FT_PRINTERR("fi_endpoint", ret); goto err8; } } return 0; err8: FT_CLOSEV(ep, ep_cnt); err7: fi_close(&av->fid); err6: fi_close(&mr->fid); err5: fi_close(&rcq->fid); err4: fi_close(&srx_ctx->fid); err3: fi_close(&scq->fid); err2: fi_close(&stx_ctx->fid); err1: free(buf); free(remote_fi_addr); return ret; }
static int alloc_ep_res(struct fi_info *fi) { struct fi_cq_attr cq_attr; struct fi_av_attr av_attr; int ret; buf = malloc(buffer_size); if (!buf) { perror("malloc"); return -1; } memset(&cq_attr, 0, sizeof cq_attr); cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_NONE; cq_attr.size = rx_depth; ret = fi_cq_open(dom, &cq_attr, &scq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err1; } ret = fi_cq_open(dom, &cq_attr, &rcq, NULL); if (ret) { FT_PRINTERR("fi_cq_open", ret); goto err2; } ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL); if (ret) { FT_PRINTERR("fi_mr_reg", ret); goto err3; } memset(&av_attr, 0, sizeof av_attr); av_attr.type = fi->domain_attr->av_type ? fi->domain_attr->av_type : FI_AV_MAP; av_attr.count = 1; av_attr.name = NULL; ret = fi_av_open(dom, &av_attr, &av, NULL); if (ret) { FT_PRINTERR("fi_av_open", ret); goto err4; } ret = fi_endpoint(dom, fi, &ep, NULL); if (ret) { FT_PRINTERR("fi_endpoint", ret); goto err5; } return 0; err5: fi_close(&av->fid); err4: fi_close(&mr->fid); err3: fi_close(&rcq->fid); err2: fi_close(&scq->fid); err1: free(buf); return ret; }
static void libfabric_init() { int i; struct fi_info *info = NULL; struct fi_info *hints = fi_allocinfo(); struct fi_av_attr av_attr = {0}; struct fi_cq_attr cq_attr = {0}; int max_tx_ctx, max_rx_ctx; int comm_concurrency; int rx_ctx_cnt; int rx_ctx_bits = 0; hints->mode = ~0; hints->caps = FI_RMA | FI_ATOMIC | FI_SOURCE /* do we want this? */ | FI_READ | FI_WRITE | FI_REMOTE_READ | FI_REMOTE_WRITE | FI_MULTI_RECV | FI_FENCE; hints->addr_format = FI_FORMAT_UNSPEC; #if defined(CHPL_COMM_SUBSTRATE_SOCKETS) // // fi_freeinfo(hints) will free() hints->fabric_attr->prov_name; this // is documented, though poorly. So, get that space from malloc(). // { const char s[] = "sockets"; char* sDup = sys_malloc(sizeof(s)); strcpy(sDup, s); hints->fabric_attr->prov_name = sDup; } #elif defined(CHPL_COMM_SUBSTRATE_GNI) #error "Substrate GNI not supported" #else #error "Substrate type not supported" #endif /* connectionless reliable */ hints->ep_attr->type = FI_EP_RDM; hints->domain_attr->threading = FI_THREAD_UNSPEC; hints->domain_attr->control_progress = FI_PROGRESS_MANUAL; hints->domain_attr->data_progress = FI_PROGRESS_MANUAL; hints->domain_attr->av_type = FI_AV_TABLE; hints->domain_attr->mr_mode = FI_MR_SCALABLE; hints->domain_attr->resource_mgmt = FI_RM_ENABLED; // hints->domain_attr->cq_data_size hints->tx_attr->op_flags = FI_COMPLETION; hints->rx_attr->op_flags = FI_COMPLETION; OFICHKERR(fi_getinfo(FI_VERSION(1,0), NULL, NULL, 0, hints, &info)); if (info == NULL) { chpl_internal_error("No fabrics detected."); } else { #ifdef PRINT_FI_GETINFO struct fi_info *cur; for (cur = info; cur; cur = cur->next) { printf("---\n"); printf("%s", fi_tostr(cur, FI_TYPE_INFO)); } printf("\n"); #endif } ofi.num_am_ctx = 1; // Would we ever want more? max_tx_ctx = info->domain_attr->max_ep_tx_ctx; max_rx_ctx = info->domain_attr->max_ep_rx_ctx; comm_concurrency = get_comm_concurrency(); ofi.num_tx_ctx = comm_concurrency+ofi.num_am_ctx > max_tx_ctx ? max_tx_ctx-ofi.num_am_ctx : comm_concurrency; ofi.num_rx_ctx = comm_concurrency+ofi.num_am_ctx > max_rx_ctx ? max_rx_ctx-ofi.num_am_ctx : comm_concurrency; info->ep_attr->tx_ctx_cnt = ofi.num_tx_ctx + ofi.num_am_ctx; info->ep_attr->rx_ctx_cnt = ofi.num_rx_ctx + ofi.num_am_ctx; OFICHKERR(fi_fabric(info->fabric_attr, &ofi.fabric, NULL)); OFICHKERR(fi_domain(ofi.fabric, info, &ofi.domain, NULL)); rx_ctx_cnt = ofi.num_rx_ctx + ofi.num_am_ctx; while (rx_ctx_cnt >> ++rx_ctx_bits); av_attr.rx_ctx_bits = rx_ctx_bits; av_attr.type = FI_AV_TABLE; av_attr.count = chpl_numNodes; OFICHKERR(fi_av_open(ofi.domain, &av_attr, &ofi.av, NULL)); OFICHKERR(fi_scalable_ep(ofi.domain, info, &ofi.ep, NULL)); OFICHKERR(fi_scalable_ep_bind(ofi.ep, &ofi.av->fid, 0)); /* set up tx and rx contexts */ cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.size = 1024; /* ??? 
*/ cq_attr.wait_obj = FI_WAIT_UNSPEC; ofi.tx_ep = (struct fid_ep **) chpl_mem_allocMany(ofi.num_tx_ctx, sizeof(ofi.tx_ep[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); ofi.tx_cq = (struct fid_cq **) chpl_mem_allocMany(ofi.num_tx_ctx, sizeof(ofi.tx_cq[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); for (i = 0; i < ofi.num_tx_ctx; i++) { OFICHKERR(fi_tx_context(ofi.ep, i, NULL, &ofi.tx_ep[i], NULL)); OFICHKERR(fi_cq_open(ofi.domain, &cq_attr, &ofi.tx_cq[i], NULL)); OFICHKERR(fi_ep_bind(ofi.tx_ep[i], &ofi.tx_cq[i]->fid, FI_TRANSMIT)); OFICHKERR(fi_enable(ofi.tx_ep[i])); } ofi.rx_ep = (struct fid_ep **) chpl_mem_allocMany(ofi.num_rx_ctx, sizeof(ofi.rx_ep[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); ofi.rx_cq = (struct fid_cq **) chpl_mem_allocMany(ofi.num_rx_ctx, sizeof(ofi.rx_cq[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); for (i = 0; i < ofi.num_rx_ctx; i++) { OFICHKERR(fi_rx_context(ofi.ep, i, NULL, &ofi.rx_ep[i], NULL)); OFICHKERR(fi_cq_open(ofi.domain, &cq_attr, &ofi.rx_cq[i], NULL)); OFICHKERR(fi_ep_bind(ofi.rx_ep[i], &ofi.rx_cq[i]->fid, FI_RECV)); OFICHKERR(fi_enable(ofi.rx_ep[i])); } ofi.am_tx_ep = (struct fid_ep **) chpl_mem_allocMany(ofi.num_am_ctx, sizeof(ofi.am_tx_ep[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); ofi.am_tx_cq = (struct fid_cq **) chpl_mem_allocMany(ofi.num_am_ctx, sizeof(ofi.am_tx_cq[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); /* set up AM contexts */ for (i = 0; i < ofi.num_am_ctx; i++) { OFICHKERR(fi_tx_context(ofi.ep, i+ofi.num_tx_ctx, NULL, &ofi.am_tx_ep[i], NULL)); OFICHKERR(fi_cq_open(ofi.domain, &cq_attr, &ofi.am_tx_cq[i], NULL)); OFICHKERR(fi_ep_bind(ofi.am_tx_ep[i], &ofi.am_tx_cq[i]->fid, FI_TRANSMIT)); OFICHKERR(fi_enable(ofi.am_tx_ep[i])); } ofi.am_rx_ep = (struct fid_ep **) chpl_mem_allocMany(ofi.num_am_ctx, sizeof(ofi.am_rx_ep[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); ofi.am_rx_cq = (struct fid_cq **) chpl_mem_allocMany(ofi.num_am_ctx, sizeof(ofi.am_rx_cq[0]), CHPL_RT_MD_COMM_PER_LOC_INFO, 0, 0); for (i = 0; i < ofi.num_am_ctx; i++) { OFICHKERR(fi_rx_context(ofi.ep, i+ofi.num_rx_ctx, NULL, &ofi.am_rx_ep[i], NULL)); OFICHKERR(fi_cq_open(ofi.domain, &cq_attr, &ofi.am_rx_cq[i], NULL)); OFICHKERR(fi_ep_bind(ofi.am_rx_ep[i], &ofi.am_rx_cq[i]->fid, FI_RECV)); OFICHKERR(fi_enable(ofi.am_rx_ep[i])); } OFICHKERR(fi_enable(ofi.ep)); libfabric_init_addrvec(rx_ctx_cnt, rx_ctx_bits); OFICHKERR(fi_mr_reg(ofi.domain, 0, SIZE_MAX, FI_READ | FI_WRITE | FI_REMOTE_READ | FI_REMOTE_WRITE | FI_SEND | FI_RECV, 0, (uint64_t) chpl_nodeID, 0, &ofi.mr, NULL)); fi_freeinfo(info); /* No error returned */ fi_freeinfo(hints); /* No error returned */ chpl_msg(2, "%d: completed libfabric initialization\n", chpl_nodeID); }
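/*
 * A speculative finalization sketch for the setup above (not part of the
 * original runtime code): the memory region and every per-context endpoint
 * and CQ are released before the scalable endpoint, followed by the AV,
 * domain and fabric, mirroring the creation order in reverse.
 */
static void libfabric_fini_sketch(void) {
  int i;

  OFICHKERR(fi_close(&ofi.mr->fid));

  for (i = 0; i < ofi.num_am_ctx; i++) {
    OFICHKERR(fi_close(&ofi.am_tx_ep[i]->fid));
    OFICHKERR(fi_close(&ofi.am_tx_cq[i]->fid));
    OFICHKERR(fi_close(&ofi.am_rx_ep[i]->fid));
    OFICHKERR(fi_close(&ofi.am_rx_cq[i]->fid));
  }
  for (i = 0; i < ofi.num_tx_ctx; i++) {
    OFICHKERR(fi_close(&ofi.tx_ep[i]->fid));
    OFICHKERR(fi_close(&ofi.tx_cq[i]->fid));
  }
  for (i = 0; i < ofi.num_rx_ctx; i++) {
    OFICHKERR(fi_close(&ofi.rx_ep[i]->fid));
    OFICHKERR(fi_close(&ofi.rx_cq[i]->fid));
  }

  OFICHKERR(fi_close(&ofi.ep->fid));      /* scalable endpoint */
  OFICHKERR(fi_close(&ofi.av->fid));
  OFICHKERR(fi_close(&ofi.domain->fid));
  OFICHKERR(fi_close(&ofi.fabric->fid));
}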
static void setup_ep(void) { int ret; struct fi_av_attr attr; size_t addrlen = 0; attr.type = FI_AV_MAP; attr.count = 16; ret = fi_av_open(dom, &attr, &av, NULL); cr_assert(!ret, "fi_av_open"); ret = fi_endpoint(dom, fi, &ep[0], NULL); cr_assert(!ret, "fi_endpoint"); cq_attr.format = FI_CQ_FORMAT_TAGGED; cq_attr.size = 1024; cq_attr.wait_obj = 0; ret = fi_cq_open(dom, &cq_attr, &msg_cq[0], 0); cr_assert(!ret, "fi_cq_open"); ret = fi_cq_open(dom, &cq_attr, &msg_cq[1], 0); cr_assert(!ret, "fi_cq_open"); ret = fi_ep_bind(ep[0], &msg_cq[0]->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ret = fi_getname(&ep[0]->fid, NULL, &addrlen); cr_assert(addrlen > 0); ep_name[0] = malloc(addrlen); cr_assert(ep_name[0] != NULL); ret = fi_getname(&ep[0]->fid, ep_name[0], &addrlen); cr_assert(ret == FI_SUCCESS); ret = fi_endpoint(dom, fi, &ep[1], NULL); cr_assert(!ret, "fi_endpoint"); ret = fi_ep_bind(ep[1], &msg_cq[1]->fid, FI_SEND | FI_RECV); cr_assert(!ret, "fi_ep_bind"); ep_name[1] = malloc(addrlen); cr_assert(ep_name[1] != NULL); ret = fi_getname(&ep[1]->fid, ep_name[1], &addrlen); cr_assert(ret == FI_SUCCESS); ret = fi_av_insert(av, ep_name[0], 1, &gni_addr[0], 0, NULL); cr_assert(ret == 1); ret = fi_av_insert(av, ep_name[1], 1, &gni_addr[1], 0, NULL); cr_assert(ret == 1); ret = fi_ep_bind(ep[0], &av->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_ep_bind(ep[1], &av->fid, 0); cr_assert(!ret, "fi_ep_bind"); ret = fi_enable(ep[0]); cr_assert(!ret, "fi_ep_enable"); ret = fi_enable(ep[1]); cr_assert(!ret, "fi_ep_enable"); }
int rxd_cq_open(struct fid_domain *domain, struct fi_cq_attr *attr, struct fid_cq **cq_fid, void *context) { int ret; struct rxd_cq *cq; struct rxd_domain *rxd_domain; cq = calloc(1, sizeof(*cq)); if (!cq) return -FI_ENOMEM; ret = ofi_cq_init(&rxd_prov, domain, attr, &cq->util_cq, &rxd_cq_progress, context); if (ret) goto err1; switch (attr->format) { case FI_CQ_FORMAT_UNSPEC: case FI_CQ_FORMAT_CONTEXT: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_ctx_signal : rxd_cq_write_ctx; break; case FI_CQ_FORMAT_MSG: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_msg_signal : rxd_cq_write_msg; break; case FI_CQ_FORMAT_DATA: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_data_signal : rxd_cq_write_data; break; case FI_CQ_FORMAT_TAGGED: cq->write_fn = cq->util_cq.wait ? rxd_cq_write_tagged_signal : rxd_cq_write_tagged; break; default: ret = -FI_EINVAL; goto err2; } rxd_domain = container_of(domain, struct rxd_domain, util_domain.domain_fid); attr->format = FI_CQ_FORMAT_MSG; ret = fi_cq_open(rxd_domain->dg_domain, attr, &cq->dg_cq, context); if (ret) goto err2; cq->unexp_pool = util_buf_pool_create( RXD_EP_MAX_UNEXP_PKT * sizeof (struct rxd_unexp_cq_entry), RXD_BUF_POOL_ALIGNMENT, 0, RXD_EP_MAX_UNEXP_PKT); if (!cq->unexp_pool) { ret = -FI_ENOMEM; goto err3; } dlist_init(&cq->dom_entry); dlist_init(&cq->unexp_list); fastlock_init(&cq->lock); fastlock_acquire(&rxd_domain->lock); dlist_insert_tail(&cq->dom_entry, &rxd_domain->cq_list); fastlock_release(&rxd_domain->lock); *cq_fid = &cq->util_cq.cq_fid; (*cq_fid)->fid.ops = &rxd_cq_fi_ops; (*cq_fid)->ops = &rxd_cq_ops; cq->domain = rxd_domain; return 0; /* unwind in the reverse order of setup: dg_cq is only valid past err3 */ err3: fi_close(&cq->dg_cq->fid); err2: ofi_cq_cleanup(&cq->util_cq); err1: free(cq); return ret; }
int client_connect(struct fi_info *prov, simple_context_t *ctx) { struct fi_eq_attr eq_attr = { 0 }; struct fi_cq_attr cq_attr = { 0 }; struct sockaddr_in addr = { 0 }; int ret; print_trace("in\n"); connected = 0; ret = fi_fabric(prov->fabric_attr, &ctx->fabric, NULL); if (ret) { print_err("fi_fabric returned %d\n", ret); ctx->fabric = NULL; return ret; } ret = fi_domain(ctx->fabric, prov, &ctx->domain, NULL); if (ret) { print_err("fi_fdomain returned %d\n", ret); ctx->domain = NULL; return ret; } /* set QP WR depth */ prov->ep_attr->tx_ctx_cnt = (size_t) (post_depth + 1); prov->ep_attr->rx_ctx_cnt = (size_t) (post_depth + 1); /* set ScatterGather max depth */ prov->tx_attr->iov_limit = 1; prov->rx_attr->iov_limit = 1; prov->tx_attr->inject_size = 0; /* no INLINE support */ ret = fi_endpoint(ctx->domain, prov, &ctx->ep, CONTEXT); if (ret) { print_err("fi_endpoint returned %d\n", ret); ctx->ep = NULL; return ret; } eq_attr.wait_obj = FI_WAIT_NONE; ret = fi_eq_open(ctx->fabric, &eq_attr, &ctx->eq, NULL); if (ret) { print_err("fi_eq_open returned %d\n", ret); ctx->eq = NULL; return ret; } cq_attr.size = post_depth * 4; cq_attr.flags = FI_SEND; cq_attr.format = FI_CQ_FORMAT_MSG; cq_attr.wait_obj = FI_WAIT_NONE; cq_attr.wait_cond = FI_CQ_COND_NONE; ret = fi_cq_open(ctx->domain, &cq_attr, &ctx->scq, NULL); if (ret) { print_err("fi_cq_open returned %d\n", ret); ctx->scq = NULL; return ret; } cq_attr.flags = FI_RECV; ret = fi_cq_open(ctx->domain, &cq_attr, &ctx->rcq, NULL); if (ret) { print_err("fi_cq_open returned %d\n", ret); ctx->rcq = NULL; return ret; } ret = fi_ep_bind(ctx->ep, &ctx->eq->fid, 0); if (ret) { print_err("fi_ep_bind returned %d\n", ret); return ret; } ret = fi_ep_bind(ctx->ep, &ctx->scq->fid, FI_SEND); if (ret) { print_err("fi_ep_bind returned %d\n", ret); return ret; } ret = fi_ep_bind(ctx->ep, &ctx->rcq->fid, FI_RECV); if (ret) { print_err("fi_ep_bind returned %d\n", ret); return ret; } ret = fi_enable(ctx->ep); if (ret) { print_err("fi_enable returned %d\n", ret); return ret; } addr.sin_family = AF_INET; addr.sin_port = htons(TEST_PORT); ret = in4_pton(svr_ipaddr, strlen(svr_ipaddr), (u8 *)&addr.sin_addr.s_addr, '\0', NULL); if (ret != 1) { print_err("Err converting target server IP address '%s'?\n", svr_ipaddr); return -EINVAL; } ret = fi_connect(ctx->ep, &addr, PRIVATE_DATA, sizeof(PRIVATE_DATA)); if (ret) { print_err("fi_connect returned %d\n", ret); return ret; } connected = 1; return 0; }
int ft_alloc_active_res(struct fi_info *fi) { int ret; ret = ft_alloc_msgs(); if (ret) return ret; if (cq_attr.format == FI_CQ_FORMAT_UNSPEC) { if (fi->caps & FI_TAGGED) cq_attr.format = FI_CQ_FORMAT_TAGGED; else cq_attr.format = FI_CQ_FORMAT_CONTEXT; } if (opts.options & FT_OPT_TX_CQ) { ft_cq_set_wait_attr(); cq_attr.size = fi->tx_attr->size; ret = fi_cq_open(domain, &cq_attr, &txcq, &txcq); if (ret) { FT_PRINTERR("fi_cq_open", ret); return ret; } if (opts.comp_method == FT_COMP_WAIT_FD) { ret = fi_control(&txcq->fid, FI_GETWAIT, (void *) &tx_fd); if (ret) { FT_PRINTERR("fi_control(FI_GETWAIT)", ret); return ret; } } } if (opts.options & FT_OPT_TX_CNTR) { ft_cntr_set_wait_attr(); ret = fi_cntr_open(domain, &cntr_attr, &txcntr, &txcntr); if (ret) { FT_PRINTERR("fi_cntr_open", ret); return ret; } } if (opts.options & FT_OPT_RX_CQ) { ft_cq_set_wait_attr(); cq_attr.size = fi->rx_attr->size; ret = fi_cq_open(domain, &cq_attr, &rxcq, &rxcq); if (ret) { FT_PRINTERR("fi_cq_open", ret); return ret; } if (opts.comp_method == FT_COMP_WAIT_FD) { ret = fi_control(&rxcq->fid, FI_GETWAIT, (void *) &rx_fd); if (ret) { FT_PRINTERR("fi_control(FI_GETWAIT)", ret); return ret; } } } if (opts.options & FT_OPT_RX_CNTR) { ft_cntr_set_wait_attr(); ret = fi_cntr_open(domain, &cntr_attr, &rxcntr, &rxcntr); if (ret) { FT_PRINTERR("fi_cntr_open", ret); return ret; } } if (fi->ep_attr->type == FI_EP_RDM || fi->ep_attr->type == FI_EP_DGRAM) { if (fi->domain_attr->av_type != FI_AV_UNSPEC) av_attr.type = fi->domain_attr->av_type; if (opts.av_name) { av_attr.name = opts.av_name; } ret = fi_av_open(domain, &av_attr, &av, NULL); if (ret) { FT_PRINTERR("fi_av_open", ret); return ret; } } ret = fi_endpoint(domain, fi, &ep, NULL); if (ret) { FT_PRINTERR("fi_endpoint", ret); return ret; } return 0; }
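/*
 * When FT_OPT_TX_CNTR / FT_OPT_RX_CNTR are selected above, completions are
 * counted rather than written to a CQ.  A hedged sketch of waiting for
 * 'total' transmit completions on the txcntr opened above (the timeout is in
 * milliseconds); this assumes the counter was opened with a wait object via
 * ft_cntr_set_wait_attr().
 */
static int wait_tx_cntr_sketch(uint64_t total, int timeout_ms)
{
	int ret;

	ret = fi_cntr_wait(txcntr, total, timeout_ms);
	if (ret) {
		FT_PRINTERR("fi_cntr_wait", ret);
		/* check whether any operations completed in error */
		if (fi_cntr_readerr(txcntr) > 0)
			return -FI_EIO;
		return ret;
	}

	return 0;
}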
/* returns 0 on success or a negative value that can be stringified with * fi_strerror on error */ static int setup_ep_fixture(struct fid_ep **ep_o) { int ret; struct fi_info *myfi; struct fi_av_attr av_attr; struct fi_cq_attr cq_attr; assert(ep_o != NULL); ret = 0; myfi = fi_dupinfo(fi); if (myfi == NULL) { printf("fi_dupinfo returned NULL\n"); goto fail; } ret = fi_endpoint(domain, myfi, ep_o, NULL); if (ret != 0) { printf("fi_endpoint %s\n", fi_strerror(-ret)); goto fail; } memset(&cq_attr, 0, sizeof cq_attr); cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_NONE; cq_attr.size = TX_CQ_DEPTH; ret = fi_cq_open(domain, &cq_attr, &wcq, /*context=*/NULL); if (ret != 0) { printf("fi_cq_open %s\n", fi_strerror(-ret)); goto fail; } memset(&cq_attr, 0, sizeof cq_attr); cq_attr.format = FI_CQ_FORMAT_CONTEXT; cq_attr.wait_obj = FI_WAIT_NONE; cq_attr.size = RX_CQ_DEPTH; ret = fi_cq_open(domain, &cq_attr, &rcq, /*context=*/NULL); if (ret != 0) { printf("fi_cq_open %s\n", fi_strerror(-ret)); goto fail; } memset(&av_attr, 0, sizeof av_attr); av_attr.type = myfi->domain_attr->av_type ? myfi->domain_attr->av_type : FI_AV_MAP; av_attr.count = 1; av_attr.name = NULL; ret = fi_av_open(domain, &av_attr, &av, NULL); if (ret != 0) { printf("fi_av_open %s\n", fi_strerror(-ret)); goto fail; } ret = fi_ep_bind(*ep_o, &wcq->fid, FI_SEND); if (ret != 0) { printf("fi_ep_bind(wcq) %s\n", fi_strerror(-ret)); goto fail; } ret = fi_ep_bind(*ep_o, &rcq->fid, FI_RECV); if (ret != 0) { printf("fi_ep_bind(rcq) %s\n", fi_strerror(-ret)); goto fail; } ret = fi_ep_bind(*ep_o, &av->fid, 0); if (ret != 0) { printf("fi_ep_bind(av) %s\n", fi_strerror(-ret)); goto fail; } ret = fi_enable(*ep_o); if (ret != 0) { printf("fi_enable %s\n", fi_strerror(-ret)); goto fail; } if (myfi != NULL) { fi_freeinfo(myfi); } return ret; fail: if (myfi != NULL) { fi_freeinfo(myfi); } return teardown_ep_fixture(*ep_o); }
static int init_node(struct cma_node *node, struct fi_info *info)
{
    struct fi_cq_attr cq_attr;
    int ret;

    ret = fi_domain(fabric, info, &node->domain, NULL);
    if (ret) {
        FT_PRINTERR("fi_domain", ret);
        goto out;
    }

    memset(&cq_attr, 0, sizeof cq_attr);
    cq_attr.size = hints->tx_attr->size ? hints->tx_attr->size : 1;
    cq_attr.format = FI_CQ_FORMAT_CONTEXT;
    ret = fi_cq_open(node->domain, &cq_attr, &node->cq[SEND_CQ_INDEX], NULL);
    if (ret) {
        FT_PRINTERR("fi_cq_open", ret);
        goto out;
    }

    ret = fi_cq_open(node->domain, &cq_attr, &node->cq[RECV_CQ_INDEX], NULL);
    if (ret) {
        FT_PRINTERR("fi_cq_open", ret);
        goto out;
    }

    ret = fi_endpoint(node->domain, info, &node->ep, node);
    if (ret) {
        FT_PRINTERR("fi_endpoint", ret);
        goto out;
    }

    ret = fi_ep_bind(node->ep, &node->cq[SEND_CQ_INDEX]->fid, FI_SEND);
    if (ret) {
        FT_PRINTERR("fi_ep_bind", ret);
        goto out;
    }

    ret = fi_ep_bind(node->ep, &node->cq[RECV_CQ_INDEX]->fid, FI_RECV);
    if (ret) {
        FT_PRINTERR("fi_ep_bind", ret);
        goto out;
    }

    ret = fi_ep_bind(node->ep, &eq->fid, 0);
    if (ret) {
        FT_PRINTERR("fi_ep_bind", ret);
        goto out;
    }

    ret = fi_enable(node->ep);
    if (ret) {
        FT_PRINTERR("fi_enable", ret);
        goto out;
    }

    ret = create_messages(node);
    if (ret) {
        printf("cmatose: failed to create messages: %d\n", ret);
        goto out;
    }
out:
    return ret;
}
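/*
 * Illustrative sketch only: create_messages() is not shown in this
 * excerpt. A minimal version might register one block of receive buffers
 * per node and pre-post them on the endpoint, which is all init_node()
 * above needs before connections start completing. The field and global
 * names (node->mem, node->mr, message_size, message_count) are
 * assumptions for illustration.
 */
static int create_messages_sketch(struct cma_node *node)
{
    int ret, i;

    if (!message_count)
        return 0;

    node->mem = calloc(message_count, message_size);
    if (!node->mem)
        return -FI_ENOMEM;

    ret = fi_mr_reg(node->domain, node->mem, message_count * message_size,
                    FI_RECV, 0, 0, 0, &node->mr, NULL);
    if (ret) {
        FT_PRINTERR("fi_mr_reg", ret);
        return ret;
    }

    /* Pre-post one receive per expected message. */
    for (i = 0; i < message_count; i++) {
        ret = fi_recv(node->ep, (char *) node->mem + (size_t) i * message_size,
                      message_size, fi_mr_desc(node->mr), 0, node);
        if (ret) {
            FT_PRINTERR("fi_recv", ret);
            return ret;
        }
    }
    return 0;
}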
void rdm_str_addr_sr_setup_common(void)
{
    int ret = 0, i = 0, j = 0;
    struct fi_av_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.type = FI_AV_MAP;
    attr.count = NUMEPS;

    cq_attr.format = FI_CQ_FORMAT_TAGGED;
    cq_attr.size = 1024;
    cq_attr.wait_obj = 0;

    target_base = malloc(GNIT_ALIGN_LEN(BUF_SZ));
    assert(target_base);
    target = GNIT_ALIGN_BUFFER(char *, target_base);

    source_base = malloc(GNIT_ALIGN_LEN(BUF_SZ));
    assert(source_base);
    source = GNIT_ALIGN_BUFFER(char *, source_base);

    ret = fi_fabric(fi[0]->fabric_attr, &fab, NULL);
    cr_assert(!ret, "fi_fabric");

    for (i = 0; i < NUMEPS; i++) {
        ret = fi_domain(fab, fi[i], dom + i, NULL);
        cr_assert(!ret, "fi_domain");

        ret = fi_av_open(dom[i], &attr, av + i, NULL);
        cr_assert(!ret, "fi_av_open");

        ret = fi_endpoint(dom[i], fi[i], ep + i, NULL);
        cr_assert(!ret, "fi_endpoint");

        ret = fi_cq_open(dom[i], &cq_attr, msg_cq + i, 0);
        cr_assert(!ret, "fi_cq_open");

        ret = fi_ep_bind(ep[i], &msg_cq[i]->fid, FI_SEND | FI_RECV);
        cr_assert(!ret, "fi_ep_bind");

        ret = fi_getname(&ep[i]->fid, NULL, &addrlen);
        cr_assert(addrlen > 0);

        ep_name[i] = malloc(addrlen);
        cr_assert(ep_name[i] != NULL);

        ret = fi_getname(&ep[i]->fid, ep_name[i], &addrlen);
        cr_assert(ret == FI_SUCCESS);
    }

    for (i = 0; i < NUMEPS; i++) {
        /*
         * To test API-1.1: Reporting of unknown source addresses --
         * only insert addresses into the sender's av
         */
        if (i < (NUMEPS / 2)) {
            for (j = 0; j < NUMEPS; j++) {
                dbg_printf("Only does src EP insertions\n");
                ret = fi_av_insert(av[i], ep_name[j], 1,
                                   &gni_addr[j], 0, NULL);
                cr_assert(ret == 1);
            }
        }

        ret = fi_ep_bind(ep[i], &av[i]->fid, 0);
        cr_assert(!ret, "fi_ep_bind");

        ret = fi_enable(ep[i]);
        cr_assert(!ret, "fi_ep_enable");
    }
}
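/*
 * Illustrative sketch only, not from the original suite: how a test
 * built on the fixture above might observe the unknown-source case.
 * Half of the endpoints never had peers inserted into their AV, so a
 * receive completion read with fi_cq_readfrom() can report
 * FI_ADDR_NOTAVAIL for the source (assuming FI_SOURCE was requested in
 * the hints). Depending on the provider and API version, the same case
 * may instead surface as an error completion (FI_EADDRNOTAVAIL) read
 * via fi_cq_readerr(). Endpoint indices 0 (sender) and NUMEPS - 1
 * (receiver) are arbitrary choices, and desc is NULL for brevity,
 * assuming local registration is not required.
 */
static void unknown_src_addr_sketch(void)
{
    struct fi_cq_tagged_entry comp;
    fi_addr_t src = 0;
    ssize_t ret;

    ret = fi_recv(ep[NUMEPS - 1], target, BUF_SZ, NULL, FI_ADDR_UNSPEC, target);
    cr_assert(ret == 0, "fi_recv");

    ret = fi_send(ep[0], source, BUF_SZ, NULL, gni_addr[NUMEPS - 1], source);
    cr_assert(ret == 0, "fi_send");

    do {
        ret = fi_cq_readfrom(msg_cq[NUMEPS - 1], &comp, 1, &src);
    } while (ret == -FI_EAGAIN);

    cr_assert(ret == 1, "fi_cq_readfrom");
    cr_assert(src == FI_ADDR_NOTAVAIL, "source should be unknown");
}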
static void setup_ofi_active(struct fi_info *info, struct fid_ep **ep)
{
    int ret;

    // Make an endpoint
    ret = fi_endpoint(fidev.domain, info, ep, NULL);
    if (0 != ret) {
        error("fi_endpoint failed");
    }

#if WANT_FDS
    // Add the EQ FD to the epoll fd
    static struct epoll_event edt;
    memset(&edt, 0, sizeof(edt));
    edt.events = EPOLLIN;
    edt.data.u32 = 2222;
    ret = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fidev.eq_fd, &edt);
    if (ret < 0) {
        error("epoll_ctl failed");
    }
#endif

    // Bind the EP to the EQ
    ret = fi_ep_bind(*ep, &fidev.eq->fid, 0);
    if (0 != ret) {
        error("fi_ep_bind(eq) failed");
    }

    // Make a CQ
    struct fi_cq_attr cq_attr;
    memset(&cq_attr, 0, sizeof(cq_attr));
    cq_attr.format = FI_CQ_FORMAT_CONTEXT;
    cq_attr.wait_obj = FI_WAIT_FD;
    cq_attr.size = 32; // JMS POC
    ret = fi_cq_open(fidev.domain, &cq_attr, &ficonn.cq, NULL);
    if (ret != 0) {
        error("fi_cq_open failed");
    }

    // Bind the CQ to the EP for both transmit and receive completions
    ret = fi_ep_bind(*ep, &ficonn.cq->fid, FI_TRANSMIT);
    if (0 != ret) {
        error("fi_ep_bind(cq tx) failed");
    }
    ret = fi_ep_bind(*ep, &ficonn.cq->fid, FI_RECV);
    if (0 != ret) {
        error("fi_ep_bind(cq rx) failed");
    }

#if WANT_FDS
    // Get the fd associated with this CQ
    ret = fi_control(&(ficonn.cq->fid), FI_GETWAIT, &ficonn.cq_fd);
    if (ret != 0) {
        error("fi_control to get cq fd failed");
    }
#endif

    // Enable the EP!
    ret = fi_enable(*ep);
    if (0 != ret) {
        error("fi_enable failed");
    }

    // Register the buffers (must use different keys for each)
    ret = fi_mr_reg(fidev.domain, send_buffer, sizeof(send_buffer),
                    FI_SEND, 0, (uintptr_t) send_buffer, 0,
                    &ficonn.send_mr, NULL);
    if (ret != 0) {
        error("fi_mr_reg(send) failed\n");
    }
    ret = fi_mr_reg(fidev.domain, recv_buffer, sizeof(recv_buffer),
                    FI_RECV, 0, (uintptr_t) recv_buffer, 0,
                    &ficonn.recv_mr, NULL);
    if (ret != 0) {
        printf("ERROR: ret=%d, %s\n", ret, fi_strerror(-ret));
        error("fi_mr_reg(recv) failed\n");
    }
}
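#if WANT_FDS
/*
 * Illustrative sketch only: one way the cq_fd retrieved above could be
 * used for event-driven progress. The helper name and the edt.data.u32
 * tag value (3333) are made up for this example; the original program's
 * main loop is not shown in this excerpt, and CQ error completions
 * (-FI_EAVAIL) are treated as fatal for brevity.
 */
static void progress_loop_sketch(void)
{
    struct epoll_event events[2];
    struct fi_cq_entry comp;
    int nfds, i;
    ssize_t ret;

    // Watch the CQ fd alongside the EQ fd already added in
    // setup_ofi_active(); tag it so the two event sources can be told apart.
    struct epoll_event edt;
    memset(&edt, 0, sizeof(edt));
    edt.events = EPOLLIN;
    edt.data.u32 = 3333;
    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, ficonn.cq_fd, &edt) < 0) {
        error("epoll_ctl(cq_fd) failed");
    }

    for (;;) {
        nfds = epoll_wait(epoll_fd, events, 2, -1);
        if (nfds < 0) {
            error("epoll_wait failed");
        }
        for (i = 0; i < nfds; ++i) {
            if (events[i].data.u32 != 3333)
                continue; // EQ event; handled elsewhere
            // Drain the CQ; FI_CQ_FORMAT_CONTEXT yields fi_cq_entry.
            while ((ret = fi_cq_read(ficonn.cq, &comp, 1)) > 0) {
                /* ... dispatch on comp.op_context ... */
            }
            if (ret != -FI_EAGAIN) {
                error("fi_cq_read failed");
            }
        }
    }
}
#endif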
static int alloc_ep_res(struct fi_info *fi)
{
    struct fi_cq_attr cq_attr;
    struct fi_av_attr av_attr;
    int ret;

    buffer_size = opts.user_options & FT_OPT_SIZE ?
            opts.transfer_size : test_size[TEST_CNT - 1].size;

    buf = malloc(MAX(buffer_size, sizeof(uint64_t)));
    if (!buf) {
        perror("malloc");
        return -1;
    }

    result = malloc(MAX(buffer_size, sizeof(uint64_t)));
    if (!result) {
        perror("malloc");
        return -1;
    }

    compare = malloc(MAX(buffer_size, sizeof(uint64_t)));
    if (!compare) {
        perror("malloc");
        return -1;
    }

    memset(&cq_attr, 0, sizeof cq_attr);
    cq_attr.format = FI_CQ_FORMAT_CONTEXT;
    cq_attr.wait_obj = FI_WAIT_NONE;
    cq_attr.size = 128;
    ret = fi_cq_open(dom, &cq_attr, &scq, NULL);
    if (ret) {
        FT_PRINTERR("fi_cq_open", ret);
        goto err1;
    }

    ret = fi_cq_open(dom, &cq_attr, &rcq, NULL);
    if (ret) {
        FT_PRINTERR("fi_cq_open", ret);
        goto err2;
    }

    // Register the local data buffer buf that holds the first operand
    // of the atomic operation
    ret = fi_mr_reg(dom, buf, MAX(buffer_size, sizeof(uint64_t)),
                    FI_REMOTE_READ | FI_REMOTE_WRITE, 0, get_mr_key(),
                    0, &mr, NULL);
    if (ret) {
        FT_PRINTERR("fi_mr_reg", ret);
        goto err3;
    }

    // Register the local data buffer that receives the initial value of
    // the remote buffer
    ret = fi_mr_reg(dom, result, MAX(buffer_size, sizeof(uint64_t)),
                    FI_REMOTE_READ | FI_REMOTE_WRITE, 0, get_mr_key(),
                    0, &mr_result, NULL);
    if (ret) {
        FT_PRINTERR("fi_mr_reg", ret);
        goto err4;
    }

    // Register the local data buffer that contains the comparison data
    ret = fi_mr_reg(dom, compare, MAX(buffer_size, sizeof(uint64_t)),
                    FI_REMOTE_READ | FI_REMOTE_WRITE, 0, get_mr_key(),
                    0, &mr_compare, NULL);
    if (ret) {
        FT_PRINTERR("fi_mr_reg", ret);
        goto err5;
    }

    memset(&av_attr, 0, sizeof av_attr);
    av_attr.type = fi->domain_attr->av_type ?
            fi->domain_attr->av_type : FI_AV_MAP;
    av_attr.count = 1;
    av_attr.name = NULL;
    ret = fi_av_open(dom, &av_attr, &av, NULL);
    if (ret) {
        FT_PRINTERR("fi_av_open", ret);
        goto err6;
    }

    ret = fi_endpoint(dom, fi, &ep, NULL);
    if (ret) {
        FT_PRINTERR("fi_endpoint", ret);
        goto err7;
    }

    return 0;

err7:
    fi_close(&av->fid);
err6:
    fi_close(&mr_compare->fid);
err5:
    fi_close(&mr_result->fid);
err4:
    fi_close(&mr->fid);
err3:
    fi_close(&rcq->fid);
err2:
    fi_close(&scq->fid);
err1:
    free(buf);
    free(result);
    free(compare);
    return ret;
}
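/*
 * Illustrative sketch only: how the three buffers registered above are
 * typically used together in a compare-and-swap. The remote address,
 * key, and destination (remote_fi_addr, remote_addr, remote_key) are
 * assumptions standing in for values the real test exchanges with its
 * peer, and support is probed with fi_compare_atomicvalid() first.
 */
static int compare_swap_sketch(fi_addr_t remote_fi_addr,
                               uint64_t remote_addr, uint64_t remote_key)
{
    size_t count = 0;
    int ret;

    ret = fi_compare_atomicvalid(ep, FI_UINT64, FI_CSWAP, &count);
    if (ret || count == 0) {
        FT_PRINTERR("fi_compare_atomicvalid", ret);
        return ret ? ret : -FI_ENOSYS;
    }

    // buf holds the new value, compare the expected value, and result
    // receives whatever the remote location contained before the swap.
    ret = (int) fi_compare_atomic(ep, buf, 1, fi_mr_desc(mr),
                                  compare, fi_mr_desc(mr_compare),
                                  result, fi_mr_desc(mr_result),
                                  remote_fi_addr, remote_addr, remote_key,
                                  FI_UINT64, FI_CSWAP, NULL);
    if (ret) {
        FT_PRINTERR("fi_compare_atomic", ret);
        return ret;
    }
    return 0;
}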
void rdm_rma_setup(void)
{
    int ret = 0;
    struct fi_av_attr attr;
    size_t addrlen = 0;

    hints = fi_allocinfo();
    cr_assert(hints, "fi_allocinfo");

    hints->domain_attr->cq_data_size = 4;
    hints->mode = ~0;
    hints->fabric_attr->name = strdup("gni");

    ret = fi_getinfo(FI_VERSION(1, 0), NULL, 0, 0, hints, &fi);
    cr_assert(!ret, "fi_getinfo");

    ret = fi_fabric(fi->fabric_attr, &fab, NULL);
    cr_assert(!ret, "fi_fabric");

    ret = fi_domain(fab, fi, &dom, NULL);
    cr_assert(!ret, "fi_domain");

    ret = fi_open_ops(&dom->fid, FI_GNI_DOMAIN_OPS_1, 0,
                      (void **) &gni_domain_ops, NULL);

    /* Zero the unused AV attribute fields before setting type/count. */
    memset(&attr, 0, sizeof(attr));
    attr.type = FI_AV_MAP;
    attr.count = 16;

    ret = fi_av_open(dom, &attr, &av, NULL);
    cr_assert(!ret, "fi_av_open");

    ret = fi_endpoint(dom, fi, &ep[0], NULL);
    cr_assert(!ret, "fi_endpoint");

    cq_attr.format = FI_CQ_FORMAT_TAGGED;
    cq_attr.size = 1024;
    cq_attr.wait_obj = 0;

    ret = fi_cq_open(dom, &cq_attr, &send_cq, 0);
    cr_assert(!ret, "fi_cq_open");

    /*
     * Imitate shmem, etc.: bind the send CQ to the initiator endpoint
     * with the FI_TRANSMIT flag.
     */
    ret = fi_ep_bind(ep[0], &send_cq->fid, FI_TRANSMIT);
    cr_assert(!ret, "fi_ep_bind");

    ret = fi_getname(&ep[0]->fid, NULL, &addrlen);
    cr_assert(addrlen > 0);

    ep_name[0] = malloc(addrlen);
    cr_assert(ep_name[0] != NULL);
    ep_name[1] = malloc(addrlen);
    cr_assert(ep_name[1] != NULL);

    ret = fi_getname(&ep[0]->fid, ep_name[0], &addrlen);
    cr_assert(ret == FI_SUCCESS);

    ret = fi_endpoint(dom, fi, &ep[1], NULL);
    cr_assert(!ret, "fi_endpoint");

    cq_attr.format = FI_CQ_FORMAT_TAGGED;
    ret = fi_cq_open(dom, &cq_attr, &recv_cq, 0);
    cr_assert(!ret, "fi_cq_open");

    ret = fi_ep_bind(ep[1], &recv_cq->fid, FI_RECV);
    cr_assert(!ret, "fi_ep_bind");

    ret = fi_getname(&ep[1]->fid, ep_name[1], &addrlen);
    cr_assert(ret == FI_SUCCESS);

    ret = fi_av_insert(av, ep_name[0], 1, &gni_addr[0], 0, NULL);
    cr_assert(ret == 1);
    ret = fi_av_insert(av, ep_name[1], 1, &gni_addr[1], 0, NULL);
    cr_assert(ret == 1);

    ret = fi_ep_bind(ep[0], &av->fid, 0);
    cr_assert(!ret, "fi_ep_bind");
    ret = fi_ep_bind(ep[1], &av->fid, 0);
    cr_assert(!ret, "fi_ep_bind");

    ret = fi_enable(ep[0]);
    cr_assert(!ret, "fi_ep_enable");
    ret = fi_enable(ep[1]);
    cr_assert(!ret, "fi_ep_enable");

    target = malloc(BUF_SZ);
    assert(target);
    source = malloc(BUF_SZ);
    assert(source);

    ret = fi_mr_reg(dom, target, BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0,
                    &rem_mr, &target);
    cr_assert_eq(ret, 0);

    ret = fi_mr_reg(dom, source, BUF_SZ, FI_REMOTE_WRITE, 0, 0, 0,
                    &loc_mr, &source);
    cr_assert_eq(ret, 0);

    uc_source = malloc(BUF_SZ);
    assert(uc_source);

    mr_key = fi_mr_key(rem_mr);

    ret = fi_cntr_open(dom, &cntr_attr, &write_cntr, 0);
    cr_assert(!ret, "fi_cntr_open");

    ret = fi_ep_bind(ep[0], &write_cntr->fid, FI_WRITE);
    cr_assert(!ret, "fi_ep_bind");

    ret = fi_cntr_open(dom, &cntr_attr, &read_cntr, 0);
    cr_assert(!ret, "fi_cntr_open");

    ret = fi_ep_bind(ep[0], &read_cntr->fid, FI_READ);
    cr_assert(!ret, "fi_ep_bind");

    writes = reads = write_errs = read_errs = 0;
}
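/*
 * Illustrative sketch only: a one-sided write using the resources set
 * up above, completed through the bound write counter rather than the
 * CQ. XFER_LEN and the helper name are made up for this example; the
 * real tests derive the length from their parameters. The remote
 * address is passed as the target's virtual address, which assumes a
 * virtual-addressing MR mode; an offset-based mode would use an offset
 * into the registration instead.
 */
#define XFER_LEN 64

static void rma_write_sketch(void)
{
    ssize_t sz;
    int ret;

    /* ep[0] writes from source into ep[1]'s exposed target buffer. */
    sz = fi_write(ep[0], source, XFER_LEN, fi_mr_desc(loc_mr),
                  gni_addr[1], (uint64_t) target, mr_key, target);
    cr_assert_eq(sz, 0);

    /* Block until the FI_WRITE-bound counter reflects the completion. */
    ret = fi_cntr_wait(write_cntr, ++writes, -1);
    cr_assert(ret == FI_SUCCESS, "fi_cntr_wait");

    cr_assert(memcmp(source, target, XFER_LEN) == 0, "data mismatch");
}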
void cancel_setup(void)
{
    int ret = 0;
    struct fi_av_attr attr;
    size_t addrlen = 0;
    int rem_requested_key, loc_requested_key;

    hints = fi_allocinfo();
    cr_assert(hints, "fi_allocinfo");

    hints->domain_attr->mr_mode = GNIX_DEFAULT_MR_MODE;
    hints->domain_attr->cq_data_size = 4;
    hints->mode = mode_bits;
    hints->fabric_attr->prov_name = strdup("gni");

    ret = fi_getinfo(fi_version(), NULL, 0, 0, hints, &fi);
    cr_assert(!ret, "fi_getinfo");

    ret = fi_fabric(fi->fabric_attr, &fab, NULL);
    cr_assert(!ret, "fi_fabric");

    ret = fi_domain(fab, fi, &dom, NULL);
    cr_assert(!ret, "fi_domain");

    memset(&attr, 0, sizeof(attr));
    attr.type = FI_AV_MAP;
    attr.count = 16;

    ret = fi_av_open(dom, &attr, &av, NULL);
    cr_assert(!ret, "fi_av_open");

    ret = fi_endpoint(dom, fi, &ep[0], NULL);
    cr_assert(!ret, "fi_endpoint");

    cq_attr.format = FI_CQ_FORMAT_CONTEXT;
    cq_attr.size = 1024;
    cq_attr.wait_obj = 0;

    ret = fi_cq_open(dom, &cq_attr, &msg_cq[0], 0);
    cr_assert(!ret, "fi_cq_open");

    ret = fi_cq_open(dom, &cq_attr, &msg_cq[1], 0);
    cr_assert(!ret, "fi_cq_open");

    ret = fi_ep_bind(ep[0], &msg_cq[0]->fid, FI_SEND | FI_RECV);
    cr_assert(!ret, "fi_ep_bind");

    ret = fi_getname(&ep[0]->fid, NULL, &addrlen);
    cr_assert(addrlen > 0);

    ep_name[0] = malloc(addrlen);
    cr_assert(ep_name[0] != NULL);

    ret = fi_getname(&ep[0]->fid, ep_name[0], &addrlen);
    cr_assert(ret == FI_SUCCESS);

    ret = fi_endpoint(dom, fi, &ep[1], NULL);
    cr_assert(!ret, "fi_endpoint");

    ret = fi_ep_bind(ep[1], &msg_cq[1]->fid, FI_SEND | FI_RECV);
    cr_assert(!ret, "fi_ep_bind");

    ep_name[1] = malloc(addrlen);
    cr_assert(ep_name[1] != NULL);

    ret = fi_getname(&ep[1]->fid, ep_name[1], &addrlen);
    cr_assert(ret == FI_SUCCESS);

    ret = fi_av_insert(av, ep_name[0], 1, &gni_addr[0], 0, NULL);
    cr_assert(ret == 1);
    ret = fi_av_insert(av, ep_name[1], 1, &gni_addr[1], 0, NULL);
    cr_assert(ret == 1);

    ret = fi_ep_bind(ep[0], &av->fid, 0);
    cr_assert(!ret, "fi_ep_bind");
    ret = fi_ep_bind(ep[1], &av->fid, 0);
    cr_assert(!ret, "fi_ep_bind");

    ret = fi_enable(ep[0]);
    cr_assert(!ret, "fi_ep_enable");
    ret = fi_enable(ep[1]);
    cr_assert(!ret, "fi_ep_enable");

    target_base = malloc(GNIT_ALIGN_LEN(BUF_SZ));
    assert(target_base);
    target = GNIT_ALIGN_BUFFER(char *, target_base);

    source_base = malloc(GNIT_ALIGN_LEN(BUF_SZ));
    assert(source_base);
    source = GNIT_ALIGN_BUFFER(char *, source_base);

    rem_requested_key = USING_SCALABLE(fi) ? 1 : 0;
    loc_requested_key = USING_SCALABLE(fi) ? 2 : 0;

    ret = fi_mr_reg(dom, target, BUF_SZ, FI_REMOTE_WRITE, 0,
                    rem_requested_key, 0, &rem_mr, &target);
    cr_assert_eq(ret, 0);

    ret = fi_mr_reg(dom, source, BUF_SZ, FI_REMOTE_WRITE, 0,
                    loc_requested_key, 0, &loc_mr, &source);
    cr_assert_eq(ret, 0);

    if (USING_SCALABLE(fi)) {
        MR_ENABLE(rem_mr, target, BUF_SZ);
        MR_ENABLE(loc_mr, source, BUF_SZ);
    }

    mr_key = fi_mr_key(rem_mr);
}
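/*
 * Illustrative sketch only: the kind of scenario cancel_setup() exists
 * to support. A receive is posted and then cancelled before any sender
 * matches it, so the completion surfaces on the error queue with
 * err == FI_ECANCELED. The helper name and use of the buffer pointer as
 * the operation context are choices made up for this example.
 */
static void cancel_recv_sketch(void)
{
    struct fi_cq_err_entry err_entry = { 0 };
    struct fi_cq_entry comp;
    ssize_t ret;

    ret = fi_recv(ep[0], target, BUF_SZ, fi_mr_desc(rem_mr),
                  gni_addr[1], target);
    cr_assert(ret == 0, "fi_recv");

    ret = fi_cancel(&ep[0]->fid, target);
    cr_assert(ret == FI_SUCCESS, "fi_cancel");

    /* The cancelled operation is reported as an error completion. */
    do {
        ret = fi_cq_read(msg_cq[0], &comp, 1);
    } while (ret == -FI_EAGAIN);
    cr_assert(ret == -FI_EAVAIL, "expected error completion");

    ret = fi_cq_readerr(msg_cq[0], &err_entry, 0);
    cr_assert(ret == 1, "fi_cq_readerr");
    cr_assert(err_entry.err == FI_ECANCELED, "expected FI_ECANCELED");
    cr_assert(err_entry.op_context == target, "wrong context");
}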
static int alloc_ep_res(struct fid_ep *sep)
{
    struct fi_cq_attr cq_attr;
    struct fi_rx_attr rx_attr;
    struct fi_tx_attr tx_attr;
    struct fi_av_attr av_attr;
    int i, ret;

    buffer_size = test_size[TEST_CNT - 1].size;
    buf = malloc(buffer_size);

    scq = calloc(ctx_cnt, sizeof *scq);
    rcq = calloc(ctx_cnt, sizeof *rcq);
    tx_ep = calloc(ctx_cnt, sizeof *tx_ep);
    rx_ep = calloc(ctx_cnt, sizeof *rx_ep);
    remote_rx_addr = calloc(ctx_cnt, sizeof *remote_rx_addr);

    if (!buf || !scq || !rcq || !tx_ep || !rx_ep || !remote_rx_addr) {
        perror("malloc");
        return -1;
    }

    memset(&cq_attr, 0, sizeof cq_attr);
    cq_attr.format = FI_CQ_FORMAT_CONTEXT;
    cq_attr.wait_obj = FI_WAIT_NONE;
    cq_attr.size = rx_depth;

    for (i = 0; i < ctx_cnt; i++) {
        /* Create TX contexts: tx_ep */
        ret = fi_tx_context(sep, i, &tx_attr, &tx_ep[i], NULL);
        if (ret) {
            FT_PRINTERR("fi_tx_context", ret);
            goto err1;
        }

        ret = fi_cq_open(dom, &cq_attr, &scq[i], NULL);
        if (ret) {
            FT_PRINTERR("fi_cq_open", ret);
            goto err2;
        }
    }

    for (i = 0; i < ctx_cnt; i++) {
        /* Create RX contexts: rx_ep */
        ret = fi_rx_context(sep, i, &rx_attr, &rx_ep[i], NULL);
        if (ret) {
            FT_PRINTERR("fi_rx_context", ret);
            goto err3;
        }

        ret = fi_cq_open(dom, &cq_attr, &rcq[i], NULL);
        if (ret) {
            FT_PRINTERR("fi_cq_open", ret);
            goto err4;
        }
    }

    ret = fi_mr_reg(dom, buf, buffer_size, 0, 0, 0, 0, &mr, NULL);
    if (ret) {
        FT_PRINTERR("fi_mr_reg", ret);
        goto err5;
    }

    /* Get number of bits needed to represent ctx_cnt */
    while (ctx_cnt >> ++rx_ctx_bits)
        ;

    memset(&av_attr, 0, sizeof av_attr);
    av_attr.type = fi->domain_attr->av_type ?
            fi->domain_attr->av_type : FI_AV_MAP;
    av_attr.count = 1;
    av_attr.rx_ctx_bits = rx_ctx_bits;

    /* Open Address Vector */
    ret = fi_av_open(dom, &av_attr, &av, NULL);
    if (ret) {
        FT_PRINTERR("fi_av_open", ret);
        goto err6;
    }

    return 0;

err6:
    fi_close(&mr->fid);
err5:
    FT_CLOSEV(rcq, ctx_cnt);
err4:
    FT_CLOSEV(rx_ep, ctx_cnt);
err3:
    FT_CLOSEV(scq, ctx_cnt);
err2:
    FT_CLOSEV(tx_ep, ctx_cnt);
err1:
    free(buf);
    free(rcq);
    free(scq);
    free(tx_ep);
    free(rx_ep);
    free(remote_rx_addr);
    return ret;
}
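/*
 * Illustrative sketch only: what typically follows the allocation above
 * for a scalable endpoint. Each TX/RX context is bound to its own CQ
 * and enabled, and peer RX contexts are addressed by combining the AV
 * entry with the context index via fi_rx_addr() using the rx_ctx_bits
 * computed above. The dest_addr parameter and helper name are
 * assumptions standing in for the address the real test obtains from
 * fi_av_insert() for its peer.
 */
static int bind_contexts_sketch(fi_addr_t dest_addr)
{
    int i, ret;

    for (i = 0; i < ctx_cnt; i++) {
        ret = fi_ep_bind(tx_ep[i], &scq[i]->fid, FI_SEND);
        if (ret) {
            FT_PRINTERR("fi_ep_bind", ret);
            return ret;
        }
        ret = fi_enable(tx_ep[i]);
        if (ret) {
            FT_PRINTERR("fi_enable", ret);
            return ret;
        }

        ret = fi_ep_bind(rx_ep[i], &rcq[i]->fid, FI_RECV);
        if (ret) {
            FT_PRINTERR("fi_ep_bind", ret);
            return ret;
        }
        ret = fi_enable(rx_ep[i]);
        if (ret) {
            FT_PRINTERR("fi_enable", ret);
            return ret;
        }

        /* Address of the peer's i-th RX context. */
        remote_rx_addr[i] = fi_rx_addr(dest_addr, i, rx_ctx_bits);
    }
    return 0;
}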