/* init the transport by its name */
static ucs_status_t init_iface(char *dev_name, char *tl_name,
                               struct iface_info *iface_p)
{
    ucs_status_t status;
    uct_iface_config_t *config; /* Defines interface configuration options */

    /* Read transport-specific interface configuration */
    status = uct_iface_config_read(tl_name, NULL, NULL, &config);
    CHKERR_JUMP(UCS_OK != status, "setup iface_config", error_ret);

    /* Open communication interface */
    status = uct_iface_open(iface_p->pd, iface_p->worker, tl_name, dev_name, 0,
                            config, &iface_p->iface);
    uct_config_release(config);
    CHKERR_JUMP(UCS_OK != status, "open temporary interface", error_ret);

    /* Get interface attributes */
    status = uct_iface_query(iface_p->iface, &iface_p->attr);
    CHKERR_JUMP(UCS_OK != status, "query iface", error_iface);

    /* Check if current device and transport support short active messages */
    if (iface_p->attr.cap.flags & UCT_IFACE_FLAG_AM_SHORT) {
        return UCS_OK;
    }

error_iface:
    uct_iface_close(iface_p->iface);
error_ret:
    return UCS_ERR_UNSUPPORTED;
}
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_md_resource_desc_t *md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned i, num_md_resources;
    unsigned j, num_tl_resources;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        goto out;
    }

    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_resources[i].md_name, NULL, NULL,
                                    &md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_open(md_resources[i].md_name, md_config, &md);
        uct_config_release(md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_query_tl_resources(md, &tl_resources,
                                           &num_tl_resources);
        if (status != UCS_OK) {
            uct_md_close(md);
            goto out_release_md_resources;
        }

        for (j = 0; j < num_tl_resources; ++j) {
            if (!strcmp(perf->params.uct.tl_name, tl_resources[j].tl_name) &&
                !strcmp(perf->params.uct.dev_name, tl_resources[j].dev_name)) {
                uct_release_tl_resource_list(tl_resources);
                perf->uct.md = md;
                status       = UCS_OK;
                goto out_release_md_resources;
            }
        }

        uct_md_close(md);
        uct_release_tl_resource_list(tl_resources);
    }

    ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
              perf->params.uct.dev_name);
    status = UCS_ERR_NO_DEVICE;

out_release_md_resources:
    uct_release_md_resource_list(md_resources);
out:
    return status;
}
/* Device and transport to be used are selected by matching the requested
 * device/transport names against the available resources */
static ucs_status_t dev_tl_lookup(const char *dev_name, const char *tl_name,
                                  struct iface_info *iface_p)
{
    int i;
    int j;
    ucs_status_t status;
    uct_md_resource_desc_t *md_resources; /* Memory domain resource descriptor */
    uct_tl_resource_desc_t *tl_resources; /* Communication resource descriptor */
    unsigned num_md_resources;            /* Number of memory domain resources */
    unsigned num_tl_resources;            /* Number of transport resources */
    uct_md_config_t *md_config;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    CHKERR_JUMP(UCS_OK != status, "query for protected domain resources",
                error_ret);

    /* Iterate through memory domain resources */
    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_resources[i].md_name, NULL, NULL,
                                    &md_config);
        CHKERR_JUMP(UCS_OK != status, "read PD config", release_pd);

        status = uct_md_open(md_resources[i].md_name, md_config, &iface_p->pd);
        uct_config_release(md_config);
        CHKERR_JUMP(UCS_OK != status, "open protected domains", release_pd);

        status = uct_md_query_tl_resources(iface_p->pd, &tl_resources,
                                           &num_tl_resources);
        CHKERR_JUMP(UCS_OK != status, "query transport resources", close_pd);

        /* Go through each available transport and find the proper name */
        for (j = 0; j < num_tl_resources; ++j) {
            if (!strcmp(dev_name, tl_resources[j].dev_name) &&
                !strcmp(tl_name, tl_resources[j].tl_name)) {
                status = init_iface(tl_resources[j].dev_name,
                                    tl_resources[j].tl_name, iface_p);
                if (UCS_OK == status) {
                    printf("Using %s with %s.\n", tl_resources[j].dev_name,
                           tl_resources[j].tl_name);
                    fflush(stdout);
                    uct_release_tl_resource_list(tl_resources);
                    goto release_pd;
                }
            }
        }

        uct_release_tl_resource_list(tl_resources);
        uct_md_close(iface_p->pd);
    }

    fprintf(stderr, "No supported (dev/tl) found (%s/%s)\n", dev_name, tl_name);
    status = UCS_ERR_UNSUPPORTED;

release_pd:
    uct_release_md_resource_list(md_resources);
error_ret:
    return status;
close_pd:
    uct_md_close(iface_p->pd);
    goto release_pd;
}
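/* A minimal sketch of how a caller might drive init_iface()/dev_tl_lookup()
 * above. The struct iface_info layout is reconstructed from the fields those
 * functions dereference (pd, worker, iface, attr), and the async/worker calls
 * mirror the forms already used elsewhere in this section; open_am_iface() is
 * an illustrative name, not part of the original example. */

struct iface_info {
    uct_md_h         pd;     /* memory (protection) domain */
    uct_worker_h     worker; /* progress engine */
    uct_iface_h      iface;  /* communication interface */
    uct_iface_attr_t attr;   /* interface attributes */
};

static ucs_status_t open_am_iface(const char *dev_name, const char *tl_name,
                                  ucs_async_context_t *async,
                                  struct iface_info *iface_p)
{
    ucs_status_t status;

    /* The worker must exist before init_iface() uses iface_p->worker */
    status = uct_worker_create(async, UCS_THREAD_MODE_SINGLE, &iface_p->worker);
    if (status != UCS_OK) {
        return status;
    }

    /* On success the pd and iface in iface_p stay open and must eventually be
     * closed with uct_iface_close()/uct_md_close() before destroying worker */
    status = dev_tl_lookup(dev_name, tl_name, iface_p);
    if (status != UCS_OK) {
        uct_worker_destroy(iface_p->worker);
    }
    return status;
}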
/* Checks if the device and transport are supported by UCX */
static ucs_status_t resource_supported(char *dev_name, char *tl_name,
                                       int kill_iface)
{
    ucs_status_t status;

    /* Read transport-specific interface configuration */
    status = uct_iface_config_read(tl_name, NULL, NULL, &iface_config);
    if (UCS_OK != status) {
        fprintf(stderr, "Failed to setup iface_config.\n");
        fflush(stderr);
        goto error0;
    }

    /* Open communication interface */
    status = uct_iface_open(pd, worker, tl_name, dev_name, 0, iface_config,
                            &iface);
    uct_config_release(iface_config);
    if (UCS_OK != status) {
        fprintf(stderr, "Failed to open temporary interface.\n");
        fflush(stderr);
        goto error0;
    }

    /* Get interface attributes */
    status = uct_iface_query(iface, &iface_attr);
    if (UCS_OK != status) {
        fprintf(stderr, "Failed to query iface.\n");
        fflush(stderr);
        goto error_iface0;
    }

    /* Check if current device and transport support short active messages */
    if (iface_attr.cap.flags & UCT_IFACE_FLAG_AM_SHORT) {
        if (kill_iface) {
            uct_iface_close(iface);
        }
        return UCS_OK;
    }

    /* Unsupported: close the temporary interface before returning */
    status = UCS_ERR_UNSUPPORTED;

error_iface0:
    uct_iface_close(iface);
error0:
    return status;
}
static ucs_status_t ucp_fill_resources(ucp_context_h context,
                                       const ucp_config_t *config)
{
    unsigned num_tl_resources;
    unsigned num_md_resources;
    uct_md_resource_desc_t *md_rscs;
    ucs_status_t status;
    ucp_rsc_index_t i;
    unsigned md_index;
    uct_md_h md;
    uct_md_config_t *md_config;
    uint64_t masks[UCT_DEVICE_TYPE_LAST] = {0};

    /* if we got here then num_resources > 0.
     * if the user's device list is empty, there is no match */
    if ((0 == config->devices[UCT_DEVICE_TYPE_NET].count) &&
        (0 == config->devices[UCT_DEVICE_TYPE_SHM].count) &&
        (0 == config->devices[UCT_DEVICE_TYPE_ACC].count) &&
        (0 == config->devices[UCT_DEVICE_TYPE_SELF].count)) {
        ucs_error("The device lists are empty. Please specify the devices you would like to use "
                  "or omit the UCX_*_DEVICES so that the default will be used.");
        status = UCS_ERR_NO_ELEM;
        goto err;
    }

    /* if we got here then num_resources > 0.
     * if the user's tls list is empty, there is no match */
    if (0 == config->tls.count) {
        ucs_error("The TLs list is empty. Please specify the transports you would like to use "
                  "or omit the UCX_TLS so that the default will be used.");
        status = UCS_ERR_NO_ELEM;
        goto err;
    }

    /* List memory domain resources */
    status = uct_query_md_resources(&md_rscs, &num_md_resources);
    if (status != UCS_OK) {
        goto err;
    }

    /* Sort md's by name, to increase the likelihood of reusing the same ep
     * configuration (since remote md map is part of the key).
     */
    qsort(md_rscs, num_md_resources, sizeof(*md_rscs), ucp_md_rsc_compare_name);

    /* Error check: Make sure there is at least one MD */
    if (num_md_resources == 0) {
        ucs_error("No md resources found");
        status = UCS_ERR_NO_DEVICE;
        goto err_release_md_resources;
    }

    context->num_mds  = 0;
    context->md_rscs  = NULL;
    context->mds      = NULL;
    context->md_attrs = NULL;
    context->num_tls  = 0;
    context->tl_rscs  = NULL;

    /* Allocate array of MD resources we would actually use */
    context->md_rscs = ucs_calloc(num_md_resources, sizeof(*context->md_rscs),
                                  "ucp_md_resources");
    if (context->md_rscs == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err_free_context_resources;
    }

    /* Allocate array of memory domains */
    context->mds = ucs_calloc(num_md_resources, sizeof(*context->mds),
                              "ucp_mds");
    if (context->mds == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err_free_context_resources;
    }

    /* Allocate array of memory domains attributes */
    context->md_attrs = ucs_calloc(num_md_resources, sizeof(*context->md_attrs),
                                   "ucp_md_attrs");
    if (context->md_attrs == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err_free_context_resources;
    }

    /* Open all memory domains, keep only those which have at least one TL
     * resource selected on them.
     */
    md_index = 0;
    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_rscs[i].md_name, NULL, NULL, &md_config);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        status = uct_md_open(md_rscs[i].md_name, md_config, &md);
        uct_config_release(md_config);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        context->md_rscs[md_index] = md_rscs[i];
        context->mds[md_index]     = md;

        /* Save MD attributes */
        status = uct_md_query(md, &context->md_attrs[md_index]);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        /* Add communication resources of each MD */
        status = ucp_add_tl_resources(context, md, md_index, config,
                                      &num_tl_resources, masks);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        /* If the MD does not have transport resources, don't use it */
        if (num_tl_resources > 0) {
            ++md_index;
            ++context->num_mds;
        } else {
            ucs_debug("closing md %s because it has no selected transport resources",
                      md_rscs[i].md_name);
            uct_md_close(md);
        }
    }

    /* Error check: Make sure there is at least one transport */
    if (0 == context->num_tls) {
        ucs_error("There are no available resources matching the configured criteria");
        status = UCS_ERR_NO_DEVICE;
        goto err_free_context_resources;
    }

    if (context->num_mds > UCP_MD_INDEX_BITS) {
        ucs_error("Only up to %d memory domains are supported (have: %d)",
                  UCP_MD_INDEX_BITS, context->num_mds);
        status = UCS_ERR_EXCEEDS_LIMIT;
        goto err_release_md_resources;
    }

    /* Notify the user if there are devices from the command line that are not available */
    ucp_check_unavailable_devices(config->devices, masks);

    /* Error check: Make sure there are not too many transports */
    if (context->num_tls >= UCP_MAX_RESOURCES) {
        ucs_error("Exceeded resources limit (%u requested, up to %d are supported)",
                  context->num_tls, UCP_MAX_RESOURCES);
        status = UCS_ERR_EXCEEDS_LIMIT;
        goto err_free_context_resources;
    }

    status = ucp_check_tl_names(context);
    if (status != UCS_OK) {
        goto err_free_context_resources;
    }

    uct_release_md_resource_list(md_rscs);
    return UCS_OK;

err_free_context_resources:
    ucp_free_resources(context);
err_release_md_resources:
    uct_release_md_resource_list(md_rscs);
err:
    return status;
}
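/* ucp_fill_resources() above is an internal step of UCP context creation; a
 * minimal sketch of the public-API path that exercises it, assuming the
 * field_mask-based ucp_params_t of the same API generation (UCX_TLS and
 * UCX_*_DEVICES are picked up by ucp_config_read() and applied during
 * ucp_init()); the helper name create_ucp_context() is illustrative only. */

static ucs_status_t create_ucp_context(ucp_context_h *context_p)
{
    ucp_params_t params = {0};
    ucp_config_t *config;
    ucs_status_t status;

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        return status;
    }

    params.field_mask = UCP_PARAM_FIELD_FEATURES;
    params.features   = UCP_FEATURE_TAG; /* request tag-matching transports */

    /* ucp_init() enumerates MDs/TLs and filters them as shown above */
    status = ucp_init(&params, config, context_p);
    ucp_config_release(config);
    return status;
}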
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf,
                                   ucx_perf_params_t *params)
{
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    status = uct_perf_test_check_capabilities(params, perf->uct.iface);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}

static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/* Device and transport to be used are determined by minimum latency */
static ucs_status_t dev_tl_lookup()
{
    int i;
    int j;
    uint64_t min_latency = UINT64_MAX;
    int pd_index = -1;
    int tl_index = -1;
    ucs_status_t status;
    uct_pd_resource_desc_t *pd_resources; /* Protection domain resource descriptor */
    uct_tl_resource_desc_t *tl_resources; /* Communication resource descriptor */
    unsigned num_pd_resources;            /* Number of protection domain resources */
    unsigned num_tl_resources;            /* Number of transport resources */
    uct_pd_config_t *pd_config;

    status = uct_query_pd_resources(&pd_resources, &num_pd_resources);
    if (UCS_OK != status) {
        fprintf(stderr, "Failed to query for protected domain resources.\n");
        goto out1;
    }

    /* Iterate through protection domain resources */
    for (i = 0; i < num_pd_resources; ++i) {
        status = uct_pd_config_read(pd_resources[i].pd_name, NULL, NULL,
                                    &pd_config);
        if (status != UCS_OK) {
            goto release1;
        }

        status = uct_pd_open(pd_resources[i].pd_name, pd_config, &pd);
        uct_config_release(pd_config);
        if (UCS_OK != status) {
            fprintf(stderr, "Failed to open protected domain.\n");
            fflush(stderr);
            goto release1;
        }

        status = uct_pd_query_tl_resources(pd, &tl_resources,
                                           &num_tl_resources);
        if (UCS_OK != status) {
            fprintf(stderr, "Failed to query transport resources.\n");
            fflush(stderr);
            uct_pd_close(pd);
            goto release1;
        }

        /* Go through each available transport resource for a particular
         * protection domain and keep track of the lowest latency */
        for (j = 0; j < num_tl_resources; ++j) {
            status = resource_supported(tl_resources[j].dev_name,
                                        tl_resources[j].tl_name, 1);
            if (UCS_OK == status) {
                if (tl_resources[j].latency < min_latency) {
                    min_latency = tl_resources[j].latency;
                    pd_index    = i;
                    tl_index    = j;
                }
            }
        }

        uct_release_tl_resource_list(tl_resources);
        uct_pd_close(pd);
    }

    /* Check if any valid device/transport found */
    if ((-1 == pd_index) || (-1 == tl_index)) {
        uct_release_pd_resource_list(pd_resources);
        return UCS_ERR_UNSUPPORTED;
    }

    /* IMPORTANT: Certain functions that operate on an interface rely on a
     * pointer to the protection domain that created it */
    /* Reopen the selected protection domain */
    status = uct_pd_config_read(pd_resources[pd_index].pd_name, NULL, NULL,
                                &pd_config);
    if (status != UCS_OK) {
        goto release1;
    }

    status = uct_pd_open(pd_resources[pd_index].pd_name, pd_config, &pd);
    uct_config_release(pd_config);
    if (UCS_OK != status) {
        fprintf(stderr, "Failed to open final protected domain.\n");
        fflush(stderr);
        goto release1;
    }

    /* Query the transport resources of the reopened protection domain */
    status = uct_pd_query_tl_resources(pd, &tl_resources, &num_tl_resources);
    if (UCS_OK != status) {
        fprintf(stderr, "Failed to query final transport resources.\n");
        fflush(stderr);
        uct_pd_close(pd);
        goto release1;
    }

    /* Call resource_supported() again to set the interface */
    status = resource_supported(tl_resources[tl_index].dev_name,
                                tl_resources[tl_index].tl_name, 0);
    if (UCS_OK != status) {
        fprintf(stderr, "Failed to initialize final interface.\n");
        fflush(stderr);
        uct_release_tl_resource_list(tl_resources);
        uct_pd_close(pd);
        goto release1;
    }

    printf("Using %s with %s.\n", tl_resources[tl_index].dev_name,
           tl_resources[tl_index].tl_name);
    fflush(stdout);

    uct_release_tl_resource_list(tl_resources);

release1:
    uct_release_pd_resource_list(pd_resources);
out1:
    return status;
}
void print_uct_config(ucs_config_print_flags_t print_flags, const char *tl_name)
{
    uct_md_resource_desc_t *md_resources;
    unsigned md_rsc_index, num_md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned tl_rsc_index, num_tl_resources;
    uct_iface_config_t *config;
    char tl_names[UINT8_MAX][UCT_TL_NAME_MAX];
    char cfg_title[UCT_TL_NAME_MAX + 128];
    unsigned i, num_tls;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        return;
    }

    uct_md_component_config_print(print_flags);

    num_tls = 0;
    for (md_rsc_index = 0; md_rsc_index < num_md_resources; ++md_rsc_index) {
        status = uct_md_config_read(md_resources[md_rsc_index].md_name, NULL,
                                    NULL, &md_config);
        if (status != UCS_OK) {
            continue;
        }

        status = uct_md_open(md_resources[md_rsc_index].md_name, md_config, &md);
        uct_config_release(md_config);
        if (status != UCS_OK) {
            continue;
        }

        status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
        if (status != UCS_OK) {
            uct_md_close(md);
            continue;
        }

        for (tl_rsc_index = 0; tl_rsc_index < num_tl_resources; ++tl_rsc_index) {
            i = 0;
            while (i < num_tls) {
                if (!strcmp(tl_names[i], tl_resources[tl_rsc_index].tl_name)) {
                    break;
                }
                ++i;
            }

            /* Make sure this transport is not inserted to the array before, and
             * if user selects a specific transport - also make sure this is it. */
            if ((i == num_tls) &&
                ((tl_name == NULL) ||
                 !strcmp(tl_name, tl_resources[tl_rsc_index].tl_name))) {
                strncpy(tl_names[num_tls], tl_resources[tl_rsc_index].tl_name,
                        UCT_TL_NAME_MAX);
                ++num_tls;
            }
        }

        uct_release_tl_resource_list(tl_resources);
        uct_md_close(md);
    }

    uct_release_md_resource_list(md_resources);

    for (i = 0; i < num_tls; ++i) {
        snprintf(cfg_title, sizeof(cfg_title), "%s transport configuration",
                 tl_names[i]);

        status = uct_iface_config_read(tl_names[i], NULL, NULL, &config);
        if (status != UCS_OK) {
            printf("# < Failed to read configuration >\n");
            continue;
        }

        uct_config_print(config, stdout, cfg_title, print_flags);
        uct_config_release(config);
    }
}
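/* Illustrative call sites for print_uct_config() above (a sketch: the
 * UCS_CONFIG_PRINT_* flags come from the UCS config headers, the "tcp"
 * transport name and the exact flag combinations are only examples). */
static void dump_transport_configs(void)
{
    /* Print configuration tables for every discovered transport */
    print_uct_config(UCS_CONFIG_PRINT_CONFIG | UCS_CONFIG_PRINT_HEADER, NULL);

    /* Print the configuration of a single transport, with documentation */
    print_uct_config(UCS_CONFIG_PRINT_CONFIG | UCS_CONFIG_PRINT_DOC, "tcp");
}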
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf,
                                   ucx_perf_params_t *params)
{
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .tl_name     = params->uct.tl_name,
        .dev_name    = params->uct.dev_name,
        .rx_headroom = 0
    };

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_iface_config_read(params->uct.tl_name, NULL, NULL,
                                   &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    status = uct_perf_test_check_capabilities(params, perf->uct.iface);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}

static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
static ucs_status_t ucp_fill_resources(ucp_context_h context,
                                       const ucp_config_t *config)
{
    unsigned num_tl_resources;
    unsigned num_pd_resources;
    uct_pd_resource_desc_t *pd_rscs;
    ucs_status_t status;
    ucp_rsc_index_t i;
    unsigned pd_index;
    uct_pd_h pd;
    uct_pd_config_t *pd_config;
    uint64_t masks[UCT_DEVICE_TYPE_LAST] = {0};

    /* if we got here then num_resources > 0.
     * if the user's device list is empty, there is no match */
    if ((0 == config->devices[UCT_DEVICE_TYPE_NET].count) &&
        (0 == config->devices[UCT_DEVICE_TYPE_SHM].count) &&
        (0 == config->devices[UCT_DEVICE_TYPE_ACC].count)) {
        ucs_error("The device lists are empty. Please specify the devices you would like to use "
                  "or omit the UCX_*_DEVICES so that the default will be used.");
        status = UCS_ERR_NO_ELEM;
        goto err;
    }

    /* if we got here then num_resources > 0.
     * if the user's tls list is empty, there is no match */
    if (0 == config->tls.count) {
        ucs_error("The TLs list is empty. Please specify the transports you would like to use "
                  "or omit the UCX_TLS so that the default will be used.");
        status = UCS_ERR_NO_ELEM;
        goto err;
    }

    /* List protection domain resources */
    status = uct_query_pd_resources(&pd_rscs, &num_pd_resources);
    if (status != UCS_OK) {
        goto err;
    }

    /* Error check: Make sure there is at least one PD */
    if (num_pd_resources == 0) {
        ucs_error("No pd resources found");
        status = UCS_ERR_NO_DEVICE;
        goto err_release_pd_resources;
    }

    if (num_pd_resources >= UCP_MAX_PDS) {
        ucs_error("Only up to %ld PDs are supported", UCP_MAX_PDS);
        status = UCS_ERR_EXCEEDS_LIMIT;
        goto err_release_pd_resources;
    }

    context->num_pds  = 0;
    context->pd_rscs  = NULL;
    context->pds      = NULL;
    context->pd_attrs = NULL;
    context->num_tls  = 0;
    context->tl_rscs  = NULL;

    /* Allocate array of PD resources we would actually use */
    context->pd_rscs = ucs_calloc(num_pd_resources, sizeof(*context->pd_rscs),
                                  "ucp_pd_resources");
    if (context->pd_rscs == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err_free_context_resources;
    }

    /* Allocate array of protection domains */
    context->pds = ucs_calloc(num_pd_resources, sizeof(*context->pds),
                              "ucp_pds");
    if (context->pds == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err_free_context_resources;
    }

    /* Allocate array of protection domains attributes */
    context->pd_attrs = ucs_calloc(num_pd_resources, sizeof(*context->pd_attrs),
                                   "ucp_pd_attrs");
    if (context->pd_attrs == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err_free_context_resources;
    }

    /* Open all protection domains, keep only those which have at least one TL
     * resource selected on them.
     */
    pd_index = 0;
    for (i = 0; i < num_pd_resources; ++i) {
        status = uct_pd_config_read(pd_rscs[i].pd_name, NULL, NULL, &pd_config);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        status = uct_pd_open(pd_rscs[i].pd_name, pd_config, &pd);
        uct_config_release(pd_config);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        context->pd_rscs[pd_index] = pd_rscs[i];
        context->pds[pd_index]     = pd;

        /* Save PD attributes */
        status = uct_pd_query(pd, &context->pd_attrs[pd_index]);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        /* Add communication resources of each PD */
        status = ucp_add_tl_resources(context, pd, pd_index, config,
                                      &num_tl_resources, masks);
        if (status != UCS_OK) {
            goto err_free_context_resources;
        }

        /* If the PD does not have transport resources, don't use it */
        if (num_tl_resources > 0) {
            ++pd_index;
            ++context->num_pds;
        } else {
            ucs_debug("closing pd %s because it has no selected transport resources",
                      pd_rscs[i].pd_name);
            uct_pd_close(pd);
        }
    }

    /* Error check: Make sure there is at least one transport */
    if (0 == context->num_tls) {
        ucs_error("There are no available resources matching the configured criteria");
        status = UCS_ERR_NO_DEVICE;
        goto err_free_context_resources;
    }

    /* Notify the user if there are devices from the command line that are not available */
    ucp_check_unavailable_devices(config->devices, masks);

    /* Error check: Make sure there are not too many transports */
    if (context->num_tls >= UCP_MAX_RESOURCES) {
        ucs_error("Exceeded resources limit (%u requested, up to %d are supported)",
                  context->num_tls, UCP_MAX_RESOURCES);
        status = UCS_ERR_EXCEEDS_LIMIT;
        goto err_free_context_resources;
    }

    uct_release_pd_resource_list(pd_rscs);
    return UCS_OK;

err_free_context_resources:
    ucp_free_resources(context);
err_release_pd_resources:
    uct_release_pd_resource_list(pd_rscs);
err:
    return status;
}
static void print_iface_info(uct_worker_h worker, uct_md_h md,
                             uct_tl_resource_desc_t *resource)
{
    uct_iface_config_t *iface_config;
    uct_iface_attr_t iface_attr;
    ucs_status_t status;
    uct_iface_h iface;
    char buf[200] = {0};

    status = uct_iface_config_read(resource->tl_name, NULL, NULL, &iface_config);
    if (status != UCS_OK) {
        return;
    }

    printf("#   Device: %s\n", resource->dev_name);

    status = uct_iface_open(md, worker, resource->tl_name, resource->dev_name,
                            0, iface_config, &iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        printf("#   < failed to open interface >\n");
        return;
    }

    printf("#\n");
    printf("#      capabilities:\n");

    status = uct_iface_query(iface, &iface_attr);
    if (status != UCS_OK) {
        printf("#   < failed to query interface >\n");
    } else {
        printf("#            bandwidth: %.2f MB/sec\n",
               iface_attr.bandwidth / (1024 * 1024));
        printf("#              latency: %.0f nsec\n", iface_attr.latency * 1e9);
        printf("#             overhead: %.0f nsec\n", iface_attr.overhead * 1e9);

        PRINT_CAP(PUT_SHORT, iface_attr.cap.flags, iface_attr.cap.put.max_short);
        PRINT_CAP(PUT_BCOPY, iface_attr.cap.flags, iface_attr.cap.put.max_bcopy);
        PRINT_CAP(PUT_ZCOPY, iface_attr.cap.flags, iface_attr.cap.put.max_zcopy);
        PRINT_CAP(GET_BCOPY, iface_attr.cap.flags, iface_attr.cap.get.max_bcopy);
        PRINT_CAP(GET_ZCOPY, iface_attr.cap.flags, iface_attr.cap.get.max_zcopy);
        PRINT_CAP(AM_SHORT,  iface_attr.cap.flags, iface_attr.cap.am.max_short);
        PRINT_CAP(AM_BCOPY,  iface_attr.cap.flags, iface_attr.cap.am.max_bcopy);
        PRINT_CAP(AM_ZCOPY,  iface_attr.cap.flags, iface_attr.cap.am.max_zcopy);
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_AM_BCOPY|UCT_IFACE_FLAG_AM_ZCOPY)) {
            printf("#            am header: %s\n",
                   size_limit_to_str(iface_attr.cap.am.max_hdr));
        }

        PRINT_ATOMIC_CAP(ATOMIC_ADD,   iface_attr.cap.flags);
        PRINT_ATOMIC_CAP(ATOMIC_FADD,  iface_attr.cap.flags);
        PRINT_ATOMIC_CAP(ATOMIC_SWAP,  iface_attr.cap.flags);
        PRINT_ATOMIC_CAP(ATOMIC_CSWAP, iface_attr.cap.flags);

        buf[0] = '\0';
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_CONNECT_TO_EP |
                                    UCT_IFACE_FLAG_CONNECT_TO_IFACE)) {
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
                strncat(buf, " to ep,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
                strncat(buf, " to iface,", sizeof(buf) - 1);
            }
            buf[strlen(buf) - 1] = '\0';
        } else {
            strncat(buf, " none", sizeof(buf) - 1);
        }
        printf("#           connection:%s\n", buf);

        printf("#       device address: %zu bytes\n", iface_attr.device_addr_len);
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            printf("#        iface address: %zu bytes\n", iface_attr.iface_addr_len);
        }
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            printf("#           ep address: %zu bytes\n", iface_attr.ep_addr_len);
        }

        buf[0] = '\0';
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_AM_ID      |
                                    UCT_IFACE_FLAG_ERRHANDLE_REMOTE_MEM |
                                    UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)) {
            if (iface_attr.cap.flags & (UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF |
                                        UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF |
                                        UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF)) {
                strncat(buf, " buffer (", sizeof(buf) - 1);
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF) {
                    strncat(buf, "short,", sizeof(buf) - 1);
                }
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF) {
                    strncat(buf, "bcopy,", sizeof(buf) - 1);
                }
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF) {
                    strncat(buf, "zcopy,", sizeof(buf) - 1);
                }
                buf[strlen(buf) - 1] = '\0';
                strncat(buf, "),", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_AM_ID) {
                strncat(buf, " active-message id,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_REMOTE_MEM) {
                strncat(buf, " remote access,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE) {
                strncat(buf, " peer failure,", sizeof(buf) - 1);
            }
            buf[strlen(buf) - 1] = '\0';
        } else {
            strncat(buf, " none", sizeof(buf) - 1);
        }
        printf("#       error handling:%s\n", buf);
    }

    uct_iface_close(iface);
    printf("#\n");
}
static void print_md_info(const char *md_name, int print_opts,
                          ucs_config_print_flags_t print_flags,
                          const char *req_tl_name)
{
    uct_tl_resource_desc_t *resources, tmp;
    unsigned resource_index, j, num_resources, count;
    ucs_status_t status;
    const char *tl_name;
    uct_md_config_t *md_config;
    uct_md_attr_t md_attr;
    uct_md_h md;

    status = uct_md_config_read(md_name, NULL, NULL, &md_config);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_md_open(md_name, md_config, &md);
    uct_config_release(md_config);
    if (status != UCS_OK) {
        printf("# < failed to open memory domain %s >\n", md_name);
        goto out;
    }

    status = uct_md_query_tl_resources(md, &resources, &num_resources);
    if (status != UCS_OK) {
        printf("# < failed to query memory domain resources >\n");
        goto out_close_md;
    }

    if (req_tl_name != NULL) {
        resource_index = 0;
        while (resource_index < num_resources) {
            if (!strcmp(resources[resource_index].tl_name, req_tl_name)) {
                break;
            }
            ++resource_index;
        }
        if (resource_index == num_resources) {
            /* no selected transport on the MD */
            goto out_free_list;
        }
    }

    status = uct_md_query(md, &md_attr);
    if (status != UCS_OK) {
        printf("# < failed to query memory domain >\n");
        goto out_free_list;
    } else {
        printf("#\n");
        printf("# Memory domain: %s\n", md_name);
        printf("#     component: %s\n", md_attr.component_name);
        if (md_attr.cap.flags & UCT_MD_FLAG_ALLOC) {
            printf("#      allocate: %s\n",
                   size_limit_to_str(md_attr.cap.max_alloc));
        }
        if (md_attr.cap.flags & UCT_MD_FLAG_REG) {
            printf("#      register: %s, cost: %.0f",
                   size_limit_to_str(md_attr.cap.max_reg),
                   md_attr.reg_cost.overhead * 1e9);
            if (md_attr.reg_cost.growth * 1e9 > 1e-3) {
                printf("+(%.3f*<SIZE>)", md_attr.reg_cost.growth * 1e9);
            }
            printf(" nsec\n");
        }
        printf("#    remote key: %zu bytes\n", md_attr.rkey_packed_size);
    }

    if (num_resources == 0) {
        printf("# < no supported devices found >\n");
        goto out_free_list;
    }

    resource_index = 0;
    while (resource_index < num_resources) {
        /* Gather all resources for this transport */
        tl_name = resources[resource_index].tl_name;
        count   = 1;
        for (j = resource_index + 1; j < num_resources; ++j) {
            if (!strcmp(tl_name, resources[j].tl_name)) {
                tmp                              = resources[count + resource_index];
                resources[count + resource_index] = resources[j];
                resources[j]                     = tmp;
                ++count;
            }
        }

        if ((req_tl_name == NULL) || !strcmp(tl_name, req_tl_name)) {
            print_tl_info(md, tl_name, &resources[resource_index], count,
                          print_opts, print_flags);
        }

        resource_index += count;
    }

out_free_list:
    uct_release_tl_resource_list(resources);
out_close_md:
    uct_md_close(md);
out:
    ;
}
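/* Hypothetical driver looping print_md_info() over every discovered memory
 * domain; the resource query/release calls mirror the other snippets in this
 * section, and the print_opts/print_flags arguments are whatever the caller
 * already has (placeholders here, not part of the original tool). */
static void print_all_md_info(int print_opts,
                              ucs_config_print_flags_t print_flags,
                              const char *req_tl_name)
{
    uct_md_resource_desc_t *md_resources;
    unsigned i, num_md_resources;
    ucs_status_t status;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        return;
    }

    for (i = 0; i < num_md_resources; ++i) {
        print_md_info(md_resources[i].md_name, print_opts, print_flags,
                      req_tl_name);
    }

    uct_release_md_resource_list(md_resources);
}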
static void print_iface_info(uct_worker_h worker, uct_md_h md,
                             uct_tl_resource_desc_t *resource)
{
    uct_iface_config_t *iface_config;
    uct_iface_attr_t iface_attr;
    ucs_status_t status;
    uct_iface_h iface;
    char buf[200] = {0};
    uct_iface_params_t iface_params = {
        .tl_name     = resource->tl_name,
        .dev_name    = resource->dev_name,
        .rx_headroom = 0
    };

    status = uct_iface_config_read(resource->tl_name, NULL, NULL, &iface_config);
    if (status != UCS_OK) {
        return;
    }

    printf("#   Device: %s\n", resource->dev_name);

    status = uct_iface_open(md, worker, &iface_params, iface_config, &iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        printf("#   < failed to open interface >\n");
        return;
    }

    printf("#\n");
    printf("#      capabilities:\n");

    status = uct_iface_query(iface, &iface_attr);
    if (status != UCS_OK) {
        printf("#   < failed to query interface >\n");
    } else {
        printf("#            bandwidth: %-.2f MB/sec\n",
               iface_attr.bandwidth / (1024 * 1024));
        printf("#              latency: %-.0f nsec\n", iface_attr.latency * 1e9);
        printf("#             overhead: %-.0f nsec\n", iface_attr.overhead * 1e9);

        PRINT_CAP(PUT_SHORT, iface_attr.cap.flags, iface_attr.cap.put.max_short);
        PRINT_CAP(PUT_BCOPY, iface_attr.cap.flags, iface_attr.cap.put.max_bcopy);
        PRINT_CAP(PUT_ZCOPY, iface_attr.cap.flags, iface_attr.cap.put.max_zcopy);
        PRINT_CAP(GET_BCOPY, iface_attr.cap.flags, iface_attr.cap.get.max_bcopy);
        PRINT_CAP(GET_ZCOPY, iface_attr.cap.flags, iface_attr.cap.get.max_zcopy);
        PRINT_CAP(AM_SHORT,  iface_attr.cap.flags, iface_attr.cap.am.max_short);
        PRINT_CAP(AM_BCOPY,  iface_attr.cap.flags, iface_attr.cap.am.max_bcopy);
        PRINT_CAP(AM_ZCOPY,  iface_attr.cap.flags, iface_attr.cap.am.max_zcopy);
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_AM_BCOPY|UCT_IFACE_FLAG_AM_ZCOPY)) {
            printf("#            am header: %s\n",
                   size_limit_to_str(iface_attr.cap.am.max_hdr));
        }

        PRINT_ATOMIC_CAP(ATOMIC_ADD,   iface_attr.cap.flags);
        PRINT_ATOMIC_CAP(ATOMIC_FADD,  iface_attr.cap.flags);
        PRINT_ATOMIC_CAP(ATOMIC_SWAP,  iface_attr.cap.flags);
        PRINT_ATOMIC_CAP(ATOMIC_CSWAP, iface_attr.cap.flags);

        buf[0] = '\0';
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_CONNECT_TO_EP |
                                    UCT_IFACE_FLAG_CONNECT_TO_IFACE)) {
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
                strncat(buf, " to ep,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
                strncat(buf, " to iface,", sizeof(buf) - 1);
            }
            buf[strlen(buf) - 1] = '\0';
        } else {
            strncat(buf, " none", sizeof(buf) - 1);
        }
        printf("#           connection:%s\n", buf);

        printf("#       device address: %zu bytes\n", iface_attr.device_addr_len);
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            printf("#        iface address: %zu bytes\n", iface_attr.iface_addr_len);
        }
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            printf("#           ep address: %zu bytes\n", iface_attr.ep_addr_len);
        }

        buf[0] = '\0';
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_AM_ID      |
                                    UCT_IFACE_FLAG_ERRHANDLE_REMOTE_MEM |
                                    UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)) {
            if (iface_attr.cap.flags & (UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF |
                                        UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF |
                                        UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF)) {
                strncat(buf, " buffer (", sizeof(buf) - 1);
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF) {
                    strncat(buf, "short,", sizeof(buf) - 1);
                }
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF) {
                    strncat(buf, "bcopy,", sizeof(buf) - 1);
                }
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF) {
                    strncat(buf, "zcopy,", sizeof(buf) - 1);
                }
                buf[strlen(buf) - 1] = '\0';
                strncat(buf, "),", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_AM_ID) {
                strncat(buf, " active-message id,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_REMOTE_MEM) {
                strncat(buf, " remote access,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE) {
                strncat(buf, " peer failure,", sizeof(buf) - 1);
            }
            buf[strlen(buf) - 1] = '\0';
        } else {
            strncat(buf, " none", sizeof(buf) - 1);
        }
        printf("#       error handling:%s\n", buf);
    }

    uct_iface_close(iface);
    printf("#\n");
}

static ucs_status_t print_tl_info(uct_md_h md, const char *tl_name,
                                  uct_tl_resource_desc_t *resources,
                                  unsigned num_resources,
                                  int print_opts,
                                  ucs_config_print_flags_t print_flags)
{
    ucs_async_context_t async;
    uct_worker_h worker;
    ucs_status_t status;
    unsigned i;

    status = ucs_async_context_init(&async, UCS_ASYNC_MODE_THREAD);
    if (status != UCS_OK) {
        return status;
    }

    /* coverity[alloc_arg] */
    status = uct_worker_create(&async, UCS_THREAD_MODE_MULTI, &worker);
    if (status != UCS_OK) {
        goto out;
    }

    printf("#\n");
    printf("# Transport: %s\n", tl_name);
    printf("#\n");

    if (num_resources == 0) {
        printf("# (No supported devices found)\n");
    }
    for (i = 0; i < num_resources; ++i) {
        ucs_assert(!strcmp(tl_name, resources[i].tl_name));
        print_iface_info(worker, md, &resources[i]);
    }

    uct_worker_destroy(worker);
out:
    ucs_async_context_cleanup(&async);
    return status;
}
static void print_iface_info(uct_worker_h worker, uct_md_h md,
                             uct_tl_resource_desc_t *resource)
{
    uct_iface_config_t *iface_config;
    uct_iface_attr_t iface_attr;
    ucs_status_t status;
    uct_iface_h iface;
    char buf[200] = {0};
    uct_iface_params_t iface_params = {
        .field_mask           = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
                                UCT_IFACE_PARAM_FIELD_DEVICE |
                                UCT_IFACE_PARAM_FIELD_STATS_ROOT |
                                UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
                                UCT_IFACE_PARAM_FIELD_CPU_MASK,
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = resource->tl_name,
        .mode.device.dev_name = resource->dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = uct_md_iface_config_read(md, resource->tl_name, NULL, NULL,
                                      &iface_config);
    if (status != UCS_OK) {
        return;
    }

    printf("#   Device: %s\n", resource->dev_name);

    status = uct_iface_open(md, worker, &iface_params, iface_config, &iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        printf("#   < failed to open interface >\n");
        return;
    }

    printf("#\n");
    printf("#      capabilities:\n");

    status = uct_iface_query(iface, &iface_attr);
    if (status != UCS_OK) {
        printf("#   < failed to query interface >\n");
    } else {
        printf("#            bandwidth: %-.2f MB/sec\n",
               iface_attr.bandwidth / UCS_MBYTE);
        printf("#              latency: %-.0f nsec",
               iface_attr.latency.overhead * 1e9);
        if (iface_attr.latency.growth > 0) {
            printf(" + %.0f * N\n", iface_attr.latency.growth * 1e9);
        } else {
            printf("\n");
        }
        printf("#             overhead: %-.0f nsec\n", iface_attr.overhead * 1e9);

        PRINT_CAP(PUT_SHORT, iface_attr.cap.flags, iface_attr.cap.put.max_short);
        PRINT_CAP(PUT_BCOPY, iface_attr.cap.flags, iface_attr.cap.put.max_bcopy);
        PRINT_ZCAP(PUT_ZCOPY, iface_attr.cap.flags, iface_attr.cap.put.min_zcopy,
                   iface_attr.cap.put.max_zcopy, iface_attr.cap.put.max_iov);

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_PUT_ZCOPY) {
            printf("#  put_opt_zcopy_align: %s\n",
                   size_limit_to_str(0, iface_attr.cap.put.opt_zcopy_align));
            printf("#        put_align_mtu: %s\n",
                   size_limit_to_str(0, iface_attr.cap.put.align_mtu));
        }

        PRINT_CAP(GET_SHORT, iface_attr.cap.flags, iface_attr.cap.get.max_short);
        PRINT_CAP(GET_BCOPY, iface_attr.cap.flags, iface_attr.cap.get.max_bcopy);
        PRINT_ZCAP(GET_ZCOPY, iface_attr.cap.flags, iface_attr.cap.get.min_zcopy,
                   iface_attr.cap.get.max_zcopy, iface_attr.cap.get.max_iov);
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_GET_ZCOPY) {
            printf("#  get_opt_zcopy_align: %s\n",
                   size_limit_to_str(0, iface_attr.cap.get.opt_zcopy_align));
            printf("#        get_align_mtu: %s\n",
                   size_limit_to_str(0, iface_attr.cap.get.align_mtu));
        }

        PRINT_CAP(AM_SHORT, iface_attr.cap.flags, iface_attr.cap.am.max_short);
        PRINT_CAP(AM_BCOPY, iface_attr.cap.flags, iface_attr.cap.am.max_bcopy);
        PRINT_ZCAP(AM_ZCOPY, iface_attr.cap.flags, iface_attr.cap.am.min_zcopy,
                   iface_attr.cap.am.max_zcopy, iface_attr.cap.am.max_iov);
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_AM_ZCOPY) {
            printf("#   am_opt_zcopy_align: %s\n",
                   size_limit_to_str(0, iface_attr.cap.am.opt_zcopy_align));
            printf("#         am_align_mtu: %s\n",
                   size_limit_to_str(0, iface_attr.cap.am.align_mtu));
            printf("#            am header: %s\n",
                   size_limit_to_str(0, iface_attr.cap.am.max_hdr));
        }

        PRINT_CAP(TAG_EAGER_SHORT, iface_attr.cap.flags,
                  iface_attr.cap.tag.eager.max_short);
        PRINT_CAP(TAG_EAGER_BCOPY, iface_attr.cap.flags,
                  iface_attr.cap.tag.eager.max_bcopy);
        PRINT_ZCAP(TAG_EAGER_ZCOPY, iface_attr.cap.flags, 0,
                   iface_attr.cap.tag.eager.max_zcopy,
                   iface_attr.cap.tag.eager.max_iov);

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_TAG_RNDV_ZCOPY) {
            PRINT_ZCAP_NO_CHECK(TAG_RNDV_ZCOPY, 0,
                                iface_attr.cap.tag.rndv.max_zcopy,
                                iface_attr.cap.tag.rndv.max_iov);
            printf("#  rndv private header: %s\n",
                   size_limit_to_str(0, iface_attr.cap.tag.rndv.max_hdr));
        }

        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_TAG_EAGER_SHORT |
                                    UCT_IFACE_FLAG_TAG_EAGER_BCOPY |
                                    UCT_IFACE_FLAG_TAG_EAGER_ZCOPY |
                                    UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)) {
            PRINT_ZCAP_NO_CHECK(TAG_RECV, iface_attr.cap.tag.recv.min_recv,
                                iface_attr.cap.tag.recv.max_zcopy,
                                iface_attr.cap.tag.recv.max_iov);
            printf("#  tag_max_outstanding: %s\n",
                   size_limit_to_str(0, iface_attr.cap.tag.recv.max_outstanding));
        }

        if (iface_attr.cap.atomic32.op_flags  ||
            iface_attr.cap.atomic64.op_flags  ||
            iface_attr.cap.atomic32.fop_flags ||
            iface_attr.cap.atomic64.fop_flags) {
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ATOMIC_DEVICE) {
                printf("#               domain: device\n");
            } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_ATOMIC_CPU) {
                printf("#               domain: cpu\n");
            }

            PRINT_ATOMIC_POST(ADD, iface_attr.cap);
            PRINT_ATOMIC_POST(AND, iface_attr.cap);
            PRINT_ATOMIC_POST(OR,  iface_attr.cap);
            PRINT_ATOMIC_POST(XOR, iface_attr.cap);

            PRINT_ATOMIC_FETCH(ADD,   iface_attr.cap, "f");
            PRINT_ATOMIC_FETCH(AND,   iface_attr.cap, "f");
            PRINT_ATOMIC_FETCH(OR,    iface_attr.cap, "f");
            PRINT_ATOMIC_FETCH(XOR,   iface_attr.cap, "f");
            PRINT_ATOMIC_FETCH(SWAP,  iface_attr.cap, "");
            PRINT_ATOMIC_FETCH(CSWAP, iface_attr.cap, "");
        }

        buf[0] = '\0';
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_CONNECT_TO_EP |
                                    UCT_IFACE_FLAG_CONNECT_TO_IFACE)) {
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
                strncat(buf, " to ep,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
                strncat(buf, " to iface,", sizeof(buf) - 1);
            }
            buf[strlen(buf) - 1] = '\0';
        } else {
            strncat(buf, " none", sizeof(buf) - 1);
        }
        printf("#           connection:%s\n", buf);

        printf("#             priority: %d\n", iface_attr.priority);
        printf("#       device address: %zu bytes\n", iface_attr.device_addr_len);
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            printf("#        iface address: %zu bytes\n", iface_attr.iface_addr_len);
        }
        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            printf("#           ep address: %zu bytes\n", iface_attr.ep_addr_len);
        }

        buf[0] = '\0';
        if (iface_attr.cap.flags & (UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF  |
                                    UCT_IFACE_FLAG_ERRHANDLE_AM_ID      |
                                    UCT_IFACE_FLAG_ERRHANDLE_REMOTE_MEM |
                                    UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)) {
            if (iface_attr.cap.flags & (UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF |
                                        UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF |
                                        UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF)) {
                strncat(buf, " buffer (", sizeof(buf) - 1);
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_SHORT_BUF) {
                    strncat(buf, "short,", sizeof(buf) - 1);
                }
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_BCOPY_BUF) {
                    strncat(buf, "bcopy,", sizeof(buf) - 1);
                }
                if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_ZCOPY_BUF) {
                    strncat(buf, "zcopy,", sizeof(buf) - 1);
                }
                buf[strlen(buf) - 1] = '\0';
                strncat(buf, "),", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_AM_ID) {
                strncat(buf, " active-message id,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_REMOTE_MEM) {
                strncat(buf, " remote access,", sizeof(buf) - 1);
            }
            if (iface_attr.cap.flags & UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE) {
                strncat(buf, " peer failure,", sizeof(buf) - 1);
            }
            buf[strlen(buf) - 1] = '\0';
        } else {
            strncat(buf, " none", sizeof(buf) - 1);
        }
        printf("#       error handling:%s\n", buf);
    }

    uct_iface_close(iface);
    printf("#\n");
}

static ucs_status_t print_tl_info(uct_md_h md, const char *tl_name,
                                  uct_tl_resource_desc_t *resources,
                                  unsigned num_resources,
                                  int print_opts,
                                  ucs_config_print_flags_t print_flags)
{
    ucs_async_context_t async;
    uct_worker_h worker;
    ucs_status_t status;
    unsigned i;

    status = ucs_async_context_init(&async, UCS_ASYNC_THREAD_LOCK_TYPE);
    if (status != UCS_OK) {
        return status;
    }

    /* coverity[alloc_arg] */
    status = uct_worker_create(&async, UCS_THREAD_MODE_SINGLE, &worker);
    if (status != UCS_OK) {
        goto out;
    }

    printf("#\n");
    printf("# Transport: %s\n", tl_name);
    printf("#\n");

    if (num_resources == 0) {
        printf("# (No supported devices found)\n");
    }
    for (i = 0; i < num_resources; ++i) {
        ucs_assert(!strcmp(tl_name, resources[i].tl_name));
        print_iface_info(worker, md, &resources[i]);
    }

    uct_worker_destroy(worker);
out:
    ucs_async_context_cleanup(&async);
    return status;
}