static void ucp_address_memchek(void *ptr, size_t size, const uct_tl_resource_desc_t *rsc) { void *undef_ptr; undef_ptr = (void*)VALGRIND_CHECK_MEM_IS_DEFINED(ptr, size); if (undef_ptr != NULL) { ucs_error(UCT_TL_RESOURCE_DESC_FMT " address contains undefined bytes at offset %zd", UCT_TL_RESOURCE_DESC_ARG(rsc), undef_ptr - ptr); } }
/**
 * Check whether a transport resource is enabled by the user configuration.
 *
 * @param resource        Resource to check (device name + transport name).
 * @param config          UCP configuration (UCX_DEVICES / UCX_TLS lists).
 * @param devices_mask_p  Filled with a bitmap of which entries in the user's
 *                        device list were matched by this resource (or 1 when
 *                        the list is "all").
 *
 * @return Nonzero if both the device and the transport are enabled.
 */
static int ucp_is_resource_enabled(uct_tl_resource_desc_t *resource,
                                   const ucp_config_t *config,
                                   uint64_t *devices_mask_p)
{
    int device_enabled, tl_enabled;
    unsigned config_idx;

    ucs_assert(config->devices.count > 0);
    if (!strcmp(config->devices.names[0], "all")) {
        /* if the user's list is 'all', use all the available resources */
        device_enabled  = 1;
        *devices_mask_p = 1;
    } else {
        /* go over the device list from the user and check (against the available resources)
         * which can be satisfied */
        device_enabled  = 0;
        *devices_mask_p = 0;
        ucs_assert_always(config->devices.count <= 64); /* Using uint64_t bitmap */
        for (config_idx = 0; config_idx < config->devices.count; ++config_idx) {
            if (!strcmp(config->devices.names[config_idx], resource->dev_name)) {
                device_enabled  = 1;
                /* BUGFIX: UCS_BIT(i) sets the single bit of the matched list
                 * entry. The previous UCS_MASK(i) set all bits *below* i, so
                 * a match on entry 0 recorded no bit at all and the caller
                 * could wrongly report the first device as not found. */
                *devices_mask_p |= UCS_BIT(config_idx);
            }
        }
    }

    /* Disable the posix mmap and xpmem 'devices'. ONLY for now - use sysv for mm .
     * This will be removed after multi-rail is supported */
    if (!strcmp(resource->dev_name,"posix") || !strcmp(resource->dev_name, "xpmem")) {
        device_enabled = 0;
    }

    ucs_assert(config->tls.count > 0);
    if (!strcmp(config->tls.names[0], "all")) {
        /* if the user's list is 'all', use all the available tls */
        tl_enabled = 1;
    } else {
        /* go over the tls list from the user and compare it against the available resources */
        tl_enabled = 0;
        for (config_idx = 0; config_idx < config->tls.count; ++config_idx) {
            if (!strcmp(config->tls.names[config_idx], resource->tl_name)) {
                tl_enabled = 1;
                break;
            }
        }
    }

    ucs_trace(UCT_TL_RESOURCE_DESC_FMT " is %sabled",
              UCT_TL_RESOURCE_DESC_ARG(resource),
              (device_enabled && tl_enabled) ? "en" : "dis");
    return device_enabled && tl_enabled;
}
/**
 * Test whether 'flags' contains every bit of 'required_flags' for the given
 * resource. On failure (and when at least one flag is actually required),
 * trace the first missing capability and write a short human-readable reason
 * into 'reason' (at most 'max' bytes).
 *
 * @return 1 if all required flags are present, 0 otherwise.
 */
static int ucp_wireup_check_flags(const uct_tl_resource_desc_t *resource,
                                  uint64_t flags, uint64_t required_flags,
                                  const char *title, const char **flag_descs,
                                  char *reason, size_t max)
{
    const char *desc;

    if (!ucs_test_all_flags(flags, required_flags)) {
        if (required_flags) {
            desc = ucp_wireup_get_missing_flag_desc(flags, required_flags,
                                                    flag_descs);
            ucs_trace(UCT_TL_RESOURCE_DESC_FMT " : not suitable for %s, no %s",
                      UCT_TL_RESOURCE_DESC_ARG(resource), title, desc);
            snprintf(reason, max, UCT_TL_RESOURCE_DESC_FMT" - no %s",
                     UCT_TL_RESOURCE_DESC_ARG(resource), desc);
        }
        return 0;
    }

    return 1;
}
/**
 * Format a wireup active-message into 'buffer' for tracing purposes: the
 * message type (REQ/REP/ACK), the peer name and uuid, and for every unpacked
 * address entry, the matching local transport, its MD index, and which lanes
 * of the message map to it.
 *
 * The payload after the header (msg + 1) is a packed worker address which is
 * unpacked into a malloc'ed list and freed at the end.
 */
static void ucp_wireup_msg_dump(ucp_worker_h worker, uct_am_trace_type_t type,
                                uint8_t id, const void *data, size_t length,
                                char *buffer, size_t max)
{
    ucp_context_h context = worker->context;
    const ucp_wireup_msg_t *msg = data;
    char peer_name[UCP_WORKER_NAME_MAX + 1];
    ucp_address_entry_t *address_list, *ae;
    ucp_tl_resource_desc_t *rsc;
    unsigned address_count;
    ucp_lane_index_t lane;
    uint64_t uuid;
    char *p, *end;

    /* NOTE(review): the return status of ucp_address_unpack is ignored here;
     * presumably acceptable in a trace-only path - confirm. */
    ucp_address_unpack(msg + 1, &uuid, peer_name, sizeof(peer_name),
                       &address_count, &address_list);

    /* 'p' is a cursor into the output buffer; each snprintf is bounded by the
     * remaining space and 'p' advances by what was actually written. */
    p   = buffer;
    end = buffer + max;
    snprintf(p, end - p, "WIREUP %s [%s uuid 0x%"PRIx64"]",
             (msg->type == UCP_WIREUP_MSG_REQUEST ) ? "REQ" :
             (msg->type == UCP_WIREUP_MSG_REPLY   ) ? "REP" :
             (msg->type == UCP_WIREUP_MSG_ACK     ) ? "ACK" : "",
             peer_name, uuid);
    p += strlen(p);

    for (ae = address_list; ae < address_list + address_count; ++ae) {
        /* Find the local resource whose transport-name checksum matches this
         * remote address entry, and print its descriptor. */
        for (rsc = context->tl_rscs; rsc < context->tl_rscs + context->num_tls; ++rsc) {
            if (ae->tl_name_csum == rsc->tl_name_csum) {
                snprintf(p, end - p, " "UCT_TL_RESOURCE_DESC_FMT,
                         UCT_TL_RESOURCE_DESC_ARG(&rsc->tl_rsc));
                p += strlen(p);
                break;
            }
        }
        snprintf(p, end - p, "/md[%d]", ae->md_index);
        p += strlen(p);

        /* Print every lane whose tli[] entry points at this address index */
        for (lane = 0; lane < UCP_MAX_LANES; ++lane) {
            if (msg->tli[lane] == (ae - address_list)) {
                snprintf(p, end - p, "/lane[%d]", lane);
                p += strlen(p);
            }
        }
    }

    /* address_list was allocated by ucp_address_unpack */
    ucs_free(address_list);
}
/**
 * Decide whether a transport resource is enabled by the user's configuration,
 * checking both the device lists and the transport list (including aliases).
 *
 * @param resource  Resource to check.
 * @param config    UCP configuration.
 * @param masks     Per-device-type bitmaps, updated with the matched entries.
 *
 * @return Nonzero when both the device and the transport are enabled.
 */
static int ucp_is_resource_enabled(uct_tl_resource_desc_t *resource,
                                   const ucp_config_t *config,
                                   uint64_t *masks)
{
    ucp_tl_alias_t *alias;
    int dev_on, tl_on;

    /* Match the resource's device against the configured device lists */
    dev_on = ucp_is_resource_in_device_list(resource, config->devices, masks,
                                            resource->dev_type);

    /* Match the transport name, either directly or through an alias */
    ucs_assert(config->tls.count > 0);
    if (ucp_config_is_tl_enabled(config, resource->tl_name, 0)) {
        tl_on = 1;
    } else {
        tl_on = 0;
        for (alias = ucp_tl_aliases; alias->alias != NULL; ++alias) {
            /* An enabled alias which lists this transport enables it too */
            if (ucp_config_is_tl_enabled(config, alias->alias, 1) &&
                (ucp_str_array_search(alias->tls, ucp_tl_alias_count(alias),
                                      resource->tl_name) >= 0)) {
                tl_on = 1;
                ucs_trace("enabling tl '%s' for alias '%s'", resource->tl_name,
                          alias->alias);
                break;
            }
        }
    }

    ucs_trace(UCT_TL_RESOURCE_DESC_FMT " is %sabled",
              UCT_TL_RESOURCE_DESC_ARG(resource),
              (dev_on && tl_on) ? "en" : "dis");
    return dev_on && tl_on;
}
/**
 * Print a summary of the UCP context to 'stream': the list of memory domains
 * followed by every transport resource and the MD it belongs to. Lines are
 * prefixed with '#' in the style of the other info printers.
 */
void ucp_context_print_info(ucp_context_h context, FILE *stream)
{
    ucp_rsc_index_t idx;

    fprintf(stream, "#\n");
    fprintf(stream, "# UCP context\n");
    fprintf(stream, "#\n");

    /* Memory domains */
    for (idx = 0; idx < context->num_mds; ++idx) {
        fprintf(stream, "# md[%d]: %s\n", idx, context->md_rscs[idx].md_name);
    }

    fprintf(stream, "#\n");

    /* Transport resources, each annotated with its MD index */
    for (idx = 0; idx < context->num_tls; ++idx) {
        fprintf(stream, "# rsc[%2d] / md[%d]: "UCT_TL_RESOURCE_DESC_FMT"\n",
                idx, context->tl_rscs[idx].md_index,
                UCT_TL_RESOURCE_DESC_ARG(&context->tl_rscs[idx].tl_rsc));
    }

    fprintf(stream, "#\n");
}
/**
 * Pick the best (local resource, remote address) pair for wireup, using the
 * caller-supplied score function. Iterates the remote packed address and, for
 * each remote entry, every local resource on the same transport that can
 * reach it; the highest-scoring combination wins.
 *
 * Outputs (valid only on UCS_OK):
 *  - src_rsc_index_p / dst_rsc_index_p / dst_pd_index_p: winning indices
 *  - addr_p: the winning remote address (points into 'address')
 *  - reachable_pds: bitmap of remote PDs reachable by ANY local resource
 *
 * @return UCS_OK, or UCS_ERR_UNREACHABLE if no combination scored.
 */
static ucs_status_t ucp_pick_best_wireup(ucp_worker_h worker, ucp_address_t *address,
                                         ucp_wireup_score_function_t score_func,
                                         ucp_rsc_index_t *src_rsc_index_p,
                                         ucp_rsc_index_t *dst_rsc_index_p,
                                         ucp_rsc_index_t *dst_pd_index_p,
                                         struct sockaddr **addr_p,
                                         uint64_t *reachable_pds,
                                         const char *title)
{
    ucp_context_h context = worker->context;
    ucp_rsc_index_t src_rsc_index, dst_rsc_index;
    ucp_rsc_index_t pd_index;
    struct sockaddr *addr, *best_addr;
    double score, best_score;
    uct_iface_attr_t *iface_attr;
    uct_tl_resource_desc_t *resource;
    char tl_name[UCT_TL_NAME_MAX];
    uct_iface_h iface;
    void *iter;

    best_addr        = NULL;
    /* Tiny positive threshold: a candidate must score strictly above this,
     * so zero-scored combinations are never selected. */
    best_score       = 1e-9;
    /* -1 wraps to the max value of ucp_rsc_index_t, marking "not found" */
    *src_rsc_index_p = -1;
    *dst_rsc_index_p = -1;
    *dst_pd_index_p  = -1;
    *reachable_pds   = 0;

    /*
     * Find the best combination of local resource and reachable remote address.
     */
    dst_rsc_index = 0;
    ucp_address_iter_init(address, &iter);
    while (ucp_address_iter_next(&iter, &addr, tl_name, &pd_index)) {
        for (src_rsc_index = 0; src_rsc_index < context->num_tls; ++src_rsc_index) {
            resource   = &context->tl_rscs[src_rsc_index].tl_rsc;
            iface      = worker->ifaces[src_rsc_index];
            iface_attr = &worker->iface_attrs[src_rsc_index];

            /* Must be reachable address, on same transport */
            if (strcmp(tl_name, resource->tl_name) ||
                !uct_iface_is_reachable(iface, addr)) {
                continue;
            }

            /* Any reachable pair marks the remote PD as reachable, even if it
             * is not the eventual winner. */
            *reachable_pds |= UCS_BIT(pd_index);

            score = score_func(resource, iface, iface_attr);
            ucs_trace("%s " UCT_TL_RESOURCE_DESC_FMT " score %.2f",
                      title, UCT_TL_RESOURCE_DESC_ARG(resource), score);
            if (score > best_score) {
                ucs_assert(addr != NULL);
                best_score       = score;
                best_addr        = addr;
                *src_rsc_index_p = src_rsc_index;
                *dst_rsc_index_p = dst_rsc_index;
                *dst_pd_index_p  = pd_index;
            }
        }
        /* dst_rsc_index counts the position in the remote address iteration */
        ++dst_rsc_index;
    }

    if (best_addr == NULL) {
        return UCS_ERR_UNREACHABLE;
    }

    ucs_debug("%s: " UCT_TL_RESOURCE_DESC_FMT " to %d pd %d", title,
              UCT_TL_RESOURCE_DESC_ARG(&context->tl_rscs[*src_rsc_index_p].tl_rsc),
              *dst_rsc_index_p, *dst_pd_index_p);
    *addr_p = best_addr;
    return UCS_OK;
}
/**
 * Debug-print an endpoint configuration key: a summary line with the special
 * lanes and lane maps, then one line per lane showing its resource and the
 * roles it serves (am / rma / amo / wireup) and, when given, the remote
 * address index it was matched to.
 *
 * No-op unless debug logging is enabled.
 */
static void ucp_wireup_print_config(ucp_context_h context,
                                    const ucp_ep_config_key_t *key,
                                    const char *title, uint8_t *addr_indices)
{
    char lane_info[128], *p, *endp;
    ucp_lane_index_t lane, amo_index;
    ucp_rsc_index_t rsc_index;
    ucp_md_map_t md_map;

    /* Skip all formatting work when the output would be discarded */
    if (!ucs_log_enabled(UCS_LOG_LEVEL_DEBUG)) {
        return;
    }

    ucs_debug("%s: am_lane %d wirep_lane %d rma_lane_map 0x%"PRIx64
              " amo_lane_map 0x%"PRIx64" reachable_mds 0x%x",
              title, key->am_lane, key->wireup_msg_lane, key->rma_lane_map,
              key->amo_lane_map, key->reachable_md_map);

    for (lane = 0; lane < key->num_lanes; ++lane) {
        /* 'p' is a cursor into lane_info; each snprintf is bounded by the
         * remaining space and the cursor advances by what was written. */
        p         = lane_info;
        endp      = lane_info + sizeof(lane_info);
        rsc_index = key->lanes[lane];

        if (addr_indices != NULL) {
            snprintf(p, endp - p, "->addr[%d] ", addr_indices[lane]);
            p += strlen(p);
        }

        if (key->am_lane == lane) {
            snprintf(p, endp - p, "[am]");
            p += strlen(p);
        }

        /* RMA role: which MD this lane serves for remote memory access */
        md_map = ucp_lane_map_get_lane(key->rma_lane_map, lane);
        if (md_map) {
            snprintf(p, endp - p, "[rma->md%d]", ucs_ffs64(md_map));
            p += strlen(p);
        }

        /* AMO role: the lane may appear in the atomic lane map at a
         * different (amo) index */
        amo_index = ucp_ep_get_amo_lane_index(key, lane);
        if (amo_index != UCP_NULL_LANE) {
            md_map = ucp_lane_map_get_lane(key->amo_lane_map, amo_index);
            if (md_map) {
                snprintf(p, endp - p, "[amo[%d]->md%d]", amo_index,
                         ucs_ffs64(md_map));
                p += strlen(p);
            }
        }

        if (key->wireup_msg_lane == lane) {
            snprintf(p, endp - p, "[wireup]");
            p += strlen(p);
        }

        ucs_debug("%s: lane[%d] using rsc[%d] "UCT_TL_RESOURCE_DESC_FMT " %s",
                  title, lane, rsc_index,
                  UCT_TL_RESOURCE_DESC_ARG(&context->tl_rscs[rsc_index].tl_rsc),
                  lane_info);
    }
}
/**
 * Select a local and remote transport.
 *
 * Phase 1 builds a bitmap of remote address entries that satisfy the remote
 * MD/iface criteria and belong to an MD in 'remote_md_map'. Phase 2 scans all
 * local resources; for each one that satisfies the local criteria, it scores
 * every reachable eligible remote entry and keeps the overall best pair.
 * While scanning, a human-readable list of rejection reasons is accumulated
 * into 'tls_info' for the error message.
 *
 * Outputs (valid only on UCS_OK): rsc_index_p (local resource),
 * dst_addr_index_p (index into address_list), score_p (winning score).
 *
 * @return UCS_OK, or UCS_ERR_UNREACHABLE when no pair qualified (logging an
 *         error with the accumulated reasons if show_error is set).
 */
static UCS_F_NOINLINE ucs_status_t
ucp_wireup_select_transport(ucp_ep_h ep, const ucp_address_entry_t *address_list,
                            unsigned address_count,
                            const ucp_wireup_criteria_t *criteria,
                            uint64_t remote_md_map, int show_error,
                            ucp_rsc_index_t *rsc_index_p,
                            unsigned *dst_addr_index_p, double *score_p)
{
    ucp_worker_h worker   = ep->worker;
    ucp_context_h context = worker->context;
    uct_tl_resource_desc_t *resource;
    const ucp_address_entry_t *ae;
    ucp_rsc_index_t rsc_index;
    double score, best_score;
    char tls_info[256];
    char *p, *endp;
    uct_iface_attr_t *iface_attr;
    uct_md_attr_t *md_attr;
    uint64_t addr_index_map;
    unsigned addr_index;
    int reachable;
    int found;

    found       = 0;
    best_score  = 0.0;
    /* 'p' is a cursor into tls_info; endp reserves one byte for the NUL */
    p           = tls_info;
    endp        = tls_info + sizeof(tls_info) - 1;
    tls_info[0] = '\0';

    /* Check which remote addresses satisfy the criteria */
    addr_index_map = 0;
    for (ae = address_list; ae < address_list + address_count; ++ae) {
        addr_index = ae - address_list;
        if (!(remote_md_map & UCS_BIT(ae->md_index))) {
            ucs_trace("addr[%d]: not in use, because on md[%d]", addr_index,
                      ae->md_index);
            continue;
        }
        if (!ucs_test_all_flags(ae->md_flags, criteria->remote_md_flags)) {
            ucs_trace("addr[%d]: no %s", addr_index,
                      ucp_wireup_get_missing_flag_desc(ae->md_flags,
                                                       criteria->remote_md_flags,
                                                       ucp_wireup_md_flags));
            continue;
        }
        if (!ucs_test_all_flags(ae->iface_attr.cap_flags,
                                criteria->remote_iface_flags)) {
            ucs_trace("addr[%d]: no %s", addr_index,
                      ucp_wireup_get_missing_flag_desc(ae->iface_attr.cap_flags,
                                                       criteria->remote_iface_flags,
                                                       ucp_wireup_iface_flags));
            continue;
        }

        addr_index_map |= UCS_BIT(addr_index);
    }

    /* For each local resource try to find the best remote address to connect to.
     * Pick the best local resource to satisfy the criteria.
     * best one has the highest score (from the dedicated score_func) and
     * has a reachable tl on the remote peer */
    for (rsc_index = 0; rsc_index < context->num_tls; ++rsc_index) {
        resource   = &context->tl_rscs[rsc_index].tl_rsc;
        iface_attr = &worker->iface_attrs[rsc_index];
        md_attr    = &context->md_attrs[context->tl_rscs[rsc_index].md_index];

        /* Check that local md and interface satisfy the criteria; on failure
         * the check itself wrote a reason at 'p', which we terminate with ", " */
        if (!ucp_wireup_check_flags(resource, md_attr->cap.flags,
                                    criteria->local_md_flags, criteria->title,
                                    ucp_wireup_md_flags, p, endp - p) ||
            !ucp_wireup_check_flags(resource, iface_attr->cap.flags,
                                    criteria->local_iface_flags, criteria->title,
                                    ucp_wireup_iface_flags, p, endp - p))
        {
            p += strlen(p);
            snprintf(p, endp - p, ", ");
            p += strlen(p);
            continue;
        }

        reachable = 0;

        for (ae = address_list; ae < address_list + address_count; ++ae) {
            if (!(addr_index_map & UCS_BIT(ae - address_list)) ||
                !ucp_wireup_is_reachable(worker, rsc_index, ae))
            {
                /* Must be reachable device address, on same transport */
                continue;
            }

            reachable = 1;

            score = criteria->calc_score(md_attr, iface_attr, &ae->iface_attr);
            ucs_assert(score >= 0.0);

            ucs_trace(UCT_TL_RESOURCE_DESC_FMT "->addr[%zd] : %s score %.2f",
                      UCT_TL_RESOURCE_DESC_ARG(resource), ae - address_list,
                      criteria->title, score);
            /* '!found' accepts the first candidate even with score 0.0 */
            if (!found || (score > best_score)) {
                *rsc_index_p      = rsc_index;
                *dst_addr_index_p = ae - address_list;
                *score_p          = score;
                best_score        = score;
                found             = 1;
            }
        }

        /* If a local resource cannot reach any of the remote addresses, generate
         * debug message. */
        if (!reachable) {
            snprintf(p, endp - p,
                     UCT_TL_RESOURCE_DESC_FMT" - cannot reach remote worker, ",
                     UCT_TL_RESOURCE_DESC_ARG(resource));
            p += strlen(p);
        }
    }

    /* Trim the trailing ", " left by the last rejection reason */
    if (p >= tls_info + 2) {
        *(p - 2) = '\0'; /* trim last "," */
    }

    if (!found) {
        if (show_error) {
            ucs_error("No %s transport to %s: %s", criteria->title,
                      ucp_ep_peer_name(ep), tls_info);
        }
        return UCS_ERR_UNREACHABLE;
    }

    ucs_trace("ep %p: selected for %s: " UCT_TL_RESOURCE_DESC_FMT
              " -> '%s' address[%d],md[%d] score %.2f",
              ep, criteria->title,
              UCT_TL_RESOURCE_DESC_ARG(&context->tl_rscs[*rsc_index_p].tl_rsc),
              ucp_ep_peer_name(ep), *dst_addr_index_p,
              address_list[*dst_addr_index_p].md_index, best_score);
    return UCS_OK;
}
/**
 * Pack the worker's address into 'buffer' (which must be exactly 'size' bytes,
 * as computed by the matching size calculation - asserted at the end).
 *
 * Wire layout: worker uuid, worker name (packed string), then per device:
 * one byte of MD index + MD capability flags, one byte of device address
 * length (UCP_ADDRESS_FLAG_LAST marks the final device), the device address,
 * and per transport on that device: the tl-name checksum, packed iface
 * attributes, the iface address prefixed by its length byte (the LAST flag
 * marks the device's final transport; the EP_ADDR flag indicates a trailing
 * ep address block). num_devices == 0 is encoded as a single
 * UCP_NULL_RESOURCE byte.
 *
 * @param order  If non-NULL, filled so order[k] is the address index of the
 *               k-th transport in tl_bitmap.
 *
 * @return UCS_OK, a failure status from the underlying uct queries, or
 *         UCS_ERR_INVALID_ADDR for a transport with no connect capability.
 */
static ucs_status_t ucp_address_do_pack(ucp_worker_h worker, ucp_ep_h ep,
                                        void *buffer, size_t size,
                                        uint64_t tl_bitmap, unsigned *order,
                                        const ucp_address_packed_device_t *devices,
                                        ucp_rsc_index_t num_devices)
{
    ucp_context_h context = worker->context;
    const ucp_address_packed_device_t *dev;
    uct_iface_attr_t *iface_attr;
    ucp_rsc_index_t md_index;
    ucs_status_t status;
    ucp_rsc_index_t i;
    size_t iface_addr_len;
    size_t ep_addr_len;
    uint64_t md_flags;
    unsigned index;
    void *ptr;
    uint8_t *iface_addr_len_ptr;

    ptr   = buffer;
    index = 0;

    /* Worker uuid and name header */
    *(uint64_t*)ptr = worker->uuid;
    ptr += sizeof(uint64_t);
    ptr = ucp_address_pack_string(ucp_worker_get_name(worker), ptr);

    if (num_devices == 0) {
        /* Empty address: a single UCP_NULL_RESOURCE marker byte */
        *((uint8_t*)ptr) = UCP_NULL_RESOURCE;
        ++ptr;
        goto out;
    }

    for (dev = devices; dev < devices + num_devices; ++dev) {
        /* MD index, with EMPTY/ALLOC/REG flags packed into the high bits */
        md_index = context->tl_rscs[dev->rsc_index].md_index;
        md_flags = context->tl_mds[md_index].attr.cap.flags;
        ucs_assert_always(!(md_index & ~UCP_ADDRESS_FLAG_MD_MASK));

        *(uint8_t*)ptr = md_index |
                         ((dev->tl_bitmap == 0) ? UCP_ADDRESS_FLAG_EMPTY : 0) |
                         ((md_flags & UCT_MD_FLAG_ALLOC) ? UCP_ADDRESS_FLAG_MD_ALLOC : 0) |
                         ((md_flags & UCT_MD_FLAG_REG) ? UCP_ADDRESS_FLAG_MD_REG : 0);
        ++ptr;

        /* Device address length; LAST flag marks the final device entry */
        ucs_assert(dev->dev_addr_len < UCP_ADDRESS_FLAG_LAST);
        *(uint8_t*)ptr = dev->dev_addr_len |
                         ((dev == (devices + num_devices - 1)) ?
                          UCP_ADDRESS_FLAG_LAST : 0);
        ++ptr;

        /* Device address */
        status = uct_iface_get_device_address(worker->ifaces[dev->rsc_index].iface,
                                              (uct_device_addr_t*)ptr);
        if (status != UCS_OK) {
            return status;
        }

        ucp_address_memchek(ptr, dev->dev_addr_len,
                            &context->tl_rscs[dev->rsc_index].tl_rsc);
        ptr += dev->dev_addr_len;

        /* Pack every transport on this device which is part of its bitmap */
        for (i = 0; i < context->num_tls; ++i) {
            if (!(UCS_BIT(i) & dev->tl_bitmap)) {
                continue;
            }

            /* Transport name checksum */
            *(uint16_t*)ptr = context->tl_rscs[i].tl_name_csum;
            ptr += sizeof(uint16_t);

            /* Transport information */
            ucp_address_pack_iface_attr(ptr, &worker->ifaces[i].attr,
                                        worker->atomic_tls & UCS_BIT(i));
            ucp_address_memchek(ptr, sizeof(ucp_address_packed_iface_attr_t),
                                &context->tl_rscs[dev->rsc_index].tl_rsc);
            ptr += sizeof(ucp_address_packed_iface_attr_t);

            iface_attr = &worker->ifaces[i].attr;

            /* A transport which supports neither connect-to-iface nor
             * connect-to-ep cannot be addressed at all */
            if (!(iface_attr->cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) &&
                !(iface_attr->cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP)) {
                return UCS_ERR_INVALID_ADDR;
            }

            /* Pack iface address */
            iface_addr_len = iface_attr->iface_addr_len;
            ucs_assert(iface_addr_len < UCP_ADDRESS_FLAG_EP_ADDR);

            /* The iface address goes at ptr+1; its length byte at ptr is
             * written afterwards (via iface_addr_len_ptr) so the EP_ADDR flag
             * can still be OR-ed in below. */
            status = uct_iface_get_address(worker->ifaces[i].iface,
                                           (uct_iface_addr_t*)(ptr + 1));
            if (status != UCS_OK) {
                return status;
            }
            ucp_address_memchek(ptr + 1, iface_addr_len,
                                &context->tl_rscs[dev->rsc_index].tl_rsc);
            iface_addr_len_ptr  = ptr;
            *iface_addr_len_ptr = iface_addr_len |
                                  ((i == ucs_ilog2(dev->tl_bitmap)) ?
                                   UCP_ADDRESS_FLAG_LAST : 0);
            ptr += 1 + iface_addr_len;

            /* Pack ep address if present */
            if (!(iface_attr->cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) &&
                (ep != NULL)) {
                *iface_addr_len_ptr |= UCP_ADDRESS_FLAG_EP_ADDR;

                ep_addr_len = iface_attr->ep_addr_len;
                ucs_assert(ep_addr_len < UINT8_MAX);
                *(uint8_t*)ptr = ep_addr_len;

                status = ucp_address_pack_ep_address(ep, i, ptr + 1);
                if (status != UCS_OK) {
                    return status;
                }
                ucp_address_memchek(ptr + 1, ep_addr_len,
                                    &context->tl_rscs[dev->rsc_index].tl_rsc);
                ptr += 1 + ep_addr_len;
            }

            /* Save the address index of this transport */
            if (order != NULL) {
                order[ucs_count_one_bits(tl_bitmap & UCS_MASK(i))] = index;
            }

            ucs_trace("pack addr[%d] : "UCT_TL_RESOURCE_DESC_FMT
                      " md_flags 0x%"PRIx64" tl_flags 0x%"PRIx64" bw %e ovh %e "
                      "lat_ovh: %e dev_priority %d",
                      index, UCT_TL_RESOURCE_DESC_ARG(&context->tl_rscs[i].tl_rsc),
                      md_flags, worker->ifaces[i].attr.cap.flags,
                      worker->ifaces[i].attr.bandwidth,
                      worker->ifaces[i].attr.overhead,
                      worker->ifaces[i].attr.latency.overhead,
                      worker->ifaces[i].attr.priority);
            ++index;
        }
    }

out:
    /* The caller sized the buffer; packing must have consumed it exactly */
    ucs_assertv(buffer + size == ptr,
                "buffer=%p size=%zu ptr=%p ptr-buffer=%zd",
                buffer, size, ptr, ptr - buffer);
    return UCS_OK;
}
/**
 * Print an endpoint configuration to 'stream': one line per lane showing its
 * resource and roles (am / rma / amo / rndv / wireup) and, when given, the
 * matched remote address index; then the tag-matching and RMA protocol
 * thresholds for the features enabled in the context.
 */
static void ucp_ep_config_print(FILE *stream, ucp_worker_h worker,
                                const ucp_ep_config_t *config,
                                const uint8_t *addr_indices)
{
    ucp_context_h context = worker->context;
    ucp_tl_resource_desc_t *rsc;
    ucp_rsc_index_t rsc_index;
    ucp_lane_index_t lane;
    ucp_md_map_t md_map;

    for (lane = 0; lane < config->key.num_lanes; ++lane) {
        rsc_index = config->key.lanes[lane];
        rsc       = &context->tl_rscs[rsc_index];
        fprintf(stream, "# lane[%d]: %d:" UCT_TL_RESOURCE_DESC_FMT, lane,
                rsc_index, UCT_TL_RESOURCE_DESC_ARG(&rsc->tl_rsc));
        if (addr_indices != NULL) {
            fprintf(stream, "->addr[%d] ", addr_indices[lane]);
        }
        fprintf(stream, " -");

        /* Annotate every role this lane serves in the configuration key */
        if (lane == config->key.am_lane) {
            fprintf(stream, " am");
        }
        md_map = ucp_ep_config_get_rma_md_map(&config->key, lane);
        if (md_map) {
            ucp_ep_config_print_md_map(stream, " rma", md_map);
        }
        md_map = ucp_ep_config_get_amo_md_map(&config->key, lane);
        if (md_map) {
            ucp_ep_config_print_md_map(stream, " amo", md_map);
        }
        if (lane == config->key.rndv_lane) {
            fprintf(stream, " rndv");
        }
        if (lane == config->key.wireup_msg_lane) {
            fprintf(stream, " wireup");
        }
        fprintf(stream, "\n");
    }

    fprintf(stream, "#\n");

    /* Tag-matching protocol thresholds (eager short / zcopy / rendezvous) */
    if (context->config.features & UCP_FEATURE_TAG) {
        ucp_ep_config_print_tag_proto(stream, "tag_send",
                                      config->max_eager_short,
                                      config->zcopy_thresh,
                                      config->rndv_thresh);
        ucp_ep_config_print_tag_proto(stream, "tag_send_sync",
                                      config->max_eager_short,
                                      config->sync_zcopy_thresh,
                                      config->sync_rndv_thresh);
    }

    /* RMA protocol thresholds, printed only for lanes that do RMA */
    if (context->config.features & UCP_FEATURE_RMA) {
        for (lane = 0; lane < config->key.num_lanes; ++lane) {
            if (!ucp_ep_config_get_rma_md_map(&config->key, lane)) {
                continue;
            }
            ucp_ep_config_print_rma_proto(stream, &config->rma[lane],
                                          config->bcopy_thresh);
        }
    }
}
/**
 * Check whether a transport resource is enabled by the user configuration,
 * matching the device against the configured device list and the transport
 * against the transport list (directly or through an alias).
 *
 * @param resource        Resource to check.
 * @param config          UCP configuration.
 * @param devices_mask_p  Filled with a bitmap of which entries of the user's
 *                        device list were matched (or 1 when the list is "all").
 *
 * @return Nonzero if both the device and the transport are enabled.
 */
static int ucp_is_resource_enabled(uct_tl_resource_desc_t *resource,
                                   const ucp_config_t *config,
                                   uint64_t *devices_mask_p)
{
    int device_enabled, tl_enabled;
    ucp_tl_alias_t *alias;
    int config_idx;

    ucs_assert(config->devices.count > 0);
    if (!strcmp(config->devices.names[0], "all")) {
        /* if the user's list is 'all', use all the available resources */
        device_enabled  = 1;
        *devices_mask_p = 1;
    } else {
        /* go over the device list from the user and check (against the available resources)
         * which can be satisfied */
        device_enabled  = 0;
        *devices_mask_p = 0;
        ucs_assert_always(config->devices.count <= 64); /* Using uint64_t bitmap */
        config_idx = ucp_str_array_search((const char**)config->devices.names,
                                          config->devices.count,
                                          resource->dev_name);
        if (config_idx >= 0) {
            device_enabled  = 1;
            /* BUGFIX: UCS_BIT(i) sets the single bit of the matched list
             * entry. The previous UCS_MASK(i) set all bits *below* i, so a
             * match on entry 0 recorded no bit at all and the caller could
             * wrongly report the first device as not found. */
            *devices_mask_p |= UCS_BIT(config_idx);
        }
    }

    /* Disable the posix mmap and xpmem 'devices'. ONLY for now - use sysv for mm .
     * This will be removed after multi-rail is supported */
    if (!strcmp(resource->dev_name,"posix") || !strcmp(resource->dev_name, "xpmem")) {
        device_enabled = 0;
    }

    ucs_assert(config->tls.count > 0);
    if (ucp_config_is_tl_enabled(config, resource->tl_name)) {
        tl_enabled = 1;
    } else {
        tl_enabled = 0;

        /* check aliases */
        for (alias = ucp_tl_aliases; alias->alias != NULL; ++alias) {
            /* If an alias is enabled, and the transport is part of this alias,
             * enable the transport.
             */
            if (ucp_config_is_tl_enabled(config, alias->alias) &&
                (ucp_str_array_search(alias->tls, ucp_tl_alias_count(alias),
                                      resource->tl_name) >= 0)) {
                tl_enabled = 1;
                ucs_trace("enabling tl '%s' for alias '%s'", resource->tl_name,
                          alias->alias);
                break;
            }
        }
    }

    ucs_trace(UCT_TL_RESOURCE_DESC_FMT " is %sabled",
              UCT_TL_RESOURCE_DESC_ARG(resource),
              (device_enabled && tl_enabled) ? "en" : "dis");
    return device_enabled && tl_enabled;
}