ucs_status_t uct_iface_mem_alloc(uct_iface_h tl_iface, size_t length, unsigned flags, const char *name, uct_allocated_memory_t *mem) { uct_base_iface_t *iface = ucs_derived_of(tl_iface, uct_base_iface_t); uct_md_attr_t md_attr; ucs_status_t status; status = uct_mem_alloc(NULL, length, UCT_MD_MEM_ACCESS_ALL, iface->config.alloc_methods, iface->config.num_alloc_methods, &iface->md, 1, name, mem); if (status != UCS_OK) { goto err; } /* If the memory was not allocated using MD, register it */ if (mem->method != UCT_ALLOC_METHOD_MD) { status = uct_md_query(iface->md, &md_attr); if (status != UCS_OK) { goto err_free; } /* If MD does not support registration, allow only the MD method */ if ((md_attr.cap.flags & UCT_MD_FLAG_REG) && (md_attr.cap.reg_mem_types & UCS_BIT(mem->mem_type))) { status = uct_md_mem_reg(iface->md, mem->address, mem->length, flags, &mem->memh); if (status != UCS_OK) { goto err_free; } ucs_assert(mem->memh != UCT_MEM_HANDLE_NULL); } else { mem->memh = UCT_MEM_HANDLE_NULL; } mem->md = iface->md; } return UCS_OK; err_free: uct_mem_free(mem); err: return status; }
ucs_status_t uct_iface_mem_alloc(uct_iface_h tl_iface, size_t length, const char *name, uct_allocated_memory_t *mem) { uct_base_iface_t *iface = ucs_derived_of(tl_iface, uct_base_iface_t); uct_md_attr_t md_attr; ucs_status_t status; status = uct_mem_alloc(length, iface->config.alloc_methods, iface->config.num_alloc_methods, &iface->md, 1, name, mem); if (status != UCS_OK) { goto err; } /* If the memory was not allocated using MD, register it */ if (mem->method != UCT_ALLOC_METHOD_MD) { status = uct_md_query(iface->md, &md_attr); if (status != UCS_OK) { goto err_free; } /* If MD does not support registration, allow only the MD method */ if (!(md_attr.cap.flags & UCT_MD_FLAG_REG)) { ucs_error("%s md does not support registration, so cannot use any allocation " "method except 'md'", iface->md->component->name); status = UCS_ERR_NO_MEMORY; goto err_free; } status = uct_md_mem_reg(iface->md, mem->address, mem->length, &mem->memh); if (status != UCS_OK) { goto err_free; } ucs_assert(mem->memh != UCT_INVALID_MEM_HANDLE); mem->md = iface->md; } return UCS_OK; err_free: uct_mem_free(mem); err: return status; }
static ucs_status_t ucp_fill_resources(ucp_context_h context, const ucp_config_t *config) { unsigned num_tl_resources; unsigned num_md_resources; uct_md_resource_desc_t *md_rscs; ucs_status_t status; ucp_rsc_index_t i; unsigned md_index; uct_md_h md; uct_md_config_t *md_config; uint64_t masks[UCT_DEVICE_TYPE_LAST] = {0}; /* if we got here then num_resources > 0. * if the user's device list is empty, there is no match */ if ((0 == config->devices[UCT_DEVICE_TYPE_NET].count) && (0 == config->devices[UCT_DEVICE_TYPE_SHM].count) && (0 == config->devices[UCT_DEVICE_TYPE_ACC].count) && (0 == config->devices[UCT_DEVICE_TYPE_SELF].count)) { ucs_error("The device lists are empty. Please specify the devices you would like to use " "or omit the UCX_*_DEVICES so that the default will be used."); status = UCS_ERR_NO_ELEM; goto err; } /* if we got here then num_resources > 0. * if the user's tls list is empty, there is no match */ if (0 == config->tls.count) { ucs_error("The TLs list is empty. Please specify the transports you would like to use " "or omit the UCX_TLS so that the default will be used."); status = UCS_ERR_NO_ELEM; goto err; } /* List memory domain resources */ status = uct_query_md_resources(&md_rscs, &num_md_resources); if (status != UCS_OK) { goto err; } /* Sort md's by name, to increase the likelihood of reusing the same ep * configuration (since remote md map is part of the key). */ qsort(md_rscs, num_md_resources, sizeof(*md_rscs), ucp_md_rsc_compare_name); /* Error check: Make sure there is at least one MD */ if (num_md_resources == 0) { ucs_error("No md resources found"); status = UCS_ERR_NO_DEVICE; goto err_release_md_resources; } context->num_mds = 0; context->md_rscs = NULL; context->mds = NULL; context->md_attrs = NULL; context->num_tls = 0; context->tl_rscs = NULL; /* Allocate array of MD resources we would actually use */ context->md_rscs = ucs_calloc(num_md_resources, sizeof(*context->md_rscs), "ucp_md_resources"); if (context->md_rscs == NULL) { status = UCS_ERR_NO_MEMORY; goto err_free_context_resources; } /* Allocate array of memory domains */ context->mds = ucs_calloc(num_md_resources, sizeof(*context->mds), "ucp_mds"); if (context->mds == NULL) { status = UCS_ERR_NO_MEMORY; goto err_free_context_resources; } /* Allocate array of memory domains attributes */ context->md_attrs = ucs_calloc(num_md_resources, sizeof(*context->md_attrs), "ucp_md_attrs"); if (context->md_attrs == NULL) { status = UCS_ERR_NO_MEMORY; goto err_free_context_resources; } /* Open all memory domains, keep only those which have at least one TL * resources selected on them. */ md_index = 0; for (i = 0; i < num_md_resources; ++i) { status = uct_md_config_read(md_rscs[i].md_name, NULL, NULL, &md_config); if (status != UCS_OK) { goto err_free_context_resources; } status = uct_md_open(md_rscs[i].md_name, md_config, &md); uct_config_release(md_config); if (status != UCS_OK) { goto err_free_context_resources; } context->md_rscs[md_index] = md_rscs[i]; context->mds[md_index] = md; /* Save MD attributes */ status = uct_md_query(md, &context->md_attrs[md_index]); if (status != UCS_OK) { goto err_free_context_resources; } /* Add communication resources of each MD */ status = ucp_add_tl_resources(context, md, md_index, config, &num_tl_resources, masks); if (status != UCS_OK) { goto err_free_context_resources; } /* If the MD does not have transport resources, don't use it */ if (num_tl_resources > 0) { ++md_index; ++context->num_mds; } else { ucs_debug("closing md %s because it has no selected transport resources", md_rscs[i].md_name); uct_md_close(md); } } /* Error check: Make sure there is at least one transport */ if (0 == context->num_tls) { ucs_error("There are no available resources matching the configured criteria"); status = UCS_ERR_NO_DEVICE; goto err_free_context_resources; } if (context->num_mds > UCP_MD_INDEX_BITS) { ucs_error("Only up to %d memory domains are supported (have: %d)", UCP_MD_INDEX_BITS, context->num_mds); status = UCS_ERR_EXCEEDS_LIMIT; goto err_release_md_resources; } /* Notify the user if there are devices from the command line that are not available */ ucp_check_unavailable_devices(config->devices, masks); /* Error check: Make sure there are not too many transports */ if (context->num_tls >= UCP_MAX_RESOURCES) { ucs_error("Exceeded resources limit (%u requested, up to %d are supported)", context->num_tls, UCP_MAX_RESOURCES); status = UCS_ERR_EXCEEDS_LIMIT; goto err_free_context_resources; } status = ucp_check_tl_names(context); if (status != UCS_OK) { goto err_free_context_resources; } uct_release_md_resource_list(md_rscs); return UCS_OK; err_free_context_resources: ucp_free_resources(context); err_release_md_resources: uct_release_md_resource_list(md_rscs); err: return status; }
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf) { const size_t buffer_size = 2048; ucx_perf_ep_info_t info, *remote_info; unsigned group_size, i, group_index; uct_device_addr_t *dev_addr; uct_iface_addr_t *iface_addr; uct_ep_addr_t *ep_addr; uct_iface_attr_t iface_attr; uct_md_attr_t md_attr; void *rkey_buffer; ucs_status_t status; struct iovec vec[5]; void *buffer; void *req; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("Failed to allocate RTE buffer"); status = UCS_ERR_NO_MEMORY; goto err; } status = uct_iface_query(perf->uct.iface, &iface_attr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status)); goto err_free; } status = uct_md_query(perf->uct.md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to uct_md_query: %s", ucs_status_string(status)); goto err_free; } if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) { info.rkey_size = md_attr.rkey_packed_size; } else { info.rkey_size = 0; } info.uct.dev_addr_len = iface_attr.device_addr_len; info.uct.iface_addr_len = iface_attr.iface_addr_len; info.uct.ep_addr_len = iface_attr.ep_addr_len; info.recv_buffer = (uintptr_t)perf->recv_buffer; rkey_buffer = buffer; dev_addr = (void*)rkey_buffer + info.rkey_size; iface_addr = (void*)dev_addr + info.uct.dev_addr_len; ep_addr = (void*)iface_addr + info.uct.iface_addr_len; ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size); status = uct_iface_get_device_address(perf->uct.iface, dev_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_device_address: %s", ucs_status_string(status)); goto err_free; } status = uct_iface_get_address(perf->uct.iface, iface_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status)); goto err_free; } if (info.rkey_size > 0) { status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status)); goto err_free; } } group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers)); if (perf->uct.peers == NULL) { goto err_free; } if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } status = uct_ep_create(perf->uct.iface, &perf->uct.peers[i].ep); if (status != UCS_OK) { ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status)); goto err_destroy_eps; } status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr); if (status != UCS_OK) { ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status)); goto err_destroy_eps; } } } vec[0].iov_base = &info; vec[0].iov_len = sizeof(info); vec[1].iov_base = buffer; vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len + info.uct.iface_addr_len + info.uct.ep_addr_len; rte_call(perf, post_vec, vec, 2, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } rte_call(perf, recv, i, buffer, buffer_size, req); remote_info = buffer; rkey_buffer = remote_info + 1; dev_addr = (void*)rkey_buffer + remote_info->rkey_size; iface_addr = (void*)dev_addr + remote_info->uct.dev_addr_len; ep_addr = (void*)iface_addr + remote_info->uct.iface_addr_len; perf->uct.peers[i].remote_addr = remote_info->recv_buffer; if (!uct_iface_is_reachable(perf->uct.iface, dev_addr, remote_info->uct.iface_addr_len ? iface_addr : NULL)) { ucs_error("Destination is unreachable"); status = UCS_ERR_UNREACHABLE; goto err_destroy_eps; } if (remote_info->rkey_size > 0) { status = uct_rkey_unpack(rkey_buffer, &perf->uct.peers[i].rkey); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status)); goto err_destroy_eps; } } else { perf->uct.peers[i].rkey.handle = NULL; perf->uct.peers[i].rkey.type = NULL; perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY; } if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr); } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { status = uct_ep_create_connected(perf->uct.iface, dev_addr, iface_addr, &perf->uct.peers[i].ep); } else { status = UCS_ERR_UNSUPPORTED; } if (status != UCS_OK) { ucs_error("Failed to connect endpoint: %s", ucs_status_string(status)); goto err_destroy_eps; } } uct_perf_iface_flush_b(perf); free(buffer); rte_call(perf, barrier); return UCS_OK; err_destroy_eps: for (i = 0; i < group_size; ++i) { if (perf->uct.peers[i].rkey.type != NULL) { uct_rkey_release(&perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep != NULL) { uct_ep_destroy(perf->uct.peers[i].ep); } } free(perf->uct.peers); err_free: free(buffer); err: return status; }
ucs_status_t uct_mem_alloc(size_t min_length, uct_alloc_method_t *methods, unsigned num_methods, uct_md_h *mds, unsigned num_mds, const char *alloc_name, uct_allocated_memory_t *mem) { uct_alloc_method_t *method; uct_md_attr_t md_attr; ucs_status_t status; size_t alloc_length; unsigned md_index; uct_mem_h memh; uct_md_h md; void *address; int shmid; if (min_length == 0) { ucs_error("Allocation length cannot be 0"); return UCS_ERR_INVALID_PARAM; } if (num_methods == 0) { ucs_error("No allocation methods provided"); return UCS_ERR_INVALID_PARAM; } for (method = methods; method < methods + num_methods; ++method) { ucs_debug("trying allocation method %s", uct_alloc_method_names[*method]); switch (*method) { case UCT_ALLOC_METHOD_MD: /* Allocate with one of the specified memory domains */ for (md_index = 0; md_index < num_mds; ++md_index) { md = mds[md_index]; status = uct_md_query(md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to query MD"); return status; } /* Check if MD supports allocation */ if (!(md_attr.cap.flags & UCT_MD_FLAG_ALLOC)) { continue; } /* Allocate memory using the MD. * If the allocation fails, it's considered an error and we don't * fall-back, because this MD already exposed support for memory * allocation. */ alloc_length = min_length; status = uct_md_mem_alloc(md, &alloc_length, &address, alloc_name, &memh); if (status != UCS_OK) { ucs_error("failed to allocate %zu bytes using md %s: %s", alloc_length, md->component->name, ucs_status_string(status)); return status; } ucs_assert(memh != UCT_INVALID_MEM_HANDLE); mem->md = md; mem->memh = memh; goto allocated; } break; case UCT_ALLOC_METHOD_HEAP: /* Allocate aligned memory using libc allocator */ alloc_length = min_length; address = ucs_memalign(UCS_SYS_CACHE_LINE_SIZE, alloc_length UCS_MEMTRACK_VAL); if (address != NULL) { goto allocated_without_md; } ucs_debug("failed to allocate %zu bytes from the heap", alloc_length); break; case UCT_ALLOC_METHOD_MMAP: /* Request memory from operating system using mmap() */ alloc_length = ucs_align_up_pow2(min_length, ucs_get_page_size()); address = ucs_mmap(NULL, alloc_length, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0 UCS_MEMTRACK_VAL); if (address != MAP_FAILED) { goto allocated_without_md; } ucs_debug("failed to mmap %zu bytes: %m", alloc_length); break; case UCT_ALLOC_METHOD_HUGE: /* Allocate huge pages */ alloc_length = min_length; status = ucs_sysv_alloc(&alloc_length, &address, SHM_HUGETLB, &shmid UCS_MEMTRACK_VAL); if (status == UCS_OK) { goto allocated_without_md; } ucs_debug("failed to allocate %zu bytes from hugetlb: %s", min_length, ucs_status_string(status)); break; default: ucs_error("Invalid allocation method %d", *method); return UCS_ERR_INVALID_PARAM; } } ucs_debug("Could not allocate memory with any of the provided methods"); return UCS_ERR_NO_MEMORY; allocated_without_md: mem->md = NULL; mem->memh = UCT_INVALID_MEM_HANDLE; allocated: ucs_debug("allocated %zu bytes at %p using %s", alloc_length, address, (mem->md == NULL) ? uct_alloc_method_names[*method] : mem->md->component->name); mem->address = address; mem->length = alloc_length; mem->method = *method; return UCS_OK; }
ucs_status_t uct_mem_alloc(void *addr, size_t min_length, unsigned flags, uct_alloc_method_t *methods, unsigned num_methods, uct_md_h *mds, unsigned num_mds, const char *alloc_name, uct_allocated_memory_t *mem) { uct_alloc_method_t *method; uct_md_attr_t md_attr; ucs_status_t status; size_t alloc_length; unsigned md_index; uct_mem_h memh; uct_md_h md; void *address; int shmid; unsigned map_flags; if (min_length == 0) { ucs_error("Allocation length cannot be 0"); return UCS_ERR_INVALID_PARAM; } if (num_methods == 0) { ucs_error("No allocation methods provided"); return UCS_ERR_INVALID_PARAM; } if ((flags & UCT_MD_MEM_FLAG_FIXED) && (!addr || ((uintptr_t)addr % ucs_get_page_size()))) { ucs_debug("UCT_MD_MEM_FLAG_FIXED requires valid page size aligned address"); return UCS_ERR_INVALID_PARAM; } for (method = methods; method < methods + num_methods; ++method) { ucs_debug("trying allocation method %s", uct_alloc_method_names[*method]); switch (*method) { case UCT_ALLOC_METHOD_MD: /* Allocate with one of the specified memory domains */ for (md_index = 0; md_index < num_mds; ++md_index) { md = mds[md_index]; status = uct_md_query(md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to query MD"); return status; } /* Check if MD supports allocation */ if (!(md_attr.cap.flags & UCT_MD_FLAG_ALLOC)) { continue; } /* Check if MD supports allocation with fixed address * if it's requested */ if ((flags & UCT_MD_MEM_FLAG_FIXED) && !(md_attr.cap.flags & UCT_MD_FLAG_FIXED)) { continue; } /* Allocate memory using the MD. * If the allocation fails, it's considered an error and we don't * fall-back, because this MD already exposed support for memory * allocation. */ alloc_length = min_length; address = addr; status = uct_md_mem_alloc(md, &alloc_length, &address, flags, alloc_name, &memh); if (status != UCS_OK) { ucs_error("failed to allocate %zu bytes using md %s: %s", alloc_length, md->component->name, ucs_status_string(status)); return status; } ucs_assert(memh != UCT_MEM_HANDLE_NULL); mem->md = md; mem->memh = memh; goto allocated; } break; case UCT_ALLOC_METHOD_THP: #ifdef MADV_HUGEPAGE if (!ucs_is_thp_enabled()) { break; } /* Fixed option is not supported for thp allocation*/ if (flags & UCT_MD_MEM_FLAG_FIXED) { break; } alloc_length = ucs_align_up(min_length, ucs_get_huge_page_size()); address = ucs_memalign(ucs_get_huge_page_size(), alloc_length UCS_MEMTRACK_VAL); if (address != NULL) { status = madvise(address, alloc_length, MADV_HUGEPAGE); if (status != UCS_OK) { ucs_error("madvise failure status (%d) address(%p) len(%zu):" " %m", status, address, alloc_length); ucs_free(address); break; } else { goto allocated_without_md; } } ucs_debug("failed to allocate by thp %zu bytes: %m", alloc_length); #endif break; case UCT_ALLOC_METHOD_HEAP: /* Allocate aligned memory using libc allocator */ /* Fixed option is not supported for heap allocation*/ if (flags & UCT_MD_MEM_FLAG_FIXED) { break; } alloc_length = min_length; address = ucs_memalign(UCS_SYS_CACHE_LINE_SIZE, alloc_length UCS_MEMTRACK_VAL); if (address != NULL) { goto allocated_without_md; } ucs_debug("failed to allocate %zu bytes from the heap", alloc_length); break; case UCT_ALLOC_METHOD_MMAP: map_flags = uct_mem_get_mmap_flags(flags); /* Request memory from operating system using mmap() */ alloc_length = ucs_align_up_pow2(min_length, ucs_get_page_size()); address = ucs_mmap(addr, alloc_length, PROT_READ | PROT_WRITE, map_flags, -1, 0 UCS_MEMTRACK_VAL); if (address != MAP_FAILED) { goto allocated_without_md; } ucs_debug("failed to mmap %zu bytes: %m", alloc_length); break; case UCT_ALLOC_METHOD_HUGE: /* Allocate huge pages */ alloc_length = min_length; address = (flags & UCT_MD_MEM_FLAG_FIXED) ? addr : NULL; status = ucs_sysv_alloc(&alloc_length, &address, SHM_HUGETLB, &shmid UCS_MEMTRACK_VAL); if (status == UCS_OK) { goto allocated_without_md; } ucs_debug("failed to allocate %zu bytes from hugetlb: %s", min_length, ucs_status_string(status)); break; default: ucs_error("Invalid allocation method %d", *method); return UCS_ERR_INVALID_PARAM; } } ucs_debug("Could not allocate memory with any of the provided methods"); return UCS_ERR_NO_MEMORY; allocated_without_md: mem->md = NULL; mem->memh = UCT_MEM_HANDLE_NULL; allocated: ucs_debug("allocated %zu bytes at %p using %s", alloc_length, address, (mem->md == NULL) ? uct_alloc_method_names[*method] : mem->md->component->name); mem->address = address; mem->length = alloc_length; mem->method = *method; return UCS_OK; }
static void print_md_info(const char *md_name, int print_opts, ucs_config_print_flags_t print_flags, const char *req_tl_name) { uct_tl_resource_desc_t *resources, tmp; unsigned resource_index, j, num_resources, count; ucs_status_t status; const char *tl_name; uct_md_config_t *md_config; uct_md_attr_t md_attr; uct_md_h md; status = uct_md_config_read(md_name, NULL, NULL, &md_config); if (status != UCS_OK) { goto out; } status = uct_md_open(md_name, md_config, &md); uct_config_release(md_config); if (status != UCS_OK) { printf("# < failed to open memory domain %s >\n", md_name); goto out; } status = uct_md_query_tl_resources(md, &resources, &num_resources); if (status != UCS_OK) { printf("# < failed to query memory domain resources >\n"); goto out_close_md; } if (req_tl_name != NULL) { resource_index = 0; while (resource_index < num_resources) { if (!strcmp(resources[resource_index].tl_name, req_tl_name)) { break; } ++resource_index; } if (resource_index == num_resources) { /* no selected transport on the MD */ goto out_free_list; } } status = uct_md_query(md, &md_attr); if (status != UCS_OK) { printf("# < failed to query memory domain >\n"); goto out_free_list; } else { printf("#\n"); printf("# Memory domain: %s\n", md_name); printf("# component: %s\n", md_attr.component_name); if (md_attr.cap.flags & UCT_MD_FLAG_ALLOC) { printf("# allocate: %s\n", size_limit_to_str(md_attr.cap.max_alloc)); } if (md_attr.cap.flags & UCT_MD_FLAG_REG) { printf("# register: %s, cost: %.0f", size_limit_to_str(md_attr.cap.max_reg), md_attr.reg_cost.overhead * 1e9); if (md_attr.reg_cost.growth * 1e9 > 1e-3) { printf("+(%.3f*<SIZE>)", md_attr.reg_cost.growth * 1e9); } printf(" nsec\n"); } printf("# remote key: %zu bytes\n", md_attr.rkey_packed_size); } if (num_resources == 0) { printf("# < no supported devices found >\n"); goto out_free_list; } resource_index = 0; while (resource_index < num_resources) { /* Gather all resources for this transport */ tl_name = resources[resource_index].tl_name; count = 1; for (j = resource_index + 1; j < num_resources; ++j) { if (!strcmp(tl_name, resources[j].tl_name)) { tmp = resources[count + resource_index]; resources[count + resource_index] = resources[j]; resources[j] = tmp; ++count; } } if ((req_tl_name == NULL) || !strcmp(tl_name, req_tl_name)) { print_tl_info(md, tl_name, &resources[resource_index], count, print_opts, print_flags); } resource_index += count; } out_free_list: uct_release_tl_resource_list(resources); out_close_md: uct_md_close(md); out: ; }