/*
 * Print a text rendering of TOPOLOGY to FILENAME ("-" or NULL means stdout).
 * LOGICAL selects logical vs physical indexes; LEGEND is unused for console
 * output.
 *
 * verbose_mode == 0: only print the summary.
 * verbose_mode == 1: only print the topology tree.
 * verbose_mode  > 1: print both, plus latency matrices and cpuset-coverage
 *                    diagnostics (unless filtered by lstopo_show_only).
 */
void output_console(hwloc_topology_t topology, const char *filename, int logical, int legend __hwloc_attribute_unused, int verbose_mode)
{
  unsigned topodepth;
  FILE *output;

  if (!filename || !strcmp(filename, "-"))
    output = stdout;
  else {
    output = open_file(filename, "w");
    if (!output) {
      fprintf(stderr, "Failed to open %s for writing (%s)\n", filename, strerror(errno));
      return;
    }
  }

  topodepth = hwloc_topology_get_depth(topology);

  if (lstopo_show_only != (hwloc_obj_type_t)-1) {
    if (verbose_mode > 1)
      fprintf(output, "Only showing %s objects\n", hwloc_obj_type_string(lstopo_show_only));
    output_only(topology, hwloc_get_root_obj(topology), output, logical, verbose_mode);
  } else if (verbose_mode >= 1) {
    output_topology(topology, hwloc_get_root_obj(topology), NULL, output, 0, logical, verbose_mode);
    fprintf(output, "\n");
  }

  if ((verbose_mode > 1 || !verbose_mode) && lstopo_show_only == (hwloc_obj_type_t)-1) {
    hwloc_lstopo_show_summary(output, topology);
  }

  if (verbose_mode > 1 && lstopo_show_only == (hwloc_obj_type_t)-1) {
    /* One latency matrix per depth that has one. */
    const struct hwloc_distances_s *distances;
    unsigned depth;
    for (depth = 0; depth < topodepth; depth++) {
      distances = hwloc_get_whole_distance_matrix_by_depth(topology, depth);
      if (!distances || !distances->latency)
        continue;
      /* BUG FIX: this header previously used printf() and therefore always
       * went to stdout, even when the rest of the report was being written
       * to a file via `output`. */
      fprintf(output, "latency matrix between %ss (depth %u) by %s indexes:\n",
              hwloc_obj_type_string(hwloc_get_depth_type(topology, depth)),
              depth,
              logical ? "logical" : "physical");
      hwloc_utils_print_distance_matrix(topology, hwloc_get_root_obj(topology), distances->nbobjs, depth, distances->latency, logical);
    }
  }

  if (verbose_mode > 1 && lstopo_show_only == (hwloc_obj_type_t)-1) {
    hwloc_const_bitmap_t complete = hwloc_topology_get_complete_cpuset(topology);
    hwloc_const_bitmap_t topo = hwloc_topology_get_topology_cpuset(topology);
    hwloc_const_bitmap_t online = hwloc_topology_get_online_cpuset(topology);
    hwloc_const_bitmap_t allowed = hwloc_topology_get_allowed_cpuset(topology);

    /* PUs that exist but are not represented in the topology. */
    if (complete && !hwloc_bitmap_isequal(topo, complete)) {
      hwloc_bitmap_t unknown = hwloc_bitmap_alloc();
      char *unknownstr;
      hwloc_bitmap_copy(unknown, complete);
      hwloc_bitmap_andnot(unknown, unknown, topo);
      hwloc_bitmap_asprintf(&unknownstr, unknown);
      fprintf(output, "%d processors not represented in topology: %s\n", hwloc_bitmap_weight(unknown), unknownstr);
      free(unknownstr);
      hwloc_bitmap_free(unknown);
    }

    /* PUs that exist but are offline. */
    if (complete && !hwloc_bitmap_isequal(online, complete)) {
      hwloc_bitmap_t offline = hwloc_bitmap_alloc();
      char *offlinestr;
      hwloc_bitmap_copy(offline, complete);
      hwloc_bitmap_andnot(offline, offline, online);
      hwloc_bitmap_asprintf(&offlinestr, offline);
      fprintf(output, "%d processors offline: %s\n", hwloc_bitmap_weight(offline), offlinestr);
      free(offlinestr);
      hwloc_bitmap_free(offline);
    }

    /* Mismatches between the online and allowed sets, in both directions. */
    if (complete && !hwloc_bitmap_isequal(allowed, online)) {
      if (!hwloc_bitmap_isincluded(online, allowed)) {
        hwloc_bitmap_t forbidden = hwloc_bitmap_alloc();
        char *forbiddenstr;
        hwloc_bitmap_copy(forbidden, online);
        hwloc_bitmap_andnot(forbidden, forbidden, allowed);
        hwloc_bitmap_asprintf(&forbiddenstr, forbidden);
        fprintf(output, "%d processors online but not allowed: %s\n", hwloc_bitmap_weight(forbidden), forbiddenstr);
        free(forbiddenstr);
        hwloc_bitmap_free(forbidden);
      }
      if (!hwloc_bitmap_isincluded(allowed, online)) {
        hwloc_bitmap_t potential = hwloc_bitmap_alloc();
        char *potentialstr;
        hwloc_bitmap_copy(potential, allowed);
        hwloc_bitmap_andnot(potential, potential, online);
        hwloc_bitmap_asprintf(&potentialstr, potential);
        fprintf(output, "%d processors allowed but not online: %s\n", hwloc_bitmap_weight(potential), potentialstr);
        free(potentialstr);
        hwloc_bitmap_free(potential);
      }
    }

    if (!hwloc_topology_is_thissystem(topology))
      fprintf(output, "Topology not from this system\n");
  }

  if (output != stdout)
    fclose(output);
}
#ifdef HWLOC_HAVE_CPU_SET
/* Wrappers hiding the old two-argument vs. current three-argument glibc
 * sched_{set,get}affinity prototypes, so the test body reads linearly. */
static int do_sched_setaffinity(cpu_set_t *schedset)
{
#ifdef HWLOC_HAVE_OLD_SCHED_SETAFFINITY
  return sched_setaffinity(0, sizeof(*schedset));
#else
  return sched_setaffinity(0, sizeof(*schedset), schedset);
#endif
}

static int do_sched_getaffinity(cpu_set_t *schedset)
{
#ifdef HWLOC_HAVE_OLD_SCHED_SETAFFINITY
  return sched_getaffinity(0, sizeof(*schedset));
#else
  return sched_getaffinity(0, sizeof(*schedset), &schedset);
#endif
}
#endif /* HWLOC_HAVE_CPU_SET */

/*
 * Test round-tripping hwloc cpusets through the glibc scheduler affinity
 * interface (hwloc_cpuset_to/from_glibc_sched_affinity).
 */
int main(void)
{
  hwloc_topology_t topology;
#ifdef HWLOC_HAVE_CPU_SET
  unsigned depth;
  hwloc_bitmap_t hwlocset;
  cpu_set_t schedset;
  hwloc_obj_t obj;
  int err;
#endif /* HWLOC_HAVE_CPU_SET */

  hwloc_topology_init(&topology);
  hwloc_topology_load(topology);

#ifdef HWLOC_HAVE_CPU_SET
  depth = hwloc_topology_get_depth(topology);

  /* Bind to the whole machine's complete cpuset... */
  hwlocset = hwloc_bitmap_dup(hwloc_topology_get_complete_cpuset(topology));
  hwloc_cpuset_to_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
  err = do_sched_setaffinity(&schedset);
  assert(!err);
  hwloc_bitmap_free(hwlocset);

  /* ...then read the binding back and check it lies within the complete
   * cpuset and within the union of the online and allowed cpusets. */
  err = do_sched_getaffinity(&schedset);
  assert(!err);
  hwlocset = hwloc_bitmap_alloc();
  hwloc_cpuset_from_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
  assert(hwloc_bitmap_isincluded(hwlocset, hwloc_topology_get_complete_cpuset(topology)));
  hwloc_bitmap_andnot(hwlocset, hwlocset, hwloc_topology_get_online_cpuset(topology));
  hwloc_bitmap_andnot(hwlocset, hwlocset, hwloc_topology_get_allowed_cpuset(topology));
  assert(hwloc_bitmap_iszero(hwlocset));
  hwloc_bitmap_free(hwlocset);

  /* Bind to the last PU (lowest level, last object)... */
  obj = hwloc_get_obj_by_depth(topology, depth-1, hwloc_get_nbobjs_by_depth(topology, depth-1) - 1);
  assert(obj);
  assert(obj->type == HWLOC_OBJ_PU);
  hwlocset = hwloc_bitmap_dup(obj->cpuset);
  hwloc_cpuset_to_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
  err = do_sched_setaffinity(&schedset);
  assert(!err);
  hwloc_bitmap_free(hwlocset);

  /* ...and check that reading it back yields exactly that PU's cpuset. */
  err = do_sched_getaffinity(&schedset);
  assert(!err);
  hwlocset = hwloc_bitmap_alloc();
  hwloc_cpuset_from_glibc_sched_affinity(topology, hwlocset, &schedset, sizeof(schedset));
  assert(hwloc_bitmap_isequal(hwlocset, obj->cpuset));
  hwloc_bitmap_free(hwlocset);
#endif /* HWLOC_HAVE_CPU_SET */

  hwloc_topology_destroy(topology);
  return 0;
}
static void getNumCPUs(void) { // // accessible cores // // // Hwloc can't tell us the number of accessible cores directly, so // get that by counting the parent cores of the accessible PUs. // // // We could seemingly use hwloc_topology_get_allowed_cpuset() to get // the set of accessible PUs here. But that seems not to reflect the // schedaffinity settings, so use hwloc_get_proc_cpubind() instead. // hwloc_cpuset_t logAccSet; CHK_ERR_ERRNO((logAccSet = hwloc_bitmap_alloc()) != NULL); if (hwloc_get_proc_cpubind(topology, getpid(), logAccSet, 0) != 0) { #ifdef __APPLE__ const int errRecoverable = (errno == ENOSYS); // no cpubind on macOS #else const int errRecoverable = 0; #endif if (errRecoverable) { hwloc_bitmap_fill(logAccSet); } else { REPORT_ERR_ERRNO(hwloc_get_proc_cpubind(topology, getpid(), logAccSet, 0) == 0); } } hwloc_bitmap_and(logAccSet, logAccSet, hwloc_topology_get_online_cpuset(topology)); hwloc_cpuset_t physAccSet; CHK_ERR_ERRNO((physAccSet = hwloc_bitmap_alloc()) != NULL); #define NEXT_PU(pu) \ hwloc_get_next_obj_inside_cpuset_by_type(topology, logAccSet, \ HWLOC_OBJ_PU, pu) for (hwloc_obj_t pu = NEXT_PU(NULL); pu != NULL; pu = NEXT_PU(pu)) { hwloc_obj_t core; CHK_ERR_ERRNO((core = hwloc_get_ancestor_obj_by_type(topology, HWLOC_OBJ_CORE, pu)) != NULL); hwloc_bitmap_set(physAccSet, core->logical_index); } #undef NEXT_PU numCPUsPhysAcc = hwloc_bitmap_weight(physAccSet); hwloc_bitmap_free(physAccSet); CHK_ERR(numCPUsPhysAcc > 0); // // all cores // numCPUsPhysAll = hwloc_get_nbobjs_by_type(topology, HWLOC_OBJ_CORE); CHK_ERR(numCPUsPhysAll > 0); // // accessible PUs // numCPUsLogAcc = hwloc_bitmap_weight(logAccSet); CHK_ERR(numCPUsLogAcc > 0); hwloc_bitmap_free(logAccSet); // // all PUs // numCPUsLogAll = hwloc_get_nbobjs_by_type(topology, HWLOC_OBJ_PU); CHK_ERR(numCPUsLogAll > 0); }