/* Unit test for hwloc_bitmap_singlify() on empty, full and sparse bitmaps. */
int main(void)
{
  hwloc_bitmap_t work, ref;

  work = hwloc_bitmap_alloc();
  ref = hwloc_bitmap_alloc();

  /* singlifying the empty set must keep it empty */
  hwloc_bitmap_singlify(work);
  assert(hwloc_bitmap_iszero(work));

  /* singlifying the full set must keep only the first bit */
  hwloc_bitmap_fill(work);
  hwloc_bitmap_singlify(work);
  hwloc_bitmap_zero(ref);
  hwloc_bitmap_set(ref, 0);
  assert(hwloc_bitmap_isequal(work, ref));
  assert(!hwloc_bitmap_compare(work, ref));

  /* a sparse set must be reduced to its lowest bit only */
  hwloc_bitmap_zero(work);
  hwloc_bitmap_set(work, 45);
  hwloc_bitmap_set(work, 46);
  hwloc_bitmap_set(work, 517);
  hwloc_bitmap_singlify(work);
  hwloc_bitmap_zero(ref);
  hwloc_bitmap_set(ref, 45);
  assert(hwloc_bitmap_isequal(work, ref));
  assert(!hwloc_bitmap_compare(work, ref));

  hwloc_bitmap_free(work);
  hwloc_bitmap_free(ref);

  return 0;
}
c_sublocid_t chpl_topo_getThreadLocality(void) { hwloc_cpuset_t cpuset; hwloc_nodeset_t nodeset; int flags; int node; if (!haveTopology) { return c_sublocid_any; } if (!topoSupport->cpubind->get_thread_cpubind) { return c_sublocid_any; } CHK_ERR_ERRNO((cpuset = hwloc_bitmap_alloc()) != NULL); CHK_ERR_ERRNO((nodeset = hwloc_bitmap_alloc()) != NULL); flags = HWLOC_CPUBIND_THREAD; CHK_ERR_ERRNO(hwloc_set_cpubind(topology, cpuset, flags) == 0); hwloc_cpuset_to_nodeset(topology, cpuset, nodeset); node = hwloc_bitmap_first(nodeset); hwloc_bitmap_free(nodeset); hwloc_bitmap_free(cpuset); return node; }
END_TEST

/* Verify that add_extra_memory_nodes_if_needed() grows the job's memory
 * set when the requested memory (16 GiB) exceeds what is reserved on the
 * currently-assigned node (15 GiB): node 1 should be added next to node 0. */
START_TEST(add_extra_memory_nodes_if_needed_test)
  {
  long long mem_requested;
  long long mem_reserved;
  std::set<int> current_mem_ids;
  hwloc_bitmap_t job_mems = hwloc_bitmap_alloc();
  hwloc_bitmap_t torque_root_mems = hwloc_bitmap_alloc();
  char buf[1024];

  /* the job currently owns only memory node 0 */
  hwloc_bitmap_set(job_mems, 0);
  current_mem_ids.insert(0);
  /* the host offers memory nodes 0 and 1 */
  hwloc_bitmap_set(torque_root_mems, 0);
  hwloc_bitmap_set(torque_root_mems, 1);

  /* build 16 GiB and 15 GiB in steps to stay within int literal range */
  mem_requested = 16 * 1024;
  mem_requested *= 1024;
  mem_requested *= 1024;
  mem_reserved = 15 * 1024;
  mem_reserved *= 1024;
  mem_reserved *= 1024;

  add_extra_memory_nodes_if_needed(mem_requested, mem_reserved, job_mems, torque_root_mems, current_mem_ids);

  /* both nodes must now be present in the job's memory set */
  fail_unless(hwloc_bitmap_weight(job_mems) == 2);
  hwloc_bitmap_displaylist(buf, sizeof(buf), job_mems);
  fail_unless(strchr(buf, '0') != NULL);
  fail_unless(strchr(buf, '1') != NULL);
  }
/* * Set the node where the current process will run */ void hw_set_first_core_node(int node, int proc) { hwloc_nodeset_t nset; hwloc_cpuset_t set,newset; if (local_topo->nnodes != 0 ){ nset = hwloc_bitmap_alloc(); set = hwloc_bitmap_alloc(); newset = hwloc_bitmap_alloc(); hwloc_bitmap_zero(set); hwloc_bitmap_zero(newset); hwloc_bitmap_zero(nset); hwloc_bitmap_set(nset,node); hwloc_cpuset_from_nodeset(topology,set,nset); int core = hwloc_bitmap_first(set); hwloc_bitmap_set(newset,core); if (proc) hwloc_set_proc_cpubind (topology,0,newset,HWLOC_CPUBIND_PROCESS); else hwloc_set_proc_cpubind (topology,0,newset,HWLOC_CPUBIND_THREAD); hwloc_bitmap_free(newset); hwloc_bitmap_free(set); hwloc_bitmap_free(nset); } }
/******************* FUNCTION *********************/ int TopoHwloc::getCurrentIdFromNUMABinding(void) const { hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); hwloc_cpuset_t cpuset = hwloc_bitmap_alloc(); hwloc_membind_policy_t policy; int res = -1; int weight; int status; #if defined(SCTK_ALLOC_DEBUG) && defined(hwloc_bitmap_list_snprintf) char buffer[4096]; #endif //if no numa node, return immediately if (getNbNumaEntities() == 1) return -1; //nodes // flags = 0 fallback on PROCESS if THREAD is not supported (as for windows). status = hwloc_get_membind_nodeset(topology,nodeset,&policy,0); assert(status == 0); if (status == 0) return -1; #if defined(SCTK_ALLOC_DEBUG) && defined(hwloc_bitmap_list_snprintf) status = hwloc_bitmap_list_snprintf(buffer,4096,nodeset); sprintf(stderr,"Current nodes : %s\n",buffer); #endif //cores // flags = 0 fallback on PROCESS if THREAD is not supported (as for windows). status = hwloc_get_membind(topology,cpuset,&policy,0); assert(status == 0); if (status == 0) return -1; #if defined(SCTK_ALLOC_DEBUG) && defined(hwloc_bitmap_list_snprintf) status = hwloc_bitmap_list_snprintf(buffer,4096,cpuset); sprintf(stderr,"Current cores : %s\n",buffer); #endif //nodes from cores hwloc_cpuset_to_nodeset(topology,cpuset,nodeset); #if defined(SCTK_ALLOC_DEBUG) && defined(hwloc_bitmap_list_snprintf) status = hwloc_bitmap_list_snprintf(buffer,4096,nodeset); sprintf(stderr,"Current nodes from cores : %s\n",buffer); #endif //calc res weight = hwloc_bitmap_weight(nodeset); assert(weight != 0); if (weight == 1) res = getFirstBitInBitmap(nodeset); hwloc_bitmap_free(cpuset); hwloc_bitmap_free(nodeset); return res; }
/* Hardwired topology for the Fujitsu FX10 (SPARC64 IXfx): 16 cores, each
 * with private 32KB L1i and 32KB L1d caches, all sharing a 12MB unified L2
 * inside one package. */
int hwloc_look_hardwired_fujitsu_fx10(struct hwloc_topology *topology)
{
  /* FIXME: what if a broken core is disabled? */
  unsigned i;
  hwloc_obj_t obj;
  hwloc_bitmap_t set;

  for(i=0; i<16; i++) {
    /* one-bit cpuset for core i; duplicated for its caches below */
    set = hwloc_bitmap_alloc();
    hwloc_bitmap_set(set, i);

    /* per-core L1 instruction cache (32KB, 128B lines, 2-way) */
    obj = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, -1);
    obj->cpuset = hwloc_bitmap_dup(set);
    obj->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
    obj->attr->cache.depth = 1;
    obj->attr->cache.size = 32*1024;
    obj->attr->cache.linesize = 128;
    obj->attr->cache.associativity = 2;
    hwloc_insert_object_by_cpuset(topology, obj);

    /* per-core L1 data cache (32KB, 128B lines, 2-way) */
    obj = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, -1);
    obj->cpuset = hwloc_bitmap_dup(set);
    obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
    obj->attr->cache.depth = 1;
    obj->attr->cache.size = 32*1024;
    obj->attr->cache.linesize = 128;
    obj->attr->cache.associativity = 2;
    hwloc_insert_object_by_cpuset(topology, obj);

    /* the core object takes ownership of `set` (no dup here) */
    obj = hwloc_alloc_setup_object(HWLOC_OBJ_CORE, i);
    obj->cpuset = set;
    hwloc_insert_object_by_cpuset(topology, obj);
  }

  /* cpuset covering all 16 cores, for the shared L2 and the package */
  set = hwloc_bitmap_alloc();
  hwloc_bitmap_set_range(set, 0, 15);

  /* shared unified L2 cache (12MB, 128B lines, 24-way) */
  obj = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, -1);
  obj->cpuset = hwloc_bitmap_dup(set);
  obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
  obj->attr->cache.depth = 2;
  obj->attr->cache.size = 12*1024*1024;
  obj->attr->cache.linesize = 128;
  obj->attr->cache.associativity = 24;
  hwloc_insert_object_by_cpuset(topology, obj);

  /* the package object takes ownership of `set` */
  obj = hwloc_alloc_setup_object(HWLOC_OBJ_PACKAGE, 0);
  obj->cpuset = set;
  hwloc_obj_add_info(obj, "CPUVendor", "Fujitsu");
  hwloc_obj_add_info(obj, "CPUModel", "SPARC64 IXfx");
  hwloc_insert_object_by_cpuset(topology, obj);

  /* one PU per core */
  hwloc_setup_pu_level(topology, 16);

  return 0;
}
/* JNI hook: bind the calling thread to the CPUs named by `cpumask`
 * (interpreted as a bitmask of CPU indices 0..31).
 * NOTE(review): despite the name, the `priority` parameter is never used
 * here — confirm whether thread priority was meant to be applied too. */
JNIEXPORT void JNICALL Java_com_rr_core_os_NativeHooksImpl_jniSetPriority( JNIEnv *env, jclass clazz, jint cpumask, jint priority ) {
    int topodepth;
    hwloc_topology_t topology;
    hwloc_cpuset_t cpuset;

    /* build a private topology for this call and discard it at the end */
    hwloc_topology_init(&topology);
    hwloc_topology_load(topology);
    /* topodepth is queried but otherwise unused */
    topodepth = hwloc_topology_get_depth(topology);

    cpuset = hwloc_bitmap_alloc();
    hwloc_bitmap_from_ulong( cpuset, (unsigned int)cpumask );

    char *str;
    hwloc_bitmap_asprintf(&str, cpuset);
    printf("cpumask [%d] => hwloc [%s]\n", cpumask, str);

    /* bind only the calling thread; report but do not fail on error */
    if (hwloc_set_cpubind(topology, cpuset, HWLOC_CPUBIND_THREAD)) {
        printf("Couldn't bind cpuset %s\n", str);
    } else {
        printf("BOUND cpuset %s\n", str);
    }

    free(str);

    /* Free our cpuset copy */
    hwloc_bitmap_free(cpuset);

    /* Destroy topology object. */
    hwloc_topology_destroy(topology);
}
/* Discovery entry point for the synthetic backend: instantiate the topology
 * described by backend_params.synthetic by recursing through the root's
 * children. */
void hwloc_look_synthetic(struct hwloc_topology *topology)
{
  hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
  unsigned first_cpu = 0, i;

  topology->support.discovery->pu = 1;

  /* start with id=0 for each level */
  for (i = 0; topology->backend_params.synthetic.arity[i] > 0; i++)
    topology->backend_params.synthetic.id[i] = 0;
  /* ... including the last one */
  topology->backend_params.synthetic.id[i] = 0;

  /* update first level type according to the synthetic type array */
  topology->levels[0][0]->type = topology->backend_params.synthetic.type[0];

  /* recursively build each child of the root; first_cpu carries the next
   * PU os_index across siblings */
  for (i = 0; i < topology->backend_params.synthetic.arity[0]; i++)
    first_cpu = hwloc__look_synthetic(topology, 1, first_cpu, cpuset);

  hwloc_bitmap_free(cpuset);

  /* annotate the root with the backend that produced this topology */
  hwloc_obj_add_info(topology->levels[0][0], "Backend", "Synthetic");
  hwloc_obj_add_info(topology->levels[0][0], "SyntheticDescription", topology->backend_params.synthetic.string);
}
/* Build one single-PU bitmap per processing unit so processes can be bound
 * round-robin across all PUs.  Requires hwloc to be initialized. */
static HYD_status handle_rr_binding(void)
{
    int i;
    HYD_status status = HYD_SUCCESS;

    HYDU_FUNC_ENTER();

    HYDU_ASSERT(hwloc_initialized, status);

    /* initialize bitmaps */
    HYDT_topo_hwloc_info.num_bitmaps = hwloc_get_nbobjs_by_type(topology, HWLOC_OBJ_PU);

    HYDU_MALLOC_OR_JUMP(HYDT_topo_hwloc_info.bitmap, hwloc_bitmap_t *, HYDT_topo_hwloc_info.num_bitmaps * sizeof(hwloc_bitmap_t), status);

    /* bitmap[i] contains exactly PU i */
    for (i = 0; i < HYDT_topo_hwloc_info.num_bitmaps; i++) {
        HYDT_topo_hwloc_info.bitmap[i] = hwloc_bitmap_alloc();
        hwloc_bitmap_only(HYDT_topo_hwloc_info.bitmap[i], i);
    }

  fn_exit:
    HYDU_FUNC_EXIT();
    return status;

  fn_fail:
    goto fn_exit;
}
/* Enumerate all Intel MIC OS devices, check their naming and attributes,
 * and verify each device's cpuset matches its non-I/O ancestor's cpuset. */
int main(void)
{
  hwloc_topology_t topology;
  int i;
  int err;

  hwloc_topology_init(&topology);
  /* I/O discovery must be enabled to see MIC OS devices */
  hwloc_topology_set_flags(topology, HWLOC_TOPOLOGY_FLAG_IO_DEVICES);
  hwloc_topology_load(topology);

  /* iterate until no more MIC devices are found */
  for(i=0; ; i++) {
    hwloc_bitmap_t set;
    hwloc_obj_t osdev, ancestor;
    const char *value;

    osdev = hwloc_intel_mic_get_device_osdev_by_index(topology, i);
    if (!osdev)
      break;
    assert(osdev);

    ancestor = hwloc_get_non_io_ancestor_obj(topology, osdev);

    printf("found OSDev %s\n", osdev->name);
    /* names must be "mic<i>" with a matching index */
    err = strncmp(osdev->name, "mic", 3);
    assert(!err);
    assert(atoi(osdev->name+3) == (int) i);
    assert(osdev->attr->osdev.type == HWLOC_OBJ_OSDEV_COPROC);
    value = hwloc_obj_get_info_by_name(osdev, "CoProcType");
    err = strcmp(value, "MIC");
    assert(!err);
    /* informational attributes, printed but not asserted */
    value = hwloc_obj_get_info_by_name(osdev, "MICFamily");
    printf("found MICFamily %s\n", value);
    value = hwloc_obj_get_info_by_name(osdev, "MICSKU");
    printf("found MICSKU %s\n", value);
    value = hwloc_obj_get_info_by_name(osdev, "MICActiveCores");
    printf("found MICActiveCores %s\n", value);
    value = hwloc_obj_get_info_by_name(osdev, "MICMemorySize");
    printf("found MICMemorySize %s\n", value);

    set = hwloc_bitmap_alloc();
    err = hwloc_intel_mic_get_device_cpuset(topology, i, set);
    if (err < 0) {
      printf("failed to get cpuset for device %d\n", i);
    } else {
      char *cpuset_string = NULL;

      hwloc_bitmap_asprintf(&cpuset_string, set);
      printf("got cpuset %s for device %d\n", cpuset_string, i);
      /* the device cpuset must equal its hosting object's cpuset */
      assert(hwloc_bitmap_isequal(set, ancestor->cpuset));
      free(cpuset_string);
    }
    hwloc_bitmap_free(set);
  }

  hwloc_topology_destroy(topology);

  return 0;
}
/*
 * Return the sublocale (NUMA node) holding the memory at address p, or
 * c_sublocid_any when there is no topology, no get_area_memlocation
 * support, p is NULL, or the resulting node is not an actual sublocale.
 */
c_sublocid_t chpl_topo_getMemLocality(void* p) {
  int flags;
  hwloc_nodeset_t nodeset;
  int node;

  if (!haveTopology) {
    return c_sublocid_any;
  }

  if (!topoSupport->membind->get_area_memlocation) {
    return c_sublocid_any;
  }

  if (p == NULL) {
    return c_sublocid_any;
  }

  CHK_ERR_ERRNO((nodeset = hwloc_bitmap_alloc()) != NULL);

  /* query by nodeset; length 1 is enough to identify the containing page */
  flags = HWLOC_MEMBIND_BYNODESET;
  CHK_ERR_ERRNO(hwloc_get_area_memlocation(topology, p, 1, nodeset, flags) == 0);

  node = hwloc_bitmap_first(nodeset);
  if (!isActualSublocID(node)) {
    node = c_sublocid_any;
  }

  hwloc_bitmap_free(nodeset);

  return node;
}
/*
 * Bind the calling thread to the CPUs of the given sublocale's NUMA node.
 * Silently does nothing when there is no topology or no support for
 * setting thread CPU bindings; reports fatal errors otherwise.
 */
void chpl_topo_setThreadLocality(c_sublocid_t subloc) {
  hwloc_cpuset_t cpuset;
  int flags;

  _DBG_P("chpl_topo_setThreadLocality(%d)\n", (int) subloc);

  if (!haveTopology) {
    return;
  }

  if (!topoSupport->cpubind->set_thread_cpubind)
    return;

  if ((cpuset = hwloc_bitmap_alloc()) == NULL) {
    report_error("hwloc_bitmap_alloc()", errno);
  }

  /* translate the sublocale's allowed NUMA nodes into a cpuset */
  hwloc_cpuset_from_nodeset(topology, cpuset, getNumaObj(subloc)->allowed_nodeset);

  /* STRICT: fail rather than silently fall back to a broader binding */
  flags = HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_STRICT;
  if (hwloc_set_cpubind(topology, cpuset, flags)) {
    report_error("hwloc_set_cpubind()", errno);
  }

  hwloc_bitmap_free(cpuset);
}
/* Apply an interleaved memory-binding policy across the NUMA nodes that
 * host any of the given processors.
 * NOTE(review): the bitmap collects NUMA *node* indices but is passed to
 * likwid_hwloc_set_membind without a by-nodeset flag — confirm this matches
 * the wrapped hwloc version's membind set interpretation. */
void hwloc_numa_setInterleaved(int* processorList, int numberOfProcessors)
{
    int i,j;
    int ret = 0;
    likwid_hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
    likwid_hwloc_membind_flags_t flags = HWLOC_MEMBIND_STRICT|HWLOC_MEMBIND_PROCESS;
    likwid_hwloc_bitmap_zero(cpuset);

    /* mark every NUMA node that owns at least one of the listed processors */
    for (i=0; i<numa_info.numberOfNodes; i++)
    {
        for (j=0; j<numberOfProcessors; j++)
        {
            if (likwid_hwloc_findProcessor(i,processorList[j]))
            {
                likwid_hwloc_bitmap_set(cpuset, i);
            }
        }
    }

    ret = likwid_hwloc_set_membind(hwloc_topology, cpuset, HWLOC_MEMBIND_INTERLEAVE, flags);
    likwid_hwloc_bitmap_free(cpuset);
    if (ret < 0)
    {
        ERROR;
    }
}
/*
 * Bind the current thread to the CPUs of the given NUMA node.
 * Does nothing when the topology has no NUMA nodes.
 */
void hw_set_thread_node(int node)
{
  hwloc_nodeset_t node_mask;
  hwloc_cpuset_t cpu_mask;

  if (local_topo->nnodes == 0)
    return;

  node_mask = hwloc_bitmap_alloc();
  cpu_mask = hwloc_bitmap_alloc();

  /* one-bit nodeset translated into the node's cpuset */
  hwloc_bitmap_zero(node_mask);
  hwloc_bitmap_set(node_mask, node);
  hwloc_cpuset_from_nodeset(topology, cpu_mask, node_mask);

  hwloc_set_proc_cpubind(topology, 0, cpu_mask, HWLOC_CPUBIND_THREAD);

  hwloc_bitmap_free(cpu_mask);
  hwloc_bitmap_free(node_mask);
}
/******************* FUNCTION *********************/ int TopoHwloc::getCurrentId ( int level, int depth ) const { //vars int res = -1; //get absolute depth int absDepth = getAbsDepth(level,depth); //search current thread binding hwloc_cpuset_t cpuset = hwloc_bitmap_alloc(); int status = hwloc_get_cpubind (topology, cpuset, 0); allocAssume(status >= 0,"Failed to get the current thread CPU binding with hwloc."); //count overlap over current level int cnt = hwloc_get_nbobjs_inside_cpuset_by_depth(topology,cpuset,absDepth); //if get only one return its ID if (cnt == 1) { hwloc_obj_t obj = hwloc_get_obj_inside_cpuset_by_depth(topology,cpuset,absDepth,0); //TODO maybe their is an accessor for this res = obj->logical_index; } //clean memory hwloc_bitmap_free(cpuset); return res; }
/* Exercise every memory-binding entry point (process/thread/proc/area/alloc)
 * with the given nodeset, policy and flags, checking each result against the
 * topology's advertised membind support and the caller's expectation. */
static void testmem(hwloc_const_bitmap_t nodeset, hwloc_membind_policy_t policy, int flags, int expected)
{
  hwloc_bitmap_t new_nodeset = hwloc_bitmap_alloc();
  hwloc_membind_policy_t newpolicy;
  void *area;
  size_t area_size = 1024;

  result_set("Bind this singlethreaded process memory", hwloc_set_membind(topology, nodeset, policy, flags), (support->membind->set_thisproc_membind || support->membind->set_thisthread_membind) && expected);
  result_get("Get this singlethreaded process memory", nodeset, new_nodeset, hwloc_get_membind(topology, new_nodeset, &newpolicy, flags), (support->membind->get_thisproc_membind || support->membind->get_thisthread_membind) && expected);
  result_set("Bind this thread memory", hwloc_set_membind(topology, nodeset, policy, flags | HWLOC_MEMBIND_THREAD), support->membind->set_thisproc_membind && expected);
  result_get("Get this thread memory", nodeset, new_nodeset, hwloc_get_membind(topology, new_nodeset, &newpolicy, flags | HWLOC_MEMBIND_THREAD), support->membind->get_thisproc_membind && expected);
  result_set("Bind this whole process memory", hwloc_set_membind(topology, nodeset, policy, flags | HWLOC_MEMBIND_PROCESS), support->membind->set_thisproc_membind && expected);
  result_get("Get this whole process memory", nodeset, new_nodeset, hwloc_get_membind(topology, new_nodeset, &newpolicy, flags | HWLOC_MEMBIND_PROCESS), support->membind->get_thisproc_membind && expected);

#ifdef HWLOC_WIN_SYS
  result_set("Bind process memory", hwloc_set_proc_membind(topology, GetCurrentProcess(), nodeset, policy, flags), support->membind->set_proc_membind && expected);
  result_get("Get process memory", nodeset, new_nodeset, hwloc_get_proc_membind(topology, GetCurrentProcess(), new_nodeset, &newpolicy, flags), support->membind->get_proc_membind && expected);
#else /* !HWLOC_WIN_SYS */
  result_set("Bind process memory", hwloc_set_proc_membind(topology, getpid(), nodeset, policy, flags), support->membind->set_proc_membind && expected);
  result_get("Get process memory", nodeset, new_nodeset, hwloc_get_proc_membind(topology, getpid(), new_nodeset, &newpolicy, flags), support->membind->get_proc_membind && expected);
#endif /* !HWLOC_WIN_SYS */

  /* the "area" is the memory holding the local new_nodeset pointer itself —
   * a small but valid region for area-membind calls */
  result_set("Bind area", hwloc_set_area_membind(topology, &new_nodeset, sizeof(new_nodeset), nodeset, policy, flags), support->membind->set_area_membind && expected);
  result_get("Get area", nodeset, new_nodeset, hwloc_get_area_membind(topology, &new_nodeset, sizeof(new_nodeset), new_nodeset, &newpolicy, flags), support->membind->get_area_membind && expected);

  /* allocation test is skipped when migrating (nothing to migrate) */
  if (!(flags & HWLOC_MEMBIND_MIGRATE)) {
    result_set("Alloc bound area", (area = hwloc_alloc_membind(topology, area_size, nodeset, policy, flags)) == NULL, (support->membind->alloc_membind && expected) || !(flags & HWLOC_MEMBIND_STRICT));
    if (area) {
      /* touch the pages so the binding is actually applied */
      memset(area, 0, area_size);
      result_get("Get bound area", nodeset, new_nodeset, hwloc_get_area_membind(topology, area, area_size, new_nodeset, &newpolicy, flags), support->membind->get_area_membind && expected);
      result_get("Free bound area", NULL, NULL, hwloc_free(topology, area, area_size), support->membind->alloc_membind && expected);
    }
  }

  printf("\n");

  hwloc_bitmap_free(new_nodeset);
}
/* Discovery callback of the synthetic backend: instantiate the topology
 * described by the backend's private data.  Returns 1 (levels found). */
static int hwloc_look_synthetic(struct hwloc_backend *backend)
{
  struct hwloc_topology *topology = backend->topology;
  struct hwloc_synthetic_backend_data_s *data = backend->private_data;
  hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
  unsigned i;

  /* the root must not have been populated by another backend yet */
  assert(!topology->levels[0][0]->cpuset);

  hwloc_alloc_obj_cpusets(topology->levels[0][0]);

  topology->support.discovery->pu = 1;

  /* start with os_index 0 for each level */
  for (i = 0; data->level[i].arity > 0; i++)
    data->level[i].next_os_index = 0;
  /* ... including the last one */
  data->level[i].next_os_index = 0;

  /* update first level type according to the synthetic type array */
  topology->levels[0][0]->type = data->level[0].type;
  hwloc_synthetic__post_look_hooks(&data->level[0], topology->levels[0][0]);

  /* recursively build each child of the root */
  for (i = 0; i < data->level[0].arity; i++)
    hwloc__look_synthetic(topology, data, 1, cpuset);

  hwloc_bitmap_free(cpuset);

  /* annotate the root with the backend that produced this topology */
  hwloc_obj_add_info(topology->levels[0][0], "Backend", "Synthetic");
  hwloc_obj_add_info(topology->levels[0][0], "SyntheticDescription", data->string);
  return 1;
}
/* * Get the memory policy of a process */ void hw_get_mempol(int *node, int *mem_pol) { hwloc_nodeset_t nset; hwloc_membind_policy_t mempol=-1; if (local_topo->nnodes != 0 ){ nset = hwloc_bitmap_alloc(); hwloc_get_membind_nodeset(topology,nset,&mempol,0); (*node) = hwloc_bitmap_first(nset); switch(mempol) { case HWLOC_MEMBIND_FIRSTTOUCH: (*mem_pol) = OS; break; case HWLOC_MEMBIND_BIND: (*mem_pol) = LOCAL; break; case HWLOC_MEMBIND_INTERLEAVE: (*mem_pol) = INTERLEAVE; (*node) = -1; break; default: (*mem_pol) = -1; (*node) = -1; break; } } else (*mem_pol) = -1; }
/* Exercise every CPU-binding entry point (process/thread/proc) with the
 * given cpuset and flags, checking each result against the topology's
 * advertised cpubind support. */
static void test(hwloc_const_bitmap_t cpuset, int flags)
{
  hwloc_bitmap_t new_cpuset = hwloc_bitmap_alloc();

  result_set("Bind this singlethreaded process", hwloc_set_cpubind(topology, cpuset, flags), support->cpubind->set_thisproc_cpubind || support->cpubind->set_thisthread_cpubind);
  result_get("Get this singlethreaded process", cpuset, new_cpuset, hwloc_get_cpubind(topology, new_cpuset, flags), support->cpubind->get_thisproc_cpubind || support->cpubind->get_thisthread_cpubind);
  result_set("Bind this thread", hwloc_set_cpubind(topology, cpuset, flags | HWLOC_CPUBIND_THREAD), support->cpubind->set_thisthread_cpubind);
  result_get("Get this thread", cpuset, new_cpuset, hwloc_get_cpubind(topology, new_cpuset, flags | HWLOC_CPUBIND_THREAD), support->cpubind->get_thisthread_cpubind);
  result_set("Bind this whole process", hwloc_set_cpubind(topology, cpuset, flags | HWLOC_CPUBIND_PROCESS), support->cpubind->set_thisproc_cpubind);
  result_get("Get this whole process", cpuset, new_cpuset, hwloc_get_cpubind(topology, new_cpuset, flags | HWLOC_CPUBIND_PROCESS), support->cpubind->get_thisproc_cpubind);

#ifdef HWLOC_WIN_SYS
  /* Windows: use native process/thread handles */
  result_set("Bind process", hwloc_set_proc_cpubind(topology, GetCurrentProcess(), cpuset, flags | HWLOC_CPUBIND_PROCESS), support->cpubind->set_proc_cpubind);
  result_get("Get process", cpuset, new_cpuset, hwloc_get_proc_cpubind(topology, GetCurrentProcess(), new_cpuset, flags | HWLOC_CPUBIND_PROCESS), support->cpubind->get_proc_cpubind);
  result_set("Bind thread", hwloc_set_thread_cpubind(topology, GetCurrentThread(), cpuset, flags | HWLOC_CPUBIND_THREAD), support->cpubind->set_thread_cpubind);
  result_get("Get thread", cpuset, new_cpuset, hwloc_get_thread_cpubind(topology, GetCurrentThread(), new_cpuset, flags | HWLOC_CPUBIND_THREAD), support->cpubind->get_thread_cpubind);
#else /* !HWLOC_WIN_SYS */
  /* POSIX: use getpid()/pthread_self() identifiers */
  result_set("Bind whole process", hwloc_set_proc_cpubind(topology, getpid(), cpuset, flags | HWLOC_CPUBIND_PROCESS), support->cpubind->set_proc_cpubind);
  result_get("Get whole process", cpuset, new_cpuset, hwloc_get_proc_cpubind(topology, getpid(), new_cpuset, flags | HWLOC_CPUBIND_PROCESS), support->cpubind->get_proc_cpubind);
  result_set("Bind process", hwloc_set_proc_cpubind(topology, getpid(), cpuset, flags), support->cpubind->set_proc_cpubind);
  result_get("Get process", cpuset, new_cpuset, hwloc_get_proc_cpubind(topology, getpid(), new_cpuset, flags), support->cpubind->get_proc_cpubind);
#ifdef hwloc_thread_t
  result_set("Bind thread", hwloc_set_thread_cpubind(topology, pthread_self(), cpuset, flags), support->cpubind->set_thread_cpubind);
  result_get("Get thread", cpuset, new_cpuset, hwloc_get_thread_cpubind(topology, pthread_self(), new_cpuset, flags), support->cpubind->get_thread_cpubind);
#endif
#endif /* !HWLOC_WIN_SYS */

  printf("\n");

  hwloc_bitmap_free(new_cpuset);
}
/* When run on the desired OS thread, verify (via hwloc) that the thread's
 * actual CPU binding matches the scheduler's intended affinity mask;
 * otherwise return size_t(-1) so the caller reschedules this PX-thread. */
std::size_t thread_affinity_worker(std::size_t desired)
{
    // Returns the OS-thread number of the worker that is running this
    // PX-thread.
    std::size_t current = hpx::get_worker_thread_num();
    bool numa_sensitive = hpx::is_scheduler_numa_sensitive();

    if (current == desired)
    {
        // extract the desired affinity mask
        hpx::threads::topology const& t = hpx::get_runtime().get_topology();
        hpx::threads::mask_type desired_mask = t.get_thread_affinity_mask(current, numa_sensitive);

#if defined(HPX_HAVE_HWLOC)
        std::size_t idx = hpx::threads::find_first(desired_mask);

        // build a private hwloc topology just for this check
        hwloc_topology_t topo;
        hwloc_topology_init(&topo);
        hwloc_topology_load(topo);

        // retrieve the current affinity mask
        hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
        hwloc_bitmap_zero(cpuset);
        if (0 == hwloc_get_cpubind(topo, cpuset, HWLOC_CPUBIND_THREAD))
        {
            // sadly get_cpubind is not implemented for Windows based systems
            // compare against a single-PU mask holding the expected index
            hwloc_cpuset_t cpuset_cmp = hwloc_bitmap_alloc();
            hwloc_bitmap_zero(cpuset_cmp);
            hwloc_bitmap_only(cpuset_cmp, unsigned(idx));
            HPX_TEST(hwloc_bitmap_compare(cpuset, cpuset_cmp) == 0);
            hwloc_bitmap_free(cpuset_cmp);
        }
        else
        {
            HPX_TEST(false && "hwloc_get_cpubind(topo, cpuset, HWLOC_CPUBIND_THREAD) failed!");
        }

        hwloc_bitmap_free(cpuset);
        hwloc_topology_destroy(topo);
#endif
        return desired;
    }

    // This PX-thread has been run by the wrong OS-thread, make the foreman
    // try again by rescheduling it.
    return std::size_t(-1);
}
/*
 * Migrate the pages of the array region [ary, ary + |end-start|) to the
 * NUMA node running the current thread (first-touch-like placement based
 * on where the thread last executed).  mem_ac is currently unused.
 */
inline void __pact_reuse_add(void *ary, long long start, long long end, long long mem_ac)
{
  hwloc_bitmap_t set = hwloc_bitmap_alloc();

  /* locate the PU the thread last ran on and reduce the mask to one PU
   * (the previous hwloc_get_cpubind() call was dead code — its result was
   * immediately overwritten by hwloc_get_last_cpu_location()) */
  hwloc_get_last_cpu_location(__pact_topo, set, HWLOC_CPUBIND_THREAD);
  hwloc_bitmap_singlify(set);

  /* BUG FIX: abs() takes an int, silently truncating the long long byte
   * count for regions >= 2GB; llabs() keeps the full 64-bit length. */
  hwloc_set_area_membind ( __pact_topo, (const void*)ary, llabs(end-start),
                           (hwloc_const_cpuset_t)set,
                           HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_MIGRATE );

  hwloc_bitmap_free(set);
}
/* Enumerate InfiniBand devices via libibverbs, print each device's cpuset,
 * and verify the matching hwloc OS-device object (type and name). */
int main(void)
{
  hwloc_topology_t topology;
  struct ibv_device **dev_list, *dev;
  int count, i;
  int err;

  dev_list = ibv_get_device_list(&count);
  if (!dev_list) {
    /* no verbs stack available: not an error for this test */
    fprintf(stderr, "ibv_get_device_list failed\n");
    return 0;
  }
  printf("ibv_get_device_list found %d devices\n", count);

  hwloc_topology_init(&topology);
  /* keep PCI and OS devices so hwloc_ibv_get_device_osdev() can find them */
  hwloc_topology_set_type_filter(topology, HWLOC_OBJ_PCI_DEVICE, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
  hwloc_topology_set_type_filter(topology, HWLOC_OBJ_OS_DEVICE, HWLOC_TYPE_FILTER_KEEP_IMPORTANT);
  hwloc_topology_load(topology);

  for(i=0; i<count; i++) {
    hwloc_bitmap_t set;

    dev = dev_list[i];

    set = hwloc_bitmap_alloc();
    err = hwloc_ibv_get_device_cpuset(topology, dev, set);
    if (err < 0) {
      printf("failed to get cpuset for device %d (%s)\n", i, ibv_get_device_name(dev));
    } else {
      char *cpuset_string = NULL;
      hwloc_obj_t os;

      hwloc_bitmap_asprintf(&cpuset_string, set);
      printf("got cpuset %s for device %d (%s)\n", cpuset_string, i, ibv_get_device_name(dev));
      free(cpuset_string);

      os = hwloc_ibv_get_device_osdev(topology, dev);
      if (os) {
        /* the OS device must be an OpenFabrics device with the same name */
        assert(os->type == HWLOC_OBJ_OS_DEVICE);
        printf("found OS object subtype %u lindex %u name %s\n", (unsigned) os->attr->osdev.type, os->logical_index, os->name);
        assert(os->attr->osdev.type == HWLOC_OBJ_OSDEV_OPENFABRICS);
        if (strcmp(ibv_get_device_name(dev), os->name))
          assert(0);
      }
    }
    hwloc_bitmap_free(set);
  }

  hwloc_topology_destroy(topology);

  ibv_free_device_list(dev_list);

  return 0;
}
signed getCurrentCore() { hwloc_topology_t topology = getHWTopology(); hwloc_cpuset_t cpu_set = hwloc_bitmap_alloc(); if (hwloc_get_last_cpu_location(topology, cpu_set, HWLOC_CPUBIND_THREAD) < 0) { return -1; } hwloc_obj_t current_core = hwloc_get_next_obj_covering_cpuset_by_type(topology, cpu_set, HWLOC_OBJ_CORE, NULL); hwloc_bitmap_free(cpu_set); return current_core->logical_index; }
/* Cross-compare N assorted bitmaps (empty, full, full-with-holes, single
 * and multiple ranges) and tally how often each relation kind is reported
 * by check_compare(). */
int main(void)
{
#define N 10
  hwloc_bitmap_t sets[N];
  unsigned i,j;
  /* one counter per relation: EQUAL/INCLUDED/CONTAINS/INTERSECTS/DIFFERENT */
  unsigned stats[5];

  memset(stats, 0, sizeof(stats));

  sets[0] = hwloc_bitmap_alloc();                 /* empty */
  sets[1] = hwloc_bitmap_alloc_full();            /* full */
  sets[2] = hwloc_bitmap_alloc_full();            /* full with one hole */
  hwloc_bitmap_clr_range(sets[2], 56, 129);
  sets[3] = hwloc_bitmap_alloc_full();            /* full with two holes */
  hwloc_bitmap_clr_range(sets[3], 0, 33);
  hwloc_bitmap_clr_range(sets[3], 50, 135);
  sets[4] = hwloc_bitmap_alloc();                 /* small leading range */
  hwloc_bitmap_set_range(sets[4], 0, 24);
  sets[5] = hwloc_bitmap_alloc();
  hwloc_bitmap_set_range(sets[5], 0, 178);
  sets[6] = hwloc_bitmap_alloc();                 /* two distant ranges */
  hwloc_bitmap_set_range(sets[6], 0, 191);
  hwloc_bitmap_set_range(sets[6], 1031, 2035);
  sets[7] = hwloc_bitmap_alloc();
  hwloc_bitmap_set_range(sets[7], 324, 456);
  sets[8] = hwloc_bitmap_alloc();                 /* overlapping sets[7] */
  hwloc_bitmap_set_range(sets[8], 323, 455);
  hwloc_bitmap_set_range(sets[8], 136, 177);
  sets[9] = hwloc_bitmap_alloc();                 /* tiny range */
  hwloc_bitmap_set_range(sets[9], 3, 6);

  /* compare every ordered pair, including each set against itself */
  for(i=0; i<N; i++) {
    for(j=0; j<N; j++) {
      int res = check_compare(sets[i], sets[j]);
      stats[res]++;
    }
  }

  for(i=0; i<N; i++)
    hwloc_bitmap_free(sets[i]);

  printf("got %u EQUAL\n", stats[0]);
  printf("got %u INCLUDED\n", stats[1]);
  printf("got %u CONTAINS\n", stats[2]);
  printf("got %u INTERSECTS\n", stats[3]);
  printf("got %u DIFFERENT\n", stats[4]);

  return 0;
}
/* Map a processor id to its (socket, core) os_index pair by scanning all
 * cores of the global topology.  Returns OPAL_SUCCESS, or an OPAL error
 * when the topology is missing, allocation fails, or the PU is not found. */
static int mca_sbgp_map_to_socket_core(int processor_id, int *socket, int *core)
{
    int ret = OPAL_ERR_NOT_FOUND;
    hwloc_obj_t obj;
    hwloc_topology_t *t;
    hwloc_bitmap_t good;

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_INITIALIZED;
    }
    t = &opal_hwloc_topology;

    good = hwloc_bitmap_alloc();
    if (NULL == good) {
        return OPAL_ERR_OUT_OF_RESOURCE;
    }

    /* Iterate through every core and find one that contains the
       processor_id.  Then find the corresponding socket. */
    for (obj = hwloc_get_next_obj_by_type(*t, HWLOC_OBJ_CORE, NULL);
         NULL != obj;
         obj = hwloc_get_next_obj_by_type(*t, HWLOC_OBJ_CORE, obj)) {
        /* consider only CPUs that are both online and allowed */
        hwloc_bitmap_and(good, obj->online_cpuset, obj->allowed_cpuset);
        /* Does this core contain the processor_id in question? */
        if (hwloc_bitmap_isset(good, processor_id)) {
            *core = obj->os_index;

            /* Go upward from the core object until we find its parent
               socket. */
            while (HWLOC_OBJ_SOCKET != obj->type) {
                if (NULL == obj->parent) {
                    /* If we get to the root without finding a socket,
                       er.. Hmm.  Error! */
                    ret = OPAL_ERR_NOT_FOUND;
                    goto out;
                }
                obj = obj->parent;
            }
            *socket = obj->os_index;

            ret = OPAL_SUCCESS;
            goto out;
        }
    }

    /* If we didn't even find the right core, we didn't find it.  Fall
       through. */
    ret = OPAL_ERR_NOT_FOUND;

 out:
    hwloc_bitmap_free(good);
    return ret;
}
/* Parse one forced-PCI-locality entry and append it to the topology's
 * array.  Accepted formats (hex fields, then a space and a cpuset string):
 *   "domain:busfirst-buslast cpuset", "domain:bus cpuset", "domain cpuset".
 * Silently ignores malformed entries and allocation failures. */
static void
hwloc_pci_forced_locality_parse_one(struct hwloc_topology *topology,
                                    const char *string /* must contain a ' ' */,
                                    unsigned *allocated)
{
  unsigned nr = topology->pci_forced_locality_nr;
  unsigned domain, bus_first, bus_last, dummy;
  hwloc_bitmap_t set;
  char *tmp;

  /* `dummy` only checks that the cpuset part starts with a hex digit */
  if (sscanf(string, "%x:%x-%x %x", &domain, &bus_first, &bus_last, &dummy) == 4) {
    /* fine */
  } else if (sscanf(string, "%x:%x %x", &domain, &bus_first, &dummy) == 3) {
    bus_last = bus_first;
  } else if (sscanf(string, "%x %x", &domain, &dummy) == 2) {
    /* no bus given: cover the whole bus range */
    bus_first = 0;
    bus_last = 255;
  } else
    return;

  /* the cpuset string starts right after the space */
  tmp = strchr(string, ' ');
  if (!tmp)
    return;
  tmp++;

  set = hwloc_bitmap_alloc();
  hwloc_bitmap_sscanf(set, tmp);

  /* grow the forced-locality array on demand (capacity doubling) */
  if (!*allocated) {
    topology->pci_forced_locality = malloc(sizeof(*topology->pci_forced_locality));
    if (!topology->pci_forced_locality)
      goto out_with_set; /* failed to allocate, ignore this forced locality */
    *allocated = 1;
  } else if (nr >= *allocated) {
    struct hwloc_pci_forced_locality_s *tmp;
    tmp = realloc(topology->pci_forced_locality,
                  2 * *allocated * sizeof(*topology->pci_forced_locality));
    if (!tmp)
      goto out_with_set; /* failed to allocate, ignore this forced locality */
    topology->pci_forced_locality = tmp;
    *allocated *= 2;
  }
  topology->pci_forced_locality[nr].domain = domain;
  topology->pci_forced_locality[nr].bus_first = bus_first;
  topology->pci_forced_locality[nr].bus_last = bus_last;
  /* the entry takes ownership of `set` */
  topology->pci_forced_locality[nr].cpuset = set;
  topology->pci_forced_locality_nr++;
  return;

 out_with_set:
  hwloc_bitmap_free(set);
  return;
}
/*
 * Bind the current process to the given core (index translated through
 * the phys_cpus mapping).
 */
void hw_set_proc_core(int core)
{
  hwloc_cpuset_t mask;

  core = phys_cpus[core];

  /* single-bit mask holding just the target core */
  mask = hwloc_bitmap_alloc();
  hwloc_bitmap_zero(mask);
  hwloc_bitmap_set(mask, core);

  hwloc_set_proc_cpubind(topology, 0, mask, HWLOC_CPUBIND_PROCESS);

  hwloc_bitmap_free(mask);
}
/*
 * Return the NUMA node containing the CPUs the current thread is bound
 * to, or -1 when the topology has no NUMA nodes.
 */
int hw_my_node()
{
  int node = -1;
  hwloc_cpuset_t cpus;
  hwloc_nodeset_t nodes;

  if (local_topo->nnodes != 0) {
    cpus = hwloc_bitmap_alloc();
    nodes = hwloc_bitmap_alloc();

    /* read the thread binding and translate it into NUMA nodes */
    hwloc_get_cpubind(topology, cpus, HWLOC_CPUBIND_THREAD);
    hwloc_cpuset_to_nodeset(topology, cpus, nodes);
    node = hwloc_bitmap_first(nodes);

    hwloc_bitmap_free(cpus);
    hwloc_bitmap_free(nodes);
  }

  return node;
}
/******************* FUNCTION *********************/ int TopoHwloc::getCurrentIdFromThreadBinding(void) const { hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); hwloc_cpuset_t cpuset = hwloc_bitmap_alloc(); int res = -1; int weight; #if defined(SCTK_ALLOC_DEBUG) && defined(hwloc_bitmap_list_snprintf) char buffer[4096]; #endif //get current core binding //for windows use 0 instead of HWLOC_CPUBIND_THREAD int status = hwloc_get_cpubind (topology, cpuset, 0); assert(status == 0); if (status == 0) return -1; #if defined(SCTK_ALLOC_DEBUG) && defined(hwloc_bitmap_list_snprintf) status = hwloc_bitmap_list_snprintf(buffer,4096,cpuset); sprintf(stderr,"Current cores : %s\n",buffer); #endif //nodes from cores hwloc_cpuset_to_nodeset(topology,cpuset,nodeset); #if defined(SCTK_ALLOC_DEBUG) && defined(hwloc_bitmap_list_snprintf) status = hwloc_bitmap_list_snprintf(buffer,4096,nodeset); sprintf(stderr,"Current nodes from cores : %s\n",buffer); #endif //calc res weight = hwloc_bitmap_weight(nodeset); assert(weight != 0); if (weight == 1) res = getFirstBitInBitmap(nodeset); hwloc_bitmap_free(cpuset); hwloc_bitmap_free(nodeset); return res; }
/*
 * Bind the current thread to the given core (index translated through
 * the phys_cpus mapping).
 */
void hw_set_my_core(int cpu)
{
  hwloc_cpuset_t mask;

  cpu = phys_cpus[cpu];

  /* single-bit mask holding just the target core */
  mask = hwloc_bitmap_alloc();
  hwloc_bitmap_zero(mask);
  hwloc_bitmap_set(mask, cpu);

  hwloc_set_cpubind(topology, mask, HWLOC_CPUBIND_THREAD);

  hwloc_bitmap_free(mask);
}