/* Build a resource set (*rad) covering every memory node listed in NODESET.
 * Nodes are looked up at the MCM level of the AIX resource-set hierarchy.
 * Always returns 0; the caller owns *rad and must rs_free() it. */
static int
hwloc_aix_prepare_membind(hwloc_topology_t topology, rsethandle_t *rad, hwloc_const_nodeset_t nodeset, int flags __hwloc_attribute_unused)
{
  rsethandle_t base, tmp;
  int mcm_depth;
  int nodeid;

  /* Depth of the MCM (memory affinity) level in the system rset tree. */
  mcm_depth = rs_getinfo(NULL, R_MCMSDL, 0);

  /* Honor WHOLE_SYSTEM by starting from the whole machine instead of
   * the current partition. */
  base = (topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM)
    ? rs_alloc(RS_ALL)
    : rs_alloc(RS_PARTITION);

  *rad = rs_alloc(RS_EMPTY);
  tmp = rs_alloc(RS_EMPTY);

  /* Union the rset of each requested node into *rad. */
  hwloc_bitmap_foreach_begin(nodeid, nodeset)
    rs_getrad(base, tmp, mcm_depth, nodeid, 0);
    rs_op(RS_UNION, tmp, *rad, 0, 0);
  hwloc_bitmap_foreach_end();

  rs_free(base);
  rs_free(tmp);
  return 0;
}
/* Bind resource WHO (of kind WHAT) to the CPUs listed in HWLOC_SET.
 * Bindings obtained through rsets on AIX are always strict.
 * Returns 0 on success, -1 with errno set on failure. */
static int
hwloc_aix_set_sth_cpubind(hwloc_topology_t topology, rstype_t what, rsid_t who, hwloc_const_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
{
  rsethandle_t rset;
  unsigned cpuid;
  int ret;

  /* CPU binding through rsets also binds memory; refuse when the caller
   * explicitly asked us not to touch memory binding. */
  if (flags & HWLOC_CPUBIND_NOMEMBIND) {
    errno = ENOSYS;
    return -1;
  }

  /* Binding to the complete cpuset amounts to unbinding: just detach
   * whatever rset is currently attached. */
  if (hwloc_bitmap_isequal(hwloc_set, hwloc_topology_get_complete_cpuset(topology)))
    return ra_detachrset(what, who, 0) ? -1 : 0;

  /* Translate the hwloc bitmap into an AIX resource set. */
  rset = rs_alloc(RS_EMPTY);
  hwloc_bitmap_foreach_begin(cpuid, hwloc_set)
    rs_op(RS_ADDRESOURCE, rset, NULL, R_PROCS, cpuid);
  hwloc_bitmap_foreach_end();

  ret = ra_attachrset(what, who, rset, 0);
  rs_free(rset);
  return ret;
}
/* Search for an existing RAD whose CPU set is exactly HWLOC_SET and store
 * it into *RADSET (created here; the caller owns it on success).
 * Returns 1 when a matching RAD was found, otherwise 0 with errno set to
 * EXDEV (no RAD contains exactly this set of CPUs). */
static int
prepare_radset(hwloc_topology_t topology __hwloc_attribute_unused, radset_t *radset, hwloc_const_bitmap_t hwloc_set)
{
  cpuset_t wanted, current, diff;
  radid_t rad;
  unsigned cpuid;
  int found = 0;
  int saved_errno = 0;
  int nbnodes = rad_get_num();

  /* Convert the hwloc bitmap into an OSF cpuset. */
  cpusetcreate(&wanted);
  cpuemptyset(wanted);
  hwloc_bitmap_foreach_begin(cpuid, hwloc_set)
    cpuaddset(wanted, cpuid);
  hwloc_bitmap_foreach_end();

  cpusetcreate(&current);
  cpusetcreate(&diff);
  for (rad = 0; rad < nbnodes; rad++) {
    cpuemptyset(current);
    if (rad_get_cpus(rad, current) == -1) {
      fprintf(stderr, "rad_get_cpus(%d) failed: %s\n", rad, strerror(errno));
      continue;
    }
    /* An empty symmetric difference means this RAD matches exactly. */
    cpuxorset(wanted, current, diff);
    if (cpucountset(diff) == 0) {
      /* Found it */
      radsetcreate(radset);
      rademptyset(*radset);
      radaddset(*radset, rad);
      found = 1;
      goto out;
    }
  }
  /* radset containing exactly this set of CPUs not found */
  saved_errno = EXDEV;

 out:
  cpusetdestroy(&wanted);
  cpusetdestroy(&current);
  cpusetdestroy(&diff);
  errno = saved_errno;
  return found;
}
/* Bind resource WHO (of kind WHAT, belonging to process PID) to the CPUs
 * listed in HWLOC_SET.  Bindings obtained through rsets on AIX are always
 * strict.  Returns 0 on success, -1 with errno set on failure. */
static int
hwloc_aix_set_sth_cpubind(hwloc_topology_t topology, rstype_t what, rsid_t who, pid_t pid, hwloc_const_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
{
  rsethandle_t rset;
  unsigned cpuid;
  int ret;

  /* CPU binding through rsets also binds memory; refuse when the caller
   * explicitly asked us not to touch memory binding. */
  if (flags & HWLOC_CPUBIND_NOMEMBIND) {
    errno = ENOSYS;
    return -1;
  }

  /* Binding to the complete cpuset amounts to unbinding: just detach
   * whatever rset is currently attached. */
  if (hwloc_bitmap_isequal(hwloc_set, hwloc_topology_get_complete_cpuset(topology)))
    return ra_detachrset(what, who, 0) ? -1 : 0;

  /* Translate the hwloc bitmap into an AIX resource set. */
  rset = rs_alloc(RS_EMPTY);
  hwloc_bitmap_foreach_begin(cpuid, hwloc_set)
    rs_op(RS_ADDRESOURCE, rset, NULL, R_PROCS, cpuid);
  hwloc_bitmap_foreach_end();

  ret = ra_attachrset(what, who, rset, 0);
  if (ret < 0 && errno == EPERM) {
    /* EPERM may mean that one thread has been bound with bindprocessor().
     * Threads cannot be unbound individually, so unbind the entire
     * process and try again. */
    bindprocessor(BINDPROCESS, pid, PROCESSOR_CLASS_ANY);
    ret = ra_attachrset(what, who, rset, 0);
  }
  rs_free(rset);
  return ret;
}
/* Fill *MATTR (for OSF nmadvise/nmmap and friends) from a hwloc membind
 * POLICY and NODESET.  The radset inside *mattr is created here and is
 * owned by the caller.  Returns 0 on success, -1 with errno set to ENOSYS
 * for policies OSF cannot express. */
static int
hwloc_osf_prepare_mattr(hwloc_topology_t topology __hwloc_attribute_unused, memalloc_attr_t *mattr, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags __hwloc_attribute_unused)
{
  unsigned long native_policy;
  int nodeid;

  /* Map the hwloc policy onto its OSF equivalent. */
  switch (policy) {
  case HWLOC_MEMBIND_FIRSTTOUCH:
    native_policy = MPOL_THREAD;
    break;
  case HWLOC_MEMBIND_DEFAULT:
  case HWLOC_MEMBIND_BIND:
    native_policy = MPOL_DIRECTED;
    break;
  case HWLOC_MEMBIND_INTERLEAVE:
    native_policy = MPOL_STRIPPED;
    break;
  case HWLOC_MEMBIND_REPLICATE:
    native_policy = MPOL_REPLICATED;
    break;
  default:
    errno = ENOSYS;
    return -1;
  }

  memset(mattr, 0, sizeof(*mattr));
  mattr->mattr_policy = native_policy;
  mattr->mattr_rad = RAD_NONE;

  /* Convert the nodeset into an OSF radset. */
  radsetcreate(&mattr->mattr_radset);
  rademptyset(mattr->mattr_radset);
  hwloc_bitmap_foreach_begin(nodeid, nodeset)
    radaddset(mattr->mattr_radset, nodeid);
  hwloc_bitmap_foreach_end();
  return 0;
}
/* Bind thread PTHREAD to the CPUs in HWLOC_SET.
 * The pthread is first mapped to its kernel thread id via
 * pthread_getthrds_np(), then bound as an R_THREAD resource. */
static int
hwloc_aix_set_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t pthread, hwloc_const_bitmap_t hwloc_set, int flags)
{
  struct __pthrdsinfo info;
  int size;
  /* pthread_getthrds_np() returns an error code directly (not -1/errno),
   * so store it in errno ourselves. */
  if ((errno = pthread_getthrds_np(&pthread, PTHRDSINFO_QUERY_TID, &info, sizeof(info), NULL, &size)))
    return -1;
  {
    rsid_t who = { .at_tid = info.__pi_tid };
    return hwloc_aix_set_sth_cpubind(topology, R_THREAD, who, hwloc_set, flags);
  }
}

/* Retrieve the current CPU binding of thread PTHREAD into HWLOC_SET,
 * using the same pthread -> kernel-tid translation as above. */
static int
hwloc_aix_get_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t pthread, hwloc_bitmap_t hwloc_set, int flags)
{
  struct __pthrdsinfo info;
  int size;
  if (pthread_getthrds_np(&pthread, PTHRDSINFO_QUERY_TID, &info, sizeof(info), NULL, &size))
    return -1;
  {
    rsid_t who;
    who.at_tid = info.__pi_tid;
    return hwloc_aix_get_sth_cpubind(topology, R_THREAD, who, hwloc_set, flags);
  }
}
#endif /* HWLOC_HAVE_PTHREAD_GETTHRDS_NP */
#endif /* R_THREAD */

#ifdef P_DEFAULT
/* Translate a hwloc membind policy into the corresponding AIX P_* policy.
 * Returns 0 on success, -1 with errno set to ENOSYS for policies AIX
 * cannot express. */
static int
hwloc_aix_membind_policy_from_hwloc(uint_t *aix_policy, int policy)
{
  switch (policy) {
  case HWLOC_MEMBIND_DEFAULT:
  case HWLOC_MEMBIND_BIND:
    *aix_policy = P_DEFAULT;
    break;
  case HWLOC_MEMBIND_FIRSTTOUCH:
    *aix_policy = P_FIRST_TOUCH;
    break;
  case HWLOC_MEMBIND_INTERLEAVE:
    *aix_policy = P_BALANCED;
    break;
  default:
    errno = ENOSYS;
    return -1;
  }
  return 0;
}

/* Build a resource set (*rad) covering every memory node listed in NODESET,
 * looked up at the MCM level of the resource-set hierarchy.
 * Always returns 0; the caller owns *rad and must rs_free() it. */
static int
hwloc_aix_prepare_membind(hwloc_topology_t topology, rsethandle_t *rad, hwloc_const_nodeset_t nodeset, int flags __hwloc_attribute_unused)
{
  rsethandle_t rset, noderad;
  int MCMlevel;
  int node;
  /* Depth of the MCM (memory affinity) level in the system rset tree. */
  MCMlevel = rs_getinfo(NULL, R_MCMSDL, 0);
  /* Honor WHOLE_SYSTEM by starting from the whole machine instead of
   * the current partition. */
  if ((topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM))
    rset = rs_alloc(RS_ALL);
  else
    rset = rs_alloc(RS_PARTITION);
  *rad = rs_alloc(RS_EMPTY);
  noderad = rs_alloc(RS_EMPTY);
  /* Union the rset of each requested node into *rad. */
  hwloc_bitmap_foreach_begin(node, nodeset)
    rs_getrad(rset, noderad, MCMlevel, node, 0);
    rs_op(RS_UNION, noderad, *rad, 0, 0);
  hwloc_bitmap_foreach_end();
  rs_free(rset);
  rs_free(noderad);
  return 0;
}