/* Print place P to stderr in OpenMP place syntax: runs of consecutive
   CPUs are printed as "start:length", runs are comma separated.  */
void
gomp_affinity_print_place (void *p)
{
  cpu_set_t *set = (cpu_set_t *) p;
  unsigned long nbits = 8 * gomp_cpuset_size;
  unsigned long run_len = 0;
  bool first = true;

  for (unsigned long bit = 0; bit < nbits; bit++)
    {
      if (!CPU_ISSET_S (bit, gomp_cpuset_size, set))
	{
	  /* A run just ended; emit its length if longer than one.  */
	  if (run_len > 1)
	    fprintf (stderr, ":%lu", run_len);
	  run_len = 0;
	  continue;
	}
      if (run_len == 0)
	{
	  /* Start of a new run: print separator and first CPU.  */
	  if (!first)
	    fputc (',', stderr);
	  first = false;
	  fprintf (stderr, "%lu", bit);
	}
      ++run_len;
    }
  /* Flush a run that extends to the last bit.  */
  if (run_len > 1)
    fprintf (stderr, ":%lu", run_len);
}
/* Interposer for pthread_getaffinity_np: forwards to the real libpthread
   implementation (resolved lazily with dlsym (RTLD_NEXT)) and, on the
   first successful call, records in file-scope globals how many CPUs are
   set contiguously starting at CPU 0 (contig_cpucount) and the cpuset
   size used (min_cpusetsize).  */
int pthread_getaffinity_np (pthread_t thread, size_t cpusetsize, cpu_set_t *cpuset)
{
  int ret;
  unsigned long i, max;
  if (orig_getaffinity_np == NULL)
    {
      /* Look up the next definition of the symbol in link order.  */
      orig_getaffinity_np
	= (int (*) (pthread_t, size_t, cpu_set_t *))
	  dlsym (RTLD_NEXT, "pthread_getaffinity_np");
      if (orig_getaffinity_np == NULL)
	/* Cannot interpose; exit with status 0 — presumably so the
	   enclosing test is skipped rather than reported failed.  */
	exit (0);
    }
  ret = orig_getaffinity_np (thread, cpusetsize, cpuset);
  if (ret != 0)
    return ret;
  if (contig_cpucount == 0)
    {
      /* First successful call: count CPUs set contiguously from 0.  */
      max = 8 * cpusetsize;
      for (i = 0; i < max; i++)
	if (!CPU_ISSET_S (i, cpusetsize, cpuset))
	  break;
      contig_cpucount = i;
      min_cpusetsize = cpusetsize;
    }
  return ret;
}
/* Return the CPU affinity of process PID as a newly allocated virBitmap
   (one bit per CPU), or NULL on error.  The caller owns the bitmap.  */
virBitmapPtr virProcessGetAffinity(pid_t pid)
{
    size_t i;
    cpu_set_t *mask;
    size_t masklen;
    size_t ncpus;
    virBitmapPtr ret = NULL;

# ifdef CPU_ALLOC
    /* 262144 cpus ought to be enough for anyone */
    ncpus = 1024 << 8;
    masklen = CPU_ALLOC_SIZE(ncpus);
    mask = CPU_ALLOC(ncpus);

    if (!mask) {
        virReportOOMError();
        return NULL;
    }

    CPU_ZERO_S(masklen, mask);
# else
    /* Fallback for platforms without CPU_ALLOC: fixed 1024-CPU set.  */
    ncpus = 1024;
    if (VIR_ALLOC(mask) < 0)
        return NULL;

    masklen = sizeof(*mask);
    CPU_ZERO(mask);
# endif

    if (sched_getaffinity(pid, masklen, mask) < 0) {
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        goto cleanup;
    }

    if (!(ret = virBitmapNew(ncpus)))
        goto cleanup;

    /* Mirror every set CPU into the bitmap; set-bit failures are
       deliberately ignored (i < ncpus by construction).  */
    for (i = 0; i < ncpus; i++) {
# ifdef CPU_ALLOC
        /* coverity[overrun-local] */
        if (CPU_ISSET_S(i, masklen, mask))
            ignore_value(virBitmapSetBit(ret, i));
# else
        if (CPU_ISSET(i, mask))
            ignore_value(virBitmapSetBit(ret, i));
# endif
    }

 cleanup:
# ifdef CPU_ALLOC
    CPU_FREE(mask);
# else
    VIR_FREE(mask);
# endif

    return ret;
}
/* Store into IDS the logical CPU numbers of place PLACE_NUM, in
   ascending order.  Out-of-range place numbers leave IDS untouched.
   The caller must size IDS for the place's CPU count.  */
void
gomp_get_place_proc_ids_8 (int place_num, int64_t *ids)
{
  if (place_num < 0 || place_num >= gomp_places_list_len)
    return;
  cpu_set_t *place = (cpu_set_t *) gomp_places_list[place_num];
  unsigned long bit = 0, nbits = 8 * gomp_cpuset_size;
  while (bit < nbits)
    {
      if (CPU_ISSET_S (bit, gomp_cpuset_size, place))
	*ids++ = bit;
      ++bit;
    }
}
/* Convert the low 64 bits of the CPU set BITS (SZ bytes long) into a
   plain bitmask: bit i of the result is set iff CPU i is in the set.
   CPUs >= 64 are ignored by design (the result has only 64 bits).  */
static unsigned long long int cpusettoull(cpu_set_t* bits, size_t sz)
{
    unsigned long long mask = 0;
    size_t i;

    /* size_t loop index: the original used 'int i' compared against
       sizeof(mask)*8 (a size_t), a signed/unsigned mismatch that
       -Wsign-compare flags.  */
    for (i = 0; i < sizeof(mask) * 8; ++i) {
        if (CPU_ISSET_S(i, sz, bits)) {
            mask |= 1ull << i;
        }
    }
    return mask;
}
static runconfig * allocforncores(void) { const unsigned ncoresmax = 128; const unsigned cslen = CPU_ALLOC_SIZE(ncoresmax); printf("assuming no more than %u cores. set length = %u\n", ncoresmax, cslen); cpu_set_t * coreset = CPU_ALLOC(ncoresmax); if(coreset && !sched_getaffinity(getpid(), cslen, coreset)) { } else { fail("can't get current affinity"); } const int ncores = CPU_COUNT_S(cslen, coreset); if(ncores) { } else { fail("don't know how to work on 0 cores\n"); } runconfig *const cfg = malloc(sizeof(runconfig) + sizeof(unsigned) * (ncores - 1)); if(cfg) { } else { fail("can't allocate memory for config structure"); } cfg->ncores = ncores; unsigned cc = 0; // current core for(unsigned i = 0; cc < ncores; i += 1) { if(CPU_ISSET_S(i, cslen, coreset)) { cfg->corelist[cc] = i; cc += 1; } } free(coreset); return cfg; }
/* Remove logical CPU NUM from place P.  Returns false with a
   diagnostic when NUM is out of range or not currently in the set;
   true on success.  */
bool
gomp_affinity_remove_cpu (void *p, unsigned long num)
{
  cpu_set_t *set = (cpu_set_t *) p;

  /* Range check must come first: CPU_ISSET_S on an out-of-range bit
     is not meaningful.  */
  if (num >= 8 * gomp_cpuset_size)
    {
      gomp_error ("Logical CPU number %lu out of range", num);
      return false;
    }

  if (!CPU_ISSET_S (num, gomp_cpuset_size, set))
    {
      gomp_error ("Logical CPU %lu to be removed is not in the set", num);
      return false;
    }

  CPU_CLR_S (num, gomp_cpuset_size, set);
  return true;
}
/* Run HANDLER(ARG) on every processor named in CPUS (a cpu_set_t of
   SETSIZE bytes), or on all online processors when CPUS is NULL.
   Blocks the caller until the action has completed everywhere.  */
void _SMP_Multicast_action(
  const size_t setsize,
  const cpu_set_t *cpus,
  SMP_Action_handler handler,
  void *arg
)
{
  SMP_Multicast_action node;
  Processor_mask targets;
  SMP_lock_Context lock_context;
  uint32_t i;

  /* Before the system is up only the boot processor runs; just call
     the handler directly.  */
  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
    ( *handler )( arg );
    return;
  }

  if( cpus == NULL ) {
    _Processor_mask_Assign( &targets, _SMP_Get_online_processors() );
  } else {
    /* Translate the POSIX cpu_set_t into the internal Processor_mask.  */
    _Processor_mask_Zero( &targets );
    for ( i = 0; i < _SMP_Get_processor_count(); ++i ) {
      if ( CPU_ISSET_S( i, setsize, cpus ) ) {
        _Processor_mask_Set( &targets, i );
      }
    }
  }

  /* NODE lives on this stack frame; it must stay valid until every
     target acknowledges, which the spin loop below guarantees.  */
  _Chain_Initialize_node( &node.Node );
  node.handler = handler;
  node.arg = arg;
  _Processor_mask_Assign( &node.targets, &targets );
  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );

  /* Publish the action under the multicast lock with ISRs disabled.  */
  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
  _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );

  _SMP_Send_message_multicast( &targets, SMP_MESSAGE_MULTICAST_ACTION );

  /* Process pending multicasts ourselves in case we are a target.  */
  _SMP_Multicasts_try_process();

  /* Busy-wait for completion.  NOTE(review): the ACQUIRE load
     presumably pairs with a release store in the processing path —
     confirm in _SMP_Multicasts_try_process.  */
  while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
    /* Wait */
  };
}
/* Pin process PID to CPU by installing a single-CPU affinity mask.
   Returns sched_setaffinity()'s result (0 on success).  Errors are
   handled with assert(), matching the original harness style — note
   these checks vanish under NDEBUG.  */
int pin_cpu(pid_t pid, unsigned int cpu)
{
    size_t size;
    /* BUG FIX: the set must be big enough to hold bit CPU.  The old
       code used CPU_ALLOC(1), whose bitmap covers only CPUs 0..63 on
       glibc, so CPU_SET_S was a silent no-op for cpu >= 64 and the
       CPU_COUNT_S assert fired.  Size by cpu + 1 instead.  */
    cpu_set_t * setPtr = CPU_ALLOC(cpu + 1);
    assert (NULL != setPtr && "cpu_set allocation failed!");

    size = CPU_ALLOC_SIZE(cpu + 1);
    CPU_ZERO_S(size, setPtr);       // clear set
    CPU_SET_S(cpu, size, setPtr);   // enable requested cpu in set
    assert(1 == CPU_COUNT_S(size, setPtr));
    assert (CPU_ISSET_S(cpu, size, setPtr));

    int ret = sched_setaffinity(pid, size, setPtr);
    assert (ret == 0 && "sched_setaffinity failed");
    /* NOTE(review): this check is only meaningful when PID is the
       calling process; for another pid it races — confirm callers.  */
    assert (cpu == sched_getcpu() && "Pinning failed");

    CPU_FREE(setPtr);
    return ret;
}
/* MPI affinity demo: every rank prints its hwloc process binding and
   then the CPUs present in its kernel affinity mask.  */
int main(int argc, char* argv[])
{
    int rank, size, rc;
    hwloc_cpuset_t cpus;
    char *bindings;
    cpu_set_t *mask;
    int nrcpus, c;
    size_t csize;
    char hostname[1024];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    gethostname(hostname, 1024);

    /* Binding as recorded by hwloc (rendered as a bitmap list).  */
    cpus = hwloc_bitmap_alloc();
    rc = hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
    hwloc_bitmap_list_asprintf(&bindings, cpus);

    printf("[%s;%d] Hello, World, I am %d of %d [%d local peers]: get_cpubind: %d bitmap %s\n",
           hostname, (int)getpid(), rank, size,
           orte_process_info.num_local_peers, rc,
           (NULL == bindings) ? "NULL" : bindings);

    /* Cross-check against the kernel's view via sched_getaffinity.  */
    nrcpus = sysconf(_SC_NPROCESSORS_ONLN);
    mask = CPU_ALLOC(nrcpus);
    csize = CPU_ALLOC_SIZE(nrcpus);
    CPU_ZERO_S(csize, mask);
    if ( sched_getaffinity(0, csize, mask) == -1 ) {
        /* NOTE(review): exits without MPI_Finalize and leaks cpus and
           bindings — acceptable for a demo, but confirm intent.  */
        CPU_FREE(mask);
        perror("sched_getaffinity");
        return -1;
    }

    for ( c = 0; c < nrcpus; c++ ) {
        if ( CPU_ISSET_S(c, csize, mask) ) {
            printf("[%s:%d] CPU %d is set\n", hostname, (int)getpid(), c);
        }
    }

    CPU_FREE(mask);
    MPI_Finalize();
    return 0;
}
/* Copy place Q into place P, shifting every CPU number by STRIDE.
   Fails with a diagnostic if any shifted CPU would fall outside
   [0, 8 * gomp_cpuset_size).  Returns true on success.  */
bool gomp_affinity_copy_place (void *p, void *q, long stride)
{
  unsigned long i, max = 8 * gomp_cpuset_size;
  cpu_set_t *destp = (cpu_set_t *) p;
  cpu_set_t *srcp = (cpu_set_t *) q;

  CPU_ZERO_S (gomp_cpuset_size, destp);
  for (i = 0; i < max; i++)
    if (CPU_ISSET_S (i, gomp_cpuset_size, srcp))
      {
	/* i + stride is computed in unsigned long arithmetic, so the
	   checks rely on wrap-around: with stride < 0, the sum "grows"
	   only if it wrapped below zero; with stride > 0, it is out of
	   range if it wrapped past ULONG_MAX or reached max.  */
	if ((stride < 0 && i + stride > i)
	    || (stride > 0 && (i + stride < i || i + stride >= max)))
	  {
	    gomp_error ("Logical CPU number %lu+%ld out of range", i, stride);
	    return false;
	  }
	CPU_SET_S (i + stride, gomp_cpuset_size, destp);
      }
  return true;
}
/* Return the highest CPU index contained in SET (of SIZE bytes), or
   -1 when the set is empty.

   CPU_ALLOC_SIZE rounds its argument up, so the set's true bit-width
   cannot be recovered from SIZE alone; instead the member count from
   CPU_COUNT_S bounds the scan.  */
static int
find_last_cpu (const cpu_set_t *set, size_t size)
{
  size_t remaining = CPU_COUNT_S (size, set);
  int cpu = 0;
  int last = -1;

  /* Scan upward until every member has been seen; the last hit is the
     answer.  */
  while (remaining > 0)
    {
      if (CPU_ISSET_S (cpu, size, set))
	{
	  last = cpu;
	  --remaining;
	}
      ++cpu;
    }
  return last;
}
/* Set INT_MASK[i] = 1 for every CPU i in the calling process's
   affinity mask and store the number of set CPUs in *NELEMENTS_SET.
   INT_MASK must be large enough for the probed CPU count (the probe
   starts at 1024 entries and can grow up to 262144 on EINVAL);
   entries for unset CPUs are left untouched.
   Returns 0 on success, -1 on error (message via perror).  */
int boundto(int* nelements_set, int* int_mask)
{
	cpu_set_t *mask;
	size_t size;
	int i;
	int nrcpus = 1024;
	int knt = 0;

realloc:
	mask = CPU_ALLOC(nrcpus);
	/* BUG FIX: CPU_ALLOC can return NULL; the old code passed the
	   result straight to CPU_ZERO_S, crashing on OOM.  */
	if (mask == NULL) {
		perror("CPU_ALLOC");
		return -1;
	}
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, mask);
	if ( sched_getaffinity(0, size, mask) == -1 ) {
		CPU_FREE(mask);
		/* Kernel mask is wider than ours: grow (x4) and retry,
		   capped at 262144 CPUs.  */
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for ( i = 0; i < nrcpus; i++ ) {
		if ( CPU_ISSET_S(i, size, mask) ) {
			//printf("CPU %d is set\n", (i));
			int_mask[i] = 1;
			knt++;
		}
	}
	*nelements_set = knt;
	CPU_FREE(mask);
	return 0;
}
/* Return the lowest online CPU belonging to DOMAIN, -1 when the
   domain maps to no online CPU, or read_mapping()'s error code when
   the per-domain mask cannot be read.  */
int domain_to_first_cpu(int domain)
{
	cpu_set_t *mask;
	size_t mask_sz;
	int cpu, first = -1;

	int rc = read_mapping(domain, "domains", &mask, &mask_sz);
	if (rc)
		return rc;

	/* Hoisted out of the loop: the online count is loop-invariant. */
	const int n_online = num_online_cpus();
	for (cpu = 0; cpu < n_online; ++cpu) {
		if (CPU_ISSET_S(cpu, mask_sz, mask)) {
			first = cpu;
			break;
		}
	}

	CPU_FREE(mask);
	return first;
}
/* Fetch the CPU affinity of PID into *MAP, a newly allocated virBitmap
   of MAXCPU bits owned by the caller.  Returns 0 on success, -1 on
   error (reported via virReport*).  */
int virProcessGetAffinity(pid_t pid,
                          virBitmapPtr *map,
                          int maxcpu)
{
    size_t i;
# ifdef CPU_ALLOC
    /* New method dynamically allocates cpu mask, allowing unlimted cpus */
    int numcpus = 1024;
    size_t masklen;
    cpu_set_t *mask;

    /* Not only may the statically allocated cpu_set_t be too small,
     * but there is no way to ask the kernel what size is large enough.
     * So you have no option but to pick a size, try, catch EINVAL,
     * enlarge, and re-try.
     *
     * http://lkml.org/lkml/2009/7/28/620
     */
realloc:
    masklen = CPU_ALLOC_SIZE(numcpus);
    mask = CPU_ALLOC(numcpus);

    if (!mask) {
        virReportOOMError();
        return -1;
    }

    CPU_ZERO_S(masklen, mask);
    if (sched_getaffinity(pid, masklen, mask) < 0) {
        CPU_FREE(mask);
        if (errno == EINVAL &&
            numcpus < (1024 << 8)) { /* 262144 cpus ought to be enough for anyone */
            numcpus = numcpus << 2;
            goto realloc;
        }
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        return -1;
    }

    *map = virBitmapNew(maxcpu);
    if (!*map) {
        /* BUG FIX: the old code returned here without freeing MASK,
           leaking the dynamically allocated cpu set.  */
        CPU_FREE(mask);
        return -1;
    }

    for (i = 0; i < maxcpu; i++)
        if (CPU_ISSET_S(i, masklen, mask))
            ignore_value(virBitmapSetBit(*map, i));
    CPU_FREE(mask);
# else
    /* Legacy method uses a fixed size cpu mask, only allows up to 1024 cpus.
       NOTE(review): this branch never allocates *map — presumably the
       caller does on this platform; confirm before relying on it.  */
    cpu_set_t mask;

    CPU_ZERO(&mask);
    if (sched_getaffinity(pid, sizeof(mask), &mask) < 0) {
        virReportSystemError(errno,
                             _("cannot get CPU affinity of process %d"), pid);
        return -1;
    }

    for (i = 0; i < maxcpu; i++)
        if (CPU_ISSET(i, &mask))
            ignore_value(virBitmapSetBit(*map, i));
# endif

    return 0;
}
/* Initialize the default number of threads from the process's CPU
   affinity mask, growing the cpuset buffer until the kernel accepts
   its size; falls back to the online-processor count when affinity
   cannot be determined.  */
void gomp_init_num_threads (void)
{
#ifdef HAVE_PTHREAD_AFFINITY_NP
#if defined (_SC_NPROCESSORS_CONF) && defined (CPU_ALLOC_SIZE)
  /* Start with a buffer sized for the configured processor count.  */
  gomp_cpuset_size = sysconf (_SC_NPROCESSORS_CONF);
  gomp_cpuset_size = CPU_ALLOC_SIZE (gomp_cpuset_size);
#else
  gomp_cpuset_size = sizeof (cpu_set_t);
#endif

  gomp_cpusetp = (cpu_set_t *) gomp_malloc (gomp_cpuset_size);
  do
    {
      int ret = pthread_getaffinity_np (pthread_self (), gomp_cpuset_size,
					gomp_cpusetp);
      if (ret == 0)
	{
	  /* Count only the CPUs this process can use. */
	  gomp_global_icv.nthreads_var
	    = gomp_cpuset_popcount (gomp_cpuset_size, gomp_cpusetp);
	  if (gomp_global_icv.nthreads_var == 0)
	    break;
	  gomp_get_cpuset_size = gomp_cpuset_size;
#ifdef CPU_ALLOC_SIZE
	  /* Shrink gomp_cpuset_size down to the highest set bit so
	     later scans skip trailing all-zero words.  */
	  unsigned long i;
	  for (i = gomp_cpuset_size * 8; i; i--)
	    if (CPU_ISSET_S (i - 1, gomp_cpuset_size, gomp_cpusetp))
	      break;
	  gomp_cpuset_size = CPU_ALLOC_SIZE (i);
#endif
	  return;
	}
      if (ret != EINVAL)
	break;
      /* EINVAL: the kernel's mask is wider than our buffer — grow
	 (at least to sizeof (cpu_set_t), then doubling) and retry.  */
#ifdef CPU_ALLOC_SIZE
      if (gomp_cpuset_size < sizeof (cpu_set_t))
	gomp_cpuset_size = sizeof (cpu_set_t);
      else
	gomp_cpuset_size = gomp_cpuset_size * 2;
      if (gomp_cpuset_size < 8 * sizeof (cpu_set_t))
	gomp_cpusetp
	  = (cpu_set_t *) gomp_realloc (gomp_cpusetp, gomp_cpuset_size);
      else
	{
	  /* Avoid gomp_fatal if too large memory allocation would be
	     requested, e.g. kernel returning EINVAL all the time.  */
	  void *p = realloc (gomp_cpusetp, gomp_cpuset_size);
	  if (p == NULL)
	    break;
	  gomp_cpusetp = (cpu_set_t *) p;
	}
#else
      break;
#endif
    }
  while (1);
  /* Affinity unavailable: disable affinity handling entirely.  */
  gomp_cpuset_size = 0;
  gomp_global_icv.nthreads_var = 1;
  free (gomp_cpusetp);
  gomp_cpusetp = NULL;
#endif
#if defined(__ANDROID__)
  gomp_global_icv.nthreads_var = sc_nprocessors_actu ();
#elif defined(_SC_NPROCESSORS_ONLN)
  gomp_global_icv.nthreads_var = sysconf (_SC_NPROCESSORS_ONLN);
#endif
}
/* Unit tests for parse_cpu_set_and_warn(): single CPUs, ranges,
   quoting, comma/space separators, and several error inputs.  On
   success the parser returns the allocated set size in CPUs (>= 1024)
   and stores the set in *c; on failure it returns < 0 and leaves *c
   NULL.  */
static void test_parse_cpu_set(void) {
        cpu_set_t *c = NULL;
        int ncpus;
        int cpu;

        /* Simple range (from CPUAffinity example) */
        ncpus = parse_cpu_set_and_warn("1 2", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_ISSET_S(1, CPU_ALLOC_SIZE(ncpus), c));
        assert_se(CPU_ISSET_S(2, CPU_ALLOC_SIZE(ncpus), c));
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 2);
        c = cpu_set_mfree(c);

        /* A more interesting range */
        ncpus = parse_cpu_set_and_warn("0 1 2 3 8 9 10 11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Quoted strings */
        ncpus = parse_cpu_set_and_warn("8 '9' 10 \"11\"", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 4);
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Use commas as separators */
        ncpus = parse_cpu_set_and_warn("0,1,2,3 8,9,10,11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Commas with spaces (and trailing comma, space) */
        ncpus = parse_cpu_set_and_warn("0, 1, 2, 3, 4, 5, 6, 7, ", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 8; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Ranges */
        ncpus = parse_cpu_set_and_warn("0-3,8-11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Ranges with trailing comma, space */
        ncpus = parse_cpu_set_and_warn("0-3 8-11, ", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 8);
        for (cpu = 0; cpu < 4; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 8; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Negative range (returns empty cpu_set) */
        ncpus = parse_cpu_set_and_warn("3-0", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 0);
        c = cpu_set_mfree(c);

        /* Overlapping ranges */
        ncpus = parse_cpu_set_and_warn("0-7 4-11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 12);
        for (cpu = 0; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Mix ranges and individual CPUs */
        ncpus = parse_cpu_set_and_warn("0,1 4-11", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus >= 1024);
        assert_se(CPU_COUNT_S(CPU_ALLOC_SIZE(ncpus), c) == 10);
        assert_se(CPU_ISSET_S(0, CPU_ALLOC_SIZE(ncpus), c));
        assert_se(CPU_ISSET_S(1, CPU_ALLOC_SIZE(ncpus), c));
        for (cpu = 4; cpu < 12; cpu++)
                assert_se(CPU_ISSET_S(cpu, CPU_ALLOC_SIZE(ncpus), c));
        c = cpu_set_mfree(c);

        /* Garbage */
        ncpus = parse_cpu_set_and_warn("0 1 2 3 garbage", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus < 0);
        assert_se(!c);

        /* Range with garbage */
        ncpus = parse_cpu_set_and_warn("0-3 8-garbage", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus < 0);
        assert_se(!c);

        /* Empty string */
        c = NULL;
        ncpus = parse_cpu_set_and_warn("", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus == 0); /* empty string returns 0 */
        assert_se(!c);

        /* Runaway quoted string */
        ncpus = parse_cpu_set_and_warn("0 1 2 3 \"4 5 6 7 ", &c, NULL, "fake", 1, "CPUAffinity");
        assert_se(ncpus < 0);
        assert_se(!c);
}
/* Build gomp_places_list with up to COUNT places at granularity LEVEL:
   1 = one place per hardware thread, 2 = per core, otherwise per
   socket.  For levels above 1 the grouping is read from sysfs
   "*_siblings_list" topology files.  Returns true on success; on
   failure reports an error (unless QUIET) and returns false.  */
bool gomp_affinity_init_level (int level, unsigned long count, bool quiet)
{
  unsigned long i, max = 8 * gomp_cpuset_size;

  if (gomp_cpusetp)
    {
      /* Never create more places than there are usable CPUs.  */
      unsigned long maxcount
	= gomp_cpuset_popcount (gomp_cpuset_size, gomp_cpusetp);
      if (count > maxcount)
	count = maxcount;
    }
  gomp_places_list = gomp_affinity_alloc (count, quiet);
  gomp_places_list_len = 0;
  if (gomp_places_list == NULL)
    return false;
  /* SMT (threads). */
  if (level == 1)
    {
      /* One single-CPU place per usable hardware thread.  */
      for (i = 0; i < max && gomp_places_list_len < count; i++)
	if (CPU_ISSET_S (i, gomp_cpuset_size, gomp_cpusetp))
	  {
	    gomp_affinity_init_place (gomp_places_list[gomp_places_list_len]);
	    gomp_affinity_add_cpus (gomp_places_list[gomp_places_list_len],
				    i, 1, 0, true);
	    ++gomp_places_list_len;
	  }
      return true;
    }
  else
    {
      /* Buffer for "/sys/devices/system/cpu/cpuNNN/topology/..."; the
	 3 * sizeof (unsigned long) slack covers the decimal CPU number.  */
      char name[sizeof ("/sys/devices/system/cpu/cpu/topology/"
			"thread_siblings_list")
		+ 3 * sizeof (unsigned long)];
      size_t prefix_len = sizeof ("/sys/devices/system/cpu/cpu") - 1;
      cpu_set_t *copy = gomp_alloca (gomp_cpuset_size);
      FILE *f;
      char *line = NULL;
      size_t linelen = 0;

      memcpy (name, "/sys/devices/system/cpu/cpu", prefix_len);
      /* COPY tracks CPUs not yet assigned to a place; bits are cleared
	 as CPUs are consumed so each CPU lands in exactly one place.  */
      memcpy (copy, gomp_cpusetp, gomp_cpuset_size);
      for (i = 0; i < max && gomp_places_list_len < count; i++)
	if (CPU_ISSET_S (i, gomp_cpuset_size, copy))
	  {
	    /* For level 2 (cores) group by thread siblings; otherwise
	       (sockets) group by core siblings.  */
	    sprintf (name + prefix_len, "%lu/topology/%s_siblings_list",
		     i, level == 2 ? "thread" : "core");
	    f = fopen (name, "r");
	    if (f != NULL)
	      {
		if (getline (&line, &linelen, f) > 0)
		  {
		    /* Parse a comma-separated list of CPU numbers and
		       N-M ranges, e.g. "0-3,8-11".  */
		    char *p = line;
		    bool seen_i = false;
		    void *pl = gomp_places_list[gomp_places_list_len];
		    gomp_affinity_init_place (pl);
		    while (*p && *p != '\n')
		      {
			unsigned long first, last;
			errno = 0;
			first = strtoul (p, &p, 10);
			if (errno)
			  break;
			last = first;
			if (*p == '-')
			  {
			    errno = 0;
			    last = strtoul (p + 1, &p, 10);
			    if (errno || last < first)
			      break;
			  }
			for (; first <= last; first++)
			  if (CPU_ISSET_S (first, gomp_cpuset_size, copy)
			      && gomp_affinity_add_cpus (pl, first, 1, 0,
							 true))
			    {
			      /* Consume the CPU so later iterations
				 don't start a new place with it.  */
			      CPU_CLR_S (first, gomp_cpuset_size, copy);
			      if (first == i)
				seen_i = true;
			    }
			if (*p == ',')
			  ++p;
		      }
		    /* Keep the place only if CPU i itself was usable.  */
		    if (seen_i)
		      gomp_places_list_len++;
		  }
		fclose (f);
	      }
	  }
      /* NOTE(review): LINE allocated by getline is not freed here —
	 confirm whether upstream frees it after this loop.  */
      if (gomp_places_list_len == 0)
	{
	  if (!quiet)
	    gomp_error ("Error reading %s topology",
			level == 2 ? "core" : "socket");
	  free (gomp_places_list);
	  gomp_places_list = NULL;
	  return false;
	}
      return true;
    }
  return false;
}
/* LTP test body for sched_getaffinity(2): positive test against the
   current process (growing the cpu set on EINVAL for glibc >= 2.7),
   then negative tests with a bad buffer pointer, a zero size, and a
   (hopefully) nonexistent pid.  */
static void do_test(void)
{
	int i;
	cpu_set_t *mask;
	int nrcpus = 1024;
	pid_t pid;
	unsigned len;
#if __GLIBC_PREREQ(2, 7)
realloc:
	mask = CPU_ALLOC(nrcpus);
#else
	mask = malloc(sizeof(cpu_set_t));
#endif
	if (mask == NULL)
		tst_brkm(TFAIL | TTERRNO, cleanup, "fail to get enough memory");
#if __GLIBC_PREREQ(2, 7)
	len = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(len, mask);
#else
	len = sizeof(cpu_set_t);
	CPU_ZERO(mask);
#endif
	/* positive test */
	TEST(sched_getaffinity(0, len, mask));
	if (TEST_RETURN == -1) {
		CPU_FREE(mask);
#if __GLIBC_PREREQ(2, 7)
		/* Kernel mask wider than ours: grow (x4, capped at
		   262144 CPUs) and retry.  */
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
#else
		if (errno == EINVAL)
			tst_resm(TFAIL, "NR_CPUS > 1024, we'd better use a "
				 "newer glibc(>= 2.7)");
		else
#endif
			tst_resm(TFAIL | TTERRNO, "fail to get cpu affinity");
		cleanup();
	} else {
		/* NOTE(review): len is unsigned, printed with %d —
		   harmless for these sizes but strictly mismatched.  */
		tst_resm(TINFO, "cpusetsize is %d", len);
		tst_resm(TINFO, "mask.__bits[0] = %lu ", mask->__bits[0]);
		for (i = 0; i < num; i++) {
#if __GLIBC_PREREQ(2, 7)
			TEST(CPU_ISSET_S(i, len, mask));
#else
			TEST(CPU_ISSET(i, mask));
#endif
			if (TEST_RETURN != -1)
				tst_resm(TPASS, "sched_getaffinity() succeed, "
					 "this process %d is running "
					 "processor: %d", getpid(), i);
		}
	}
#if __GLIBC_PREREQ(2, 7)
	CPU_ZERO_S(len, mask);
#else
	CPU_ZERO(mask);
#endif
	/* negative tests */
	QUICK_TEST(sched_getaffinity(0, len, (cpu_set_t *) - 1));
	QUICK_TEST(sched_getaffinity(0, 0, mask));
	/*
	 * pid_t -> int -- the actual kernel limit is lower
	 * though, but this is a negative test, not a positive
	 * one.
	 *
	 * Push comes to shove, if the user doesn't have the
	 * ability to kill(3) processes (errno = EPERM), then
	 * set the pid to the highest possible represented
	 * value and cross your fingers in the hope that
	 * a) Linux somehow hasn't started allocating PIDs
	 * this high and b) the PID = INT_MAX isn't in fact
	 * running.
	 */
	for (pid = 2; pid < INT_MAX; pid++) {
		if (kill(pid, 0) == -1) {
			if (errno == ESRCH)
				break;
			else if (errno == EPERM)
				pid = INT_MAX - 1;
		}
	}
	QUICK_TEST(sched_getaffinity(pid, len, mask));
	CPU_FREE(mask);
}
/* Return nonzero when CPU is absent from the global present-CPU set,
   zero when it is present.  */
int cpu_is_not_present(int cpu)
{
	if (CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set))
		return 0;
	return 1;
}
/* Print this thread's actual affinity as OpenMP place syntax
   ("{first:len,...}") and compare it against the expected place P,
   appending ", verified" when it matches or an ", expected {...}
   instead" note when it does not.  P.start == -1 means no
   expectation.  */
void print_affinity (struct place p)
{
  /* Cached cpuset size: prefer the size recorded by the
     pthread_getaffinity_np interposer, else derive it from the
     configured processor count (at least sizeof (cpu_set_t)).  */
  static unsigned long size;
  if (size == 0)
    {
      if (min_cpusetsize)
	size = min_cpusetsize;
      else
	{
	  size = sysconf (_SC_NPROCESSORS_CONF);
	  size = CPU_ALLOC_SIZE (size);
	  if (size < sizeof (cpu_set_t))
	    size = sizeof (cpu_set_t);
	}
    }
  cpu_set_t *cpusetp = (cpu_set_t *) alloca (size);
  if (pthread_getaffinity_np (pthread_self (), size, cpusetp) == 0)
    {
      unsigned long i, len, max = 8 * size;
      /* unexpected starts true and is cleared only when the first run
	 begins exactly at p.start; any later run, or a run of the
	 wrong length, sets it again.  */
      int notfirst = 0, unexpected = 1;

      printf (" bound to {");
      for (i = 0, len = 0; i < max; i++)
	if (CPU_ISSET_S (i, size, cpusetp))
	  {
	    if (len == 0)
	      {
		/* Start of a run of consecutive CPUs.  */
		if (notfirst)
		  {
		    /* A second run can never match a single place.  */
		    unexpected = 1;
		    printf (",");
		  }
		else if (i == (unsigned long) p.start)
		  unexpected = 0;
		notfirst = 1;
		printf ("%lu", i);
	      }
	    ++len;
	  }
	else
	  {
	    /* Run ended: check its length and emit ":len" if > 1.  */
	    if (len && len != (unsigned long) p.len)
	      unexpected = 1;
	    if (len > 1)
	      printf (":%lu", len);
	    len = 0;
	  }
      /* Handle a run that extends to the last bit.  */
      if (len && len != (unsigned long) p.len)
	unexpected = 1;
      if (len > 1)
	printf (":%lu", len);
      printf ("}");
      if (p.start != -1 && unexpected)
	{
	  printf (", expected {%d", p.start);
	  if (p.len != 1)
	    printf (":%d", p.len);
	  printf ("} instead");
	}
      else if (p.start != -1)
	printf (", verified");
    }
}
/* Pin thread TID to one CPU from its current affinity mask and return
   that CPU's index, or -1 on failure.  If a CPU was already reserved
   (g_n_thread_cpu_core != NO_CPU) it is returned unchanged.  When the
   mask allows several CPUs, the one with the fewest reserved threads
   is chosen; SUGGESTED_CPU wins if it is allowed and within one
   thread of that minimum.  Serialized by lock()/unlock().  */
int cpu_manager::reserve_cpu_for_thread(pthread_t tid, int suggested_cpu /* = NO_CPU */)
{
	lock();
	int cpu = g_n_thread_cpu_core;
	if (cpu != NO_CPU) { //already reserved
		unlock();
		return cpu;
	}

	cpu_set_t* cpu_set = NULL;
	cpu_set = CPU_ALLOC(MAX_CPU);
	if (!cpu_set) {
		unlock();
		__log_err("failed to allocate cpu set");
		return -1;
	}
	size_t cpu_set_size = CPU_ALLOC_SIZE(MAX_CPU);
	CPU_ZERO_S(cpu_set_size, cpu_set);
	if (pthread_getaffinity_np(tid, cpu_set_size, cpu_set)) {
		unlock();
		CPU_FREE(cpu_set);
		__log_err("pthread_getaffinity_np failed for tid=%lu (errno=%d %m)", tid, errno);
		return -1;
	}

	if (CPU_COUNT_S(cpu_set_size, cpu_set) == 0) {
		unlock();
		__log_err("no cpu available for tid=%lu", tid);
		CPU_FREE(cpu_set);
		return -1;
	}

	if (CPU_COUNT_S(cpu_set_size, cpu_set) == 1) { //already attached
		/* Exactly one CPU allowed: just find its index.  */
		for (cpu = 0; cpu < MAX_CPU && !CPU_ISSET_S(cpu, cpu_set_size, cpu_set); cpu++) {}
	} else { //need to choose one cpu to attach to
		/* Pick the allowed CPU with the fewest reserved threads.  */
		int min_cpu_count = -1;
		for (int i = 0; i < MAX_CPU; i++) {
			if (!CPU_ISSET_S(i, cpu_set_size, cpu_set)) continue;
			if (min_cpu_count < 0 || m_cpu_thread_count[i] < min_cpu_count) {
				min_cpu_count = m_cpu_thread_count[i];
				cpu = i;
			}
		}
		/* Honor the caller's suggestion if it is allowed and at
		   most one thread above the least-loaded CPU.  */
		if (suggested_cpu >= 0
		    && CPU_ISSET_S(suggested_cpu, cpu_set_size, cpu_set)
		    && m_cpu_thread_count[suggested_cpu] <= min_cpu_count + 1 ) {
			cpu = suggested_cpu;
		}
		/* Narrow the mask to the chosen CPU and apply it.  */
		CPU_ZERO_S(cpu_set_size, cpu_set);
		CPU_SET_S(cpu, cpu_set_size, cpu_set);
		__log_dbg("attach tid=%lu running on cpu=%d to cpu=%d", tid, sched_getcpu(), cpu);
		if (pthread_setaffinity_np(tid, cpu_set_size, cpu_set)) {
			unlock();
			CPU_FREE(cpu_set);
			__log_err("pthread_setaffinity_np failed for tid=%lu to cpu=%d (errno=%d %m)", tid, cpu, errno);
			return -1;
		}
	}
	CPU_FREE(cpu_set);

	g_n_thread_cpu_core = cpu;
	m_cpu_thread_count[cpu]++;
	unlock();
	return cpu;
}