int set_cpu_affinity(pid_t pid, unsigned long new_mask)
{
	unsigned long cur_mask;
	unsigned int len = sizeof(new_mask);

	if (sched_getaffinity(pid, len, (cpu_set_t *) &cur_mask) < 0) {
		perror("sched_getaffinity");
		return -1;
	}
	printf("pid %d's old affinity: %08lx\n", pid, cur_mask);

	if (sched_setaffinity(pid, len, (cpu_set_t *) &new_mask)) {
		perror("sched_setaffinity");
		return -1;
	}

	if (sched_getaffinity(pid, len, (cpu_set_t *) &cur_mask) < 0) {
		perror("sched_getaffinity");
		return -1;
	}
	printf("pid %d's new affinity: %08lx\n", pid, cur_mask);

	return 0;
}
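/*
 * A minimal, hypothetical caller for set_cpu_affinity() above: pin the
 * calling process to CPU 0. Note that the unsigned-long-as-cpu_set_t idiom
 * used by the helper only covers the first 8 * sizeof(unsigned long) CPUs;
 * CPU_ALLOC()-based sets are needed beyond that.
 */
int pin_self_to_cpu0(void)
{
	/* pid 0 means the calling process for sched_{get,set}affinity() */
	return set_cpu_affinity(0, 1UL << 0);
}
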
int main(int argc, char **argv)
{
	int opt;
	bool do_suspend = true;
	bool succeeded = true;
	cpu_set_t available_cpus;
	int err;
	int cpu;

	ksft_print_header();

	while ((opt = getopt(argc, argv, "n")) != -1) {
		switch (opt) {
		case 'n':
			do_suspend = false;
			break;
		default:
			printf("Usage: %s [-n]\n", argv[0]);
			printf("        -n: do not trigger a suspend/resume cycle before the test\n");
			return -1;
		}
	}

	if (do_suspend)
		suspend();

	err = sched_getaffinity(0, sizeof(available_cpus), &available_cpus);
	if (err < 0)
		ksft_exit_fail_msg("sched_getaffinity() failed\n");

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		bool test_success;

		if (!CPU_ISSET(cpu, &available_cpus))
			continue;

		test_success = run_test(cpu);
		if (test_success) {
			ksft_test_result_pass("CPU %d\n", cpu);
		} else {
			ksft_test_result_fail("CPU %d\n", cpu);
			succeeded = false;
		}
	}

	if (succeeded)
		ksft_exit_pass();
	else
		ksft_exit_fail();
}
int affinity_processGetProcessorId()
{
	int ret;
	cpu_set_t cpu_set;

	CPU_ZERO(&cpu_set);
	ret = sched_getaffinity(getpid(), sizeof(cpu_set_t), &cpu_set);
	if (ret < 0) {
		ERROR;
	}

	return getProcessorID(&cpu_set);
}
/*
 ******************************************************************************
 SUBROUTINE: set_affinity

 Set this process to run on the input processor number.
 The processor numbers start with 0 going to N-1 processors.
 ******************************************************************************
 */

int set_affinity(int processor)
{
	extern int sched_getaffinity();
	extern int sched_setaffinity();
	unsigned long new_mask;
	unsigned int len = sizeof(new_mask);
	unsigned long cur_mask;
	pid_t p = 0;
	int ret;

	new_mask = 1UL << processor;	/* 1UL, not 1: a plain int shift overflows for processor >= 31 */
	//printf("set_affinity: %ld\n", new_mask);

	ret = sched_getaffinity(p, len, &cur_mask);
	// printf("sched_getaffinity = %d, cur_mask = %08lx\n", ret, cur_mask);
	if (ret != 0)
		abort();

	ret = sched_setaffinity(p, len, &new_mask);
	// printf("sched_setaffinity = %d, new_mask = %08lx\n", ret, new_mask);
	if (ret != 0)
		abort();

	ret = sched_getaffinity(p, len, &cur_mask);
	// printf("sched_getaffinity = %d, cur_mask = %08lx\n", ret, cur_mask);
	if (ret != 0)
		abort();

	if (cur_mask != new_mask) {
		printf("affinity did not get set! exiting\n");
		exit(-1);
	}

	fflush(stdout);
	return 0;
}
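/*
 * Hypothetical driver for set_affinity() above: walk the process across the
 * first n processors, e.g. to run a per-CPU measurement on each. Assumes n
 * stays below 8 * sizeof(unsigned long), the width of the helper's mask.
 */
void visit_processors(int n)
{
	int p;

	for (p = 0; p < n; p++) {
		set_affinity(p);	/* aborts or exits on failure */
		printf("now running on processor %d\n", p);
	}
}
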
/**
 * \brief Get CPU affinity
 *
 * Example of usage:
 * \code
 * std::vector<bool> v (2);
 * getAffinity(&v);
 * std::cout << "Affinity: " << v[0] << " " << v[1] << std::endl;
 * \endcode
 * @param v: vector of booleans containing the current affinity (true/false)
 * @exception std::runtime_error in case the affinity cannot be retrieved
 */
void Process::getAffinity(std::vector<bool>* v)
{
    cpu_set_t s;
    CPU_ZERO(&s);
    if (sched_getaffinity(pid_, sizeof(s), &s) != 0)
        throw std::runtime_error("Get affinity error");
    for (unsigned int j = 0; (j < CPU_SETSIZE) && (j < v->size()); ++j) {
        if (CPU_ISSET(j, &s))
            (*v)[j] = true;
        else
            (*v)[j] = false;
    }
}
/*
 * Return process CPU affinity as a Python long (the bitmask)
 */
static PyObject*
get_process_cpu_affinity(PyObject* self, PyObject* args)
{
    unsigned long mask;
    unsigned int len = sizeof(mask);
    long pid;

    if (!PyArg_ParseTuple(args, "l", &pid)) {  /* "l" matches the long pid; the original "i" did not */
        return NULL;
    }
    if (sched_getaffinity(pid, len, (cpu_set_t *)&mask) < 0) {
        return PyErr_SetFromErrno(PyExc_OSError);
    }
    return Py_BuildValue("l", mask);
}
void showCurCpu(int cpuNum)
{
    cpu_set_t get;

    CPU_ZERO(&get);
    //if (pthread_getaffinity_np(pthread_self(), sizeof(get), &get) == -1) {
    if (sched_getaffinity(0, sizeof(get), &get) == -1) {
        printf("warning: could not get cpu affinity\n");
        exit(1);
    }
    for (int i = 0; i < cpuNum; i++) {
        if (CPU_ISSET(i, &get)) {
            printf("this process %d is running on processor: %d\n", getpid(), i);
        }
    }
}
static void conf_sched(void)
{
	cpu_set_t set;
	int res, i;

	CPU_ZERO(&set);
	res = sched_getaffinity(0, sizeof(set), &set);
	if (res < 0)
		die("sched_getaffinity");

	for (i = 0; i < 256; i++) {
		if (!CPU_ISSET(i, &set))
			continue;
		printf("allowed cpu: %d\n", i);
	}
}
void *func_nonrt(void *arg)
{
	Thread *pthr = (Thread *) arg;
	int rc, i, j, policy, tid = gettid();
	struct sched_param schedp;
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);

	rc = sched_setaffinity(0, sizeof(mask), &mask);
	if (rc < 0) {
		printf("Thread %d: Can't set affinity: %d %s\n",
		       tid, rc, strerror(errno));	/* errno, not the return value, holds the error */
		exit(-1);
	}
	rc = sched_getaffinity(0, sizeof(mask), &mask);

	printf("Thread started %d on CPU %ld\n",
	       pthr->priority, (long)mask.__bits[0]);
	pthread_getschedparam(pthr->pthread, &policy, &schedp);
	printf("Thread running %d\n", pthr->priority);

	while (1) {
		pthread_mutex_lock(&glob_mutex);
		printf("Thread %d at start pthread pol %d pri %d - Got global lock\n",
		       pthr->priority, policy, schedp.sched_priority);
		sleep(2);
		for (i = 0; i < 10000; i++) {
			if ((i % 100) == 0) {
				sched_getparam(tid, &schedp);
				policy = sched_getscheduler(tid);
				printf("Thread %d(%d) loop %d pthread pol %d pri %d\n",
				       tid, pthr->priority, i, policy,
				       schedp.sched_priority);
				fflush(NULL);
			}
			pthr->id++;
			for (j = 0; j < 5000; j++) {
				pthread_mutex_lock(&(pthr->mutex));
				pthread_mutex_unlock(&(pthr->mutex));
			}
		}
		pthread_mutex_unlock(&glob_mutex);
		sched_yield();
	}
	return NULL;
}
static void tegra_cache_smc(bool enable, u32 arg)
{
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	bool need_affinity_switch;
	bool can_switch_affinity;
	bool l2x0_enabled;
	cpumask_t local_cpu_mask;
	cpumask_t saved_cpu_mask;
	unsigned long flags;
	long ret;

	/*
	 * ISSUE: Some registers of the PL310 controller must be written
	 * from Secure context (and from CPU0)!
	 *
	 * When called from Normal, we either take an abort or the write
	 * does nothing. Instructions that must be issued in Secure:
	 *      - Write to Control register (L2X0_CTRL==0x100)
	 *      - Write to Auxiliary controller (L2X0_AUX_CTRL==0x104)
	 *      - Invalidate all entries (L2X0_INV_WAY==0x77C),
	 *        mandatory at boot time.
	 *      - Tag and Data RAM Latency Control Registers
	 *        (0x108 & 0x10C) must be written in Secure.
	 */
	need_affinity_switch = (smp_processor_id() != 0);
	can_switch_affinity = !irqs_disabled();

	WARN_ON(need_affinity_switch && !can_switch_affinity);
	if (need_affinity_switch && can_switch_affinity) {
		cpu_set(0, local_cpu_mask);
		sched_getaffinity(0, &saved_cpu_mask);
		ret = sched_setaffinity(0, &local_cpu_mask);
		WARN_ON(ret != 0);
	}

	local_irq_save(flags);
	l2x0_enabled = readl_relaxed(p + L2X0_CTRL) & 1;
	if (enable && !l2x0_enabled)
		tegra_generic_smc(0xFFFFF100, 0x00000001, arg);
	else if (!enable && l2x0_enabled)
		tegra_generic_smc(0xFFFFF100, 0x00000002, arg);
	local_irq_restore(flags);

	if (need_affinity_switch && can_switch_affinity) {
		ret = sched_setaffinity(0, &saved_cpu_mask);
		WARN_ON(ret != 0);
	}
}
/*
 * getCPUMask
 */
uint64 CPProcess::getCPUMask()
{
	uint64 cpuMask = 1;
#ifdef __USE_GNU
	sint res = sched_getaffinity(getpid(), sizeof(uint64), (cpu_set_t*)&cpuMask);
	if (res)
	{
		nlwarning("sched_getaffinity() returned %d, errno = %d: %s",
			res, errno, strerror(errno));
		return 0;
	}
#endif // __USE_GNU
	return cpuMask;
}
int get_cpu_count()
{
	cpu_set_t cpu_mask;
	CPU_ZERO(&cpu_mask);

	int err = sched_getaffinity(0, sizeof(cpu_set_t), &cpu_mask);
	if (err) {
		LOG_ERROR << "sched_getaffinity failed\n";
		exit(1);
	}

	int count = CPU_COUNT(&cpu_mask);
	printf("%d\n", count);
	return count;
}
int get_cpunum()
{
	int num = 0;
#ifdef __linux__
	cpu_set_t cpuset;
	CPU_ZERO(&cpuset);
	sched_getaffinity(0, sizeof(cpuset), &cpuset);
	/* scan the whole set, not just the first 32 CPUs */
	for (int i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, &cpuset))
			num++;
	}
#endif
	return num;
}
int32 FLinuxMisc::NumberOfCores()
{
	cpu_set_t AvailableCpusMask;
	CPU_ZERO(&AvailableCpusMask);

	if (0 != sched_getaffinity(0, sizeof(AvailableCpusMask), &AvailableCpusMask))
	{
		return 1;	// we are running on something, right?
	}

	char FileNameBuffer[1024];
	unsigned char PossibleCores[CPU_SETSIZE] = { 0 };

	for (int32 CpuIdx = 0; CpuIdx < CPU_SETSIZE; ++CpuIdx)
	{
		if (CPU_ISSET(CpuIdx, &AvailableCpusMask))
		{
			sprintf(FileNameBuffer, "/sys/devices/system/cpu/cpu%d/topology/core_id", CpuIdx);

			FILE* CoreIdFile = fopen(FileNameBuffer, "r");
			unsigned int CoreId = 0;
			if (CoreIdFile)
			{
				if (1 != fscanf(CoreIdFile, "%u", &CoreId))	// %u: CoreId is unsigned
				{
					CoreId = 0;
				}
				fclose(CoreIdFile);
			}

			if (CoreId >= ARRAY_COUNT(PossibleCores))
			{
				CoreId = 0;
			}

			PossibleCores[CoreId] = 1;
		}
	}

	int32 NumCoreIds = 0;
	for (int32 Idx = 0; Idx < ARRAY_COUNT(PossibleCores); ++Idx)
	{
		NumCoreIds += PossibleCores[Idx];
	}

	return NumCoreIds;
}
//-----------------------------------------------------------------------------
// Return CPU affinity in a form suitable for messages. Single CPU affinity
// returns a non-negative integer CPU number. Multi CPU affinity returns the
// negative of the bit mask of affine CPUs. Affinity to no CPUs returns -1.
//-----------------------------------------------------------------------------
int32_t cwGetCPUaffinity(void)
{
    int numCPU = sysconf(_SC_NPROCESSORS_CONF);
    cpu_set_t af;
    int32_t i, afmask = 0, afcount = 0, afCPU = -1;

    sched_getaffinity(0, sizeof(af), &af);
    for (i = 0; i < numCPU; ++i) {
        if (CPU_ISSET(i, &af)) {
            afCPU = i;
            afmask |= (1 << i);
            afcount++;
        }
    }
    if (afcount <= 1)
        return afCPU;
    else
        return -afmask;
}
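//-----------------------------------------------------------------------------
// Sketch of decoding cwGetCPUaffinity()'s return convention (hypothetical
// helper, not part of the original source): a non-negative value is a single
// CPU number, -1 means no CPUs, anything else is the negated affinity mask.
// Note the mask form only fits hosts with at most 31 CPUs.
//-----------------------------------------------------------------------------
void cwReportCPUaffinity(void)
{
    int32_t af = cwGetCPUaffinity();
    if (af >= 0)
        printf("affine to single CPU %d\n", af);
    else if (af == -1)
        printf("affine to no CPUs\n");
    else
        printf("affine to mask 0x%x\n", (unsigned)-af);
}
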
int processor_get_num(void)
{
	int number = 0;
	cpu_set_t cpus;

	// Returns number of processors available to process (based on affinity mask)
	if (sched_getaffinity(0, sizeof(cpus), &cpus) < 0) {
		number = -1;
		CPU_ZERO(&cpus);
	}

	for (unsigned i = 0; i < sizeof(cpus) * 8; i++) {
		if (CPU_ISSET(i, &cpus))
			number++;
	}

	return number;
}
static runconfig * allocforncores(void)
{
	const unsigned ncoresmax = 128;
	const unsigned cslen = CPU_ALLOC_SIZE(ncoresmax);

	printf("assuming no more than %u cores. set length = %u\n",
	       ncoresmax, cslen);

	cpu_set_t * coreset = CPU_ALLOC(ncoresmax);
	if (!coreset || sched_getaffinity(getpid(), cslen, coreset))
		fail("can't get current affinity");

	const int ncores = CPU_COUNT_S(cslen, coreset);
	if (!ncores)
		fail("don't know how to work on 0 cores\n");

	runconfig *const cfg =
		malloc(sizeof(runconfig) + sizeof(unsigned) * (ncores - 1));
	if (!cfg)
		fail("can't allocate memory for config structure");

	cfg->ncores = ncores;

	unsigned cc = 0;	// current core
	for (unsigned i = 0; cc < (unsigned)ncores; i += 1) {
		if (CPU_ISSET_S(i, cslen, coreset)) {
			cfg->corelist[cc] = i;
			cc += 1;
		}
	}

	CPU_FREE(coreset);	// CPU_ALLOC pairs with CPU_FREE
	return cfg;
}
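/*
 * A small usage sketch for allocforncores() above (hypothetical, assuming
 * runconfig exposes ncores and corelist[] as the function fills them in):
 * list the logical id of every core the process may run on.
 */
static void print_corelist(void)
{
	runconfig *cfg = allocforncores();

	for (unsigned i = 0; i < (unsigned)cfg->ncores; i += 1)
		printf("slot %u -> cpu %u\n", i, cfg->corelist[i]);

	free(cfg);
}
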
void *start_task(void *data)
{
	struct thread *thr = (struct thread *)data;
	long id = (long) thr->arg;
	thread_pids[id] = gettid();
	unsigned long long start_time;
	int ret;
	int high = 0;
	cpu_set_t cpumask;
	cpu_set_t save_cpumask;
	int cpu = 0;
	unsigned long l;
	long pid;

	ret = sched_getaffinity(0, sizeof(save_cpumask), &save_cpumask);
	if (ret < 0)
		debug(DBG_ERR, "sched_getaffinity failed: %s\n",
		      strerror(errno));	/* errno, not the -1 return, is the error */

	pid = gettid();

	/* Check if we are the highest prio task */
	if (id == nr_tasks - 1)
		high = 1;

	while (!done) {
		if (high) {
			/* rotate around the CPUS */
			if (!CPU_ISSET(cpu, &save_cpumask))
				cpu = 0;
			CPU_ZERO(&cpumask);
			CPU_SET(cpu, &cpumask);
			cpu++;
			sched_setaffinity(0, sizeof(cpumask), &cpumask);
		}
		pthread_barrier_wait(&start_barrier);
		start_time = rt_gettime();
		ftrace_write("Thread %ld: started %lld diff %lld\n",
			     pid, start_time, start_time - now);
		l = busy_loop(start_time);
		record_time(id, start_time / NS_PER_US, l);
		pthread_barrier_wait(&end_barrier);
	}

	return (void *)pid;
}
static int
do_test (void)
{
  cpu_set_t cs;
  if (sched_getaffinity (getpid (), sizeof (cs), &cs) != 0)
    {
      printf ("getaffinity failed: %m\n");
      return 1;
    }

  int result = 0;
  int cpu = 0;
  while (CPU_COUNT (&cs) != 0)
    {
      if (CPU_ISSET (cpu, &cs))
        {
          cpu_set_t cs2;
          CPU_ZERO (&cs2);
          CPU_SET (cpu, &cs2);
          if (sched_setaffinity (getpid (), sizeof (cs2), &cs2) != 0)
            {
              printf ("setaffinity(%d) failed: %m\n", cpu);
              result = 1;
            }
          else
            {
              int cpu2 = sched_getcpu ();
              if (cpu2 == -1 && errno == ENOSYS)
                {
                  puts ("getcpu syscall not implemented");
                  return 0;
                }
              if (cpu2 != cpu)
                {
                  printf ("getcpu results %d not possible\n", cpu2);
                  result = 1;
                }
            }
          CPU_CLR (cpu, &cs);
        }
      ++cpu;
    }

  return result;
}
// get the number of CPUs available to the current process
boost::optional<unsigned long> ProcessInfo::getNumAvailableCores() {
    cpu_set_t set;

    if (sched_getaffinity(0, sizeof(cpu_set_t), &set) == 0) {
#ifdef CPU_COUNT  // glibc >= 2.6 has CPU_COUNT defined
        return CPU_COUNT(&set);
#else
        unsigned long count = 0;
        for (size_t i = 0; i < CPU_SETSIZE; i++)
            if (CPU_ISSET(i, &set))
                count++;
        if (count > 0)
            return count;
#endif
    }

    return boost::none;
}
MachineInfo() {
    m_cpu_num = sysconf(_SC_NPROCESSORS_CONF);
    m_cpu_frequencies = get_cpu_frequency_from_file("/proc/cpuinfo", m_cpu_num);
    if (m_cpu_frequencies)
        return;

    m_cpu_frequencies = new int64[m_cpu_num];
    for (int i = 0; i < m_cpu_num; i++) {
        cpu_set_t prev_mask;
        sched_getaffinity(0, sizeof(cpu_set_t), &prev_mask);
        BindToCPU(i);
        // Make sure the current process gets scheduled to the target cpu. This
        // might not be necessary though.
        usleep(0);
        m_cpu_frequencies[i] = get_cpu_frequency();
        sched_setaffinity(0, sizeof(cpu_set_t), &prev_mask);
    }
}
/*
 * Class:     xerial_jnuma_NumaNative
 * Method:    getAffinity
 * Signature: (I[JI)V
 */
JNIEXPORT void JNICALL Java_xerial_jnuma_NumaNative_getAffinity
  (JNIEnv *env, jobject obj, jint pid, jlongArray maskBuf, jint numCPUs)
{
    uint64_t* in = (uint64_t*) (*env)->GetPrimitiveArrayCritical(env, (jarray) maskBuf, 0);
    cpu_set_t mask;

    if (in == 0) {
        throwException(env, obj, 10);
        return;  /* must not touch the buffer after a failed acquire */
    }

    CPU_ZERO(&mask);
    const int ret = sched_getaffinity(0, sizeof(mask), &mask);
    if (ret < 0) {
        throwException(env, obj, ret);
    }
    for (int i = 0; i < numCPUs; ++i) {
        if (CPU_ISSET(i, &mask))
            in[i / 64] |= ((uint64_t) 1) << (i % 64);
    }
    (*env)->ReleasePrimitiveArrayCritical(env, (jarray) maskBuf, (void*) in, (jint) 0);
}
static PyObject *
get_process_affinity_mask(PyObject *self, PyObject *args)
{
    unsigned long cur_mask;
    unsigned int len = sizeof(cur_mask);
    pid_t pid;

    if (!PyArg_ParseTuple(args, "i:get_process_affinity_mask", &pid))
        return NULL;

    if (sched_getaffinity(pid, len, (cpu_set_t *)&cur_mask) < 0) {
        PyErr_SetFromErrno(PyExc_ValueError);
        return NULL;
    }

    return Py_BuildValue("l", cur_mask);
}
int proc_get_cpuid (void)
{
#ifdef _LINUX_
  int i, ret;
  cpu_set_t cpu_set;

  ret = sched_getaffinity (0, sizeof (cpu_set), &cpu_set);
  if (ret < 0)
    return -1;

  /* return the lowest CPU present in the affinity mask */
  for (i = 0; i < CPU_SETSIZE; ++i)
    {
      if (CPU_ISSET (i, &cpu_set))
        break;
    }
  return i;
#elif defined (_SOLARIS_)
  return getcpuid ();
#endif
}
int main(int argc, char* argv[])
{
    int rank, size, rc;
    hwloc_cpuset_t cpus;
    char *bindings;
    cpu_set_t *mask;
    int nrcpus, c;
    size_t csize;
    char hostname[1024];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    gethostname(hostname, 1024);

    cpus = hwloc_bitmap_alloc();
    rc = hwloc_get_cpubind(opal_hwloc_topology, cpus, HWLOC_CPUBIND_PROCESS);
    hwloc_bitmap_list_asprintf(&bindings, cpus);

    printf("[%s;%d] Hello, World, I am %d of %d [%d local peers]: get_cpubind: %d bitmap %s\n",
           hostname, (int)getpid(), rank, size,
           orte_process_info.num_local_peers, rc,
           (NULL == bindings) ? "NULL" : bindings);

    nrcpus = sysconf(_SC_NPROCESSORS_ONLN);
    mask = CPU_ALLOC(nrcpus);
    csize = CPU_ALLOC_SIZE(nrcpus);
    CPU_ZERO_S(csize, mask);
    if (sched_getaffinity(0, csize, mask) == -1) {
        CPU_FREE(mask);
        perror("sched_getaffinity");
        return -1;
    }

    for (c = 0; c < nrcpus; c++) {
        if (CPU_ISSET_S(c, csize, mask)) {
            printf("[%s:%d] CPU %d is set\n", hostname, (int)getpid(), c);
        }
    }

    CPU_FREE(mask);
    MPI_Finalize();
    return 0;
}
int main(int argc, char *argv[])
{
    int rank, thread;
    cpu_set_t coremask;
    int niter = 10000000;            // number of iterations per FOR loop
    double x, y;                     // x,y value for the random coordinate
    int i;                           // loop counter
    int count = 0;                   // count of how many good coordinates
    double z;                        // used to check if x^2+y^2 <= 1
    char clbuf[7 * CPU_SETSIZE], hnbuf[64];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    srand(SEED);
    memset(clbuf, 0, sizeof(clbuf));
    memset(hnbuf, 0, sizeof(hnbuf));
    (void)gethostname(hnbuf, sizeof(hnbuf));

#pragma omp parallel firstprivate(x, y, z, i) private(thread, coremask, clbuf)
    {
        thread = omp_get_thread_num();
        clock_t t;
        t = clock();
        /* Borrowed from https://www.olcf.ornl.gov/tutorials/monte-carlo-pi/
           By Jake Wynn */
        // Do some work generating random numbers to see if core affinity
        // impacts execution time.
        if ((rank == 0) || (rank == 1)) // put this labor on ranks 0 and 1
        {
            srandom((int)time(NULL) ^ omp_get_thread_num()); // seed random()
            for (i = 0; i < niter; ++i)                      // main loop
            {
                x = (double)random() / 1989.98;   // random x coordinate
                y = (double)random() / 1974.9171; // random y coordinate
                z = ((x * x) + (y * y));          // inside the unit circle?
                if (z <= 1.0)
                    ++count;                      // if so, count the point
            }
        }
        (void)sched_getaffinity(0, sizeof(coremask), &coremask);
        cpuset_to_cstr(&coremask, clbuf);
#pragma omp barrier
        t = clock() - t;
        printf("Rank %d, thread %d, on %s. core = %s,(%f seconds).\n",
               rank, thread, hnbuf, clbuf, ((float)t) / CLOCKS_PER_SEC);
    }
    MPI_Finalize();
    return 0;
}
int x264_cpu_num_processors( void )
{
#if !HAVE_THREAD
    return 1;

#elif defined(_WIN32)
    static int np = 0;
    if( !np )
        np = x264_pthread_num_processors_np();
    return np;

#elif SYS_LINUX
    unsigned int bit;
    int np;
    cpu_set_t p_aff;
    memset( &p_aff, 0, sizeof(p_aff) );
    sched_getaffinity( 0, sizeof(p_aff), &p_aff );
    /* count every bit of the set, i.e. 8 * sizeof(p_aff) bits, not
     * sizeof(p_aff) as before, which only scanned the first CPUs */
    for( np = 0, bit = 0; bit < 8 * sizeof(p_aff); bit++ )
        np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;
    return np;

#elif SYS_BEOS
    system_info info;
    get_system_info( &info );
    return info.cpu_count;

#elif SYS_MACOSX || SYS_FREEBSD || SYS_OPENBSD
    int ncpu;
    size_t length = sizeof( ncpu );
#if SYS_OPENBSD
    int mib[2] = { CTL_HW, HW_NCPU };
    if( sysctl(mib, 2, &ncpu, &length, NULL, 0) )
#else
    if( sysctlbyname("hw.ncpu", &ncpu, &length, NULL, 0) )
#endif
    {
        ncpu = 1;
    }
    return ncpu;

#else
    return 1;
#endif
}
static void dump_shed_common()
{
	uint64_t affinity = 0;
	const char *sched_str;
	int prio, sched, i;
	cpu_set_t cpu_set;

	/* FIXME: This does not work when having more than 64 CPUs */
	sched_getaffinity(0, sizeof(cpu_set), &cpu_set);
	for (i = 0; i < 64; i++) {
		/* cast before shifting: an int shifted by >= 31 is undefined */
		affinity |= (uint64_t)(CPU_ISSET(i, &cpu_set) != 0) << i;
	}

	sched = sched_getscheduler(0);
	switch (sched) {
	case SCHED_OTHER:
		sched_str = "SCHED_OTHER";
		break;
	case SCHED_BATCH:
		sched_str = "SCHED_BATCH";
		break;
	case SCHED_IDLE:
		sched_str = "SCHED_IDLE";
		break;
	case SCHED_FIFO:
		sched_str = "SCHED_FIFO";
		break;
	case SCHED_RR:
		sched_str = "SCHED_RR";
		break;
	case -1:
		sched_str = "ERROR";
		break;
	default:
		sched_str = "UNKNOWN";
		break;
	}

	prio = getpriority(PRIO_PROCESS, 0);

	fprintf(env_file, "SCHED,%s,%i,%i,%"PRIu64",%i\n",
		sched_str, get_nprocs_conf(), get_nprocs(), affinity, prio);
}
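/*
 * One possible way to address the FIXME above (a sketch, not the project's
 * code): size the set with CPU_ALLOC() so hosts with more than 64 CPUs are
 * covered, and list the allowed CPUs instead of packing them into a u64.
 */
static void dump_affinity_dynamic(FILE *out)
{
	int i, ncpus = (int)sysconf(_SC_NPROCESSORS_CONF);
	cpu_set_t *set = CPU_ALLOC(ncpus);
	size_t size = CPU_ALLOC_SIZE(ncpus);

	if (set == NULL)
		return;
	CPU_ZERO_S(size, set);
	if (sched_getaffinity(0, size, set) == 0) {
		for (i = 0; i < ncpus; i++)
			if (CPU_ISSET_S(i, size, set))
				fprintf(out, "allowed cpu: %d\n", i);
	}
	CPU_FREE(set);
}
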
static void setup(void)
{
	tst_require_root(NULL);
	uid = geteuid();
	ncpus = tst_ncpus_max();

	/* Current mask */
	mask = CPU_ALLOC(ncpus);
	if (mask == NULL)
		tst_brkm(TBROK | TERRNO, cleanup, "CPU_ALLOC(%ld) failed",
			 ncpus);
	mask_size = CPU_ALLOC_SIZE(ncpus);
	if (sched_getaffinity(0, mask_size, mask) < 0)
		tst_brkm(TBROK | TERRNO, cleanup, "sched_getaffinity() failed");

	/* Mask with one more cpu than available on the system */
	emask = CPU_ALLOC(ncpus + 1);
	if (emask == NULL)
		tst_brkm(TBROK | TERRNO, cleanup, "CPU_ALLOC(%ld) failed",
			 ncpus + 1);
	emask_size = CPU_ALLOC_SIZE(ncpus + 1);
	CPU_ZERO_S(emask_size, emask);
	CPU_SET_S(ncpus, emask_size, emask);

	privileged_pid = tst_fork();
	if (privileged_pid == 0) {
		pause();
		exit(0);
	} else if (privileged_pid < 0) {
		tst_brkm(TBROK | TERRNO, cleanup, "fork() failed");
	}

	/* Dropping the root privileges */
	ltpuser = getpwnam(nobody_uid);
	if (ltpuser == NULL)
		tst_brkm(TBROK | TERRNO, cleanup,
			 "getpwnam failed for user id %s", nobody_uid);
	SAFE_SETEUID(cleanup, ltpuser->pw_uid);

	/* this pid is not used by the OS */
	free_pid = tst_get_unused_pid(cleanup);
}
char *get_cpu_affinity(char *cpu_string, size_t len)
{
	int ret, i, cpu;
	cpu_set_t cpu_bitmask;

	if (len != get_number_cpus() + 1)
		return NULL;

	CPU_ZERO(&cpu_bitmask);
	ret = sched_getaffinity(getpid(), sizeof(cpu_bitmask), &cpu_bitmask);
	if (ret) {
		whine("Can't fetch cpu affinity!\n");
		return NULL;
	}

	for (i = 0, cpu_string[len - 1] = 0; i < len - 1; ++i) {
		cpu = CPU_ISSET(i, &cpu_bitmask);
		cpu_string[i] = (cpu ? '1' : '0');
	}

	return cpu_string;
}
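/*
 * Hypothetical caller for get_cpu_affinity() above. The contract is subtle:
 * len must be exactly get_number_cpus() + 1, leaving one byte for the
 * terminating NUL behind the '0'/'1' string.
 */
void print_affinity_bits(void)
{
	size_t len = get_number_cpus() + 1;
	char *buf = malloc(len);

	if (buf && get_cpu_affinity(buf, len))
		printf("affinity bits: %s\n", buf);
	free(buf);
}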