int main()
{
  unsigned int cpu;
  int result;
  cpu_set_t newmask;
  cpu_set_t mask;
  cpu_set_t switchmask;
  cpu_set_t flipmask;

  CPU_ZERO(&mask);
  CPU_ZERO(&switchmask);
  CPU_ZERO(&flipmask);

  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &switchmask);  /* 0b01010101010101010101010101010101 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu++)
    {
      CPU_SET(cpu, &flipmask);    /* 0b11111111111111111111111111111111 */
    }

  assert(sched_getaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
  assert(!CPU_EQUAL(&newmask, &mask));

  result = sched_setaffinity(0, sizeof(cpu_set_t), &newmask);
  if (result != 0)
    {
      int err =
#if defined (__PTW32_USES_SEPARATE_CRT)
        GetLastError();
#else
        errno;
#endif
      assert(err != ESRCH);
      assert(err != EFAULT);
      assert(err != EPERM);
      assert(err != EINVAL);
      assert(err != EAGAIN);
      assert(err == ENOSYS);
      assert(CPU_COUNT(&mask) == 1);
    }
  else
    {
      if (CPU_COUNT(&mask) > 1)
        {
          CPU_AND(&newmask, &mask, &switchmask);  /* Remove every other CPU */
          assert(sched_setaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
          assert(sched_getaffinity(0, sizeof(cpu_set_t), &mask) == 0);
          CPU_XOR(&newmask, &mask, &flipmask);    /* Switch to all alternative CPUs */
          assert(sched_setaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
          assert(sched_getaffinity(0, sizeof(cpu_set_t), &mask) == 0);
          assert(!CPU_EQUAL(&newmask, &mask));
        }
    }

  return 0;
}
int test_affinity5(void)
#endif
{
  unsigned int cpu;
  pthread_t tid;
  cpu_set_t threadCpus;
  DWORD_PTR vThreadMask;
  cpu_set_t keepCpus;
  pthread_t self = pthread_self();

  CPU_ZERO(&keepCpus);
  for (cpu = 1; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &keepCpus);  /* 0b10101010101010101010101010101010 */
    }

  assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == 0);

  if (CPU_COUNT(&threadCpus) > 1)
    {
      assert(pthread_create(&tid, NULL, mythread, (void*)&threadCpus) == 0);
      assert(pthread_join(tid, NULL) == 0);

      CPU_AND(&threadCpus, &threadCpus, &keepCpus);
      assert(pthread_setaffinity_np(self, sizeof(cpu_set_t), &threadCpus) == 0);

      vThreadMask = SetThreadAffinityMask(GetCurrentThread(),
                                          (*(PDWORD_PTR)&threadCpus) /* Violating Opacity */);
      assert(vThreadMask != 0);
      assert(memcmp(&vThreadMask, &threadCpus, sizeof(DWORD_PTR)) == 0);

      assert(pthread_create(&tid, NULL, mythread, (void*)&threadCpus) == 0);
      assert(pthread_join(tid, NULL) == 0);
    }

  return 0;
}
void ThreadSetDefault()
{
	if (numthreads == -1)
	{
		cpu_set_t cs;
		CPU_ZERO(&cs);
		if (sched_getaffinity(0, sizeof(cs), &cs) != 0)
		{
			numthreads = 1;
			return;
		}

		/* count set bits over the whole set: bounding the loop by
		   CPU_COUNT() would undercount non-contiguous masks */
		int count = 0;
		for (int i = 0; i < CPU_SETSIZE; i++)
		{
			if (CPU_ISSET(i, &cs))
				count++;
		}

		if (count < 1)
			numthreads = 1;
		else
			numthreads = count;
	}
}
WELS_THREAD_ERROR_CODE WelsQueryLogicalProcessInfo (WelsLogicalProcessInfo* pInfo) {
#ifdef ANDROID_NDK
  pInfo->ProcessorCount = android_getCpuCount();
  return WELS_THREAD_ERROR_OK;
#elif defined(LINUX)
  cpu_set_t cpuset;
  CPU_ZERO (&cpuset);
  if (!sched_getaffinity (0, sizeof (cpuset), &cpuset))
    pInfo->ProcessorCount = CPU_COUNT (&cpuset);
  else
    pInfo->ProcessorCount = 1;
  return WELS_THREAD_ERROR_OK;
#else
  size_t len = sizeof (pInfo->ProcessorCount);
  if (sysctlbyname (HW_NCPU_NAME, &pInfo->ProcessorCount, &len, NULL, 0) == -1)
    pInfo->ProcessorCount = 1;
  return WELS_THREAD_ERROR_OK;
#endif //LINUX
}
unsigned long
gomp_cpuset_popcount (unsigned long cpusetsize, cpu_set_t *cpusetp)
{
#ifdef CPU_COUNT_S
  /* glibc 2.7 and above provide a macro for this.  */
  return CPU_COUNT_S (cpusetsize, cpusetp);
#else
#ifdef CPU_COUNT
  if (cpusetsize == sizeof (cpu_set_t))
    /* glibc 2.6 and above provide a macro for this.  */
    return CPU_COUNT (cpusetp);
#endif
  size_t i;
  unsigned long ret = 0;
  extern int check[sizeof (cpusetp->__bits[0])
		   == sizeof (unsigned long int) ? 1 : -1]
    __attribute__((unused));

  for (i = 0; i < cpusetsize / sizeof (cpusetp->__bits[0]); i++)
    {
      unsigned long int mask = cpusetp->__bits[i];
      if (mask == 0)
	continue;
      ret += __builtin_popcountl (mask);
    }
  return ret;
#endif
}
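/*
 * A minimal usage sketch (not part of libgomp): count the CPUs in the
 * calling process's affinity mask via gomp_cpuset_popcount() above. The
 * wrapper name count_my_cpus is hypothetical; it assumes _GNU_SOURCE and
 * <sched.h>, as elsewhere in these snippets. For a CPU_ALLOC'd dynamic
 * set, CPU_ALLOC_SIZE(ncpus) would be passed as cpusetsize instead.
 */
static unsigned long count_my_cpus (void)
{
  cpu_set_t set;
  CPU_ZERO (&set);
  if (sched_getaffinity (0, sizeof (set), &set) != 0)
    return 0;
  return gomp_cpuset_popcount (sizeof (set), &set);
}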
static int get_logical_cpus(AVCodecContext *avctx)
{
    int ret, nb_cpus = 1;
#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
    cpu_set_t cpuset;

    CPU_ZERO(&cpuset);
    ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
    if (!ret) {
        nb_cpus = CPU_COUNT(&cpuset);
    }
#elif HAVE_GETPROCESSAFFINITYMASK
    DWORD_PTR proc_aff, sys_aff;

    ret = GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff);
    if (ret)
        nb_cpus = av_popcount64(proc_aff);
#elif HAVE_SYSCTL && defined(HW_NCPU)
    int mib[2] = { CTL_HW, HW_NCPU };
    size_t len = sizeof(nb_cpus);

    ret = sysctl(mib, 2, &nb_cpus, &len, NULL, 0);
    if (ret == -1)
        nb_cpus = 0;
#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
    nb_cpus = sysconf(_SC_NPROC_ONLN);
#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
    nb_cpus = sysconf(_SC_NPROCESSORS_ONLN);
#endif

    av_log(avctx, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);

    return nb_cpus;
}
boost::uint32_t GetAffinity()
{
#if defined(__APPLE__) || defined(__FreeBSD__)
	// no-op
	return 0;
#elif defined(WIN32)
	DWORD_PTR curMask;
	DWORD_PTR systemCpus;
	GetProcessAffinityMask(GetCurrentProcess(), &curMask, &systemCpus);
	return curMask;
#else
	cpu_set_t curAffinity;
	CPU_ZERO(&curAffinity);
	sched_getaffinity(0, sizeof(cpu_set_t), &curAffinity);

	boost::uint32_t mask = 0;

	// Scan the first 32 CPU slots; w/o the min(.., 32) `(1 << n)` could overflow!
	// (Bounding the scan by CPU_COUNT() instead would miss set CPUs whose index
	// is >= the count whenever the mask is non-contiguous.)
	int numCpus = std::min(CPU_SETSIZE, 32);

	for (int n = numCpus - 1; n >= 0; --n) {
		if (CPU_ISSET(n, &curAffinity)) {
			mask |= (1 << n);
		}
	}

	return mask;
#endif
}
/**
 * This is the actual function that the thread executes.
 * It receives a block from the scheduler, calls its run() method and returns it to the scheduler.
 * The thread runs until it is told to stop by setting the m_stop boolean flag.
 */
void operator()()
{
    if (CPU_COUNT(&m_mask) > 0) {
        pthread_t id = pthread_self();
        int ret = pthread_setaffinity_np(id, sizeof(m_mask), &m_mask);
        if (ret != 0) {
            perror("setaffinity");
            throw(std::runtime_error("set affinity failed"));
        }
    }

    while (!m_stop.load()) {
        std::shared_ptr<Block> torun(m_scheduler.next_task(m_id));
        if (torun) {
            torun->run();
            m_scheduler.task_done(m_id, std::move(torun));
        } else {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }
    m_stop.store(false);
}
int cpu_manager::reserve_cpu_for_thread(pthread_t tid, int suggested_cpu /* = NO_CPU */)
{
	lock();
	int cpu = g_n_thread_cpu_core;
	if (cpu != NO_CPU) { // already reserved
		unlock();
		return cpu;
	}

	cpu_set_t cpu_set;
	CPU_ZERO(&cpu_set);
	int ret = pthread_getaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
	if (ret) {
		unlock();
		__log_err("pthread_getaffinity_np failed for tid=%lu, ret=%d (errno=%d %m)", tid, ret, errno);
		return -1;
	}

	int avail_cpus = CPU_COUNT(&cpu_set);
	if (avail_cpus == 0) {
		unlock();
		__log_err("no cpu available for tid=%lu", tid);
		return -1;
	}

	if (avail_cpus == 1) { // already attached
		for (cpu = 0; cpu < MAX_CPU && !CPU_ISSET(cpu, &cpu_set); cpu++) {}
	} else { // need to choose one cpu to attach to
		int min_cpu_count = -1;
		for (int i = 0, j = 0; i < MAX_CPU && j < avail_cpus; i++) {
			if (!CPU_ISSET(i, &cpu_set)) continue;
			j++;
			if (min_cpu_count < 0 || m_cpu_thread_count[i] < min_cpu_count) {
				min_cpu_count = m_cpu_thread_count[i];
				cpu = i;
			}
		}
		if (suggested_cpu >= 0
		    && CPU_ISSET(suggested_cpu, &cpu_set)
		    && m_cpu_thread_count[suggested_cpu] <= min_cpu_count + 1) {
			cpu = suggested_cpu;
		}
		CPU_ZERO(&cpu_set);
		CPU_SET(cpu, &cpu_set);
		__log_dbg("attach tid=%lu running on cpu=%d to cpu=%d", tid, sched_getcpu(), cpu);
		ret = pthread_setaffinity_np(tid, sizeof(cpu_set_t), &cpu_set);
		if (ret) {
			unlock();
			__log_err("pthread_setaffinity_np failed for tid=%lu to cpu=%d, ret=%d (errno=%d %m)", tid, cpu, ret, errno);
			return -1;
		}
	}

	g_n_thread_cpu_core = cpu;
	if (cpu > NO_CPU && cpu < MAX_CPU)
		m_cpu_thread_count[cpu]++;
	unlock();
	return cpu;
}
unsigned get_cpus_online(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);
	if((int)info.dwNumberOfProcessors > 0)
		return (unsigned)info.dwNumberOfProcessors;
#elif defined(hpux) || defined(__hpux) || defined(_hpux)
	struct pst_dynamic psd;
	if(pstat_getdynamic(&psd, sizeof(psd), (size_t)1, 0) == 1)
		return (unsigned)psd.psd_proc_cnt;
#endif
/* CPU_COUNT is a GNU extension, available since glibc 2.6 */
#if HAVE_PTHREAD_SETAFFINITY_NP-0 == 1 && defined CPU_COUNT
	{
		/*
		 * ask the affinity stuff, we may be confined to a smaller
		 * set of CPUs than are available in HW
		 */
		cpu_set_t cst;
		int res = pthread_getaffinity_np(pthread_self(), sizeof(cst), &cst);
		if(0 == res)
			return CPU_COUNT(&cst);
	}
#endif
#ifdef _SC_NPROCESSORS_ONLN
	{
		long ncpus;
		if((ncpus = (long)sysconf(_SC_NPROCESSORS_ONLN)) > 0)
			return (unsigned)ncpus;
	}
#elif defined HAVE_SYS_SYSCTL_H
	{
		int ncpus;
		size_t len = sizeof(ncpus);
# ifdef HAVE_SYSCTLBYNAME
		/* try the online-cpu-thingy first (only osx?) */
		if(!sysctlbyname("hw.activecpu", &ncpus, &len, NULL, 0))
			return (unsigned)ncpus;
		if(!sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0))
			return (unsigned)ncpus;
# else
		int mib[2] = {CTL_HW, HW_NCPU};
		if(!sysctl(mib, 2, &ncpus, &len, NULL, 0))
			return (unsigned)ncpus;
# endif
	}
#endif
	/*
	 * if old Solaris does not provide NPROCESSORS,
	 * one may bang on processor_info() and friends
	 * from <sys/processor.h>, see above
	 */
	// TODO: warn user?
	/* we have at least one cpu, we are running on it... */
	return 1;
}
MVMuint32 MVM_platform_cpu_count(void) {
    cpu_set_t set;
    if (pthread_getaffinity_np(pthread_self(), sizeof set, &set) != 0)
        return 0;

    return CPU_COUNT(&set);
}
TEST(pty, bug_28979140) {
  // This test exercises a kernel bug: a lock-free ring buffer used to pass
  // data through a raw pty was missing the necessary memory barriers.
  cpu_set_t cpus;
  ASSERT_EQ(0, sched_getaffinity(0, sizeof(cpu_set_t), &cpus));
  if (CPU_COUNT(&cpus) < 2) {
    GTEST_LOG_(INFO) << "This test tests a bug that happens only on multiprocessors.";
    return;
  }
  constexpr uint32_t TEST_DATA_COUNT = 200000;

  // 1. Open raw pty.
  int master;
  int slave;
  ASSERT_EQ(0, openpty(&master, &slave, nullptr, nullptr, nullptr));
  termios tattr;
  ASSERT_EQ(0, tcgetattr(slave, &tattr));
  cfmakeraw(&tattr);
  ASSERT_EQ(0, tcsetattr(slave, TCSADRAIN, &tattr));

  // 2. Make the master thread and slave thread run on different cpus:
  // the master thread uses the first available cpu, the slave thread the others.
  PtyReader_28979140_Arg arg;
  arg.main_cpu_id = -1;
  for (int i = 0; i < CPU_SETSIZE; i++) {
    if (CPU_ISSET(i, &cpus)) {
      arg.main_cpu_id = i;
      break;
    }
  }
  ASSERT_GE(arg.main_cpu_id, 0);

  // 3. Create thread for slave reader.
  pthread_t thread;
  arg.slave_fd = slave;
  arg.data_count = TEST_DATA_COUNT;
  arg.matched = true;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void*(*)(void*)>(PtyReader_28979140),
                              &arg));
  CPU_ZERO(&cpus);
  CPU_SET(arg.main_cpu_id, &cpus);
  ASSERT_EQ(0, sched_setaffinity(0, sizeof(cpu_set_t), &cpus));

  // 4. Send data to slave.
  uint32_t counter = 0;
  while (counter <= TEST_DATA_COUNT) {
    ASSERT_TRUE(android::base::WriteFully(master, &counter, sizeof(counter)));
    ASSERT_TRUE(arg.matched) << "failed at count = " << counter;
    counter++;
  }
  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_TRUE(arg.finished);
  ASSERT_TRUE(arg.matched);
  close(master);
}
int get_cpu_count(void)
{
    cpu_set_t cpu_set;
    CPU_ZERO(&cpu_set);
    sched_getaffinity(0, sizeof(cpu_set), &cpu_set);
    return CPU_COUNT(&cpu_set);
}
/* req is one-based, cpu_set is zero-based */
SEXP mc_affinity(SEXP req)
{
    if (req != R_NilValue && TYPEOF(req) != INTSXP && TYPEOF(req) != REALSXP)
	error(_("invalid CPU affinity specification"));
    if (TYPEOF(req) == REALSXP)
	req = coerceVector(req, INTSXP);
    if (TYPEOF(req) == INTSXP) {
	int max_cpu = 0, i, n = LENGTH(req), *v = INTEGER(req);
	for (i = 0; i < n; i++) {
	    if (v[i] > max_cpu)
		max_cpu = v[i];
	    if (v[i] < 1)
		error(_("invalid CPU affinity specification"));
	}
	/* These are both one-based */
	if (max_cpu <= CPU_SETSIZE) { /* can use static set */
	    cpu_set_t cs;
	    CPU_ZERO(&cs);
	    for (i = 0; i < n; i++)
		CPU_SET(v[i] - 1, &cs);
	    sched_setaffinity(0, sizeof(cpu_set_t), &cs);
	} else {
#ifndef CPU_ALLOC
	    error(_("requested CPU set is too large for this system"));
#else
	    size_t css = CPU_ALLOC_SIZE(max_cpu);
	    cpu_set_t *cs = CPU_ALLOC(max_cpu);
	    CPU_ZERO_S(css, cs);
	    for (i = 0; i < n; i++)
		CPU_SET_S(v[i] - 1, css, cs);
	    sched_setaffinity(0, css, cs);
	    CPU_FREE(cs); /* dynamically allocated sets must be released */
#endif
	}
    }

    {
	/* FIXME: in theory we may want to use *_S versions as well, but
	   that would require some knowledge about the number of available
	   CPUs and comparing that to CPU_SETSIZE, so for now we just use
	   static cpu_set -- the mask will be still set correctly, just the
	   returned set will be truncated at CPU_SETSIZE */
	cpu_set_t cs;
	CPU_ZERO(&cs);
	if (sched_getaffinity(0, sizeof(cs), &cs)) {
	    if (req == R_NilValue)
		error(_("retrieving CPU affinity set failed"));
	    return R_NilValue;
	} else {
	    SEXP res = allocVector(INTSXP, CPU_COUNT(&cs));
	    int i, *v = INTEGER(res);
	    for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, &cs))
		    *(v++) = i + 1;
	    return res;
	}
    }
}
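/*
 * A minimal sketch of the CPU_ALLOC path used above, assuming glibc >= 2.7
 * and <sched.h> with _GNU_SOURCE. The helper name set_affinity_dynamic is
 * hypothetical; unlike the static cpu_set_t branch, the dynamically sized
 * set must be released with CPU_FREE. CPU indices here are zero-based (the
 * R-level interface above is one-based).
 */
static int set_affinity_dynamic(int max_cpu, const int *cpus, int n)
{
    size_t css = CPU_ALLOC_SIZE(max_cpu);
    cpu_set_t *cs = CPU_ALLOC(max_cpu);
    if (!cs)
	return -1;
    CPU_ZERO_S(css, cs);
    for (int i = 0; i < n; i++)
	CPU_SET_S(cpus[i], css, cs);   /* zero-based CPU indices */
    int ret = sched_setaffinity(0, css, cs);
    CPU_FREE(cs);
    return ret;
}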
void* server_worker_t::main(void *arg)
{
    conet::init_conet_env();
    server_worker_t *self = (server_worker_t *)(arg);

    if (self->cpu_affinity && CPU_COUNT(self->cpu_affinity) > 0) {
        int ret = 0;
        pthread_t tid = pthread_self();
        ret = pthread_setaffinity_np(tid, sizeof(cpu_set_t), self->cpu_affinity);
        if (ret) {
            PLOG_ERROR("set affinity failed, ", (tid, ret));
        } else {
            PLOG_INFO("set affinity success, ", (tid));
        }
    }

    int size = self->conf.servers_size();
    for (int i = 0; i < size; ++i) {
        RpcServer const &server_conf = self->conf.servers(i);
        rpc_pb_server_t *rpc_server = self->build_rpc_server(server_conf);
        if (rpc_server) {
            self->rpc_servers.push_back(rpc_server);
        }
    }

    int ret = 0;
    size = self->rpc_servers.size();
    for (int i = 0; i < size; ++i) {
        // start the server
        ret = self->rpc_servers[i]->start();
        if (ret) {
            PLOG_ERROR("error start");
        }
    }

    coroutine_t *exit_co = NULL;
    while (likely(!self->exit_finsished)) {
        if (unlikely(self->stop_flag && exit_co == NULL)) {
            exit_co = conet::alloc_coroutine(
                    conet::ptr_cast<conet::co_main_func_t>(&server_worker_t::proc_server_exit),
                    self);
            conet::resume(exit_co);
        }
        conet::dispatch();
    }
    conet::free_coroutine(exit_co);
    conet::free_conet_env();
    return NULL;
}
/* TODO: OS specific? Might be better to use a different method */
int get_cpu_count(void)
{
	cpu_set_t cs;
	CPU_ZERO(&cs);
	if (sched_getaffinity(0, sizeof(cs), &cs))
		return 1;
	return CPU_COUNT(&cs);
}
static int nth_set_cpu(unsigned int n, cpu_set_t* cpuSet) {
    // Note: assumes at least one CPU is set; CPU_COUNT(cpuSet) == 0 would
    // make the modulo below divide by zero.
    n %= CPU_COUNT(cpuSet);
    for (unsigned int setCpusSeen = 0, currentCpu = 0; true; ++currentCpu) {
        if (CPU_ISSET(currentCpu, cpuSet)) {
            ++setCpusSeen;
            if (setCpusSeen > n) {
                return currentCpu;
            }
        }
    }
}
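/*
 * A minimal usage sketch (an assumption, not from the original source):
 * pin the calling thread to the n-th CPU it is allowed to run on, using
 * nth_set_cpu() above. The wrapper name pin_self_to_nth_cpu is
 * hypothetical; it assumes a glibc target with _GNU_SOURCE, <sched.h>,
 * and <pthread.h> in effect.
 */
static int pin_self_to_nth_cpu(unsigned int n) {
    cpu_set_t available;
    CPU_ZERO(&available);
    if (pthread_getaffinity_np(pthread_self(), sizeof(available), &available) != 0) {
        return -1;
    }
    int cpu = nth_set_cpu(n, &available); // wraps n modulo the available count

    cpu_set_t pinned;
    CPU_ZERO(&pinned);
    CPU_SET(cpu, &pinned);
    return pthread_setaffinity_np(pthread_self(), sizeof(pinned), &pinned);
}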
void init_generator_test()
{
    cpu_set_t bitmap;

    init_generator(); // pins the current thread to CPU 0

    sched_getaffinity(0, sizeof(bitmap), &bitmap);
    CU_ASSERT_EQUAL(CPU_COUNT(&bitmap), 1);        // exactly one CPU in the mask
    CU_ASSERT_NOT_EQUAL(CPU_ISSET(0, &bitmap), 0); // and it is CPU 0

    // move the thread to CPU 1, then verify init_generator() pins it back
    CPU_ZERO(&bitmap);
    CPU_SET(1, &bitmap);
    sched_setaffinity(0, sizeof(bitmap), &bitmap);

    init_generator();
    sched_getaffinity(0, sizeof(bitmap), &bitmap);
    CU_ASSERT_EQUAL(CPU_COUNT(&bitmap), 1);
    CU_ASSERT_NOT_EQUAL(CPU_ISSET(0, &bitmap), 0);
}
u32 getThreadAffinityMask() {
	cpu_set_t affinity;
	int r = pthread_getaffinity_np(pthread_self(), sizeof(affinity), &affinity);
	ASSERT(r == 0);
	if (CPU_COUNT(&affinity) == 0) return 0;
	// Note: despite the name, this returns the *index* of the first set CPU,
	// not a bitmask.
	for (int i = 0; i < 1024; ++i) {
		if (CPU_ISSET(i, &affinity)) return i;
	}
	return 0;
}
/*
 * Report the number of CPUs in the affinity mask of the main thread.
 */
static int sysconf_cpu_count(void)
{
	cpu_set_t cpuset;
	int ret;

	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (ret != 0)
		return 0;

	return CPU_COUNT(&cpuset);
}
static int SetThreadAffinity( pthread_t *pt, affinity_type_t at, ...)
{
  int i, res, numcpus;
  cpu_set_t cpuset;

  res = pthread_getaffinity_np( *pt, sizeof(cpu_set_t), &cpuset);
  if (res != 0) {
    return 1;
  }

  /* Note: treating CPU_COUNT() as an upper CPU index assumes the mask is
     contiguous and starts at CPU 0. */
  numcpus = CPU_COUNT(&cpuset);

  switch(at) {
    case MASKMOD2:
      CPU_ZERO(&cpuset);
      for( i=0; i<numcpus; i+=2) {
        CPU_SET( i, &cpuset);
      }
      break;

    case STRICTLYFIRST:
      if( numcpus > 1) {
        CPU_ZERO(&cpuset);
        CPU_SET(0, &cpuset);
      }
      break;

    case ALLBUTFIRST:
      if( numcpus > 1) {
        CPU_CLR(0, &cpuset);
      }
      break;

    case MAXCORES:
      {
        int j, num_cores;
        va_list args;
        va_start( args, at);
        num_cores = va_arg( args, int);
        va_end( args);
        for( j=num_cores; j<numcpus; j++) {
          CPU_CLR( j, &cpuset);
        }
      }
      break;

    default:
      break;
  }

  /* set the affinity mask */
  res = pthread_setaffinity_np( *pt, sizeof(cpu_set_t), &cpuset);
  if( res != 0) {
    return 1;
  }

  return 0;
}
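/*
 * A minimal usage sketch (hypothetical, not from the original source):
 * cap a newly created worker at two cores via the variadic MAXCORES case
 * of SetThreadAffinity() above. MAXCORES consumes one int argument, the
 * core budget, as read with va_arg in the snippet.
 */
static int limit_worker_to_two_cores( pthread_t *worker)
{
  return SetThreadAffinity( worker, MAXCORES, 2);
}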
int32 FLinuxMisc::NumberOfCoresIncludingHyperthreads()
{
	cpu_set_t AvailableCpusMask;
	CPU_ZERO(&AvailableCpusMask);

	if (0 != sched_getaffinity(0, sizeof(AvailableCpusMask), &AvailableCpusMask))
	{
		return 1;	// we are running on something, right?
	}

	return CPU_COUNT(&AvailableCpusMask);
}
ssize_t getThreadAffinity(pthread_t pth)
{
    cpu_set_t cset;
    CPU_ZERO(&cset);

    int error = pthread_getaffinity_np(pth, sizeof(cset), &cset);
    if (error != 0)
        perror("pthread_getaffinity_np");

    // Scan the whole set: bounding the loop by CPU_COUNT() would miss
    // set CPUs whose index is >= the count.
    for (int j = 0; j < CPU_SETSIZE; j++)
        if (CPU_ISSET(j, &cset))
            return j;

    return -1;
}
int get_cpu_count(void)
{
#ifdef __GLIBC__
	cpu_set_t cpu_set;
	CPU_ZERO(&cpu_set);
	sched_getaffinity(0, sizeof(cpu_set), &cpu_set);
	return CPU_COUNT(&cpu_set);
#else
	return 1;
#endif
}
int current_cpus(int pid)
{
    cpu_set_t proc_cpus;
    size_t mask_size = sizeof proc_cpus;

    int ret = sched_getaffinity(pid, mask_size, &proc_cpus);
    /* Note: on failure this returns the (positive) errno value, which a
       caller cannot distinguish from a valid CPU count. */
    if (ret == -1)
        return errno;

    int cpu_affinity = CPU_COUNT(&proc_cpus);
    return cpu_affinity;
}
unsigned int thread_entitled_cpus()
{
    cpu_set_t cpuset;

    /* If the affinity APIs are unavailable, the return below is
       unconditional; otherwise it only runs when the affinity query fails,
       and the CPU_COUNT() return is reached on success. */
#ifdef __USE_GNU
#ifdef __ANDROID__
    // pthread_getaffinity_np analog from here:
    // https://www.spinics.net/lists/linux-rt-users/msg16928.html
    if (sched_getaffinity(0, sizeof(cpu_set_t), &cpuset))
#else
    if (pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset))
#endif
#endif
        return (unsigned int) sysconf(_SC_NPROCESSORS_ONLN);

    return (unsigned int) CPU_COUNT(&cpuset);
}
boost::uint32_t SetAffinity(boost::uint32_t cores_bitmask, bool hard)
{
	if (cores_bitmask == 0) {
		return ~0;
	}

#if defined(__APPLE__) || defined(__FreeBSD__)
	// no-op
	return 0;
#elif defined(WIN32)
	// Create mask
	DWORD_PTR cpusWanted = (cores_bitmask & cpusSystem);

	// Set the affinity
	HANDLE thread = GetCurrentThread();
	DWORD_PTR result = 0;
	if (hard) {
		result = SetThreadAffinityMask(thread, cpusWanted);
	} else {
		result = SetThreadIdealProcessor(thread, (DWORD)cpusWanted);
	}

	// Return final mask
	return (result > 0) ? (boost::uint32_t)cpusWanted : 0;
#else
	// Create mask
	cpu_set_t cpusWanted;
	CPU_ZERO(&cpusWanted);

	// Scan the first 32 CPU slots; w/o the min(.., 32) `(1 << n)` could overflow!
	// (Bounding the scan by CPU_COUNT() instead would skip CPUs whose index is
	// >= the count whenever the system mask is non-contiguous.)
	int numCpus = std::min(CPU_SETSIZE, 32);

	for (int n = numCpus - 1; n >= 0; --n) {
		if ((cores_bitmask & (1 << n)) != 0) {
			CPU_SET(n, &cpusWanted);
		}
	}
	CPU_AND(&cpusWanted, &cpusWanted, &cpusSystem);

	// Set the affinity
	int result = sched_setaffinity(0, sizeof(cpu_set_t), &cpusWanted);

	// Return final mask
	uint32_t finalMask = 0;
	for (int n = numCpus - 1; n >= 0; --n) {
		if (CPU_ISSET(n, &cpusWanted)) {
			finalMask |= (1 << n);
		}
	}

	return (result == 0) ? finalMask : 0;
#endif
}
int get_cpu_count()
{
    cpu_set_t cpu_mask;
    CPU_ZERO(&cpu_mask);

    int err = sched_getaffinity(0, sizeof(cpu_set_t), &cpu_mask);
    if (err) {
        LOG_ERROR << "sched_getaffinity failed\n";
        exit(1);
    }

    int count = CPU_COUNT(&cpu_mask);
    printf("%d\n", count);
    return count;
}
static void test_cpu_fill_case_1(void)
{
  size_t i;

  /*
   * Fill the set with all ones and verify
   */
  puts( "Exercise CPU_FILL, CPU_ISSET, and CPU_COUNT" );
  CPU_FILL(&set1);

  /* test that all bits are set */
  for (i=0 ; i<CPU_SETSIZE ; i++) {
    rtems_test_assert( CPU_ISSET(i, &set1) == 1 );
  }

  rtems_test_assert( CPU_COUNT(&set1) == _NCPUBITS );
}
static char *printCpuMask(pid_t pid)
{
    cpu_set_t mask;
    PSCPU_set_t CPUset;
    int numcpus, i;
    static char ret[PSCPU_MAX/4+10];
    char *lstr;
    int offset;

    /* estimate the number of CPUs from PID 1's affinity mask */
    if (sched_getaffinity(1, sizeof(cpu_set_t), &mask) == 0) {
	numcpus = CPU_COUNT(&mask);
    } else {
	numcpus = 128;
    }

    PSCPU_clrAll(CPUset);
    if (sched_getaffinity(pid, sizeof(cpu_set_t), &mask) == 0) {
	for (i = 0; i < numcpus; i++) {
	    if (CPU_ISSET(i, &mask)) {
		PSCPU_setCPU(CPUset, i);
	    }
	}
    } else {
	return "unknown";
    }

    lstr = PSCPU_print(CPUset);
    strcpy(ret, "0x");

    /* cut leading zeros */
    offset = 2;
    while (*(lstr + offset) == '0') {
	offset++;
    }
    if (*(lstr + offset) == '\0') {
	return "0x0";
    }

    strcpy(ret + 2, lstr + offset);
    return ret;
}