int
pmc_cpu_is_primary(int cpu)
{
#ifdef	SMP
	return (!CPU_ISSET(cpu, &logical_cpus_mask));
#else
	return (1);
#endif
}
/**
 * @brief Copy a Linux cpu_set_t.
 *
 * @param dst Will be the copy.
 * @param src The source (will be copied).
 *
 * @return void
 */
static void cpu_set_copy( os_cpu_set_t* dst, os_cpu_set_t* src )
{
    int i;

    CPU_ZERO( dst );
    for (i = 0; i < CPU_SETSIZE; i++)
        if ( CPU_ISSET(i, src) )
            CPU_SET(i, dst);
}
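A quick aside on the copy above: with glibc, cpu_set_t is an ordinary fixed-size struct, so when source and destination have the same type a plain assignment or memcpy does the same job; the bit-by-bit loop matters mainly when the two set types may differ in size. A minimal sketch (cpu_set_copy_simple is a hypothetical name, not from the snippet above):

#define _GNU_SOURCE
#include <sched.h>
#include <string.h>

/* Hypothetical helper: byte-wise copy of a fixed-size cpu_set_t. */
static void cpu_set_copy_simple(cpu_set_t *dst, const cpu_set_t *src)
{
    memcpy(dst, src, sizeof(*dst));  /* equivalent to *dst = *src */
}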
static int _mask_to_int(cpu_set_t *mask)
{
	int i, rc = 0;

	/* Only CPUs whose bit fits in an int can be encoded; shifting
	 * 1 << i for i >= 31 would overflow, so stop before the sign bit. */
	for (i = 0; i < CPU_SETSIZE && i < (int)(sizeof(rc) * 8 - 1); i++) {
		if (CPU_ISSET(i, mask))
			rc |= (1 << i);
	}
	return rc;
}
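A usage sketch for _mask_to_int(), assuming the version above: print the calling process's affinity as a hexadecimal bit mask (print_affinity_bits is a hypothetical caller, not part of the original snippet):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int print_affinity_bits(void)
{
    cpu_set_t mask;

    /* Query the affinity of the calling process (pid 0 = self). */
    if (sched_getaffinity(0, sizeof(mask), &mask) != 0)
        return -1;
    printf("affinity bits: 0x%x\n", _mask_to_int(&mask));
    return 0;
}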
static inline unsigned int
__cpu_count(const rte_cpuset_t *cpuset)
{
	unsigned int i, count = 0;

	for (i = 0; i < RTE_MAX_LCORE; i++)
		if (CPU_ISSET(i, cpuset))
			count++;
	return count;
}
// Get affinity mask of the current process
// Parameters:
//  processMask - affinity mask for the specified process
//  systemMask  - affinity mask for the system
// Return:
//  true if it has succeeded, false if it has failed
// Remarks:
//  A process affinity mask is a bit vector in which each bit represents the processors that
//  a process is allowed to run on. A system affinity mask is a bit vector in which each bit
//  represents the processors that are configured into a system.
//  A process affinity mask is a subset of the system affinity mask. A process is only allowed
//  to run on the processors configured into a system. Therefore, the process affinity mask cannot
//  specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processAffinityMask, uintptr_t* systemAffinityMask)
{
    if (g_logicalCpuCount > 64)
    {
        *processAffinityMask = 0;
        *systemAffinityMask = 0;
        return true;
    }

    uintptr_t systemMask = GetFullAffinityMask(g_logicalCpuCount);

#if HAVE_SCHED_GETAFFINITY

    int pid = getpid();
    cpu_set_t cpuSet;
    int st = sched_getaffinity(pid, sizeof(cpu_set_t), &cpuSet);

    if (st == 0)
    {
        uintptr_t processMask = 0;

        for (int i = 0; i < g_logicalCpuCount; i++)
        {
            if (CPU_ISSET(i, &cpuSet))
            {
                processMask |= ((uintptr_t)1) << i;
            }
        }

        *processAffinityMask = processMask;
        *systemAffinityMask = systemMask;
        return true;
    }
    else if (errno == EINVAL)
    {
        // There are more processors than can fit in a cpu_set_t.
        // Return zero in both masks.
        *processAffinityMask = 0;
        *systemAffinityMask = 0;
        return true;
    }
    else
    {
        // We should not get any of the errors that sched_getaffinity can return since none
        // of them applies for the current thread, so this is an unexpected kind of failure.
        return false;
    }

#else // HAVE_SCHED_GETAFFINITY

    // There is no API to manage thread affinity, so return both affinity masks
    // with all the CPUs on the system set.
    *systemAffinityMask = systemMask;
    *processAffinityMask = systemMask;
    return true;

#endif // HAVE_SCHED_GETAFFINITY
}
cpus_t *read_affinity(void)
{
  cpu_set_t mask;
  int sz = 0;
  int res = pthread_getaffinity_np(pthread_self(), sizeof(mask), &mask);

  if (res != 0) {
    errexit("pthread_getaffinity_np", res);
  }
  /* Count the CPUs present in the mask... */
  for (int p = 0; p < CPU_SETSIZE; p++) {
    if (CPU_ISSET(p, &mask)) sz++;
  }
  /* ...then record their indices. */
  cpus_t *r = cpus_create(sz);
  for (int p = 0, *q = r->cpu; p < CPU_SETSIZE; p++) {
    if (CPU_ISSET(p, &mask)) *q++ = p;
  }
  return r;
}
int main(int argc, char *argv[])
{
    int s, j, nprocs;
    cpu_set_t cpuset;
    pthread_t thread;

    thread = pthread_self();
    nprocs = sysconf(_SC_NPROCESSORS_ONLN);

    /* Start from all online CPUs, then exclude CPUs 1 through 5 */
    CPU_ZERO(&cpuset);
    for (j = 0; j < nprocs; j++)
        CPU_SET(j, &cpuset);
    CPU_CLR(1, &cpuset);
    CPU_CLR(2, &cpuset);
    CPU_CLR(3, &cpuset);
    CPU_CLR(4, &cpuset);
    CPU_CLR(5, &cpuset);

    /* Check which CPUs have actually been set */
    for (j = 0; j < nprocs; j++)
        fprintf(stdout, "CPU: %d, status: %d\n", j, CPU_ISSET(j, &cpuset));

    s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
    if (s != 0)
        handle_error_en(s, "pthread_setaffinity_np");

    /* Check the actual affinity mask assigned to the thread */
    s = pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
    if (s != 0)
        handle_error_en(s, "pthread_getaffinity_np");

    printf("Set returned by pthread_getaffinity_np() contained:\n");
    for (j = 0; j < CPU_SETSIZE; j++)
        if (CPU_ISSET(j, &cpuset))
            printf("    CPU %d\n", j);

    exit(EXIT_SUCCESS);
}
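The example relies on a handle_error_en() helper that is not shown. A minimal sketch of such a helper (assumed here, not taken from the snippet):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed helper: report an error number returned by a pthreads call and exit. */
#define handle_error_en(en, msg) \
    do { errno = en; perror(msg); exit(EXIT_FAILURE); } while (0)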
void
Component_exec_i::ccm_activate (void)
{
#if defined (ACE_HAS_SCHED_GETAFFINITY)
  if (ACE_OS::num_processors () < 2)
    {
      ACE_DEBUG ((LM_DEBUG, "This machine only has a single processor, aborting\n"));
      return;
    }

  cpu_set_t mask;
  CPU_ZERO (&mask);

  int retval = sched_getaffinity (0, sizeof (cpu_set_t), &mask);
  if (retval != 0)
    {
      ACE_ERROR ((LM_ERROR, "Error: Non-zero return value from sched_getaffinity %p\n"));
      return;
    }

  int z_set = CPU_ISSET (0, &mask);
  int o_set = CPU_ISSET (1, &mask);

  if (cpu_affinity_ == 0 && (!z_set || o_set))
    {
      ACE_ERROR ((LM_ERROR, "Error: Expected to only be on processor zero.\n"));
      return;
    }

  if (cpu_affinity_ == 1 && (z_set || !o_set))
    {
      ACE_ERROR ((LM_ERROR, "Error: Expected to only be on processor one.\n"));
    }

  if (cpu_affinity_ > 1)
    {
      ACE_ERROR ((LM_ERROR, "Error: Trying to test an affinity I don't support\n"));
    }
#endif
}
static void test_cpu_clr_case_1(size_t cpu)
{
  size_t i;

  /*
   * Fill the set, clear one CPU, and verify.
   */
  printf( "Exercise CPU_FILL, CPU_CLR(%zu), and CPU_ISSET\n", cpu );
  CPU_FILL(&set1);
  CPU_CLR(cpu, &set1);

  /* test that all bits except the cleared one are set */
  for (i=0 ; i<CPU_SETSIZE ; i++) {
    if (i==cpu)
      rtems_test_assert( CPU_ISSET(i, &set1) == 0 );
    else
      rtems_test_assert( CPU_ISSET(i, &set1) == 1 );
  }
}
int getcpu_fromset(cpu_set_t set, int max_cpus)
{
	int j;

	/* Note: the set is passed by value, so the whole structure is copied. */
	for (j = 0; j < max_cpus; j++) {
		if (CPU_ISSET(j, &set))
			return j;
	}
	return -1;
}
int
_cpu_count(cpu_set_t *set)
{
	int i, n = 0;

	/* Iterate over every CPU bit; the original bound of
	 * sizeof(*set) / sizeof(__cpu_mask) is the number of mask
	 * *words*, which would only cover the first few CPUs. */
	for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, set))
			n++;
	return (n);
}
/* Old pthread implementations do not have the CPU_COUNT macro. */
static inline int _my_cpu_count(cpu_set_t *set)
{
    int count = 0;

    for (int i = 0; i < CPU_SETSIZE; ++i) {
        if (CPU_ISSET(i, set))
            count++;
    }
    return count;
}
static gboolean
ufo_cpu_node_equal_real (UfoNode *n1, UfoNode *n2)
{
    UfoCpuNodePrivate *priv1;
    UfoCpuNodePrivate *priv2;
    const gsize MAX_CPUS = MIN (16, CPU_SETSIZE);

    g_return_val_if_fail (UFO_IS_CPU_NODE (n1) && UFO_IS_CPU_NODE (n2), FALSE);
    priv1 = UFO_CPU_NODE_GET_PRIVATE (n1);
    priv2 = UFO_CPU_NODE_GET_PRIVATE (n2);

    for (gsize i = 0; i < MAX_CPUS; i++) {
        if (CPU_ISSET (i, priv1->mask) != CPU_ISSET (i, priv2->mask))
            return FALSE;
    }

    return TRUE;
}
static int CPU_COUNT(cpu_set_t *set)
{
	size_t i, count = 0;

	for (i = 0; i < CPU_SETSIZE; i++)
		if (CPU_ISSET(i, set))
			count++;
	return count;
}
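One caveat with naming the fallback CPU_COUNT(): glibc already defines CPU_COUNT as a macro in <sched.h> (with _GNU_SOURCE), so a function of that name only compiles where the macro is absent. A guard along these lines keeps both cases working (my_cpu_count is a hypothetical local helper, not from the snippet above):

#define _GNU_SOURCE
#include <sched.h>

static inline int my_cpu_count(cpu_set_t *set)
{
    int i, count = 0;

    for (i = 0; i < CPU_SETSIZE; i++)
        if (CPU_ISSET(i, set))
            count++;
    return count;
}

#ifndef CPU_COUNT                      /* only define the fallback if needed */
#define CPU_COUNT(set) my_cpu_count(set)
#endif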
int
pmc_cpu_is_active(int cpu)
{
#ifdef	SMP
	return (pmc_cpu_is_present(cpu) &&
	    !CPU_ISSET(cpu, &hlt_cpus_mask));
#else
	return (1);
#endif
}
static int cpu_enable(cpu_set_t *cpu_set, size_t setsize, int enable)
{
	unsigned int cpu;
	int online, rc;
	int configured = -1;

	for (cpu = 0; cpu < setsize; cpu++) {
		if (!CPU_ISSET(cpu, cpu_set))
			continue;
		if (!path_exist(_PATH_SYS_CPU "/cpu%d", cpu)) {
			printf(_("CPU %d does not exist\n"), cpu);
			continue;
		}
		if (!path_exist(_PATH_SYS_CPU "/cpu%d/online", cpu)) {
			printf(_("CPU %d is not hot pluggable\n"), cpu);
			continue;
		}
		online = path_getnum(_PATH_SYS_CPU "/cpu%d/online", cpu);
		if ((online == 1) && (enable == 1)) {
			printf(_("CPU %d is already enabled\n"), cpu);
			continue;
		}
		if ((online == 0) && (enable == 0)) {
			printf(_("CPU %d is already disabled\n"), cpu);
			continue;
		}
		if (path_exist(_PATH_SYS_CPU "/cpu%d/configure", cpu))
			configured = path_getnum(_PATH_SYS_CPU "/cpu%d/configure", cpu);
		if (enable) {
			rc = path_writestr("1", _PATH_SYS_CPU "/cpu%d/online", cpu);
			if ((rc == -1) && (configured == 0))
				printf(_("CPU %d enable failed "
					 "(CPU is deconfigured)\n"), cpu);
			else if (rc == -1)
				printf(_("CPU %d enable failed (%m)\n"), cpu);
			else
				printf(_("CPU %d enabled\n"), cpu);
		} else {
			if (onlinecpus && num_online_cpus() == 1) {
				printf(_("CPU %d disable failed "
					 "(last enabled CPU)\n"), cpu);
				continue;
			}
			rc = path_writestr("0", _PATH_SYS_CPU "/cpu%d/online", cpu);
			if (rc == -1)
				printf(_("CPU %d disable failed (%m)\n"), cpu);
			else {
				printf(_("CPU %d disabled\n"), cpu);
				if (onlinecpus)
					CPU_CLR(cpu, onlinecpus);
			}
		}
	}
	return EXIT_SUCCESS;
}
ProcessorMap::ProcessorMap()
{
  m_nProcs = 0;
  m_p_nProcessor_Ids = NULL;

  m_nProcs = DetermineNumberOfProcessors();
  if( m_nProcs <= 0 ) {
#ifdef OS_SOLARIS
    fatal("sysconf() reports %i processors online.\n", m_nProcs );
#endif
#ifdef OS_LINUX
    fatal("sched_getaffinity() reports empty processor mask.\n");
#endif
  }

  m_p_nProcessor_Ids = new int[m_nProcs];
  if( m_p_nProcessor_Ids == NULL ) {
    fatal("new int[%i] returned NULL -- out of memory?\n", m_nProcs );
  }

  int i;
  int n = 0;

#ifdef OS_SOLARIS
  int status;
  for( i = 0; n < m_nProcs && i < 4096; i++ ) {
    status = p_online(i, P_STATUS);
    if( status == -1 && errno == EINVAL )
      continue;
    m_p_nProcessor_Ids[n] = i;
    n++;
  }
#endif

#ifdef OS_LINUX
  cpu_set_t cpus;
  // Returns number of processors available to process (based on affinity mask)
  if( sched_getaffinity(0, sizeof(cpus), (cpu_set_t*) &cpus) < 0 ) {
    fatal("sched_getaffinity() reports empty processor mask.\n" );
  }
  for( i = 0; n < m_nProcs && i < (int)(sizeof(cpus) * 8); i++ ) {
    if( CPU_ISSET( i, &cpus ) ) {
      m_p_nProcessor_Ids[n] = i;
      n++;
    }
  }
#endif

  if( n != m_nProcs ) {
    fatal("Unable to find all processor numbers.\n" );
  }
}
// Get/set the affinity of the process.
void affinity_control()
{
    // CPU affinity set structure
    cpu_set_t set;

    // Macro that clears every CPU bit in the structure
    CPU_ZERO(&set);

    // Query the affinity of the current process.
    if (sched_getaffinity(0, sizeof(cpu_set_t), &set) == -1)
        errexit("sched_getaffinity");

    // Inspect the affinity values
    int i;
    // CPU_SETSIZE = 1024
    for (i = 0; i < CPU_SETSIZE; ++i) {
        int cpu;
        // Macro that tests whether CPU i is set
        cpu = CPU_ISSET(i, &set);
        printf("CPU #%d = %d\n", i, cpu);
    }

    // Configure the affinity
    CPU_ZERO(&set);
    // Set CPUs 0 and 1.
    CPU_SET(0, &set);
    CPU_SET(1, &set);
    // Clear CPUs 2 and 3.
    CPU_CLR(2, &set);
    CPU_CLR(3, &set);

    // Apply the configured set as the affinity of the current process.
    if (sched_setaffinity(0, sizeof(cpu_set_t), &set) == -1)
        errexit("sched_setaffinity");

    for (i = 0; i < CPU_SETSIZE; ++i) {
        int cpu;
        // Macro that tests whether CPU i is set
        cpu = CPU_ISSET(i, &set);
        printf("CPU #%d = %d\n", i, cpu);
    }
}
int set_cpu_affinity(cpu_set_t *test_mask, cpu_set_t *admin_mask)
{
	int status, i, admin_proc;
	cpu_set_t current_mask;

	/* handle uniprocessor case */
	if (num_processors == 1 || uniprocessor) {
		CPU_ZERO(admin_mask);
		CPU_ZERO(test_mask);
		CPU_SET(0, admin_mask);
		CPU_SET(0, test_mask);
		info("admin and test threads running on one processor\n");
		return SUCCESS;
	}

	/* first set our main thread to run on the first schedulable
	   processor we can find */
	status = sched_getaffinity(0, sizeof(cpu_set_t), &current_mask);
	if (status) {
		error("failed getting CPU affinity mask: 0x%x\n", status);
		return FAILURE;
	}
	for (i = 0; i < num_processors; i++) {
		if (CPU_ISSET(i, &current_mask))
			break;
	}
	if (i >= num_processors) {
		error("No schedulable CPU found for main!\n");
		return FAILURE;
	}
	admin_proc = i;
	CPU_ZERO(admin_mask);
	CPU_SET(admin_proc, admin_mask);

	status = sched_setaffinity(0, sizeof(cpu_set_t), admin_mask);
	if (status) {
		error("set_cpu_affinity: setting CPU affinity mask: 0x%x\n", status);
		return FAILURE;
	}
	info("Admin thread running on processor: %d\n", i);

	/* Set test affinity so that tests run on the non-admin processors */
	CPU_ZERO(test_mask);
	for (i = admin_proc + 1; i < num_processors; i++)
		CPU_SET(i, test_mask);

	if (admin_proc + 1 == num_processors - 1)
		info("Test threads running on processor: %ld\n",
		     num_processors - 1);
	else
		info("Test threads running on processors: %d-%d\n",
		     admin_proc + 1, (int)num_processors - 1);

	return SUCCESS;
}
int32_t odp_cpumask_to_str(const odp_cpumask_t *mask, char *str, int32_t len)
{
	char *p = str;
	int cpu = odp_cpumask_last(mask);
	int nibbles;
	int value;

	/* Handle bad string length, need at least 4 chars for "0x0" and
	 * terminating null char */
	if (len < 4)
		return -1; /* Failure */

	/* Handle no CPU found */
	if (cpu < 0) {
		strcpy(str, "0x0");
		return strlen(str) + 1; /* Success */
	}
	/* CPU was found and cpu >= 0 */

	/* Compute number of nibbles in cpumask that have bits set */
	nibbles = (cpu / 4) + 1;

	/* Verify minimum space (account for "0x" and termination) */
	if (len < (3 + nibbles))
		return -1; /* Failure */

	/* Prefix */
	*p++ = '0';
	*p++ = 'x';

	/*
	 * Now we can scan the cpus down to zero and
	 * build the string one nibble at a time
	 */
	value = 0;
	do {
		/* Set bit to go into the current nibble */
		if (CPU_ISSET(cpu, &mask->set))
			value |= 1 << (cpu % 4);

		/* If we are on a nibble boundary flush value to string */
		if (0 == (cpu % 4)) {
			if (value < 0xA)
				*p++ = '0' + value;
			else
				*p++ = 'A' + value - 0xA;
			value = 0;
		}
	} while (cpu--);

	/* Terminate the string */
	*p++ = 0;
	return p - str; /* Success */
}
static inline int next_cpu_in_set(cpu_set_t *cpus, int last)
{
	int n;

	for (n = last + 1; n < CPU_SETSIZE; n++) {
		if (CPU_ISSET(n, cpus)) {
			return n;
		}
	}
	return -1;
}
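A typical way to drive next_cpu_in_set(): seed with -1 and loop until it returns -1, visiting every CPU in the set in ascending order. A sketch, assuming the helper above (print_cpus_in_set is a hypothetical caller):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static void print_cpus_in_set(cpu_set_t *cpus)
{
    /* Start below CPU 0; a return of -1 means no further CPU is set. */
    for (int c = next_cpu_in_set(cpus, -1); c >= 0; c = next_cpu_in_set(cpus, c))
        printf("cpu %d\n", c);
}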
/*
 * Get CPU number of n'th CPU in cpu_set. N as values 1 ... CPU_SETSIZE.
 */
int armas_nth_cpu(cpu_set_t *cpus, int n)
{
    int k;

    for (k = 0; k < CPU_SETSIZE; k++) {
        if (CPU_ISSET(k, cpus))
            n--;
        if (n == 0)
            break;
    }
    return k == CPU_SETSIZE ? -1 : k;
}
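A possible caller for armas_nth_cpu(), pinning the calling thread to the n'th allowed CPU (1-based, per the comment above). This is only a sketch under the assumption that the helper is available; pin_to_nth_cpu is a hypothetical name:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static int pin_to_nth_cpu(int n)
{
    cpu_set_t allowed, target;

    if (sched_getaffinity(0, sizeof(allowed), &allowed) != 0)
        return -1;

    int cpu = armas_nth_cpu(&allowed, n);
    if (cpu < 0)
        return -1;                    /* fewer than n CPUs allowed */

    CPU_ZERO(&target);
    CPU_SET(cpu, &target);
    return pthread_setaffinity_np(pthread_self(), sizeof(target), &target);
}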
mt::LinuxCPUAffinityThreadInitializer::
LinuxCPUAffinityThreadInitializer(const cpu_set_t& cpu)
{
	for (int i = 0; i < CPU_SETSIZE; ++i)
	{
		CPU_CLR(i, &mCPU);
		if (CPU_ISSET(i, &cpu))
			CPU_SET(i, &mCPU);
	}
}
/**
 * check if a cpu is allowed to be used
 */
int cpu_allowed(int id)
{
	cpu_set_t mask;

	CPU_ZERO( &mask );
	if (!sched_getaffinity(0, sizeof(cpu_set_t), &mask)) {
		return CPU_ISSET( id, &mask );
	}
	return 0;
}
void init_generator_test()
{
	cpu_set_t bitmap;

	init_generator();	// expected to pin the current process to CPU 0

	sched_getaffinity(0, sizeof(bitmap), &bitmap);
	CU_ASSERT_EQUAL(CPU_COUNT(&bitmap), 1);		// exactly one CPU in the mask
	CU_ASSERT_NOT_EQUAL(CPU_ISSET(0, &bitmap), 0);	// and it is CPU 0

	// Move the process to CPU 1, then verify init_generator() pins it back.
	CPU_ZERO(&bitmap);
	CPU_SET(1, &bitmap);
	sched_setaffinity(0, sizeof(bitmap), &bitmap);

	init_generator();

	sched_getaffinity(0, sizeof(bitmap), &bitmap);
	CU_ASSERT_EQUAL(CPU_COUNT(&bitmap), 1);
	CU_ASSERT_NOT_EQUAL(CPU_ISSET(0, &bitmap), 0);
}
static int nth_set_cpu(unsigned int n, cpu_set_t* cpuSet)
{
    /* Note: assumes cpuSet contains at least one CPU; CPU_COUNT() == 0
     * would make the modulo below divide by zero. */
    n %= CPU_COUNT(cpuSet);
    for (unsigned int setCpusSeen = 0, currentCpu = 0; true; ++currentCpu) {
        if (CPU_ISSET(currentCpu, cpuSet)) {
            ++setCpusSeen;
            if (setCpusSeen > n) {
                return currentCpu;
            }
        }
    }
}
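Because nth_set_cpu() wraps n modulo the population count, it lends itself to round-robin pinning of workers across whatever CPUs the process is allowed to use. A sketch under that assumption (pin_worker is a hypothetical caller, not from the snippet above):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static int pin_worker(unsigned int worker_index)
{
    cpu_set_t allowed, one;

    if (sched_getaffinity(0, sizeof(allowed), &allowed) != 0)
        return -1;

    /* Worker i lands on the (i mod count)'th allowed CPU. */
    CPU_ZERO(&one);
    CPU_SET(nth_set_cpu(worker_index, &allowed), &one);
    return pthread_setaffinity_np(pthread_self(), sizeof(one), &one);
}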
int GetCPUCount()
{
	cpu_set_t cs;
	int i, count = 0;

	CPU_ZERO(&cs);
	sched_getaffinity(0, sizeof(cs), &cs);

	/* Scan the whole set; the original hard-coded bound of 8 would
	 * undercount on machines with more than 8 CPUs. */
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, &cs))
			count++;
	}
	return count;
}
u32 getThreadAffinityMask()
{
	cpu_set_t affinity;
	int r = pthread_getaffinity_np(pthread_self(), sizeof(affinity), &affinity);
	ASSERT(r == 0);
	if (CPU_COUNT(&affinity) == 0)
		return 0;
	/* Despite the name, this returns the index of the lowest CPU in
	 * the mask, not a bit mask. */
	for (int i = 0; i < CPU_SETSIZE; ++i) {
		if (CPU_ISSET(i, &affinity))
			return i;
	}
	return 0;
}
void
vm_activate_cpu(struct vm *vm, int vcpuid)
{
	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU,
	    ("vm_activate_cpu: invalid vcpuid %d", vcpuid));
	KASSERT(!CPU_ISSET(vcpuid, &vm->active_cpus),
	    ("vm_activate_cpu: vcpuid %d is already active", vcpuid));

	VCPU_CTR0(vm, vcpuid, "activated");
	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
}
void get_proc_info (int* cpu_count, int* numa_nodes, cpu_set_t* affinity)
{
    int iter;
    char buff[2048];
    cpu_set_t active;
    FILE* info_input;
    char const* pos;
    char const* str_proc = "processor";
    char const* str_node = "physical id";
    int cpu_id, node_id;

    CPU_ZERO (&active);
    sched_getaffinity(0, sizeof(active), &active);

    *cpu_count = 0;
    for (iter = 0; iter < CPU_SETSIZE; ++iter) {
        if (CPU_ISSET (iter, &active)) {
            ++(*cpu_count);
        }
    }

    info_input = fopen(CPU_INFO, "r");
    if (info_input != 0 && affinity != 0) {
        *numa_nodes = 1;
        node_id = -1;
        cpu_id = -1;
        while (fgets(buff, 2048, info_input)) {
            /* Use strlen, not sizeof, for the label lengths: sizeof on a
             * char pointer yields the pointer size, not the string length. */
            if (strncmp(buff, str_proc, strlen(str_proc)) == 0) {
                pos = strchr(buff + strlen(str_proc), ':');
                if (pos)
                    cpu_id = atoi(pos + 1);
            }
            if (strncmp(buff, str_node, strlen(str_node)) == 0) {
                pos = strchr(buff + strlen(str_node), ':');
                if (pos)
                    node_id = atoi(pos + 1);
            }
            if (node_id + 1 > *numa_nodes)
                *numa_nodes = node_id + 1;
            if (node_id >= 0 && cpu_id >= 0) {
                CPU_SET(cpu_id, affinity + node_id);
                cpu_id = -1;
                node_id = -1;
            }
        }
    }
    /* Only close the file if it was actually opened. (An unconditional
     * early return that left the whole /proc/cpuinfo scan unreachable
     * has been removed.) */
    if (info_input != 0)
        fclose(info_input);
}