/*
 * Exercise sched_getaffinity()/sched_setaffinity(): read the process
 * affinity, restrict it to every other CPU, then flip to the
 * complementary CPUs — or verify the API consistently reports ENOSYS
 * where it is unsupported.
 */
int main()
{
  unsigned int bit;
  int rc;
  cpu_set_t newset;
  cpu_set_t curset;
  cpu_set_t evenset;
  cpu_set_t allset;

  CPU_ZERO(&curset);
  CPU_ZERO(&evenset);
  CPU_ZERO(&allset);

  for (bit = 0; bit < sizeof(cpu_set_t)*8; bit += 2)
    {
      CPU_SET(bit, &evenset); /* 0b01010101010101010101010101010101 */
    }
  for (bit = 0; bit < sizeof(cpu_set_t)*8; bit++)
    {
      CPU_SET(bit, &allset); /* 0b11111111111111111111111111111111 */
    }

  /* The starting affinity must be readable and non-empty. */
  assert(sched_getaffinity(0, sizeof(cpu_set_t), &newset) == 0);
  assert(!CPU_EQUAL(&newset, &curset));

  rc = sched_setaffinity(0, sizeof(cpu_set_t), &newset);
  if (rc != 0)
    {
      int err =
#if defined (__PTW32_USES_SEPARATE_CRT)
      GetLastError();
#else
      errno;
#endif
      /* The only acceptable failure is "not implemented". */
      assert(err != ESRCH);
      assert(err != EFAULT);
      assert(err != EPERM);
      assert(err != EINVAL);
      assert(err != EAGAIN);
      assert(err == ENOSYS);
      /* NOTE(review): curset was zeroed and never refilled, so
       * CPU_COUNT() here is 0 and this assert would fire if this branch
       * were ever taken — possibly meant to count newset; confirm
       * against upstream. */
      assert(CPU_COUNT(&curset) == 1);
    }
  else
    {
      /* NOTE(review): curset is still all-zero here, so this branch is
       * dead as written — possibly meant CPU_COUNT(&newset); confirm
       * against upstream. */
      if (CPU_COUNT(&curset) > 1)
        {
          CPU_AND(&newset, &curset, &evenset); /* Remove every other CPU */
          assert(sched_setaffinity(0, sizeof(cpu_set_t), &newset) == 0);
          assert(sched_getaffinity(0, sizeof(cpu_set_t), &curset) == 0);
          CPU_XOR(&newset, &curset, &allset); /* Switch to all alternative CPUs */
          assert(sched_setaffinity(0, sizeof(cpu_set_t), &newset) == 0);
          assert(sched_getaffinity(0, sizeof(cpu_set_t), &curset) == 0);
          assert(!CPU_EQUAL(&newset, &curset));
        }
    }
  return 0;
}
/**
 * Returns the mask of CPUs not claimed by any worker.
 *
 * Builds a mask of all online CPUs (0 .. get_nprocs()-1) and XORs it with
 * the worker union mask: online CPUs used by workers are cleared, online
 * CPUs free of workers stay set. Note XOR (not AND-NOT) is preserved from
 * the original: any worker-union bits at indices >= get_nprocs() would
 * remain set in the result.
 */
cpu_set_t seissol::parallel::getFreeCPUsMask() {
  cpu_set_t workerUnion = getWorkerUnionMask();
  cpu_set_t set;
  CPU_ZERO(&set);
  // Hoist the loop-invariant get_nprocs() call out of the loop condition
  // so it is evaluated once instead of once per iteration.
  const int onlineCpus = get_nprocs();
  for (int i = 0; i < onlineCpus; ++i) {
    CPU_SET(i, &set);
  }
  CPU_XOR(&set, &set, &workerUnion);
  return set;
}
/*
 * Exercise pthread_getaffinity_np()/pthread_setaffinity_np() on the
 * calling thread: read the starting affinity, restrict it to every other
 * CPU, then flip to the complementary CPUs and verify each change took
 * effect — or skip when the API is unsupported (ENOSYS).
 */
int main()
{
  int rc;
  unsigned int bit;
  cpu_set_t newset;
  cpu_set_t startset;
  cpu_set_t curset;
  cpu_set_t evenset;
  cpu_set_t allset;
  pthread_t me = pthread_self();

  CPU_ZERO(&curset);
  CPU_ZERO(&evenset);
  CPU_ZERO(&allset);

  /* Probe for API support before asserting on results. */
  if (pthread_getaffinity_np(me, sizeof(cpu_set_t), &startset) == ENOSYS)
    {
      printf("pthread_get/set_affinity_np API not supported for this platform: skipping test.");
      return 0;
    }

  assert(pthread_getaffinity_np(me, sizeof(cpu_set_t), &startset) == 0);
  printf("This thread has a starting affinity with %d CPUs\n", CPU_COUNT(&startset));
  assert(!CPU_EQUAL(&curset, &startset));

  for (bit = 0; bit < sizeof(cpu_set_t)*8; bit += 2)
    {
      CPU_SET(bit, &evenset); /* 0b01010101010101010101010101010101 */
    }
  for (bit = 0; bit < sizeof(cpu_set_t)*8; bit++)
    {
      CPU_SET(bit, &allset); /* 0b11111111111111111111111111111111 */
    }

  rc = pthread_setaffinity_np(me, sizeof(cpu_set_t), &startset);
  if (rc != 0)
    {
      /* The only acceptable failure is "not implemented". */
      assert(rc != ESRCH);
      assert(rc != EFAULT);
      assert(rc != EPERM);
      assert(rc != EINVAL);
      assert(rc != EAGAIN);
      assert(rc == ENOSYS);
      /* NOTE(review): curset was zeroed and never refilled, so
       * CPU_COUNT() here is 0 and this assert would fire if this branch
       * were ever taken — possibly meant startset; confirm upstream. */
      assert(CPU_COUNT(&curset) == 1);
    }
  else
    {
      /* NOTE(review): curset is still all-zero here, so this branch is
       * dead as written — possibly meant CPU_COUNT(&startset); confirm
       * against upstream. */
      if (CPU_COUNT(&curset) > 1)
        {
          CPU_AND(&newset, &startset, &evenset); /* Remove every other CPU */
          assert(pthread_setaffinity_np(me, sizeof(cpu_set_t), &newset) == 0);
          assert(pthread_getaffinity_np(me, sizeof(cpu_set_t), &curset) == 0);
          assert(CPU_EQUAL(&curset, &newset));
          CPU_XOR(&newset, &curset, &allset); /* Switch to all alternative CPUs */
          assert(!CPU_EQUAL(&curset, &newset));
          assert(pthread_setaffinity_np(me, sizeof(cpu_set_t), &newset) == 0);
          assert(pthread_getaffinity_np(me, sizeof(cpu_set_t), &curset) == 0);
          assert(CPU_EQUAL(&curset, &newset));
        }
    }
  return 0;
}
int test_affinity1(void) #endif { unsigned int cpu; cpu_set_t newmask; cpu_set_t src1mask; cpu_set_t src2mask; cpu_set_t src3mask; CPU_ZERO(&newmask); CPU_ZERO(&src1mask); memset(&src2mask, 0, sizeof(cpu_set_t)); assert(memcmp(&src1mask, &src2mask, sizeof(cpu_set_t)) == 0); assert(CPU_EQUAL(&src1mask, &src2mask)); assert(CPU_COUNT(&src1mask) == 0); CPU_ZERO(&src1mask); CPU_ZERO(&src2mask); CPU_ZERO(&src3mask); for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2) { CPU_SET(cpu, &src1mask); /* 0b01010101010101010101010101010101 */ } for (cpu = 0; cpu < sizeof(cpu_set_t)*4; cpu++) { CPU_SET(cpu, &src2mask); /* 0b00000000000000001111111111111111 */ } for (cpu = sizeof(cpu_set_t)*4; cpu < sizeof(cpu_set_t)*8; cpu += 2) { CPU_SET(cpu, &src2mask); /* 0b01010101010101011111111111111111 */ } for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2) { CPU_SET(cpu, &src3mask); /* 0b01010101010101010101010101010101 */ } assert(CPU_COUNT(&src1mask) == (sizeof(cpu_set_t)*4)); assert(CPU_COUNT(&src2mask) == ((sizeof(cpu_set_t)*4 + (sizeof(cpu_set_t)*2)))); assert(CPU_COUNT(&src3mask) == (sizeof(cpu_set_t)*4)); CPU_SET(0, &newmask); CPU_SET(1, &newmask); CPU_SET(3, &newmask); assert(CPU_ISSET(1, &newmask)); CPU_CLR(1, &newmask); assert(!CPU_ISSET(1, &newmask)); CPU_OR(&newmask, &src1mask, &src2mask); assert(CPU_EQUAL(&newmask, &src2mask)); CPU_AND(&newmask, &src1mask, &src2mask); assert(CPU_EQUAL(&newmask, &src1mask)); CPU_XOR(&newmask, &src1mask, &src3mask); memset(&src2mask, 0, sizeof(cpu_set_t)); assert(memcmp(&newmask, &src2mask, sizeof(cpu_set_t)) == 0); /* * Need to confirm the bitwise logical right-shift in CpuCount(). * i.e. zeros inserted into MSB on shift because cpu_set_t is * unsigned. */ CPU_ZERO(&src1mask); for (cpu = 1; cpu < sizeof(cpu_set_t)*8; cpu += 2) { CPU_SET(cpu, &src1mask); /* 0b10101010101010101010101010101010 */ } assert(CPU_ISSET(sizeof(cpu_set_t)*8-1, &src1mask)); assert(CPU_COUNT(&src1mask) == (sizeof(cpu_set_t)*4)); return 0; }
/*
 * Store into 'dest' the symmetric difference (bitwise XOR) of the two
 * CPU masks 'src1' and 'src2'. Thin wrapper over CPU_XOR on the
 * underlying cpu_set_t.
 */
void odp_cpumask_xor(odp_cpumask_t *dest, const odp_cpumask_t *src1,
                     const odp_cpumask_t *src2)
{
  CPU_XOR(&dest->set, &src1->set, &src2->set);
}