bool
gomp_affinity_same_place (void *p, void *q)
{
#ifdef CPU_EQUAL_S
  return CPU_EQUAL_S (gomp_cpuset_size, (cpu_set_t *) p, (cpu_set_t *) q);
#else
  return memcmp (p, q, gomp_cpuset_size) == 0;
#endif
}
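For context, a minimal standalone sketch of the glibc dynamically sized cpu_set_t API that the comparison above relies on; the CPU count of 128 is an arbitrary, hypothetical value, and when CPU_EQUAL_S is unavailable the memcmp fallback above performs the same byte-wise comparison over gomp_cpuset_size bytes:

/* Sketch only: compare two dynamically sized CPU sets with CPU_EQUAL_S. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main (void)
{
  int ncpus = 128;                        /* hypothetical CPU count */
  size_t setsize = CPU_ALLOC_SIZE (ncpus);
  cpu_set_t *a = CPU_ALLOC (ncpus);
  cpu_set_t *b = CPU_ALLOC (ncpus);

  CPU_ZERO_S (setsize, a);
  CPU_ZERO_S (setsize, b);
  CPU_SET_S (3, setsize, a);
  CPU_SET_S (3, setsize, b);

  /* Nonzero when both sets contain exactly the same CPUs.  */
  printf ("equal: %d\n", CPU_EQUAL_S (setsize, a, b));

  CPU_FREE (a);
  CPU_FREE (b);
  return 0;
}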
static void test_scheduler_get_processors(void)
{
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
  rtems_status_code sc;
  rtems_name name = BLUE;
  rtems_id scheduler_id;
  cpu_set_t cpusetone;
  cpu_set_t cpuset;
  size_t big = 2 * CHAR_BIT * sizeof(cpu_set_t);
  size_t cpusetbigsize = CPU_ALLOC_SIZE(big);
  cpu_set_t *cpusetbigone;
  cpu_set_t *cpusetbig;

  CPU_ZERO(&cpusetone);
  CPU_SET(0, &cpusetone);

  sc = rtems_scheduler_ident(name, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_scheduler_get_processor_set(scheduler_id, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);

  sc = rtems_scheduler_get_processor_set(invalid_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  sc = rtems_scheduler_get_processor_set(scheduler_id, 0, &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_scheduler_get_processor_set(scheduler_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

  cpusetbigone = CPU_ALLOC(big);
  rtems_test_assert(cpusetbigone != NULL);

  cpusetbig = CPU_ALLOC(big);
  rtems_test_assert(cpusetbig != NULL);

  CPU_ZERO_S(cpusetbigsize, cpusetbigone);
  CPU_SET_S(0, cpusetbigsize, cpusetbigone);

  sc = rtems_scheduler_get_processor_set(scheduler_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL_S(cpusetbigsize, cpusetbig, cpusetbigone));

  CPU_FREE(cpusetbig);
  CPU_FREE(cpusetbigone);
#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
}
bool _Scheduler_priority_affinity_SMP_Set_affinity(
  const Scheduler_Control *scheduler,
  Thread_Control          *thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  Scheduler_priority_affinity_SMP_Node *node;
  States_Control                        current_state;

  /*
   * Validate that the cpuset meets basic requirements.
   */
  if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
    return false;
  }

  node = _Scheduler_priority_affinity_SMP_Thread_get_node( thread );

  /*
   * The old and new set are the same, so there is no point in
   * doing anything.
   */
  if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
    return true;

  current_state = thread->current_state;

  if ( _States_Is_ready( current_state ) ) {
    _Scheduler_priority_affinity_SMP_Block( scheduler, thread );
  }

  CPU_COPY( node->Affinity.set, cpuset );

  if ( _States_Is_ready( current_state ) ) {
    /*
     * FIXME: Do not ignore threads in need of help.
     */
    (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread );
  }

  return true;
}
/**
 * _CPU_set_Is_valid
 *
 * This routine validates that the cpuset size matches the correct size
 * for this system, that at least one valid CPU is set, and that no
 * invalid CPUs are set.
 */
bool _CPU_set_Is_valid( const cpu_set_t *cpuset, size_t setsize )
{
  cpu_set_t temp;

  if ( !cpuset )
    return false;

  if ( setsize != cpuset_default.setsize )
    return false;

  /* Validate that at least one valid CPU is set in cpuset */
  CPU_AND_S( cpuset_default.setsize, &temp, cpuset, cpuset_default.set );
  if ( CPU_COUNT_S( setsize, &temp ) == 0 )
    return false;

  /* Validate that no invalid CPUs are set in cpuset */
  if ( !CPU_EQUAL_S( setsize, &temp, cpuset ) )
    return false;

  return true;
}
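The same validation pattern can be sketched outside RTEMS. Below is a hedged illustration (not the RTEMS code path) using the glibc CPU_* macros, with the calling process's current affinity mask standing in for cpuset_default as the reference set of valid CPUs:

/* Illustration only: the three checks above, expressed with glibc macros. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>

static bool request_is_valid(const cpu_set_t *request, size_t setsize)
{
  cpu_set_t valid;
  cpu_set_t temp;

  if (request == NULL)
    return false;

  /* Only the system's native set size is accepted. */
  if (setsize != sizeof(cpu_set_t))
    return false;

  /* Reference set: the CPUs this process may currently run on. */
  if (sched_getaffinity(0, sizeof(valid), &valid) != 0)
    return false;

  /* At least one requested CPU must be a valid CPU... */
  CPU_AND_S(setsize, &temp, request, &valid);
  if (CPU_COUNT_S(setsize, &temp) == 0)
    return false;

  /* ...and no requested CPU may lie outside the valid set. */
  return CPU_EQUAL_S(setsize, &temp, request) != 0;
}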
static void test_task_get_set_affinity(void)
{
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
  rtems_id self_id = rtems_task_self();
  rtems_id task_id;
  rtems_status_code sc;
  cpu_set_t cpusetone;
  cpu_set_t cpuset;
  size_t big = 2 * CHAR_BIT * sizeof(cpu_set_t);
  size_t cpusetbigsize = CPU_ALLOC_SIZE(big);
  cpu_set_t *cpusetbigone;
  cpu_set_t *cpusetbig;

  CPU_ZERO(&cpusetone);
  CPU_SET(0, &cpusetone);

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_get_affinity(RTEMS_SELF, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);

  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);

  sc = rtems_task_get_affinity(RTEMS_SELF, 0, &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_task_set_affinity(RTEMS_SELF, 0, &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_task_get_affinity(invalid_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  sc = rtems_task_set_affinity(invalid_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  sc = rtems_task_get_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

  cpusetbigone = CPU_ALLOC(big);
  rtems_test_assert(cpusetbigone != NULL);

  cpusetbig = CPU_ALLOC(big);
  rtems_test_assert(cpusetbig != NULL);

  CPU_ZERO_S(cpusetbigsize, cpusetbigone);
  CPU_SET_S(0, cpusetbigsize, cpusetbigone);

  sc = rtems_task_get_affinity(task_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL_S(cpusetbigsize, cpusetbig, cpusetbigone));

  sc = rtems_task_set_affinity(task_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  CPU_FREE(cpusetbig);
  CPU_FREE(cpusetbigone);
#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
}
// Overrides osThread
int CpuAffinityThread::entryPoint()
{
#if AMDT_BUILD_TARGET == AMDT_WINDOWS_OS
    // Set the thread affinity to the one core specified
#pragma message ("TODO: Handle more than 64 cores")
    GT_ASSERT(m_core < 64);
    DWORD_PTR affinityMask = (DWORD_PTR)1 << m_core;
    SetThreadAffinityMask(osGetCurrentThreadHandle(), affinityMask);

    // Let the thread affinity take effect
    while (GetCurrentProcessorNumber() != m_core)
    {
        osSleep(1);
    }

#else
    int numCPUs;
    osGetAmountOfLocalMachineCPUs(numCPUs);

    if (0 >= numCPUs)
    {
        // At least 1, as CPU_ALLOC_SIZE(0) returns 0
        numCPUs = m_core + 1;
    }

    size_t size = CPU_ALLOC_SIZE(numCPUs);
    cpu_set_t* mask = CPU_ALLOC(numCPUs);
    GT_ASSERT(nullptr != mask);

    // Step 1, bind the thread to the requested logical processor
    // (use the _S variant, since the set was sized with CPU_ALLOC_SIZE)
    CPU_ZERO_S(size, mask);
    CPU_SET_S(m_core, size, mask);

    if (-1 == sched_setaffinity((pid_t)syscall(__NR_gettid), size, mask))
    {
        CPU_FREE(mask);
        return -1;
    }

    // Step 2, get the thread's current mask and make sure that it
    // is running on the target processor
    cpu_set_t* currentMask = CPU_ALLOC(numCPUs);
    GT_ASSERT(nullptr != currentMask);

    int retries = 8; // just don't loop forever!

    do
    {
        pthread_yield(); // trigger re-scheduling

        CPU_ZERO_S(size, currentMask);
        sched_getaffinity((pid_t)syscall(__NR_gettid), size, currentMask);
    }
    while (!CPU_EQUAL_S(size, mask, currentMask) && (0 != retries--));

    // OK, cleanup
    CPU_FREE(currentMask);
    CPU_FREE(mask);

    // retries drops below zero only when every attempt failed
    if (0 > retries) // not a fatal error - an offline processor can cause this
    {
        return -1;
    }

#endif

    osCpuid cpuInfo;
    m_pSessionTopology->processor = cpuInfo.getcore();
    m_pSessionTopology->numaNode = cpuInfo.getNodeId();

    return 0;
}
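On Linux the same pinning can also be expressed through the pthreads affinity calls. The following is a minimal hedged sketch (pin_self_to_cpu is a hypothetical helper, and the fixed-size cpu_set_t means cpu must be below CPU_SETSIZE) that replaces the syscall(__NR_gettid)/sched_setaffinity pair used above with pthread_setaffinity_np and verifies the result by reading the mask back:

/* Alternative sketch (assumption: POSIX threads on glibc, cpu < CPU_SETSIZE).
   For larger systems, switch to CPU_ALLOC/CPU_ALLOC_SIZE and the _S macros
   as in the code above. */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Returns 0 on success, -1 if the request failed or was not applied. */
static int pin_self_to_cpu(int cpu)
{
  cpu_set_t wanted;
  cpu_set_t actual;

  CPU_ZERO(&wanted);
  CPU_SET(cpu, &wanted);

  if (pthread_setaffinity_np(pthread_self(), sizeof(wanted), &wanted) != 0)
    return -1;

  /* Read the mask back and confirm it took effect. */
  if (pthread_getaffinity_np(pthread_self(), sizeof(actual), &actual) != 0)
    return -1;

  return CPU_EQUAL(&wanted, &actual) ? 0 : -1;
}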