/*
 * Test: round-trip sched_getaffinity()/sched_setaffinity() on the current
 * process and exercise CPU_AND/CPU_XOR on the result.
 * Any failure aborts via assert(); returns 0 on success.
 */
int main()
{
  unsigned int cpu;
  int result;
  cpu_set_t newmask;
  cpu_set_t mask;        /* zeroed reference set ("no CPUs") */
  cpu_set_t switchmask;  /* every even-numbered CPU */
  cpu_set_t flipmask;    /* all CPUs */

  CPU_ZERO(&mask);
  CPU_ZERO(&switchmask);
  CPU_ZERO(&flipmask);

  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &switchmask);	/* 0b01010101010101010101010101010101 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu++)
    {
      CPU_SET(cpu, &flipmask);		/* 0b11111111111111111111111111111111 */
    }

  /* The process affinity must be readable and non-empty. */
  assert(sched_getaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
  assert(!CPU_EQUAL(&newmask, &mask));

  /* Writing the affinity back must succeed unless unsupported (ENOSYS). */
  result = sched_setaffinity(0, sizeof(cpu_set_t), &newmask);
  if (result != 0)
    {
      int err =
#if defined (__PTW32_USES_SEPARATE_CRT)
      GetLastError();
#else
      errno;
#endif

      assert(err != ESRCH);
      assert(err != EFAULT);
      assert(err != EPERM);
      assert(err != EINVAL);
      assert(err != EAGAIN);
      assert(err == ENOSYS);
      /* NOTE(review): mask was CPU_ZERO'd above and never modified, so
       * CPU_COUNT(&mask) is 0 here, not 1 — this assert looks wrong
       * (probably meant CPU_COUNT(&newmask)); confirm against upstream. */
      assert(CPU_COUNT(&mask) == 1);
    }
  else
    {
      /* NOTE(review): mask is still empty at this point, so this branch
       * can never be entered; upstream likely tested newmask here. */
      if (CPU_COUNT(&mask) > 1)
        {
          CPU_AND(&newmask, &mask, &switchmask); /* Remove every other CPU */
          assert(sched_setaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
          assert(sched_getaffinity(0, sizeof(cpu_set_t), &mask) == 0);
          CPU_XOR(&newmask, &mask, &flipmask);  /* Switch to all alternative CPUs */
          assert(sched_setaffinity(0, sizeof(cpu_set_t), &newmask) == 0);
          assert(sched_getaffinity(0, sizeof(cpu_set_t), &mask) == 0);
          assert(!CPU_EQUAL(&newmask, &mask));
        }
    }
  return 0;
}
void set_cpu_affinity(void) { // set cpu affinity cpu_set_t mask; CPU_ZERO(&mask); int i; uint32_t m = 1; for (i = 0; i < 32; i++, m <<= 1) { if (cfg.cpus & m) CPU_SET(i, &mask); } if (sched_setaffinity(0, sizeof(mask), &mask) == -1) { fprintf(stderr, "Warning: cannot set cpu affinity\n"); fprintf(stderr, " "); perror("sched_setaffinity"); } // verify cpu affinity cpu_set_t mask2; CPU_ZERO(&mask2); if (sched_getaffinity(0, sizeof(mask2), &mask2) == -1) { fprintf(stderr, "Warning: cannot verify cpu affinity\n"); fprintf(stderr, " "); perror("sched_getaffinity"); } else if (arg_debug) { if (CPU_EQUAL(&mask, &mask2)) printf("CPU affinity set\n"); else printf("CPU affinity not set\n"); } }
/*
 * Thread body: assert that the affinity the kernel reports for the
 * running thread matches the affinity recorded in the pthread_attr_t
 * handed over through 'arg'.  Always returns NULL.
 */
void * mythread(void * arg)
{
  pthread_attr_t *attrs = (pthread_attr_t *) arg;
  cpu_set_t fromThread;
  cpu_set_t fromAttr;

  assert(pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &fromThread) == 0);
  assert(pthread_attr_getaffinity_np(attrs, sizeof(cpu_set_t), &fromAttr) == 0);
  assert(CPU_EQUAL(&fromAttr, &fromThread));

  return (void*) 0;
}
/*
 * Exercise rtems_scheduler_get_processor_set():
 *  - error paths (NULL buffer, invalid id, zero-sized set), then
 *  - success with a stack cpu_set_t and with an oversized CPU_ALLOC'd set.
 * The scheduler named BLUE is expected to own exactly CPU 0.
 * Compiled out entirely when <sys/cpuset.h> support is absent.
 */
static void test_scheduler_get_processors(void)
{
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
  rtems_status_code sc;
  rtems_name name = BLUE;
  rtems_id scheduler_id;
  cpu_set_t cpusetone;   /* reference set: only CPU 0 */
  cpu_set_t cpuset;
  size_t big = 2 * CHAR_BIT * sizeof(cpu_set_t);  /* twice the default width */
  size_t cpusetbigsize = CPU_ALLOC_SIZE(big);
  cpu_set_t *cpusetbigone;
  cpu_set_t *cpusetbig;

  CPU_ZERO(&cpusetone);
  CPU_SET(0, &cpusetone);

  sc = rtems_scheduler_ident(name, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* NULL output buffer must be rejected */
  sc = rtems_scheduler_get_processor_set(scheduler_id, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);

  sc = rtems_scheduler_get_processor_set(invalid_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  /* a zero-sized set cannot describe any processor */
  sc = rtems_scheduler_get_processor_set(scheduler_id, 0, &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_scheduler_get_processor_set(scheduler_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

  /* repeat with dynamically allocated sets larger than cpu_set_t */
  cpusetbigone = CPU_ALLOC(big);
  rtems_test_assert(cpusetbigone != NULL);

  cpusetbig = CPU_ALLOC(big);
  rtems_test_assert(cpusetbig != NULL);

  CPU_ZERO_S(cpusetbigsize, cpusetbigone);
  CPU_SET_S(0, cpusetbigsize, cpusetbigone);

  sc = rtems_scheduler_get_processor_set(scheduler_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL_S(cpusetbigsize, cpusetbig, cpusetbigone));

  CPU_FREE(cpusetbig);
  CPU_FREE(cpusetbigone);
#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
}
/*
 * Exercise the trivial set predicates on two freshly zeroed sets
 * (set1/set2 are file-scope cpu_set_t objects): CPU_EQUAL, CPU_CMP,
 * and CPU_EMPTY must all hold for empty sets.
 */
static void test_cpu_equal_case_1(void)
{
  /*
   * CPU_EQUAL
   */
  puts( "Exercise CPU_ZERO, CPU_EQUAL, CPU_CMP, and CPU_EMPTY" );
  CPU_ZERO(&set1);
  CPU_ZERO(&set2);

  /* test that all bits are equal */
  rtems_test_assert( CPU_EQUAL(&set1, &set2) );

  /* compare all bits */
  rtems_test_assert( CPU_CMP(&set1, &set2) );

  /* compare all bits */
  rtems_test_assert( CPU_EMPTY(&set1) );
}
/*
 * Bind 'thread' to the processor recorded in thread->cpu_id.
 *
 * Returns 0 on success (or when the thread is already bound, or when the
 * emulator is disabled, i.e. thread == NULL), 1 on error.
 *
 * Fixes over the previous version:
 *  - the two CPU_ALLOC'd sets were leaked on every path; they are now
 *    released via a single cleanup label,
 *  - CPU_ALLOC() results are checked before use,
 *  - the comparison uses CPU_EQUAL_S() with the allocated size instead of
 *    CPU_EQUAL(), which reads a full sizeof(cpu_set_t) and can over-read
 *    a dynamically allocated set (see CPU_SET(3)).
 */
static int bind_cpu(thread_t *thread) {
	size_t setsize;
	cpu_set_t *cur_cpuset = NULL;
	cpu_set_t *new_cpuset = NULL;
	int rc = 1;

	int ncpus = max_number_of_cpus();

	if (thread == NULL) {
		// if thread is NULL it means the emulator is disabled, return without setting CPU affinity
		//printf("thread self is null");
		return 0;
	}

	if (ncpus == 0) {
		return 1;
	}

	setsize = CPU_ALLOC_SIZE(ncpus);
	cur_cpuset = CPU_ALLOC(ncpus);
	new_cpuset = CPU_ALLOC(ncpus);
	if (cur_cpuset == NULL || new_cpuset == NULL) {
		DBG_LOG(ERROR, "Cannot allocate cpu sets for thread tid [%d]\n", thread->tid);
		goto out;
	}
	CPU_ZERO_S(setsize, cur_cpuset);
	CPU_ZERO_S(setsize, new_cpuset);
	CPU_SET_S(thread->cpu_id, setsize, new_cpuset);

	if (pthread_getaffinity_np(thread->pthread, setsize, cur_cpuset) != 0) {
		DBG_LOG(ERROR, "Cannot get thread tid [%d] affinity, pthread: 0x%lx on processor %d\n",
			thread->tid, thread->pthread, thread->cpu_id);
		goto out;
	}

	if (CPU_EQUAL_S(setsize, cur_cpuset, new_cpuset)) {
		// already bound to the requested processor; nothing to do
		//printf("No need to bind CPU\n");
		rc = 0;
		goto out;
	}

	DBG_LOG(INFO, "Binding thread tid [%d] pthread: 0x%lx on processor %d\n",
		thread->tid, thread->pthread, thread->cpu_id);

	if (pthread_setaffinity_np(thread->pthread, setsize, new_cpuset) != 0) {
		DBG_LOG(ERROR, "Cannot bind thread tid [%d] pthread: 0x%lx on processor %d\n",
			thread->tid, thread->pthread, thread->cpu_id);
		goto out;
	}

	rc = 0;

out:
	/* CPU_FREE is free()-based; NULL is safe */
	CPU_FREE(new_cpuset);
	CPU_FREE(cur_cpuset);
	return rc;
}
/* We intercept this call. */ int numa_sched_setaffinity(pid_t pid, struct bitmask *mask) { cpu_set_t requested_mask[CPU_SETSIZE], allowed_mask[CPU_SETSIZE], lnuma_mask[CPU_SETSIZE]; static void * (*real_function)(); int n_cpus; int allow_change; n_cpus = (int) sysconf(_SC_NPROCESSORS_CONF); fprintf(stderr, "There are %d CPUs.\n", n_cpus); /* Check whether the requested mask is allowed. */ /* First gets the list of LSB-allocated CPUs. If it's empty, we */ /* check if we are running exclusively on the node. */ allow_change = 0; if (get_allowed_CPUs(allowed_mask)>0) { int bit; CPU_ZERO(lnuma_mask); for (bit = 0; bit < n_cpus; bit++) if(((1L << bit) & *(mask->maskp)) != 0 ) CPU_SET(bit,lnuma_mask); CPU_OR(requested_mask, lnuma_mask, allowed_mask); allow_change = CPU_EQUAL(requested_mask, allowed_mask); } else { allow_change = have_full_node(n_cpus); } if (allow_change) { real_function = (void *(*) ()) dlsym(RTLD_NEXT, "sched_setaffinity"); return (int) real_function(pid, sizeof(lnuma_mask),lnuma_mask); } else { char *env_var; if ((env_var = getenv("AFFINITY_NO_COMPLAIN"))) return 0; /* * The requested mask does not match with LSF one, we give to numactl * the mask defined by LSF */ else{ fprintf(stderr, "Using cores from cpuset.\n"); real_function = (void *(*) ()) dlsym(RTLD_NEXT, "sched_setaffinity"); return (int) real_function(pid, sizeof(allowed_mask),allowed_mask); } } }
/*
 * Thread body for the Win32 affinity test: verify the thread inherited
 * the parent's CPU set (passed via 'arg'), then cross-check it against
 * the native SetThreadAffinityMask() view of the same mask.
 * The casts deliberately break cpu_set_t opacity for test purposes.
 */
static void * mythread(void * arg)
{
  HANDLE threadH = GetCurrentThread();
  cpu_set_t *parentCpus = (cpu_set_t*) arg;
  cpu_set_t threadCpus;
  DWORD_PTR vThreadMask;
  cpuset_to_ulint a, b;

  assert(pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &threadCpus) == 0);
  assert(CPU_EQUAL(parentCpus, &threadCpus));

  /* Reinterpret the cpu_set_t as a raw Win32 affinity word and set it
   * natively; the returned previous mask must match what we had. */
  vThreadMask = SetThreadAffinityMask(threadH, (*(PDWORD_PTR)&threadCpus) /* Violating Opacity */);
  assert(vThreadMask != 0);
  assert(memcmp(&vThreadMask, &threadCpus, sizeof(DWORD_PTR)) == 0);

  a.cpuset = *parentCpus;
  b.cpuset = threadCpus;
  /* Violates opacity */
  printf("CPU affinity: Parent/Thread = 0x%lx/0x%lx\n", a.bits, b.bits);

  return (void*) 0;
}
/*
 * SMP scheduler test: two schedulers, SCHED_A owning CPU 0 and SCHED_B
 * owning CPU 1.  Verifies scheduler identification, per-scheduler
 * processor sets, and moving a task between schedulers with
 * rtems_task_set_scheduler()/rtems_task_set_affinity().
 */
static void test(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  rtems_id scheduler_id;
  rtems_id scheduler_a_id;
  rtems_id scheduler_b_id;
  rtems_id scheduler_c_id;
  cpu_set_t cpuset;
  cpu_set_t first_cpu;   /* { CPU 0 } */
  cpu_set_t second_cpu;  /* { CPU 1 } */
  cpu_set_t all_cpus;    /* { CPU 0, CPU 1 } */

  main_task_id = rtems_task_self();

  CPU_ZERO(&first_cpu);
  CPU_SET(0, &first_cpu);

  CPU_ZERO(&second_cpu);
  CPU_SET(1, &second_cpu);

  CPU_ZERO(&all_cpus);
  CPU_SET(0, &all_cpus);
  CPU_SET(1, &all_cpus);

  rtems_test_assert(rtems_get_current_processor() == 0);

  sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_a_id != scheduler_b_id);

  /* SCHED_C is not configured, so ident must fail */
  sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  CPU_ZERO(&cpuset);
  sc = rtems_scheduler_get_processor_set(
    scheduler_a_id,
    sizeof(cpuset),
    &cpuset
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  CPU_ZERO(&cpuset);
  sc = rtems_scheduler_get_processor_set(
    scheduler_b_id,
    sizeof(cpuset),
    &cpuset
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* a new task starts on the creator's scheduler (A, CPU 0) */
  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_a_id);

  CPU_ZERO(&cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  sc = rtems_task_set_scheduler(task_id, scheduler_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* an id that names no scheduler is rejected */
  sc = rtems_task_set_scheduler(task_id, scheduler_b_id + 1);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_b_id);

  /* after the move the affinity follows scheduler B's processor */
  CPU_ZERO(&cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));

  sc = rtems_task_set_affinity(task_id, sizeof(all_cpus), &all_cpus);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* an affinity outside scheduler B's processors is rejected */
  sc = rtems_task_set_affinity(task_id, sizeof(first_cpu), &first_cpu);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_b_id);

  sc = rtems_task_set_affinity(task_id, sizeof(second_cpu), &second_cpu);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_b_id);

  sc = rtems_task_start(task_id, task, 0);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* setting the scheduler of a started task to its current one is ok */
  sc = rtems_task_set_scheduler(task_id, scheduler_b_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
int thread_pthread_create(struct thread *t, void * (*s)(void *)) { int i, err; pthread_attr_t attr; if (pthread_attr_init(&attr) != 0) { perror("mvm: pthread_attr_init"); mvm_halt(); } if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) != 0) { perror("mvm: pthread_attr_setdetachstate"); mvm_halt(); } for (i = 0; i < 10; i++) { err = pthread_create(&t->id, &attr, s, (void *)t); if (err == 0) { break; } else if (err == EAGAIN) { usleep(50000 * i); continue; } else { break; } } if (err != 0) { perror("mvm: pthread_create"); mvm_halt(); } #if defined(__linux) cpu_set_t cpuset, retset; pthread_t thread; int retval; /* first thread is assigned to cpu 0 */ static int next = 0; thread = pthread_self(); for (;;) { /* try to assign to next cpu */ CPU_ZERO(&cpuset); CPU_SET(next++, &cpuset); if ((retval = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset)) != 0) { /* if cpu does not exist, start over at 0 */ if (retval == EINVAL) { next = 0; continue; } errno = retval; perror("mvm: pthread_setaffinity_np"); mvm_halt(); } /* retrieve mask */ if ((retval = pthread_getaffinity_np(thread, sizeof(cpu_set_t), &retset)) != 0) { errno = retval; perror("mvm: pthread_setaffinity_np"); mvm_halt(); } /* ensure mask is set correctly, if not start over at 0 */ if (!CPU_EQUAL(&cpuset, &retset)) { next = 0; continue; } /* mask is set properly, break out */ break; } errno = 0; mvm_print("thread %" PRIu32 ": printing bound CPUs:\n", t->ref); for(i = 0; i < CPU_SETSIZE; i++) { if (CPU_ISSET(i, &cpuset)) mvm_print(" CPU %d\n", i); } #endif return 0; }
/*
 * SMP scheduler test tolerant of a single-CPU configuration: SCHED_A owns
 * CPU 0 and, when more than one processor is online, SCHED_B owns CPU 1.
 * Also verifies per-scheduler semaphore ceiling priorities via
 * rtems_semaphore_set_priority().
 */
static void test(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  rtems_id scheduler_id;
  rtems_id scheduler_a_id;
  rtems_id scheduler_b_id;
  rtems_id scheduler_c_id;
  rtems_task_priority prio;
  cpu_set_t cpuset;
  cpu_set_t first_cpu;   /* { CPU 0 } */
  cpu_set_t second_cpu;  /* { CPU 1 } */
  cpu_set_t all_cpus;    /* { CPU 0, CPU 1 } */
  uint32_t cpu_count;

  main_task_id = rtems_task_self();

  CPU_ZERO(&first_cpu);
  CPU_SET(0, &first_cpu);

  CPU_ZERO(&second_cpu);
  CPU_SET(1, &second_cpu);

  CPU_ZERO(&all_cpus);
  CPU_SET(0, &all_cpus);
  CPU_SET(1, &all_cpus);

  cpu_count = rtems_get_processor_count();

  rtems_test_assert(rtems_get_current_processor() == 0);

  sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  if (cpu_count > 1) {
    sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_a_id != scheduler_b_id);
  }

  /* SCHED_C is not configured */
  sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  sc = rtems_semaphore_create(
    SCHED_A,
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
    1,
    &sema_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* change the ceiling on scheduler A; old ceiling (1) is returned */
  prio = 2;
  sc = rtems_semaphore_set_priority(sema_id, scheduler_a_id, prio, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  if (cpu_count > 1) {
    /* the semaphore has no ceiling defined for scheduler B */
    prio = 1;
    sc = rtems_semaphore_set_priority(sema_id, scheduler_b_id, prio, &prio);
    rtems_test_assert(sc == RTEMS_NOT_DEFINED);
    rtems_test_assert(prio == 2);
  }

  CPU_ZERO(&cpuset);
  sc = rtems_scheduler_get_processor_set(
    scheduler_a_id,
    sizeof(cpuset),
    &cpuset
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  if (cpu_count > 1) {
    CPU_ZERO(&cpuset);
    sc = rtems_scheduler_get_processor_set(
      scheduler_b_id,
      sizeof(cpuset),
      &cpuset
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));
  }

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* a new task starts on the creator's scheduler (A, CPU 0) */
  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_a_id);

  CPU_ZERO(&cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  rtems_test_assert(sched_get_priority_min(SCHED_RR) == 1);
  rtems_test_assert(sched_get_priority_max(SCHED_RR) == 254);

  if (cpu_count > 1) {
    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* an id naming no scheduler is rejected */
    sc = rtems_task_set_scheduler(task_id, scheduler_b_id + 1, 1);
    rtems_test_assert(sc == RTEMS_INVALID_ID);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    CPU_ZERO(&cpuset);
    sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));

    sc = rtems_task_set_affinity(task_id, sizeof(all_cpus), &all_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* an affinity outside scheduler B's processors is rejected */
    sc = rtems_task_set_affinity(task_id, sizeof(first_cpu), &first_cpu);
    rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_task_set_affinity(task_id, sizeof(second_cpu), &second_cpu);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_task_start(task_id, task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(sema_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
int
pthread_create (pthread_t * tid,
		const pthread_attr_t * attr,
		void *(PTW32_CDECL *start) (void *), void *arg)
     /*
      * ------------------------------------------------------
      * DOCPUBLIC
      *      This function creates a thread running the start function,
      *      passing it the parameter value, 'arg'. The 'attr'
      *      argument specifies optional creation attributes.
      *      The identity of the new thread is returned
      *      via 'tid', which should not be NULL.
      *
      * PARAMETERS
      *      tid
      *              pointer to an instance of pthread_t
      *
      *      attr
      *              optional pointer to an instance of pthread_attr_t
      *
      *      start
      *              pointer to the starting routine for the new thread
      *
      *      arg
      *              optional parameter passed to 'start'
      *
      *
      * DESCRIPTION
      *      This function creates a thread running the start function,
      *      passing it the parameter value, 'arg'. The 'attr'
      *      argument specifies optional creation attributes.
      *      The identity of the new thread is returned
      *      via 'tid', which should not be the NULL pointer.
      *
      * RESULTS
      *              0               successfully created thread,
      *              EINVAL          attr invalid,
      *              EAGAIN          insufficient resources.
      *
      * ------------------------------------------------------
      */
{
  pthread_t thread = { 0 }; // init to shut up MSVC2013: warning C4701 : potentially uninitialized local variable 'thread' used
  ptw32_thread_t * tp;
  ptw32_thread_t * sp;          /* the creating ("self") thread */
  register pthread_attr_t a;
  HANDLE threadH = 0;
  int result = EAGAIN;
  int run = PTW32_TRUE;
  ThreadParms *parms = NULL;    /* heap-allocated; ownership passes to the new thread on success */
  unsigned int stackSize;
  int priority;

  /*
   * Before doing anything, check that tid can be stored through
   * without invoking a memory protection error (segfault).
   * Make sure that the assignment below can't be optimised out by the compiler.
   * This is assured by conditionally assigning *tid again at the end.
   */
  tid->x = 0;

  if (NULL == (sp = (ptw32_thread_t *)pthread_self().p))
    {
      goto FAIL0;
    }

  if (attr != NULL)
    {
      a = *attr;
    }
  else
    {
      a = NULL;
    }

  thread = ptw32_new();
  if (thread.p == NULL)
    {
      goto FAIL0;
    }

  tp = (ptw32_thread_t *) thread.p;

  priority = tp->sched_priority;

  if ((parms = (ThreadParms *) malloc (sizeof (*parms))) == NULL)
    {
      goto FAIL0;
    }

  parms->tid = thread;
  parms->start = start;
  parms->arg = arg;

  /*
   * Threads inherit their initial sigmask and CPU affinity from their creator thread.
   */
#if defined(HAVE_SIGSET_T)
  tp->sigmask = sp->sigmask;
#endif
#if defined(HAVE_CPU_AFFINITY)
  tp->cpuset = sp->cpuset;
#endif

  if (a != NULL)
    {
#if defined(HAVE_CPU_AFFINITY)
      /* An attr cpuset overrides the inherited one, but only if it is
       * non-empty (an all-zero set means "not set"). */
      cpu_set_t none;
      cpu_set_t attr_cpuset;
      ((_sched_cpu_set_vector_*)&attr_cpuset)->_cpuset = a->cpuset;
      CPU_ZERO(&none);
      if (! CPU_EQUAL(&attr_cpuset, &none))
        {
          tp->cpuset = a->cpuset;
        }
#endif
      stackSize = (unsigned int)a->stacksize;
      tp->detachState = a->detachstate;
      priority = a->param.sched_priority;
      if (a->thrname != NULL)
        tp->name = _strdup(a->thrname);

#if (THREAD_PRIORITY_LOWEST > THREAD_PRIORITY_NORMAL)
      /* WinCE */
#else
      /* Everything else */

      /*
       * Thread priority must be set to a valid system level
       * without altering the value set by pthread_attr_setschedparam().
       */

      /*
       * PTHREAD_EXPLICIT_SCHED is the default because Win32 threads
       * don't inherit their creator's priority. They are started with
       * THREAD_PRIORITY_NORMAL (win32 value). The result of not supplying
       * an 'attr' arg to pthread_create() is equivalent to defaulting to
       * PTHREAD_EXPLICIT_SCHED and priority THREAD_PRIORITY_NORMAL.
       */
      if (PTHREAD_INHERIT_SCHED == a->inheritsched)
        {
          /*
           * If the thread that called pthread_create() is a Win32 thread
           * then the inherited priority could be the result of a temporary
           * system adjustment. This is not the case for POSIX threads.
           */
          priority = sp->sched_priority;
        }
#endif
    }
  else
    {
      /*
       * Default stackSize
       */
      stackSize = PTHREAD_STACK_MIN;
    }

  tp->state = run ? PThreadStateInitial : PThreadStateSuspended;

  tp->keys = NULL;

  /*
   * Threads must be started in suspended mode and resumed if necessary
   * after _beginthreadex returns us the handle. Otherwise we set up a
   * race condition between the creating and the created threads.
   * Note that we also retain a local copy of the handle for use
   * by us in case thread.p->threadH gets NULLed later but before we've
   * finished with it here.
   */
#if ! defined (PTW32_CONFIG_MINGW) || defined (__MSVCRT__) || defined (__DMC__)

  tp->threadH =
    threadH =
    (HANDLE) _beginthreadex ((void *) NULL,	/* No security info             */
			     stackSize,		/* default stack size   */
			     ptw32_threadStart,
			     parms,
			     (unsigned) CREATE_SUSPENDED,
			     (unsigned *) &(tp->thread));

  if (threadH != 0)
    {
      if (a != NULL)
	{
	  (void) ptw32_setthreadpriority (thread, SCHED_OTHER, priority);
	}

#if defined(HAVE_CPU_AFFINITY)
      SetThreadAffinityMask(tp->threadH, tp->cpuset);
#endif

      if (run)
	{
	  ResumeThread (threadH);
	}
    }

#else

  {
    ptw32_mcs_local_node_t stateLock;

    /*
     * This lock will force pthread_threadStart() to wait until we have
     * the thread handle and have set the priority.
     */
    ptw32_mcs_lock_acquire(&tp->stateLock, &stateLock);

    tp->threadH =
      threadH =
      (HANDLE) _beginthread (ptw32_threadStart, stackSize,	/* default stack size   */
			     parms);

    /*
     * Make the return code match _beginthreadex's.
     */
    if (threadH == (HANDLE) - 1L)
      {
	tp->threadH = threadH = 0;
      }
    else
      {
	if (!run)
	  {
	    /*
	     * beginthread does not allow for create flags, so we do it now.
	     * Note that beginthread itself creates the thread in SUSPENDED
	     * mode, and then calls ResumeThread to start it.
	     */
	    SuspendThread (threadH);
	  }

	if (a != NULL)
	  {
	    (void) ptw32_setthreadpriority (thread, SCHED_OTHER, priority);
	  }

#if defined(HAVE_CPU_AFFINITY)
	SetThreadAffinityMask(tp->threadH, tp->cpuset);
#endif
      }

    ptw32_mcs_lock_release (&stateLock);
  }
#endif

  result = (threadH != 0) ? 0 : EAGAIN;

  /*
   * Fall Through Intentionally
   */

  /*
   * ------------
   * Failure Code
   * ------------
   */

FAIL0:
  if (result != 0)
    {
      /* on failure we own and must release both the thread object and parms */
      ptw32_threadDestroy (thread);
      tp = NULL;

      if (parms != NULL)
	{
	  free (parms);
	}
    }
  else
    {
      *tid = thread;
    }

#if defined(_UWIN)
  if (result == 0)
    pthread_count++;
#endif

  return (result);
}				/* pthread_create */
/*
 * SMP scheduler test using a ceiling mutex (CMTX) and an inheritance
 * mutex (IMTX).  SCHED_A owns CPU 0; when more than one processor is
 * online SCHED_B owns CPU 1 and a task is migrated between schedulers.
 * Also checks that a task holding resources cannot change scheduler
 * (RTEMS_RESOURCE_IN_USE) and finishes by exercising dynamic processor
 * add/remove.
 */
static void test(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  rtems_id scheduler_id;
  rtems_id scheduler_a_id;
  rtems_id scheduler_b_id;
  rtems_id scheduler_c_id;
  rtems_task_priority prio;
  cpu_set_t cpuset;
  cpu_set_t first_cpu;    /* { CPU 0 } */
  cpu_set_t second_cpu;   /* { CPU 1 } */
  cpu_set_t all_cpus;     /* every representable CPU (CPU_FILL) */
  cpu_set_t online_cpus;  /* the CPUs actually available */
  uint32_t cpu_count;

  rtems_test_assert(rtems_get_current_processor() == 0);

  cpu_count = rtems_get_processor_count();
  main_task_id = rtems_task_self();

  CPU_ZERO(&first_cpu);
  CPU_SET(0, &first_cpu);

  CPU_ZERO(&second_cpu);
  CPU_SET(1, &second_cpu);

  CPU_FILL(&all_cpus);

  CPU_ZERO(&online_cpus);
  CPU_SET(0, &online_cpus);

  if (cpu_count > 1) {
    CPU_SET(1, &online_cpus);
  }

  sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  if (cpu_count > 1) {
    sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_a_id != scheduler_b_id);
  }

  /* SCHED_C exists in this configuration (it just owns no processor) */
  sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name('C', 'M', 'T', 'X'),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
    1,
    &cmtx_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name('I', 'M', 'T', 'X'),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    1,
    &imtx_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* change the ceiling on scheduler A; old ceiling (1) is returned */
  prio = 2;
  sc = rtems_semaphore_set_priority(cmtx_id, scheduler_a_id, prio, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  if (cpu_count > 1) {
    /* no ceiling is defined for scheduler B */
    prio = 1;
    sc = rtems_semaphore_set_priority(cmtx_id, scheduler_b_id, prio, &prio);
    rtems_test_assert(sc == RTEMS_NOT_DEFINED);
    rtems_test_assert(prio == 2);
  }

  CPU_ZERO(&cpuset);
  sc = rtems_scheduler_get_processor_set(
    scheduler_a_id,
    sizeof(cpuset),
    &cpuset
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  if (cpu_count > 1) {
    CPU_ZERO(&cpuset);
    sc = rtems_scheduler_get_processor_set(
      scheduler_b_id,
      sizeof(cpuset),
      &cpuset
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));
  }

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* a new task starts on the creator's scheduler (A) with the default
   * affinity of all online processors */
  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_a_id);

  CPU_ZERO(&cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &online_cpus));

  rtems_test_assert(sched_get_priority_min(SCHED_RR) == 1);
  rtems_test_assert(sched_get_priority_max(SCHED_RR) == 254);

  /* a scheduler without processors cannot accept the task */
  sc = rtems_task_set_scheduler(task_id, scheduler_c_id, 1);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  sc = rtems_task_set_scheduler(task_id, scheduler_c_id + 1, 1);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  if (cpu_count > 1) {
    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    CPU_ZERO(&cpuset);
    sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &online_cpus));

    sc = rtems_task_set_affinity(task_id, sizeof(all_cpus), &all_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* an affinity outside scheduler B's processors is rejected */
    sc = rtems_task_set_affinity(task_id, sizeof(first_cpu), &first_cpu);
    rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_task_set_affinity(task_id, sizeof(online_cpus), &online_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_affinity(task_id, sizeof(second_cpu), &second_cpu);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* with affinity pinned to CPU 1 the task cannot go to scheduler A */
    sc = rtems_task_set_scheduler(task_id, scheduler_a_id, 1);
    rtems_test_assert(sc == RTEMS_UNSATISFIED);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_semaphore_obtain(imtx_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(task_id, task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Ensure that the other task waits for the mutex owned by us */
    sc = rtems_task_wake_after(2);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* we hold imtx_id, so changing our own scheduler must fail */
    sc = rtems_task_set_scheduler(RTEMS_SELF, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);

    sc = rtems_semaphore_release(imtx_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(cmtx_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(imtx_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  test_scheduler_add_remove_processors();
}
/* We intercept this call. */
/*
 * Intercepted sched_setaffinity(): permit the requested mask only if it
 * is a subset of the batch-system-allocated CPUs (or the job owns the
 * full node).  Otherwise the user's mask is remapped ("shuffled") onto
 * the allowed CPUs before being applied.
 */
int sched_setaffinity(pid_t pid, size_t cpusetsize, const cpu_set_t *mask)
{
    /* NOTE(review): these are arrays of CPU_SETSIZE cpu_set_t objects
     * (hugely oversized); a single cpu_set_t each would presumably
     * suffice — confirm against get_allowed_CPUs()'s expectations. */
    cpu_set_t requested_mask[CPU_SETSIZE], allowed_mask[CPU_SETSIZE], used_mask[CPU_SETSIZE];
    static void * (*real_function)();
    int n_cpus,c_cpus;
    int allow_change;
    int *mapp_allowed_cpus,*l_allowed_mask;

    n_cpus = (int) sysconf(_SC_NPROCESSORS_CONF);
    fprintf(stderr, "There are %d CPUs.\n", n_cpus);

    /* Check whether the requested mask is allowed. */
    /* First gets the list of LSB-allocated CPUs. If it's empty, we
     * check if we are running exclusively on the node. */
    allow_change = 0;
    c_cpus = get_allowed_CPUs(allowed_mask);
    CPU_ZERO(requested_mask);
    if (c_cpus > 0) {
        /* allowed iff requested ⊆ allowed: the union must add no CPUs */
        CPU_OR(requested_mask, mask, allowed_mask);
        allow_change = CPU_EQUAL(requested_mask, allowed_mask);
    } else {
        allow_change = have_full_node(n_cpus);
    }

    if (allow_change) {
        fprintf(stderr, "Change allowed.\n");
        real_function = (void *(*) ()) dlsym(RTLD_NEXT, "sched_setaffinity");
        return (int) real_function(pid, cpusetsize, mask);
    } else {
        char *env_var;
        if ((env_var = getenv("AFFINITY_NO_COMPLAIN")))
            return 0;
        /*
         * The requested mask does not match with LSF one, we shuffle the
         * user mask
         * Algorithm to get the mapping - Urban Borstnik
         * 1. Let M(:) ← -1.
         * 1. For each p in A, let M(p) ← p.
         * 2. Let i←x|A(x)>|A| // I.e., find first entry in A that is greater than
         * the requested core count.
         * 3. For each p in P where M(p)<0, do
         *    let M(p) = A(i)
         *    i = (i+1) % |A|
         */
        else{
            int p,greater,bit;

            fprintf(stderr, "Shuffling.\n");
            /* M: identity on allowed CPUs, -1 elsewhere (filled below) */
            mapp_allowed_cpus = malloc(n_cpus*sizeof(int));
            memset (mapp_allowed_cpus, -1, n_cpus*sizeof (int) );
            l_allowed_mask = calloc(c_cpus,sizeof(int));
            get_logical_allowed_CPUs(l_allowed_mask);
            greater = c_cpus;
            for(p=0; p < c_cpus; p++) {
                mapp_allowed_cpus[l_allowed_mask[p]] = l_allowed_mask[p];
                /* NOTE(review): compares the CPU *value* against 'greater'
                 * but stores the *index* p — value/index mix-up relative
                 * to the algorithm comment above; confirm intent. */
                if(l_allowed_mask[p] > greater)
                    greater = p;
            }
            /* NOTE(review): if no entry exceeded c_cpus, 'greater' stays
             * c_cpus, and the first l_allowed_mask[index] below reads one
             * past the end of the c_cpus-element array — suspected
             * out-of-bounds read; confirm and clamp if so. */
            int index = greater;
            for(p=0; p < n_cpus; p++)
                if(mapp_allowed_cpus[p] == -1) {
                    mapp_allowed_cpus[p] = l_allowed_mask[index];
                    index = (index+1) % c_cpus;
                }
            /* build the remapped mask actually handed to the kernel */
            CPU_ZERO(used_mask);
            for (bit=0;bit<n_cpus;bit++)
                if(CPU_ISSET(bit,mask)){
                    CPU_SET(mapp_allowed_cpus[bit],used_mask);
                }
            free(mapp_allowed_cpus);
            free(l_allowed_mask);
            real_function = (void *(*) ()) dlsym(RTLD_NEXT, "sched_setaffinity");
            return (int) real_function(pid, sizeof(used_mask), used_mask);
        }
    }
}
/*
 * Exercise the CPU_* set macros (ZERO/SET/CLR/ISSET/COUNT/EQUAL and the
 * OR/AND/XOR combiners) without touching any real scheduling state.
 * Note: the preceding #endif closes a #if (outside this view) that picks
 * between a main() and a test_affinity1() entry point.
 */
int
test_affinity1(void)
#endif
{
  unsigned int cpu;
  cpu_set_t newmask;
  cpu_set_t src1mask;   /* even bits set:  0101...            */
  cpu_set_t src2mask;   /* low half all ones, high half 0101... */
  cpu_set_t src3mask;   /* even bits set (same as src1mask)   */

  CPU_ZERO(&newmask);
  CPU_ZERO(&src1mask);

  /* a zeroed set and a memset-zeroed set must be indistinguishable */
  memset(&src2mask, 0, sizeof(cpu_set_t));
  assert(memcmp(&src1mask, &src2mask, sizeof(cpu_set_t)) == 0);
  assert(CPU_EQUAL(&src1mask, &src2mask));
  assert(CPU_COUNT(&src1mask) == 0);

  CPU_ZERO(&src1mask);
  CPU_ZERO(&src2mask);
  CPU_ZERO(&src3mask);

  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &src1mask);	/* 0b01010101010101010101010101010101 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*4; cpu++)
    {
      CPU_SET(cpu, &src2mask);	/* 0b00000000000000001111111111111111 */
    }
  for (cpu = sizeof(cpu_set_t)*4; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &src2mask);	/* 0b01010101010101011111111111111111 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &src3mask);	/* 0b01010101010101010101010101010101 */
    }

  assert(CPU_COUNT(&src1mask) == (sizeof(cpu_set_t)*4));
  assert(CPU_COUNT(&src2mask) == ((sizeof(cpu_set_t)*4 + (sizeof(cpu_set_t)*2))));
  assert(CPU_COUNT(&src3mask) == (sizeof(cpu_set_t)*4));

  /* individual bit set/test/clear */
  CPU_SET(0, &newmask);
  CPU_SET(1, &newmask);
  CPU_SET(3, &newmask);
  assert(CPU_ISSET(1, &newmask));
  CPU_CLR(1, &newmask);
  assert(!CPU_ISSET(1, &newmask));

  /* src1 ⊆ src2, so OR yields src2 and AND yields src1 */
  CPU_OR(&newmask, &src1mask, &src2mask);
  assert(CPU_EQUAL(&newmask, &src2mask));
  CPU_AND(&newmask, &src1mask, &src2mask);
  assert(CPU_EQUAL(&newmask, &src1mask));

  /* src1 == src3, so XOR is empty */
  CPU_XOR(&newmask, &src1mask, &src3mask);
  memset(&src2mask, 0, sizeof(cpu_set_t));
  assert(memcmp(&newmask, &src2mask, sizeof(cpu_set_t)) == 0);

  /*
   * Need to confirm the bitwise logical right-shift in CpuCount().
   * i.e. zeros inserted into MSB on shift because cpu_set_t is
   * unsigned.
   */
  CPU_ZERO(&src1mask);
  for (cpu = 1; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &src1mask);	/* 0b10101010101010101010101010101010 */
    }
  assert(CPU_ISSET(sizeof(cpu_set_t)*8-1, &src1mask));
  assert(CPU_COUNT(&src1mask) == (sizeof(cpu_set_t)*4));

  return 0;
}
/*
 * SMP test scenario for the pthread affinity API:
 *  1. raise the Init thread to max SCHED_FIFO priority,
 *  2. create medium-priority threads that inherit the default (full)
 *     affinity and verify they match Init's set,
 *  3. create one low-priority thread pinned to each CPU via
 *     pthread_attr_setaffinity_np and verify the pinning,
 *  4. strip CPUs from a working set one at a time; the last removal
 *     leaves an empty set, which must be rejected with EINVAL,
 *  5. re-read every thread's affinity and check the final state.
 * All failures are reported through rtems_test_assert.
 */
void Validate_affinity(void )
{
  pthread_attr_t attr;
  cpu_set_t cpuset0;   /* Init thread's starting affinity (all CPUs in use) */
  cpu_set_t cpuset1;   /* working set written to threads */
  cpu_set_t cpuset2;   /* set read back for verification */
  uint32_t i;
  int sc;
  int cpu_count;
  struct sched_param param;

  puts( "Init - Set Init priority to high");
  sc = pthread_getattr_np( Init_id, &attr );
  rtems_test_assert( sc == 0 );

  sc = pthread_attr_getschedparam( &attr, &param );
  rtems_test_assert( sc == 0 );
  param.sched_priority = sched_get_priority_max( SCHED_FIFO );
  sc = pthread_setschedparam( Init_id, SCHED_FIFO, &param );
  rtems_test_assert( !sc );

  /* Snapshot the default affinity; new threads should inherit it. */
  sc = pthread_getaffinity_np( Init_id, sizeof(cpu_set_t), &cpuset0 );
  rtems_test_assert( !sc );

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();

  /* Fill the remaining cpus with med priority tasks */
  puts( "Init - Create Medium priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    sc = pthread_create( &Med_id[i], &attr, Thread_1, NULL );
    rtems_test_assert( !sc );
  }

  puts( "Init - Verify Medium priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    sc = pthread_getaffinity_np( Med_id[i], sizeof(cpu_set_t), &cpuset2 );
    rtems_test_assert( !sc );
    rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
  }

  /*
   * Create low priority thread for each remaining cpu with the affinity
   * set to only run on one cpu.
   */
  puts( "Init - Create Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);
    /* Pin via the creation attribute rather than after the fact. */
    sc = pthread_attr_setaffinity_np( &attr, sizeof(cpu_set_t), &cpuset1 );
    rtems_test_assert( !sc );
    sc = pthread_create( &Low_id[i], &attr, Thread_1, NULL );
    rtems_test_assert( !sc );
  }

  /* Verify affinity on low priority tasks */
  /* NOTE(review): cpu_count threads were created above but only
     cpu_count-1 are verified here; the classic-API variant of this test
     elsewhere in this file verifies all of them — confirm which is
     intended. */
  puts( "Init - Verify Low priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);
    sc = pthread_getaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    rtems_test_assert( !sc );
    rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }

  /* Change the affinity for each low priority task */
  puts("Init - Change affinity on Low priority tasks");
  /* NOTE(review): intent is cpuset1 = cpuset0; the classic-API variant
     in this file passes the arguments in the opposite order, so the
     CPU_COPY(dest, src) vs (src, dest) convention should be confirmed
     against the toolchain's <sys/cpuset.h>. */
  CPU_COPY(&cpuset1, &cpuset0);
  for (i=0; i<cpu_count; i++){
    /* Progressively remove CPU i; after the last iteration the set is
       empty, which must be rejected. */
    CPU_CLR(i, &cpuset1);
    sc = pthread_setaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset1 );

    /* Verify no cpu's are now set in the cpuset */
    if (i== (cpu_count-1)) {
      rtems_test_assert( sc == EINVAL );
      /* Restore a valid (full) set so the final thread stays runnable. */
      sc = pthread_setaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset0 );
    }
    rtems_test_assert( !sc );
  }

  puts("Init - Validate affinity on Low priority tasks");
  CPU_COPY(&cpuset1, &cpuset0);
  for (i=0; i<cpu_count; i++){
    CPU_CLR(i, &cpuset1);
    sc = pthread_getaffinity_np( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    rtems_test_assert( !sc );
    /* The last thread was restored to the full set above; every other
       thread should report the progressively-stripped set. */
    if (i== (cpu_count-1))
      rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
    else
      rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }
}
/*
 * Exercise rtems_task_get_affinity()/rtems_task_set_affinity() error and
 * success paths on a uniprocessor configuration (the expected affinity is
 * always {CPU 0}):
 *  - NULL cpuset        -> RTEMS_INVALID_ADDRESS
 *  - zero-sized cpuset  -> RTEMS_INVALID_NUMBER
 *  - bad task id        -> RTEMS_INVALID_ID
 *  - get/set round-trips for RTEMS_SELF, the explicit self id, and a
 *    freshly created (never started) task,
 *  - the same round-trip with an oversized, CPU_ALLOC()'d set to check
 *    the size-generic code path.
 * Compiled out entirely when <sys/cpuset.h> support is absent.
 */
static void test_task_get_set_affinity(void)
{
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
  rtems_id self_id = rtems_task_self();
  rtems_id task_id;
  rtems_status_code sc;
  cpu_set_t cpusetone;           /* reference set: only CPU 0 */
  cpu_set_t cpuset;
  /* A set twice the size of the fixed cpu_set_t, to test the _S macros. */
  size_t big = 2 * CHAR_BIT * sizeof(cpu_set_t);
  size_t cpusetbigsize = CPU_ALLOC_SIZE(big);
  cpu_set_t *cpusetbigone;
  cpu_set_t *cpusetbig;

  CPU_ZERO(&cpusetone);
  CPU_SET(0, &cpusetone);

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* NULL buffer is rejected before the size is examined. */
  sc = rtems_task_get_affinity(RTEMS_SELF, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);

  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);

  /* A zero-byte set cannot describe any CPU. */
  sc = rtems_task_get_affinity(RTEMS_SELF, 0, &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_task_set_affinity(RTEMS_SELF, 0, &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_task_get_affinity(invalid_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  sc = rtems_task_set_affinity(invalid_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  /* Round-trip on the executing task: must report exactly {CPU 0}. */
  sc = rtems_task_get_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Same round-trip on the created (dormant) task. */
  sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));

  /* Repeat with dynamically sized sets larger than cpu_set_t. */
  cpusetbigone = CPU_ALLOC(big);
  rtems_test_assert(cpusetbigone != NULL);

  cpusetbig = CPU_ALLOC(big);
  rtems_test_assert(cpusetbig != NULL);

  CPU_ZERO_S(cpusetbigsize, cpusetbigone);
  CPU_SET_S(0, cpusetbigsize, cpusetbigone);

  sc = rtems_task_get_affinity(task_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL_S(cpusetbigsize, cpusetbig, cpusetbigone));

  sc = rtems_task_set_affinity(task_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  CPU_FREE(cpusetbig);
  CPU_FREE(cpusetbigone);
#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */
}
/*
 * SMP test scenario for the RTEMS classic-API affinity directives
 * (rtems_task_get_affinity / rtems_task_set_affinity); mirrors the
 * pthread-based variant of this test:
 *  1. raise Init to priority 1 and snapshot its (full) affinity,
 *  2. create/start medium-priority tasks and verify they inherit it,
 *  3. create one low-priority task pinned to each CPU and verify,
 *  4. strip CPUs from a working set one at a time; the final, empty set
 *     must be rejected with RTEMS_INVALID_NUMBER,
 *  5. re-read every task's affinity and check the final state.
 */
void Validate_affinity(void )
{
  cpu_set_t cpuset0;   /* Init task's starting affinity (all CPUs in use) */
  cpu_set_t cpuset1;   /* working set written to tasks */
  cpu_set_t cpuset2;   /* set read back for verification */
  uint32_t i;
  int sc;
  int cpu_count;
  rtems_task_priority priority;
  /* One digit plus NUL for the task-name suffix.
     NOTE(review): "%01" PRId32 with a uint32_t argument is a
     signed/unsigned format mismatch, and values >= 10 would overflow
     ch[2] — safe only while cpu_count stays below 10; confirm. */
  char ch[2];

  puts( "Init - Set Init priority to high");
  sc = rtems_task_set_priority( Init_id, 1, &priority );
  directive_failed( sc, "Set Init Priority" );

  sc = rtems_task_get_affinity( Init_id, sizeof(cpu_set_t), &cpuset0 );
  directive_failed( sc, "Get Affinity of Init Task" );

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();

  /* Fill the remaining cpus with med priority tasks */
  puts( "Init - Create Medium priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    sprintf(ch, "%01" PRId32, i+1 );
    sc = rtems_task_create(
      rtems_build_name( 'C', 'P', 'U', ch[0] ),
      2,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &Med_id[i]
    );
    directive_failed( sc, "task create" );

    sc = rtems_task_start( Med_id[i], Task_1, i+1 );
    directive_failed( sc, "task start" );

    /* New tasks must inherit Init's (full) affinity. */
    sc = rtems_task_get_affinity( Med_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Get Affinity of Medium Priority Task" );
    rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
  }

  /*
   * Create low priority thread for each remaining cpu with the affinity
   * set to only run on one cpu.
   */
  puts( "Init - Create Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);
    sprintf(ch, "%01" PRId32, (uint32_t) 0 );
    sc = rtems_task_create(
      rtems_build_name( 'X', 'T', 'R', ch[0] ),
      10,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &Low_id[i]
    );
    directive_failed( sc, "task create" );

    /* Pin the task to CPU i before starting it. */
    sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset1 );
    directive_failed( sc, "Low priority task set affinity" );

    sc = rtems_task_start( Low_id[i], Task_1, i+1 );
    directive_failed( sc, "task start" );
  }

  /* Verify affinity on low priority tasks */
  puts("Init - Verify affinity on Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);
    sc = rtems_task_get_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Low priority task get affinity" );
    rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }

  /* Change the affinity for each low priority task */
  puts("Init - Change affinity on Low priority tasks");
  /* NOTE(review): intent is cpuset1 = cpuset0; the pthread variant of
     this test elsewhere in this file passes the arguments in the
     opposite order, so the CPU_COPY(dest, src) vs (src, dest)
     convention should be confirmed against <sys/cpuset.h>. */
  CPU_COPY(&cpuset0, &cpuset1);
  for (i=0; i<cpu_count; i++){
    /* Progressively remove CPU i; after the last iteration the set is
       empty, which must be rejected. */
    CPU_CLR(i, &cpuset1);
    sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset1 );

    /* Verify no cpu's are now set in the cpuset */
    if (i== (cpu_count-1)) {
      rtems_test_assert( sc == RTEMS_INVALID_NUMBER );
      /* Restore a valid (full) set so the final task stays runnable. */
      sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset0 );
    }
    directive_failed( sc, "Low priority task set affinity" );
  }

  puts("Init - Validate affinity on Low priority tasks");
  CPU_COPY(&cpuset0, &cpuset1);
  for (i=0; i<cpu_count; i++){
    CPU_CLR(i, &cpuset1);
    sc = rtems_task_get_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Low priority task get affinity" );
    /* The last task was restored to the full set above; every other
       task should report the progressively-stripped set. */
    if (i== (cpu_count-1))
      rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
    else
      rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }
}
/*
 * Test pthread_getaffinity_np()/pthread_setaffinity_np() round-trips on
 * the current thread:
 *  - skip gracefully (return 0) when the API reports ENOSYS,
 *  - read the starting affinity, then write it back unchanged,
 *  - if more than one CPU is available: restrict to every other CPU,
 *    read it back, flip to the complementary CPUs, read back again, and
 *    assert each get matches the preceding set.
 * Returns 0; any failure aborts via assert.
 */
int main()
{
  int result;
  unsigned int cpu;
  cpu_set_t newmask;
  cpu_set_t processCpus;   /* thread's starting affinity, read below */
  cpu_set_t mask;          /* scratch set for get-after-set readback */
  cpu_set_t switchmask;    /* every other CPU */
  cpu_set_t flipmask;      /* all CPUs (XOR operand to invert a set) */
  pthread_t self = pthread_self();

  CPU_ZERO(&mask);
  CPU_ZERO(&switchmask);
  CPU_ZERO(&flipmask);

  if (pthread_getaffinity_np(self, sizeof(cpu_set_t), &processCpus) == ENOSYS)
    {
      printf("pthread_get/set_affinity_np API not supported for this platform: skipping test.");
      return 0;
    }

  assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &processCpus) == 0);
  printf("This thread has a starting affinity with %d CPUs\n", CPU_COUNT(&processCpus));
  assert(!CPU_EQUAL(&mask, &processCpus));

  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu += 2)
    {
      CPU_SET(cpu, &switchmask);  /* 0b01010101010101010101010101010101 */
    }
  for (cpu = 0; cpu < sizeof(cpu_set_t)*8; cpu++)
    {
      CPU_SET(cpu, &flipmask);    /* 0b11111111111111111111111111111111 */
    }

  /* Writing back the unchanged starting affinity must succeed. */
  result = pthread_setaffinity_np(self, sizeof(cpu_set_t), &processCpus);
  if (result != 0)
    {
      assert(result != ESRCH);
      assert(result != EFAULT);
      assert(result != EPERM);
      assert(result != EINVAL);
      assert(result != EAGAIN);
      assert(result == ENOSYS);
      /* NOTE(review): 'mask' is still empty here, so this assert fires
         whenever the ENOSYS path is taken; presumably unreachable on
         platforms that pass the getaffinity probe above — confirm
         intent. */
      assert(CPU_COUNT(&mask) == 1);
    }
  else
    {
      /*
       * BUG FIX: this previously tested CPU_COUNT(&mask), but 'mask' is
       * zeroed at the top and never populated before this point, so the
       * count was always 0 and the entire branch was dead code.  The
       * multi-CPU check belongs on the thread's actual starting
       * affinity.
       */
      if (CPU_COUNT(&processCpus) > 1)
        {
          CPU_AND(&newmask, &processCpus, &switchmask); /* Remove every other CPU */
          assert(pthread_setaffinity_np(self, sizeof(cpu_set_t), &newmask) == 0);
          assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &mask) == 0);
          assert(CPU_EQUAL(&mask, &newmask));

          CPU_XOR(&newmask, &mask, &flipmask); /* Switch to all alternative CPUs */
          assert(!CPU_EQUAL(&mask, &newmask));
          assert(pthread_setaffinity_np(self, sizeof(cpu_set_t), &newmask) == 0);
          assert(pthread_getaffinity_np(self, sizeof(cpu_set_t), &mask) == 0);
          assert(CPU_EQUAL(&mask, &newmask));
        }
    }

  return 0;
}
/*
 * Compare two ODP CPU masks.
 *
 * Returns non-zero when both masks select exactly the same set of CPUs,
 * zero otherwise (the raw CPU_EQUAL() result on the underlying sets).
 */
int odp_cpumask_equal(const odp_cpumask_t *mask1, const odp_cpumask_t *mask2)
{
	const cpu_set_t *lhs = &mask1->set;
	const cpu_set_t *rhs = &mask2->set;

	return CPU_EQUAL(lhs, rhs);
}