Example #1
File: init.c Project: gedare/rtems
void Validate_setaffinity_errors(void)
{
  int                 sc;
  cpu_set_t           cpuset;

  /* Verify rtems_task_set_affinity checks that all CPUs exist. */
  /* Note: this check assumes the system is running with fewer than 32 CPUs. */
  CPU_FILL(&cpuset);
  puts( "Init - rtems_task_set_affinity - Lots of cpus - SUCCESS" );
  sc = rtems_task_set_affinity( Init_id, sizeof(cpu_set_t), &cpuset );
  rtems_test_assert( sc == RTEMS_SUCCESSFUL );

  /* Verify rtems_task_set_affinity checks that at least one cpu is set */
  CPU_ZERO(&cpuset);
  puts( "Init - rtems_task_set_affinity - no cpu - RTEMS_INVALID_NUMBER" );
  sc = rtems_task_set_affinity( Init_id, sizeof(cpu_set_t), &cpuset );
  rtems_test_assert( sc == RTEMS_INVALID_NUMBER );

  /* Verify rtems_task_set_affinity checks that the thread id is valid */
  CPU_ZERO(&cpuset);
  CPU_SET(0, &cpuset);
  puts( "Init - rtems_task_set_affinity - Invalid thread - RTEMS_INVALID_ID" );
  sc = rtems_task_set_affinity( 999, sizeof(cpu_set_t), &cpuset );
  rtems_test_assert( sc == RTEMS_INVALID_ID );

  /* Verify rtems_task_set_affinity validates cpusetsize */
  puts( "Init - rtems_task_set_affinity - Invalid cpusetsize - RTEMS_INVALID_NUMBER" );
  sc = rtems_task_set_affinity( Init_id,  1, &cpuset );
  rtems_test_assert( sc == RTEMS_INVALID_NUMBER );

  /* Verify rtems_task_set_affinity validates cpuset */
  puts( "Init - rtems_task_set_affinity - Invalid cpuset - RTEMS_INVALID_ADDRESS" );
  sc = rtems_task_set_affinity( Init_id, sizeof(cpu_set_t), NULL );
  rtems_test_assert( sc == RTEMS_INVALID_ADDRESS );
}
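
For context, a minimal, hedged sketch of how a routine like this is usually driven: the classic-API Init task records its own ID in the global Init_id before running the checks. The Init entry point and the omitted cleanup are assumptions for illustration; only rtems_task_self() and the Classic API types are taken from RTEMS itself.

#include <rtems.h>

void Validate_setaffinity_errors(void);

rtems_id Init_id;  /* task ID consumed by Validate_setaffinity_errors() */

rtems_task Init( rtems_task_argument ignored )
{
  (void) ignored;

  Init_id = rtems_task_self();  /* ID of the calling (Init) task */
  Validate_setaffinity_errors();
  /* ... remaining test steps and test exit ... */
}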
Example #2
File: init.c Project: Fyleo/rtems
void Validate_setaffinity_errors(void)
{
  int                 sc;
  cpu_set_t           cpuset;

  /* Verify pthread_setaffinity_np checks that all CPUs exist. */
  CPU_FILL(&cpuset);
  puts( "Init - pthread_setaffinity_np - Invalid cpu - EINVAL" );
  sc = pthread_setaffinity_np( Init_id, sizeof(cpu_set_t), &cpuset );
  rtems_test_assert( sc == EINVAL );

  /* Verify pthread_setaffinity_np checks that at least one cpu is set */
  CPU_ZERO(&cpuset);
  puts( "Init - pthread_setaffinity_np - no cpu - EINVAL" );
  sc = pthread_setaffinity_np( Init_id, sizeof(cpu_set_t), &cpuset );
  rtems_test_assert( sc == EINVAL );

  /* Verify pthread_setaffinity_np checks that the thread id is valid */
  CPU_SET(0, &cpuset);
  puts( "Init - pthread_setaffinity_np - Invalid thread - ESRCH" );
  sc = pthread_setaffinity_np( 999, sizeof(cpu_set_t), &cpuset );
  rtems_test_assert( sc == ESRCH );

  /* Verify pthread_setaffinity_np validates cpusetsize */
  puts( "Init - pthread_setaffinity_np - Invalid cpusetsize - EINVAL" );
  sc = pthread_setaffinity_np( Init_id,  sizeof(cpu_set_t) * 2, &cpuset );
  rtems_test_assert( sc == EINVAL );

  /* Verify pthread_setaffinity_np validates cpuset */
  puts( "Init - pthread_setaffinity_np - Invalid cpuset - EFAULT" );
  sc = pthread_setaffinity_np( Init_id, sizeof(cpu_set_t), NULL );
  rtems_test_assert( sc == EFAULT );
}
Example #3
/*
 * Creates the cpuset for thread0.  We make two sets:
 * 
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}
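
The block comment above distinguishes the immutable root set (all processors) from the modifiable default set that every process initially belongs to. As a hedged user-space illustration of that split (not part of the kernel code above), FreeBSD's cpuset_getaffinity(2) can report both the root-level mask and the calling process's effective mask; the sketch below assumes a FreeBSD userland.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <stdio.h>

int
main(void)
{
	cpuset_t root_mask, my_mask;

	/* Root set: every processor in the system (set 0 above). */
	if (cpuset_getaffinity(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1,
	    sizeof(root_mask), &root_mask) != 0)
		return (1);

	/* Effective mask of this process, derived from the default set (set 1). */
	if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(my_mask), &my_mask) != 0)
		return (1);

	printf("root CPUs: %d, my CPUs: %d\n",
	    CPU_COUNT(&root_mask), CPU_COUNT(&my_mask));
	return (0);
}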
Example #4
static void test_cpu_fill_case_1(void)
{
  size_t i;

  /*
   * Set all bits and verify
   */
  puts( "Exercise CPU_FILL, CPU_ISSET, and CPU_COUNT" );
  CPU_FILL(&set1);

  /* test that all bits are set */
  for (i=0 ; i<CPU_SETSIZE ; i++) {
    rtems_test_assert( CPU_ISSET(i, &set1) == 1 );
  }
  rtems_test_assert( CPU_COUNT(&set1) == _NCPUBITS );
}
Example #5
static void test_cpu_copy_case_1(void)
{
  size_t i;

  /*
   * CPU_COPY
   */
  puts( "Exercise CPU_ZERO, CPU_COPY, and CPU_ISET" );
  CPU_ZERO(&set1);
  CPU_FILL(&set2);

  CPU_COPY(&set2, &set1);

  /* test that all bits in set2 are clear */
  for (i=0 ; i<CPU_SETSIZE ; i++) {
    rtems_test_assert( CPU_ISSET(i, &set2) == 0 );
  }
}
Example #6
static void test_cpu_clr_case_1(size_t cpu)
{
  size_t i;

  /*
   * Fill all bits, clear one, and verify
   */
  printf( "Exercise CPU_FILL, CPU_CLR(%u), and CPU_ISET\n", cpu );
  CPU_FILL(&set1);
  CPU_CLR(cpu, &set1);

  /* test that all bits except cpu are set */
  for (i=0 ; i<CPU_SETSIZE ; i++) {
    if (i==cpu)
      rtems_test_assert( CPU_ISSET(i, &set1) == 0 );
    else
      rtems_test_assert( CPU_ISSET(i, &set1) == 1 );
  }
}
Example #7
File: init.c Project: gedare/rtems
static void test(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  rtems_id scheduler_id;
  rtems_id scheduler_a_id;
  rtems_id scheduler_b_id;
  rtems_id scheduler_c_id;
  rtems_task_priority prio;
  cpu_set_t cpuset;
  cpu_set_t first_cpu;
  cpu_set_t second_cpu;
  cpu_set_t all_cpus;
  cpu_set_t online_cpus;
  uint32_t cpu_count;

  rtems_test_assert(rtems_get_current_processor() == 0);

  cpu_count = rtems_get_processor_count();
  main_task_id = rtems_task_self();

  CPU_ZERO(&first_cpu);
  CPU_SET(0, &first_cpu);

  CPU_ZERO(&second_cpu);
  CPU_SET(1, &second_cpu);

  CPU_FILL(&all_cpus);

  CPU_ZERO(&online_cpus);
  CPU_SET(0, &online_cpus);

  if (cpu_count > 1) {
    CPU_SET(1, &online_cpus);
  }

  sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  if (cpu_count > 1) {
    sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_a_id != scheduler_b_id);
  }

  sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name('C', 'M', 'T', 'X'),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
    1,
    &cmtx_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name('I', 'M', 'T', 'X'),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    1,
    &imtx_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  prio = 2;
  sc = rtems_semaphore_set_priority(cmtx_id, scheduler_a_id, prio, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  if (cpu_count > 1) {
    prio = 1;
    sc = rtems_semaphore_set_priority(cmtx_id, scheduler_b_id, prio, &prio);
    rtems_test_assert(sc == RTEMS_NOT_DEFINED);
    rtems_test_assert(prio == 2);
  }

  CPU_ZERO(&cpuset);
  sc = rtems_scheduler_get_processor_set(
    scheduler_a_id,
    sizeof(cpuset),
    &cpuset
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  if (cpu_count > 1) {
    CPU_ZERO(&cpuset);
    sc = rtems_scheduler_get_processor_set(
      scheduler_b_id,
      sizeof(cpuset),
      &cpuset
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));
  }

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_a_id);

  CPU_ZERO(&cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &online_cpus));

  rtems_test_assert(sched_get_priority_min(SCHED_RR) == 1);
  rtems_test_assert(sched_get_priority_max(SCHED_RR) == 254);

  sc = rtems_task_set_scheduler(task_id, scheduler_c_id, 1);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  sc = rtems_task_set_scheduler(task_id, scheduler_c_id + 1, 1);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  if (cpu_count > 1) {
    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    CPU_ZERO(&cpuset);
    sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &online_cpus));

    sc = rtems_task_set_affinity(task_id, sizeof(all_cpus), &all_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_affinity(task_id, sizeof(first_cpu), &first_cpu);
    rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_task_set_affinity(task_id, sizeof(online_cpus), &online_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_affinity(task_id, sizeof(second_cpu), &second_cpu);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_a_id, 1);
    rtems_test_assert(sc == RTEMS_UNSATISFIED);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_semaphore_obtain(imtx_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(task_id, task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Ensure that the other task waits for the mutex owned by us */
    sc = rtems_task_wake_after(2);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(RTEMS_SELF, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);

    sc = rtems_semaphore_release(imtx_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(cmtx_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(imtx_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  test_scheduler_add_remove_processors();
}