Code example #1
File: init.c Project: krohini1593/rtems
static void test_func_test( size_t set_size, cpu_set_t *cpu_set,
    SMP_barrier_State *bs )
{
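  /*
   * Runs on every processor: reset this processor's counter, rendezvous
   * with all other processors, and issue the multicast cache action.
   * Each processor triggers test_cache_message on every processor in
   * the set, so each counter must end up equal to the processor count.
   */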
  ctx.count[rtems_get_current_processor()] = 0;
  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  rtems_test_assert( ctx.count[rtems_get_current_processor()] ==
      rtems_get_processor_count() );
}
Code example #2
File: init.c Project: krohini1593/rtems
static void test_smp_cache_manager( void )
{
  rtems_status_code sc;
  size_t worker_index;
  uint32_t cpu_count = rtems_get_processor_count();

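  /* Start one worker task for each secondary processor before running
     the tests. */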
  for (worker_index = 1; worker_index < cpu_count; ++worker_index) {
    rtems_id worker_id;

    sc = rtems_task_create(
      rtems_build_name('W', 'R', 'K', '0'+worker_index),
      WORKER_PRIORITY,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &worker_id
    );
    rtems_test_assert( sc == RTEMS_SUCCESSFUL );

    sc = rtems_task_start( worker_id, worker_task, 0 );
    rtems_test_assert( sc == RTEMS_SUCCESSFUL );
  }

  all_tests();
}
Code example #3
File: init.c Project: Avanznow/rtems
static void Init(rtems_task_argument arg)
{
  uint32_t self = rtems_get_current_processor();
  uint32_t cpu_count = rtems_get_processor_count();

  rtems_test_begink();

  main_cpu = self;

  if (cpu_count >= CPU_COUNT) {
    rtems_status_code sc;
    rtems_id id;

    sc = rtems_task_create(
      rtems_build_name( 'W', 'A', 'I', 'T' ),
      1,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &id
    );
    assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(id, acquire_giant_and_fatal_task, 0);
    assert(sc == RTEMS_SUCCESSFUL);

    wait_for_giant();
  } else {
    rtems_test_endk();
    exit(0);
  }
}
Code example #4
File: init.c Project: krohini1593/rtems
static void test_func_isrdisabled_test( size_t set_size, cpu_set_t *cpu_set,
    SMP_barrier_State *bs )
{
  ISR_Level isr_level;

  ctx.count[rtems_get_current_processor()] = 0;
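  /* Keep interrupts disabled on this processor across the multicast
     action to exercise the ISR-disabled code path. */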
  _ISR_Disable_without_giant( isr_level );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );

  _ISR_Enable_without_giant( isr_level );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  rtems_test_assert( ctx.count[rtems_get_current_processor()] ==
      rtems_get_processor_count() );
}
Code example #5
File: init.c Project: AlexShiLucky/rtems
static void Init(rtems_task_argument arg)
{
  uint32_t load = 0;

  TEST_BEGIN();

  printf("<Test>\n");

  cache_line_size = rtems_cache_get_data_line_size();
  if (cache_line_size == 0) {
    cache_line_size = 32;
  }

  data_size = rtems_cache_get_data_cache_size(0);
  if (data_size == 0) {
    data_size = cache_line_size;
  }

  main_data = malloc(data_size);
  rtems_test_assert(main_data != NULL);

  test(false, load);
  test(true, load);

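  /* Add one load task per additional processor and repeat the test with
     increasing background load. */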
  for (load = 1; load < rtems_get_processor_count(); ++load) {
    rtems_status_code sc;
    rtems_id id;
    volatile int *load_data = NULL;

    load_data = malloc(data_size);
    if (load_data == NULL) {
      load_data = main_data;
    }

    sc = rtems_task_create(
      rtems_build_name('L', 'O', 'A', 'D'),
      1,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &id
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(id, load_task, (rtems_task_argument) load_data);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    test(true, load);
  }

  printf("</Test>\n");

  TEST_END();
  rtems_test_exit(0);
}
Code example #6
File: testparallel.c Project: AoLaD/rtems
static void run_tests(
  rtems_test_parallel_context *ctx,
  const rtems_test_parallel_job *jobs,
  size_t job_count,
  size_t worker_index
)
{
  SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
  size_t i;

  for (i = 0; i < job_count; ++i) {
    const rtems_test_parallel_job *job = &jobs[i];
    size_t n = rtems_get_processor_count();
    size_t j = job->cascade ? 0 : rtems_get_processor_count() - 1;

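    /* A cascade job runs once for each active worker count 1..n; a
       non-cascade job runs only once with all n workers active. */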
    while (j < n) {
      size_t active_worker = j + 1;

      if (rtems_test_parallel_is_master_worker(worker_index)) {
        rtems_interval duration = (*job->init)(ctx, job->arg, active_worker);

        if (duration > 0) {
          start_worker_stop_timer(ctx, duration);
        }
      }

      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);

      if (worker_index <= j) {
        (*job->body)(ctx, job->arg, active_worker, worker_index);
      }

      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);

      if (rtems_test_parallel_is_master_worker(worker_index)) {
        (*job->fini)(ctx, job->arg, active_worker);
      }

      ++j;
    }
  }
}
Code example #7
File: init.c Project: Avanznow/rtems
static void Init(rtems_task_argument arg)
{
  TEST_BEGIN();

  if (rtems_get_processor_count() >= 2) {
    test();
  }

  TEST_END();
  rtems_test_exit(0);
}
Code example #8
File: init.c Project: krohini1593/rtems
static void test_func_giant_taken_test( size_t set_size, cpu_set_t *cpu_set,
    SMP_barrier_State *bs )
{
  ctx.count[rtems_get_current_processor()] = 0;

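  /* Processor 0 performs the action with thread dispatching disabled,
     i.e. while the Giant lock is taken. */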
  if ( rtems_get_current_processor() == 0)
    _Thread_Disable_dispatch();

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  rtems_test_assert( ctx.count[rtems_get_current_processor()] ==
      rtems_get_processor_count() );

  if ( rtems_get_current_processor() == 0)
    _Thread_Enable_dispatch();
}
Code example #9
File: init.c Project: greenmeent/rtems
rtems_task Init(
  rtems_task_argument argument
)
{
  uint32_t           i;
  char               ch;
  uint32_t           cpu_num;
  rtems_id           id;
  rtems_status_code  status;

  locked_print_initialize();
  rtems_test_begin();

  if ( rtems_get_processor_count() == 1 ) {
    success();
  }

  for ( i=0; i<rtems_get_processor_count() ; i++ ) {
    ch = '1' + i;

    status = rtems_task_create(
      rtems_build_name( 'T', 'A', ch, ' ' ),
      1,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &id
    );
    directive_failed( status, "task create" );

    cpu_num = rtems_get_current_processor();
    locked_printf(" CPU %" PRIu32 " start task TA%c\n", cpu_num, ch);

    status = rtems_task_start( id, Test_task, i+1 );
    directive_failed( status, "task start" );
  }

  while (1)
    ;
}
Code example #10
File: init.c Project: krohini1593/rtems
static void standard_funcs_giant_taken_test( size_t set_size,
    cpu_set_t *cpu_set, SMP_barrier_State *bs )
{
  if ( rtems_get_current_processor() == 0)
    _Thread_Disable_dispatch();

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  cache_manager_smp_functions( set_size, cpu_set );

  if ( rtems_get_current_processor() == 0)
    _Thread_Enable_dispatch();
}
Code example #11
File: init.c Project: krohini1593/rtems
static void standard_funcs_isrdisabled_test( size_t set_size,
    cpu_set_t *cpu_set, SMP_barrier_State *bs  )
{
  ISR_Level isr_level;

  _ISR_Disable_without_giant( isr_level );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  cache_manager_smp_functions( set_size, cpu_set );

  _ISR_Enable_without_giant( isr_level );
}
Code example #12
File: init.c Project: gedare/rtems
static void Init(rtems_task_argument arg)
{
  TEST_BEGIN();

  if (rtems_get_processor_count() == CPU_COUNT) {
    test();
  } else {
    puts("warning: wrong processor count to run the test");
  }

  TEST_END();
  rtems_test_exit(0);
}
Code example #13
File: cpuusagereset.c Project: AoLaD/rtems
/*
 *  rtems_cpu_usage_reset
 */
void rtems_cpu_usage_reset( void )
{
  uint32_t cpu_count;
  uint32_t cpu_index;

  _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );

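  /* Restart the usage accounting of each processor from the reset time. */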
  cpu_count = rtems_get_processor_count();
  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    cpu->cpu_usage_timestamp = CPU_usage_Uptime_at_last_reset;
  }

  rtems_iterate_over_all_threads(CPU_usage_Per_thread_handler);
}
Code example #14
File: init.c Project: gedare/rtems
static void test(test_context *ctx)
{
  uint32_t cpu_count;
  int prio_ceiling;
  int eno;

  cpu_count = rtems_get_processor_count();

  rtems_test_assert(rtems_get_current_processor() == 0);

  eno = pthread_mutexattr_init(&ctx->mtx_attr);
  rtems_test_assert(eno == 0);

  eno = pthread_mutexattr_setprotocol(&ctx->mtx_attr, PTHREAD_PRIO_PROTECT);
  rtems_test_assert(eno == 0);

  eno = pthread_mutex_init(&ctx->mtx_a, &ctx->mtx_attr);
  rtems_test_assert(eno == 0);

  eno = pthread_mutex_getprioceiling(&ctx->mtx_a, &prio_ceiling);
  rtems_test_assert(eno == 0);
  rtems_test_assert(prio_ceiling == 126);

  eno = pthread_mutex_lock(&ctx->mtx_a);
  rtems_test_assert(eno == 0);

  eno = pthread_mutex_unlock(&ctx->mtx_a);
  rtems_test_assert(eno == 0);

  if (cpu_count > 1) {
    void *exit_code;

    eno = pthread_create(&ctx->thread_b, NULL, thread_b, ctx);
    rtems_test_assert(eno == 0);

    exit_code = NULL;
    eno = pthread_join(ctx->thread_b, &exit_code);
    rtems_test_assert(eno == 0);
    rtems_test_assert(exit_code == ctx);
  }

  eno = pthread_mutex_destroy(&ctx->mtx_a);
  rtems_test_assert(eno == 0);

  eno = pthread_mutexattr_destroy(&ctx->mtx_attr);
  rtems_test_assert(eno == 0);
}
Code example #15
File: init.c Project: Dipupo/rtems
static void Init(rtems_task_argument arg)
{
  rtems_resource_snapshot snapshot;

  TEST_BEGIN();

  rtems_resource_snapshot_take(&snapshot);

  if (rtems_get_processor_count() == CPU_COUNT) {
    test();
  }

  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
Code example #16
File: init.c Project: Avanznow/rtems
rtems_task Init(
  rtems_task_argument argument
)
{
  uint32_t           i;
  char               ch;
  uint32_t           cpu_num;
  rtems_id           id;
  rtems_status_code  status;

  TEST_BEGIN();

  locked_print_initialize();

  for ( killtime=0; killtime<1000000; killtime++ )
    ;
  
  for ( i=0; i<rtems_get_processor_count() -1; i++ ) {
    ch = '1' + i;

    status = rtems_task_create(
      rtems_build_name( 'T', 'A', ch, ' ' ),
      1,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &id
    );
    directive_failed( status, "task create" );

    cpu_num = rtems_get_current_processor();
    locked_printf(" CPU %" PRIu32 " start task TA%c\n", cpu_num, ch);

    status = rtems_task_start( id, Test_task, i+1 );
    directive_failed( status, "task start" );
  }

  locked_printf(" kill 10 clock ticks\n" );
  while ( rtems_clock_get_ticks_since_boot() < 10 )
    ;

  rtems_cpu_usage_report();

  TEST_END();
  rtems_test_exit(0);
}
Code example #17
File: init.c Project: AlexShiLucky/rtems
static void test_send_message_while_processing_a_message(
  test_context *ctx
)
{
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index_self = rtems_get_current_processor();
  uint32_t cpu_index;
  SMP_barrier_State *bs = &ctx->main_barrier_state;

  _SMP_Set_test_message_handler(barrier_handler);

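  /*
   * The barrier_handler presumably pairs with the (A), (B) and (C)
   * barriers below.  A message sent while the target processor is still
   * inside the handler is processed only after the handler returns, so
   * the counter stays at one until the second pass.
   */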
  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    if (cpu_index != cpu_index_self) {
      _SMP_Send_message(cpu_index, SMP_MESSAGE_TEST);

      /* (A) */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 1);
      _SMP_Send_message(cpu_index, SMP_MESSAGE_TEST);

      /* (B) */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 1);

      /* (C) */
      barrier(ctx, bs);

      /* (A) */
      barrier(ctx, bs);

      rtems_test_assert(ctx->counters[cpu_index].value == 2);

      /* (B) */
      barrier(ctx, bs);

      /* (C) */
      barrier(ctx, bs);

      ctx->counters[cpu_index].value = 0;
    }
  }
}
Code example #18
File: init.c Project: AlexShiLucky/rtems
static void test_send_message_flood(
  test_context *ctx
)
{
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index_self = rtems_get_current_processor();
  uint32_t cpu_index;

  _SMP_Set_test_message_handler(counter_handler);

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    uint32_t i;

    /* Wait 1 ms so that all outstanding messages have been processed */
    rtems_counter_delay_nanoseconds(1000000);

    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        ctx->copy_counters[i] = ctx->counters[i].value;
      }
    }

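    /* Flood the selected processor with test messages; the counters of
       all other processors must remain unchanged. */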
    for (i = 0; i < 100000; ++i) {
      _SMP_Send_message(cpu_index, SMP_MESSAGE_TEST);
    }

    for (i = 0; i < cpu_count; ++i) {
      if (i != cpu_index) {
        rtems_test_assert(ctx->copy_counters[i] == ctx->counters[i].value);
      }
    }
  }

  for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
    printf(
      "inter-processor interrupts for processor %"
        PRIu32 "%s: %" PRIu32 "\n",
      cpu_index,
      cpu_index == cpu_index_self ? " (main)" : "",
      ctx->counters[cpu_index].value
    );
  }
}
Code example #19
File: init.c Project: AlexShiLucky/rtems
static rtems_status_code test_driver_init(
  rtems_device_major_number major,
  rtems_device_minor_number minor,
  void *arg
)
{
  uint32_t self = rtems_get_current_processor();
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu;

  rtems_test_begink();

  assert(rtems_configuration_get_maximum_processors() == MAX_CPUS);

  main_cpu = self;

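  /*
   * At driver initialization time the secondary processors may not yet
   * be ready to start multitasking, and processors beyond the actual
   * count must still be in the initial state.
   */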
  for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
    const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
    Per_CPU_State state = per_cpu->state;

    if (cpu == self) {
      assert(state == PER_CPU_STATE_INITIAL);
    } else if (cpu < cpu_count) {
      assert(
        state == PER_CPU_STATE_INITIAL
          || state == PER_CPU_STATE_READY_TO_START_MULTITASKING
      );
    } else {
      assert(state == PER_CPU_STATE_INITIAL);
    }
  }

  if (cpu_count > 1) {
    rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0xdeadbeef);
  } else {
    rtems_test_endk();
    exit(0);
  }

  return RTEMS_SUCCESSFUL;
}
Code example #20
File: bspreset.c Project: Fyleo/rtems
void bsp_reset(void)
{
  uint32_t self_cpu = rtems_get_current_processor();

  if (self_cpu == 0) {
    volatile struct irqmp_regs *irqmp = LEON3_IrqCtrl_Regs;

    if (irqmp != NULL) {
      /*
       * Value was chosen to give a delay on the order of 1 ms on a 200 MHz
       * processor.
       */
      uint32_t max_wait = 1234567;

      uint32_t cpu_count = rtems_get_processor_count();
      uint32_t halt_mask = 0;
      uint32_t i;

      for (i = 0; i < cpu_count; ++i) {
        if (i != self_cpu) {
          halt_mask |= UINT32_C(1) << i;
        }
      }

      /* Wait some time for secondary processors to halt */
      i = 0;
      while ((irqmp->mpstat & halt_mask) != halt_mask && i < max_wait) {
        ++i;
      }
    }

    __asm__ volatile (
      "mov 1, %g1\n"
      "ta 0\n"
      "nop"
    );
  }

  leon3_power_down_loop();
}
Code example #21
File: init.c Project: AlexShiLucky/rtems
static void fatal_extension(
  rtems_fatal_source source,
  bool is_internal,
  rtems_fatal_code code
)
{
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  if (
    source == RTEMS_FATAL_SOURCE_APPLICATION
      || source == RTEMS_FATAL_SOURCE_SMP
  ) {
    uint32_t self = rtems_get_current_processor();

    assert(!is_internal);

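    /* The main processor observes the application-induced fatal error;
       all other processors are shut down with SMP_FATAL_SHUTDOWN. */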
    if (self == main_cpu) {
      uint32_t cpu;

      assert(source == RTEMS_FATAL_SOURCE_APPLICATION);
      assert(code == 0xdeadbeef);

      for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
        const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
        Per_CPU_State state = per_cpu->state;

        assert(state == PER_CPU_STATE_SHUTDOWN);
      }

      rtems_test_endk();
    } else {
      assert(source == RTEMS_FATAL_SOURCE_SMP);
      assert(code == SMP_FATAL_SHUTDOWN);
    }
  }

  _SMP_barrier_Wait(&barrier, &barrier_state, rtems_get_processor_count());
}
Code example #22
File: testparallel.c Project: AoLaD/rtems
void rtems_test_parallel(
  rtems_test_parallel_context *ctx,
  rtems_test_parallel_worker_setup worker_setup,
  const rtems_test_parallel_job *jobs,
  size_t job_count
)
{
  rtems_status_code sc;
  size_t worker_index;
  rtems_task_priority worker_priority;

  _Atomic_Init_ulong(&ctx->stop, 0);
  _SMP_barrier_Control_initialize(&ctx->barrier);
  ctx->worker_count = rtems_get_processor_count();
  ctx->worker_ids[0] = rtems_task_self();

  if (RTEMS_ARRAY_SIZE(ctx->worker_ids) < ctx->worker_count) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  sc = rtems_task_set_priority(
    RTEMS_SELF,
    RTEMS_CURRENT_PRIORITY,
    &worker_priority
  );
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

  sc = rtems_timer_create(
    rtems_build_name('S', 'T', 'O', 'P'),
    &ctx->stop_worker_timer_id
  );
  if (sc != RTEMS_SUCCESSFUL) {
    rtems_fatal_error_occurred(0xdeadbeef);
  }

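  /* Create and start one worker task per remaining processor; worker
     index 0 is the calling task itself. */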
  for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
    worker_arg warg = {
      .ctx = ctx,
      .jobs = jobs,
      .job_count = job_count,
      .worker_index = worker_index
    };
    rtems_id worker_id;

    sc = rtems_task_create(
      rtems_build_name(
        'W',
        digit(worker_index, 100),
        digit(worker_index, 10),
        digit(worker_index, 1)
      ),
      worker_priority,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &worker_id
    );
    if (sc != RTEMS_SUCCESSFUL) {
      rtems_fatal_error_occurred(0xdeadbeef);
    }

    ctx->worker_ids[worker_index] = worker_id;

    if (worker_setup != NULL) {
      (*worker_setup)(ctx, worker_index, worker_id);
    }

    sc = rtems_task_start(worker_id, worker_task, (rtems_task_argument) &warg);
    _Assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    _Assert(sc == RTEMS_SUCCESSFUL);
  }

  run_tests(ctx, jobs, job_count, 0);

  for (worker_index = 1; worker_index < ctx->worker_count; ++worker_index) {
    sc = rtems_task_delete(ctx->worker_ids[worker_index]);
    _Assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_timer_delete(ctx->stop_worker_timer_id);
  _Assert(sc == RTEMS_SUCCESSFUL);
}
Code example #23
File: init.c Project: gedare/rtems
static void test(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  rtems_id scheduler_id;
  rtems_id scheduler_a_id;
  rtems_id scheduler_b_id;
  rtems_id scheduler_c_id;
  rtems_task_priority prio;
  cpu_set_t cpuset;
  cpu_set_t first_cpu;
  cpu_set_t second_cpu;
  cpu_set_t all_cpus;
  cpu_set_t online_cpus;
  uint32_t cpu_count;

  rtems_test_assert(rtems_get_current_processor() == 0);

  cpu_count = rtems_get_processor_count();
  main_task_id = rtems_task_self();

  CPU_ZERO(&first_cpu);
  CPU_SET(0, &first_cpu);

  CPU_ZERO(&second_cpu);
  CPU_SET(1, &second_cpu);

  CPU_FILL(&all_cpus);

  CPU_ZERO(&online_cpus);
  CPU_SET(0, &online_cpus);

  if (cpu_count > 1) {
    CPU_SET(1, &online_cpus);
  }

  sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  if (cpu_count > 1) {
    sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_a_id != scheduler_b_id);
  }

  sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

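  /* Create a priority ceiling mutex (CMTX) and a priority inheritance
     mutex (IMTX) for the tests below. */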
  sc = rtems_semaphore_create(
    rtems_build_name('C', 'M', 'T', 'X'),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
    1,
    &cmtx_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_create(
    rtems_build_name('I', 'M', 'T', 'X'),
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    1,
    &imtx_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  prio = 2;
  sc = rtems_semaphore_set_priority(cmtx_id, scheduler_a_id, prio, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  if (cpu_count > 1) {
    prio = 1;
    sc = rtems_semaphore_set_priority(cmtx_id, scheduler_b_id, prio, &prio);
    rtems_test_assert(sc == RTEMS_NOT_DEFINED);
    rtems_test_assert(prio == 2);
  }

  CPU_ZERO(&cpuset);
  sc = rtems_scheduler_get_processor_set(
    scheduler_a_id,
    sizeof(cpuset),
    &cpuset
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  if (cpu_count > 1) {
    CPU_ZERO(&cpuset);
    sc = rtems_scheduler_get_processor_set(
      scheduler_b_id,
      sizeof(cpuset),
      &cpuset
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));
  }

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_a_id);

  CPU_ZERO(&cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &online_cpus));

  rtems_test_assert(sched_get_priority_min(SCHED_RR) == 1);
  rtems_test_assert(sched_get_priority_max(SCHED_RR) == 254);

  sc = rtems_task_set_scheduler(task_id, scheduler_c_id, 1);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  sc = rtems_task_set_scheduler(task_id, scheduler_c_id + 1, 1);
  rtems_test_assert(sc == RTEMS_INVALID_ID);

  if (cpu_count > 1) {
    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    CPU_ZERO(&cpuset);
    sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &online_cpus));

    sc = rtems_task_set_affinity(task_id, sizeof(all_cpus), &all_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_affinity(task_id, sizeof(first_cpu), &first_cpu);
    rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_task_set_affinity(task_id, sizeof(online_cpus), &online_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_affinity(task_id, sizeof(second_cpu), &second_cpu);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_a_id, 1);
    rtems_test_assert(sc == RTEMS_UNSATISFIED);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_semaphore_obtain(imtx_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(task_id, task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    /* Ensure that the other task waits for the mutex owned by us */
    sc = rtems_task_wake_after(2);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(RTEMS_SELF, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);

    sc = rtems_semaphore_release(imtx_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(cmtx_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(imtx_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  test_scheduler_add_remove_processors();
}
Code example #24
File: init.c Project: gedare/rtems
static void test_scheduler_add_remove_processors(void)
{
  rtems_status_code sc;
  rtems_id scheduler_a_id;
  rtems_id scheduler_c_id;

  sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_scheduler_add_processor(scheduler_c_id, 62);
  rtems_test_assert(sc == RTEMS_NOT_CONFIGURED);

  sc = rtems_scheduler_add_processor(scheduler_c_id, 63);
  rtems_test_assert(sc == RTEMS_INCORRECT_STATE);

  sc = rtems_scheduler_remove_processor(scheduler_c_id, 62);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

  sc = rtems_scheduler_remove_processor(scheduler_a_id, 0);
  rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);

  if (rtems_get_processor_count() > 1) {
    rtems_id scheduler_id;
    rtems_id scheduler_b_id;
    rtems_id task_id;
    cpu_set_t first_cpu;

    sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

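    /* Move processor 1 from scheduler B to scheduler A, then remove
       processor 0 from scheduler A and add it back again. */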
    sc = rtems_scheduler_remove_processor(scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_scheduler_add_processor(scheduler_a_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    rtems_test_assert(rtems_get_current_processor() == 0);

    sc = rtems_scheduler_remove_processor(scheduler_a_id, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    rtems_test_assert(rtems_get_current_processor() == 1);

    CPU_ZERO(&first_cpu);
    CPU_SET(0, &first_cpu);
    sc = rtems_scheduler_ident_by_processor_set(
      sizeof(first_cpu),
      &first_cpu,
      &scheduler_id
    );
    rtems_test_assert(sc == RTEMS_INCORRECT_STATE);

    sc = rtems_scheduler_add_processor(scheduler_a_id, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    rtems_test_assert(rtems_get_current_processor() == 1);

    sc = rtems_task_create(
      rtems_build_name('T', 'A', 'S', 'K'),
      2,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &task_id
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(task_id, sticky_task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    while (!ready) {
      /* Wait */
    }

    sc = rtems_scheduler_remove_processor(scheduler_a_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    rtems_test_assert(rtems_get_current_processor() == 0);

    sc = rtems_event_transient_send(task_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_delete(task_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_scheduler_add_processor(scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}
Code example #25
File: init.c Project: Avanznow/rtems
rtems_task Init(
  rtems_task_argument argument
)
{
  int                cpu_num;
  rtems_id           id;
  rtems_status_code  status;
  rtems_interval     per_second;
  rtems_interval     then;
  rtems_id           Timer;

  locked_print_initialize();
  rtems_test_begin_with_plugin(locked_printf_plugin, NULL);

  if ( rtems_get_processor_count() == 1 ) {
    success();
  }

  /* Create/verify semaphore */
  status = rtems_semaphore_create(
    rtems_build_name ('S', 'E', 'M', '1'),
    1,
    RTEMS_LOCAL                   |
    RTEMS_SIMPLE_BINARY_SEMAPHORE |
    RTEMS_PRIORITY,
    1,
    &Semaphore
  );
  directive_failed( status, "rtems_semaphore_create" );

  /* Lock semaphore */
  status = rtems_semaphore_obtain( Semaphore, RTEMS_WAIT, 0);
  directive_failed( status, "rtems_semaphore_obtain of SEM1\n" );

  /* Create and Start test task. */
  status = rtems_task_create(
    rtems_build_name( 'T', 'A', '1', ' ' ),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &id
  );
  directive_failed( status, "task create" );

  cpu_num = rtems_get_current_processor();
  locked_printf(" CPU %d start task TA1\n", cpu_num );
  status = rtems_task_start( id, Test_task, 1 );
  directive_failed( status, "task start" );

  /* Create and start TSR */
  locked_printf(" CPU %d create and start timer\n", cpu_num );
  status = rtems_timer_create( rtems_build_name( 'T', 'M', 'R', '1' ), &Timer);
  directive_failed( status, "rtems_timer_create" );

  per_second = rtems_clock_get_ticks_per_second();
  status = rtems_timer_fire_after( Timer, 2 * per_second, TimerMethod, NULL );
  directive_failed( status, "rtems_timer_fire_after");

  /*
   *  Wait long enough that TSR should have fired.
   *
   *  Spin so CPU 0 is consumed.  This forces task to run on CPU 1.
   */
  then = rtems_clock_get_ticks_since_boot() + 4 * per_second;
  while (1) {
    if ( rtems_clock_get_ticks_since_boot() > then )
      break;
    if ( TSRFired && TaskRan )
      break;
  }
  
  /* Validate the timer fired and that the task ran */
  if ( !TSRFired )
    locked_printf( "*** ERROR TSR DID NOT FIRE ***" );

  if ( !TaskRan ) {
    locked_printf( "*** ERROR TASK DID NOT RUN ***" );
    rtems_test_exit(0);
  }

  /* End the program */
  success();
}
Code example #26
File: init.c Project: gedare/rtems
void Validate_affinity(void )
{
  cpu_set_t            cpuset0;
  cpu_set_t            cpuset1;
  cpu_set_t            cpuset2;
  uint32_t             i;
  int                  sc;
  int                  cpu_count;
  rtems_task_priority  priority;
  char                 ch[2];

  puts( "Init - Set Init priority to high");
  sc = rtems_task_set_priority( Init_id, 1, &priority );
  directive_failed( sc, "Set Init Priority" );

  sc = rtems_task_get_affinity( Init_id, sizeof(cpu_set_t), &cpuset0 );
  directive_failed( sc, "Get Affinity of Init Task" );

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();

  /* Fill the remaining cpus with med priority tasks */
  puts( "Init - Create Medium priority tasks");
  for (i=0; i<(cpu_count-1); i++){
    sprintf(ch, "%01" PRIu32, i+1 );
    sc = rtems_task_create(
      rtems_build_name( 'C', 'P', 'U', ch[0] ),
      2,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &Med_id[i]
    );
    directive_failed( sc, "task create" );

    sc = rtems_task_start( Med_id[i], Task_1, i+1 );
    directive_failed( sc, "task start" );

    sc = rtems_task_get_affinity( Med_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Get Affinity of Medium Priority Task" );
    rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
  }

  /*
   * Create low priority thread for each remaining cpu with the affinity
   * set to only run on one cpu.
   */
  puts( "Init - Create  Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);

    sprintf(ch, "%01" PRIu32, (uint32_t) 0 );
    sc = rtems_task_create(
      rtems_build_name( 'X', 'T', 'R', ch[0] ),
      10,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &Low_id[i]
    );
    directive_failed( sc, "task create" );

    sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset1 );
    directive_failed( sc, "Low priority task set affinity" );

    sc = rtems_task_start( Low_id[i], Task_1, i+1 );
    directive_failed( sc, "task start" );
  }

  /* Verify affinity on low priority tasks */
  puts("Init - Verify affinity on Low priority tasks");
  for (i=0; i<cpu_count; i++){
    CPU_ZERO(&cpuset1);
    CPU_SET(i, &cpuset1);

    sc = rtems_task_get_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Low priority task get affinity" );
    rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }

  /* Change the affinity for each low priority task */
  puts("Init - Change affinity on Low priority tasks");
  CPU_COPY(&cpuset0, &cpuset1);
  for (i=0; i<cpu_count; i++){

    CPU_CLR(i, &cpuset1);
    sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset1 );

    /* Verify no cpu's are now set in the cpuset */
    if (i== (cpu_count-1)) {
      rtems_test_assert( sc == RTEMS_INVALID_NUMBER );
      sc = rtems_task_set_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset0 );
    }

    directive_failed( sc, "Low priority task set affinity" );
  }

  puts("Init - Validate affinity on Low priority tasks");
  CPU_COPY(&cpuset0, &cpuset1);
  for (i=0; i<cpu_count; i++){
    CPU_CLR(i, &cpuset1);

    sc = rtems_task_get_affinity( Low_id[i], sizeof(cpu_set_t), &cpuset2 );
    directive_failed( sc, "Low priority task get affinity" );
    if (i== (cpu_count-1))
      rtems_test_assert( CPU_EQUAL(&cpuset0, &cpuset2) );
    else
      rtems_test_assert( CPU_EQUAL(&cpuset1, &cpuset2) );
  }
}
Code example #27
File: init.c Project: heshamelmatary/rtems-gsoc2014
static void test(void)
{
  rtems_status_code   sc;
  rtems_task_argument i;
  size_t              size;
  uint32_t            cpu_count;
  rtems_task_priority priority;

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();
  if (cpu_count != 4) {
    printf("Test requires exactly 4 cores\n");
    return;
  }

  size = sizeof(cpu_set_t);
  task_data[0].id = rtems_task_self();
  printf("Create Semaphore\n");

  sc = rtems_semaphore_create(  
    rtems_build_name('S', 'E', 'M', '0'),
    1,                                               /* initial count = 1 */
    RTEMS_LOCAL                   |
    RTEMS_SIMPLE_BINARY_SEMAPHORE |
    RTEMS_NO_INHERIT_PRIORITY     |
    RTEMS_NO_PRIORITY_CEILING     |
    RTEMS_FIFO,
    0,
    &task_sem
  );  
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);


  /* Create and start tasks on each cpu with the appropriate affinity. */
  for (i = 1; i < TASK_COUNT; i++) {

      sc = rtems_task_create(
        rtems_build_name('T', 'A', '0', '0'+i),
        task_data[ i ].priority,
        RTEMS_MINIMUM_STACK_SIZE,
        RTEMS_DEFAULT_MODES,
        RTEMS_DEFAULT_ATTRIBUTES,
        &task_data[ i ].id
      );
      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  
      sc = rtems_task_set_affinity( 
        task_data[ i ].id, 
        size, 
        &task_data[i].cpuset
      );
      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
      
      printf(
        "Start TA%d at priority %d on cpu %d\n", 
         i, 
         task_data[i].priority, 
         task_data[i].expected_cpu
      );
      sc = rtems_task_start( task_data[ i ].id, task, i );
      rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  /* spin for 100 ticks */
  test_delay(100);
 
  verify_tasks();

  i = TASK_COUNT - 1;
  task_data[ i ].priority = 4;
  printf("Set TA%d priority %d\n", i,task_data[i].priority );
  sc = rtems_task_set_priority(
    task_data[ i ].id,
    task_data[ i ].priority,
    &priority
  );
  test_delay(25);

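  /* Busy-wait until the semaphore protecting the shared task data is
     available. */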
  while ( rtems_semaphore_obtain( task_sem, RTEMS_NO_WAIT, 0 ) != RTEMS_SUCCESSFUL )
    ;
  for (i = 0; i < TASK_COUNT; i++) {
    task_data[ i ].expected_cpu = task_data[ i ].migrate_cpu;
    task_data[ i ].actual_cpu = -1;
    task_data[ i ].ran = false;
  }
  rtems_semaphore_release(task_sem);
  test_delay(25);
  verify_tasks();
}
Code example #28
File: init.c Project: AlexShiLucky/rtems
static void Init(rtems_task_argument arg)
{
  test_context *ctx = &test_instance;
  rtems_status_code sc;
  rtems_resource_snapshot snapshot;
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu_index;

  TEST_BEGIN();

  rtems_resource_snapshot_take(&snapshot);

  sc = rtems_barrier_create(
    rtems_build_name('B', 'A', 'R', 'I'),
    RTEMS_BARRIER_AUTOMATIC_RELEASE,
    cpu_count,
    &ctx->barrier_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

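  /* Start one test task per secondary processor, each bound to its own
     scheduler instance. */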
  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    rtems_id scheduler_id;

    sc = rtems_task_create(
      rtems_build_name('T', 'A', 'S', 'K'),
      1,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &ctx->task_id[cpu_index]
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_scheduler_ident(SCHED_NAME(cpu_index), &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(ctx->task_id[cpu_index], scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(ctx->task_id[cpu_index], test_task, cpu_index);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  tests();

  barrier_wait(ctx);

  sc = rtems_barrier_delete(ctx->barrier_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  done(0);

  for (cpu_index = 1; cpu_index < cpu_count; ++cpu_index) {
    sc = rtems_task_delete(ctx->task_id[cpu_index]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    rtems_test_assert(ctx->cpu_index[cpu_index] == cpu_index);

    done(cpu_index);
  }

  rtems_test_assert(rtems_resource_snapshot_check(&snapshot));

  TEST_END();
  rtems_test_exit(0);
}
Code example #29
File: init.c Project: WattTech/rtems
static void Init(rtems_task_argument arg)
{
  rtems_status_code sc;
  uint32_t i;
  uint32_t cpu;
  uint32_t cpu_count;
  uint32_t read;
  uint32_t enter_count;
  uint32_t exit_count;
  uint32_t clock_tick_count;
  uint32_t res_should_be;
  rtems_name name;
  rtems_capture_record_t *recs;
  rtems_capture_record_t *prev_rec;
  empty_record_t *record;
  enter_add_number_record_t *enter_add_number_rec;
  exit_add_number_record_t *exit_add_number_rec;
  rtems_vector_number vec;
  clock_interrupt_handler cih = {.found = 0};

  TEST_BEGIN();

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();

  sc = rtems_capture_open(50000, NULL);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_capture_watch_global(true);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_capture_control(true);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Run main test */
  test(cpu_count);

  /* Try to find the clock interrupt handler */
  for ( vec=BSP_INTERRUPT_VECTOR_MIN; vec<BSP_INTERRUPT_VECTOR_MAX; vec++ ) {
    rtems_interrupt_handler_iterate(vec, locate_clock_interrupt_handler, &cih);
    if ( cih.found )
      break;
  }

  /* If we find the clock interrupt handler we replace it with
   * a wrapper and wait for a fixed number of ticks.
   */
  if ( cih.found ) {
#ifdef VERBOSE
    printf("Found a clock handler\n");
#endif
    org_clock_handler = cih.handler;
    rtems_interrupt_handler_install(vec, cih.info,
        cih.options | RTEMS_INTERRUPT_REPLACE, clock_tick_wrapper, cih.arg);

    rtems_task_wake_after(CLOCK_TICKS);
  }

  /* Disable capturing */
  sc = rtems_capture_control(false);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  clock_tick_count = 0;

  /* Read out the trace from all processors */
  for ( cpu = 0; cpu < cpu_count; cpu++ ) {
    sc = rtems_capture_read(cpu, &read, &recs);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    prev_rec = recs;
    enter_count = 0;
    exit_count = 0;
    res_should_be = 0;

    for ( i = 0; i < read; i++ ) {

      /* Verify that time goes forward */
      rtems_test_assert(recs->time>=prev_rec->time);

      if ( recs->events & RTEMS_CAPTURE_TIMESTAMP ) {
        record = (empty_record_t*)((char*) recs +
            sizeof(rtems_capture_record_t));

        switch ( record->id ) {
        case enter_add_number:
          rtems_test_assert(enter_count==exit_count);
          enter_count++;
          enter_add_number_rec = (enter_add_number_record_t*)record;
          res_should_be = add_number(enter_add_number_rec->a,
              enter_add_number_rec->b);
          rtems_object_get_classic_name(recs->task_id, &name);

#ifdef VERBOSE
          /* Print record */
          printf("Time: %"PRIu64"us Task: %4s => Add %"PRIu32" and"
              " %"PRIu32"\n",
              recs->time/1000,
              (char*)&name,
              enter_add_number_rec->a,
              enter_add_number_rec->b);
#endif
          break;
        case exit_add_number:
          rtems_test_assert(enter_count==exit_count+1);
          exit_count++;
          exit_add_number_rec = (exit_add_number_record_t*)record;
          /* Verify that the result matches the expected result */
          rtems_test_assert(res_should_be == exit_add_number_rec->res);

#ifdef VERBOSE
          /* Print record */
          rtems_object_get_classic_name(recs->task_id, &name);
          printf("Time: %"PRIu64"us Task: %4s => Result is %"PRIu32"\n",
              recs->time/1000,
              (char*)&name,
              exit_add_number_rec->res);
#endif
          break;
        case clock_tick:
          clock_tick_count++;
#ifdef VERBOSE
          rtems_object_get_classic_name(recs->task_id, &name);
          printf("Time: %"PRIu64"us Task: %4s => Clock tick\n",
              recs->time/1000,
              (char*)&name);
#endif
          break;
        default:
          rtems_test_assert(0);
        }
      }

      prev_rec = recs;
      recs = (rtems_capture_record_t*) ((char*) recs + recs->size);
    }

    rtems_test_assert(enter_count == exit_count);
    rtems_test_assert(enter_count == TASKS_PER_CPU * ITERATIONS);

    rtems_capture_release(cpu, read);
  }

  if( cih.found )
    rtems_test_assert(clock_tick_count == cpu_count * CLOCK_TICKS);

  TEST_END();
  rtems_test_exit(0);
}
Code example #30
File: init.c Project: greenmeent/rtems
static void test(void)
{
  rtems_status_code sc;
  rtems_id task_id;
  rtems_id scheduler_id;
  rtems_id scheduler_a_id;
  rtems_id scheduler_b_id;
  rtems_id scheduler_c_id;
  rtems_task_priority prio;
  cpu_set_t cpuset;
  cpu_set_t first_cpu;
  cpu_set_t second_cpu;
  cpu_set_t all_cpus;
  uint32_t cpu_count;

  main_task_id = rtems_task_self();

  CPU_ZERO(&first_cpu);
  CPU_SET(0, &first_cpu);

  CPU_ZERO(&second_cpu);
  CPU_SET(1, &second_cpu);

  CPU_ZERO(&all_cpus);
  CPU_SET(0, &all_cpus);
  CPU_SET(1, &all_cpus);

  cpu_count = rtems_get_processor_count();

  rtems_test_assert(rtems_get_current_processor() == 0);

  sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  if (cpu_count > 1) {
    sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_a_id != scheduler_b_id);
  }

  sc = rtems_scheduler_ident(SCHED_C, &scheduler_c_id);
  rtems_test_assert(sc == RTEMS_UNSATISFIED);

  sc = rtems_semaphore_create(
    SCHED_A,
    1,
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_PRIORITY_CEILING,
    1,
    &sema_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  prio = 2;
  sc = rtems_semaphore_set_priority(sema_id, scheduler_a_id, prio, &prio);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(prio == 1);

  if (cpu_count > 1) {
    prio = 1;
    sc = rtems_semaphore_set_priority(sema_id, scheduler_b_id, prio, &prio);
    rtems_test_assert(sc == RTEMS_NOT_DEFINED);
    rtems_test_assert(prio == 2);
  }

  CPU_ZERO(&cpuset);
  sc = rtems_scheduler_get_processor_set(
    scheduler_a_id,
    sizeof(cpuset),
    &cpuset
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  if (cpu_count > 1) {
    CPU_ZERO(&cpuset);
    sc = rtems_scheduler_get_processor_set(
      scheduler_b_id,
      sizeof(cpuset),
      &cpuset
    );
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));
  }

  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    1,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_task_get_scheduler(task_id, &scheduler_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(scheduler_id == scheduler_a_id);

  CPU_ZERO(&cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &first_cpu));

  rtems_test_assert(sched_get_priority_min(SCHED_RR) == 1);
  rtems_test_assert(sched_get_priority_max(SCHED_RR) == 254);

  if (cpu_count > 1) {
    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_b_id + 1, 1);
    rtems_test_assert(sc == RTEMS_INVALID_ID);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    CPU_ZERO(&cpuset);
    sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(CPU_EQUAL(&cpuset, &second_cpu));

    sc = rtems_task_set_affinity(task_id, sizeof(all_cpus), &all_cpus);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_affinity(task_id, sizeof(first_cpu), &first_cpu);
    rtems_test_assert(sc == RTEMS_INVALID_NUMBER);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_task_set_affinity(task_id, sizeof(second_cpu), &second_cpu);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_get_scheduler(task_id, &scheduler_id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(scheduler_id == scheduler_b_id);

    sc = rtems_task_start(task_id, task, 0);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_set_scheduler(task_id, scheduler_b_id, 1);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_semaphore_delete(sema_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
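
The recurring pattern in these examples: query the processor count once and
start one task per secondary processor. Below is a minimal sketch of that
pattern, assuming a classic API RTEMS SMP application; the names worker and
start_workers are hypothetical and not taken from the examples above.

#include <rtems.h>
#include <assert.h>

static rtems_task worker(rtems_task_argument arg)
{
  (void) arg;

  /* Per-processor work would go here */

  (void) rtems_task_delete(RTEMS_SELF);
}

static void start_workers(void)
{
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t i;

  /* The calling task occupies one processor; start one worker task for
     each of the remaining processors (names WRK1..WRK9, so at most ten
     processors are assumed in this sketch). */
  for (i = 1; i < cpu_count; ++i) {
    rtems_status_code sc;
    rtems_id id;

    sc = rtems_task_create(
      rtems_build_name('W', 'R', 'K', (char) ('0' + i)),
      1,
      RTEMS_MINIMUM_STACK_SIZE,
      RTEMS_DEFAULT_MODES,
      RTEMS_DEFAULT_ATTRIBUTES,
      &id
    );
    assert(sc == RTEMS_SUCCESSFUL);

    sc = rtems_task_start(id, worker, (rtems_task_argument) i);
    assert(sc == RTEMS_SUCCESSFUL);
  }
}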