Example No. 1
/*
 *  CallerName -- print the calling task's name or id as configured
 */
const char *CallerName(void)
{
  static char buffer[32];
  Thread_Control *executing = _Thread_Get_executing();
#if defined(TEST_PRINT_TASK_ID)
  sprintf( buffer, "0x%08x -- %d",
      rtems_task_self(), _Thread_Get_priority( executing ) );
#else
  volatile union {
    uint32_t u;
    unsigned char c[4];
  } TempName;

  #if defined(TEST_ON_RTEMS_45)
    TempName.u = *(uint32_t *)executing->Object.name;
  #else
    TempName.u = executing->Object.name.name_u32;
  #endif
    sprintf( buffer, "%c%c%c%c -- %" PRIdPriority_Control,
      TempName.c[0], TempName.c[1], TempName.c[2], TempName.c[3],
      _Thread_Get_priority( executing )
    );
#endif
  return buffer;
}
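A minimal sketch of how CallerName() might be used from a test task; the task name Test_task and the one-second delay are assumptions, not part of the original example.

#include <rtems.h>
#include <stdio.h>

rtems_task Test_task(rtems_task_argument arg)
{
  (void) arg;

  for (;;) {
    /* Identify the running task in the test output. */
    printf("running as %s\n", CallerName());
    (void) rtems_task_wake_after(rtems_clock_get_ticks_per_second());
  }
}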
Example No. 2
void AccessRemoteHw(void)
{
  rtems_status_code     Sts;

#if defined(TEST_PRINT_STATISTICS)
  rtems_task_priority   EnterPrio;   /* Statistics log */
  rtems_task_priority   AccessPrio;  /*      :         */
  rtems_task_priority   LeavePrio;   /*      :         */
  uint32_t              EnterCnt;    /*      :         */
  uint32_t              AccessCnt;   /*      :         */
  uint32_t              LeaveCnt;    /*      :         */
#endif

#if defined(TEST_PRINT_STATISTICS)
  /* Store information about the current situation */
  EnterPrio = _Thread_Get_priority( _Thread_Executing );
  EnterCnt  = _Thread_Executing->resource_count;
#endif

  printf("AccessRemoteHw called by %s\n", CallerName());

  /* Obtain exclusive access to remote HW, Start HW, Wait for completion,
   * Release access
   */

  Sts = rtems_semaphore_obtain(RemoteHwAccess_R, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  directive_failed( Sts, "rtems_semaphore_obtain(RemoteHwAccess_R...)" );

  /* Carry out the remote access via the Local HW interface */
  printf("AccessRemoteHw access local %s\n", CallerName());
  AccessLocalHw();

#if defined(TEST_PRINT_STATISTICS)
  /* Store information about the current situation */
  AccessPrio = _Thread_Get_priority( _Thread_Executing );
  AccessCnt  = _Thread_Executing->resource_count;
#endif

  Sts = rtems_semaphore_release(RemoteHwAccess_R);
  directive_failed( Sts, "rtems_semaphore_release(RemoreHwAccess_R" );

#if defined(TEST_PRINT_STATISTICS)
  /* Store information about the current situation */
  LeavePrio = _Thread_Get_priority( _Thread_Executing );
  LeaveCnt  = _Thread_Executing->resource_count;

  printf(
    "\nAccessRemoteHw from %s statistics:\n"
    " - Prio: %d -> %d -> %d\n - Cnt: %d -> %d -> %d\n",
    CallerName(),
    EnterPrio, AccessPrio, LeavePrio,
    EnterCnt, AccessCnt, LeaveCnt);
#endif

  printf("AccessRemoteHw returns to %s\n", CallerName());
  return;
}
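The RemoteHwAccess_R semaphore obtained above would typically be a binary semaphore with priority-ordered queueing and priority inheritance; a creation sketch follows (the name literal and the attribute choice are assumptions).

rtems_id RemoteHwAccess_R;   /* id used by AccessRemoteHw() above */

static void CreateRemoteHwAccessSemaphore(void)
{
  rtems_status_code Sts;

  Sts = rtems_semaphore_create(
    rtems_build_name('R', 'H', 'W', 'A'),
    1,                                  /* initially available */
    RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY,
    0,                                  /* priority ceiling not used */
    &RemoteHwAccess_R
  );
  directive_failed( Sts, "rtems_semaphore_create(RemoteHwAccess_R)" );
}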
Example No. 3
static void _Thread_queue_Priority_do_enqueue(
  Thread_queue_Heads *heads,
  Thread_Control     *the_thread
)
{
  Thread_queue_Priority_queue *priority_queue;
  Scheduler_Node              *scheduler_node;
  Priority_Control             current_priority;

  priority_queue = _Thread_queue_Priority_queue( heads, the_thread );

#if defined(RTEMS_SMP)
  if ( _RBTree_Is_empty( &priority_queue->Queue ) ) {
    _Chain_Append_unprotected( &heads->Heads.Fifo, &priority_queue->Node );
  }
#endif

  scheduler_node = _Scheduler_Thread_get_own_node( the_thread );
  current_priority = _Thread_Get_priority( the_thread );

  _RBTree_Initialize_node( &scheduler_node->Wait.Node.RBTree );
  _RBTree_Insert_inline(
    &priority_queue->Queue,
    &scheduler_node->Wait.Node.RBTree,
    &current_priority,
    _Thread_queue_Priority_less
  );
}
rtems_status_code rtems_task_set_priority(
  rtems_id             id,
  rtems_task_priority  new_priority,
  rtems_task_priority *old_priority_p
)
{
  Thread_Control          *the_thread;
  ISR_lock_Context         lock_context;
  const Scheduler_Control *scheduler;
  Priority_Control         old_priority;
  rtems_status_code        status;

  if ( old_priority_p == NULL ) {
    return RTEMS_INVALID_ADDRESS;
  }

  the_thread = _Thread_Get( id, &lock_context );

  if ( the_thread == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
    return _RTEMS_tasks_MP_Set_priority( id, new_priority, old_priority_p );
#else
    return RTEMS_INVALID_ID;
#endif
  }

  if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
    RTEMS_tasks_Set_priority_context  context;
    Per_CPU_Control                  *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
    _ISR_lock_ISR_enable( &lock_context );

    context.new_priority = new_priority;
    _Thread_Change_priority(
      the_thread,
      0,
      &context,
      _RTEMS_tasks_Set_priority_filter,
      false
    );

    _Thread_Dispatch_enable( cpu_self );
    scheduler = context.scheduler;
    old_priority = context.old_priority;
    status = context.status;
  } else {
    _Thread_State_acquire_critical( the_thread, &lock_context );
    scheduler = _Scheduler_Get_own( the_thread );
    old_priority = _Thread_Get_priority( the_thread );
    _Thread_State_release( the_thread, &lock_context );
    status = RTEMS_SUCCESSFUL;
  }

  *old_priority_p = _RTEMS_Priority_From_core( scheduler, old_priority );
  return status;
}
Scheduler_Void_or_thread _Scheduler_priority_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
  Scheduler_priority_Context *context;
  Scheduler_priority_Node    *node;
  unsigned int                priority;
  bool                        prepend_it;

  context = _Scheduler_priority_Get_context( scheduler );
  node = _Scheduler_priority_Thread_get_node( the_thread );
  priority = (unsigned int)
    _Scheduler_Node_get_priority( &node->Base, &prepend_it );
  (void) prepend_it;

  if ( priority != node->Ready_queue.current_priority ) {
    _Scheduler_priority_Ready_queue_update(
      &node->Ready_queue,
      priority,
      &context->Bit_map,
      &context->Ready[ 0 ]
    );
  }

  _Scheduler_priority_Ready_queue_enqueue(
    &the_thread->Object.Node,
    &node->Ready_queue,
    &context->Bit_map
  );

  /* TODO: flash critical section? */

  /*
   *  If the thread that was unblocked is more important than the heir,
   *  then we have a new heir.  This may or may not result in a
   *  context switch.
   *
   *  Normal case:
   *    If the current thread is preemptible, then we need to do
   *    a context switch.
   *  Pseudo-ISR case:
   *    Even if the thread isn't preemptible, if the new heir is
   *    a pseudo-ISR system task, we need to do a context switch.
   */
  if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
    _Scheduler_Update_heir( the_thread, priority == PRIORITY_PSEUDO_ISR );
  }

  SCHEDULER_RETURN_VOID_OR_NULL;
}
Example No. 6
static bool _Thread_queue_Priority_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Priority_Control *the_left;
  const Scheduler_Node   *scheduler_node;
  const Thread_Control   *the_right;

  the_left = left;
  scheduler_node = SCHEDULER_NODE_OF_WAIT_RBTREE_NODE( right );
  the_right = _Scheduler_Node_get_owner( scheduler_node );

  return *the_left < _Thread_Get_priority( the_right );
}
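SCHEDULER_NODE_OF_WAIT_RBTREE_NODE presumably maps the embedded red-black tree node (the one inserted by _Thread_queue_Priority_do_enqueue above) back to its enclosing Scheduler_Node; a sketch of that mapping with the generic container-of pattern, not the verbatim RTEMS definition:

#define SCHEDULER_NODE_OF_WAIT_RBTREE_NODE( node ) \
  RTEMS_CONTAINER_OF( node, Scheduler_Node, Wait.Node.RBTree )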
Example No. 7
Scheduler_Void_or_bool _Scheduler_simple_Unblock(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node
)
{
  Scheduler_simple_Context *context;
  Priority_Control          priority;

  (void) node;

  context = _Scheduler_simple_Get_context( scheduler );
  _Scheduler_simple_Insert_priority_fifo( &context->Ready, the_thread );
  priority = _Thread_Get_priority( the_thread );

  /*
   *  If the thread that was unblocked is more important than the heir,
   *  then we have a new heir.  This may or may not result in a
   *  context switch.
   *
   *  Normal case:
   *    If the current thread is preemptible, then we need to do
   *    a context switch.
   *  Pseudo-ISR case:
   *    Even if the thread isn't preemptible, if the new heir is
   *    a pseudo-ISR system task, we need to do a context switch.
   */
  if ( priority < _Thread_Get_priority( _Thread_Heir ) ) {
    _Scheduler_Update_heir(
      the_thread,
      priority == PRIORITY_PSEUDO_ISR
    );
  }

  SCHEDULER_RETURN_VOID_OR_BOOL;
}
Example No. 8
rtems_status_code rtems_task_set_priority(
  rtems_id             id,
  rtems_task_priority  new_priority,
  rtems_task_priority *old_priority_p
)
{
  Thread_Control          *the_thread;
  Thread_queue_Context     queue_context;
  const Scheduler_Control *scheduler;
  Priority_Control         old_priority;
  rtems_status_code        status;

  if ( old_priority_p == NULL ) {
    return RTEMS_INVALID_ADDRESS;
  }

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  the_thread = _Thread_Get( id, &queue_context.Lock_context.Lock_context );

  if ( the_thread == NULL ) {
#if defined(RTEMS_MULTIPROCESSING)
    return _RTEMS_tasks_MP_Set_priority( id, new_priority, old_priority_p );
#else
    return RTEMS_INVALID_ID;
#endif
  }

  _Thread_Wait_acquire_critical( the_thread, &queue_context );

  scheduler = _Thread_Scheduler_get_home( the_thread );
  old_priority = _Thread_Get_priority( the_thread );

  if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
    status = _RTEMS_tasks_Set_priority(
      the_thread,
      scheduler,
      new_priority,
      &queue_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
    status = RTEMS_SUCCESSFUL;
  }

  *old_priority_p = _RTEMS_Priority_From_core( scheduler, old_priority );
  return status;
}
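Both rtems_task_set_priority implementations shown above back the same classic API directive; a minimal usage sketch (the helper name, the task id parameter, and the priority value 10 are assumptions):

#include <rtems.h>

static rtems_task_priority raise_task(rtems_id task_id)
{
  rtems_task_priority previous;

  /* RTEMS_CURRENT_PRIORITY only queries: the current priority is returned
   * through the third argument and the task is left unchanged. */
  (void) rtems_task_set_priority( task_id, RTEMS_CURRENT_PRIORITY, &previous );

  /* Raise the task to priority 10 (lower numbers are more important). */
  (void) rtems_task_set_priority( task_id, 10, &previous );

  return previous;
}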
Example No. 9
void _Thread_Cancel(
  Thread_Control *the_thread,
  Thread_Control *executing,
  void           *exit_value
)
{
  ISR_lock_Context   lock_context;
  Thread_Life_state  previous;
  Per_CPU_Control   *cpu_self;
  Priority_Control   priority;

  _Assert( the_thread != executing );

  _Thread_State_acquire( the_thread, &lock_context );

  _Thread_Set_exit_value( the_thread, exit_value );
  previous = _Thread_Change_life_locked(
    the_thread,
    0,
    THREAD_LIFE_TERMINATING,
    0
  );

  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  priority = _Thread_Get_priority( executing );

  if ( _States_Is_dormant( the_thread->current_state ) ) {
    _Thread_State_release( the_thread, &lock_context );
    _Thread_Make_zombie( the_thread );
  } else if ( _Thread_Is_life_change_allowed( previous ) ) {
    _Thread_Add_life_change_request( the_thread );
    _Thread_State_release( the_thread, &lock_context );

    _Thread_Finalize_life_change( the_thread, priority );
  } else {
    _Thread_Add_life_change_request( the_thread );
    _Thread_Clear_state_locked( the_thread, STATES_SUSPENDED );
    _Thread_State_release( the_thread, &lock_context );

    _Thread_Raise_real_priority( the_thread, priority );
    _Thread_Remove_life_change_request( the_thread );
  }

  _Thread_Dispatch_enable( cpu_self );
}
Example No. 10
static bool _Thread_Raise_real_priority_filter(
  Thread_Control   *the_thread,
  Priority_Control *new_priority_ptr,
  void             *arg
)
{
  Priority_Control real_priority;
  Priority_Control new_priority;
  Priority_Control current_priority;

  real_priority = the_thread->real_priority;
  new_priority = *new_priority_ptr;
  current_priority = _Thread_Get_priority( the_thread );

  new_priority = _Thread_Priority_highest( real_priority, new_priority );
  *new_priority_ptr = new_priority;

  the_thread->real_priority = new_priority;

  return _Thread_Priority_less_than( current_priority, new_priority );
}
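This filter is presumably handed to _Thread_Change_priority by the _Thread_Raise_real_priority helper used in the _Thread_Cancel example above; a sketch mirroring the call pattern from the earlier rtems_task_set_priority example (the exact wrapper is an assumption):

static void _Thread_Raise_real_priority(
  Thread_Control   *the_thread,
  Priority_Control  priority
)
{
  /* The filter raises the thread's real priority to at least "priority" and
   * reports whether the scheduler must re-evaluate the thread. */
  _Thread_Change_priority(
    the_thread,
    priority,
    NULL,
    _Thread_Raise_real_priority_filter,
    false
  );
}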
Example No. 11
static bool _RTEMS_tasks_Set_priority_filter(
  Thread_Control   *the_thread,
  Priority_Control *new_priority_p,
  void             *arg
)
{
  RTEMS_tasks_Set_priority_context *context;
  const Scheduler_Control          *scheduler;
  bool                              valid;
  Priority_Control                  current_priority;
  Priority_Control                  new_priority;

  context = arg;
  scheduler = _Scheduler_Get_own( the_thread );
  current_priority = _Thread_Get_priority( the_thread );

  context->scheduler = scheduler;
  context->old_priority = current_priority;

  new_priority = _RTEMS_Priority_To_core(
    scheduler,
    context->new_priority,
    &valid
  );

  *new_priority_p = new_priority;

  if ( !valid ) {
    context->status = RTEMS_INVALID_PRIORITY;
    return false;
  }

  the_thread->real_priority = new_priority;
  context->status = RTEMS_SUCCESSFUL;

  return _Thread_Priority_less_than( current_priority, new_priority )
    || !_Thread_Owns_resources( the_thread );
}
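A plausible shape for the RTEMS_tasks_Set_priority_context passed through this filter, reconstructed from the fields it reads and writes (field order and exact types are assumptions):

typedef struct {
  const Scheduler_Control *scheduler;     /* out: the thread's own scheduler  */
  Priority_Control         old_priority;  /* out: priority before the change  */
  rtems_task_priority      new_priority;  /* in:  requested classic API value */
  rtems_status_code        status;        /* out: RTEMS_SUCCESSFUL or error   */
} RTEMS_tasks_Set_priority_context;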
Example No. 12
static void
rtems_bsd_dump_thread(Thread_Control *thread)
{
	const struct thread *td = rtems_bsd_get_thread(thread);

	if (td != NULL) {
		char buf[5];
		const char *name = td->td_name;

		if (name == NULL || name[0] == '\0') {
			rtems_object_get_name(thread->Object.id, sizeof(buf), &buf[0]);
			name = &buf[0];
		}

		fprintf(
			stdout,
			" 0x%08" PRIx32 " | %8" PRIu32 " | %s\n",
			thread->Object.id,
			_Thread_Get_priority(thread),
			name
		);
	}
}
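A sketch of how the dump routine above might be driven for every thread, reusing the iterator that the CPU-usage top thread in a later example relies on (the wrapper names and the header line are assumptions):

static void
rtems_bsd_dump_thread_visitor(Thread_Control *thread, void *arg)
{
	(void) arg;
	rtems_bsd_dump_thread(thread);
}

static void
rtems_bsd_dump_all_threads(void)
{
	fprintf(stdout, " ID         | PRIORITY | NAME\n");
	rtems_iterate_over_all_threads_2(rtems_bsd_dump_thread_visitor, NULL);
}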
Example No. 13
void AccessLocalHw(void)
{
  rtems_status_code     Sts;

#if defined(TEST_PRINT_STATISTICS)
  rtems_task_priority   AccessPrio;  /*      :         */
  uint32_t              AccessCnt;   /*      :         */
  rtems_task_priority   EnterPrio;   /* Statistics log */
  uint32_t              EnterCnt;    /*      :         */
  rtems_task_priority   LeavePrio;   /*      :         */
  uint32_t              LeaveCnt;    /*      :         */
#endif

#if defined(TEST_PRINT_STATISTICS)
  /* Store information about the current situation */
  EnterPrio = _Thread_Get_priority( _Thread_Executing );
  EnterCnt  = _Thread_Executing->resource_count;
#endif

  printf("  AccessLocalHw called by %s\n", CallerName());

  /* Obtain exclusive access to local HW, Start HW, Wait for completion,
   * Release access
   */

  Sts = rtems_semaphore_obtain(LocalHwAccess_R, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  directive_failed( Sts, "rtems_semaphore_obtain(LocalHwAccess_R...)" );

  StartHw = TRUE;

  Sts = rtems_semaphore_obtain(LocalHwSync_S, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  directive_failed( Sts, "rtems_semaphore_obtain(LocalHwAccess_R...)" );

#if defined(TEST_PRINT_STATISTICS)
  /* Store information about the current situation */
  AccessPrio = _Thread_Get_priority( _Thread_Executing );
  AccessCnt  = _Thread_Executing->resource_count;
#endif

  Sts = rtems_semaphore_release(LocalHwAccess_R);
  directive_failed( Sts, "rtems_semaphore_release(LocalHwAccess_R)" );

#if defined(TEST_PRINT_STATISTICS)
  /* Store information about the current situation */
  LeavePrio = _Thread_Get_priority( _Thread_Executing );
  LeaveCnt  = _Thread_Executing->resource_count;

  printf(
    "  AccessLocalHw from %s statistics:\n"
    " - Prio: %d -> %d -> %d\n - Cnt: %d -> %d -> %d\n",
    CallerName(),
    EnterPrio, AccessPrio, LeavePrio,
    EnterCnt, AccessCnt, LeaveCnt
  );
#endif

  printf("  AccessLocalHw returns to %s\n", CallerName());
  #if defined(TEST_EXIT_AFTER_ITERATIONS)
    if ( ++Iterations == 10 ) {
      TEST_END();
      exit(0);
    }
  #endif
  return;
}
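The LocalHwSync_S wait above only returns once something signals hardware completion; a sketch of such a counterpart (the handler name and the StartHw handshake are assumptions based on this example):

rtems_isr LocalHwIsr(rtems_vector_number vector)
{
  (void) vector;

  if (StartHw) {
    /* Hardware finished: let the task blocked in AccessLocalHw() continue. */
    StartHw = FALSE;
    (void) rtems_semaphore_release(LocalHwSync_S);
  }
}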
Example No. 14
static void
rtems_cpuusage_top_thread (rtems_task_argument arg)
{
  rtems_cpu_usage_data*  data = (rtems_cpu_usage_data*) arg;
  char                   name[13];
  int                    i;
  Heap_Information_block wksp;
  uint32_t               ival, fval;
  int                    task_count;
  rtems_event_set        out;
  rtems_status_code      sc;
  bool                   first_time = true;

  data->thread_active = true;

  _TOD_Get_uptime(&data->last_uptime);

  CPU_usage_Set_to_zero(&data->zero);

  while (data->thread_run)
  {
    Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
    size_t            tasks_size;
    size_t            usage_size;
    Timestamp_Control load;

    data->task_count = 0;
    rtems_iterate_over_all_threads_2(task_counter, data);

    tasks_size = sizeof(Thread_Control*) * (data->task_count + 1);
    usage_size = sizeof(Timestamp_Control) * (data->task_count + 1);

    if (data->task_count > data->task_size)
    {
      data->tasks = realloc(data->tasks, tasks_size);
      data->usage = realloc(data->usage, usage_size);
      data->current_usage = realloc(data->current_usage, usage_size);
      if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL))
      {
        rtems_printf(data->printer, "top worker: error: no memory\n");
        data->thread_run = false;
        break;
      }
    }

    memset(data->tasks, 0, tasks_size);
    memset(data->usage, 0, usage_size);
    memset(data->current_usage, 0, usage_size);

    _Timestamp_Set_to_zero(&data->total);
    _Timestamp_Set_to_zero(&data->current);
    data->stack_size = 0;

    _TOD_Get_uptime(&data->uptime);
    _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime);
    _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period);
    data->last_uptime = data->uptime;

    rtems_iterate_over_all_threads_2(task_usage, data);

    if (data->task_count > data->task_size)
    {
      data->last_tasks = realloc(data->last_tasks, tasks_size);
      data->last_usage = realloc(data->last_usage, usage_size);
      if ((data->last_tasks == NULL) || (data->last_usage == NULL))
      {
        rtems_printf(data->printer, "top worker: error: no memory\n");
        data->thread_run = false;
        break;
      }
      data->task_size = data->task_count;
    }

    memcpy(data->last_tasks, data->tasks, tasks_size);
    memcpy(data->last_usage, data->usage, usage_size);
    data->last_task_count = data->task_count;

    /*
     * We need to loop again to get suitable current usage values, since we
     * need a previous sample to work with.
     */
    if (first_time)
    {
      rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500));
      first_time = false;
      continue;
    }

    _Protected_heap_Get_information(&_Workspace_Area, &wksp);

    if (data->single_page)
      rtems_printf(data->printer,
                   "\x1b[H\x1b[J"
                   " ENTER:Exit  SPACE:Refresh"
                   "  S:Scroll  A:All  <>:Order  +/-:Lines\n");
    rtems_printf(data->printer, "\n");

    /*
     * Uptime and period of this sample.
     */
    rtems_printf(data->printer, "Uptime: ");
    print_time(data, &data->uptime, 20);
    rtems_printf(data->printer, " Period: ");
    print_time(data, &data->period, 20);

    /*
     * Task count, load and idle levels.
     */
    rtems_printf(data->printer, "\nTasks: %4i  ", data->task_count);

    _Timestamp_Subtract(&data->idle, &data->total, &load);
    _Timestamp_Divide(&load, &data->uptime, &ival, &fval);
    rtems_printf(data->printer,
                 "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
    _Timestamp_Subtract(&data->current_idle, &data->current, &load);
    _Timestamp_Divide(&load, &data->period, &ival, &fval);
    rtems_printf(data->printer,
                 "  Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
    _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval);
    rtems_printf(data->printer,
                 "  Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);

    /*
     * Memory usage.
     */
    if (rtems_configuration_get_unified_work_area())
    {
      rtems_printf(data->printer, "\nMem: ");
      print_memsize(data, wksp.Free.total, "free");
      print_memsize(data, wksp.Used.total, "used");
    }
    else
    {
      region_information_block libc_heap;
      malloc_info(&libc_heap);
      rtems_printf(data->printer, "\nMem: Wksp: ");
      print_memsize(data, wksp.Free.total, "free");
      print_memsize(data, wksp.Used.total, "used  Heap: ");
      print_memsize(data, libc_heap.Free.total, "free");
      print_memsize(data, libc_heap.Used.total, "used");
    }

    print_memsize(data, data->stack_size, "stack\n");

    rtems_printf(data->printer,
       "\n"
        " ID         | NAME                | RPRI | CPRI   | TIME                | TOTAL   | CURRENT\n"
        "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n",
       data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--"
    );

    task_count = 0;

    for (i = 0; i < data->task_count; i++)
    {
      Thread_Control*   thread = data->tasks[i];
      Timestamp_Control usage;
      Timestamp_Control current_usage;

      if (thread == NULL)
        break;

      if (data->single_page && (data->show != 0) && (i >= data->show))
        break;

      /*
       * We need to count the number displayed to clear the remainder of
       * the display.
       */
      ++task_count;

      /*
       * If the API is POSIX, print the entry point.
       */
      rtems_object_get_name(thread->Object.id, sizeof(name), name);
      if (name[0] == '\0')
        snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.Entry.Kinds.Numeric.entry);

      rtems_printf(data->printer,
                   " 0x%08" PRIx32 " | %-19s |  %3" PRId64 " |  %3" PRId64 "   | ",
                   thread->Object.id,
                   name,
                   thread->Real_priority.priority,
                   _Thread_Get_priority(thread));

      usage = data->usage[i];
      current_usage = data->current_usage[i];

      /*
       * Print the information
       */
      print_time(data, &usage, 19);
      _Timestamp_Divide(&usage, &data->total, &ival, &fval);
      rtems_printf(data->printer,
                   " |%4" PRIu32 ".%03" PRIu32, ival, fval);
      _Timestamp_Divide(&current_usage, &data->period, &ival, &fval);
      rtems_printf(data->printer,
                   " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval);
    }

    if (data->single_page && (data->show != 0) && (task_count < data->show))
    {
      i = data->show - task_count;
      while (i > 0)
      {
        rtems_printf(data->printer, "\x1b[K\n");
        i--;
      }
    }

    sc = rtems_event_receive(RTEMS_EVENT_1,
                             RTEMS_EVENT_ANY,
                             RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs),
                             &out);
    if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
    {
      rtems_printf(data->printer,
                   "error: event receive: %s\n", rtems_status_text(sc));
      break;
    }
  }

  free(data->tasks);
  free(data->last_tasks);
  free(data->last_usage);
  free(data->current_usage);

  data->thread_active = false;

  rtems_task_delete (RTEMS_SELF);
}
Example No. 15
/*
 * Create the sorted table with the current and total usage.
 */
static void
task_usage(Thread_Control* thread, void* arg)
{
  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
  Timestamp_Control     usage;
  Timestamp_Control     current = data->zero;
  int                   j;

  data->stack_size += thread->Start.Initial_stack.size;

  _Thread_Get_CPU_time_used(thread, &usage);

  for (j = 0; j < data->last_task_count; j++)
  {
    if (thread == data->last_tasks[j])
    {
      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
      break;
    }
  }

  /*
   * When not using nanosecond CPU usage resolution, we have to count the
   * number of "ticks" we gave credit for to give the user a rough guideline as
   * to what each number means proportionally.
   */
  _Timestamp_Add_to(&data->total, &usage);
  _Timestamp_Add_to(&data->current, &current);

  /* Object id 0x09010001 identifies the first internal (IDLE) thread. */
  if (thread->Object.id == 0x09010001)
  {
    data->idle = usage;
    data->current_idle = current;
  }

  /*
   * Create the task display list, sorting as we insert.
   */
  for (j = 0; j < data->task_count; j++)
  {
    if (data->tasks[j])
    {
      int k;

      /*
       * Sort on the current load.
       */
      switch (data->sort_order)
      {
        default:
          data->sort_order = RTEMS_TOP_SORT_CURRENT;
          /* drop through */
        case RTEMS_TOP_SORT_CURRENT:
          if (CPU_usage_Equal_to(&current, &data->zero) ||
              CPU_usage_Less_than(&current, &data->current_usage[j]))
            continue;
        case RTEMS_TOP_SORT_TOTAL:
          if (CPU_usage_Equal_to(&usage, &data->zero) ||
              CPU_usage_Less_than(&usage, &data->usage[j]))
            continue;
        case RTEMS_TOP_SORT_REAL_PRI:
          if (thread->Real_priority.priority > data->tasks[j]->Real_priority.priority)
            continue;
        case RTEMS_TOP_SORT_CURRENT_PRI:
          if (
            _Thread_Get_priority( thread )
              > _Thread_Get_priority( data->tasks[j] )
          ) {
            continue;
          }
        case RTEMS_TOP_SORT_ID:
          if (thread->Object.id < data->tasks[j]->Object.id)
            continue;
      }

      for (k = (data->task_count - 1); k >= j; k--)
      {
        data->tasks[k + 1] = data->tasks[k];
        data->usage[k + 1]  = data->usage[k];
        data->current_usage[k + 1]  = data->current_usage[k];
      }
    }
    data->tasks[j] = thread;
    data->usage[j] = usage;
    data->current_usage[j] = current;
    break;
  }
}