Example no. 1
0
/*
 * Walks the scheduler nodes of the thread and asks each associated
 * scheduler for help, stopping as soon as one scheduler grants the
 * request.  The chain is expected to contain at least the home node.
 */
void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread )
{
  Chain_Node       *current;
  const Chain_Node *end;

  current = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  end = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *sched_node;
    const Scheduler_Control *sched;
    ISR_lock_Context         ctx;
    bool                     helped;

    sched_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( current );
    sched = _Scheduler_Node_get_scheduler( sched_node );

    /* The ask-for-help operation must run under the scheduler lock. */
    _Scheduler_Acquire_critical( sched, &ctx );
    helped = ( *sched->Operations.ask_for_help )(
      sched,
      the_thread,
      sched_node
    );
    _Scheduler_Release_critical( sched, &ctx );

    /* The first scheduler that helps ends the search. */
    if ( helped ) {
      return;
    }

    current = _Chain_Next( current );
  } while ( current != end );
}
Example no. 2
0
/*
 * Sets the processor affinity of the thread through its home scheduler.
 *
 * Returns true if the requested affinity was accepted, otherwise false
 * (e.g. the cpu_set_t could not be converted to a processor mask).
 */
bool _Scheduler_Set_affinity(
  Thread_Control       *the_thread,
  size_t                cpusetsize,
  const cpu_set_t      *cpuset
)
{
  Processor_mask_Copy_status copy_status;
  Processor_mask             requested;
  const Scheduler_Control   *home_scheduler;
  Scheduler_Node            *home_node;
  ISR_lock_Context           lock_context;
  bool                       accepted;

  copy_status = _Processor_mask_From_cpu_set_t(
    &requested,
    cpusetsize,
    cpuset
  );

  /* Reject the request if the conversion lost more than tolerable. */
  if ( !_Processor_mask_Is_at_most_partial_loss( copy_status ) ) {
    return false;
  }

  /*
   * Reduce affinity set to the online processors to be in line with
   * _Thread_Initialize() which sets the default affinity to the set of online
   * processors.
   */
  _Processor_mask_And( &requested, _SMP_Get_online_processors(), &requested );

  home_scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( home_scheduler, &lock_context );

  home_node = _Thread_Scheduler_get_home_node( the_thread );
#if defined(RTEMS_SMP)
  accepted = ( *home_scheduler->Operations.set_affinity )(
    home_scheduler,
    the_thread,
    home_node,
    &requested
  );

  /* Record the new affinity only if the scheduler accepted it. */
  if ( accepted ) {
    _Processor_mask_Assign( &the_thread->Scheduler.Affinity, &requested );
  }
#else
  accepted = _Scheduler_default_Set_affinity_body(
    home_scheduler,
    the_thread,
    home_node,
    &requested
  );
#endif

  _Scheduler_Release_critical( home_scheduler, &lock_context );
  return accepted;
}
Example no. 3
0
/*
 * Resets the accumulated CPU time of one thread to zero while holding
 * both the thread state lock and the scheduler lock.
 */
static void CPU_usage_Per_thread_handler(
  Thread_Control *the_thread
)
{
  ISR_lock_Context         thread_lock_context;
  ISR_lock_Context         sched_lock_context;
  const Scheduler_Control *scheduler;

  /* Both locks protect a consistent view of the CPU usage value. */
  _Thread_State_acquire( the_thread, &thread_lock_context );
  scheduler = _Scheduler_Get( the_thread );
  _Scheduler_Acquire_critical( scheduler, &sched_lock_context );

  _Timestamp_Set_to_zero( &the_thread->cpu_time_used );

  /* Release in reverse acquisition order. */
  _Scheduler_Release_critical( scheduler, &sched_lock_context );
  _Thread_State_release( the_thread, &thread_lock_context );
}
Example no. 4
0
/*
 * Processes the pending scheduler node requests of the thread.
 *
 * Phase 1 (under the thread scheduler lock): detach the whole request
 * list atomically, then apply each ADD/REMOVE request to the thread's
 * Scheduler_nodes chain.  Removed nodes are collected on an intrusive
 * singly-linked list ("remove") instead of being withdrawn immediately.
 *
 * Phase 2 (after releasing the thread scheduler lock): withdraw each
 * collected node under its own scheduler lock.  The two phases exist to
 * respect the lock ordering — the per-scheduler lock is not taken while
 * the thread scheduler lock is held here.
 */
void _Thread_Scheduler_process_requests( Thread_Control *the_thread )
{
  ISR_lock_Context  lock_context;
  Scheduler_Node   *scheduler_node;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  scheduler_node = the_thread->Scheduler.requests;

  if ( scheduler_node != NULL ) {
    Scheduler_Node *next;
    Scheduler_Node *remove;

    /* Take over the request list; new requests may queue up again. */
    the_thread->Scheduler.requests = NULL;
    remove = NULL;

    do {
      Scheduler_Node_request request;

      /* Consume the request and mark the node as request-free. */
      request = scheduler_node->Thread.request;
      scheduler_node->Thread.request = SCHEDULER_NODE_REQUEST_NOT_PENDING;

      next = scheduler_node->Thread.next_request;
#if defined(RTEMS_DEBUG)
      /* Poison the link in debug builds to catch stale traversals. */
      scheduler_node->Thread.next_request = NULL;
#endif

      if ( request == SCHEDULER_NODE_REQUEST_ADD ) {
        /* The node becomes a helping node of the thread. */
        ++the_thread->Scheduler.helping_nodes;
        _Chain_Append_unprotected(
          &the_thread->Scheduler.Scheduler_nodes,
          &scheduler_node->Thread.Scheduler_node.Chain
        );
      } else if ( request == SCHEDULER_NODE_REQUEST_REMOVE ) {
        /*
         * Unlink the node now, but defer the scheduler withdraw until
         * the thread scheduler lock is released (phase 2).
         */
        --the_thread->Scheduler.helping_nodes;
        _Chain_Extract_unprotected(
          &scheduler_node->Thread.Scheduler_node.Chain
        );
        scheduler_node->Thread.Scheduler_node.next = remove;
        remove = scheduler_node;
      } else {
        _Assert( request == SCHEDULER_NODE_REQUEST_NOTHING );
      }

      scheduler_node = next;
    } while ( scheduler_node != NULL );

    _Thread_Scheduler_release_critical( the_thread, &lock_context );

    /* Phase 2: withdraw the collected nodes from their schedulers. */
    scheduler_node = remove;

    while ( scheduler_node != NULL ) {
      const Scheduler_Control *scheduler;
      ISR_lock_Context         lock_context;

      next = scheduler_node->Thread.Scheduler_node.next;
#if defined(RTEMS_DEBUG)
      scheduler_node->Thread.Scheduler_node.next = NULL;
#endif

      scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

      /* Each withdraw runs under the owning scheduler's lock. */
      _Scheduler_Acquire_critical( scheduler, &lock_context );
      ( *scheduler->Operations.withdraw_node )(
        scheduler,
        the_thread,
        scheduler_node,
        THREAD_SCHEDULER_READY
      );
      _Scheduler_Release_critical( scheduler, &lock_context );

      scheduler_node = next;
    }
  } else {
    _Thread_Scheduler_release_critical( the_thread, &lock_context );
  }
}