Example #1
void _Event_Timeout(
  Objects_Id  id,
  void       *ignored
)
{
  Thread_Control    *the_thread;
  Objects_Locations  location;
  ISR_Level          level;

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:

      /*
       *  If the event manager is not synchronized, then it is either
       *  "nothing happened", "timeout", or "satisfied".   If the_thread
       *  is the executing thread, then it is in the process of blocking
       *  and it is the thread which is responsible for the synchronization
       *  process.
       *
       *  If it is not satisfied, then it is "nothing happened" and
       *  this is the "timeout" transition.  After a request is satisfied,
       *  a timeout is not allowed to occur.
       */
      _ISR_Disable( level );
        #if defined(RTEMS_DEBUG)
          if ( !the_thread->Wait.count ) {  /* verify thread is waiting */
            _Thread_Unnest_dispatch();
            _ISR_Enable( level );
            return;
          }
        #endif

        the_thread->Wait.count = 0;
        if ( _Thread_Is_executing( the_thread ) ) {
          if ( _Event_Sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED )
            _Event_Sync_state = THREAD_BLOCKING_OPERATION_TIMEOUT;
        }

        the_thread->Wait.return_code = RTEMS_TIMEOUT;
      _ISR_Enable( level );
      _Thread_Unblock( the_thread );
      _Thread_Unnest_dispatch();
      break;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:  /* impossible */
#endif
    case OBJECTS_ERROR:
      break;
  }
}
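The comment in _Event_Timeout() above refers to a synchronization handshake with the thread that blocks waiting for the event: before blocking, that thread publishes THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED, and the timeout routine upgrades this to THREAD_BLOCKING_OPERATION_TIMEOUT only while the request is still unsatisfied and the thread is still in the middle of blocking. The fragment below is a simplified sketch of the blocking side of that handshake, for illustration only; it is not the actual _Event_Seize() code, and the state constant STATES_WAITING_FOR_EVENT plus the locals executing, sync_state, and level are assumptions made for the sketch.

  Thread_blocking_operation_States sync_state;

  executing = _Thread_Executing;

  _ISR_Disable( level );
    /* Publish "nothing happened" before giving up the processor. */
    _Event_Sync_state = THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;
  _ISR_Enable( level );

  /* Block; _Event_Timeout() may fire from a clock tick while we do so. */
  _Thread_Set_state( executing, STATES_WAITING_FOR_EVENT );

  _ISR_Disable( level );
    sync_state = _Event_Sync_state;
  _ISR_Enable( level );

  if ( sync_state == THREAD_BLOCKING_OPERATION_TIMEOUT ) {
    /* The timeout transition occurred; Wait.return_code is RTEMS_TIMEOUT. */
  }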
Example #2
File: init.c Project: lanzhongheng/rtems
/*
 *  Timer Service Routine
 *
 *  If we are in an ISR, then this is a normal clock tick.
 *  If we are not, then it is the test case.
 */
rtems_timer_service_routine test_unblock_task(
  rtems_id  timer,
  void     *arg
)
{
  bool              in_isr;
  rtems_status_code status;

  in_isr = rtems_interrupt_is_in_progress();
  status = rtems_task_is_suspended( blocked_task_id );
  if ( in_isr ) {
    status = rtems_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  }

  if ( (status != RTEMS_ALREADY_SUSPENDED) ) {
    status = rtems_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  }

  blocked_task_status = 2;
  _Thread_Disable_dispatch();
  status = rtems_task_resume( blocked_task_id );
  _Thread_Unnest_dispatch();
#if defined( RTEMS_SMP )
  directive_failed_with_level( status, "rtems_task_resume", 1 );
#else
  directive_failed( status, "rtems_task_resume" );
#endif
}
Example #3
/*
 *  Timer Service Routine
 *
 *  If we are in an ISR, then this is a normal clock tick.
 *  If we are not, then it is the test case.
 */
epos_timer_service_routine test_unblock_task(
  epos_id  timer,
  void     *arg
)
{
  bool              in_isr;
  epos_status_code status;

  in_isr = epos_interrupt_is_in_progress();
  status = epos_task_is_suspended( blocked_task_id );
  if ( in_isr ) {
    status = epos_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  }

  if ( (status != RTEMS_ALREADY_SUSPENDED) ) {
    status = epos_timer_fire_after( timer, 1, test_unblock_task, NULL );
    directive_failed( status, "timer_fire_after failed" );
    return;
  } 

  blocked_task_status = 2;
  _Thread_Disable_dispatch();
  status = epos_task_resume( blocked_task_id );
  _Thread_Unnest_dispatch();
  directive_failed( status, "epos_task_resume" );
}
Example #4
static inline void _Thread_Create_idle_helper(
  uint32_t name_u32,
  int      cpu
)
{
  Objects_Name    name;
  Thread_Control *idle;

  name.name_u32 = name_u32;

  /*
   *  The entire workspace is zeroed during its initialization.  Thus, all
   *  fields not explicitly assigned were explicitly zeroed by
   *  _Workspace_Initialization.
   */
  idle = _Thread_Internal_allocate();

  /*
   *  This is only called during initialization and we better be sure
   *  that when _Thread_Initialize unnests dispatch that we do not
   *  do anything stupid.
   */
  _Thread_Disable_dispatch();

  _Thread_Initialize(
    &_Thread_Internal_information,
    idle,
    NULL,        /* allocate the stack */
    _Stack_Ensure_minimum( Configuration.idle_task_stack_size ),
    CPU_IDLE_TASK_IS_FP,
    PRIORITY_MAXIMUM,
    true,        /* preemptable */
    THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,        /* no budget algorithm callout */
    0,           /* all interrupts enabled */
    name
  );

  _Thread_Unnest_dispatch();

  /*
   *  WARNING!!! This is necessary to "kick" start the system and
   *             MUST be done before _Thread_Start is invoked.
   */
  _Per_CPU_Information[ cpu ].idle      =
  _Per_CPU_Information[ cpu ].heir      =
  _Per_CPU_Information[ cpu ].executing = idle;

  _Thread_Start(
    idle,
    THREAD_START_NUMERIC,
    Configuration.idle_task,
    NULL,
    0
  );
}
Example #5
void _Objects_MP_Is_remote (
  Objects_Information  *information,
  Objects_Id            the_id,
  Objects_Locations    *location,
  Objects_Control     **the_object
)
{
  uint32_t            node;
  Chain_Control      *the_chain;
  Chain_Node         *the_node;
  Objects_MP_Control *the_global_object;

  node = _Objects_Get_node( the_id );

  /*
 *  NOTE: The local node was searched (if necessary) by
   *        _Objects_Name_to_id_XXX before this was invoked.
   *
   *        The NODE field of an object id cannot be 0
   *        because 0 is an invalid node number.
   */

  if ( node == 0 ||
       _Objects_Is_local_node( node ) ||
       node > _Objects_Maximum_nodes ||
       information->global_table == NULL ) {

    *location   = OBJECTS_ERROR;
    *the_object = NULL;
    return;
  }

  _Thread_Disable_dispatch();

  the_chain = &information->global_table[ node ];

  for ( the_node = _Chain_First( the_chain ) ;
        !_Chain_Is_tail( the_chain, the_node ) ;
        the_node = _Chain_Next( the_node ) ) {

    the_global_object = (Objects_MP_Control *) the_node;

    if ( _Objects_Are_ids_equal( the_global_object->Object.id, the_id ) ) {
      _Thread_Unnest_dispatch();
      *location   = OBJECTS_REMOTE;
      *the_object = (Objects_Control *) the_global_object;
      return;
    }
  }

  _Thread_Enable_dispatch();
  *location   = OBJECTS_ERROR;
  *the_object = NULL;

}
Example #6
File: task1.c Project: krohini1593/rtems
static void thread_resume( Thread_Control *thread )
{
#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _Thread_Disable_dispatch();
#endif

  _Thread_Clear_state( thread, STATES_SUSPENDED );

#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _Thread_Unnest_dispatch();
#endif
}
Example #7
File: task1.c Project: krohini1593/rtems
static void thread_ready( Thread_Control *thread )
{
#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _Thread_Disable_dispatch();
#endif

  _Thread_Ready( thread );

#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _Thread_Unnest_dispatch();
#endif
}
Example #8
File: task1.c Project: krohini1593/rtems
static void thread_set_state( Thread_Control *thread, States_Control state )
{
#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _Thread_Disable_dispatch();
#endif

  _Thread_Set_state( thread, state );

#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _Thread_Unnest_dispatch();
#endif
}
Example #9
File: init.c Project: AlexShiLucky/rtems
static Semaphore_Control *get_semaphore_control(rtems_id id)
{
  Objects_Locations location;
  Semaphore_Control *sem;

  sem = (Semaphore_Control *)
    _Objects_Get(&_Semaphore_Information, id, &location);
  _Thread_Unnest_dispatch();

  rtems_test_assert(sem != NULL && location == OBJECTS_LOCAL);

  return sem;
}
Example #10
File: init.c Project: Avanznow/rtems
static rtems_rate_monotonic_period_states getState(void)
{
  Objects_Locations       location;
  Rate_monotonic_Control *period;

  period = (Rate_monotonic_Control *)_Objects_Get(
    &_Rate_monotonic_Information, Period, &location );
  if ( location != OBJECTS_LOCAL ) {
    puts( "Bad object lookup" );
    rtems_test_exit(0);
  }
  _Thread_Unnest_dispatch();

  return period->state;
}
Example #11
File: init.c Project: ragunath3252/rtems
static Thread_blocking_operation_States getState(void)
{
    Objects_Locations  location;
    Semaphore_Control *sem;

    sem = (Semaphore_Control *)_Objects_Get(
              &_Semaphore_Information, Semaphore, &location );
    if ( location != OBJECTS_LOCAL ) {
        puts( "Bad object lookup" );
        rtems_test_exit(0);
    }
    _Thread_Unnest_dispatch();

    return sem->Core_control.semaphore.Wait_queue.sync_state;
}
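Examples #9 through #11 all rely on the same convention: when _Objects_Get() returns a local object, it leaves thread dispatching disabled, and the caller balances that either with _Thread_Unnest_dispatch() (drop the nesting level without dispatching) or with _Thread_Enable_dispatch() (allow a dispatch to occur immediately). The helper below is a minimal illustrative sketch of that pattern rather than code from the RTEMS sources; the name get_local_object is hypothetical.

static Objects_Control *get_local_object(
  Objects_Information *information,
  Objects_Id           id
)
{
  Objects_Locations  location;
  Objects_Control   *the_object;

  /* On success for a local object, _Objects_Get() disables dispatching. */
  the_object = _Objects_Get( information, id, &location );
  if ( location != OBJECTS_LOCAL ) {
    /* Error path: the dispatch level was not left raised. */
    return NULL;
  }

  /* Balance the implicit disable without performing a dispatch. */
  _Thread_Unnest_dispatch();

  return the_object;
}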
Example #12
void _POSIX_Thread_Evaluate_cancellation_and_enable_dispatch(
  Thread_Control    *the_thread
)
{
  POSIX_API_Control *thread_support;

  thread_support = the_thread->API_Extensions[ THREAD_API_POSIX ];

  if ( thread_support->cancelability_state == PTHREAD_CANCEL_ENABLE &&
       thread_support->cancelability_type == PTHREAD_CANCEL_ASYNCHRONOUS &&
       thread_support->cancelation_requested ) {
    _Thread_Unnest_dispatch();
    _POSIX_Thread_Exit( the_thread, PTHREAD_CANCELED );
  } else
    _Thread_Enable_dispatch();

}
Example #13
void _CORE_RWLock_Timeout(
  Objects_Id  id,
  void       *ignored
)
{
  Thread_Control       *the_thread;
  Objects_Locations     location;

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:  /* impossible */
#endif
      break;
    case OBJECTS_LOCAL:
      _Thread_queue_Process_timeout( the_thread );
      _Thread_Unnest_dispatch();
      break;
  }
}
Example #14
File: mpci.c Project: 0871087123/rtems
Thread_Control *_MPCI_Process_response (
  MP_packet_Prefix  *the_packet
)
{
  Thread_Control    *the_thread;
  Objects_Locations  location;

  the_thread = _Thread_Get( the_packet->id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
#endif
      the_thread = NULL;          /* IMPOSSIBLE */
      break;
    case OBJECTS_LOCAL:
      _Thread_queue_Extract( &_MPCI_Remote_blocked_threads, the_thread );
      the_thread->Wait.return_code = the_packet->return_code;
      _Thread_Unnest_dispatch();
    break;
  }

  return the_thread;
}
Example #15
int _POSIX_Condition_variables_Wait_support(
  pthread_cond_t            *cond,
  pthread_mutex_t           *mutex,
  Watchdog_Interval          timeout,
  bool                       already_timedout
)
{
  register POSIX_Condition_variables_Control *the_cond;
  Objects_Locations                           location;
  int                                         status;
  int                                         mutex_status;

  if ( !_POSIX_Mutex_Get( mutex, &location ) ) {
     return EINVAL;
  }

  _Thread_Unnest_dispatch();

  the_cond = _POSIX_Condition_variables_Get( cond, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:

      if ( the_cond->Mutex && ( the_cond->Mutex != *mutex ) ) {
        _Thread_Enable_dispatch();
        return EINVAL;
      }

      (void) pthread_mutex_unlock( mutex );
/* XXX ignore this for now  since behavior is undefined
      if ( mutex_status ) {
        _Thread_Enable_dispatch();
        return EINVAL;
      }
*/

      if ( !already_timedout ) {
        the_cond->Mutex = *mutex;

        _Thread_queue_Enter_critical_section( &the_cond->Wait_queue );
        _Thread_Executing->Wait.return_code = 0;
        _Thread_Executing->Wait.queue       = &the_cond->Wait_queue;
        _Thread_Executing->Wait.id          = *cond;

        _Thread_queue_Enqueue( &the_cond->Wait_queue, timeout );

        _Thread_Enable_dispatch();

        /*
         *  Switch ourself out because we blocked as a result of the
         *  _Thread_queue_Enqueue.
         */

        /*
         *  If the thread is interrupted, while in the thread queue, by
         *  a POSIX signal, then pthread_cond_wait returns spuriously,
         *  according to the POSIX standard. It means that pthread_cond_wait
         *  returns a success status, except for the fact that it was not
         *  woken up by a pthread_cond_signal or a pthread_cond_broadcast.
         */
        status = _Thread_Executing->Wait.return_code;
        if ( status == EINTR )
          status = 0;

      } else {
        _Thread_Enable_dispatch();
        status = ETIMEDOUT;
      }

      /*
       *  When we get here the dispatch disable level is 0.
       */

      mutex_status = pthread_mutex_lock( mutex );
      if ( mutex_status )
        return EINVAL;

      return status;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
#endif
    case OBJECTS_ERROR:
      break;
  }

  return EINVAL;
}
Example #16
void _Thread_Close(
  Objects_Information  *information,
  Thread_Control       *the_thread
)
{
  /*
   *  Now we are in a dispatching critical section again and we
   *  can take the thread OUT of the published set.  It is invalid
   *  to use this thread's Id after this call.  This will prevent
   *  any other task from attempting to initiate a call on this task.
   */
  _Objects_Invalidate_Id( information, &the_thread->Object );

  /*
   *  We assume the Allocator Mutex is locked when we get here.
   *  This provides sufficient protection to let the user extensions
   *  run but as soon as we get back, we will make the thread
   *  disappear and set a transient state on it.  So we temporarily
   *  unnest dispatching.
   */
  _Thread_Unnest_dispatch();

  _User_extensions_Thread_delete( the_thread );

  _Thread_Disable_dispatch();

  /*
   *  Now we are in a dispatching critical section again and we
   *  can take the thread OUT of the published set.  It is invalid
   *  to use this thread's Id OR name after this call.
   */
  _Objects_Close( information, &the_thread->Object );

  /*
   *  By setting the dormant state, the thread will not be considered
   *  for scheduling when we remove any blocking states.
   */
  _Thread_Set_state( the_thread, STATES_DORMANT );

  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
    if ( _Watchdog_Is_active( &the_thread->Timer ) )
      (void) _Watchdog_Remove( &the_thread->Timer );
  }

  /*
   * Free the per-thread scheduling information.
   */
  _Scheduler_Free( the_thread );

  /*
   *  The thread might have been FP.  So deal with that.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
  the_thread->fp_context = NULL;

  _Workspace_Free( the_thread->Start.fp_context );
#endif

  /*
   *  Free the rest of the memory associated with this task
   *  and set the associated pointers to NULL for safety.
   */
  _Thread_Stack_Free( the_thread );
  the_thread->Start.stack = NULL;

  _Workspace_Free( the_thread->extensions );
  the_thread->extensions = NULL;

  _Workspace_Free( the_thread->Start.tls_area );
}
Example #17
File: task1.c Project: krohini1593/rtems
void complete_test( void )
{
  uint32_t    index;
  rtems_id          task_id;

  benchmark_timer_initialize();
    thread_resume( Middle_tcb );
  thread_resume_time = benchmark_timer_read();

  thread_set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE );

  benchmark_timer_initialize();
    thread_unblock( Middle_tcb );
  thread_unblock_time = benchmark_timer_read();

  thread_set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE );

  benchmark_timer_initialize();
    thread_ready( Middle_tcb );
  thread_ready_time = benchmark_timer_read();

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) benchmark_timer_empty_function();
  overhead = benchmark_timer_read();

  task_id = Middle_tcb->Object.id;

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Thread_Get( task_id, &location );
  thread_get_time = benchmark_timer_read();

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Semaphore_Get( Semaphore_id, &location );
  semaphore_get_time = benchmark_timer_read();

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Thread_Get( 0x3, &location );
  thread_get_invalid_time = benchmark_timer_read();

  /*
   *  This is the running task and we have tricked RTEMS out enough where
   *  we need to set some internal tracking information to match this.
   */

  set_thread_heir( _Thread_Get_executing() );
  set_thread_dispatch_necessary( false );

  for (index = 0; index < 2 * OPERATION_COUNT; ++index) {
    _Thread_Unnest_dispatch();
  }

  /*
   *  Now dump all the times
   */

  put_time(
    "rtems interrupt: _ISR_Disable",
    isr_disable_time,
    1,
    0,
    0
  );

  put_time(
    "rtems interrupt: _ISR_Flash",
    isr_flash_time,
    1,
    0,
    0
  );

  put_time(
    "rtems interrupt: _ISR_Enable",
    isr_enable_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Disable_dispatch",
    thread_disable_dispatch_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Enable_dispatch",
    thread_enable_dispatch_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Set_state",
    thread_set_state_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Dispatch NO FP",
    thread_dispatch_no_fp_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: context switch: no floating point contexts",
    context_switch_no_fp_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: context switch: self",
    context_switch_self_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: context switch to another task",
    context_switch_another_task_time,
    1,
    0,
    0
  );

#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  put_time(
    "rtems internal: fp context switch restore 1st FP task",
    context_switch_restore_1st_fp_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: fp context switch save idle and restore initialized",
    context_switch_save_idle_restore_initted_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: fp context switch save idle, restore idle",
    context_switch_save_restore_idle_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: fp context switch save initialized, restore initialized",
    context_switch_save_restore_initted_time,
    1,
    0,
    0
  );
#else
    puts(
     "rtems internal: fp context switch restore 1st FP task - NA\n"
     "rtems internal: fp context switch save idle restore initialized - NA\n"
     "rtems internal: fp context switch save idle restore idle - NA\n"
     "rtems internal: fp context switch save initialized\n"
                      " restore initialized - NA"
   );
#endif

  put_time(
    "rtems internal: _Thread_Resume",
    thread_resume_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Unblock",
    thread_unblock_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Ready",
    thread_ready_time,
    1,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Get",
    thread_get_time,
    OPERATION_COUNT,
    0,
    0
  );

  put_time(
    "rtems internal: _Semaphore_Get",
    semaphore_get_time,
    OPERATION_COUNT,
    0,
    0
  );

  put_time(
    "rtems internal: _Thread_Get: invalid id",
    thread_get_invalid_time,
    OPERATION_COUNT,
    0,
    0
  );

  TEST_END();
  rtems_test_exit( 0 );
}
Example #18
File: task1.c Project: Avanznow/rtems
rtems_task Task_1(
  rtems_task_argument argument
)
{
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );
#if defined(RTEMS_SMP)
  rtems_interrupt_level level;
#endif

  Install_tm27_vector( Isr_handler );

  /*
   *  No preempt .. no nesting
   */

  Interrupt_nest = 0;

  Interrupt_occurred = 0;

  benchmark_timer_initialize();
  Cause_tm27_intr();
  /* goes to Isr_handler */

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 );
#endif
  Interrupt_return_time = benchmark_timer_read();

  put_time(
    "rtems interrupt: entry overhead returns to interrupted task",
    Interrupt_enter_time,
    1,
    0,
    timer_overhead
  );

  put_time(
    "rtems interrupt: exit overhead returns to interrupted task",
    Interrupt_return_time,
    1,
    0,
    timer_overhead
  );

  /*
   *  No preempt .. nested
   */

  _Thread_Disable_dispatch();

  Interrupt_nest = 1;

  Interrupt_occurred = 0;
  benchmark_timer_initialize();
  Cause_tm27_intr();
  /* goes to Isr_handler */

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 );
#endif
  Interrupt_return_time = benchmark_timer_read();

  _Thread_Unnest_dispatch();

  put_time(
    "rtems interrupt: entry overhead returns to nested interrupt",
    Interrupt_enter_nested_time,
    1,
    0,
    0
  );

  put_time(
    "rtems interrupt: exit overhead returns to nested interrupt",
    Interrupt_return_nested_time,
    1,
    0,
    0
  );

  /*
   *  Does a preempt .. not nested
   */

#if defined(RTEMS_SMP)
  _ISR_Disable_without_giant(level);
#endif

  _Thread_Executing =
        (Thread_Control *) _Chain_First(&scheduler_context->Ready[LOW_PRIORITY]);

  _Thread_Dispatch_necessary = 1;

#if defined(RTEMS_SMP)
  _ISR_Enable_without_giant(level);
#endif

  Interrupt_occurred = 0;
  benchmark_timer_initialize();
  Cause_tm27_intr();

  /*
   *  goes to Isr_handler and then returns
   */

  TEST_END();
  rtems_test_exit( 0 );
}
Example #19
void _Thread_Dispatch( void )
{
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  #if defined(RTEMS_SMP)
    /*
     * WARNING: The SMP sequence has severe defects regarding the real-time
     * performance.
     *
     * Consider the following scenario.  We have three tasks L (lowest
     * priority), M (middle priority), and H (highest priority).  Now let a
     * thread dispatch from M to L happen.  An interrupt occurs in
     * _Thread_Dispatch() here:
     *
     * void _Thread_Dispatch( void )
     * {
     *   [...]
     *
     * post_switch:
     *
     *   _ISR_Enable( level );
     *
     *   <-- INTERRUPT
     *   <-- AFTER INTERRUPT
     *
     *   _Thread_Unnest_dispatch();
     *
     *   _API_extensions_Run_post_switch();
     * }
     *
     * The interrupt event makes task H ready.  The interrupt code will see
     * _Thread_Dispatch_disable_level > 0 and thus doesn't perform a
     * _Thread_Dispatch().  Now we return to position "AFTER INTERRUPT".  This
     * means task L executes now although task H is ready!  Task H will execute
     * once someone calls _Thread_Dispatch().
     */
    _Thread_Disable_dispatch();

    /*
     *  If necessary, send dispatch request to other cores.
     */
    _SMP_Request_other_cores_to_dispatch();
  #endif

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing   = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Thread_Dispatch_necessary == true ) {
    heir = _Thread_Heir;
    #ifndef RTEMS_SMP
      _Thread_Dispatch_set_disable_level( 1 );
    #endif
    _Thread_Dispatch_necessary = false;
    _Thread_Executing = heir;

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    _ISR_Enable( level );

    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
      {
        Timestamp_Control uptime, ran;
        _TOD_Get_uptime( &uptime );
        _Timestamp_Subtract(
          &_Thread_Time_of_last_context_switch,
          &uptime,
          &ran
        );
        _Timestamp_Add_to( &executing->cpu_time_used, &ran );
        _Thread_Time_of_last_context_switch = uptime;
      }
    #else
      {
        _TOD_Get_uptime( &_Thread_Time_of_last_context_switch );
        heir->cpu_time_used++;
      }
    #endif

    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

post_switch:
  #ifndef RTEMS_SMP
    _Thread_Dispatch_set_disable_level( 0 );
  #endif

  _ISR_Enable( level );

  #ifdef RTEMS_SMP
    _Thread_Unnest_dispatch();
  #endif

  _API_extensions_Run_post_switch( executing );
}