Example #1
rtems_task Floating_point_task_1(
  rtems_task_argument argument
)
{
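  /*
   * Records the time taken to restore the first FP context, then twice
   * hand-selects Floating_point_task_2 as the executing thread and times a
   * raw FP save/restore plus context switch to it.
   */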
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );
  Thread_Control             *executing;
  FP_DECLARE;

  context_switch_restore_1st_fp_time = benchmark_timer_read();

  executing = _Thread_Get_executing();

  set_thread_executing(
    (Thread_Control *) _Chain_First(&scheduler_context->Ready[FP2_PRIORITY])
  );

  /* do not force context switch */

  set_thread_dispatch_necessary( false );

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Get_executing()->fp_context );
#endif
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
  /* switch to Floating_point_task_2 */

  context_switch_save_idle_restore_initted_time = benchmark_timer_read();

  FP_LOAD( 1.0 );

  executing = _Thread_Get_executing();

  set_thread_executing(
    (Thread_Control *) _Chain_First(&scheduler_context->Ready[FP2_PRIORITY])
  );

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Get_executing()->fp_context );
#endif
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
  /* switch to Floating_point_task_2 */
}
Example #2
rtems_task Floating_point_task_1(
  rtems_task_argument argument
)
{
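  /*
   * Same measurement as the Floating_point_task_1 in Example #1, written
   * against the older interface that manipulates _Thread_Executing and
   * _Scheduler.information directly.
   */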
  Chain_Control   *ready_queues;
  Thread_Control  *executing;
  FP_DECLARE;

  context_switch_restore_1st_fp_time = benchmark_timer_read();

  executing = _Thread_Executing;

  ready_queues      = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
        (Thread_Control *) _Chain_First(&ready_queues[FP2_PRIORITY]);

  /* do not force context switch */

  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
    _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_2 */

  context_switch_save_idle_restore_initted_time = benchmark_timer_read();

  FP_LOAD( 1.0 );

  executing = _Thread_Executing;

  ready_queues      = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
        (Thread_Control *) _Chain_First(&ready_queues[FP2_PRIORITY]);

  /* do not force context switch */

  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
    _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_2 */
}
Example #3
rtems_task Floating_point_task_2(
  rtems_task_argument argument
)
{
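  /*
   * Records the save/restore-from-idle time, times one more raw FP
   * save/restore plus context switch back to Floating_point_task_1, then
   * records that time and reports the results via complete_test().
   */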
  Thread_Control *executing;
  FP_DECLARE;

  context_switch_save_restore_idle_time = benchmark_timer_read();

  executing = _Thread_Executing;

  _Thread_Executing =
       (Thread_Control *) _Thread_Ready_chain[FP1_PRIORITY].first;

  FP_LOAD( 1.0 );

  /* do not force context switch */

  _Context_Switch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
    _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_1 */

  context_switch_save_restore_initted_time = benchmark_timer_read();

  complete_test();
}
Example #4
void _Thread_Start_multitasking( void )
{
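  /*
   * Brings up multitasking: makes the heir thread the executing thread,
   * restores its floating point context by hand, and jumps into it via
   * _CPU_Start_multitasking() or _CPU_Context_Restart_self().
   */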
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control  *heir;

#if defined(RTEMS_SMP)
  _Per_CPU_State_change( cpu_self, PER_CPU_STATE_UP );

  /*
   * Threads begin execution in the _Thread_Handler() function.   This
   * function will set the thread dispatch disable level to zero.
   */
  cpu_self->thread_dispatch_disable_level = 1;
#endif

  heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

   /*
    * Get the init task(s) running.
    *
    * Note: Thread_Dispatch() is normally used to dispatch threads.  As
    *       part of its work, Thread_Dispatch() restores floating point
    *       state for the heir task.
    *
    *       This code avoids Thread_Dispatch(), and so we have to restore
    *       (actually initialize) the floating point state "by hand".
    *
    *       Ignore the CPU_USE_DEFERRED_FP_SWITCH because we must always
    *       switch in the first thread if it is FP.
    */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   /*
    *  don't need to worry about saving BSP's floating point state
    */

   if ( heir->fp_context != NULL )
     _Context_Restore_fp( &heir->fp_context );
#endif

  _Profiling_Thread_dispatch_disable( cpu_self, 0 );

#if defined(RTEMS_SMP)
  /*
   * The _CPU_Context_Restart_self() implementations usually assume that self
   * context is executing.
   *
   * FIXME: We have a race condition here in case another thread already
   * performed scheduler operations and moved our heir thread to another
   * processor.  The time frame for this is likely too small to be practically
   * relevant.
   */
  _CPU_Context_Set_is_executing( &heir->Registers, true );
#endif

#if defined(_CPU_Start_multitasking)
  _CPU_Start_multitasking( &heir->Registers );
#else
  _CPU_Context_Restart_self( &heir->Registers );
#endif
}
Example #5
rtems_task Low_task(
  rtems_task_argument argument
)
{
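  /*
   * Records the no-FP context switch time, times a context switch to self,
   * switches to the Middle task, records the task-to-task switch time when
   * it resumes, and finally performs a timed FP restore plus switch to
   * Floating_point_task_1.
   */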
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );
  Thread_Control             *executing;

  context_switch_no_fp_time = benchmark_timer_read();

  executing    = _Thread_Get_executing();

  Low_tcb = executing;

  benchmark_timer_initialize();
    _Context_Switch( &executing->Registers, &executing->Registers );

  context_switch_self_time = benchmark_timer_read();

  _Context_Switch(&executing->Registers, &Middle_tcb->Registers);

  context_switch_another_task_time = benchmark_timer_read();

  set_thread_executing(
    (Thread_Control *) _Chain_First(&scheduler_context->Ready[FP1_PRIORITY])
  );

  /* do not force context switch */

  set_thread_dispatch_necessary( false );

  thread_disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Restore_fp( &_Thread_Get_executing()->fp_context );
#endif
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
}
Example #6
void _Thread_Start_multitasking( void )
{
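  /*
   * Older uniprocessor variant: marks the system up, promotes the heir to
   * executing, restores its floating point context by hand, and switches
   * from the BSP context into the heir.
   */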
  /*
   *  The system is now multitasking and completely initialized.
   *  This system thread now "hides" in a single processor until
   *  the system is shut down.
   */

  _System_state_Set( SYSTEM_STATE_UP );

  _Thread_Dispatch_necessary = false;

  _Thread_Executing = _Thread_Heir;

   /*
    * Get the init task(s) running.
    *
    * Note: Thread_Dispatch() is normally used to dispatch threads.  As
    *       part of its work, Thread_Dispatch() restores floating point
    *       state for the heir task.
    *
    *       This code avoids Thread_Dispatch(), and so we have to restore
    *       (actually initialize) the floating point state "by hand".
    *
    *       Ignore the CPU_USE_DEFERRED_FP_SWITCH because we must always
    *       switch in the first thread if it is FP.
    */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   /*
    *  don't need to worry about saving BSP's floating point state
    */

   if ( _Thread_Heir->fp_context != NULL )
     _Context_Restore_fp( &_Thread_Heir->fp_context );
#endif

#if defined(_CPU_Start_multitasking)
  _CPU_Start_multitasking( &_Thread_BSP_context, &_Thread_Heir->Registers );
#else
  _Context_Switch( &_Thread_BSP_context, &_Thread_Heir->Registers );
#endif
}
Example #7
rtems_task Low_task(
  rtems_task_argument argument
)
{
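  /*
   * Same measurements as the Low_task in Example #5, using the older
   * interface that manipulates _Thread_Executing and _Scheduler.information
   * directly.
   */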
  Thread_Control *executing;
  Chain_Control  *ready_queues;

  ready_queues      = (Chain_Control *) _Scheduler.information;

  context_switch_no_fp_time = benchmark_timer_read();

  executing    = _Thread_Executing;

  Low_tcb = executing;

  benchmark_timer_initialize();
    _Context_Switch( &executing->Registers, &executing->Registers );

  context_switch_self_time = benchmark_timer_read();

  _Context_Switch(&executing->Registers, &Middle_tcb->Registers);

  context_switch_another_task_time = benchmark_timer_read();

  _Thread_Executing =
        (Thread_Control *) _Chain_First(&ready_queues[FP1_PRIORITY]);

  /* do not force context switch */

  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
    _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
}
Example #8
void _Thread_Life_action_handler(
  Thread_Control   *executing,
  Thread_Action    *action,
  ISR_lock_Context *lock_context
)
{
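  /*
   * Completes a thread restart or termination request: runs the user
   * extensions, turns a terminating thread into a zombie and dispatches
   * away from it, or reloads a restarting thread's environment, restores
   * its FP context, and restarts its register context.
   */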
  Thread_Life_state  previous_life_state;
  Per_CPU_Control   *cpu_self;

  (void) action;

  previous_life_state = executing->Life.state;
  executing->Life.state = previous_life_state | THREAD_LIFE_PROTECTED;

  _Thread_State_release( executing, lock_context );

  if ( _Thread_Is_life_terminating( previous_life_state ) ) {
    _User_extensions_Thread_terminate( executing );
  } else {
    _Assert( _Thread_Is_life_restarting( previous_life_state ) );

    _User_extensions_Thread_restart( executing );
  }

  cpu_self = _Thread_Dispatch_disable();

  if ( _Thread_Is_life_terminating( previous_life_state ) ) {
    cpu_self = _Thread_Wait_for_join( executing, cpu_self );

    _Thread_Make_zombie( executing );

    /* FIXME: Workaround for https://devel.rtems.org/ticket/2751 */
    cpu_self->dispatch_necessary = true;

    _Assert( cpu_self->heir != executing );
    _Thread_Dispatch_enable( cpu_self );
    RTEMS_UNREACHABLE();
  }

  _Assert( _Thread_Is_life_restarting( previous_life_state ) );

  _Thread_State_acquire( executing, lock_context );

  _Thread_Change_life_locked(
    executing,
    THREAD_LIFE_PROTECTED | THREAD_LIFE_RESTARTING,
    0,
    0
  );

  _Thread_State_release( executing, lock_context );

  _Assert(
    _Watchdog_Get_state( &executing->Timer.Watchdog ) == WATCHDOG_INACTIVE
  );
  _Assert(
    executing->current_state == STATES_READY
      || executing->current_state == STATES_SUSPENDED
  );

  _User_extensions_Destroy_iterators( executing );
  _Thread_Load_environment( executing );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( executing->fp_context != NULL ) {
    _Context_Restore_fp( &executing->fp_context );
  }
#endif

  _Context_Restart_self( &executing->Registers );
  RTEMS_UNREACHABLE();
}
Example #9
void _Thread_Dispatch( void )
{
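  /*
   * Dispatches the heir thread on the current processor: updates CPU time
   * statistics and libc reentrancy data, saves and restores FP context as
   * configured, performs the context switch, and runs the post-switch
   * actions when the calling thread resumes.
   */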
  Per_CPU_Control  *cpu_self;
  Thread_Control   *executing;
  ISR_Level         level;

#if defined( RTEMS_SMP )
  /*
   * On SMP the complete context switch must be atomic with respect to one
   * processor.  See also _Thread_Handler() since _Context_switch() may branch
   * to this function.
   */
  _ISR_Disable_without_giant( level );
#endif

  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
  cpu_self->thread_dispatch_disable_level = 1;

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = cpu_self->executing;

#if !defined( RTEMS_SMP )
  _ISR_Disable( level );
#endif

#if defined( RTEMS_SMP )
  if ( cpu_self->dispatch_necessary ) {
#else
  while ( cpu_self->dispatch_necessary ) {
#endif
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
      _Thread_Update_cpu_time_used(
        executing,
        &cpu_self->time_of_last_context_switch
      );
    #else
      {
        _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
        heir->cpu_time_used++;
      }
    #endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
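    /*
     * With deferred FP switching, _Thread_Allocated_fp names the thread
     * whose floating point state is currently loaded in the FP unit; the
     * state is saved and restored below only when the resuming thread owns
     * an FP context and is not the current holder of the FP unit.
     */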
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  }

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
Example #10
void _Thread_Dispatch( void )
{
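  /*
   * Older variant of _Thread_Dispatch built on the global _Thread_Executing
   * and _Thread_Heir variables and an explicit dispatch disable level.
   */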
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  #if defined(RTEMS_SMP)
    /*
     * WARNING: The SMP sequence has severe defects regarding the real-time
     * performance.
     *
     * Consider the following scenario.  We have three tasks L (lowest
     * priority), M (middle priority), and H (highest priority).  Now let a
     * thread dispatch from M to L happen.  An interrupt occurs in
     * _Thread_Dispatch() here:
     *
     * void _Thread_Dispatch( void )
     * {
     *   [...]
     *
     * post_switch:
     *
     *   _ISR_Enable( level );
     *
     *   <-- INTERRUPT
     *   <-- AFTER INTERRUPT
     *
     *   _Thread_Unnest_dispatch();
     *
     *   _API_extensions_Run_post_switch();
     * }
     *
     * The interrupt event makes task H ready.  The interrupt code will see
     * _Thread_Dispatch_disable_level > 0 and thus doesn't perform a
     * _Thread_Dispatch().  Now we return to position "AFTER INTERRUPT".  This
     * means task L executes now although task H is ready!  Task H will execute
     * once someone calls _Thread_Dispatch().
     */
    _Thread_Disable_dispatch();

    /*
     *  If necessary, send dispatch request to other cores.
     */
    _SMP_Request_other_cores_to_dispatch();
  #endif

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing   = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Thread_Dispatch_necessary == true ) {
    heir = _Thread_Heir;
    #ifndef RTEMS_SMP
      _Thread_Dispatch_set_disable_level( 1 );
    #endif
    _Thread_Dispatch_necessary = false;
    _Thread_Executing = heir;

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    _ISR_Enable( level );

    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
      {
        Timestamp_Control uptime, ran;
        _TOD_Get_uptime( &uptime );
        _Timestamp_Subtract(
          &_Thread_Time_of_last_context_switch,
          &uptime,
          &ran
        );
        _Timestamp_Add_to( &executing->cpu_time_used, &ran );
        _Thread_Time_of_last_context_switch = uptime;
      }
    #else
      {
        _TOD_Get_uptime( &_Thread_Time_of_last_context_switch );
        heir->cpu_time_used++;
      }
    #endif

    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

post_switch:
  #ifndef RTEMS_SMP
    _Thread_Dispatch_set_disable_level( 0 );
  #endif

  _ISR_Enable( level );

  #ifdef RTEMS_SMP
    _Thread_Unnest_dispatch();
  #endif

  _API_extensions_Run_post_switch( executing );
}