Example #1
uint32_t _Thread_Dispatch_increment_disable_level( void )
{
  ISR_Level isr_level;
  uint32_t self_cpu_index;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable_without_giant( isr_level );

  /*
   * We must obtain the processor ID after interrupts are disabled to prevent
   * thread migration.
   */
  self_cpu_index = _SMP_Get_current_processor();

  _Giant_Do_acquire( self_cpu_index );

  self_cpu = _Per_CPU_Get_by_index( self_cpu_index );
  disable_level = self_cpu->thread_dispatch_disable_level;
  ++disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _ISR_Enable_without_giant( isr_level );

  return disable_level;
}
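The increment routine above pairs with _Thread_Dispatch_decrement_disable_level() (shown in Example #11). A minimal sketch of how such a pair might bracket a dispatch-disabled region follows; the wrapper function is hypothetical and only illustrates the balanced-call pattern.

/* Illustrative sketch, not taken from the examples: the wrapper name is hypothetical. */
static void hypothetical_dispatch_disabled_region( void )
{
  uint32_t disable_level;

  /* Raise the per-processor disable level; thread dispatching stays off. */
  disable_level = _Thread_Dispatch_increment_disable_level();
  (void) disable_level;

  /* ... work that must not be interrupted by a thread dispatch ... */

  /* Lower the level again; the caller decides what to do once it reaches zero. */
  disable_level = _Thread_Dispatch_decrement_disable_level();
  (void) disable_level;
}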
Example #2
uint32_t _Thread_Dispatch_set_disable_level(uint32_t value)
{
  ISR_Level isr_level;
  uint32_t disable_level;

  _ISR_Disable_without_giant( isr_level );
  disable_level = _Thread_Dispatch_disable_level;
  _ISR_Enable_without_giant( isr_level );

  /*
   * If we need the dispatch level to go higher,
   * call the increment method the required number of times.
   */

  while ( value > disable_level ) {
    disable_level = _Thread_Dispatch_increment_disable_level();
  }

  /*
   * If we need the dispatch level to go lower,
   * call the decrement method the required number of times.
   */

  while ( value < disable_level ) {
    disable_level = _Thread_Dispatch_decrement_disable_level();
  }

  return value;
}
Example #3
void _Giant_Release( void )
{
  ISR_Level isr_level;

  _ISR_Disable_without_giant( isr_level );
  _Assert( _Thread_Dispatch_disable_level != 0 );
  _Giant_Do_release( _Per_CPU_Get() );
  _ISR_Enable_without_giant( isr_level );
}
Example #4
void _Giant_Acquire( void )
{
  ISR_Level isr_level;

  _ISR_Disable_without_giant( isr_level );
  _Assert( _Thread_Dispatch_disable_level != 0 );
  _Giant_Do_acquire( _SMP_Get_current_processor() );
  _ISR_Enable_without_giant( isr_level );
}
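Both Giant routines assert that thread dispatching is already disabled, so any caller is expected to hold a non-zero dispatch disable level around the lock. A minimal sketch of the balanced acquire/release pattern, with a hypothetical wrapper name, could look like this:

/* Illustrative sketch only: assumes the caller already disabled thread dispatching. */
static void hypothetical_giant_protected_section( void )
{
  _Giant_Acquire();

  /* ... operate on data protected by the Giant lock ... */

  _Giant_Release();
}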
Example #5
static void standard_funcs_isrdisabled_test( size_t set_size,
    cpu_set_t *cpu_set, SMP_barrier_State *bs  )
{
  ISR_Level isr_level;

  _ISR_Disable_without_giant( isr_level );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  cache_manager_smp_functions( set_size, cpu_set );

  _ISR_Enable_without_giant( isr_level );
}
Example #6
static void set_thread_executing( Thread_Control *thread )
{
#if defined( PREVENT_SMP_ASSERT_FAILURES )
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  _Thread_Executing = thread;

#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _ISR_Enable_without_giant( level );
#endif
}
Example #7
static void thread_disable_dispatch( void )
{
/* Yes, RTEMS_SMP and not PREVENT_SMP_ASSERT_FAILURES */
#if defined( RTEMS_SMP )
  Per_CPU_Control *self_cpu;
  ISR_Level level;

  _ISR_Disable_without_giant( level );
  ( void ) level;

  self_cpu = _Per_CPU_Get();
  self_cpu->thread_dispatch_disable_level = 1;
#else
  _Thread_Disable_dispatch();
#endif
}
Example #8
void _Thread_Dispatch( void )
{
  ISR_Level        level;
  Per_CPU_Control *cpu_self;

  _ISR_Disable_without_giant( level );

  cpu_self = _Per_CPU_Get();

  if ( cpu_self->dispatch_necessary ) {
    _Profiling_Thread_dispatch_disable( cpu_self, 0 );
    cpu_self->thread_dispatch_disable_level = 1;
    _Thread_Do_dispatch( cpu_self, level );
  } else {
    _ISR_Enable_without_giant( level );
  }
}
Example #9
static void set_thread_dispatch_necessary( bool dispatch_necessary )
{
#if defined( PREVENT_SMP_ASSERT_FAILURES )
  ISR_Level level;

  _ISR_Disable_without_giant( level );
#endif

  _Thread_Dispatch_necessary = dispatch_necessary;

  if ( !dispatch_necessary ) {
    _Thread_Heir = _Thread_Executing;
  }

#if defined( PREVENT_SMP_ASSERT_FAILURES )
  _ISR_Enable_without_giant( level );
#endif
}
Example #10
static void test_func_isrdisabled_test( size_t set_size, cpu_set_t *cpu_set,
    SMP_barrier_State *bs )
{
  ISR_Level isr_level;

  ctx.count[rtems_get_current_processor()] = 0;
  _ISR_Disable_without_giant( isr_level );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );

  _ISR_Enable_without_giant( isr_level );

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  rtems_test_assert( ctx.count[rtems_get_current_processor()] ==
      rtems_get_processor_count() );
}
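Examples #5 and #10 share the same test rendezvous: every processor disables interrupts, waits at an SMP barrier, and only then runs the code under test. A stripped-down sketch of that pattern, with the per-processor work left as a hypothetical placeholder, is:

/* Illustrative sketch only: the body of the critical work is hypothetical. */
static void hypothetical_isr_disabled_rendezvous( SMP_barrier_State *bs )
{
  ISR_Level isr_level;

  _ISR_Disable_without_giant( isr_level );

  /* Rendezvous with all other processors before starting the work. */
  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  /* ... per-processor work that must run with interrupts disabled ... */

  _ISR_Enable_without_giant( isr_level );
}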
Example #11
uint32_t _Thread_Dispatch_decrement_disable_level( void )
{
  ISR_Level isr_level;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable_without_giant( isr_level );

  self_cpu = _Per_CPU_Get();
  disable_level = self_cpu->thread_dispatch_disable_level;
  --disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _Giant_Do_release( self_cpu );
  _Assert( disable_level != 0 || _Giant.owner_cpu == NO_OWNER_CPU );

  _ISR_Enable_without_giant( isr_level );

  return disable_level;
}
Example #12
rtems_task Task_1(
  rtems_task_argument argument
)
{
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );
#if defined(RTEMS_SMP)
  rtems_interrupt_level level;
#endif

  Install_tm27_vector( Isr_handler );

  /*
   *  No preempt .. no nesting
   */

  Interrupt_nest = 0;

  Interrupt_occurred = 0;

  benchmark_timer_initialize();
  Cause_tm27_intr();
  /* goes to Isr_handler */

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 );
#endif
  Interrupt_return_time = benchmark_timer_read();

  put_time(
    "rtems interrupt: entry overhead returns to interrupted task",
    Interrupt_enter_time,
    1,
    0,
    timer_overhead
  );

  put_time(
    "rtems interrupt: exit overhead returns to interrupted task",
    Interrupt_return_time,
    1,
    0,
    timer_overhead
  );

  /*
   *  No preempt .. nested
   */

  _Thread_Disable_dispatch();

  Interrupt_nest = 1;

  Interrupt_occurred = 0;
  benchmark_timer_initialize();
  Cause_tm27_intr();
  /* goes to Isr_handler */

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 );
#endif
  Interrupt_return_time = benchmark_timer_read();

  _Thread_Unnest_dispatch();

  put_time(
    "rtems interrupt: entry overhead returns to nested interrupt",
    Interrupt_enter_nested_time,
    1,
    0,
    0
  );

  put_time(
    "rtems interrupt: exit overhead returns to nested interrupt",
    Interrupt_return_nested_time,
    1,
    0,
    0
  );

  /*
   *  Does a preempt .. not nested
   */

#if defined(RTEMS_SMP)
  _ISR_Disable_without_giant(level);
#endif

  _Thread_Executing =
        (Thread_Control *) _Chain_First(&scheduler_context->Ready[LOW_PRIORITY]);

  _Thread_Dispatch_necessary = 1;

#if defined(RTEMS_SMP)
  _ISR_Enable_without_giant(level);
#endif

  Interrupt_occurred = 0;
  benchmark_timer_initialize();
  Cause_tm27_intr();

  /*
   *  goes to Isr_handler and then returns
   */

  TEST_END();
  rtems_test_exit( 0 );
}
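Each measurement in the task above repeats the same sequence: clear the flag, start the benchmark timer, raise the tm27 interrupt, optionally busy-wait for the handler, and read the timer. A hypothetical helper that factors out this sequence might look like:

/* Illustrative sketch only: this helper does not exist in the test. */
static uint32_t hypothetical_measure_interrupt_path( void )
{
  Interrupt_occurred = 0;

  benchmark_timer_initialize();
  Cause_tm27_intr();
  /* goes to Isr_handler */

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 )
    ;
#endif

  return benchmark_timer_read();
}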
Example #13
void _Thread_Dispatch( void )
{
  Per_CPU_Control  *cpu_self;
  Thread_Control   *executing;
  ISR_Level         level;

#if defined( RTEMS_SMP )
  /*
   * On SMP the complete context switch must be atomic with respect to one
   * processor.  See also _Thread_Handler() since _Context_switch() may branch
   * to this function.
   */
  _ISR_Disable_without_giant( level );
#endif

  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
  cpu_self->thread_dispatch_disable_level = 1;

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = cpu_self->executing;

#if !defined( RTEMS_SMP )
  _ISR_Disable( level );
#endif

#if defined( RTEMS_SMP )
  if ( cpu_self->dispatch_necessary ) {
#else
  while ( cpu_self->dispatch_necessary ) {
#endif
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
      _Thread_Update_cpu_time_used(
        executing,
        &cpu_self->time_of_last_context_switch
      );
    #else
      {
        _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
        heir->cpu_time_used++;
      }
    #endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  }

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
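All of the examples above follow the same discipline: every _ISR_Disable_without_giant() call is matched by an _ISR_Enable_without_giant() call that receives the same level value, and the protected section stays as short as possible. A minimal sketch of that core pattern, with a hypothetical function name, is:

/* Illustrative sketch only: the shortest form of the pattern used throughout. */
static void hypothetical_short_critical_section( void )
{
  ISR_Level level;

  _ISR_Disable_without_giant( level );

  /* ... a few instructions that must not be interrupted ... */

  _ISR_Enable_without_giant( level );
}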