Code Example #1
static void _Scheduler_default_Tick_for_executing(
  Scheduler_Control *scheduler,
  Thread_Control *executing
)
{
  #ifdef __RTEMS_USE_TICKS_FOR_STATISTICS__
    /*
     *  Increment the number of ticks this thread has been executing
     */
    executing->cpu_time_used++;
  #endif

  /*
   *  If the thread is not preemptible or is not ready, then
   *  just return.
   */

  if ( !executing->is_preemptible )
    return;

  if ( !_States_Is_ready( executing->current_state ) )
    return;

  /*
   *  The cpu budget algorithm determines what happens next.
   */

  switch ( executing->budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
    #endif
      if ( (int)(--executing->cpu_time_budget) <= 0 ) {

        /*
         *  A yield performs the ready chain mechanics needed when
         *  resetting a timeslice.  If no other threads are ready
         *  at the priority of the currently executing thread, then the
         *  executing thread's timeslice is reset.  Otherwise, the
         *  currently executing thread is placed at the rear of the
         *  FIFO for this priority and a new heir is selected.
         */
        _Scheduler_Yield( scheduler, executing );
        executing->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
      }
      break;

    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
	if ( --executing->cpu_time_budget == 0 )
	  (*executing->budget_callout)( executing );
	break;
    #endif
  }
}
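
The tick handler above consumes a per-thread budget that ultimately comes from the application configuration. As a minimal, illustrative sketch (the values are arbitrary and assume the standard <rtems/confdefs.h> configuration mechanism), the value returned by rtems_configuration_get_ticks_per_timeslice() would be established like this:

/*
 * Illustrative application configuration (values are examples only).
 * With these settings rtems_configuration_get_ticks_per_timeslice()
 * returns 50, i.e. a 500 ms timeslice built from 10 ms clock ticks.
 */
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_MICROSECONDS_PER_TICK     10000  /* 10 ms clock tick */
#define CONFIGURE_TICKS_PER_TIMESLICE          50  /* 50 ticks per timeslice */
#define CONFIGURE_MAXIMUM_TASKS                 4
#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
#define CONFIGURE_INIT
#include <rtems/confdefs.h>
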
Code Example #2
/* Clock_exit --
 *     This shuts down the timer if it was enabled and removes it
 *     from the MCF5272 interrupt mask.
 *
 * PARAMETERS:
 *     none
 *
 * RETURNS:
 *     none
 */
void
Clock_exit(void)
{
    if (rtems_configuration_get_ticks_per_timeslice()) {
        uint32_t icr;
        /* disable all timer1 interrupts */
        icr = g_intctrl_regs->icr1;
        icr = icr & ~(MCF5272_ICR1_TMR1_MASK | MCF5272_ICR1_TMR1_PI);
        icr |= (MCF5272_ICR1_TMR1_IPL(0) | MCF5272_ICR1_TMR1_PI);
        g_intctrl_regs->icr1 = icr;

        /* reset timer1 */
        g_timer_regs->tmr1 = MCF5272_TMR_CLK_STOP;

        /* clear pending */
        g_timer_regs->ter1 = MCF5272_TER_REF | MCF5272_TER_CAP;
    }
}
Code Example #3
/* Install_clock --
 *     This initialises timer1 with the BSP microseconds-per-tick value
 *     as a reference and sets up the interrupt handler for clock ticks.
 *
 * PARAMETERS:
 *     clock_isr - clock interrupt handler routine
 *
 * RETURNS:
 *     none.
 */
static void
Install_clock(rtems_isr_entry clock_isr)
{
  uint32_t icr;
  Clock_driver_ticks = 0;
  if (rtems_configuration_get_ticks_per_timeslice()) {

      /* Register the interrupt handler */
      set_vector(clock_isr, BSP_INTVEC_TMR1, 1);

      /* Reset timer 1 */
      g_timer_regs->tmr1 = MCF5272_TMR_RST;
      g_timer_regs->tmr1 = MCF5272_TMR_CLK_STOP;
      g_timer_regs->tmr1 = MCF5272_TMR_RST;
      g_timer_regs->tcn1 = 0;  /* reset counter */
      g_timer_regs->ter1 = MCF5272_TER_REF | MCF5272_TER_CAP;

      /* Set Timer 1 prescaler so that it counts in microseconds */
      g_timer_regs->tmr1 = (
          ((((BSP_SYSTEM_FREQUENCY / 1000000) - 1) << MCF5272_TMR_PS_SHIFT) |
           MCF5272_TMR_CE_DISABLE                                      |
           MCF5272_TMR_ORI                                             |
           MCF5272_TMR_FRR                                             |
           MCF5272_TMR_CLK_MSTR                                        |
           MCF5272_TMR_RST));

      /* Set the timer timeout value from the BSP config */
      g_timer_regs->trr1 = rtems_configuration_get_microseconds_per_tick() - 1;

      /* Feed system frequency to the timer */
      g_timer_regs->tmr1 |= MCF5272_TMR_CLK_MSTR;

      /* Configure timer1 interrupts */
      icr = g_intctrl_regs->icr1;
      icr = icr & ~(MCF5272_ICR1_TMR1_MASK | MCF5272_ICR1_TMR1_PI);
      icr |= (MCF5272_ICR1_TMR1_IPL(BSP_INTLVL_TMR1) | MCF5272_ICR1_TMR1_PI);
      g_intctrl_regs->icr1 = icr;

      /* Register the driver exit procedure so we can shutdown */
      atexit(Clock_exit);
  }
}
Code Example #4
int sched_rr_get_interval(
  pid_t             pid,
  struct timespec  *interval
)
{
  /*
   *  Only supported for the "calling process" (i.e. this node).
   */

  if ( pid && pid != getpid() )
    rtems_set_errno_and_return_minus_one( ESRCH );

  if ( !interval )
    rtems_set_errno_and_return_minus_one( EINVAL );

  _Timespec_From_ticks(
    rtems_configuration_get_ticks_per_timeslice(),
    interval
  );

  return 0;
}
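
As a small usage sketch (the helper name print_rr_quantum is hypothetical, not part of the sources above), an application can read the configured timeslice back through this standard POSIX interface:

#include <sched.h>
#include <stdio.h>
#include <time.h>

static void print_rr_quantum(void)
{
  struct timespec quantum;

  /* pid 0 denotes the calling process, the only value RTEMS accepts here. */
  if (sched_rr_get_interval(0, &quantum) == 0) {
    printf(
      "round-robin quantum: %ld s, %ld ns\n",
      (long) quantum.tv_sec,
      (long) quantum.tv_nsec
    );
  }
}
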
Code Example #5
File: threadinitialize.c  Project: WattTech/rtems
bool _Thread_Initialize(
  Thread_Information                   *information,
  Thread_Control                       *the_thread,
  const Scheduler_Control              *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    void                  *fp_area = NULL;
  #endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  memset(
    &the_thread->current_state,
    0,
    information->Objects.size - offsetof( Thread_Control, current_state )
  );

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Allocate and Initialize the stack for this thread.
   */
  #if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
  #else
    if ( !stack_area ) {
      actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
      if ( !actual_stack_size || actual_stack_size < stack_size )
        return false;                     /* stack allocation failed */

      stack = the_thread->Start.stack;
      the_thread->Start.core_allocated_stack = true;
    } else {
      stack = stack_area;
      actual_stack_size = stack_size;
      the_thread->Start.core_allocated_stack = false;
    }
  #endif

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    if ( is_fp ) {
      fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
      if ( !fp_area )
        goto failed;
      fp_area = _Context_Fp_start( fp_area, 0 );
    }
    the_thread->fp_context       = fp_area;
    the_thread->Start.fp_context = fp_area;
  #endif

  /*
   *  Get thread queue heads
   */
  the_thread->Wait.spare_heads = _Freechain_Get(
    &information->Free_thread_queue_heads,
    _Workspace_Allocate,
    _Objects_Extend_size( &information->Objects ),
    THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count )
  );
  if ( the_thread->Wait.spare_heads == NULL ) {
    goto failed;
  }
  _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );

  /*
   *  General initialization
   */

  the_thread->is_fp                  = is_fp;
  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  _Thread_Timer_initialize( &the_thread->Timer, cpu );

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
        break;
    #endif
    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
	break;
    #endif
  }

#if defined(RTEMS_SMP)
  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  the_thread->Lock.current = &the_thread->Lock.Default;
  _SMP_ticket_lock_Initialize( &the_thread->Lock.Default );
  _SMP_lock_Stats_initialize( &the_thread->Lock.Stats, "Thread Lock" );
  _SMP_lock_Stats_initialize( &the_thread->Potpourri_stats, "Thread Potpourri" );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  _Thread_queue_Initialize( &the_thread->Join_queue );

  the_thread->current_state           = STATES_DORMANT;
  the_thread->Wait.operations         = &_Thread_queue_Operations_default;
  the_thread->current_priority        = priority;
  the_thread->real_priority           = priority;
  the_thread->Start.initial_priority  = priority;

  RTEMS_STATIC_ASSERT( THREAD_WAIT_FLAGS_INITIAL == 0, Wait_flags );

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Scheduler_Update_priority( the_thread, priority );

  /* POSIX Keys */
  _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
  _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  RTEMS_STATIC_ASSERT( THREAD_LIFE_NORMAL == 0, Life_state );

  /*
   *  Open the object
   */
  _Objects_Open( &information->Objects, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

  _Freechain_Put(
    &information->Free_thread_queue_heads,
    the_thread->Wait.spare_heads
  );

  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    _Workspace_Free( fp_area );
  #endif

   _Thread_Stack_Free( the_thread );
  return false;
}
Code Example #6
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  const Scheduler_Control              *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    void                  *fp_area = NULL;
  #endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Initialize the Ada self pointer
   */
  #if __RTEMS_ADA__
    the_thread->rtems_ada_self = NULL;
  #endif

  the_thread->Start.tls_area = NULL;

  /*
   *  Allocate and Initialize the stack for this thread.
   */
  #if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
  #else
    if ( !stack_area ) {
      actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
      if ( !actual_stack_size || actual_stack_size < stack_size )
        return false;                     /* stack allocation failed */

      stack = the_thread->Start.stack;
      the_thread->Start.core_allocated_stack = true;
    } else {
      stack = stack_area;
      actual_stack_size = stack_size;
      the_thread->Start.core_allocated_stack = false;
    }
  #endif

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    if ( is_fp ) {
      fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
      if ( !fp_area )
        goto failed;
      fp_area = _Context_Fp_start( fp_area, 0 );
    }
    the_thread->fp_context       = fp_area;
    the_thread->Start.fp_context = fp_area;
  #endif

  /*
   *  Initialize the thread timer
   */
  _Watchdog_Initialize( &the_thread->Timer, NULL, 0, NULL );

  #ifdef __RTEMS_STRICT_ORDER_MUTEX__
    /* Initialize the head of chain of held mutexes */
    _Chain_Initialize_empty(&the_thread->lock_mutex);
  #endif

  /*
   * Clear the extensions area so extension users can determine
   * if they are linked to the thread. An extension user may
   * create the extension long after tasks have been created
   * so they cannot rely on the thread create user extension
   * call.  The object index starts with one, so the first extension context is
   * unused.
   */
  for ( i = 1 ; i <= rtems_configuration_get_maximum_extensions() ; ++i )
    the_thread->extensions[ i ] = NULL;

  /*
   *  General initialization
   */

  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
        break;
    #endif
    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
	break;
    #endif
  }

#if defined(RTEMS_SMP)
  the_thread->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  _CPU_Context_Set_is_executing( &the_thread->Registers, false );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state           = STATES_DORMANT;
  the_thread->Wait.queue              = NULL;
  the_thread->resource_count          = 0;
  the_thread->real_priority           = priority;
  the_thread->Start.initial_priority  = priority;

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Initialize the CPU usage statistics
   */
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
  #else
    the_thread->cpu_time_used = 0;
  #endif

  /*
   * initialize the thread's key value node chain
   */
  _Chain_Initialize_empty( &the_thread->Key_Chain );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Thread_Action_initialize(
    &the_thread->Life.Action,
    _Thread_Life_action_handler
  );
  the_thread->Life.state = THREAD_LIFE_NORMAL;
  the_thread->Life.terminator = NULL;

  /*
   *  Open the object
   */
  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

  #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
    _Workspace_Free( fp_area );
  #endif

   _Thread_Stack_Free( the_thread );
  return false;
}
Code Example #7
File: pthreadsetschedparam.c  Project: WattTech/rtems
int pthread_setschedparam(
  pthread_t           thread,
  int                 policy,
  struct sched_param *param
)
{
  Thread_Control                      *the_thread;
  Per_CPU_Control                     *cpu_self;
  POSIX_API_Control                   *api;
  Thread_CPU_budget_algorithms         budget_algorithm;
  Thread_CPU_budget_algorithm_callout  budget_callout;
  int                                  eno;
  Priority_Control                     unused;
  ISR_lock_Context                     lock_context;
  Priority_Control                     new_priority;

  /*
   *  Check all the parameters
   */

  if ( param == NULL ) {
    return EINVAL;
  }

  eno = _POSIX_Thread_Translate_sched_param(
    policy,
    param,
    &budget_algorithm,
    &budget_callout
  );
  if ( eno != 0 ) {
    return eno;
  }

  the_thread = _Thread_Get_interrupt_disable( thread, &lock_context );

  if ( the_thread == NULL ) {
    return ESRCH;
  }

  /*
   *  Actually change the scheduling policy and parameters
   */

  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _Thread_State_acquire_critical( the_thread, &lock_context );

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  if ( api->schedpolicy == SCHED_SPORADIC ) {
    _Watchdog_Per_CPU_remove_relative( &api->Sporadic_timer );
  }

  api->schedpolicy = policy;
  api->schedparam  = *param;
  api->Attributes.schedpolicy = policy;
  api->Attributes.schedparam  = *param;

  the_thread->budget_algorithm = budget_algorithm;
  the_thread->budget_callout   = budget_callout;

  switch ( policy ) {
    case SCHED_OTHER:
    case SCHED_FIFO:
    case SCHED_RR:
      the_thread->cpu_time_budget =
        rtems_configuration_get_ticks_per_timeslice();
      new_priority = _POSIX_Priority_To_core( api->schedparam.sched_priority );
      break;

    case SCHED_SPORADIC:
      api->ss_high_priority = api->schedparam.sched_priority;
      break;
  }

  _Thread_State_release( the_thread, &lock_context );

  switch ( policy ) {
    case SCHED_OTHER:
    case SCHED_FIFO:
    case SCHED_RR:
      _Thread_Set_priority( the_thread, new_priority, &unused, true );
      break;

    case SCHED_SPORADIC:
      _POSIX_Threads_Sporadic_budget_TSR( &api->Sporadic_timer );
      break;
  }

  _Thread_Dispatch_enable( cpu_self );
  return 0;
}
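
For context, a brief application-side usage sketch of the standard POSIX call (the helper name use_round_robin and the choice of a mid-range priority are illustrative, not taken from the sources above); selecting SCHED_RR is what causes the kernel code above to reload cpu_time_budget from rtems_configuration_get_ticks_per_timeslice():

#include <pthread.h>
#include <sched.h>

/* Hypothetical helper: switch the calling thread to round-robin scheduling. */
static int use_round_robin(void)
{
  struct sched_param param = { 0 };

  /* Pick a priority halfway between the valid SCHED_RR bounds. */
  param.sched_priority =
    ( sched_get_priority_min( SCHED_RR ) +
      sched_get_priority_max( SCHED_RR ) ) / 2;

  return pthread_setschedparam( pthread_self(), SCHED_RR, &param );
}
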
Code Example #8
File: threaddispatch.c  Project: AlexShiLucky/rtems
void _Thread_Dispatch( void )
{
  Per_CPU_Control  *cpu_self;
  Thread_Control   *executing;
  ISR_Level         level;

#if defined( RTEMS_SMP )
  /*
   * On SMP the complete context switch must be atomic with respect to one
   * processor.  See also _Thread_Handler() since _Context_switch() may branch
   * to this function.
   */
  _ISR_Disable_without_giant( level );
#endif

  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
  cpu_self->thread_dispatch_disable_level = 1;

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = cpu_self->executing;

#if !defined( RTEMS_SMP )
  _ISR_Disable( level );
#endif

#if defined( RTEMS_SMP )
  if ( cpu_self->dispatch_necessary ) {
#else
  while ( cpu_self->dispatch_necessary ) {
#endif
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
      _Thread_Update_cpu_time_used(
        executing,
        &cpu_self->time_of_last_context_switch
      );
    #else
      {
        _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
        heir->cpu_time_used++;
      }
    #endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  }

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
Code Example #9
int pthread_setschedparam(
  pthread_t           thread,
  int                 policy,
  struct sched_param *param
)
{
  Thread_Control                      *the_thread;
  POSIX_API_Control                   *api;
  Thread_CPU_budget_algorithms         budget_algorithm;
  Thread_CPU_budget_algorithm_callout  budget_callout;
  Objects_Locations                    location;
  int                                  rc;

  /*
   *  Check all the parameters
   */
  if ( !param )
    return EINVAL;

  rc = _POSIX_Thread_Translate_sched_param(
    policy,
    param,
    &budget_algorithm,
    &budget_callout
  );
  if ( rc )
    return rc;

  /*
   *  Actually change the scheduling policy and parameters
   */
  the_thread = _Thread_Get( thread, &location );
  switch ( location ) {

    case OBJECTS_LOCAL:
      api = the_thread->API_Extensions[ THREAD_API_POSIX ];

      if ( api->schedpolicy == SCHED_SPORADIC )
        (void) _Watchdog_Remove( &api->Sporadic_timer );

      api->schedpolicy = policy;
      api->schedparam  = *param;
      api->Attributes.schedpolicy = policy;
      api->Attributes.schedparam  = *param;

      the_thread->budget_algorithm = budget_algorithm;
      the_thread->budget_callout   = budget_callout;

      switch ( api->schedpolicy ) {
        case SCHED_OTHER:
        case SCHED_FIFO:
        case SCHED_RR:
          the_thread->cpu_time_budget =
            rtems_configuration_get_ticks_per_timeslice();

          the_thread->real_priority =
            _POSIX_Priority_To_core( api->schedparam.sched_priority );

          _Thread_Change_priority(
             the_thread,
             the_thread->real_priority,
             true
          );
          break;

        case SCHED_SPORADIC:
          api->ss_high_priority = api->schedparam.sched_priority;
          _Watchdog_Remove( &api->Sporadic_timer );
          _POSIX_Threads_Sporadic_budget_TSR( 0, the_thread );
          break;
      }

      _Objects_Put( &the_thread->Object );
      return 0;

#if defined(RTEMS_MULTIPROCESSING)
    case OBJECTS_REMOTE:
#endif
    case OBJECTS_ERROR:
      break;
  }

  return ESRCH;
}
Code Example #10
File: threaddispatch.c  Project: krohini1593/rtems
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

  executing = cpu_self->executing;

  do {
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

    /*
     * On SMP the complete context switch must be atomic with respect to one
     * processor.  See also _Thread_Handler() since _Context_switch() may branch
     * to this function.
     */
#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
      _Thread_Update_cpu_time_used(
        executing,
        &cpu_self->time_of_last_context_switch
      );
    #else
      {
        _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
        heir->cpu_time_used++;
      }
    #endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  } while (
#if defined( RTEMS_SMP )
    false
#else
    cpu_self->dispatch_necessary
#endif
  );

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
Code Example #11
File: taskmode.c  Project: Dipupo/rtems
rtems_status_code rtems_task_mode(
  rtems_mode  mode_set,
  rtems_mode  mask,
  rtems_mode *previous_mode_set
)
{
  Thread_Control     *executing;
  RTEMS_API_Control  *api;
  ASR_Information    *asr;
  bool                preempt_enabled;
  bool                needs_asr_dispatching;
  rtems_mode          old_mode;

  if ( !previous_mode_set )
    return RTEMS_INVALID_ADDRESS;

  executing     = _Thread_Get_executing();
  api = executing->API_Extensions[ THREAD_API_RTEMS ];
  asr = &api->Signal;

  old_mode  = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;

  if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
    old_mode |= RTEMS_NO_TIMESLICE;
  else
    old_mode |= RTEMS_TIMESLICE;

  old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
  old_mode |= _ISR_Get_level();

  *previous_mode_set = old_mode;

  /*
   *  These are generic thread scheduling characteristics.
   */
  preempt_enabled = false;
  if ( mask & RTEMS_PREEMPT_MASK ) {
#if defined( RTEMS_SMP )
    if ( rtems_configuration_is_smp_enabled() &&
         !_Modes_Is_preempt( mode_set ) ) {
      return RTEMS_NOT_IMPLEMENTED;
    }
#endif
    bool is_preempt_enabled = _Modes_Is_preempt( mode_set );

    preempt_enabled = !executing->is_preemptible && is_preempt_enabled;
    executing->is_preemptible = is_preempt_enabled;
  }

  if ( mask & RTEMS_TIMESLICE_MASK ) {
    if ( _Modes_Is_timeslice(mode_set) ) {
      executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
      executing->cpu_time_budget =
        rtems_configuration_get_ticks_per_timeslice();
    } else
      executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
  }

  /*
   *  Set the new interrupt level
   */
  if ( mask & RTEMS_INTERRUPT_MASK )
    _Modes_Set_interrupt_level( mode_set );

  /*
   *  This is specific to the RTEMS API
   */
  needs_asr_dispatching = false;
  if ( mask & RTEMS_ASR_MASK ) {
    bool is_asr_enabled = !_Modes_Is_asr_disabled( mode_set );

    if ( is_asr_enabled != asr->is_enabled ) {
      asr->is_enabled = is_asr_enabled;
      _ASR_Swap_signals( asr );
      if ( _ASR_Are_signals_pending( asr ) ) {
        needs_asr_dispatching = true;
        _Thread_Add_post_switch_action(
          executing,
          &api->Signal_action,
          _Signal_Action_handler
        );
      }
    }
  }

  if ( preempt_enabled || needs_asr_dispatching ) {
    Per_CPU_Control  *cpu_self;
    ISR_lock_Context  lock_context;

    cpu_self = _Thread_Dispatch_disable();
    _Scheduler_Acquire( executing, &lock_context );
    _Scheduler_Schedule( executing );
    _Scheduler_Release( executing, &lock_context );
    _Thread_Dispatch_enable( cpu_self );
  }

  return RTEMS_SUCCESSFUL;
}
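
A short Classic API usage sketch (the helper name enable_timeslicing is hypothetical): requesting RTEMS_TIMESLICE through rtems_task_mode() is what triggers the budget reload shown above.

#include <rtems.h>

/* Hypothetical helper: turn on preemption and timeslicing for the calling
 * task.  Per the code above, selecting RTEMS_TIMESLICE reloads the task's
 * cpu_time_budget from rtems_configuration_get_ticks_per_timeslice(). */
static rtems_status_code enable_timeslicing( void )
{
  rtems_mode previous_mode;

  return rtems_task_mode(
    RTEMS_PREEMPT | RTEMS_TIMESLICE,
    RTEMS_PREEMPT_MASK | RTEMS_TIMESLICE_MASK,
    &previous_mode
  );
}
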
Code Example #12
File: pthreadsetschedparam.c  Project: gedare/rtems
static int _POSIX_Set_sched_param(
  Thread_Control                       *the_thread,
  int                                   policy,
  struct sched_param                   *param,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  Thread_queue_Context                 *queue_context
)
{
  const Scheduler_Control *scheduler;
  POSIX_API_Control       *api;
  int                      normal_prio;
  int                      low_prio;
  bool                     valid;
  Priority_Control         core_normal_prio;
  Priority_Control         core_low_prio;

  normal_prio = param->sched_priority;

  scheduler = _Thread_Scheduler_get_home( the_thread );

  core_normal_prio = _POSIX_Priority_To_core( scheduler, normal_prio, &valid );
  if ( !valid ) {
    return EINVAL;
  }

  if ( policy == SCHED_SPORADIC ) {
    low_prio = param->sched_ss_low_priority;
  } else {
    low_prio = normal_prio;
  }

  core_low_prio = _POSIX_Priority_To_core( scheduler, low_prio, &valid );
  if ( !valid ) {
    return EINVAL;
  }

  api = the_thread->API_Extensions[ THREAD_API_POSIX ];

  _Watchdog_Per_CPU_remove_monotonic( &api->Sporadic.Timer );

  _Priority_Node_set_priority( &the_thread->Real_priority, core_normal_prio );

  if ( _Priority_Node_is_active( &api->Sporadic.Low_priority ) ) {
    _Thread_Priority_add(
      the_thread,
      &the_thread->Real_priority,
      queue_context
    );
    _Thread_Priority_remove(
      the_thread,
      &api->Sporadic.Low_priority,
      queue_context
    );
    _Priority_Node_set_inactive( &api->Sporadic.Low_priority );
  } else {
    _Thread_Priority_changed(
      the_thread,
      &the_thread->Real_priority,
      false,
      queue_context
    );
  }

  the_thread->budget_algorithm = budget_algorithm;
  the_thread->budget_callout   = budget_callout;

  _Priority_Node_set_priority( &api->Sporadic.Low_priority, core_low_prio );
  api->Sporadic.sched_ss_repl_period = param->sched_ss_repl_period;
  api->Sporadic.sched_ss_init_budget = param->sched_ss_init_budget;
  api->Sporadic.sched_ss_max_repl = param->sched_ss_max_repl;

  if ( policy == SCHED_SPORADIC ) {
    _POSIX_Threads_Sporadic_timer_insert( the_thread, api );
  } else {
    the_thread->cpu_time_budget =
      rtems_configuration_get_ticks_per_timeslice();
  }

  return 0;
}