/*
 * This method is unique to this scheduler because it must take into
 * account affinity as it searches for the lowest priority scheduled
 * thread.  It ignores potential victims which cannot be replaced by
 * the filter thread because the filter thread does not have affinity
 * for the processor the potential victim is executing on.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Scheduler_Node        *lowest_scheduled = NULL;
  Chain_Control         *scheduled = &self->Scheduled;
  Chain_Node            *chain_node;
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );

  for ( chain_node = _Chain_Last( scheduled ) ;
        chain_node != _Chain_Immutable_head( scheduled ) ;
        chain_node = _Chain_Previous( chain_node ) ) {
    Scheduler_priority_affinity_SMP_Node *node;
    Thread_Control                       *thread;
    uint32_t                              cpu_index;

    node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

    /*
     * If this scheduled thread is not of equal or lower importance
     * than the filter thread, then no suitable victim remains and the
     * filter thread cannot be scheduled to execute.
     */
    if ( (*order)( &node->Base.Base.Base.Node, &filter->Base.Base.Base.Node ) )
      break;

    /* cpu_index is the processor number the thread is executing on */
    thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
    cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );

    if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
      lowest_scheduled = &node->Base.Base.Base;
      break;
    }
  }

  return lowest_scheduled;
}
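/*
 * Illustrative sketch (not part of the original source): the victim
 * search above hinges on the cpu_set_t affinity test.  This
 * hypothetical stand-alone program shows the same
 * CPU_ZERO/CPU_SET/CPU_ISSET pattern used to decide whether the
 * filter thread may run on the processor a potential victim occupies.
 */
#define _GNU_SOURCE /* glibc requires this for the CPU_* macros */
#include <sched.h>
#include <stdio.h>

int main( void )
{
  cpu_set_t affinity;
  int       victim_cpu = 2; /* hypothetical processor index */

  CPU_ZERO( &affinity );
  CPU_SET( 0, &affinity );
  CPU_SET( 1, &affinity );

  if ( CPU_ISSET( victim_cpu, &affinity ) ) {
    printf( "may preempt the thread on CPU %d\n", victim_cpu );
  } else {
    printf( "no affinity for CPU %d, skip this victim\n", victim_cpu );
  }

  return 0;
}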
/*
 * Checks whether the thread is currently executing on a processor.  If
 * so, the time of its last context switch is returned through
 * time_of_context_switch and the function returns true.
 */
static bool is_executing_on_a_core(
  Thread_Control    *the_thread,
  Timestamp_Control *time_of_context_switch
)
{
#ifndef RTEMS_SMP
  if ( _Thread_Executing->Object.id == the_thread->Object.id ) {
    *time_of_context_switch = _Thread_Time_of_last_context_switch;
    return true;
  }
#else
  /* FIXME: Locking */
  if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
    *time_of_context_switch =
      _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
    return true;
  }
#endif

  return false;
}
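/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): a CPU usage report must credit a thread that is running
 * right now with the time elapsed since its last context switch,
 * because that interval has not yet been added to its accumulated
 * total.  The function name and the cpu_time_used field are
 * assumptions based on typical RTEMS usage accounting.
 */
static Timestamp_Control get_current_usage( Thread_Control *the_thread )
{
  Timestamp_Control used = the_thread->cpu_time_used;
  Timestamp_Control time_of_context_switch;

  if ( is_executing_on_a_core( the_thread, &time_of_context_switch ) ) {
    Timestamp_Control uptime;
    Timestamp_Control ran;

    /* add the still-running interval: uptime - last context switch */
    _TOD_Get_uptime( &uptime );
    _Timestamp_Subtract( &time_of_context_switch, &uptime, &ran );
    _Timestamp_Add_to( &used, &ran );
  }

  return used;
}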
void rtems_monitor_task_canonical(
  rtems_monitor_task_t *canonical_task,
  const void           *thread_void
)
{
  Thread_Control    *rtems_thread;
  RTEMS_API_Control *api;

  rtems_thread =
    RTEMS_DECONST( Thread_Control *, (const Thread_Control *) thread_void );

  api = rtems_thread->API_Extensions[ THREAD_API_RTEMS ];

  rtems_monitor_task_wait_info( canonical_task, rtems_thread );

  canonical_task->entry = rtems_thread->Start.Entry;
  canonical_task->stack = rtems_thread->Start.Initial_stack.area;
  canonical_task->stack_size = rtems_thread->Start.Initial_stack.size;
  canonical_task->cpu = _Per_CPU_Get_index( _Thread_Get_CPU( rtems_thread ) );
  canonical_task->priority = rtems_thread->current_priority;
  canonical_task->events = api->Event.pending_events;

  /*
   * FIXME: make this optionally cpu_time_executed
   */
#if 0
  canonical_task->ticks = rtems_thread->cpu_time_executed;
#else
  canonical_task->ticks = 0;
#endif

  /* XXX modes and attributes only exist in the RTEMS API .. */
  /* XXX not directly in the core thread.. they will have to be derived */
  /* XXX if they are important enough to include anymore. */
  canonical_task->modes = 0; /* XXX FIX ME.... rtems_thread->current_modes; */
  canonical_task->attributes = 0 /* XXX FIX ME
    rtems_thread->API_Extensions[ THREAD_API_RTEMS ]->attribute_set */;
}
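/*
 * Illustrative sketch (not part of the original source): RTEMS_DECONST
 * above strips the const qualifier from the thread pointer the monitor
 * hands in.  A minimal version of such a macro could route the cast
 * through uintptr_t to avoid cast-qualifier warnings; the real macro
 * lives in the RTEMS headers and may differ in detail.
 */
#include <stdint.h>

#define MY_DECONST( _type, _var ) \
  ( (_type) (uintptr_t) (const void *) ( _var ) )

/* usage: Thread_Control *t = MY_DECONST( Thread_Control *, thread_void ); */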
/*
 * Enqueues the thread on its processor's chain of threads in need of
 * help and requests a thread dispatch on that processor, unless a help
 * request for this thread is already pending.
 */
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread )
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( the_thread, &lock_context );

  if ( _Chain_Is_node_off_chain( &the_thread->Scheduler.Help_node ) ) {
    Per_CPU_Control *cpu;

    cpu = _Thread_Get_CPU( the_thread );
    _Per_CPU_Acquire( cpu );
    _Chain_Append_unprotected(
      &cpu->Threads_in_need_for_help,
      &the_thread->Scheduler.Help_node
    );
    _Per_CPU_Release( cpu );

    _Thread_Dispatch_request( _Per_CPU_Get(), cpu );
  }

  _Thread_Scheduler_release_critical( the_thread, &lock_context );
}
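/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * a consumer on the thread dispatch path could drain the per-CPU
 * Threads_in_need_for_help chain like this.  The helper
 * ask_scheduler_for_help() is invented for illustration; the actual
 * RTEMS dispatch code differs.
 */
static void drain_help_requests( Per_CPU_Control *cpu )
{
  _Per_CPU_Acquire( cpu );

  while ( !_Chain_Is_empty( &cpu->Threads_in_need_for_help ) ) {
    Chain_Node     *node;
    Thread_Control *the_thread;

    node = _Chain_Get_first_unprotected( &cpu->Threads_in_need_for_help );
    the_thread =
      RTEMS_CONTAINER_OF( node, Thread_Control, Scheduler.Help_node );

    /* mark the request as consumed, matching the check in the producer */
    _Chain_Set_off_chain( node );
    _Per_CPU_Release( cpu );

    ask_scheduler_for_help( the_thread ); /* hypothetical helper */

    _Per_CPU_Acquire( cpu );
  }

  _Per_CPU_Release( cpu );
}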
/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                index;
  Scheduler_Node                 *highest = NULL;
  Thread_Control                 *victim_thread;
  uint32_t                        victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * A NULL victim indicates a request to check whether a scheduling
   * reevaluation is needed; return the overall highest ready node.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   *       for insertion, extraction, and finding the highest priority
   *       thread.  Scanning the list of ready threads is not a purpose
   *       for which it was optimized.  There are optimizations to be
   *       made in this loop.
   *
   *       + by checking the major bit, we could potentially skip entire
   *         groups of 16.
   *
   *       When using this scheduler as implemented, the application's
   *       choice of numeric priorities and their distribution can have
   *       an impact on performance.
   */
  for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
        index <= PRIORITY_MAXIMUM ;
        index++ ) {
    Chain_Control *chain = &self->Ready[ index ];
    Chain_Node    *chain_node;

    for ( chain_node = _Chain_First( chain ) ;
          chain_node != _Chain_Immutable_tail( chain ) ;
          chain_node = _Chain_Next( chain_node ) ) {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on the victim's CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }

    if ( highest != NULL )
      break;
  }

  _Assert( highest != NULL );

  return highest;
}
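/*
 * Illustrative sketch (simplified, not the RTEMS implementation): the
 * two-level priority bit map consulted by
 * _Priority_bit_map_Get_highest() locates the highest priority with a
 * non-empty ready chain in constant time.  In this toy version, the
 * most significant bit of each 16-bit word corresponds to the highest
 * priority; __builtin_clz stands in for the CPU-specific
 * find-first-bit operation, and the map is assumed to be non-empty.
 */
#include <stdint.h>

typedef struct {
  uint16_t major;       /* group i has a ready thread => its bit is set */
  uint16_t minor[ 16 ]; /* minor[ i ]: which of the 16 priorities in group i */
} simple_bit_map;

static unsigned simple_highest_priority( const simple_bit_map *map )
{
  /* the leading-zero count of the word shifted into the top 16 bits
     gives the index of the first set bit, counting from the MSB */
  unsigned group  = (unsigned) __builtin_clz( (uint32_t) map->major << 16 );
  unsigned offset =
    (unsigned) __builtin_clz( (uint32_t) map->minor[ group ] << 16 );

  return group * 16 + offset; /* priority = group * 16 + offset */
}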