/*
 * rtems_monitor_task_canonical
 *
 * Fill in the monitor's canonical task representation from a core
 * Thread_Control.
 *
 *  canonical_task - output: monitor task record to fill in
 *  thread_void    - input:  opaque pointer to the Thread_Control
 */
void rtems_monitor_task_canonical(
    rtems_monitor_task_t  *canonical_task,
    const void            *thread_void
)
{
    const Thread_Control *rtems_thread = (const Thread_Control *) thread_void;
    RTEMS_API_Control    *api;

    api = rtems_thread->API_Extensions[ THREAD_API_RTEMS ];

    canonical_task->entry = rtems_thread->Start.entry_point;
    canonical_task->argument = rtems_thread->Start.numeric_argument;
    canonical_task->stack = rtems_thread->Start.Initial_stack.area;
    canonical_task->stack_size = rtems_thread->Start.Initial_stack.size;
    canonical_task->cpu = _Per_CPU_Get_index( _Thread_Get_CPU( rtems_thread ) );
    canonical_task->priority = rtems_thread->current_priority;
    canonical_task->state = rtems_thread->current_state;
    canonical_task->wait_id = rtems_thread->Wait.id;
    canonical_task->wait_queue = rtems_thread->Wait.queue;
    canonical_task->wait_operations = rtems_thread->Wait.operations;

    /*
     * Threads which are not Classic API tasks (e.g. internal threads)
     * have no RTEMS API extension, so the slot read above may be NULL.
     * Guard the dereference and report no pending events in that case
     * instead of crashing the monitor.
     */
    canonical_task->events = ( api != NULL ) ? api->Event.pending_events : 0;

    /*
     * FIXME: make this optionally cpu_time_executed
     */
#if 0
    canonical_task->ticks = rtems_thread->cpu_time_executed;
#else
    canonical_task->ticks = 0;
#endif

    /* XXX modes and attributes only exist in the RTEMS API .. */
    /* XXX not directly in the core thread.. they will have to be derived */
    /* XXX if they are important enough to include anymore. */
    canonical_task->modes = 0; /* XXX FIX ME.... rtems_thread->current_modes; */
    canonical_task->attributes = 0 /* XXX FIX ME
                             rtems_thread->API_Extensions[ THREAD_API_RTEMS ]->attribute_set */;
}
/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context *self =
    _Scheduler_priority_SMP_Get_self( context );
  Scheduler_priority_affinity_SMP_Node *affinity_node;
  Scheduler_Node                       *result = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu;
  Priority_Control                      priority;

  /*
   * A NULL victim is the reevaluation case: hand back the globally
   * highest ready node without any affinity filtering.
   */
  if ( victim == NULL ) {
    affinity_node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &affinity_node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   *  + by checking the major bit, we could potentially skip entire
   *    groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  for (
    priority = _Priority_bit_map_Get_highest( &self->Bit_map );
    priority <= PRIORITY_MAXIMUM;
    priority++
  ) {
    Chain_Control *ready_chain = &self->Ready[ priority ];
    Chain_Node    *iter;

    for (
      iter = _Chain_First( ready_chain );
      iter != _Chain_Immutable_tail( ready_chain );
      iter = _Chain_Next( iter )
    ) {
      affinity_node = (Scheduler_priority_affinity_SMP_Node *) iter;

      /*
       * Take the first node at this (highest remaining) priority whose
       * affinity set permits execution on the victim's processor.
       */
      if ( CPU_ISSET( (int) victim_cpu, affinity_node->Affinity.set ) ) {
        result = &affinity_node->Base.Base.Base;
        goto done;
      }
    }
  }

done:
  _Assert( result != NULL );

  return result;
}