void _Thread_Scheduler_ask_for_help( Thread_Control *the_thread )
{
  Chain_Node       *node;
  const Chain_Node *tail;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  /*
   * A thread has at least its home scheduler node, so the chain is
   * non-empty and the do-while is safe.  Ask each scheduler in turn
   * until one of them grants the help request.
   */
  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;
    bool                     success;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    success = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    if ( success ) {
      break;
    }

    node = _Chain_Next( node );
  } while ( node != tail );
}
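/*
 * Illustrative sketch (not RTEMS API): the first/tail/next walk used by
 * _Thread_Scheduler_ask_for_help() and the functions below, restated with
 * a plain intrusive list so it compiles on its own.  The names my_node and
 * my_list_walk are hypothetical.
 */
#include <stdio.h>

typedef struct my_node {
  struct my_node *next;
  int             value;
} my_node;

/* Walk from the first element up to, but not including, the sentinel
 * tail, mirroring _Chain_First()/_Chain_Immutable_tail()/_Chain_Next(). */
static void my_list_walk( const my_node *first, const my_node *tail )
{
  const my_node *node;

  for ( node = first; node != tail; node = node->next ) {
    printf( "%d\n", node->value );
  }
}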
static void _Watchdog_Insert_fixup(
  Watchdog_Header   *header,
  Watchdog_Control  *the_watchdog,
  Watchdog_Interval  delta,
  Watchdog_Control  *next_watchdog,
  Watchdog_Interval  delta_next
)
{
  const Chain_Node *iterator_tail;
  Chain_Node       *iterator_node;

  /* The new watchdog is inserted before next_watchdog, which keeps only
   * the remainder of its interval. */
  next_watchdog->delta_interval = delta_next - delta;

  iterator_node = _Chain_First( &header->Iterators );
  iterator_tail = _Chain_Immutable_tail( &header->Iterators );

  /* Repoint any iterator that was at next_watchdog to the new entry. */
  while ( iterator_node != iterator_tail ) {
    Watchdog_Iterator *iterator;

    iterator = (Watchdog_Iterator *) iterator_node;

    if ( iterator->current == &next_watchdog->Node ) {
      iterator->current = &the_watchdog->Node;
    }

    iterator_node = _Chain_Next( iterator_node );
  }
}
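/*
 * Worked sketch (hypothetical numbers, not RTEMS code): the Watchdogs
 * chain stores each expiration relative to its predecessor, so absolute
 * expirations { 5, 9, 12 } ticks are stored as deltas { 5, 4, 3 }.
 * Inserting at absolute time 7 (delta 2 past the first entry) splits the
 * second delta: the new entry gets 2 and its successor keeps 4 - 2 == 2,
 * exactly the delta_next - delta adjustment above.
 */
#include <stddef.h>

static void to_delta_intervals(
  const unsigned *absolute,
  unsigned       *delta,
  size_t          n
)
{
  size_t   i;
  unsigned previous = 0;

  for ( i = 0; i < n; ++i ) {
    delta[ i ] = absolute[ i ] - previous; /* relative to predecessor */
    previous = absolute[ i ];
  }
}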
static void test_chain_insert_ordered( void )
{
  Chain_Control     chain = CHAIN_INITIALIZER_EMPTY( chain );
  Chain_Node        nodes[ 5 ];
  const Chain_Node *tail;
  const Chain_Node *node;
  size_t            n = RTEMS_ARRAY_SIZE( nodes );
  size_t            i = 0;

  puts( "INIT - Verify _Chain_Insert_ordered_unprotected" );

  _Chain_Insert_ordered_unprotected( &chain, &nodes[ 4 ], test_order );
  _Chain_Insert_ordered_unprotected( &chain, &nodes[ 2 ], test_order );
  _Chain_Insert_ordered_unprotected( &chain, &nodes[ 0 ], test_order );
  _Chain_Insert_ordered_unprotected( &chain, &nodes[ 3 ], test_order );
  _Chain_Insert_ordered_unprotected( &chain, &nodes[ 1 ], test_order );

  tail = _Chain_Immutable_tail( &chain );
  node = _Chain_Immutable_first( &chain );

  while ( node != tail && i < n ) {
    rtems_test_assert( node == &nodes[ i ] );
    ++i;
    node = _Chain_Immutable_next( node );
  }

  rtems_test_assert( i == n );
}
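/*
 * The comparator test_order is not shown above; a minimal sketch that is
 * consistent with the assertions (insertions in any order end up as
 * nodes[ 0 ] .. nodes[ 4 ]) is to order the nodes by address, since the
 * array elements are laid out in ascending address order.  This is an
 * assumption about the test, not a copy of it.
 */
#include <stdbool.h>

static bool test_order( const Chain_Node *left, const Chain_Node *right )
{
  return left < right;
}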
void _User_extensions_Iterate(
  void                    *arg,
  User_extensions_Visitor  visitor
)
{
  Thread_Control              *executing = _Thread_Executing;
  const User_extensions_Table *callouts_current =
    rtems_configuration_get_user_extension_table();
  const User_extensions_Table *callouts_end =
    callouts_current + rtems_configuration_get_number_of_initial_extensions();
  const Chain_Node            *node;
  const Chain_Node            *tail;

  /* First visit the statically configured initial extensions. */
  while ( callouts_current != callouts_end ) {
    (*visitor)( executing, arg, callouts_current );

    ++callouts_current;
  }

  /* Then visit the dynamically created extensions on the chain. */
  node = _Chain_Immutable_first( &_User_extensions_List );
  tail = _Chain_Immutable_tail( &_User_extensions_List );

  while ( node != tail ) {
    const User_extensions_Control *extension =
      (const User_extensions_Control *) node;

    (*visitor)( executing, arg, &extension->Callouts );

    node = _Chain_Immutable_next( node );
  }
}
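/*
 * Hypothetical visitor sketch: the shape is inferred from the calls
 * (*visitor)( executing, arg, callouts ) above.  A real visitor would
 * dispatch to one of the callouts in the table; this one merely counts
 * the extension sets through the user-supplied argument, e.g.
 * size_t n = 0; _User_extensions_Iterate( &n, count_extension_sets ).
 */
static void count_extension_sets(
  Thread_Control              *executing,
  void                        *arg,
  const User_extensions_Table *callouts
)
{
  (void) executing;
  (void) callouts;

  ++*(size_t *) arg;
}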
static void _Watchdog_Remove_it(
  Watchdog_Header  *header,
  Watchdog_Control *the_watchdog
)
{
  Chain_Node        *next;
  Watchdog_Interval  delta;
  const Chain_Node  *iterator_tail;
  Chain_Node        *iterator_node;

  _Assert( the_watchdog->state == WATCHDOG_ACTIVE );

  the_watchdog->state = WATCHDOG_INACTIVE;
  the_watchdog->stop_time = _Watchdog_Ticks_since_boot;

  next = _Chain_Next( &the_watchdog->Node );
  delta = the_watchdog->delta_interval;

  if ( next != _Chain_Tail( &header->Watchdogs ) ) {
    Watchdog_Control *next_watchdog;

    next_watchdog = (Watchdog_Control *) next;

    /* The successor absorbs the removed delta, so its absolute
     * expiration time is unchanged. */
    next_watchdog->delta_interval += delta;
  }

  _Chain_Extract_unprotected( &the_watchdog->Node );

  iterator_node = _Chain_First( &header->Iterators );
  iterator_tail = _Chain_Immutable_tail( &header->Iterators );

  while ( iterator_node != iterator_tail ) {
    Watchdog_Iterator *iterator;

    iterator = (Watchdog_Iterator *) iterator_node;

    if ( iterator->current == next ) {
      iterator->delta_interval += delta;
    }

    if ( iterator->current == &the_watchdog->Node ) {
      Chain_Node *previous = _Chain_Previous( &the_watchdog->Node );

      iterator->current = previous;

      if ( previous != _Chain_Head( &header->Watchdogs ) ) {
        Watchdog_Control *previous_watchdog;

        previous_watchdog = (Watchdog_Control *) previous;

        iterator->delta_interval += previous_watchdog->delta_interval;
      }
    }

    iterator_node = _Chain_Next( iterator_node );
  }
}
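/*
 * Worked removal sketch (hypothetical numbers, not RTEMS code): with
 * deltas { 5, 4, 3 } encoding absolute expirations { 5, 9, 12 }, removing
 * the middle entry folds its delta into the successor, 3 + 4 == 7, and
 * the survivors still expire at 5 and 5 + 7 == 12 ticks.
 */
#include <stddef.h>

static void remove_delta_entry( unsigned *delta, size_t n, size_t i )
{
  size_t j;

  if ( i + 1 < n ) {
    delta[ i + 1 ] += delta[ i ]; /* successor absorbs the removed delta */
  }

  for ( j = i; j + 1 < n; ++j ) {
    delta[ j ] = delta[ j + 1 ]; /* close the gap */
  }
}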
void _Objects_Shrink_information( Objects_Information *information )
{
  uint32_t block_count;
  uint32_t block;
  uint32_t index_base;

  /*
   * Search the list to find the block or chunk with all objects inactive.
   */
  index_base = _Objects_Get_index( information->minimum_id );
  block_count = ( information->maximum - index_base ) /
    information->allocation_size;

  for ( block = 0; block < block_count; block++ ) {
    if (
      information->inactive_per_block[ block ] == information->allocation_size
    ) {
      Chain_Node       *node = _Chain_First( &information->Inactive );
      const Chain_Node *tail = _Chain_Immutable_tail( &information->Inactive );
      uint32_t          index_end = index_base + information->allocation_size;

      while ( node != tail ) {
        Objects_Control *object = (Objects_Control *) node;
        uint32_t         index = _Objects_Get_index( object->id );

        /*
         * Get the next node before the node is extracted.
         */
        node = _Chain_Next( node );

        if ( index >= index_base && index < index_end ) {
          _Chain_Extract( &object->Node );
        }
      }

      /*
       * Free the memory and reset the structures in the object's information.
       */
      _Workspace_Free( information->object_blocks[ block ] );
      information->object_blocks[ block ] = NULL;
      information->inactive_per_block[ block ] = 0;
      information->inactive -= information->allocation_size;

      return;
    }

    index_base += information->allocation_size;
  }
}
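/*
 * Hypothetical helper restating the index arithmetic above: block b
 * covers the index range [ index_base + b * allocation_size,
 * index_base + ( b + 1 ) * allocation_size ), so an object index maps
 * back to its block as follows.  This is illustrative, not RTEMS API.
 */
#include <stdint.h>

static uint32_t index_to_block(
  uint32_t index,
  uint32_t index_base,
  uint32_t allocation_size
)
{
  return ( index - index_base ) / allocation_size;
}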
/*
 * This method is unique to this scheduler because it takes into account
 * affinity as it determines the highest ready thread.  Since this is used
 * to pick a new thread to replace the victim, the highest ready thread
 * must have affinity such that it can be executed on the victim's
 * processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * This is done when we need to check if reevaluations are needed.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );

    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   *       for insertion, extraction, and finding the highest priority
   *       thread.  Scanning the list of ready threads is not a purpose
   *       for which it was optimized.  There are optimizations to be
   *       made in this loop.
   *
   *       + by checking the major bit, we could potentially skip entire
   *         groups of 16.
   *
   *       When using this scheduler as implemented, the application's
   *       choice of numeric priorities and their distribution can have
   *       an impact on performance.
   */
  for (
    index = _Priority_bit_map_Get_highest( &self->Bit_map );
    index <= PRIORITY_MAXIMUM;
    index++
  ) {
    Chain_Control *chain = &self->Ready[ index ];
    Chain_Node    *chain_node;

    for (
      chain_node = _Chain_First( chain );
      chain_node != _Chain_Immutable_tail( chain );
      chain_node = _Chain_Next( chain_node )
    ) {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }

    if ( highest != NULL ) {
      break;
    }
  }

  _Assert( highest != NULL );

  return highest;
}
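/*
 * Minimal standalone sketch of the affinity test above, assuming the
 * POSIX/glibc cpu_set_t API (CPU_ZERO, CPU_SET, CPU_ISSET).  A node whose
 * affinity set contains the victim's processor index passes the check.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main( void )
{
  cpu_set_t affinity;
  int       victim_cpu_index = 1;

  CPU_ZERO( &affinity );
  CPU_SET( 1, &affinity ); /* this thread may run on processor 1 only */

  if ( CPU_ISSET( victim_cpu_index, &affinity ) ) {
    printf( "eligible to replace the victim on CPU %d\n", victim_cpu_index );
  }

  return 0;
}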