static void test_chain_control_layout(void) { Chain_Control chain; puts( "INIT - Verify rtems_chain_control layout" ); rtems_test_assert( sizeof(Chain_Control) == sizeof(Chain_Node) + sizeof(Chain_Node *) ); rtems_test_assert( sizeof(Chain_Control) == 3 * sizeof(Chain_Node *) ); rtems_test_assert( _Chain_Previous( _Chain_Head( &chain ) ) == _Chain_Next( _Chain_Tail( &chain ) ) ); #if !defined( RTEMS_SMP ) rtems_test_assert( sizeof(Chain_Control) == sizeof(rtems_chain_control) ); #endif }
/*
 * Remove the global object with the given id from this node's global
 * object table and free its control block.  If no matching entry
 * exists the id is invalid and the system terminates with a fatal
 * internal error.
 */
void _Objects_MP_Close (
  Objects_Information *information,
  Objects_Id           the_id
)
{
  Chain_Control *chain;
  Chain_Node    *node;

  /* The owning node's index selects the per-node global chain. */
  chain = &information->global_table[ _Objects_Get_node( the_id ) ];
  node  = _Chain_First( chain );

  while ( !_Chain_Is_tail( chain, node ) ) {
    Objects_MP_Control *global_object = (Objects_MP_Control *) node;

    if ( _Objects_Are_ids_equal( global_object->Object.id, the_id ) ) {
      _Chain_Extract( node );
      _Objects_MP_Free_global_object( global_object );
      return;
    }

    node = _Chain_Next( node );
  }

  /* Closing an id that is not in the table is a fatal error. */
  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_INVALID_GLOBAL_ID
  );
}
/*
 * Account for a new watchdog inserted in front of next_watchdog: reduce
 * next_watchdog's delta interval by the new watchdog's delta, and make
 * the same adjustment in every active iterator currently positioned on
 * next_watchdog so concurrent insertions stay consistent.
 */
static void _Watchdog_Insert_fixup(
  Watchdog_Header   *header,
  Watchdog_Control  *next_watchdog,
  Watchdog_Interval  delta
)
{
  const Chain_Node *tail;
  Chain_Node       *node;

  next_watchdog->delta_interval -= delta;

  tail = _Chain_Immutable_tail( &header->Iterators );

  for (
    node = _Chain_First( &header->Iterators );
    node != tail;
    node = _Chain_Next( node )
  ) {
    Watchdog_Iterator *iterator = (Watchdog_Iterator *) node;

    /* Only iterators parked on next_watchdog need the correction. */
    if ( iterator->current == &next_watchdog->Node ) {
      iterator->delta_interval -= delta;
    }
  }
}
/*
 * Insert an inactive watchdog into the delta chain of the header while
 * holding the watchdog lock.  The lock is briefly released
 * (_Watchdog_Flash) on every step down the chain, so an on-stack
 * iterator is registered in header->Iterators to let concurrent
 * insert/remove operations fix up this walk's position and remaining
 * delta (see _Watchdog_Insert_fixup).
 */
void _Watchdog_Insert_locked(
  Watchdog_Header  *header,
  Watchdog_Control *the_watchdog,
  ISR_lock_Context *lock_context
)
{
  /* Only an inactive watchdog may be inserted; otherwise do nothing. */
  if ( the_watchdog->state == WATCHDOG_INACTIVE ) {
    Watchdog_Iterator  iterator;
    Chain_Node        *current;
    Chain_Node        *next;
    Watchdog_Interval  delta;

    /* Mark as in-flight so a concurrent remove can abort this insert. */
    the_watchdog->state = WATCHDOG_BEING_INSERTED;

    /* Publish our iterator so others can adjust it while we flash the lock. */
    _Chain_Append_unprotected( &header->Iterators, &iterator.Node );

    delta = the_watchdog->initial;
    current = _Chain_Head( &header->Watchdogs );

    while (
      ( next = _Chain_Next( current ) ) != _Chain_Tail( &header->Watchdogs )
    ) {
      Watchdog_Control  *next_watchdog;
      Watchdog_Interval  delta_next;

      next_watchdog = (Watchdog_Control *) next;
      delta_next = next_watchdog->delta_interval;

      /* Found the insertion point: our delta fits before next_watchdog. */
      if ( delta < delta_next ) {
        _Watchdog_Insert_fixup( header, next_watchdog, delta );
        break;
      }

      /* Record position/remaining delta before releasing the lock. */
      iterator.delta_interval = delta - delta_next;
      iterator.current = next;

      /* Release and re-acquire the lock to bound interrupt latency. */
      _Watchdog_Flash( header, lock_context );

      /* A concurrent remove may have cancelled this insert meanwhile. */
      if ( the_watchdog->state != WATCHDOG_BEING_INSERTED ) {
        goto abort_insert;
      }

      /* Reload position/delta: they may have been fixed up concurrently. */
      delta = iterator.delta_interval;
      current = iterator.current;
    }

    the_watchdog->delta_interval = delta;
    the_watchdog->start_time = _Watchdog_Ticks_since_boot;
    _Watchdog_Activate( the_watchdog );
    _Chain_Insert_unprotected( current, &the_watchdog->Node );

abort_insert:
    /* De-register our iterator in both the normal and abort paths. */
    _Chain_Extract_unprotected( &iterator.Node );
  }
}
/*
 * Determine whether the_id names an object that is resident on a remote
 * node.  On success *location is OBJECTS_REMOTE and *the_object points
 * at the global object control block; otherwise *location is
 * OBJECTS_ERROR and *the_object is NULL.
 */
void _Objects_MP_Is_remote (
  Objects_Information  *information,
  Objects_Id            the_id,
  Objects_Locations    *location,
  Objects_Control     **the_object
)
{
  uint32_t       node;
  Chain_Control *chain;
  Chain_Node    *iter;

  node = _Objects_Get_node( the_id );

  /*
   * NOTE: The local node was searched (if necessary) by
   *       _Objects_Name_to_id_XXX before this was invoked.
   *
   *       The NODE field of an object id cannot be 0
   *       because 0 is an invalid node number.
   */
  if (
    node == 0
      || _Objects_Is_local_node( node )
      || node > _Objects_Maximum_nodes
      || information->global_table == NULL
  ) {
    *location   = OBJECTS_ERROR;
    *the_object = NULL;
    return;
  }

  _Thread_Disable_dispatch();

  chain = &information->global_table[ node ];
  iter  = _Chain_First( chain );

  while ( !_Chain_Is_tail( chain, iter ) ) {
    Objects_MP_Control *global_object = (Objects_MP_Control *) iter;

    if ( _Objects_Are_ids_equal( global_object->Object.id, the_id ) ) {
      /* Leave dispatch disabled one level for the caller (unnest only). */
      _Thread_Unnest_dispatch();
      *location   = OBJECTS_REMOTE;
      *the_object = (Objects_Control *) global_object;
      return;
    }

    iter = _Chain_Next( iter );
  }

  _Thread_Enable_dispatch();
  *location   = OBJECTS_ERROR;
  *the_object = NULL;
}
/*
 * Release one allocation block whose objects are all inactive: unlink
 * every object of that block from the Inactive chain, free the block's
 * workspace memory, and reset the per-block bookkeeping.  At most one
 * block is freed per call.
 */
void _Objects_Shrink_information(
  Objects_Information *information
)
{
  uint32_t index_base;
  uint32_t block_count;
  uint32_t block;

  /* Scan the per-block inactive counters for a fully inactive block. */
  index_base = _Objects_Get_index( information->minimum_id );
  block_count =
    (information->maximum - index_base) / information->allocation_size;

  for ( block = 0; block < block_count; block++ ) {
    if (
      information->inactive_per_block[ block ] == information->allocation_size
    ) {
      const Chain_Node *tail =
        _Chain_Immutable_tail( &information->Inactive );
      Chain_Node *node = _Chain_First( &information->Inactive );
      uint32_t    index_end = index_base + information->allocation_size;

      while ( node != tail ) {
        Objects_Control *object = (Objects_Control *) node;
        uint32_t         index = _Objects_Get_index( object->id );

        /* Fetch the successor first: extraction invalidates the links. */
        node = _Chain_Next( node );

        /* Unlink only objects whose index lies inside this block. */
        if ( index >= index_base && index < index_end ) {
          _Chain_Extract( &object->Node );
        }
      }

      /* Return the block's storage and reset the information record. */
      _Workspace_Free( information->object_blocks[ block ] );
      information->object_blocks[ block ] = NULL;
      information->inactive_per_block[ block ] = 0;
      information->inactive -= information->allocation_size;

      return;
    }

    index_base += information->allocation_size;
  }
}
/*
 *  _POSIX_Keys_Run_destructors
 *
 *  17.1.1 Thread-Specific Data Key Create, P1003.1c/Draft 10, p. 163
 *
 *  NOTE: This is the routine executed when a thread exits to
 *        run through all the keys and do the destructor action.
 */
void _POSIX_Keys_Run_destructors(
  Thread_Control *thread
)
{
  Chain_Control *chain;
  POSIX_Keys_Key_value_pair *iter, *next;
  void *value;
  void (*destructor) (void *);
  POSIX_Keys_Control *the_key;
  Objects_Locations location;

  _Thread_Disable_dispatch();

  /* Per-thread chain of key/value pairs in the thread's POSIX API area. */
  chain = &(
    (POSIX_API_Control *)thread->API_Extensions[ THREAD_API_POSIX ]
  )->Key_Chain;

  iter = (POSIX_Keys_Key_value_pair *) _Chain_First( chain );
  while ( !_Chain_Is_tail( chain, &iter->Key_values_per_thread_node ) ) {
    /* Fetch the successor before iter is extracted and freed below. */
    next = (POSIX_Keys_Key_value_pair *)
      _Chain_Next( &iter->Key_values_per_thread_node );
    /*
     * Remove the pair from the rbtree and the chain.  The Chain_Node can be
     * converted to POSIX_Keys_Key_value_pair * because Chain_Node is the
     * first member of the POSIX_Keys_Key_value_pair structure.
     */
    _RBTree_Extract(
      &_POSIX_Keys_Key_value_lookup_tree,
      &iter->Key_value_lookup_node
    );
    _Chain_Extract_unprotected( &iter->Key_values_per_thread_node );

    /*
     * Run the key's destructor if destructor and value are both non-null.
     *
     * NOTE(review): the location result of _POSIX_Keys_Get() is not checked
     * before the_key->destructor is dereferenced -- presumably the key
     * cannot be deleted while dispatching is disabled; verify this holds.
     */
    the_key = _POSIX_Keys_Get( iter->key, &location );
    destructor = the_key->destructor;
    value = iter->value;
    if ( destructor != NULL && value != NULL )
      (*destructor)( value );
    _Objects_Put( &the_key->Object );

    _POSIX_Keys_Key_value_pair_free( iter );

    iter = next;
  }

  _Thread_Enable_dispatch();
}
/*
 * Search the active proxy chain for the proxy whose thread id matches
 * the_id.  Returns the proxy's Thread_Control, or NULL if no active
 * proxy matches.  Interrupts are disabled during the scan but flashed
 * between elements to bound latency; the search restarts from the top
 * if the scan is found to have wandered onto the inactive chain.
 */
Thread_Control *_Thread_MP_Find_proxy (
  Objects_Id the_id
)
{
  Chain_Node     *proxy_node;
  Thread_Control *the_thread;
  ISR_Level       level;

restart:
  _ISR_Disable( level );

  for (
    proxy_node = _Chain_First( &_Thread_MP_Active_proxies );
    !_Chain_Is_tail( &_Thread_MP_Active_proxies, proxy_node ) ;
  ) {
    /* The chain node is embedded at a fixed offset inside the proxy. */
    the_thread = (Thread_Control *) _Addresses_Subtract_offset(
      proxy_node,
      _Thread_MP_Proxy_Active_offset
    );

    if ( _Objects_Are_ids_equal( the_thread->Object.id, the_id ) ) {
      _ISR_Enable( level );
      return the_thread;
    }

    /* Briefly enable interrupts between elements to bound ISR latency. */
    _ISR_Flash( level );

    proxy_node = _Chain_Next( proxy_node );

    /*
     * A proxy which is only dormant is not in a blocking state.
     * Therefore, we are looking at proxy which has been moved from
     * active to inactive chain (by an ISR) and need to restart
     * the search.
     */
    if ( _States_Is_only_dormant( the_thread->current_state ) ) {
      _ISR_Enable( level );
      goto restart;
    }
  }

  _ISR_Enable( level );
  return NULL;
}
/* * Reschedule threads -- select heirs for all cores */ void _Scheduler_simple_smp_Schedule(void) { Chain_Control *c; Chain_Node *n; Thread_Control *t; uint32_t cpu; c = (Chain_Control *)_Scheduler.information; cpu = 0; /* * Iterate over the first N (where N is the number of CPU cores) threads * on the ready chain. Attempt to assign each as heir on a core. When * unable to assign a thread as a new heir, then stop. */ for (n = _Chain_First(c); !_Chain_Is_tail(c, n); n = _Chain_Next(n)) { t = (Thread_Control *)n; if ( !_Scheduler_simple_smp_Assign( t ) ) break; if ( ++cpu >= _SMP_Processor_count ) break; } }
Objects_Name_or_id_lookup_errors _Objects_MP_Global_name_search ( Objects_Information *information, Objects_Name the_name, uint32_t nodes_to_search, Objects_Id *the_id ) { uint32_t low_node; uint32_t high_node; uint32_t node_index; Chain_Control *the_chain; Chain_Node *the_node; Objects_MP_Control *the_object; uint32_t name_to_use; name_to_use = the_name.name_u32; /* XXX only fixed length names */ if ( nodes_to_search > _Objects_Maximum_nodes ) return OBJECTS_INVALID_NODE; if ( information->global_table == NULL ) return OBJECTS_INVALID_NAME; if ( nodes_to_search == OBJECTS_SEARCH_ALL_NODES || nodes_to_search == OBJECTS_SEARCH_OTHER_NODES ) { low_node = 1; high_node = _Objects_Maximum_nodes; } else { low_node = high_node = nodes_to_search; } _Thread_Disable_dispatch(); for ( node_index = low_node ; node_index <= high_node ; node_index++ ) { /* * NOTE: The local node was search (if necessary) by * _Objects_Name_to_id_XXX before this was invoked. */ if ( !_Objects_Is_local_node( node_index ) ) { the_chain = &information->global_table[ node_index ]; for ( the_node = _Chain_First( the_chain ) ; !_Chain_Is_tail( the_chain, the_node ) ; the_node = _Chain_Next( the_node ) ) { the_object = (Objects_MP_Control *) the_node; if ( the_object->name == name_to_use ) { *the_id = the_object->Object.id; _Thread_Enable_dispatch(); return OBJECTS_NAME_OR_ID_LOOKUP_SUCCESSFUL; } } } } _Thread_Enable_dispatch(); return OBJECTS_INVALID_NAME; }
/*
 * Wake up to count threads waiting on the condition variable.  Returns
 * the number of threads woken.  Extracted threads are collected on a
 * local chain under the condition queue lock and unblocked afterwards
 * with thread dispatching disabled.
 */
static int _Condition_Wake(
  struct _Condition_Control *_condition,
  int                        count
)
{
  Condition_Control  *condition;
  ISR_lock_Context    lock_context;
  Thread_queue_Heads *heads;
  Chain_Control       unblock;
  Chain_Node         *node;
  Chain_Node         *tail;
  int                 woken;

  condition = _Condition_Get( _condition );
  _ISR_lock_ISR_disable( &lock_context );
  _Condition_Queue_acquire_critical( condition, &lock_context );

  /*
   * In common uses cases of condition variables there are normally no threads
   * on the queue, so check this condition early.
   */
  heads = condition->Queue.Queue.heads;
  if ( __predict_true( heads == NULL ) ) {
    _Condition_Queue_release( condition, &lock_context );
    return 0;
  }

  woken = 0;
  _Chain_Initialize_empty( &unblock );

  /* Extract up to count waiters; heads becomes NULL when queue empties. */
  while ( count > 0 && heads != NULL ) {
    const Thread_queue_Operations *operations;
    Thread_Control                *first;
    bool                           do_unblock;

    operations = CONDITION_TQ_OPERATIONS;
    first = ( *operations->first )( heads );

    do_unblock = _Thread_queue_Extract_locked(
      &condition->Queue.Queue,
      operations,
      first
    );
    /* Only threads that still need an explicit unblock are collected. */
    if (do_unblock) {
      _Chain_Append_unprotected( &unblock, &first->Wait.Node.Chain );
    }

    ++woken;
    --count;
    heads = condition->Queue.Queue.heads;
  }

  node = _Chain_First( &unblock );
  tail = _Chain_Tail( &unblock );
  if ( node != tail ) {
    Per_CPU_Control *cpu_self;

    /* Disable dispatching before dropping the queue lock, then unblock. */
    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
    _Condition_Queue_release( condition, &lock_context );

    do {
      Thread_Control *thread;
      Chain_Node     *next;

      /* Fetch the successor first: unblocking may reuse the chain node. */
      next = _Chain_Next( node );
      thread = THREAD_CHAIN_NODE_TO_THREAD( node );
      _Watchdog_Remove_ticks( &thread->Timer );
      _Thread_Unblock( thread );

      node = next;
    } while ( node != tail );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Condition_Queue_release( condition, &lock_context );
  }

  return woken;
}
/*
 * This method is unique to this scheduler because it takes into
 * account affinity as it determines the highest ready thread.
 * Since this is used to pick a new thread to replace the victim,
 * the highest ready thread must have affinity such that it can
 * be executed on the victim's processor.
 */
static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
  Scheduler_Context *context,
  Scheduler_Node    *victim
)
{
  Scheduler_priority_SMP_Context       *self =
    _Scheduler_priority_SMP_Get_self( context );
  Priority_Control                      index;
  Scheduler_Node                       *highest = NULL;
  Thread_Control                       *victim_thread;
  uint32_t                              victim_cpu_index;
  Scheduler_priority_affinity_SMP_Node *node;

  /*
   * This is done when we need to check if reevaluations are needed.
   * With no victim there is no affinity constraint, so simply return
   * the first node of the highest priority ready queue.
   */
  if ( victim == NULL ) {
    node = (Scheduler_priority_affinity_SMP_Node *)
      _Scheduler_priority_Ready_queue_first(
        &self->Bit_map,
        &self->Ready[ 0 ]
      );
    return &node->Base.Base.Base;
  }

  victim_thread = _Scheduler_Node_get_owner( victim );
  victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );

  /**
   * @todo The deterministic priority scheduler structure is optimized
   * for insertion, extraction, and finding the highest priority
   * thread. Scanning the list of ready threads is not a purpose
   * for which it was optimized. There are optimizations to be
   * made in this loop.
   *
   * + by checking the major bit, we could potentially skip entire
   *   groups of 16.
   *
   * When using this scheduler as implemented, the application's
   * choice of numeric priorities and their distribution can have
   * an impact on performance.
   */
  /* Walk priorities from the current highest downward to the maximum. */
  for (
    index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
    index <= PRIORITY_MAXIMUM;
    index++
  ) {
    Chain_Control *chain = &self->Ready[index];
    Chain_Node    *chain_node;

    for (
      chain_node = _Chain_First( chain );
      chain_node != _Chain_Immutable_tail( chain ) ;
      chain_node = _Chain_Next( chain_node )
    ) {
      node = (Scheduler_priority_affinity_SMP_Node *) chain_node;

      /*
       * Can this thread run on this CPU?
       */
      if ( CPU_ISSET( (int) victim_cpu_index, node->Affinity.set ) ) {
        highest = &node->Base.Base.Base;
        break;
      }
    }
    if ( highest )
      break;
  }

  /*
   * NOTE(review): _Assert compiles away in non-debug builds, so highest
   * can be returned as NULL if no ready thread has affinity for the
   * victim's CPU -- presumably callers guarantee one exists; verify.
   */
  _Assert( highest != NULL );

  return highest;
}