/*
 * Releases all thread wait locks that were acquired along a thread queue
 * path, walking the path's link chain from the last node back towards the
 * head, i.e. in the reverse order of the chain.
 *
 * NOTE(review): reverse-order release presumably mirrors the acquisition
 * order of the corresponding path acquire function, which is not visible
 * here — confirm against that function.
 */
static void _Thread_queue_Path_release( Thread_queue_Path *path )
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &path->Links );
  node = _Chain_Last( &path->Links );

  if ( head != node ) {
    Thread_queue_Link *link;

    /*
     * The terminal link may have an owner which does not wait on a thread
     * queue.
     */
    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Queue_context.Wait.queue == NULL ) {
      /*
       * No thread queue recorded for this owner: only its default wait
       * lock was taken, so only that lock is released here.
       */
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Queue_context.Lock_context
      );
      node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
      /* Mark the node off-chain so debug assertions can catch reuse. */
      _Chain_Set_off_chain( &link->Path_node );
#endif
    }

    while ( head != node ) {
      /* The other links have an owner which waits on a thread queue */
      link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );
      _Assert( link->Queue_context.Wait.queue != NULL );

      /* Unlink, release the queue lock, and drop the owner's wait request. */
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Queue_context.Wait.queue,
        &link->Queue_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Queue_context );
      node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
      /* Mark the node off-chain so debug assertions can catch reuse. */
      _Chain_Set_off_chain( &link->Path_node );
#endif
    }
  }
#else
  /* On uniprocessor builds there is no path to release. */
  (void) path;
#endif
}
static void test_chain_control_layout(void) { Chain_Control chain; puts( "INIT - Verify rtems_chain_control layout" ); rtems_test_assert( sizeof(Chain_Control) == sizeof(Chain_Node) + sizeof(Chain_Node *) ); rtems_test_assert( sizeof(Chain_Control) == 3 * sizeof(Chain_Node *) ); rtems_test_assert( _Chain_Previous( _Chain_Head( &chain ) ) == _Chain_Next( _Chain_Tail( &chain ) ) ); #if !defined( RTEMS_SMP ) rtems_test_assert( sizeof(Chain_Control) == sizeof(rtems_chain_control) ); #endif }
/*
 * Removes an active watchdog from the header's delta chain and repairs any
 * iterators currently traversing that chain.
 *
 * The `+=` of the removed watchdog's delta_interval into its successor shows
 * the chain is delta-encoded: each entry stores its interval relative to its
 * predecessor, so removing an entry must fold its delta into the next one.
 */
static void _Watchdog_Remove_it(
  Watchdog_Header   *header,
  Watchdog_Control  *the_watchdog
)
{
  Chain_Node        *next;
  Watchdog_Interval  delta;
  const Chain_Node  *iterator_tail;
  Chain_Node        *iterator_node;

  _Assert( the_watchdog->state == WATCHDOG_ACTIVE );

  /* Mark inactive and record when it was stopped. */
  the_watchdog->state = WATCHDOG_INACTIVE;
  the_watchdog->stop_time = _Watchdog_Ticks_since_boot;

  next = _Chain_Next( &the_watchdog->Node );
  delta = the_watchdog->delta_interval;

  if ( next != _Chain_Tail( &header->Watchdogs ) ) {
    Watchdog_Control *next_watchdog;

    /* Fold the removed delta into the successor to keep sums intact. */
    next_watchdog = (Watchdog_Control *) next;
    next_watchdog->delta_interval += delta;
  }

  _Chain_Extract_unprotected( &the_watchdog->Node );

  /* Fix up every iterator that references the removed node or its successor. */
  iterator_node = _Chain_First( &header->Iterators );
  iterator_tail = _Chain_Immutable_tail( &header->Iterators );

  while ( iterator_node != iterator_tail ) {
    Watchdog_Iterator *iterator;

    iterator = (Watchdog_Iterator *) iterator_node;

    if ( iterator->current == next ) {
      /* The successor absorbed the delta; compensate the iterator. */
      iterator->delta_interval += delta;
    }

    if ( iterator->current == &the_watchdog->Node ) {
      /* Step the iterator back so the traversal resumes correctly. */
      Chain_Node *previous = _Chain_Previous( &the_watchdog->Node );

      iterator->current = previous;

      if ( previous != _Chain_Head( &header->Watchdogs ) ) {
        Watchdog_Control *previous_watchdog;

        /* Account for the predecessor's delta the iterator now sits on. */
        previous_watchdog = (Watchdog_Control *) previous;
        iterator->delta_interval += previous_watchdog->delta_interval;
      }
    }

    iterator_node = _Chain_Next( iterator_node );
  }
}
/*
 * This method is unique to this scheduler because the search for the lowest
 * priority scheduled thread must honour processor affinity: a scheduled
 * thread is only a valid victim if the filter thread has affinity for the
 * processor that thread is executing on.
 *
 * Returns the victim's scheduler node, or NULL if no scheduled thread of
 * equal or lower importance runs on a processor in the filter's affinity set.
 */
static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter_base,
  Chain_Node_order   order
)
{
  Scheduler_SMP_Context                *self = _Scheduler_SMP_Get_self( context );
  Scheduler_priority_affinity_SMP_Node *filter =
    _Scheduler_priority_affinity_SMP_Node_downcast( filter_base );
  Chain_Control                        *scheduled = &self->Scheduled;
  const Chain_Node                     *scan_end =
    _Chain_Immutable_head( scheduled );
  Chain_Node                           *current = _Chain_Last( scheduled );
  Scheduler_Node                       *victim = NULL;

  /* Scan from the lowest priority end of the scheduled chain upwards. */
  while ( current != scan_end ) {
    Scheduler_priority_affinity_SMP_Node *candidate;
    Thread_Control                       *owner;
    uint32_t                              cpu;

    candidate = (Scheduler_priority_affinity_SMP_Node *) current;

    /*
     * Once the candidate is more important than the filter thread, no
     * thread further up the chain can be a victim either — stop searching.
     */
    if (
      (*order)( &candidate->Base.Base.Base.Node, &filter->Base.Base.Base.Node )
    ) {
      break;
    }

    /* Determine which processor the candidate's owner is executing on. */
    owner = _Scheduler_Node_get_owner( &candidate->Base.Base.Base );
    cpu = _Per_CPU_Get_index( _Thread_Get_CPU( owner ) );

    /* Accept the candidate only if the filter has affinity for that CPU. */
    if ( CPU_ISSET( (int) cpu, filter->Affinity.set ) ) {
      victim = &candidate->Base.Base.Base;
      break;
    }

    current = _Chain_Previous( current );
  }

  return victim;
}
static
#endif
/*
 * Releases all thread wait locks that were acquired along the queue
 * context's path, walking the link chain from the last node back towards
 * the head, i.e. in the reverse order of the chain.
 *
 * A link whose Wait.queue is NULL belongs to an owner that does not wait on
 * a thread queue; only its default wait lock is released in that case.
 *
 * NOTE(review): reverse-order release presumably mirrors the acquisition
 * order of the corresponding path acquire function, which is not visible
 * here — confirm against that function.
 */
void _Thread_queue_Path_release_critical(
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &queue_context->Path.Links );
  node = _Chain_Last( &queue_context->Path.Links );

  while ( head != node ) {
    Thread_queue_Link *link;

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Lock_context.Wait.queue != NULL ) {
      /* Unlink, release the queue lock, and drop the owner's wait request. */
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Lock_context.Wait.queue,
        &link->Lock_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Lock_context );
    } else {
      /* Owner is not enqueued: only its default wait lock was acquired. */
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Lock_context.Lock_context
      );
    }

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    /* Mark the node off-chain so debug assertions can catch reuse. */
    _Chain_Set_off_chain( &link->Path_node );
#endif
  }
#else
  /* On uniprocessor builds there is no path to release. */
  (void) queue_context;
#endif
}