/*
 * Returns a node to the freechain.
 *
 * A NULL node is silently ignored, so callers may pass the result of a
 * failed get operation without checking.  The node is re-initialized and
 * pushed to the front of the free chain.
 */
void _Freechain_Put( Freechain_Control *freechain, void *node )
{
  if ( node == NULL ) {
    return;
  }

  _Chain_Initialize_node( node );
  _Chain_Prepend_unprotected( &freechain->Free, node );
}
/*
 * Initializes the FIFO heads of a thread queue with the given thread as the
 * one and only enqueued thread.
 *
 * The thread is represented on the queue by the chain node embedded in its
 * own scheduler node.
 */
static void _Thread_queue_FIFO_do_initialize(
  Thread_queue_Heads *heads,
  Thread_Control     *the_thread
)
{
  Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( the_thread );
  Chain_Node     *wait_node = &own_node->Wait.Node.Chain;

  _Chain_Initialize_node( wait_node );
  _Chain_Initialize_one( &heads->Heads.Fifo, wait_node );
}
/*
 * Runs the given handler with the given argument on every processor of the
 * specified processor set and spins until all targets have processed the
 * action.
 *
 * @param setsize Size in bytes of the cpu_set_t pointed to by @a cpus.
 * @param cpus    Target processor set; NULL means all online processors.
 * @param handler Action to execute on each target processor.
 * @param arg     Argument passed through to @a handler.
 */
void _SMP_Multicast_action(
  const size_t        setsize,
  const cpu_set_t    *cpus,
  SMP_Action_handler  handler,
  void               *arg
)
{
  SMP_Multicast_action node;
  Processor_mask       targets;
  SMP_lock_Context     lock_context;
  uint32_t             i;

  /*
   * Before the system is up, simply run the handler locally instead of
   * going through the multicast machinery.
   */
  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
    ( *handler )( arg );
    return;
  }

  if( cpus == NULL ) {
    /* NULL set means: target every online processor. */
    _Processor_mask_Assign( &targets, _SMP_Get_online_processors() );
  } else {
    /* Translate the POSIX cpu_set_t into the internal processor mask. */
    _Processor_mask_Zero( &targets );

    for ( i = 0; i < _SMP_Get_processor_count(); ++i ) {
      if ( CPU_ISSET_S( i, setsize, cpus ) ) {
        _Processor_mask_Set( &targets, i );
      }
    }
  }

  /*
   * The action node lives on this stack frame.  This is safe because we
   * busy-wait below until every target has finished with it.  The relaxed
   * store to node.done happens before the node is published under the lock.
   */
  _Chain_Initialize_node( &node.Node );
  node.handler = handler;
  node.arg = arg;
  _Processor_mask_Assign( &node.targets, &targets );
  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );

  /* Publish the action on the global list under the multicast lock. */
  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
  _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );

  _SMP_Send_message_multicast( &targets, SMP_MESSAGE_MULTICAST_ACTION );

  /* Process pending actions locally as well, in case the executing
     processor is itself a member of the target set. */
  _SMP_Multicasts_try_process();

  /*
   * Busy wait for completion.  The acquire load presumably pairs with a
   * release store in the completion path of _SMP_Multicasts_try_process()
   * — NOTE(review): confirm against the processing side, not visible here.
   */
  while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
    /* Wait */
  };
}
/*
 * Appends a deadlock link for the first thread on the blocking path that
 * still uses the default thread wait operations, and acquires that thread's
 * default wait lock.
 */
static void _Thread_queue_Path_append_deadlock_thread(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *deadlock;

  /*
   * In case of a deadlock, we must obtain the thread wait default lock for
   * the first thread on the path that tries to enqueue on a thread queue.
   * This thread can be identified by the thread wait operations.  This lock
   * acquire is necessary for the timeout and explicit thread priority
   * changes, see _Thread_Priority_perform_actions().
   */

  deadlock = NULL;

  /* Walk the owner chain until a thread with the default wait operations is
     found; the last owner visited is the deadlock thread. */
  while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
    the_thread = the_thread->Wait.queue->owner;
    deadlock = the_thread;
  }

  if ( deadlock != NULL ) {
    Thread_queue_Link *link;

    link = &queue_context->Path.Deadlock;
    _Chain_Initialize_node( &link->Path_node );
    _Chain_Append_unprotected(
      &queue_context->Path.Links,
      &link->Path_node
    );
    link->owner = deadlock;
    /* No queue lock is held via this link, only the default wait lock. */
    link->Lock_context.Wait.queue = NULL;
    _Thread_Wait_acquire_default_critical(
      deadlock,
      &link->Lock_context.Lock_context
    );
  }
}
/* NOTE(review): the #if matching this #endif is above the visible chunk;
   presumably it makes the function static only in some configurations. */
static
#endif
bool _Thread_queue_Path_acquire_critical(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *owner;
#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the
   * task a bit more difficult.  We have to avoid deadlocks at SMP lock
   * level, since this would result in an unrecoverable deadlock of the
   * overall system.
   */

  _Chain_Initialize_empty( &queue_context->Path.Links );

  owner = queue->owner;

  /* An owner-less queue cannot participate in a deadlock cycle. */
  if ( owner == NULL ) {
    return true;
  }

  /* The thread directly owns the queue it wants to block on: deadlock. */
  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node(
    &queue_context->Path.Start.Lock_context.Wait.Gate.Node
  );
  link = &queue_context->Path.Start;
  _RBTree_Initialize_node( &link->Registry_node );
  _Chain_Initialize_node( &link->Path_node );

  /* Follow the chain of queue owners, recording each hop as a path link and
     taking the corresponding locks along the way. */
  do {
    _Chain_Append_unprotected(
      &queue_context->Path.Links,
      &link->Path_node
    );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Lock_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Lock_context.Wait.queue = target;

    if ( target != NULL ) {
      /* Registering the link fails when it would close a cycle in the
         global link registry, i.e. a deadlock was detected. */
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Lock_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Lock_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Lock_context );

        /* The owner stopped waiting on the target queue while we dropped
           its default lock: undo this hop and stop the traversal. */
        if ( link->Lock_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical(
            target,
            &link->Lock_context
          );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Lock_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Lock_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        /* Deadlock: record the deadlock thread and report failure. */
        link->Lock_context.Wait.queue = NULL;
        _Thread_queue_Path_append_deadlock_thread( owner, queue_context );
        return false;
      }
    } else {
      /* The owner is not blocked on any queue, the path ends here. */
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  /*
   * Non-SMP: simply walk the owner chain; a deadlock exists exactly when
   * the chain leads back to the enqueuing thread itself.
   */
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}