static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
{
  bool success;
  bool unblock;

  /*
   * We must update the wait flags under protection of the current thread
   * lock, otherwise a _Thread_Timeout() running on another processor may
   * interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );
  return unblock;
}
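/*
 * Illustrative sketch (not RTEMS code): a minimal, self-contained model of
 * the waker-side handoff above, written with C11 atomics.  The names
 * model_thread, model_make_ready_again and the model_wait_state enum are
 * invented for this example.  It shows why the try-change must be a single
 * atomic operation: the waker and the waiting thread race to move the state
 * out of INTEND_TO_BLOCK, and exactly one of them may win.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum model_wait_state {
  MODEL_INTEND_TO_BLOCK,
  MODEL_BLOCKED,
  MODEL_READY_AGAIN
};

typedef struct {
  _Atomic enum model_wait_state wait_state;
} model_thread;

/* Waker side: returns true if the caller must also unblock the thread. */
static bool model_make_ready_again( model_thread *t )
{
  enum model_wait_state expected = MODEL_INTEND_TO_BLOCK;

  if (
    atomic_compare_exchange_strong( &t->wait_state, &expected, MODEL_READY_AGAIN )
  ) {
    /* The waiter has not blocked yet; it will observe READY_AGAIN itself. */
    return false;
  }

  /* The waiter already reached BLOCKED, so the waker must unblock it. */
  atomic_store( &t->wait_state, MODEL_READY_AGAIN );
  return true;
}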
rtems_status_code rtems_task_wake_after( rtems_interval ticks )
{
  /*
   * It is critical to obtain the executing thread after thread dispatching
   * is disabled on SMP configurations.
   */
  Thread_Control  *executing;
  Per_CPU_Control *cpu_self;

  cpu_self = _Thread_Dispatch_disable();
  executing = _Thread_Executing;

  if ( ticks == 0 ) {
    _Thread_Yield( executing );
  } else {
    _Thread_Set_state( executing, STATES_DELAYING );
    _Thread_Wait_flags_set( executing, THREAD_WAIT_STATE_BLOCKED );
    _Watchdog_Initialize( &executing->Timer, _Thread_Timeout, 0, executing );
    _Watchdog_Insert_ticks( &executing->Timer, ticks );
  }

  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
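/*
 * Usage sketch for the directive above, assuming the Classic API header
 * <rtems.h>; the task body and the tick count are arbitrary examples.  A
 * ticks argument of 0 (RTEMS_YIELD_PROCESSOR) yields without blocking, any
 * other value delays the calling task for that many clock ticks.
 */
#include <rtems.h>

rtems_task example_periodic_work( rtems_task_argument argument )
{
  (void) argument;

  for ( ;; ) {
    /* Do some work, then sleep for ten clock ticks. */
    (void) rtems_task_wake_after( 10 );
  }
}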
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );
  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
bool _Thread_queue_Do_extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread
#if defined(RTEMS_MULTIPROCESSING)
  ,
  const Thread_queue_Context    *queue_context
#endif
)
{
  bool success;
  bool unblock;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
    Thread_Proxy_control    *the_proxy;
    Thread_queue_MP_callout  mp_callout;

    the_proxy = (Thread_Proxy_control *) the_thread;
    mp_callout = queue_context->mp_callout;
    _Assert( mp_callout != NULL );
    the_proxy->thread_queue_callout = queue_context->mp_callout;
  }
#endif

  ( *operations->extract )( queue, the_thread );

  /*
   * We must update the wait flags under protection of the current thread
   * lock, otherwise a _Thread_Timeout() running on another processor may
   * interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );
  return unblock;
}
void _Rate_monotonic_Timeout( Watchdog_Control *the_watchdog )
{
  Rate_monotonic_Control *the_period;
  Thread_Control         *owner;
  ISR_lock_Context        lock_context;
  Thread_Wait_flags       wait_flags;

  the_period = RTEMS_CONTAINER_OF(
    the_watchdog,
    Rate_monotonic_Control,
    Timer
  );
  owner = the_period->owner;

  _ISR_lock_ISR_disable( &lock_context );
  _Rate_monotonic_Acquire_critical( the_period, &lock_context );
  wait_flags = _Thread_Wait_flags_get( owner );

  if (
    ( wait_flags & THREAD_WAIT_CLASS_PERIOD ) != 0
      && owner->Wait.return_argument == the_period
  ) {
    bool unblock;
    bool success;

    owner->Wait.return_argument = NULL;

    success = _Thread_Wait_flags_try_change_release(
      owner,
      RATE_MONOTONIC_INTEND_TO_BLOCK,
      RATE_MONOTONIC_READY_AGAIN
    );
    if ( success ) {
      unblock = false;
    } else {
      _Assert( _Thread_Wait_flags_get( owner ) == RATE_MONOTONIC_BLOCKED );
      _Thread_Wait_flags_set( owner, RATE_MONOTONIC_READY_AGAIN );
      unblock = true;
    }

    _Rate_monotonic_Restart( the_period, owner, &lock_context );

    if ( unblock ) {
      _Thread_Unblock( owner );
    }
  } else {
    _Rate_monotonic_Renew_deadline( the_period, &lock_context );
  }
}
static rtems_status_code _Rate_monotonic_Block_while_active(
  Rate_monotonic_Control *the_period,
  rtems_interval          length,
  Thread_Control         *executing,
  ISR_lock_Context       *lock_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  /*
   * Update statistics from the concluding period.
   */
  _Rate_monotonic_Update_statistics( the_period );

  /*
   * This tells the _Rate_monotonic_Timeout that this task is
   * in the process of blocking on the period and that we
   * may be changing the length of the next period.
   */
  the_period->next_length = length;
  executing->Wait.return_argument = the_period;
  _Thread_Wait_flags_set( executing, RATE_MONOTONIC_INTEND_TO_BLOCK );

  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
  _Rate_monotonic_Release( the_period, lock_context );

  _Thread_Set_state( executing, STATES_WAITING_FOR_PERIOD );

  success = _Thread_Wait_flags_try_change_acquire(
    executing,
    RATE_MONOTONIC_INTEND_TO_BLOCK,
    RATE_MONOTONIC_BLOCKED
  );
  if ( !success ) {
    _Assert(
      _Thread_Wait_flags_get( executing ) == RATE_MONOTONIC_READY_AGAIN
    );
    _Thread_Unblock( executing );
  }

  _Thread_Dispatch_enable( cpu_self );
  return RTEMS_SUCCESSFUL;
}
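/*
 * Usage sketch for the rate monotonic manager paths shown in the two
 * routines above, assuming <rtems.h>; the object name, the period length of
 * 100 ticks and the error handling are illustrative only.
 * rtems_rate_monotonic_period() blocks via
 * _Rate_monotonic_Block_while_active() while the current period is still
 * running and returns RTEMS_TIMEOUT when a deadline was missed.
 */
#include <rtems.h>

rtems_task example_rate_monotonic_task( rtems_task_argument argument )
{
  rtems_id          period_id;
  rtems_status_code sc;

  (void) argument;

  sc = rtems_rate_monotonic_create(
    rtems_build_name( 'P', 'E', 'R', '1' ),
    &period_id
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    (void) rtems_task_delete( RTEMS_SELF );
  }

  for ( ;; ) {
    /* Block until the current period expires; 100 ticks is an example. */
    sc = rtems_rate_monotonic_period( period_id, 100 );
    if ( sc == RTEMS_TIMEOUT ) {
      /* The previous period overran its deadline. */
    }

    /* Do the periodic work here. */
  }
}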
void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  _Assert( queue_context->enqueue_callout != NULL );
  _Assert( (uint8_t) queue_context->timeout_discipline != 0x7f );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical(
    &queue_context->Lock_context.Lock_context
  );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  ( *queue_context->enqueue_callout )( queue, the_thread, queue_context );

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * If the thread wants to timeout, then schedule its timer.
   */
  _Thread_queue_Timeout( the_thread, cpu_self, queue_context );

  /*
   * At this point thread dispatching is disabled, however, we already
   * released the thread queue lock.  Thus, interrupts or threads on other
   * processors may have already changed our state with respect to the
   * thread queue object.  The request could be satisfied or timed out.
   * This situation is indicated by the thread wait flags.  Other parties
   * must not modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel
   * the blocking operation ourselves if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}
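/*
 * Illustrative sketch (not RTEMS code): the waiter-side counterpart of the
 * model given after _Thread_queue_Make_ready_again(), reusing the invented
 * model_thread type and model_wait_state enum from that sketch.  If the
 * atomic change from INTEND_TO_BLOCK to BLOCKED fails, another party already
 * moved the state to READY_AGAIN, so the waiter must cancel its own blocking
 * preparation instead of going to sleep.
 */
static bool model_commit_block( model_thread *t )
{
  enum model_wait_state expected = MODEL_INTEND_TO_BLOCK;

  if (
    atomic_compare_exchange_strong( &t->wait_state, &expected, MODEL_BLOCKED )
  ) {
    /* The request is still pending; it is safe to actually block now. */
    return true;
  }

  /* READY_AGAIN was set first: undo the blocking preparation and continue. */
  return false;
}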
bool _Thread_Initialize(
  Objects_Information                 *information,
  Thread_Control                      *the_thread,
  const Scheduler_Control             *scheduler,
  void                                *stack_area,
  size_t                               stack_size,
  bool                                 is_fp,
  Priority_Control                     priority,
  bool                                 is_preemptible,
  Thread_CPU_budget_algorithms         budget_algorithm,
  Thread_CPU_budget_algorithm_callout  budget_callout,
  uint32_t                             isr_level,
  Objects_Name                         name
)
{
  uintptr_t        tls_size = _TLS_Get_size();
  size_t           actual_stack_size = 0;
  void            *stack = NULL;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  void            *fp_area = NULL;
#endif
  bool             extension_status;
  size_t           i;
  bool             scheduler_node_initialized = false;
  Per_CPU_Control *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Initialize the Ada self pointer
   */
#if __RTEMS_ADA__
  the_thread->rtems_ada_self = NULL;
#endif

  the_thread->Start.tls_area = NULL;

  /*
   *  Allocate and Initialize the stack for this thread.
   */
#if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
  actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
  if ( !actual_stack_size || actual_stack_size < stack_size )
    return false;                     /* stack allocation failed */

  stack = the_thread->Start.stack;
#else
  if ( !stack_area ) {
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                   /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = true;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = false;
  }
#endif

  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack,
    actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( is_fp ) {
    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area )
      goto failed;
    fp_area = _Context_Fp_start( fp_area, 0 );
  }
  the_thread->fp_context = fp_area;
  the_thread->Start.fp_context = fp_area;
#endif

  /*
   *  Initialize the thread timer
   */
  _Watchdog_Preinitialize( &the_thread->Timer );

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
  /* Initialize the head of chain of held mutexes */
  _Chain_Initialize_empty( &the_thread->lock_mutex );
#endif

  /*
   *  Clear the extensions area so extension users can determine
   *  if they are linked to the thread.  An extension user may
   *  create the extension long after tasks have been created
   *  so they cannot rely on the thread create user extension
   *  call.  The object index starts with one, so the first extension
   *  context is unused.
   */
  for ( i = 1 ; i <= rtems_configuration_get_maximum_extensions() ; ++i )
    the_thread->extensions[ i ] = NULL;

  /*
   *  General initialization
   */
  the_thread->is_fp                  = is_fp;
  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
        break;
    #endif
    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
        break;
    #endif
  }

#if defined(RTEMS_SMP)
  the_thread->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  _CPU_Context_Set_is_executing( &the_thread->Registers, false );
  the_thread->Lock.current = &the_thread->Lock.Default;
  _ISR_lock_Initialize( &the_thread->Lock.Default, "Thread Lock Default" );
  _Atomic_Init_uint( &the_thread->Lock.generation, 0 );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state          = STATES_DORMANT;
  the_thread->Wait.queue             = NULL;
  the_thread->Wait.operations        = &_Thread_queue_Operations_default;
  the_thread->resource_count         = 0;
  the_thread->current_priority       = priority;
  the_thread->real_priority          = priority;
  the_thread->priority_generation    = 0;
  the_thread->Start.initial_priority = priority;

  _Thread_Wait_flags_set( the_thread, THREAD_WAIT_FLAGS_INITIAL );

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Scheduler_Update_priority( the_thread, priority );

  /*
   *  Initialize the CPU usage statistics
   */
  _Timestamp_Set_to_zero( &the_thread->cpu_time_used );

  /*
   *  Initialize the thread's key value node chain
   */
  _Chain_Initialize_empty( &the_thread->Key_Chain );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Thread_Action_initialize(
    &the_thread->Life.Action,
    _Thread_Life_action_handler
  );
  the_thread->Life.state = THREAD_LIFE_NORMAL;
  the_thread->Life.terminator = NULL;

  the_thread->Capture.flags = 0;
  the_thread->Capture.control = NULL;

  /*
   *  Open the object
   */
  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  _Workspace_Free( fp_area );
#endif

  _Thread_Stack_Free( the_thread );
  return false;
}
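/*
 * Usage sketch for the Classic API path that eventually reaches
 * _Thread_Initialize(), assuming <rtems.h>; the object name, priority, stack
 * size, entry point and attribute set are arbitrary examples.
 */
#include <rtems.h>

rtems_task example_entry( rtems_task_argument argument );

static rtems_status_code example_spawn( void )
{
  rtems_id          task_id;
  rtems_status_code sc;

  sc = rtems_task_create(
    rtems_build_name( 'E', 'X', 'A', 'M' ),
    10,                               /* initial priority */
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_FLOATING_POINT,             /* allocate a floating point context */
    &task_id
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return sc;
  }

  return rtems_task_start( task_id, example_entry, 0 );
}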
void _Thread_queue_Enqueue_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  States_Control                 state,
  Thread_queue_Context          *queue_context
)
{
  Thread_queue_Path  path;
  Per_CPU_Control   *cpu_self;
  bool               success;

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue, operations );

  if ( !_Thread_queue_Path_acquire( the_thread, queue, &path ) ) {
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  ( *operations->enqueue )( queue, the_thread, &path );

  _Thread_queue_Path_release( &path );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context );

  if (
    cpu_self->thread_dispatch_disable_level
      != queue_context->expected_thread_dispatch_disable_level
  ) {
    _Terminate(
      INTERNAL_ERROR_CORE,
      false,
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
    );
  }

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, state );

  /*
   * If the thread wants to timeout, then schedule its timer.
   */
  switch ( queue_context->timeout_discipline ) {
    case WATCHDOG_RELATIVE:
      /* A relative timeout of 0 is a special case indefinite (no) timeout */
      if ( queue_context->timeout != 0 ) {
        _Thread_Timer_insert_relative(
          the_thread,
          cpu_self,
          _Thread_Timeout,
          (Watchdog_Interval) queue_context->timeout
        );
      }
      break;
    case WATCHDOG_ABSOLUTE:
      _Thread_Timer_insert_absolute(
        the_thread,
        cpu_self,
        _Thread_Timeout,
        queue_context->timeout
      );
      break;
    default:
      break;
  }

  /*
   * At this point thread dispatching is disabled, however, we already
   * released the thread queue lock.  Thus, interrupts or threads on other
   * processors may have already changed our state with respect to the
   * thread queue object.  The request could be satisfied or timed out.
   * This situation is indicated by the thread wait flags.  Other parties
   * must not modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel
   * the blocking operation ourselves if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Update_priority( path.update_priority );
  _Thread_Dispatch_enable( cpu_self );
}