bool _Thread_Initialize(
  Thread_Information                   *information,
  Thread_Control                       *the_thread,
  const Scheduler_Control              *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  void                    *fp_area = NULL;
#endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  memset(
    &the_thread->current_state,
    0,
    information->Objects.size - offsetof( Thread_Control, current_state )
  );

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Allocate and Initialize the stack for this thread.
   */
#if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
  actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
  if ( !actual_stack_size || actual_stack_size < stack_size )
    return false;                     /* stack allocation failed */

  stack = the_thread->Start.stack;
#else
  if ( !stack_area ) {
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                   /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = true;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = false;
  }
#endif

  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack,
    actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( is_fp ) {
    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area )
      goto failed;
    fp_area = _Context_Fp_start( fp_area, 0 );
  }
  the_thread->fp_context = fp_area;
  the_thread->Start.fp_context = fp_area;
#endif

  /*
   *  Get thread queue heads
   */
  the_thread->Wait.spare_heads = _Freechain_Get(
    &information->Free_thread_queue_heads,
    _Workspace_Allocate,
    _Objects_Extend_size( &information->Objects ),
    THREAD_QUEUE_HEADS_SIZE( _Scheduler_Count )
  );
  if ( the_thread->Wait.spare_heads == NULL ) {
    goto failed;
  }
  _Thread_queue_Heads_initialize( the_thread->Wait.spare_heads );

  /*
   *  General initialization
   */
  the_thread->is_fp                  = is_fp;
  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  _Thread_Timer_initialize( &the_thread->Timer, cpu );

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
        break;
    #endif
    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
        break;
    #endif
  }

#if defined(RTEMS_SMP)
  RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  the_thread->Lock.current = &the_thread->Lock.Default;
  _SMP_ticket_lock_Initialize( &the_thread->Lock.Default );
  _SMP_lock_Stats_initialize( &the_thread->Lock.Stats, "Thread Lock" );
  _SMP_lock_Stats_initialize(
    &the_thread->Potpourri_stats,
    "Thread Potpourri"
  );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  _Thread_queue_Initialize( &the_thread->Join_queue );

  the_thread->current_state          = STATES_DORMANT;
  the_thread->Wait.operations        = &_Thread_queue_Operations_default;
  the_thread->current_priority       = priority;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  RTEMS_STATIC_ASSERT( THREAD_WAIT_FLAGS_INITIAL == 0, Wait_flags );

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Scheduler_Update_priority( the_thread, priority );

  /* POSIX Keys */
  _RBTree_Initialize_empty( &the_thread->Keys.Key_value_pairs );
  _ISR_lock_Initialize( &the_thread->Keys.Lock, "POSIX Key Value Pairs" );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  RTEMS_STATIC_ASSERT( THREAD_LIFE_NORMAL == 0, Life_state );

  /*
   *  Open the object
   */
  _Objects_Open( &information->Objects, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

  _Freechain_Put(
    &information->Free_thread_queue_heads,
    the_thread->Wait.spare_heads
  );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  _Workspace_Free( fp_area );
#endif

  _Thread_Stack_Free( the_thread );

  return false;
}
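/*
 * A minimal, self-contained sketch of the add-on patching loop above,
 * using hypothetical names (Demo_add_on, Demo_control, demo_add_ons);
 * it is not RTEMS code.  Each add-on entry records two offsets into the
 * control block: the loop writes the address of the storage at
 * source_offset into the pointer located at destination_offset, so
 * API-specific data embedded in the same allocation becomes reachable
 * through a pointer field.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct {
  size_t destination_offset;  /* where the pointer field lives */
  size_t source_offset;       /* what the pointer should point to */
} Demo_add_on;

typedef struct {
  void *api_data;             /* patched to point at storage below */
  int   storage;              /* embedded API-specific storage */
} Demo_control;

static const Demo_add_on demo_add_ons[] = {
  { offsetof( Demo_control, api_data ), offsetof( Demo_control, storage ) }
};

int main( void )
{
  Demo_control control = { NULL, 42 };
  size_t       i;

  for ( i = 0 ; i < sizeof( demo_add_ons ) / sizeof( demo_add_ons[ 0 ] ) ; ++i ) {
    const Demo_add_on *add_on = &demo_add_ons[ i ];

    *(void **) ( (char *) &control + add_on->destination_offset ) =
      (char *) &control + add_on->source_offset;
  }

  /* Prints 42: api_data now points at the embedded storage member. */
  printf( "%d\n", *(int *) control.api_data );
  return 0;
}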
void _Thread_Handler( void )
{
  Thread_Control  *executing = _Thread_Executing;
  ISR_Level        level;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

#if !defined(RTEMS_SMP)
  /*
   * Have to put level into a register for those CPUs that use
   * inline asm here.
   */
  level = executing->Start.isr_level;
  _ISR_Set_level( level );
#endif

  /*
   * Initialize the floating point context because we do not come
   * through _Thread_Dispatch on our first invocation.  So the normal
   * code path for performing the FP context switch is not hit.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Thread_Allocated_fp = executing;
  }
#endif
#endif

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatching
   * disabled until all 'begin' extensions complete.
   */
  _User_extensions_Thread_begin( executing );

  /*
   * At this point, the dispatch disable level BETTER be 1.
   */
#if defined(RTEMS_SMP)
  {
    /*
     * On SMP we enter _Thread_Handler() with interrupts disabled and
     * _Thread_Dispatch() obtained the per-CPU lock for us.  We have to
     * release it here and set the desired interrupt level of the thread.
     */
    Per_CPU_Control *cpu_self = _Per_CPU_Get();

    _Assert( cpu_self->thread_dispatch_disable_level == 1 );
    _Assert( _ISR_Get_level() != 0 );

    _Thread_Debug_set_real_processor( executing, cpu_self );

    cpu_self->thread_dispatch_disable_level = 0;
    _Profiling_Thread_dispatch_enable( cpu_self, 0 );

    level = executing->Start.isr_level;
    _ISR_Set_level( level );

    /*
     * The thread dispatch level changed from one to zero.  Make sure we
     * lose no thread dispatch necessary update.
     */
    _Thread_Dispatch();
  }
#else
  _Thread_Enable_dispatch();
#endif

  /*
   * RTEMS supports multiple APIs and each API can define a different
   * thread/task prototype.  The following code supports invoking the
   * user thread entry point using the prototype expected.
   */
  if ( executing->Start.prototype == THREAD_START_NUMERIC ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
  }
#if defined(RTEMS_POSIX_API)
  else if ( executing->Start.prototype == THREAD_START_POINTER ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_pointer) executing->Start.entry_point)(
        executing->Start.pointer_argument
      );
  }
#endif
#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
  else if ( executing->Start.prototype == THREAD_START_BOTH_POINTER_FIRST ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
  }
  else if ( executing->Start.prototype == THREAD_START_BOTH_NUMERIC_FIRST ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
  }
#endif

  /*
   * In the selection above, the return code from the user thread body
   * was placed in return_argument.  This assumed that if it returned
   * anything (which is not supported in all APIs), then it would be
   * able to fit in a (void *).
   */

  _User_extensions_Thread_exitted( executing );

  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
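/*
 * A self-contained sketch of the prototype dispatch performed above,
 * using hypothetical names (Demo_entry_numeric, Demo_start, demo_body);
 * it is not the RTEMS API.  The entry point is stored type-erased and
 * cast back to the signature recorded in the prototype tag before the
 * call, with the return value captured in a void pointer, mirroring how
 * _Thread_Handler fills executing->Wait.return_argument.
 */
#include <stdint.h>
#include <stdio.h>

typedef void *( *Demo_entry_numeric )( uintptr_t );
typedef void *( *Demo_entry_pointer )( void * );

typedef enum { DEMO_START_NUMERIC, DEMO_START_POINTER } Demo_prototype;

typedef struct {
  Demo_prototype prototype;
  void        ( *entry_point )( void );  /* type-erased entry point */
  uintptr_t      numeric_argument;
  void          *pointer_argument;
} Demo_start;

static void *demo_body( uintptr_t arg )
{
  printf( "entry called with %lu\n", (unsigned long) arg );
  return NULL;
}

int main( void )
{
  Demo_start start = {
    DEMO_START_NUMERIC,
    (void ( * )( void )) demo_body,
    123,
    NULL
  };
  void *return_argument;

  /* Cast back to the signature named by the prototype tag, then call. */
  if ( start.prototype == DEMO_START_NUMERIC ) {
    return_argument =
      ( *(Demo_entry_numeric) start.entry_point )( start.numeric_argument );
  } else {
    return_argument =
      ( *(Demo_entry_pointer) start.entry_point )( start.pointer_argument );
  }

  (void) return_argument;
  return 0;
}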
void _Thread_Dispatch( void )
{
  Per_CPU_Control  *cpu_self;
  Thread_Control   *executing;
  ISR_Level         level;

#if defined( RTEMS_SMP )
  /*
   * On SMP the complete context switch must be atomic with respect to one
   * processor.  See also _Thread_Handler() since _Context_switch() may branch
   * to this function.
   */
  _ISR_Disable_without_giant( level );
#endif

  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
  cpu_self->thread_dispatch_disable_level = 1;

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = cpu_self->executing;

#if !defined( RTEMS_SMP )
  _ISR_Disable( level );
#endif

#if defined( RTEMS_SMP )
  if ( cpu_self->dispatch_necessary ) {
#else
  while ( cpu_self->dispatch_necessary ) {
#endif
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Thread_Update_cpu_time_used(
      executing,
      &cpu_self->time_of_last_context_switch
    );
#else
    {
      _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  }

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
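/*
 * A minimal model of the deferred FP switch bookkeeping used above, with
 * hypothetical names (Demo_thread, demo_allocated_fp, demo_fp_register);
 * it is not RTEMS code.  The FP unit is treated as owned by the last FP
 * thread that ran: a switch only saves the owner's live state and
 * restores the incoming thread's state when an FP thread that is not the
 * current owner resumes, which avoids FP save/restore work across
 * switches that involve only integer threads.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
  bool   uses_fp;
  double fp_state;  /* stand-in for a saved FP context */
} Demo_thread;

static Demo_thread *demo_allocated_fp;  /* current owner of the FP unit */
static double       demo_fp_register;   /* stand-in for live FP hardware */

static void demo_resume_fp( Demo_thread *incoming )
{
  if ( incoming->uses_fp && demo_allocated_fp != incoming ) {
    /* Save the previous owner's live state lazily, only now. */
    if ( demo_allocated_fp != NULL )
      demo_allocated_fp->fp_state = demo_fp_register;

    /* Restore the incoming thread's state and take ownership. */
    demo_fp_register = incoming->fp_state;
    demo_allocated_fp = incoming;
  }
}

int main( void )
{
  Demo_thread fp_a = { true, 1.0 };
  Demo_thread fp_b = { true, 2.0 };

  demo_resume_fp( &fp_a );  /* restores 1.0; fp_a owns the unit */
  demo_fp_register = 1.5;   /* fp_a computes; live state changes */
  demo_resume_fp( &fp_b );  /* saves 1.5 into fp_a, restores 2.0 */

  return fp_a.fp_state == 1.5 ? 0 : 1;
}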
bool _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  const Scheduler_Control              *scheduler,
  void                                 *stack_area,
  size_t                                stack_size,
  bool                                  is_fp,
  Priority_Control                      priority,
  bool                                  is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  uint32_t                              isr_level,
  Objects_Name                          name
)
{
  uintptr_t                tls_size = _TLS_Get_size();
  size_t                   actual_stack_size = 0;
  void                    *stack = NULL;
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  void                    *fp_area = NULL;
#endif
  bool                     extension_status;
  size_t                   i;
  bool                     scheduler_node_initialized = false;
  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );

#if defined( RTEMS_SMP )
  if ( rtems_configuration_is_smp_enabled() && !is_preemptible ) {
    return false;
  }
#endif

  for ( i = 0 ; i < _Thread_Control_add_on_count ; ++i ) {
    const Thread_Control_add_on *add_on = &_Thread_Control_add_ons[ i ];

    *(void **) ( (char *) the_thread + add_on->destination_offset ) =
      (char *) the_thread + add_on->source_offset;
  }

  /*
   *  Initialize the Ada self pointer
   */
#if __RTEMS_ADA__
  the_thread->rtems_ada_self = NULL;
#endif

  the_thread->Start.tls_area = NULL;

  /*
   *  Allocate and Initialize the stack for this thread.
   */
#if !defined(RTEMS_SCORE_THREAD_ENABLE_USER_PROVIDED_STACK_VIA_API)
  actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
  if ( !actual_stack_size || actual_stack_size < stack_size )
    return false;                     /* stack allocation failed */

  stack = the_thread->Start.stack;
#else
  if ( !stack_area ) {
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
    if ( !actual_stack_size || actual_stack_size < stack_size )
      return false;                   /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = true;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = false;
  }
#endif

  _Stack_Initialize(
    &the_thread->Start.Initial_stack,
    stack,
    actual_stack_size
  );

  /* Thread-local storage (TLS) area allocation */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    the_thread->Start.tls_area =
      _Workspace_Allocate_aligned( tls_alloc, tls_align );

    if ( the_thread->Start.tls_area == NULL ) {
      goto failed;
    }
  }

  /*
   *  Allocate the floating point area for this thread
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  if ( is_fp ) {
    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area )
      goto failed;
    fp_area = _Context_Fp_start( fp_area, 0 );
  }
  the_thread->fp_context = fp_area;
  the_thread->Start.fp_context = fp_area;
#endif

  /*
   *  Initialize the thread timer
   */
  _Watchdog_Initialize( &the_thread->Timer, NULL, 0, NULL );

#ifdef __RTEMS_STRICT_ORDER_MUTEX__
  /* Initialize the head of the chain of held mutexes */
  _Chain_Initialize_empty( &the_thread->lock_mutex );
#endif

  /*
   *  Clear the extensions area so extension users can determine
   *  if they are linked to the thread.  An extension user may
   *  create the extension long after tasks have been created
   *  so they cannot rely on the thread create user extension
   *  call.  The object index starts with one, so the first
   *  extension context is unused.
   */
  for ( i = 1 ; i <= rtems_configuration_get_maximum_extensions() ; ++i )
    the_thread->extensions[ i ] = NULL;

  /*
   *  General initialization
   */
  the_thread->Start.isr_level        = isr_level;
  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;

  switch ( budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
      break;
    #if defined(RTEMS_SCORE_THREAD_ENABLE_EXHAUST_TIMESLICE)
      case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
        the_thread->cpu_time_budget =
          rtems_configuration_get_ticks_per_timeslice();
        break;
    #endif
    #if defined(RTEMS_SCORE_THREAD_ENABLE_SCHEDULER_CALLOUT)
      case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
        break;
    #endif
  }

#if defined(RTEMS_SMP)
  the_thread->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
  the_thread->Scheduler.own_control = scheduler;
  the_thread->Scheduler.control = scheduler;
  the_thread->Scheduler.own_node = the_thread->Scheduler.node;
  _Resource_Node_initialize( &the_thread->Resource_node );
  _CPU_Context_Set_is_executing( &the_thread->Registers, false );
#endif

  _Thread_Debug_set_real_processor( the_thread, cpu );

  /* Initialize the CPU for the non-SMP schedulers */
  _Thread_Set_CPU( the_thread, cpu );

  the_thread->current_state          = STATES_DORMANT;
  the_thread->Wait.queue             = NULL;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  _Scheduler_Node_initialize( scheduler, the_thread );
  scheduler_node_initialized = true;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Initialize the CPU usage statistics
   */
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  _Timestamp_Set_to_zero( &the_thread->cpu_time_used );
#else
  the_thread->cpu_time_used = 0;
#endif

  /*
   *  Initialize the thread's key value node chain
   */
  _Chain_Initialize_empty( &the_thread->Key_Chain );

  _Thread_Action_control_initialize( &the_thread->Post_switch_actions );

  _Thread_Action_initialize(
    &the_thread->Life.Action,
    _Thread_Life_action_handler
  );
  the_thread->Life.state = THREAD_LIFE_NORMAL;
  the_thread->Life.terminator = NULL;

  /*
   *  Open the object
   */
  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  We assume the Allocator Mutex is locked and dispatching is
   *  enabled when we get here.  We want to be able to run the
   *  user extensions with dispatching enabled.  The Allocator
   *  Mutex provides sufficient protection to let the user extensions
   *  run safely.
   */
  extension_status = _User_extensions_Thread_create( the_thread );
  if ( extension_status )
    return true;

failed:

  if ( scheduler_node_initialized ) {
    _Scheduler_Node_destroy( scheduler, the_thread );
  }

  _Workspace_Free( the_thread->Start.tls_area );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
  _Workspace_Free( fp_area );
#endif

  _Thread_Stack_Free( the_thread );

  return false;
}
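/*
 * A self-contained sketch of the one-based extensions area cleared above,
 * using hypothetical names (DEMO_MAXIMUM_EXTENSIONS, demo_extensions); it
 * is not RTEMS code.  Because extension indices start at one, the array
 * reserves slot zero and is indexed directly by extension index with no
 * off-by-one adjustment, and a NULL slot lets an extension created after
 * the thread detect that it has not yet attached per-thread data.
 */
#include <stdbool.h>
#include <stddef.h>

#define DEMO_MAXIMUM_EXTENSIONS 4

/* Slot 0 is intentionally unused; slots 1..DEMO_MAXIMUM_EXTENSIONS hold data. */
static void *demo_extensions[ DEMO_MAXIMUM_EXTENSIONS + 1 ];

static void demo_clear_extensions( void )
{
  size_t i;

  /* Start at one: index zero is never an extension index. */
  for ( i = 1 ; i <= DEMO_MAXIMUM_EXTENSIONS ; ++i )
    demo_extensions[ i ] = NULL;
}

static bool demo_extension_is_attached( size_t extension_index )
{
  return demo_extensions[ extension_index ] != NULL;
}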
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

  executing = cpu_self->executing;

  do {
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

    /*
     * On SMP the complete context switch must be atomic with respect to one
     * processor.  See also _Thread_Handler() since _Context_switch() may
     * branch to this function.
     */
#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Thread_Update_cpu_time_used(
      executing,
      &cpu_self->time_of_last_context_switch
    );
#else
    {
      _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  } while (
#if defined( RTEMS_SMP )
    false
#else
    cpu_self->dispatch_necessary
#endif
  );

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
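/*
 * A minimal model of the loop shape in _Thread_Do_dispatch() above, using
 * hypothetical names (demo_dispatch, demo_switch_to_heir); it is not RTEMS
 * code.  On uniprocessor configurations the loop re-tests the
 * dispatch-necessary flag because an interrupt taken while interrupts were
 * briefly enabled around the context switch may have selected a new heir;
 * on SMP the body runs at most once per call (the `while ( false )` above),
 * and a freshly resumed thread re-fetches its per-CPU state instead.
 */
#include <stdbool.h>

static volatile bool demo_dispatch_necessary;
static unsigned      demo_disable_level;

/* Stand-in for _Context_Switch(): nothing further to model here. */
static void demo_switch_to_heir( void )
{
}

static void demo_dispatch( void )
{
  /* Dispatching stays disabled across the whole switch sequence. */
  demo_disable_level = 1;

  while ( demo_dispatch_necessary ) {
    demo_dispatch_necessary = false;
    demo_switch_to_heir();
    /* An interrupt may have set demo_dispatch_necessary again here. */
  }

  /* Only now may post-switch actions and further dispatches run. */
  demo_disable_level = 0;
}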