rtems_task Floating_point_task_1(
  rtems_task_argument argument
)
{
  Scheduler_priority_Context *scheduler_context =
    _Scheduler_priority_Get_context( _Scheduler_Get( _Thread_Get_executing() ) );
  Thread_Control             *executing;
  FP_DECLARE;

  context_switch_restore_1st_fp_time = benchmark_timer_read();

  executing = _Thread_Get_executing();
  set_thread_executing(
    (Thread_Control *) _Chain_First( &scheduler_context->Ready[ FP2_PRIORITY ] )
  );

  /* do not force context switch */
  set_thread_dispatch_necessary( false );

  _Thread_Dispatch_disable();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Get_executing()->fp_context );
#endif
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
  /* switch to Floating_point_task_2 */

  context_switch_save_idle_restore_initted_time = benchmark_timer_read();

  FP_LOAD( 1.0 );

  executing = _Thread_Get_executing();
  set_thread_executing(
    (Thread_Control *) _Chain_First( &scheduler_context->Ready[ FP2_PRIORITY ] )
  );

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Get_executing()->fp_context );
#endif
    _Context_Switch(
      &executing->Registers,
      &_Thread_Get_executing()->Registers
    );
  /* switch to Floating_point_task_2 */
}
rtems_task Floating_point_task_1(
  rtems_task_argument argument
)
{
  Chain_Control  *ready_queues;
  Thread_Control *executing;
  FP_DECLARE;

  context_switch_restore_1st_fp_time = benchmark_timer_read();

  executing = _Thread_Executing;
  ready_queues = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First( &ready_queues[ FP2_PRIORITY ] );

  /* do not force context switch */
  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
    _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_2 */

  context_switch_save_idle_restore_initted_time = benchmark_timer_read();

  FP_LOAD( 1.0 );

  executing = _Thread_Executing;
  ready_queues = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First( &ready_queues[ FP2_PRIORITY ] );

  /* do not force context switch */
  _Thread_Dispatch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
    _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_2 */
}
rtems_task Floating_point_task_2(
  rtems_task_argument argument
)
{
  Thread_Control *executing;
  FP_DECLARE;

  context_switch_save_restore_idle_time = benchmark_timer_read();

  executing = _Thread_Executing;
  _Thread_Executing =
    (Thread_Control *) _Thread_Ready_chain[ FP1_PRIORITY ].first;

  FP_LOAD( 1.0 );

  /* do not force context switch */
  _Context_Switch_necessary = false;

  _Thread_Disable_dispatch();

  benchmark_timer_initialize();
#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
    _Context_Save_fp( &executing->fp_context );
    _Context_Restore_fp( &_Thread_Executing->fp_context );
#endif
    _Context_Switch( &executing->Registers, &_Thread_Executing->Registers );
  /* switch to Floating_point_task_1 */

  context_switch_save_restore_initted_time = benchmark_timer_read();

  complete_test();
}
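All of the benchmark tasks above share one measurement idiom: the running task arms the timer with benchmark_timer_initialize() immediately before _Context_Switch(), and the task being switched to reads the elapsed time as its very first action, so each sample brackets exactly one switch plus any FP save/restore under test. The following is a minimal host-side sketch of that idiom only; clock_gettime() is an assumed stand-in for the board-specific benchmark timer driver, and the microsecond arithmetic is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Sketch of the benchmark_timer_* idiom: start the timer immediately
 * before the operation under test and read it as the first action
 * afterwards, so the sample covers only that operation.
 */
static struct timespec t0;

static void benchmark_timer_initialize( void )
{
  clock_gettime( CLOCK_MONOTONIC, &t0 );
}

static uint32_t benchmark_timer_read( void )   /* elapsed microseconds */
{
  struct timespec t1;

  clock_gettime( CLOCK_MONOTONIC, &t1 );
  return (uint32_t) ( ( t1.tv_sec - t0.tv_sec ) * 1000000L
                      + ( t1.tv_nsec - t0.tv_nsec ) / 1000 );
}

int main( void )
{
  volatile double x = 1.0;
  int             i;

  benchmark_timer_initialize();
  for ( i = 0; i < 1000000; ++i )
    x *= 1.0000001;                            /* operation under test */
  printf( "elapsed: %u us (x=%g)\n", benchmark_timer_read(), (double) x );
  return 0;
}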
void _Thread_Handler( void )
{
  ISR_Level       level;
  Thread_Control *executing;
#if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
  static char doneConstructors;
  char doneCons;
#endif

  executing = _Thread_Executing;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

  /*
   * have to put level into a register for those CPUs that use
   * inline asm here
   */
  level = executing->Start.isr_level;
  _ISR_Set_level( level );

#if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
  doneCons = doneConstructors;
  doneConstructors = 1;
#endif

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Thread_Allocated_fp = executing;
  }
#endif
#endif

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatch
   * disabled until all 'begin' extensions complete.
   */
  _User_extensions_Thread_begin( executing );

  /*
   * At this point, the dispatch disable level BETTER be 1.
   */
  _Thread_Enable_dispatch();

#if defined(EXECUTE_GLOBAL_CONSTRUCTORS)
  /*
   * _init could be a weak symbol and we SHOULD test it but it isn't
   * in any configuration I know of and it generates a warning on every
   * RTEMS target configuration. --joel (12 May 2007)
   */
  if (!doneCons) /* && (volatile void *)_init) */ {
    INIT_NAME ();
  }
#endif

  if ( executing->Start.prototype == THREAD_START_NUMERIC ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
  }
#if defined(RTEMS_POSIX_API)
  else if ( executing->Start.prototype == THREAD_START_POINTER ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_pointer) executing->Start.entry_point)(
        executing->Start.pointer_argument
      );
  }
#endif
#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
  else if ( executing->Start.prototype == THREAD_START_BOTH_POINTER_FIRST ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
  }
  else if ( executing->Start.prototype == THREAD_START_BOTH_NUMERIC_FIRST ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
  }
#endif

  /*
   * In the switch above, the return code from the user thread body was
   * placed in return_argument.  This assumed that if it returned
   * anything (which is not supported in all APIs), then it would be
   * able to fit in a (void *).
   */
  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
void _Thread_Handler( void )
{
  Thread_Control *executing = _Thread_Executing;
  ISR_Level       level;

  /*
   * Some CPUs need to tinker with the call frame or registers when the
   * thread actually begins to execute for the first time.  This is a
   * hook point where the port gets a shot at doing whatever it requires.
   */
  _Context_Initialization_at_thread_begin();

#if !defined(RTEMS_SMP)
  /*
   * have to put level into a register for those CPUs that use
   * inline asm here
   */
  level = executing->Start.isr_level;
  _ISR_Set_level( level );
#endif

  /*
   * Initialize the floating point context because we do not come
   * through _Thread_Dispatch on our first invocation.  So the normal
   * code path for performing the FP context switch is not hit.
   */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( (executing->fp_context != NULL) &&
       !_Thread_Is_allocated_fp( executing ) ) {
    if ( _Thread_Allocated_fp != NULL )
      _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
    _Thread_Allocated_fp = executing;
  }
#endif
#endif

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatch
   * disabled until all 'begin' extensions complete.
   */
  _User_extensions_Thread_begin( executing );

  /*
   * At this point, the dispatch disable level BETTER be 1.
   */
#if defined(RTEMS_SMP)
  {
    /*
     * On SMP we enter _Thread_Handler() with interrupts disabled and
     * _Thread_Dispatch() obtained the per-CPU lock for us.  We have to
     * release it here and set the desired interrupt level of the thread.
     */
    Per_CPU_Control *cpu_self = _Per_CPU_Get();

    _Assert( cpu_self->thread_dispatch_disable_level == 1 );
    _Assert( _ISR_Get_level() != 0 );

    _Thread_Debug_set_real_processor( executing, cpu_self );

    cpu_self->thread_dispatch_disable_level = 0;
    _Profiling_Thread_dispatch_enable( cpu_self, 0 );

    level = executing->Start.isr_level;
    _ISR_Set_level( level );

    /*
     * The thread dispatch level changed from one to zero.  Make sure we
     * lose no thread dispatch necessary update.
     */
    _Thread_Dispatch();
  }
#else
  _Thread_Enable_dispatch();
#endif

  /*
   * RTEMS supports multiple APIs and each API can define a different
   * thread/task prototype.  The following code supports invoking the
   * user thread entry point using the prototype expected.
   */
  if ( executing->Start.prototype == THREAD_START_NUMERIC ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
  }
#if defined(RTEMS_POSIX_API)
  else if ( executing->Start.prototype == THREAD_START_POINTER ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_pointer) executing->Start.entry_point)(
        executing->Start.pointer_argument
      );
  }
#endif
#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
  else if ( executing->Start.prototype == THREAD_START_BOTH_POINTER_FIRST ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
  }
  else if ( executing->Start.prototype == THREAD_START_BOTH_NUMERIC_FIRST ) {
    executing->Wait.return_argument =
      (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
  }
#endif

  /*
   * In the switch above, the return code from the user thread body was
   * placed in return_argument.  This assumed that if it returned
   * anything (which is not supported in all APIs), then it would be
   * able to fit in a (void *).
   */
  _User_extensions_Thread_exitted( executing );

  _Terminate(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
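The prototype dispatch near the end of both handlers is a small tagged dispatch: each API records a prototype tag plus a type-erased entry pointer at thread-start time, and _Thread_Handler casts back to the matching signature before the call. Below is a self-contained toy model of that pattern; the Start_info layout, Entry_* typedefs, and invoke() helper are illustrative stand-ins, not the RTEMS Thread_Start_information definitions.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the Start.prototype dispatch: the entry point is stored as
 * a generic function pointer and cast back to the signature selected by
 * the prototype tag before being called.
 */
typedef enum { THREAD_START_NUMERIC, THREAD_START_POINTER } Prototype;

typedef void (*Entry_generic)( void );        /* type-erased storage */
typedef void *(*Entry_numeric)( uint32_t );
typedef void *(*Entry_pointer)( void * );

typedef struct {
  Prototype     prototype;
  Entry_generic entry_point;
  uint32_t      numeric_argument;
  void         *pointer_argument;
} Start_info;

static void *invoke( const Start_info *s )
{
  /* mirrors the if/else-if chain in _Thread_Handler */
  if ( s->prototype == THREAD_START_NUMERIC )
    return ( (Entry_numeric) s->entry_point )( s->numeric_argument );
  else
    return ( (Entry_pointer) s->entry_point )( s->pointer_argument );
}

static void *numeric_body( uint32_t arg )
{
  printf( "numeric entry called with %u\n", (unsigned) arg );
  return NULL;
}

int main( void )
{
  Start_info s = {
    THREAD_START_NUMERIC, (Entry_generic) numeric_body, 42, NULL
  };
  invoke( &s );
  return 0;
}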
void _Thread_Dispatch( void )
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *executing;
  ISR_Level        level;

#if defined( RTEMS_SMP )
  /*
   * On SMP the complete context switch must be atomic with respect to one
   * processor.  See also _Thread_Handler() since _Context_Switch() may
   * branch to this function.
   */
  _ISR_Disable_without_giant( level );
#endif

  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
  cpu_self->thread_dispatch_disable_level = 1;

  /*
   * Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = cpu_self->executing;

#if !defined( RTEMS_SMP )
  _ISR_Disable( level );
#endif

#if defined( RTEMS_SMP )
  if ( cpu_self->dispatch_necessary ) {
#else
  while ( cpu_self->dispatch_necessary ) {
#endif
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     * When the heir and executing are the same, then we are being
     * requested to do the post switch dispatching.  This is normally
     * done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     * Since heir and executing are not the same, we need to do a real
     * context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Thread_Update_cpu_time_used(
      executing,
      &cpu_self->time_of_last_context_switch
    );
#else
    {
      _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );

    /*
     * If the CPU has hardware floating point, then we must address saving
     * and restoring it as part of the context switch.
     *
     * The second conditional compilation section selects the algorithm used
     * to context switch between floating point tasks.  The deferred
     * algorithm can be significantly better in a system with few floating
     * point tasks because it reduces the total number of save and restore
     * FP context operations.  However, this algorithm can not be used on
     * all CPUs due to unpredictable use of FP registers by some compilers
     * for integer operations.
     */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from
     * the stack or non-volatile registers reflect the old execution
     * environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  }

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
void _Thread_Dispatch( void )
{
  Thread_Control *executing;
  Thread_Control *heir;
  ISR_Level       level;

#if defined(RTEMS_SMP)
  /*
   * WARNING: The SMP sequence has severe defects regarding the real-time
   * performance.
   *
   * Consider the following scenario.  We have three tasks L (lowest
   * priority), M (middle priority), and H (highest priority).  Now let a
   * thread dispatch from M to L happen.  An interrupt occurs in
   * _Thread_Dispatch() here:
   *
   * void _Thread_Dispatch( void )
   * {
   *   [...]
   *
   * post_switch:
   *
   *   _ISR_Enable( level );
   *
   *   <-- INTERRUPT
   *   <-- AFTER INTERRUPT
   *
   *   _Thread_Unnest_dispatch();
   *
   *   _API_extensions_Run_post_switch();
   * }
   *
   * The interrupt event makes task H ready.  The interrupt code will see
   * _Thread_Dispatch_disable_level > 0 and thus doesn't perform a
   * _Thread_Dispatch().  Now we return to position "AFTER INTERRUPT".
   * This means task L executes now although task H is ready!  Task H will
   * execute once someone calls _Thread_Dispatch().
   */
  _Thread_Disable_dispatch();

  /*
   * If necessary, send dispatch request to other cores.
   */
  _SMP_Request_other_cores_to_dispatch();
#endif

  /*
   * Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Thread_Dispatch_necessary == true ) {
    heir = _Thread_Heir;
#ifndef RTEMS_SMP
    _Thread_Dispatch_set_disable_level( 1 );
#endif
    _Thread_Dispatch_necessary = false;
    _Thread_Executing = heir;

    /*
     * When the heir and executing are the same, then we are being
     * requested to do the post switch dispatching.  This is normally
     * done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     * Since heir and executing are not the same, we need to do a real
     * context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    _ISR_Enable( level );

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    {
      Timestamp_Control uptime, ran;

      _TOD_Get_uptime( &uptime );
      _Timestamp_Subtract(
        &_Thread_Time_of_last_context_switch,
        &uptime,
        &ran
      );
      _Timestamp_Add_to( &executing->cpu_time_used, &ran );
      _Thread_Time_of_last_context_switch = uptime;
    }
#else
    {
      _TOD_Get_uptime( &_Thread_Time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }

    _User_extensions_Thread_switch( executing, heir );

    /*
     * If the CPU has hardware floating point, then we must address saving
     * and restoring it as part of the context switch.
     *
     * The second conditional compilation section selects the algorithm used
     * to context switch between floating point tasks.  The deferred
     * algorithm can be significantly better in a system with few floating
     * point tasks because it reduces the total number of save and restore
     * FP context operations.  However, this algorithm can not be used on
     * all CPUs due to unpredictable use of FP registers by some compilers
     * for integer operations.
     */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

post_switch:
#ifndef RTEMS_SMP
  _Thread_Dispatch_set_disable_level( 0 );
#endif

  _ISR_Enable( level );

#ifdef RTEMS_SMP
  _Thread_Unnest_dispatch();
#endif

  _API_extensions_Run_post_switch( executing );
}
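Both dispatchers implement the deferred FP switch described in the comment above: FP state is saved and restored only when a different FP-using thread actually claims the FP unit, with _Thread_Allocated_fp tracking the current owner, so integer-only threads never pay for an FP save/restore. The following is a self-contained toy model of just that bookkeeping; the Thread type, fp_unit variable, and deferred_fp_switch() helper are illustrative stand-ins, not RTEMS structures.

#include <stddef.h>
#include <stdio.h>

/* Toy model of the deferred FP context switch: the FP unit is handed over
 * lazily when a different FP-using thread runs, instead of on every
 * context switch.
 */
typedef struct {
  const char *name;
  double      fp_context;   /* stands in for the saved FP register file */
  int         uses_fp;      /* non-NULL fp_context in real RTEMS */
} Thread;

static Thread *fp_owner;    /* analogue of _Thread_Allocated_fp */
static double  fp_unit;     /* the live FP hardware state */

static void deferred_fp_switch( Thread *incoming )
{
  if ( incoming->uses_fp && incoming != fp_owner ) {
    if ( fp_owner != NULL )
      fp_owner->fp_context = fp_unit;   /* _Context_Save_fp analogue */
    fp_unit = incoming->fp_context;     /* _Context_Restore_fp analogue */
    fp_owner = incoming;
  }
}

int main( void )
{
  Thread a = { "A", 1.0, 1 };
  Thread b = { "B", 2.0, 1 };
  Thread c = { "C", 0.0, 0 };

  deferred_fp_switch( &a );  /* A claims the FP unit */
  deferred_fp_switch( &c );  /* integer-only thread: no FP save/restore */
  deferred_fp_switch( &b );  /* B claims it; A's state is saved first */

  printf(
    "owner=%s fp=%g saved A=%g\n",
    fp_owner->name, fp_unit, a.fp_context
  );
  return 0;
}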