void rtems_smp_process_interrupt( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();

  if ( self_cpu->message != 0 ) {
    uint32_t  message;
    ISR_Level level;

    _Per_CPU_Lock_acquire( self_cpu, level );
    message = self_cpu->message;
    self_cpu->message = 0;
    _Per_CPU_Lock_release( self_cpu, level );

    #if defined(RTEMS_DEBUG)
      {
        void *sp = __builtin_frame_address(0);

        if ( !(message & RTEMS_BSP_SMP_SHUTDOWN) ) {
          printk(
            "ISR on CPU %d -- (0x%02x) (0x%p)\n",
            _Per_CPU_Get_index( self_cpu ),
            message,
            sp
          );
          if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF )
            printk( "signal to self\n" );
          if ( message & RTEMS_BSP_SMP_SHUTDOWN )
            printk( "shutdown\n" );
        }

        printk( "Dispatch level %d\n", _Thread_Dispatch_get_disable_level() );
      }
    #endif

    if ( ( message & RTEMS_BSP_SMP_SHUTDOWN ) != 0 ) {
      _ISR_Disable( level );

      _Thread_Dispatch_set_disable_level( 0 );

      _Per_CPU_Change_state( self_cpu, PER_CPU_STATE_SHUTDOWN );

      _CPU_Fatal_halt( _Per_CPU_Get_index( self_cpu ) );
      /* does not continue past here */
    }
  }
}
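The listing above shows only the receiving side of the inter-processor message protocol. The sketch below illustrates what the sending side might look like under the same per-CPU locking discipline; the function name rtems_smp_send_message and the helpers _Per_CPU_Get_by_index() and bsp_smp_interrupt_cpu() are assumptions for illustration, not taken from the listing above.

/*
 * Hypothetical sketch: merge message bits into the target processor's
 * per-CPU message word and raise an inter-processor interrupt so that
 * rtems_smp_process_interrupt() runs on that processor.  The helper names
 * below are assumptions, not verified API.
 */
void rtems_smp_send_message( uint32_t cpu, uint32_t message )
{
  Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );  /* assumed helper */
  ISR_Level        level;

  /* Merge the new bits under the same per-CPU lock used by the receiver */
  _Per_CPU_Lock_acquire( per_cpu, level );
  per_cpu->message |= message;
  _Per_CPU_Lock_release( per_cpu, level );

  /* Assumed BSP hook that raises the IPI on the target processor */
  bsp_smp_interrupt_cpu( cpu );
}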
rtems_task Task_2(
  rtems_task_argument argument
)
{
  Chain_Control *ready_queues;

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 );
#endif
  end_time = benchmark_timer_read();

  put_time(
    "interrupt entry overhead: returns to preempting task",
    Interrupt_enter_time,
    1,
    0,
    timer_overhead
  );

  put_time(
    "interrupt exit overhead: returns to preempting task",
    end_time,
    1,
    0,
    0
  );

  fflush( stdout );

  /*
   *  Switch back to the other task to exit the test.
   */

  _Thread_Dispatch_set_disable_level( 0 );

  ready_queues      = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First( &ready_queues[LOW_PRIORITY] );

  _Thread_Dispatch_necessary = 1;

  _Thread_Dispatch();
}
rtems_task Task_1(
  rtems_task_argument argument
)
{
  Install_tm27_vector( Isr_handler );

  Interrupt_nest = 0;
  _Thread_Dispatch_set_disable_level( 0 );

  /* Benchmark code */
  benchmark_timer_initialize();
  /* goes to Isr_handler */
  Cause_tm27_intr();

  put_time(
    "Rhealstone: Interrupt Latency",
    Interrupt_enter_time,
    1,                              /* Only Rhealstone that isn't an average */
    timer_overhead,
    0
  );

  puts( "*** END OF RHILATENCY ***" );
  rtems_test_exit( 0 );
}
rtems_task Task_1(
  rtems_task_argument argument
)
{
  Chain_Control *ready_queues;

  Install_tm27_vector( Isr_handler );

  /*
   *  No preempt .. no nesting
   */

  Interrupt_nest = 0;

  _Thread_Dispatch_set_disable_level( 0 );

  Interrupt_occurred = 0;

  benchmark_timer_initialize();
  Cause_tm27_intr();                          /* goes to Isr_handler */

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 );
#endif
  Interrupt_return_time = benchmark_timer_read();

  put_time(
    "interrupt entry overhead: returns to interrupted task",
    Interrupt_enter_time,
    1,
    0,
    timer_overhead
  );

  put_time(
    "interrupt exit overhead: returns to interrupted task",
    Interrupt_return_time,
    1,
    0,
    timer_overhead
  );

  /*
   *  No preempt .. nested
   */

  _Thread_Dispatch_set_disable_level( 1 );

  Interrupt_nest = 1;

  Interrupt_occurred = 0;

  benchmark_timer_initialize();
  Cause_tm27_intr();                          /* goes to Isr_handler */

#if (MUST_WAIT_FOR_INTERRUPT == 1)
  while ( Interrupt_occurred == 0 );
#endif
  Interrupt_return_time = benchmark_timer_read();

  _Thread_Dispatch_set_disable_level( 0 );

  put_time(
    "interrupt entry overhead: returns to nested interrupt",
    Interrupt_enter_nested_time,
    1,
    0,
    0
  );

  put_time(
    "interrupt exit overhead: returns to nested interrupt",
    Interrupt_return_nested_time,
    1,
    0,
    0
  );

  /*
   *  Does a preempt .. not nested
   */

  _Thread_Dispatch_set_disable_level( 0 );

  ready_queues      = (Chain_Control *) _Scheduler.information;
  _Thread_Executing =
    (Thread_Control *) _Chain_First( &ready_queues[LOW_PRIORITY] );

  _Thread_Dispatch_necessary = 1;

  Interrupt_occurred = 0;

  benchmark_timer_initialize();
  Cause_tm27_intr();

  /*
   *  goes to Isr_handler and then returns
   */

  puts( "*** END OF TEST 27 ***" );
  rtems_test_exit( 0 );
}
void _Thread_Dispatch_initialization( void )
{
  _Thread_Dispatch_disable_level = 0;
  _SMP_lock_spinlock_nested_Initialize( &_Thread_Dispatch_disable_level_lock );
  _Thread_Dispatch_set_disable_level( 1 );
}
void _Thread_Dispatch( void )
{
  Thread_Control *executing;
  Thread_Control *heir;
  ISR_Level       level;

#if defined(RTEMS_SMP)
  /*
   * WARNING: The SMP sequence has severe defects regarding the real-time
   * performance.
   *
   * Consider the following scenario.  We have three tasks L (lowest
   * priority), M (middle priority), and H (highest priority).  Now let a
   * thread dispatch from M to L happen.  An interrupt occurs in
   * _Thread_Dispatch() here:
   *
   * void _Thread_Dispatch( void )
   * {
   *   [...]
   *
   * post_switch:
   *
   *   _ISR_Enable( level );
   *
   *   <-- INTERRUPT
   *   <-- AFTER INTERRUPT
   *
   *   _Thread_Unnest_dispatch();
   *
   *   _API_extensions_Run_post_switch();
   * }
   *
   * The interrupt event makes task H ready.  The interrupt code will see
   * _Thread_Dispatch_disable_level > 0 and thus doesn't perform a
   * _Thread_Dispatch().  Now we return to position "AFTER INTERRUPT".  This
   * means task L executes now although task H is ready!  Task H will execute
   * once someone calls _Thread_Dispatch().
   */
  _Thread_Disable_dispatch();

  /*
   *  If necessary, send dispatch request to other cores.
   */
  _SMP_Request_other_cores_to_dispatch();
#endif

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Thread_Dispatch_necessary == true ) {
    heir = _Thread_Heir;
#ifndef RTEMS_SMP
    _Thread_Dispatch_set_disable_level( 1 );
#endif
    _Thread_Dispatch_necessary = false;
    _Thread_Executing = heir;

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    _ISR_Enable( level );

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    {
      Timestamp_Control uptime, ran;

      _TOD_Get_uptime( &uptime );
      _Timestamp_Subtract(
        &_Thread_Time_of_last_context_switch,
        &uptime,
        &ran
      );
      _Timestamp_Add_to( &executing->cpu_time_used, &ran );
      _Thread_Time_of_last_context_switch = uptime;
    }
#else
    {
      _TOD_Get_uptime( &_Thread_Time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

post_switch:
#ifndef RTEMS_SMP
  _Thread_Dispatch_set_disable_level( 0 );
#endif

  _ISR_Enable( level );

#ifdef RTEMS_SMP
  _Thread_Unnest_dispatch();
#endif

  _API_extensions_Run_post_switch( executing );
}
void complete_test( void )
{
  uint32_t index;
  rtems_id task_id;

  benchmark_timer_initialize();
    _Thread_Resume( Middle_tcb );
  thread_resume_time = benchmark_timer_read();

  _Thread_Set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE );

  benchmark_timer_initialize();
    _Thread_Unblock( Middle_tcb );
  thread_unblock_time = benchmark_timer_read();

  _Thread_Set_state( Middle_tcb, STATES_WAITING_FOR_MESSAGE );

  benchmark_timer_initialize();
    _Thread_Ready( Middle_tcb );
  thread_ready_time = benchmark_timer_read();

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) benchmark_timer_empty_function();
  overhead = benchmark_timer_read();

  task_id = Middle_tcb->Object.id;

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Thread_Get( task_id, &location );
  thread_get_time = benchmark_timer_read();

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Semaphore_Get( Semaphore_id, &location );
  semaphore_get_time = benchmark_timer_read();

  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) _Thread_Get( 0x3, &location );
  thread_get_invalid_time = benchmark_timer_read();

  /*
   *  This is the running task and we have tricked RTEMS out enough where
   *  we need to set some internal tracking information to match this.
   */
  _Thread_Heir = _Thread_Executing;
  _Thread_Dispatch_necessary = false;
  _Thread_Dispatch_set_disable_level( 0 );

  /*
   *  Now dump all the times
   */

  put_time( "_ISR_Disable", isr_disable_time, 1, 0, 0 );
  put_time( "_ISR_Flash", isr_flash_time, 1, 0, 0 );
  put_time( "_ISR_Enable", isr_enable_time, 1, 0, 0 );
  put_time( "_Thread_Disable_dispatch", thread_disable_dispatch_time, 1, 0, 0 );
  put_time( "_Thread_Enable_dispatch", thread_enable_dispatch_time, 1, 0, 0 );
  put_time( "_Thread_Set_state", thread_set_state_time, 1, 0, 0 );
  put_time( "_Thread_Dispatch (NO FP)", thread_dispatch_no_fp_time, 1, 0, 0 );
  put_time(
    "context switch: no floating point contexts",
    context_switch_no_fp_time, 1, 0, 0
  );
  put_time( "context switch: self", context_switch_self_time, 1, 0, 0 );
  put_time(
    "context switch: to another task",
    context_switch_another_task_time, 1, 0, 0
  );

#if (CPU_HARDWARE_FP == 1) || (CPU_SOFTWARE_FP == 1)
  put_time(
    "fp context switch: restore 1st FP task",
    context_switch_restore_1st_fp_time, 1, 0, 0
  );
  put_time(
    "fp context switch: save idle, restore initialized",
    context_switch_save_idle_restore_initted_time, 1, 0, 0
  );
  put_time(
    "fp context switch: save idle, restore idle",
    context_switch_save_restore_idle_time, 1, 0, 0
  );
  put_time(
    "fp context switch: save initialized, restore initialized",
    context_switch_save_restore_initted_time, 1, 0, 0
  );
#else
  puts( "fp context switch: restore 1st FP task - NA" );
  puts( "fp context switch: save idle, restore initialized - NA" );
  puts( "fp context switch: save idle, restore idle - NA" );
  puts( "fp context switch: save initialized, restore initialized - NA" );
#endif

  put_time( "_Thread_Resume", thread_resume_time, 1, 0, 0 );
  put_time( "_Thread_Unblock", thread_unblock_time, 1, 0, 0 );
  put_time( "_Thread_Ready", thread_ready_time, 1, 0, 0 );
  put_time( "_Thread_Get", thread_get_time, OPERATION_COUNT, 0, 0 );
  put_time( "_Semaphore_Get", semaphore_get_time, OPERATION_COUNT, 0, 0 );
  put_time(
    "_Thread_Get: invalid id",
    thread_get_invalid_time, OPERATION_COUNT, 0, 0
  );

  puts( "*** END OF TEST 26 ***" );
  rtems_test_exit( 0 );
}
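Each measurement above is reported through put_time(), which receives a raw timer reading, an iteration count, a loop overhead, and a fixed per-call overhead. The sketch below shows how such a reporting macro can combine those values into a single per-operation time; the exact definition lives in the test suite's timesys.h and may differ, so the macro name and the print format here are assumptions.

#include <stdio.h>
#include <inttypes.h>

/*
 * Hedged sketch of a put_time-style reporting macro: subtract the measured
 * loop overhead, average over the iteration count, then subtract the fixed
 * per-call overhead.  The real RTEMS macro may format its output differently.
 */
#define PUT_TIME( _message, _total_time, _iterations, _loop_overhead, _overhead ) \
  printf( \
    "%s %" PRId32 "\n", \
    (_message), \
    (int32_t) ( ( ( (_total_time) - (_loop_overhead) ) / (_iterations) ) - (_overhead) ) \
  )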