void _Rate_monotonic_Initiate_statistics(
  Rate_monotonic_Control *the_period
)
{
  Thread_Control *owning_thread = the_period->owner;

  /*
   *  If using nanosecond statistics, we need to obtain the uptime.
   */
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    Timestamp_Control  uptime;

    _TOD_Get_uptime( &uptime );
  #endif

  /*
   *  Set the starting point and the CPU time used for the statistics.
   */
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    the_period->time_period_initiated = uptime;
  #else
    the_period->time_period_initiated = _Watchdog_Ticks_since_boot;
  #endif

  the_period->cpu_usage_period_initiated = owning_thread->cpu_time_used;

  /*
   *  If using nanosecond statistics and the period's thread is currently
   *  executing, then we need to take into account how much time the
   *  executing thread has run since the last context switch.  When this
   *  routine is invoked from rtems_rate_monotonic_period, the owner will
   *  be the executing thread.  When this routine is invoked from
   *  _Rate_monotonic_Timeout, it will not.
   */
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    if ( owning_thread == _Thread_Executing ) {
      Timestamp_Control ran;

      /*
       *  Adjust the CPU time used to account for the time since last
       *  context switch.
       */
      _Timestamp_Subtract(
        &_Thread_Time_of_last_context_switch, &uptime, &ran
      );

      _Timestamp_Add_to( &the_period->cpu_usage_period_initiated, &ran );
    }
  #endif

  _Scheduler_Release_job(
    _Scheduler_Get( the_period->owner ),
    the_period->owner,
    the_period->next_length
  );
}
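The helper calls above follow a fixed argument order: _Timestamp_Subtract( &start, &end, &result ) computes result = end - start, so "ran" is the uptime elapsed since the last context switch. A minimal sketch of that convention in plain C, using uint64_t nanosecond counters as a stand-in for Timestamp_Control (an assumption for illustration, not the RTEMS definition):

#include <stdint.h>

typedef uint64_t ts_ns;  /* hypothetical stand-in for Timestamp_Control */

/* result = end - start: elapsed time from the start point to the end point */
static void ts_subtract( const ts_ns *start, const ts_ns *end, ts_ns *result )
{
  *result = *end - *start;
}

/* time += add: accumulate an interval into a running total */
static void ts_add_to( ts_ns *time, const ts_ns *add )
{
  *time += *add;
}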
/*
 *  rtems_cpu_usage_reset
 */
void rtems_cpu_usage_reset( void )
{
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );
  _Thread_Time_of_last_context_switch = CPU_usage_Uptime_at_last_reset;
#else
  CPU_usage_Ticks_at_last_reset = _Watchdog_Ticks_since_boot;
#endif

  rtems_iterate_over_all_threads( CPU_usage_Per_thread_handler );
}
/*
 *  rtems_cpu_usage_reset
 */
void rtems_cpu_usage_reset( void )
{
  uint32_t cpu_count;
  uint32_t cpu_index;

  _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );

  cpu_count = rtems_get_processor_count();
  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    cpu->cpu_usage_timestamp = CPU_usage_Uptime_at_last_reset;
  }

  rtems_iterate_over_all_threads( CPU_usage_Per_thread_handler );
}
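All of these variants sit behind the same public API. A usage sketch, assuming the standard <rtems/cpuuse.h> header: reset the statistics, let the system run for a measurement window, then report what accumulated.

#include <rtems.h>
#include <rtems/cpuuse.h>

static void measure_window( void )
{
  /* Zero per-thread usage and restamp the reset/context-switch times */
  rtems_cpu_usage_reset();

  /* Let the workload run for one second */
  rtems_task_wake_after( RTEMS_MILLISECONDS_TO_TICKS( 1000 ) );

  /* Print only the usage accumulated inside the window */
  rtems_cpu_usage_report();
}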
void _Rate_monotonic_Restart(
  Rate_monotonic_Control *the_period,
  Thread_Control         *owner,
  ISR_lock_Context       *lock_context
)
{
  /*
   *  Set the starting point and the CPU time used for the statistics.
   */
  _TOD_Get_uptime( &the_period->time_period_initiated );
  _Thread_Get_CPU_time_used( owner, &the_period->cpu_usage_period_initiated );

  _Rate_monotonic_Release_job(
    the_period,
    owner,
    the_period->next_length,
    lock_context
  );
}
/*
 *  rtems_cpu_usage_reset
 */
void rtems_cpu_usage_reset( void )
{
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  uint32_t processor_count;
  uint32_t processor;

  _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );

  processor_count = rtems_smp_get_processor_count();
  for ( processor = 0 ; processor < processor_count ; ++processor ) {
    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );

    per_cpu->time_of_last_context_switch = CPU_usage_Uptime_at_last_reset;
  }
#else
  CPU_usage_Ticks_at_last_reset = _Watchdog_Ticks_since_boot;
#endif

  rtems_iterate_over_all_threads( CPU_usage_Per_thread_handler );
}
bool _Rate_monotonic_Get_status(
  const Rate_monotonic_Control *the_period,
  Timestamp_Control            *wall_since_last_period,
  Timestamp_Control            *cpu_since_last_period
)
{
  Timestamp_Control  uptime;
  Thread_Control    *owning_thread = the_period->owner;
  Timestamp_Control  used;

  /*
   *  Determine elapsed wall time since period initiated.
   */
  _TOD_Get_uptime( &uptime );
  _Timestamp_Subtract(
    &the_period->time_period_initiated, &uptime, wall_since_last_period
  );

  /*
   *  Determine cpu usage since period initiated.
   */
  _Thread_Get_CPU_time_used( owning_thread, &used );

  /*
   *  The cpu usage info was reset while executing.  Can't
   *  determine a status.
   */
  if ( _Timestamp_Less_than( &used, &the_period->cpu_usage_period_initiated ) )
    return false;

  /* used = current cpu usage - cpu usage at start of period */
  _Timestamp_Subtract(
    &the_period->cpu_usage_period_initiated,
    &used,
    cpu_since_last_period
  );

  return true;
}
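A caller can turn the two deltas into a utilization percentage with _Timestamp_Divide(), which (as used by the reporting code later in this section) yields an integer part and three fractional digits. A sketch with a hypothetical helper, period_utilization(), assuming that signature:

/* Hypothetical helper: percentage of the period spent on the CPU. */
static bool period_utilization(
  const Rate_monotonic_Control *the_period,
  uint32_t                     *ival,  /* integer part of the percentage */
  uint32_t                     *fval   /* three fractional digits */
)
{
  Timestamp_Control wall;
  Timestamp_Control cpu;

  if ( !_Rate_monotonic_Get_status( the_period, &wall, &cpu ) )
    return false;  /* statistics were reset mid-period */

  /* percentage = cpu / wall */
  _Timestamp_Divide( &cpu, &wall, ival, fval );
  return true;
}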
bool _Rate_monotonic_Get_status(
  Rate_monotonic_Control        *the_period,
  Rate_monotonic_Period_time_t  *wall_since_last_period,
  Thread_CPU_usage_t            *cpu_since_last_period
)
{
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    Timestamp_Control  uptime;
  #endif
    Thread_Control    *owning_thread = the_period->owner;
    Thread_CPU_usage_t used;

  /*
   *  Determine elapsed wall time since period initiated.
   */
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _TOD_Get_uptime( &uptime );
    _Timestamp_Subtract(
      &the_period->time_period_initiated, &uptime, wall_since_last_period
    );
  #else
    *wall_since_last_period =
      _Watchdog_Ticks_since_boot - the_period->time_period_initiated;
  #endif

  /*
   *  Determine cpu usage since period initiated.
   */
  used = owning_thread->cpu_time_used;

  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    if (owning_thread == _Thread_Executing) {
      Thread_CPU_usage_t ran;

      /* How much time since last context switch */
      _Timestamp_Subtract(
        &_Thread_Time_of_last_context_switch, &uptime, &ran
      );

      /* cpu usage += ran */
      _Timestamp_Add_to( &used, &ran );

      /*
       *  The cpu usage info was reset while executing.  Can't
       *  determine a status.
       */
      if (_Timestamp_Less_than(&used, &the_period->cpu_usage_period_initiated))
        return false;

      /* used = current cpu usage - cpu usage at start of period */
      _Timestamp_Subtract(
        &the_period->cpu_usage_period_initiated,
        &used,
        cpu_since_last_period
      );
    }
  #else
    /*
     *  The cpu usage info was reset while executing.  Can't
     *  determine a status.
     */
    if (used < the_period->cpu_usage_period_initiated)
      return false;

    *cpu_since_last_period = used - the_period->cpu_usage_period_initiated;
  #endif

  return true;
}
/*
 *  rtems_cpu_usage_report
 */
void rtems_cpu_usage_report_with_plugin(
  void                  *context,
  rtems_printk_plugin_t  print
)
{
  uint32_t             i;
  uint32_t             api_index;
  Thread_Control      *the_thread;
  Objects_Information *information;
  char                 name[13];
  uint32_t             ival, fval;
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    Timestamp_Control  uptime, total, ran, uptime_at_last_reset;
    uint32_t           seconds, nanoseconds;
  #else
    uint32_t           total_units = 0;
  #endif

  if ( !print )
    return;

  /*
   *  When not using nanosecond CPU usage resolution, we have to count
   *  the number of "ticks" we gave credit for to give the user a rough
   *  guideline as to what each number means proportionally.
   */
  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Timestamp_Set_to_zero( &total );
    uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
  #else
    for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
      #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
        if ( !_Objects_Information_table[ api_index ] )
          continue;
      #endif
      information = _Objects_Information_table[ api_index ][ 1 ];
      if ( information ) {
        for ( i=1 ; i <= information->maximum ; i++ ) {
          the_thread = (Thread_Control *)information->local_table[ i ];
          if ( the_thread )
            total_units += the_thread->cpu_time_used;
        }
      }
    }
  #endif

  (*print)(
     context,
     "-------------------------------------------------------------------------------\n"
     "                              CPU USAGE BY THREAD\n"
     "------------+----------------------------------------+---------------+---------\n"
     #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
       " ID         | NAME                                   | SECONDS       | PERCENT\n"
     #else
       " ID         | NAME                                   | TICKS         | PERCENT\n"
     #endif
     "------------+----------------------------------------+---------------+---------\n"
  );

  for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
    #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
      if ( !_Objects_Information_table[ api_index ] )
        continue;
    #endif
    information = _Objects_Information_table[ api_index ][ 1 ];
    if ( information ) {
      for ( i=1 ; i <= information->maximum ; i++ ) {
        the_thread = (Thread_Control *)information->local_table[ i ];

        if ( !the_thread )
          continue;

        rtems_object_get_name( the_thread->Object.id, sizeof(name), name );

        (*print)(
          context,
          " 0x%08" PRIx32 " | %-38s |",
          the_thread->Object.id,
          name
        );

        #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
        {
          Timestamp_Control last;

          /*
           * If this is the currently executing thread, account for time
           * since the last context switch.
           */
          ran = the_thread->cpu_time_used;
          if ( is_executing_on_a_core( the_thread, &last ) ) {
            Timestamp_Control used;
            _TOD_Get_uptime( &uptime );
            _Timestamp_Subtract( &last, &uptime, &used );
            _Timestamp_Add_to( &ran, &used );
          } else {
            _TOD_Get_uptime( &uptime );
          }
          _Timestamp_Subtract( &uptime_at_last_reset, &uptime, &total );
          _Timestamp_Divide( &ran, &total, &ival, &fval );

          /*
           * Print the information
           */
          seconds = _Timestamp_Get_seconds( &ran );
          nanoseconds = _Timestamp_Get_nanoseconds( &ran ) /
                        TOD_NANOSECONDS_PER_MICROSECOND;
          (*print)( context,
            "%7" PRIu32 ".%06" PRIu32 " |%4" PRIu32 ".%03" PRIu32 "\n",
            seconds, nanoseconds,
            ival, fval
          );
        }
        #else
          if (total_units) {
            uint64_t ival_64;

            ival_64 = the_thread->cpu_time_used;
            ival_64 *= 100000;
            ival = ival_64 / total_units;
          } else {
            ival = 0;
          }

          fval = ival % 1000;
          ival /= 1000;
          (*print)( context,
            "%14" PRIu32 " |%4" PRIu32 ".%03" PRIu32 "\n",
            the_thread->cpu_time_used,
            ival,
            fval
          );
        #endif
      }
    }
  }

  #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    seconds = _Timestamp_Get_seconds( &total );
    nanoseconds = _Timestamp_Get_nanoseconds( &total ) /
                  TOD_NANOSECONDS_PER_MICROSECOND;
    (*print)(
       context,
       "------------+----------------------------------------+---------------+---------\n"
       " TIME SINCE LAST CPU USAGE RESET IN SECONDS:                    %7" PRIu32 ".%06" PRIu32 "\n"
       "-------------------------------------------------------------------------------\n",
       seconds, nanoseconds
    );
  #else
    (*print)(
       context,
       "------------+----------------------------------------+---------------+---------\n"
       " TICKS SINCE LAST SYSTEM RESET: %14" PRIu32 "\n"
       " TOTAL UNITS:                   %14" PRIu32 "\n"
       "-------------------------------------------------------------------------------\n",
       _Watchdog_Ticks_since_boot - CPU_usage_Ticks_at_last_reset,
       total_units
    );
  #endif
}
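The plugin indirection lets the report target any printf-style sink. A usage sketch, assuming the printk_plugin() helper from this era of the code base, which routes output through printk() and takes a NULL context:

#include <rtems/cpuuse.h>
#include <rtems/bspIo.h>

static void dump_cpu_usage( void )
{
  /* Send the CPU usage table to the kernel console via printk() */
  rtems_cpu_usage_report_with_plugin( NULL, printk_plugin );
}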
void _Thread_Dispatch( void )
{
  Per_CPU_Control *cpu_self;
  Thread_Control  *executing;
  ISR_Level        level;

#if defined( RTEMS_SMP )
  /*
   * On SMP the complete context switch must be atomic with respect to one
   * processor.  See also _Thread_Handler() since _Context_switch() may branch
   * to this function.
   */
  _ISR_Disable_without_giant( level );
#endif

  cpu_self = _Per_CPU_Get();
  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
  cpu_self->thread_dispatch_disable_level = 1;

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = cpu_self->executing;

#if !defined( RTEMS_SMP )
  _ISR_Disable( level );
#endif

#if defined( RTEMS_SMP )
  if ( cpu_self->dispatch_necessary ) {
#else
  while ( cpu_self->dispatch_necessary ) {
#endif
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Thread_Update_cpu_time_used(
      executing,
      &cpu_self->time_of_last_context_switch
    );
#else
    {
      _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  }

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
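_Thread_Update_cpu_time_used() charges the outgoing thread for the interval since the last context switch and restamps the per-CPU switch time in one step. A compact model of that accounting, again with nanosecond counters standing in for the real types (an assumption for illustration):

#include <stdint.h>

typedef struct {
  uint64_t cpu_time_used;  /* total nanoseconds charged to this thread */
} model_thread;

/* Mirrors the #ifndef branch above: charge the executing thread for
 * [last switch, now) and begin the next interval at "now". */
static void model_update_cpu_time_used(
  model_thread *executing,
  uint64_t     *time_of_last_context_switch,
  uint64_t      now
)
{
  executing->cpu_time_used += now - *time_of_last_context_switch;
  *time_of_last_context_switch = now;
}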
void _Thread_Dispatch( void )
{
  Thread_Control *executing;
  Thread_Control *heir;
  ISR_Level       level;

#if defined(RTEMS_SMP)
  /*
   * WARNING: The SMP sequence has severe defects regarding the real-time
   * performance.
   *
   * Consider the following scenario.  We have three tasks L (lowest
   * priority), M (middle priority), and H (highest priority).  Now let a
   * thread dispatch from M to L happen.  An interrupt occurs in
   * _Thread_Dispatch() here:
   *
   * void _Thread_Dispatch( void )
   * {
   *   [...]
   *
   * post_switch:
   *
   *   _ISR_Enable( level );
   *
   *   <-- INTERRUPT
   *   <-- AFTER INTERRUPT
   *
   *   _Thread_Unnest_dispatch();
   *
   *   _API_extensions_Run_post_switch();
   * }
   *
   * The interrupt event makes task H ready.  The interrupt code will see
   * _Thread_Dispatch_disable_level > 0 and thus doesn't perform a
   * _Thread_Dispatch().  Now we return to position "AFTER INTERRUPT".  This
   * means task L executes now although task H is ready!  Task H will execute
   * once someone calls _Thread_Dispatch().
   */
  _Thread_Disable_dispatch();

  /*
   * If necessary, send dispatch request to other cores.
   */
  _SMP_Request_other_cores_to_dispatch();
#endif

  /*
   *  Now determine if we need to perform a dispatch on the current CPU.
   */
  executing = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Thread_Dispatch_necessary == true ) {
    heir = _Thread_Heir;
#ifndef RTEMS_SMP
    _Thread_Dispatch_set_disable_level( 1 );
#endif
    _Thread_Dispatch_necessary = false;
    _Thread_Executing = heir;

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    _ISR_Enable( level );

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    {
      Timestamp_Control uptime, ran;

      _TOD_Get_uptime( &uptime );
      _Timestamp_Subtract(
        &_Thread_Time_of_last_context_switch, &uptime, &ran
      );
      _Timestamp_Add_to( &executing->cpu_time_used, &ran );
      _Thread_Time_of_last_context_switch = uptime;
    }
#else
    {
      _TOD_Get_uptime( &_Thread_Time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }

    _User_extensions_Thread_switch( executing, heir );

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */
#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH != TRUE )
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (executing->fp_context != NULL) &&
         !_Thread_Is_allocated_fp( executing ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &executing->fp_context );
      _Thread_Allocated_fp = executing;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Restore_fp( &executing->fp_context );
#endif
#endif

    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

post_switch:
#ifndef RTEMS_SMP
  _Thread_Dispatch_set_disable_level( 0 );
#endif

  _ISR_Enable( level );

#ifdef RTEMS_SMP
  _Thread_Unnest_dispatch();
#endif

  _API_extensions_Run_post_switch( executing );
}
void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  _Assert( cpu_self->thread_dispatch_disable_level == 1 );

  executing = cpu_self->executing;

  do {
    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    /*
     *  When the heir and executing are the same, then we are being
     *  requested to do the post switch dispatching.  This is normally
     *  done to dispatch signals.
     */
    if ( heir == executing )
      goto post_switch;

    /*
     *  Since heir and executing are not the same, we need to do a real
     *  context switch.
     */
#if __RTEMS_ADA__
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
#endif
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = rtems_configuration_get_ticks_per_timeslice();

    /*
     * On SMP the complete context switch must be atomic with respect to one
     * processor.  See also _Thread_Handler() since _Context_switch() may
     * branch to this function.
     */
#if !defined( RTEMS_SMP )
    _ISR_Enable( level );
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
    _Thread_Update_cpu_time_used(
      executing,
      &cpu_self->time_of_last_context_switch
    );
#else
    {
      _TOD_Get_uptime( &cpu_self->time_of_last_context_switch );
      heir->cpu_time_used++;
    }
#endif

#if !defined(__DYNAMIC_REENT__)
    /*
     * Switch libc's task specific data.
     */
    if ( _Thread_libc_reent ) {
      executing->libc_reent = *_Thread_libc_reent;
      *_Thread_libc_reent = heir->libc_reent;
    }
#endif

    _User_extensions_Thread_switch( executing, heir );
    _Thread_Save_fp( executing );
    _Context_Switch( &executing->Registers, &heir->Registers );
    _Thread_Restore_fp( executing );

    /*
     * We have to obtain this value again after the context switch since the
     * heir thread may have migrated from another processor.  Values from the
     * stack or non-volatile registers reflect the old execution environment.
     */
    cpu_self = _Per_CPU_Get();

    _Thread_Debug_set_real_processor( executing, cpu_self );

#if !defined( RTEMS_SMP )
    _ISR_Disable( level );
#endif
  } while (
#if defined( RTEMS_SMP )
    false
#else
    cpu_self->dispatch_necessary
#endif
  );

post_switch:
  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
  cpu_self->thread_dispatch_disable_level = 0;
  _Profiling_Thread_dispatch_enable( cpu_self, 0 );

  _ISR_Enable_without_giant( level );

  _Thread_Run_post_switch_actions( executing );
}
static void rtems_cpuusage_top_thread (rtems_task_argument arg)
{
  rtems_cpu_usage_data*  data = (rtems_cpu_usage_data*) arg;
  char                   name[13];
  int                    i;
  Heap_Information_block wksp;
  uint32_t               ival, fval;
  int                    task_count;
  rtems_event_set        out;
  rtems_status_code      sc;
  bool                   first_time = true;

  data->thread_active = true;

  _TOD_Get_uptime(&data->last_uptime);

  CPU_usage_Set_to_zero(&data->zero);

  while (data->thread_run)
  {
    Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
    size_t            tasks_size;
    size_t            usage_size;
    Timestamp_Control load;

    data->task_count = 0;
    rtems_iterate_over_all_threads_2(task_counter, data);

    tasks_size = sizeof(Thread_Control*) * (data->task_count + 1);
    usage_size = sizeof(Timestamp_Control) * (data->task_count + 1);

    if (data->task_count > data->task_size)
    {
      data->tasks = realloc(data->tasks, tasks_size);
      data->usage = realloc(data->usage, usage_size);
      data->current_usage = realloc(data->current_usage, usage_size);
      if ((data->tasks == NULL) ||
          (data->usage == NULL) ||
          (data->current_usage == NULL))
      {
        rtems_printf(data->printer, "top worker: error: no memory\n");
        data->thread_run = false;
        break;
      }
    }

    memset(data->tasks, 0, tasks_size);
    memset(data->usage, 0, usage_size);
    memset(data->current_usage, 0, usage_size);

    _Timestamp_Set_to_zero(&data->total);
    _Timestamp_Set_to_zero(&data->current);
    data->stack_size = 0;

    _TOD_Get_uptime(&data->uptime);
    _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime);
    _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period);
    data->last_uptime = data->uptime;

    rtems_iterate_over_all_threads_2(task_usage, data);

    if (data->task_count > data->task_size)
    {
      data->last_tasks = realloc(data->last_tasks, tasks_size);
      data->last_usage = realloc(data->last_usage, usage_size);
      if ((data->last_tasks == NULL) || (data->last_usage == NULL))
      {
        rtems_printf(data->printer, "top worker: error: no memory\n");
        data->thread_run = false;
        break;
      }
      data->task_size = data->task_count;
    }

    memcpy(data->last_tasks, data->tasks, tasks_size);
    memcpy(data->last_usage, data->usage, usage_size);
    data->last_task_count = data->task_count;

    /*
     * We need to loop again to get suitable current usage values as we need a
     * last sample to work.
     */
    if (first_time)
    {
      rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500));
      first_time = false;
      continue;
    }

    _Protected_heap_Get_information(&_Workspace_Area, &wksp);

    if (data->single_page)
      rtems_printf(data->printer,
                   "\x1b[H\x1b[J"
                   " ENTER:Exit  SPACE:Refresh"
                   "  S:Scroll  A:All  <>:Order  +/-:Lines\n");
    rtems_printf(data->printer, "\n");

    /*
     * Uptime and period of this sample.
     */
    rtems_printf(data->printer, "Uptime: ");
    print_time(data, &data->uptime, 20);
    rtems_printf(data->printer, " Period: ");
    print_time(data, &data->period, 20);

    /*
     * Task count, load and idle levels.
     */
    rtems_printf(data->printer, "\nTasks: %4i  ", data->task_count);

    _Timestamp_Subtract(&data->idle, &data->total, &load);
    _Timestamp_Divide(&load, &data->uptime, &ival, &fval);
    rtems_printf(data->printer,
                 "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
    _Timestamp_Subtract(&data->current_idle, &data->current, &load);
    _Timestamp_Divide(&load, &data->period, &ival, &fval);
    rtems_printf(data->printer,
                 "  Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
    _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval);
    rtems_printf(data->printer,
                 "  Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);

    /*
     * Memory usage.
     */
    if (rtems_configuration_get_unified_work_area())
    {
      rtems_printf(data->printer, "\nMem: ");
      print_memsize(data, wksp.Free.total, "free");
      print_memsize(data, wksp.Used.total, "used");
    }
    else
    {
      region_information_block libc_heap;
      malloc_info(&libc_heap);
      rtems_printf(data->printer, "\nMem: Wksp: ");
      print_memsize(data, wksp.Free.total, "free");
      print_memsize(data, wksp.Used.total, "used  Heap: ");
      print_memsize(data, libc_heap.Free.total, "free");
      print_memsize(data, libc_heap.Used.total, "used");
    }

    print_memsize(data, data->stack_size, "stack\n");

    rtems_printf(data->printer,
       "\n"
        " ID         | NAME                | RPRI | CPRI   | TIME                | TOTAL   | CURRENT\n"
        "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n",
       data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--"
    );

    task_count = 0;

    for (i = 0; i < data->task_count; i++)
    {
      Thread_Control*   thread = data->tasks[i];
      Timestamp_Control usage;
      Timestamp_Control current_usage;

      if (thread == NULL)
        break;

      if (data->single_page && (data->show != 0) && (i >= data->show))
        break;

      /*
       * We need to count the number displayed to clear the remainder of
       * the display.
       */
      ++task_count;

      /*
       * If the API is POSIX, print the entry point.
       */
      rtems_object_get_name(thread->Object.id, sizeof(name), name);
      if (name[0] == '\0')
        snprintf(name, sizeof(name) - 1, "(%p)",
                 thread->Start.Entry.Kinds.Numeric.entry);

      rtems_printf(data->printer,
                   " 0x%08" PRIx32 " | %-19s | %3" PRId64 " | %3" PRId64 " | ",
                   thread->Object.id,
                   name,
                   thread->Real_priority.priority,
                   _Thread_Get_priority(thread));

      usage = data->usage[i];
      current_usage = data->current_usage[i];

      /*
       * Print the information
       */
      print_time(data, &usage, 19);
      _Timestamp_Divide(&usage, &data->total, &ival, &fval);
      rtems_printf(data->printer,
                   " |%4" PRIu32 ".%03" PRIu32, ival, fval);
      _Timestamp_Divide(&current_usage, &data->period, &ival, &fval);
      rtems_printf(data->printer,
                   " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval);
    }

    if (data->single_page && (data->show != 0) && (task_count < data->show))
    {
      i = data->show - task_count;
      while (i > 0)
      {
        rtems_printf(data->printer, "\x1b[K\n");
        i--;
      }
    }

    sc = rtems_event_receive(RTEMS_EVENT_1,
                             RTEMS_EVENT_ANY,
                             RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs),
                             &out);
    if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
    {
      rtems_printf(data->printer,
                   "error: event receive: %s\n", rtems_status_text(sc));
      break;
    }
  }

  free(data->tasks);
  free(data->last_tasks);
  free(data->last_usage);
  free(data->current_usage);

  data->thread_active = false;

  rtems_task_delete (RTEMS_SELF);
}