/*
 * Test body executed on processor 0: verifies that a clock tick arriving
 * while local interrupts are disabled is deferred on this processor, while
 * processor 1 (interrupts enabled) still advances, and that the deferred
 * tick is processed as soon as interrupts are re-enabled.  The (A)/(B)/(C)
 * waits synchronize with a partner task via ctx->delay_barrier_state
 * (barrier protocol defined elsewhere in the test).
 */
static void delay_clock_tick(test_context *ctx)
{
  rtems_interrupt_level level;
  const Per_CPU_Control *cpu_self = _Per_CPU_Get_by_index(0);
  const Per_CPU_Control *cpu_other = _Per_CPU_Get_by_index(1);
  uint64_t ticks;

  rtems_test_assert(rtems_get_current_processor() == 0);

  /* Align with a tick boundary so the next tick falls inside the
   * interrupt-disabled window below. */
  rtems_test_spin_until_next_tick();
  ticks = cpu_self->Watchdog.ticks;

  rtems_interrupt_local_disable(level);

  /* (A) */
  wait(ctx, &ctx->delay_barrier_state);

  /* (B) */
  wait(ctx, &ctx->delay_barrier_state);

  /* Tick interrupt is pending but masked: CPU 0 has not advanced, CPU 1
   * has taken exactly one tick. */
  rtems_test_assert(cpu_self->Watchdog.ticks == ticks);
  rtems_test_assert(cpu_other->Watchdog.ticks == ticks + 1);

  rtems_interrupt_local_enable(level);

  /* Re-enabling interrupts delivers the deferred tick immediately. */
  rtems_test_assert(cpu_self->Watchdog.ticks == ticks + 1);
  rtems_test_assert(cpu_other->Watchdog.ticks == ticks + 1);

  /* (C) */
  wait(ctx, &ctx->delay_barrier_state);
}
static bool is_per_cpu_state_ok(void) { bool ok = true; uint32_t n = rtems_smp_get_processor_count(); uint32_t i; for (i = 0; i < n; ++i) { const Thread_Control *thread = _Per_CPU_Get_by_index(i)->executing; uint32_t count = 0; uint32_t j; for (j = 0; j < n; ++j) { const Per_CPU_Control *cpu = _Per_CPU_Get_by_index(j); const Thread_Control *executing = cpu->executing; const Thread_Control *heir = cpu->heir; if (i != j) { count += executing == thread; count += heir == thread; } else { ++count; } ok = ok && executing->cpu == cpu; ok = ok && heir->cpu == cpu; } ok = ok && (count == 1); } return ok; }
/*
 * Busy-waits until the specified processor leaves PER_CPU_STATE_INITIAL.
 *
 * @param cpu_index Index of the processor to observe.
 * @param timeout_in_ns Timeout in nanoseconds; 0 means wait forever.
 *
 * @retval true The processor left the initial state (possibly before the
 *   timeout expired).
 * @retval false The timeout expired while the processor was still in the
 *   initial state.
 */
bool _Per_CPU_State_wait_for_non_initial_state(
  uint32_t cpu_index,
  uint32_t timeout_in_ns
)
{
  const Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
  Per_CPU_State state = cpu->state;

  if ( timeout_in_ns > 0 ) {
    rtems_counter_ticks ticks =
      rtems_counter_nanoseconds_to_ticks( timeout_in_ns );
    rtems_counter_ticks a = rtems_counter_read();
    rtems_counter_ticks delta = 0;

    while ( ticks > delta && state == PER_CPU_STATE_INITIAL ) {
      rtems_counter_ticks b;

      _CPU_SMP_Processor_event_receive();
      state = cpu->state;

      /* Charge the previous interval against the remaining budget, then
       * start measuring the next interval.  On the first iteration delta
       * is 0, so nothing is charged before the first wait completes. */
      ticks -= delta;
      b = rtems_counter_read();
      delta = rtems_counter_difference( b, a );
      a = b;
    }
  } else {
    /* No timeout: spin until the state changes. */
    while ( state == PER_CPU_STATE_INITIAL ) {
      _CPU_SMP_Processor_event_receive();
      state = cpu->state;
    }
  }

  return state != PER_CPU_STATE_INITIAL;
}
/*
 * Fatal error extension for the SMP shutdown test.  The main processor must
 * receive the application-raised fatal error (code 0xdeadbeef) and observes
 * that every configured processor reached the shutdown state before ending
 * the test; all other processors must receive the SMP shutdown fatal code.
 * Fatal errors from other sources are ignored.
 */
static void fatal_extension(
  rtems_fatal_source source,
  bool is_internal,
  rtems_fatal_code code
)
{
  if (
    source == RTEMS_FATAL_SOURCE_APPLICATION
      || source == RTEMS_FATAL_SOURCE_SMP
  ) {
    uint32_t self = rtems_smp_get_current_processor();

    assert(!is_internal);

    if (self == main_cpu) {
      uint32_t cpu;

      assert(source == RTEMS_FATAL_SOURCE_APPLICATION);
      assert(code == 0xdeadbeef);

      /* All processors must have completed their shutdown transition. */
      for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
        const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
        Per_CPU_State state = per_cpu->state;

        assert(state == PER_CPU_STATE_SHUTDOWN);
      }

      end_of_test();
    } else {
      assert(source == RTEMS_FATAL_SOURCE_SMP);
      assert(code == SMP_FATAL_SHUTDOWN);
    }
  }
}
/*
 * Restores the test scenario to a known baseline: resets the priority and
 * affinity of every task, suspends the surplus tasks (those beyond the
 * processor count), resumes one task per processor, and finally suspends
 * each processor's heir in reverse processor order to force a defined idle
 * thread ordering.  NOTE(review): the heirs suspended here are presumably
 * resumed by a later test step — confirm against the callers.
 */
static void reset(test_context *ctx)
{
  rtems_status_code sc;
  size_t i;

  for (i = 0; i < TASK_COUNT; ++i) {
    set_priority(ctx->task_ids[i], P(i));
    set_affinity(ctx->task_ids[i], A(1, 1));
  }

  /* Park every task that has no processor of its own. */
  for (i = CPU_COUNT; i < TASK_COUNT; ++i) {
    sc = rtems_task_suspend(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_ALREADY_SUSPENDED);
  }

  /* Make one task per processor runnable again. */
  for (i = 0; i < CPU_COUNT; ++i) {
    sc = rtems_task_resume(ctx->task_ids[i]);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL || sc == RTEMS_INCORRECT_STATE);
  }

  /* Order the idle threads explicitly */
  for (i = 0; i < CPU_COUNT; ++i) {
    const Per_CPU_Control *c;
    const Thread_Control *h;

    c = _Per_CPU_Get_by_index(CPU_COUNT - 1 - i);
    h = c->heir;
    sc = rtems_task_suspend(h->Object.id);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  }
}
/*
 * Identifies the scheduler that owns the specified processor.
 *
 * @param cpu_index Index of the processor of interest.
 * @param id Where to store the scheduler identifier.
 *
 * @retval RTEMS_SUCCESSFUL The identifier was stored in *id.
 * @retval RTEMS_INVALID_ADDRESS The id pointer was NULL.
 * @retval RTEMS_INVALID_NAME The processor index is out of range.
 * @retval RTEMS_INCORRECT_STATE No scheduler is assigned to this processor
 *   (SMP configurations only; in uniprocessor builds an assignment always
 *   exists).
 */
rtems_status_code rtems_scheduler_ident_by_processor(
  uint32_t cpu_index,
  rtems_id *id
)
{
  const Scheduler_Control *scheduler;

  if ( id == NULL ) {
    return RTEMS_INVALID_ADDRESS;
  }

  if ( cpu_index >= _SMP_Get_processor_count() ) {
    return RTEMS_INVALID_NAME;
  }

  scheduler = _Scheduler_Get_by_CPU( _Per_CPU_Get_by_index( cpu_index ) );

#if defined(RTEMS_SMP)
  if ( scheduler == NULL ) {
    return RTEMS_INCORRECT_STATE;
  }
#else
  _Assert( scheduler != NULL );
#endif

  *id = _Scheduler_Build_id( _Scheduler_Get_index( scheduler ) );
  return RTEMS_SUCCESSFUL;
}
/*
 * Increments the thread dispatch disable level of the current processor and
 * acquires the Giant lock (recursively).  Interrupts are disabled around
 * the sequence so the executing thread cannot migrate between reading the
 * processor index and updating that processor's per-CPU data.
 *
 * @return The new (incremented) thread dispatch disable level.
 */
uint32_t _Thread_Dispatch_increment_disable_level( void )
{
  Giant_Control *giant = &_Giant;
  ISR_Level isr_level;
  uint32_t self_cpu_index;
  uint32_t disable_level;
  Per_CPU_Control *self_cpu;

  _ISR_Disable( isr_level );

  /*
   * We must obtain the processor ID after interrupts are disabled to prevent
   * thread migration.
   */
  self_cpu_index = _SMP_Get_current_processor();

  _Giant_Do_acquire( self_cpu_index );

  self_cpu = _Per_CPU_Get_by_index( self_cpu_index );
  disable_level = self_cpu->thread_dispatch_disable_level;
  ++disable_level;
  self_cpu->thread_dispatch_disable_level = disable_level;

  _ISR_Enable( isr_level );

  return disable_level;
}
/*
 * Sets the time of day.  Updates the timecounter clock, then re-anchors the
 * absolute (TOD-based) watchdogs of every online processor to the new time,
 * and finally records that the TOD has been set.
 *
 * @param tod_as_timestamp The new time of day.
 * @param lock_context Lock context handed through to the timecounter; the
 *   caller must also own the Once mutex (asserted below).
 */
void _TOD_Set(
  const Timestamp_Control *tod_as_timestamp,
  ISR_lock_Context *lock_context
)
{
  struct timespec tod_as_timespec;
  uint64_t tod_as_ticks;
  uint32_t cpu_count;
  uint32_t cpu_index;

  _Assert( _API_Mutex_Is_owner( _Once_Mutex ) );

  _Timecounter_Set_clock( tod_as_timestamp, lock_context );

  _Timestamp_To_timespec( tod_as_timestamp, &tod_as_timespec );
  tod_as_ticks = _Watchdog_Ticks_from_timespec( &tod_as_timespec );
  cpu_count = _SMP_Get_processor_count();

  /* Every processor maintains its own absolute watchdog header; each one
   * must be re-evaluated against the new clock. */
  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    _Watchdog_Per_CPU_tickle_absolute( cpu, tod_as_ticks );
  }

  _TOD.is_set = true;
}
/*
 * Sends an inter-processor message to the given processor: the message bits
 * are merged atomically (release order) into the target's message word and
 * an inter-processor interrupt is raised to make the target notice them.
 */
void _SMP_Send_message( uint32_t cpu_index, unsigned long message )
{
  Per_CPU_Control *target = _Per_CPU_Get_by_index( cpu_index );

  _Atomic_Fetch_or_ulong( &target->message, message, ATOMIC_ORDER_RELEASE );
  _CPU_SMP_Send_interrupt( cpu_index );
}
/*
 * Initializes the interrupt handling subsystem: resets the ISR nest level,
 * optionally allocates the vector table (simple vectored architectures),
 * optionally allocates and aligns one interrupt stack per configured
 * processor, and optionally installs a hardware interrupt stack.
 */
void _ISR_Handler_initialization( void )
{
  _ISR_Nest_level = 0;

#if (CPU_SIMPLE_VECTORED_INTERRUPTS == TRUE)
  _ISR_Vector_table = _Workspace_Allocate_or_fatal_error(
    sizeof(ISR_Handler_entry) * ISR_NUMBER_OF_VECTORS
  );

  _CPU_Initialize_vectors();
#endif

#if ( CPU_ALLOCATE_INTERRUPT_STACK == TRUE )
  {
    size_t stack_size = rtems_configuration_get_interrupt_stack_size();
    uint32_t max_cpus = rtems_configuration_get_maximum_processors();
    uint32_t cpu;

    if ( !_Stack_Is_enough( stack_size ) )
      _Terminate(
        INTERNAL_ERROR_CORE,
        true,
        INTERNAL_ERROR_INTERRUPT_STACK_TOO_SMALL
      );

    for ( cpu = 0 ; cpu < max_cpus; ++cpu ) {
      Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
      void *low = _Workspace_Allocate_or_fatal_error( stack_size );
      void *high = _Addresses_Add_offset( low, stack_size );

#if (CPU_STACK_ALIGNMENT != 0)
      /* Round the stack top down so it is suitably aligned. */
      high = _Addresses_Align_down( high, CPU_STACK_ALIGNMENT );
#endif

      per_cpu->interrupt_stack_low = low;
      per_cpu->interrupt_stack_high = high;

      /*
       * Interrupt stack might have to be aligned and/or setup in a specific
       * way.  Do not use the local low or high variables here since
       * _CPU_Interrupt_stack_setup() is a nasty macro that might want to play
       * with the real memory locations.
       */
#if defined(_CPU_Interrupt_stack_setup)
      _CPU_Interrupt_stack_setup(
        per_cpu->interrupt_stack_low,
        per_cpu->interrupt_stack_high
      );
#endif
    }
  }
#endif

#if ( CPU_HAS_HARDWARE_INTERRUPT_STACK == TRUE )
  _CPU_Install_interrupt_stack();
#endif
}
/*
 * Initializes SMP support: fills the per-CPU pointer table, optionally
 * allocates an aligned interrupt stack for every secondary processor, lets
 * the BSP discover and start the secondary cores, records the resulting
 * processor count, and finally waits until each secondary processor is
 * ready to begin multitasking.
 */
void _SMP_Handler_initialize(void)
{
  uint32_t max_cpus = rtems_configuration_get_maximum_processors();
  uint32_t cpu;

  /*
   * Initialize per cpu pointer table
   */
  _Per_CPU_Information_p[0] = _Per_CPU_Get_by_index( 0 );
  for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
    Per_CPU_Control *p = _Per_CPU_Get_by_index( cpu );

    _Per_CPU_Information_p[cpu] = p;

#if CPU_ALLOCATE_INTERRUPT_STACK == TRUE
    {
      size_t size = rtems_configuration_get_interrupt_stack_size();
      uintptr_t ptr;

      p->interrupt_stack_low = _Workspace_Allocate_or_fatal_error( size );

      /* Align the stack top downwards to the CPU stack alignment. */
      ptr = (uintptr_t) _Addresses_Add_offset( p->interrupt_stack_low, size );
      ptr &= ~(CPU_STACK_ALIGNMENT - 1);
      p->interrupt_stack_high = (void *)ptr;
    }
#endif
  }

  /*
   * Discover and initialize the secondary cores in an SMP system.
   */
  max_cpus = bsp_smp_initialize( max_cpus );

  _SMP_Processor_count = max_cpus;

  for ( cpu = 1 ; cpu < max_cpus; ++cpu ) {
    const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );

    _Per_CPU_Wait_for_state(
      per_cpu,
      PER_CPU_STATE_READY_TO_BEGIN_MULTITASKING
    );
  }
}
/*
 * Default clock tick handler of the scheduler: forwards the tick to the
 * thread currently executing on each online processor.
 */
void _Scheduler_default_Tick( Scheduler_Control *scheduler )
{
  uint32_t cpu_index;
  uint32_t cpu_count = _SMP_Get_processor_count();

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    _Scheduler_default_Tick_for_executing(
      scheduler,
      _Per_CPU_Get_by_index( cpu_index )->executing
    );
  }
}
/*
 * Releases a secondary processor parked in a U-Boot spin table.  The PIR
 * and the stack pointer argument (r3) are published and flushed to memory
 * first; only after a data synchronization barrier is the start address
 * written and flushed, since the secondary core leaves the spin loop as
 * soon as it observes a non-zero address.  The write order is therefore
 * essential and must not be rearranged.
 */
static void release_processor(uboot_spin_table *spin_table, uint32_t cpu_index)
{
  const Per_CPU_Control *cpu = _Per_CPU_Get_by_index(cpu_index);

  spin_table->pir = cpu_index;
  /* Stack grows downwards, so the high end is the initial stack pointer. */
  spin_table->r3_lower = (uint32_t) cpu->interrupt_stack_high;
  spin_table->addr_upper = 0;
  rtems_cache_flush_multiple_data_lines(spin_table, sizeof(*spin_table));
  ppc_synchronize_data();
  spin_table->addr_lower = (uint32_t) _start_secondary_processor;
  rtems_cache_flush_multiple_data_lines(spin_table, sizeof(*spin_table));
}
/*
 * Debug assertion: verifies that thread dispatching is currently repressed,
 * i.e. either the current processor's dispatch disable level is non-zero or
 * the caller runs at an elevated interrupt level.  The per-CPU read is done
 * with interrupts disabled so the processor cannot change underneath us.
 */
void _Assert_Thread_dispatching_repressed( void )
{
  bool dispatch_is_disabled;
  ISR_Level level;
  Per_CPU_Control *per_cpu;

  _ISR_Disable( level );
  per_cpu = _Per_CPU_Get_by_index( _SMP_Get_current_processor() );
  dispatch_is_disabled = per_cpu->thread_dispatch_disable_level != 0;
  _ISR_Enable( level );

  _Assert( dispatch_is_disabled || _ISR_Get_level() != 0 );
}
/*
 * Creates an idle thread for every processor that has been started.
 * Processors that never came up are skipped.
 */
void _Thread_Create_idle( void )
{
  uint32_t index;
  uint32_t count = _SMP_Get_processor_count();

  for ( index = 0 ; index < count ; ++index ) {
    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( index );

    if ( !_Per_CPU_Is_processor_started( per_cpu ) ) {
      continue;
    }

    _Thread_Create_idle_for_cpu( per_cpu );
  }
}
/*
 * Starts the processors according to the initial scheduler assignments.
 * The boot processor (the one executing this code) is marked started
 * unconditionally and must have a scheduler assignment; every other
 * processor is started only if its assignment requests it, and failure to
 * start a mandatory processor is fatal.  For each started processor the
 * online masks and the per-CPU scheduler control/context are set up.
 *
 * @param cpu_count Number of processors detected by the CPU layer.
 */
static void _SMP_Start_processors( uint32_t cpu_count )
{
  uint32_t cpu_index_self;
  uint32_t cpu_index;

  cpu_index_self = _SMP_Get_current_processor();

  for ( cpu_index = 0 ; cpu_index < cpu_count; ++cpu_index ) {
    const Scheduler_Assignment *assignment;
    Per_CPU_Control *cpu;
    bool started;

    assignment = _Scheduler_Get_initial_assignment( cpu_index );
    cpu = _Per_CPU_Get_by_index( cpu_index );

    if ( cpu_index != cpu_index_self ) {
      if ( _Scheduler_Should_start_processor( assignment ) ) {
        started = _CPU_SMP_Start_processor( cpu_index );

        /* A mandatory processor that cannot be started is fatal; an
         * optional one is simply left offline. */
        if ( !started && _Scheduler_Is_mandatory_processor( assignment ) ) {
          _SMP_Fatal( SMP_FATAL_START_OF_MANDATORY_PROCESSOR_FAILED );
        }
      } else {
        started = false;
      }
    } else {
      started = true;
      cpu->boot = true;

      if ( !_Scheduler_Should_start_processor( assignment ) ) {
        _SMP_Fatal( SMP_FATAL_BOOT_PROCESSOR_NOT_ASSIGNED_TO_SCHEDULER );
      }
    }

    cpu->online = started;

    if ( started ) {
      const Scheduler_Control *scheduler;
      Scheduler_Context *context;

      scheduler = assignment->scheduler;
      context = _Scheduler_Get_context( scheduler );

      _Processor_mask_Set( &_SMP_Online_processors, cpu_index );
      _Processor_mask_Set( &context->Processors, cpu_index );
      cpu->Scheduler.control = scheduler;
      cpu->Scheduler.context = context;
    }
  }
}
/*
 * rtems_cpu_usage_reset
 *
 * Resets the CPU usage statistics: records the current uptime as the reset
 * reference, stamps every processor's usage timestamp with it, and clears
 * the per-thread statistics via the iteration handler.
 */
void rtems_cpu_usage_reset( void )
{
  uint32_t cpu_count;
  uint32_t cpu_index;

  _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );

  cpu_count = rtems_get_processor_count();
  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    cpu->cpu_usage_timestamp = CPU_usage_Uptime_at_last_reset;
  }

  rtems_iterate_over_all_threads(CPU_usage_Per_thread_handler);
}
/*
 * Sends an inter-processor message to the given processor: merges the
 * message bits into the target's message word under its per-CPU lock and
 * raises an inter-processor interrupt so the target processes them.
 *
 * @param cpu Index of the target processor.
 * @param message Message bit mask to deliver.
 *
 * Fix: the debug printk passed uint32_t arguments to "%x" and "%d".  The
 * varargs contract requires (unsigned) int for these conversions and
 * uint32_t is not guaranteed to be (unsigned) int on every port, so the
 * arguments are now cast explicitly and the index uses "%u" since it is
 * unsigned.
 */
void _SMP_Send_message( uint32_t cpu, uint32_t message )
{
  Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
  ISR_Level level;

#if defined(RTEMS_DEBUG)
  if ( message & RTEMS_BSP_SMP_SIGNAL_TO_SELF )
    printk( "Send 0x%x to %u\n", (unsigned int) message, (unsigned int) cpu );
#endif

  _Per_CPU_Lock_acquire( per_cpu, level );
  per_cpu->message |= message;
  _Per_CPU_Lock_release( per_cpu, level );

  _CPU_SMP_Send_interrupt( cpu );
}
/*
 * Verifies that the WATCHDOG_INITIALIZER() static initializer produces
 * exactly the same memory image as run-time initialization: a zeroed
 * watchdog that is pre-initialized for processor 0 and then initialized
 * with the same routine must compare byte-for-byte equal.
 */
static void test_watchdog_static_init( void )
{
  static Watchdog_Control a = WATCHDOG_INITIALIZER(
    test_watchdog_routine
  );
  Watchdog_Control b;

  /* Zero first so padding bytes are defined for the memcmp() below. */
  memset( &b, 0, sizeof( b ) );
  _Watchdog_Preinitialize( &b, _Per_CPU_Get_by_index( 0 ) );
  _Watchdog_Initialize( &b, test_watchdog_routine );

  rtems_test_assert( memcmp( &a, &b, sizeof( a ) ) == 0 );
}
/*
 * Requests that all online processors start multitasking: first moves the
 * executing processor into the ready-to-start state, then requests the
 * start-multitasking transition on every online processor (including the
 * executing one).
 */
void _SMP_Request_start_multitasking( void )
{
  Per_CPU_Control *self_cpu = _Per_CPU_Get();
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  _Per_CPU_State_change( self_cpu, PER_CPU_STATE_READY_TO_START_MULTITASKING );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    if ( _Per_CPU_Is_processor_online( cpu ) ) {
      _Per_CPU_State_change( cpu, PER_CPU_STATE_REQUEST_START_MULTITASKING );
    }
  }
}
void _SMP_Request_other_cores_to_perform_first_context_switch( void ) { uint32_t self = _SMP_Get_current_processor(); uint32_t ncpus = _SMP_Get_processor_count(); uint32_t cpu; for ( cpu = 0 ; cpu < ncpus ; ++cpu ) { Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu ); if ( cpu != self ) { _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_BEGIN_MULTITASKING ); } else { _Per_CPU_Change_state( per_cpu, PER_CPU_STATE_UP ); } } }
/*
 * Asks every other processor to shut down by broadcasting the shutdown
 * message, then blocks until each of them has reached the shutdown state.
 */
void _SMP_Request_other_cores_to_shutdown( void )
{
  uint32_t cpu_index;
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t self_index = _SMP_Get_current_processor();

  _SMP_Broadcast_message( RTEMS_BSP_SMP_SHUTDOWN );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    if ( cpu_index == self_index ) {
      continue;
    }

    _Per_CPU_Wait_for_state(
      _Per_CPU_Get_by_index( cpu_index ),
      PER_CPU_STATE_SHUTDOWN
    );
  }
}
/*
 * Broadcasts a message to all processors except the executing one: the
 * message bits are merged into each target's message word under its per-CPU
 * lock, then a single broadcast interrupt is raised so the targets process
 * their pending messages.
 */
void _SMP_Broadcast_message( uint32_t message )
{
  uint32_t self = _SMP_Get_current_processor();
  uint32_t ncpus = _SMP_Get_processor_count();
  uint32_t cpu;

  for ( cpu = 0 ; cpu < ncpus ; ++cpu ) {
    if ( cpu != self ) {
      Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
      ISR_Level level;

      _Per_CPU_Lock_acquire( per_cpu, level );
      per_cpu->message |= message;
      _Per_CPU_Lock_release( per_cpu, level );
    }
  }

  bsp_smp_broadcast_interrupt();
}
/*
 * Creates a rate monotonic period object.
 *
 * @param name Name of the new period; must be a valid object name.
 * @param id Where to store the identifier of the created period.
 *
 * @retval RTEMS_SUCCESSFUL The period was created; *id holds its id.
 * @retval RTEMS_INVALID_NAME The name was invalid.
 * @retval RTEMS_INVALID_ADDRESS The id pointer was NULL.
 * @retval RTEMS_TOO_MANY No period object was available.
 *
 * Note: _Rate_monotonic_Allocate() obtains the objects allocator lock,
 * which is released on both the failure and the success path below.
 */
rtems_status_code rtems_rate_monotonic_create(
  rtems_name name,
  rtems_id *id
)
{
  Rate_monotonic_Control *the_period;

  if ( !rtems_is_name_valid( name ) )
    return RTEMS_INVALID_NAME;

  if ( !id )
    return RTEMS_INVALID_ADDRESS;

  the_period = _Rate_monotonic_Allocate();

  if ( !the_period ) {
    _Objects_Allocator_unlock();
    return RTEMS_TOO_MANY;
  }

  _ISR_lock_Initialize( &the_period->Lock, "Rate Monotonic Period" );
  _Priority_Node_initialize( &the_period->Priority, 0 );
  _Priority_Node_set_inactive( &the_period->Priority );

  /* The creating thread owns the period; it starts out inactive. */
  the_period->owner = _Thread_Get_executing();
  the_period->state = RATE_MONOTONIC_INACTIVE;

  _Watchdog_Preinitialize( &the_period->Timer, _Per_CPU_Get_by_index( 0 ) );
  _Watchdog_Initialize( &the_period->Timer, _Rate_monotonic_Timeout );

  _Rate_monotonic_Reset_statistics( the_period );

  _Objects_Open(
    &_Rate_monotonic_Information,
    &the_period->Object,
    (Objects_Name) name
  );

  *id = the_period->Object.id;
  _Objects_Allocator_unlock();
  return RTEMS_SUCCESSFUL;
}
/*
 * rtems_cpu_usage_reset
 *
 * Resets the CPU usage statistics.  With nanosecond-granularity statistics
 * the current uptime becomes the reset reference and every processor's
 * last-context-switch time is stamped with it; with tick-based statistics
 * only the tick count at reset is recorded.  In both cases the per-thread
 * statistics are cleared via the iteration handler.
 */
void rtems_cpu_usage_reset( void )
{
#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  uint32_t processor_count;
  uint32_t processor;

  _TOD_Get_uptime( &CPU_usage_Uptime_at_last_reset );

  processor_count = rtems_smp_get_processor_count();
  for ( processor = 0 ; processor < processor_count ; ++processor ) {
    Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( processor );

    per_cpu->time_of_last_context_switch = CPU_usage_Uptime_at_last_reset;
  }
#else
  CPU_usage_Ticks_at_last_reset = _Watchdog_Ticks_since_boot;
#endif

  rtems_iterate_over_all_threads(CPU_usage_Per_thread_handler);
}
/*
 * Verifies that each processor's heir matches the allocation expected by
 * the test action: either the task with the expected index, or an idle
 * thread when the expectation is IDLE.
 */
static void check_cpu_allocations(test_context *ctx, const test_action *action)
{
  size_t cpu;

  for (cpu = 0; cpu < CPU_COUNT; ++cpu) {
    size_t expected = action->expected_cpu_allocations[cpu];
    const Thread_Control *heir = _Per_CPU_Get_by_index(cpu)->heir;

    if (expected == IDLE) {
      rtems_test_assert(heir->is_idle);
    } else {
      rtems_test_assert(heir->Object.id == ctx->task_ids[expected]);
    }
  }
}
/*
 * Driver initialization hook used as the test entry point.  It runs before
 * multitasking starts, records the executing processor as the main one,
 * checks the expected per-CPU states (only processors below the detected
 * count may already be ready to start multitasking), and then either raises
 * an application fatal error to exercise the SMP shutdown path (multi-core)
 * or ends the test immediately (single core).
 */
static rtems_status_code test_driver_init(
  rtems_device_major_number major,
  rtems_device_minor_number minor,
  void *arg
)
{
  uint32_t self = rtems_get_current_processor();
  uint32_t cpu_count = rtems_get_processor_count();
  uint32_t cpu;

  rtems_test_begink();

  assert(rtems_configuration_get_maximum_processors() == MAX_CPUS);

  main_cpu = self;

  for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
    const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
    Per_CPU_State state = per_cpu->state;

    if (cpu == self) {
      assert(state == PER_CPU_STATE_INITIAL);
    } else if (cpu < cpu_count) {
      /* Secondary processors may still be initializing or already waiting
       * to start multitasking. */
      assert(
        state == PER_CPU_STATE_INITIAL
          || state == PER_CPU_STATE_READY_TO_START_MULTITASKING
      );
    } else {
      /* Processors beyond the detected count never leave the initial
       * state. */
      assert(state == PER_CPU_STATE_INITIAL);
    }
  }

  if (cpu_count > 1) {
    rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0xdeadbeef);
  } else {
    rtems_test_endk();
    exit(0);
  }

  return RTEMS_SUCCESSFUL;
}
/*
 * Runs a before-multitasking action on every online non-boot processor.
 * Stops at the first processor for which the action could not be carried
 * out and reports the overall success.
 *
 * @retval true The action was performed on all eligible processors.
 * @retval false The action failed on some processor; the remaining ones
 *   were skipped.
 */
bool _SMP_Before_multitasking_action_broadcast(
  SMP_Action_handler handler,
  void *arg
)
{
  uint32_t cpu_index;
  uint32_t cpu_count = _SMP_Get_processor_count();
  bool done = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    if (
      _Per_CPU_Is_boot_processor( cpu )
        || !_Per_CPU_Is_processor_online( cpu )
    ) {
      continue;
    }

    done = _SMP_Before_multitasking_action( cpu, handler, arg );

    if ( !done ) {
      break;
    }
  }

  return done;
}
/*
 * Fatal error extension for the SMP shutdown test with a final rendezvous.
 * The main processor must receive the application fatal error (0xdeadbeef)
 * and checks that all processors reached the shutdown state before printing
 * the end-of-test marker; every other processor must receive the SMP
 * shutdown code.  All processors then meet at the SMP barrier so none of
 * them proceeds before the checks are complete.
 */
static void fatal_extension(
  rtems_fatal_source source,
  bool is_internal,
  rtems_fatal_code code
)
{
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  if (
    source == RTEMS_FATAL_SOURCE_APPLICATION
      || source == RTEMS_FATAL_SOURCE_SMP
  ) {
    uint32_t self = rtems_get_current_processor();

    assert(!is_internal);

    if (self == main_cpu) {
      uint32_t cpu;

      assert(source == RTEMS_FATAL_SOURCE_APPLICATION);
      assert(code == 0xdeadbeef);

      for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
        const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );
        Per_CPU_State state = per_cpu->state;

        assert(state == PER_CPU_STATE_SHUTDOWN);
      }

      rtems_test_endk();
    } else {
      assert(source == RTEMS_FATAL_SOURCE_SMP);
      assert(code == SMP_FATAL_SHUTDOWN);
    }
  }

  /* Rendezvous of all processors before the fatal handling continues. */
  _SMP_barrier_Wait(&barrier, &barrier_state, rtems_get_processor_count());
}
/*
 * Initializes SMP support: sets up the per-CPU locks, statistics, and
 * help-needed chains for every configured processor, lets the CPU layer
 * detect the available processors (clamped to the configured maximum),
 * verifies that no mandatory processor is missing, and finally starts the
 * present processors and lets the CPU layer finalize its initialization.
 */
void _SMP_Handler_initialize( void )
{
  uint32_t cpu_max = rtems_configuration_get_maximum_processors();
  uint32_t cpu_count;
  uint32_t cpu_index;

  for ( cpu_index = 0 ; cpu_index < cpu_max; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    _ISR_lock_Initialize( &cpu->Watchdog.Lock, "Watchdog" );
    _SMP_ticket_lock_Initialize( &cpu->Lock );
    _SMP_lock_Stats_initialize( &cpu->Lock_stats, "Per-CPU" );
    _Chain_Initialize_empty( &cpu->Threads_in_need_for_help );
  }

  /*
   * Discover and initialize the secondary cores in an SMP system.
   */
  cpu_count = _CPU_SMP_Initialize();
  /* Never exceed the configured processor maximum. */
  cpu_count = cpu_count < cpu_max ? cpu_count : cpu_max;
  _SMP_Processor_count = cpu_count;

  /* A mandatory processor that is not physically present is fatal. */
  for ( cpu_index = cpu_count ; cpu_index < cpu_max; ++cpu_index ) {
    const Scheduler_Assignment *assignment;

    assignment = _Scheduler_Get_initial_assignment( cpu_index );

    if ( _Scheduler_Is_mandatory_processor( assignment ) ) {
      _SMP_Fatal( SMP_FATAL_MANDATORY_PROCESSOR_NOT_PRESENT );
    }
  }

  _SMP_Start_processors( cpu_count );

  _CPU_SMP_Finalize_initialization( cpu_count );
}