/*
 * Fatal extension for the shutdown test: verifies that each processor
 * terminates with the expected fatal source/code pair, then synchronizes
 * all processors around the end-of-test message.
 */
static void fatal_extension(
  rtems_fatal_source source,
  bool is_internal,
  rtems_fatal_code code
)
{
  bool expected_source = source == RTEMS_FATAL_SOURCE_APPLICATION
    || source == RTEMS_FATAL_SOURCE_SMP;

  if ( expected_source ) {
    uint32_t cpu_index = rtems_get_current_processor();
    SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

    assert(!is_internal);

    if (cpu_index == main_cpu) {
      /* The main processor terminates via the SMP shutdown response */
      assert(source == RTEMS_FATAL_SOURCE_SMP);
      assert(code == SMP_FATAL_SHUTDOWN_RESPONSE);
    } else {
      /* The other processors raised the application fatal error */
      assert(source == RTEMS_FATAL_SOURCE_APPLICATION);
      assert(code == 0xdeadbeef);
    }

    /* Rendezvous of all processors before the end message is printed */
    _SMP_barrier_Wait(&fatal_barrier, &state, CPU_COUNT);

    if (cpu_index == 0) {
      rtems_test_endk();
    }

    /* Hold everyone here until the end message is out */
    _SMP_barrier_Wait(&fatal_barrier, &state, CPU_COUNT);
  }
}
/*
 * Broadcasts the test cache message to the CPUs in cpu_set and checks
 * that this processor's counter saw one increment per processor.
 */
static void test_func_test(
  size_t set_size,
  cpu_set_t *cpu_set,
  SMP_barrier_State *bs
)
{
  uint32_t cpu_self = rtems_get_current_processor();
  uint32_t cpu_count = rtems_get_processor_count();

  ctx.count[cpu_self] = 0;
  _SMP_barrier_Wait( &ctx.barrier, bs, cpu_count );

  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );

  _SMP_barrier_Wait( &ctx.barrier, bs, cpu_count );
  rtems_test_assert( ctx.count[cpu_self] == cpu_count );
}
/* Two-party rendezvous on the test context barrier. */
static void barrier( test_context *ctx, SMP_barrier_State *state )
{
  _SMP_barrier_Wait( &ctx->barrier, state, 2 );
}
/*
 * Rendezvous with the other processors, then disable thread dispatching.
 * NOTE(review): the name suggests _Thread_Disable_dispatch() contends for
 * the Giant lock here — confirm against the RTEMS version in use.
 */
static void wait_for_giant(void)
{
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  _SMP_barrier_Wait(&giant_barrier, &barrier_state, CPU_COUNT);
  _Thread_Disable_dispatch();
}
/* Rendezvous with the other processors, then contend for the Giant lock. */
static void wait_for_giant(void)
{
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  _SMP_barrier_Wait(&giant_barrier, &barrier_state, CPU_COUNT);
  _Giant_Acquire();
}
/*
 * Same as test_func_test, but with interrupts disabled on this processor
 * while the multicast action is issued.
 */
static void test_func_isrdisabled_test(
  size_t set_size,
  cpu_set_t *cpu_set,
  SMP_barrier_State *bs
)
{
  ISR_Level level;
  uint32_t cpu_self = rtems_get_current_processor();
  uint32_t cpu_count = rtems_get_processor_count();

  ctx.count[cpu_self] = 0;

  _ISR_Disable_without_giant( level );
  _SMP_barrier_Wait( &ctx.barrier, bs, cpu_count );

  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );

  _ISR_Enable_without_giant( level );
  _SMP_barrier_Wait( &ctx.barrier, bs, cpu_count );

  rtems_test_assert( ctx.count[cpu_self] == cpu_count );
}
/*
 * Runs each parallel job on this worker.  Cascading jobs are repeated for
 * every active-worker count from 1 up to the processor count; other jobs
 * run once with all processors active.  The master worker initializes and
 * finalizes each step; barriers bracket the job body on all workers.
 */
static void run_tests(
  rtems_test_parallel_context *ctx,
  const rtems_test_parallel_job *jobs,
  size_t job_count,
  size_t worker_index
)
{
  SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER;
  size_t job_index;

  for (job_index = 0; job_index < job_count; ++job_index) {
    const rtems_test_parallel_job *job = &jobs[job_index];
    size_t last_step = rtems_get_processor_count();
    size_t step;

    for (
      step = job->cascade ? 0 : rtems_get_processor_count() - 1;
      step < last_step;
      ++step
    ) {
      size_t active_worker = step + 1;

      if (rtems_test_parallel_is_master_worker(worker_index)) {
        rtems_interval duration = (*job->init)(ctx, job->arg, active_worker);

        if (duration > 0) {
          start_worker_stop_timer(ctx, duration);
        }
      }

      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);

      /* Only the first active_worker workers execute the job body */
      if (worker_index <= step) {
        (*job->body)(ctx, job->arg, active_worker, worker_index);
      }

      _SMP_barrier_Wait(&ctx->barrier, &bs, ctx->worker_count);

      if (rtems_test_parallel_is_master_worker(worker_index)) {
        (*job->fini)(ctx, job->arg, active_worker);
      }
    }
  }
}
/*
 * Same as test_func_test, but processor 0 disables thread dispatching for
 * the duration of the multicast action.
 */
static void test_func_giant_taken_test(
  size_t set_size,
  cpu_set_t *cpu_set,
  SMP_barrier_State *bs
)
{
  uint32_t cpu_self = rtems_get_current_processor();
  uint32_t cpu_count = rtems_get_processor_count();

  ctx.count[cpu_self] = 0;

  if ( cpu_self == 0 ) {
    _Thread_Disable_dispatch();
  }

  _SMP_barrier_Wait( &ctx.barrier, bs, cpu_count );

  _SMP_Multicast_action( set_size, cpu_set, test_cache_message, &ctx );

  _SMP_barrier_Wait( &ctx.barrier, bs, cpu_count );

  rtems_test_assert( ctx.count[cpu_self] == cpu_count );

  if ( cpu_self == 0 ) {
    _Thread_Enable_dispatch();
  }
}
/*
 * Exercises the standard SMP cache manager functions while processor 0
 * has thread dispatching disabled.
 */
static void standard_funcs_giant_taken_test(
  size_t set_size,
  cpu_set_t *cpu_set,
  SMP_barrier_State *bs
)
{
  uint32_t cpu_self = rtems_get_current_processor();

  if ( cpu_self == 0 ) {
    _Thread_Disable_dispatch();
  }

  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  cache_manager_smp_functions( set_size, cpu_set );

  if ( cpu_self == 0 ) {
    _Thread_Enable_dispatch();
  }
}
/*
 * Exercises the standard SMP cache manager functions with interrupts
 * disabled on this processor.
 */
static void standard_funcs_isrdisabled_test(
  size_t set_size,
  cpu_set_t *cpu_set,
  SMP_barrier_State *bs
)
{
  ISR_Level level;

  _ISR_Disable_without_giant( level );
  _SMP_barrier_Wait( &ctx.barrier, bs, rtems_get_processor_count() );

  cache_manager_smp_functions( set_size, cpu_set );

  _ISR_Enable_without_giant( level );
}
/*
 * Acquires the Giant lock with a deep nesting level, lets the other
 * processor start contending for it, and then raises a fatal error.
 */
static void acquire_giant_and_fatal_task(rtems_task_argument arg)
{
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
  int nest_level;

  for (nest_level = 0; nest_level < 13; ++nest_level) {
    _Thread_Disable_dispatch();
  }

  _SMP_barrier_Wait(&giant_barrier, &barrier_state, CPU_COUNT);

  /*
   * Now we have to wait some time so that the other thread can actually start
   * with the _Giant_Acquire() procedure.
   */
  rtems_counter_delay_nanoseconds(1000000);

  rtems_fatal(RTEMS_FATAL_SOURCE_APPLICATION, 0xdeadbeef);
}
/*
 * Fatal extension for the shutdown test: the main processor checks the
 * application fatal code and that every other processor has reached the
 * shutdown state, then ends the test.  All processors (for any fatal
 * source) meet at the final barrier.
 */
static void fatal_extension(
  rtems_fatal_source source,
  bool is_internal,
  rtems_fatal_code code
)
{
  SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;

  if (
    source == RTEMS_FATAL_SOURCE_APPLICATION
      || source == RTEMS_FATAL_SOURCE_SMP
  ) {
    uint32_t cpu_self = rtems_get_current_processor();

    assert(!is_internal);

    if (cpu_self != main_cpu) {
      /* Secondary processors terminate via the SMP shutdown */
      assert(source == RTEMS_FATAL_SOURCE_SMP);
      assert(code == SMP_FATAL_SHUTDOWN);
    } else {
      uint32_t cpu;

      assert(source == RTEMS_FATAL_SOURCE_APPLICATION);
      assert(code == 0xdeadbeef);

      /* Every processor must have reached the shutdown state by now */
      for (cpu = 0; cpu < MAX_CPUS; ++cpu) {
        const Per_CPU_Control *per_cpu = _Per_CPU_Get_by_index( cpu );

        assert(per_cpu->state == PER_CPU_STATE_SHUTDOWN);
      }

      rtems_test_endk();
    }
  }

  /* This rendezvous is reached for every fatal source */
  _SMP_barrier_Wait(&barrier, &barrier_state, rtems_get_processor_count());
}
static void all_tests( void ) { uint32_t cpu_count = rtems_get_processor_count(); size_t set_size = CPU_ALLOC_SIZE( rtems_get_processor_count() ); cpu_set_t *cpu_set = CPU_ALLOC( rtems_get_processor_count() ); SMP_barrier_State bs = SMP_BARRIER_STATE_INITIALIZER; /* Send message to all available CPUs */ CPU_FILL_S( set_size, cpu_set ); /* Call SMP cache manager functions */ cmlog( "Calling standard SMP cache functions. " ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); standard_funcs_test( set_size, cpu_set ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); cmlog( "Done!\n"); /* Call SMP cache manager functions with ISR disabled */ cmlog( "Calling standard SMP cache functions with ISR disabled. " ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); standard_funcs_isrdisabled_test( set_size, cpu_set, &bs ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); cmlog( "Done!\n" ); /* Call SMP cache manager functions with core 0 holding the giant lock */ cmlog( "Calling standard SMP cache functions with CPU0 holding " "the giant lock. " ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); standard_funcs_giant_taken_test( set_size, cpu_set, &bs ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); cmlog( "Done!\n"); /* Call a test function using SMP cache manager and verify that all * cores invoke the function */ cmlog( "Calling a test function using the SMP cache manager to " "verify that all CPUs receive the SMP message. " ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); test_func_test( set_size, cpu_set, &bs ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); cmlog( "Done!\n"); /* Call a test function using SMP cache manager and verify that all * cores invoke the function. ISR disabled. */ cmlog( "Calling a test function using the SMP cache manager to " "verify that all CPUs receive the SMP message. With ISR disabled. 
" ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); test_func_isrdisabled_test( set_size, cpu_set, &bs ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); cmlog( "Done!\n" ); /* Call a test function using SMP cache manager and verify that all * cores invoke the function. Core 0 holding giant lock. */ cmlog( "Calling a test function using the SMP cache manager to " "verify that all CPUs receive the SMP message. With CPU0 " "holding the giant lock. " ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); test_func_giant_taken_test( set_size, cpu_set, &bs ); _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count ); cmlog( "Done!\n" ); /* Done. Free up memory. */ _SMP_barrier_Wait( &ctx.barrier, &bs, cpu_count); CPU_FREE( cpu_set ); }
/* Rendezvous of all CPU_COUNT processors on the test context barrier. */
static void wait(test_context *ctx, SMP_barrier_State *bs)
{
  _SMP_barrier_Wait(&ctx->barrier, bs, CPU_COUNT);
}