/*
 * Busy-waits until the processor identified by cpu_index has left the
 * PER_CPU_STATE_INITIAL state, or until the optional timeout expires.
 *
 * @param cpu_index      Index of the processor to observe.
 * @param timeout_in_ns  Timeout in nanoseconds; a value of 0 means wait
 *                       forever.
 *
 * @return true if the processor left the initial state, false if the wait
 *         timed out while the processor was still in the initial state.
 */
bool _Per_CPU_State_wait_for_non_initial_state( uint32_t cpu_index, uint32_t timeout_in_ns ) {
  const Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
  Per_CPU_State state = cpu->state;

  if ( timeout_in_ns > 0 ) {
    /* Convert the timeout into a tick budget that is consumed below. */
    rtems_counter_ticks ticks = rtems_counter_nanoseconds_to_ticks( timeout_in_ns );
    rtems_counter_ticks a = rtems_counter_read();
    rtems_counter_ticks delta = 0;

    /* Loop while budget remains and the observed CPU is still initializing. */
    while ( ticks > delta && state == PER_CPU_STATE_INITIAL ) {
      rtems_counter_ticks b;

      /* Wait for a processor event (e.g. WFE/pause) before re-sampling. */
      _CPU_SMP_Processor_event_receive();
      state = cpu->state;

      /*
       * Charge the previous iteration's elapsed ticks against the budget,
       * then measure the elapsed time of this iteration for the next round.
       */
      ticks -= delta;
      b = rtems_counter_read();
      delta = rtems_counter_difference( b, a );
      a = b;
    }
  } else {
    /* No timeout: wait indefinitely for the state change. */
    while ( state == PER_CPU_STATE_INITIAL ) {
      _CPU_SMP_Processor_event_receive();
      state = cpu->state;
    }
  }

  return state != PER_CPU_STATE_INITIAL;
}
/*
 * Records the intrinsic measurement overheads: back-to-back counter reads,
 * a zero-length nanosecond delay, and a zero-length tick delay.  The five
 * raw counter samples of each iteration and the read-to-read difference are
 * stored in the test context for later evaluation.
 */
static void test_overheads(test_context *ctx)
{
  int iter;

  for (iter = 0; iter < N; ++iter) {
    rtems_counter_ticks r0;
    rtems_counter_ticks r1;
    rtems_counter_ticks r2;
    rtems_counter_ticks r3;
    rtems_counter_ticks r4;
    rtems_counter_ticks diff;

    /* Two immediately consecutive reads bound the read overhead itself. */
    r0 = rtems_counter_read();
    r1 = rtems_counter_read();
    diff = rtems_counter_difference(r1, r0);

    /* Overhead of a nanosecond delay request of zero. */
    r2 = rtems_counter_read();
    rtems_counter_delay_nanoseconds(0);

    /* Overhead of a tick delay request of zero. */
    r3 = rtems_counter_read();
    rtems_counter_delay_ticks(0);
    r4 = rtems_counter_read();

    ctx->overhead_t[iter][0] = r0;
    ctx->overhead_t[iter][1] = r1;
    ctx->overhead_t[iter][2] = r2;
    ctx->overhead_t[iter][3] = r3;
    ctx->overhead_t[iter][4] = r4;
    ctx->overhead_delta = diff;
  }
}
/*
 * Executes a fixed instruction sequence (two I512() expansions, i.e. 1024
 * nop instructions) between two counter reads and returns the elapsed time
 * in nanoseconds.
 */
static uint64_t do_some_work(void)
{
  rtems_counter_ticks start;
  rtems_counter_ticks stop;

  start = rtems_counter_read();
  I512();
  I512();
  stop = rtems_counter_read();

  return rtems_counter_ticks_to_nanoseconds(
    rtems_counter_difference(stop, start)
  );
}
static uint64_t store(void) { rtems_counter_ticks a; rtems_counter_ticks b; rtems_counter_ticks d; size_t i; volatile int *vdata = &data[0]; a = rtems_counter_read(); for (i = 0; i < RTEMS_ARRAY_SIZE(data); ++i) { vdata[i] = 0; } b = rtems_counter_read(); d = rtems_counter_difference(b, a); return rtems_counter_ticks_to_nanoseconds(d); }
/*
 * Recurses from call level fl down to 0 and, at the bottom, measures the
 * cost of a self context switch after touching a fresh stack area.
 *
 * @param start  Call level at which the recursion was entered; a context
 *               switch is performed once at this level (see comment below).
 * @param fl     Remaining call levels to descend.
 * @param s      Sample index into the t[] result array.
 * @param dirty  If true, dirty the data cache and invalidate the entire
 *               instruction cache before measuring.
 *
 * @return 0 on the measurement path; the prevent_opt_func() branch exists
 *         only to keep the compiler from optimizing the recursion away.
 */
static int call_at_level(int start, int fl, int s, bool dirty) {
  if (fl == start) {
    /*
     * Some architectures like the SPARC have register windows. A side-effect
     * of this context switch is that we start with a fresh window set. On
     * architectures like ARM or PowerPC this context switch has no effect.
     */
    _Context_Switch(&ctx, &ctx);
  }

  if (fl > 0) {
    /* always_true holds at run time; the else branch defeats inlining/
     * tail-call elimination so real stack frames are built up. */
    if (always_true) {
      return call_at_level(start, fl - 1, s, dirty);
    } else {
      return prevent_opt_func(fl - 1, fl - 2);
    }
  } else {
    char *volatile space;
    rtems_counter_ticks a;
    rtems_counter_ticks b;

    if (dirty) {
      /* Worst-case cache state for the measured context switch. */
      dirty_data_cache(main_data, data_size, cache_line_size, fl);
      rtems_cache_invalidate_entire_instruction();
    }

    a = rtems_counter_read();

    /* Ensure that we use an untouched stack area */
    space = alloca(1024);
    (void) space;

    /* Self context switch: saves and restores this thread's context. */
    _Context_Switch(&ctx, &ctx);

    b = rtems_counter_read();
    t[s] = rtems_counter_difference(b, a);

    return 0;
  }
}
/*
 * Measures rtems_counter_delay_nanoseconds() over one clock tick worth of
 * nanoseconds.  Each sample is synchronized to a tick boundary first, and
 * the test asserts that at least one clock tick elapsed during the delay.
 */
static void test_delay_nanoseconds(test_context *ctx)
{
  int sample;

  for (sample = 0; sample < N; ++sample) {
    rtems_interval start_tick;
    rtems_counter_ticks before;
    rtems_counter_ticks after;

    /* Start right after a tick boundary to minimize jitter. */
    start_tick = sync_with_clock_tick();

    before = rtems_counter_read();
    rtems_counter_delay_nanoseconds(NS_PER_TICK);
    after = rtems_counter_read();

    ctx->delay_ns_t[sample][0] = before;
    ctx->delay_ns_t[sample][1] = after;

    /* The delay must have spanned at least one clock tick. */
    rtems_test_assert(start_tick < rtems_clock_get_ticks_since_boot());
  }
}