/*
 * Removes an interrupt handler that was previously installed on the
 * interrupt server for the given vector.
 *
 * Only the default server (RTEMS_ID_NONE) is supported.  The wrapper
 * entry that the server allocated for this handler/argument pair is
 * looked up via the handler iteration facility, detached from the
 * vector, and then freed.
 */
rtems_status_code rtems_interrupt_server_handler_remove(
  rtems_id server,
  rtems_vector_number vector,
  rtems_interrupt_handler handler,
  void *arg
)
{
  rtems_status_code status;
  bsp_interrupt_server_iterate_entry iterate = {
    .handler = handler,
    .arg = arg,
    .entry = NULL
  };

  status = bsp_interrupt_server_is_initialized();
  if (status != RTEMS_SUCCESSFUL) {
    return status;
  }

  /* Only the default interrupt server is implemented */
  if (server != RTEMS_ID_NONE) {
    return RTEMS_NOT_IMPLEMENTED;
  }

  /* Query the corresponding interrupt server entry */
  status = rtems_interrupt_handler_iterate(
    vector,
    bsp_interrupt_server_per_handler_routine,
    &iterate
  );
  if (status != RTEMS_SUCCESSFUL) {
    return status;
  }

  /* No entry means this handler/argument pair was never installed here */
  if (iterate.entry == NULL) {
    return RTEMS_INVALID_ID;
  }

  /* Detach the server trigger for this entry, then release its storage */
  status = rtems_interrupt_handler_remove(
    vector,
    bsp_interrupt_server_trigger,
    iterate.entry
  );
  if (status != RTEMS_SUCCESSFUL) {
    return status;
  }

  free(iterate.entry);

  return RTEMS_SUCCESSFUL;
}
void bsp_interrupt_report_with_plugin( void *context, rtems_printk_plugin_t print ) { rtems_vector_number v = 0; bsp_interrupt_report_entry e = { .context = context, .print = print, .vector = 0 }; print( context, "-------------------------------------------------------------------------------\n" " INTERRUPT INFORMATION\n" "--------+----------------------------------+---------+------------+------------\n" " VECTOR | INFO | OPTIONS | HANDLER | ARGUMENT \n" "--------+----------------------------------+---------+------------+------------\n" ); for (v = BSP_INTERRUPT_VECTOR_MIN; v <= BSP_INTERRUPT_VECTOR_MAX; ++v) { e.vector = v; rtems_interrupt_handler_iterate( v, bsp_interrupt_report_per_handler_routine, &e ); } print( context, "--------+----------------------------------+---------+------------+------------\n" ); } void bsp_interrupt_report(void) { bsp_interrupt_report_with_plugin(NULL, printk_plugin); }
/*
 * Test entry task.
 *
 * Opens the capture engine, runs the main workload on all processors,
 * optionally wraps the clock interrupt handler to count ticks, then
 * reads back and validates the recorded trace on every CPU:
 *   - timestamps are monotonically non-decreasing,
 *   - enter/exit records for add_number() are properly paired and the
 *     recorded results match a recomputation,
 *   - if the clock handler was wrapped, the number of recorded clock
 *     ticks matches cpu_count * CLOCK_TICKS.
 */
static void Init(rtems_task_argument arg)
{
  rtems_status_code sc;
  uint32_t i;
  uint32_t cpu;
  uint32_t cpu_count;
  uint32_t read;
  uint32_t enter_count;
  uint32_t exit_count;
  uint32_t clock_tick_count;
  uint32_t res_should_be;
  rtems_name name;
  rtems_capture_record_t *recs;
  rtems_capture_record_t *prev_rec;
  empty_record_t *record;
  enter_add_number_record_t *enter_add_number_rec;
  exit_add_number_record_t *exit_add_number_rec;
  rtems_vector_number vec;
  clock_interrupt_handler cih = { .found = 0 };

  (void) arg;

  TEST_BEGIN();

  /* Get the number of processors that we are using. */
  cpu_count = rtems_get_processor_count();

  sc = rtems_capture_open(50000, NULL);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_capture_watch_global(true);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  sc = rtems_capture_control(true);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Run main test */
  test(cpu_count);

  /*
   * Try to find the clock interrupt handler.  The valid vector range is
   * MIN..MAX inclusive (see bsp_interrupt_report_with_plugin), so iterate
   * with <=; the previous '<' bound skipped the last vector and could
   * miss a clock handler installed there.
   */
  for ( vec = BSP_INTERRUPT_VECTOR_MIN; vec <= BSP_INTERRUPT_VECTOR_MAX; vec++ ) {
    rtems_interrupt_handler_iterate(vec, locate_clock_interrupt_handler, &cih);
    if ( cih.found )
      break;
  }

  /* If we find the clock interrupt handler we replace it with
   * a wrapper and wait for a fixed number of ticks.
   */
  if ( cih.found ) {
#ifdef VERBOSE
    printf("Found a clock handler\n");
#endif
    org_clock_handler = cih.handler;
    rtems_interrupt_handler_install(vec, cih.info,
        cih.options | RTEMS_INTERRUPT_REPLACE,
        clock_tick_wrapper, cih.arg);

    rtems_task_wake_after(CLOCK_TICKS);
  }

  /* Disable capturing */
  sc = rtems_capture_control(false);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  clock_tick_count = 0;

  /* Read out the trace from all processors */
  for ( cpu = 0; cpu < cpu_count; cpu++ ) {
    sc = rtems_capture_read(cpu, &read, &recs);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);

    prev_rec = recs;
    enter_count = 0;
    exit_count = 0;
    res_should_be = 0;

    for ( i = 0; i < read; i++ ) {
      /* Verify that time goes forward */
      rtems_test_assert(recs->time >= prev_rec->time);

      if ( recs->events & RTEMS_CAPTURE_TIMESTAMP ) {
        /* The user payload immediately follows the capture header */
        record = (empty_record_t*)((char*) recs +
            sizeof(rtems_capture_record_t));

        switch ( record->id ) {
        case enter_add_number:
          /* Every enter must be preceded by a matching exit */
          rtems_test_assert(enter_count == exit_count);
          enter_count++;

          enter_add_number_rec = (enter_add_number_record_t*)record;
          /* Recompute the expected result for the upcoming exit record */
          res_should_be = add_number(enter_add_number_rec->a,
              enter_add_number_rec->b);
          rtems_object_get_classic_name(recs->task_id, &name);
#ifdef VERBOSE
          /* Print record */
          printf("Time: %"PRIu64"us Task: %4s => Add %"PRIu32" and"
              " %"PRIu32"\n", recs->time/1000, (char*)&name,
              enter_add_number_rec->a, enter_add_number_rec->b);
#endif
          break;

        case exit_add_number:
          rtems_test_assert(enter_count == exit_count + 1);
          exit_count++;

          exit_add_number_rec = (exit_add_number_record_t*)record;
          /* Verify that the result matches the expected result */
          rtems_test_assert(res_should_be == exit_add_number_rec->res);
#ifdef VERBOSE
          /* Print record */
          rtems_object_get_classic_name(recs->task_id, &name);
          printf("Time: %"PRIu64"us Task: %4s => Result is %"PRIu32"\n",
              recs->time/1000, (char*)&name, exit_add_number_rec->res);
#endif
          break;

        case clock_tick:
          clock_tick_count++;
#ifdef VERBOSE
          rtems_object_get_classic_name(recs->task_id, &name);
          printf("Time: %"PRIu64"us Task: %4s => Clock tick\n",
              recs->time/1000, (char*)&name);
#endif
          break;

        default:
          /* Unknown record id — the trace is corrupt */
          rtems_test_assert(0);
        }
      }

      /* Records are variable-sized; advance by the stored size */
      prev_rec = recs;
      recs = (rtems_capture_record_t*) ((char*) recs + recs->size);
    }

    rtems_test_assert(enter_count == exit_count);
    rtems_test_assert(enter_count == TASKS_PER_CPU * ITERATIONS);

    rtems_capture_release(cpu, read);
  }

  if ( cih.found )
    rtems_test_assert(clock_tick_count == cpu_count * CLOCK_TICKS);

  TEST_END();
  rtems_test_exit(0);
}