void mutex_bench(void) { u32_t mutex_lock_start_tsc; u32_t mutex_lock_end_tsc; u32_t mutex_lock_diff = 0; u32_t mutex_unlock_start_tsc; u32_t mutex_unlock_end_tsc; u32_t mutex_unlock_diff = 0; for (int i = 0; i < 1000; i++) { mutex_lock_start_tsc = OS_GET_TIME(); k_mutex_lock(&mutex0, 100); mutex_lock_end_tsc = OS_GET_TIME(); mutex_unlock_start_tsc = OS_GET_TIME(); k_mutex_unlock(&mutex0); mutex_unlock_end_tsc = OS_GET_TIME(); mutex_lock_diff += (mutex_lock_end_tsc - mutex_lock_start_tsc); mutex_unlock_diff += (mutex_unlock_end_tsc - mutex_unlock_start_tsc); } PRINT_F("Mutex lock", mutex_lock_diff / 1000, SYS_CLOCK_HW_CYCLES_TO_NS(mutex_lock_diff / 1000)); PRINT_F("Mutex unlock", mutex_unlock_diff / 1000, SYS_CLOCK_HW_CYCLES_TO_NS(mutex_unlock_diff / 1000)); }
/** * * @brief Dumps interrupt latency values * * The interrupt latency value measures * * @return N/A * */ void int_latency_show(void) { u32_t intHandlerLatency = 0; if (!int_latency_bench_ready) { printk("error: int_latency_init() has not been invoked\n"); return; } if (int_locked_latency_min != ULONG_MAX) { if (_hw_irq_to_c_handler_latency == ULONG_MAX) { intHandlerLatency = 0; printk(" Min latency from hw interrupt up to 'C' int. " "handler: " "not measured\n"); } else { intHandlerLatency = _hw_irq_to_c_handler_latency; printk(" Min latency from hw interrupt up to 'C' int. " "handler:" " %d tcs = %d nsec\n", intHandlerLatency, SYS_CLOCK_HW_CYCLES_TO_NS(intHandlerLatency)); } printk(" Max interrupt latency (includes hw int. to 'C' " "handler):" " %d tcs = %d nsec\n", int_locked_latency_max + intHandlerLatency, SYS_CLOCK_HW_CYCLES_TO_NS(int_locked_latency_max + intHandlerLatency)); printk(" Overhead substracted from Max int. latency:\n" " for int. lock : %d tcs = %d nsec\n" " each time int. lock nest: %d tcs = %d nsec\n" " for int. unlocked : %d tcs = %d nsec\n", initial_start_delay, SYS_CLOCK_HW_CYCLES_TO_NS(initial_start_delay), nesting_delay, SYS_CLOCK_HW_CYCLES_TO_NS(nesting_delay), stop_delay, SYS_CLOCK_HW_CYCLES_TO_NS(stop_delay)); } else { printk("interrupts were not locked and unlocked yet\n"); } /* * Lets start with new values so that one extra long path executed * with interrupt disabled hide smaller paths with interrupt * disabled. */ int_locked_latency_min = ULONG_MAX; int_locked_latency_max = 0; }
/**
 * @brief The test main function
 *
 * Measures the time to switch from a fiber into ISR execution and
 * reports the result in cycles and nanoseconds.
 *
 * @return 0 on success
 */
int nanoIntLatency(void)
{
	PRINT_FORMAT(" 1- Measure time to switch from fiber to ISR execution");

	/* Align to a tick boundary, then run the interrupting fiber;
	 * fiberInt populates the global 'timestamp'.
	 */
	TICK_SYNCH();
	task_fiber_start(&fiberStack[0], STACKSIZE,
			 (nano_fiber_entry_t) fiberInt, 0, 0, 6, 0);

	PRINT_FORMAT(" switching time is %lu tcs = %lu nsec",
		     timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp));

	return 0;
}
/**
 * @brief The test main function
 *
 * Measures the time from an ISR to execution of a different
 * (rescheduled) thread and prints it in cycles and nanoseconds.
 *
 * @return 0 on success
 */
int int_to_thread_evt(void)
{
	PRINT_FORMAT(" 2 - Measure time from ISR to executing a different thread"
		     " (rescheduled)");

	/* Sync to a tick, trigger the ISR path via the semaphore, then
	 * block on the alert until the rescheduled thread completes.
	 */
	TICK_SYNCH();
	k_sem_give(&INTSEMA);
	k_alert_recv(&EVENT0, K_FOREVER);

	timestamp = TIME_STAMP_DELTA_GET(timestamp);
	PRINT_FORMAT(" switch time is %u tcs = %u nsec",
		     timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp));

	return 0;
}
/**
 * @brief The test main function
 *
 * Measures the time to switch from an ISR back to the interrupted
 * fiber; the result is printed only when the fiber signalled success
 * via flagVar.
 *
 * @return 0 on success
 */
int nanoIntToFiber(void)
{
	PRINT_FORMAT(" 2- Measure time to switch from ISR back to interrupted"
		     " fiber");

	TICK_SYNCH();
	task_fiber_start(&fiberStack[0], STACKSIZE,
			 (nano_fiber_entry_t) fiberInt, 0, 0, 6, 0);

	/* Guard clause: only report when the measurement actually ran */
	if (flagVar != 1) {
		return 0;
	}

	PRINT_FORMAT(" switching time is %lu tcs = %lu nsec",
		     timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp));
	return 0;
}
/** * * @brief The test main function * * @return 0 on success */ int nanoIntToFiberSem(void) { PRINT_FORMAT(" 3- Measure time from ISR to executing a different fiber" " (rescheduled)"); nano_sem_init(&testSema); TICK_SYNCH(); task_fiber_start(&waiterStack[0], STACKSIZE, (nano_fiber_entry_t) fiberWaiter, 0, 0, 5, 0); task_fiber_start(&intStack[0], STACKSIZE, (nano_fiber_entry_t) fiberInt, 0, 0, 6, 0); PRINT_FORMAT(" switching time is %lu tcs = %lu nsec", timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); return 0; }
void semaphore_bench(void) { /* Thread yield*/ sem0_tid = k_thread_create(&my_thread, my_stack_area, STACK_SIZE, thread_sem0_test, NULL, NULL, NULL, 2 /*priority*/, 0, 0); sem1_tid = k_thread_create(&my_thread_0, my_stack_area_0, STACK_SIZE, thread_sem1_test, NULL, NULL, NULL, 2 /*priority*/, 0, 0); k_sleep(1000); /* u64_t test_time1 = _tsc_read(); */ sem_end_time = (__common_var_swap_end_tsc); u32_t sem_cycles = sem_end_time - sem_start_time; sem0_tid = k_thread_create(&my_thread, my_stack_area, STACK_SIZE, thread_sem0_give_test, NULL, NULL, NULL, 2 /*priority*/, 0, 0); sem1_tid = k_thread_create(&my_thread_0, my_stack_area_0, STACK_SIZE, thread_sem1_give_test, NULL, NULL, NULL, 2 /*priority*/, 0, 0); k_sleep(1000); sem_give_end_time = (__common_var_swap_end_tsc); u32_t sem_give_cycles = sem_give_end_time - sem_give_start_time; /* Semaphore without context switch*/ u32_t sem_give_wo_cxt_start = OS_GET_TIME(); k_sem_give(&sem_bench); u32_t sem_give_wo_cxt_end = OS_GET_TIME(); u32_t sem_give_wo_cxt_cycles = sem_give_wo_cxt_end - sem_give_wo_cxt_start; u32_t sem_take_wo_cxt_start = OS_GET_TIME(); k_sem_take(&sem_bench, 10); u32_t sem_take_wo_cxt_end = OS_GET_TIME(); u32_t sem_take_wo_cxt_cycles = sem_take_wo_cxt_end - sem_take_wo_cxt_start; /* TC_PRINT("test_time1 , %d cycles\n", (u32_t)test_time1); */ /* TC_PRINT("test_time2 , %d cycles\n", (u32_t)test_time2); */ PRINT_F("Semaphore Take with context switch", sem_cycles, SYS_CLOCK_HW_CYCLES_TO_NS(sem_cycles)); PRINT_F("Semaphore Give with context switch", sem_give_cycles, SYS_CLOCK_HW_CYCLES_TO_NS(sem_give_cycles)); PRINT_F("Semaphore Take without context switch", sem_take_wo_cxt_cycles, SYS_CLOCK_HW_CYCLES_TO_NS(sem_take_wo_cxt_cycles)); PRINT_F("Semaphore Give without context switch", sem_give_wo_cxt_cycles, SYS_CLOCK_HW_CYCLES_TO_NS(sem_give_wo_cxt_cycles)); }