/** * * @brief Process request to read the processor workload * * Computes workload, or uses 0 if workload monitoring is not configured. * * @return N/A */ void _k_workload_get(struct k_args *P) { unsigned int k, t; signed int iret; k = (_k_workload_i - _k_workload_n0) * _k_workload_ref_time; #ifdef WL_SCALE t = (sys_cycle_get_32() - _k_workload_t0) >> (_k_workload_scale); #else t = (sys_cycle_get_32() - _k_workload_t0) >> (4 + 6); #endif iret = MSEC_PER_SEC - k / t; /* * Due to calibration at startup, <iret> could be slightly negative. * Ensure a negative value is never returned. */ if (iret < 0) { iret = 0; } P->args.u1.rval = iret; }
/*
 * Log a "CPU exited sleep" event: elapsed sleep time (in ticks) and the
 * interrupt that caused the wake-up.  Does nothing if sleep events are
 * not being logged, or if no matching "enter sleep" was recorded.
 */
void _sys_k_event_logger_exit_sleep(void)
{
	uint32_t data[3];

	if (!sys_k_must_log_event(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID)) {
		return;
	}

	/* a zero start time means the CPU was not sleeping */
	if (_sys_k_event_logger_sleep_start_time == 0) {
		return;
	}

	data[0] = _sys_k_get_time();
	data[1] = (sys_cycle_get_32() - _sys_k_event_logger_sleep_start_time)
		  / sys_clock_hw_cycles_per_tick;
	/* register the cause of exiting sleep mode */
	data[2] = _sys_current_irq_key_get();

	/*
	 * Clear the start time so this wake-up is processed only once and
	 * later interrupts are not misreported as waking the CPU.
	 */
	_sys_k_event_logger_sleep_start_time = 0;

	sys_k_event_logger_put(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID, data,
			       ARRAY_SIZE(data));
}
/*
 * Record the hardware cycle count at which the CPU goes to sleep, but
 * only when sleep events are configured to be logged.  The matching
 * exit-sleep handler consumes (and clears) this timestamp.
 */
void _sys_k_event_logger_enter_sleep(void)
{
	if (sys_k_must_log_event(KERNEL_EVENT_LOGGER_SLEEP_EVENT_ID)) {
		_sys_k_event_logger_sleep_start_time = sys_cycle_get_32();
	}
}
/*
 * Initialize a upm_clock_t with the current time, for later use with
 * the elapsed-time helpers.  The representation is platform-specific:
 * a struct timeval on Linux, a raw hardware cycle count on Zephyr.
 */
void upm_clock_init(upm_clock_t *clock)
{
#if defined(UPM_PLATFORM_LINUX)
	/* wall-clock snapshot; return value intentionally ignored */
	gettimeofday(clock, NULL);
#elif defined(UPM_PLATFORM_ZEPHYR)
	/* free-running 32-bit hardware cycle counter */
	*clock = sys_cycle_get_32();
#endif
}
/**
 *
 * @brief Workload monitor tick handler
 *
 * If workload monitor is configured this routine updates the global variables
 * it uses to record the passage of time.
 *
 * @return N/A
 *
 */
void _k_workload_monitor_update(void)
{
	if (--_k_workload_ticks != 0) {
		return;
	}

	/* measurement slice expired: roll the window forward */
	_k_workload_t0 = _k_workload_t1;
	_k_workload_t1 = sys_cycle_get_32();
	_k_workload_n0 = _k_workload_n1;
	_k_workload_n1 = _k_workload_i - 1;
	_k_workload_ticks = _k_workload_slice;
}
/** * * @brief Calibrate the workload monitoring subsystem * * Measures the time required to do a fixed amount of "dummy work", and * sets default values for the workload measuring period. * * @return N/A * */ void _k_workload_monitor_calibrate(void) { _k_workload_n0 = _k_workload_i = 0; _k_workload_n1 = 1000; _k_workload_t0 = sys_cycle_get_32(); workload_loop(); _k_workload_t1 = sys_cycle_get_32(); _k_workload_delta = _k_workload_t1 - _k_workload_t0; _k_workload_i0 = _k_workload_i; #ifdef WL_SCALE _k_workload_ref_time = (_k_workload_t1 - _k_workload_t0) >> (_k_workload_scale); #else _k_workload_ref_time = (_k_workload_t1 - _k_workload_t0) >> (4 + 6); #endif _k_workload_slice = 100; _k_workload_ticks = 100; }
/*
 * Return the number of microseconds elapsed since *clock was initialized
 * with upm_clock_init().  Never returns 0, so callers can use the result
 * as a "has any time passed" flag or as a divisor.
 *
 * NOTE(review): the result is truncated to 32 bits, so it wraps after
 * roughly 71 minutes of elapsed time — confirm callers only measure
 * short intervals.
 */
uint32_t upm_elapsed_us(upm_clock_t *clock)
{
#if defined(UPM_PLATFORM_LINUX)
	struct timeval elapsed, now;
	uint32_t elapse;

	// get current time
	gettimeofday(&now, NULL);

	struct timeval startTime = *clock;

	// compute the delta since startTime
	// (borrow from tv_sec when the microsecond difference is negative)
	if( (elapsed.tv_usec = now.tv_usec - startTime.tv_usec) < 0 )
	{
		elapsed.tv_usec += 1000000;
		elapsed.tv_sec = now.tv_sec - startTime.tv_sec - 1;
	}
	else
	{
		elapsed.tv_sec = now.tv_sec - startTime.tv_sec;
	}

	elapse = (uint32_t)((elapsed.tv_sec * 1000000) + elapsed.tv_usec);

	// never return 0
	if (elapse == 0)
		elapse = 1;

	return elapse;
#elif defined(UPM_PLATFORM_ZEPHYR)
	uint32_t now = sys_cycle_get_32();

	// convert the cycle delta to nanoseconds, then scale to microseconds
	uint32_t elapsed =
		(uint32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(now - *clock)/(uint64_t)1000);

	// never return 0
	if (elapsed == 0)
		elapsed = 1;

	return elapsed;
#endif
}
/**
 *
 * @brief Workload monitor "end idling" handler
 *
 * Records time when idle task was no longer selected for execution by the
 * microkernel, and updates amount of time spent idling.
 *
 * @return N/A
 */
void _k_workload_monitor_idle_end(void)
{
	_k_workload_end_time = sys_cycle_get_32();

	/*
	 * Convert the idle duration (in cycles) into the equivalent amount
	 * of calibrated "dummy work" units and accumulate it.
	 * NOTE(review): assumes _k_workload_delta is non-zero, i.e. that
	 * _k_workload_monitor_calibrate() ran first — confirm ordering.
	 */
	_k_workload_i += (_k_workload_i0 *
		(_k_workload_end_time - _k_workload_start_time)) /
		_k_workload_delta;
}
/**
 *
 * @brief Workload monitor "start idling" handler
 *
 * Records time when idle task was selected for execution by the microkernel.
 *
 * @return N/A
 */
void _k_workload_monitor_idle_start(void)
{
	/* cycle-count snapshot consumed by _k_workload_monitor_idle_end() */
	_k_workload_start_time = sys_cycle_get_32();
}