/** * * @brief Test the k_cpu_idle() routine * * This tests the k_cpu_idle() routine. The first thing it does is align to * a tick boundary. The only source of interrupts while the test is running is * expected to be the tick clock timer which should wake the CPU. Thus after * each call to k_cpu_idle(), the tick count should be one higher. * * @return TC_PASS on success * @return TC_FAIL on failure */ static int test_kernel_cpu_idle(int atomic) { int tms; /* current time in millisecond */ int i; /* loop variable */ /* Align to a "ms boundary". */ tms = k_uptime_get_32(); while (tms == k_uptime_get_32()) { } tms = k_uptime_get_32(); for (i = 0; i < 5; i++) { /* Repeat the test five times */ if (atomic) { unsigned int key = irq_lock(); k_cpu_atomic_idle(key); } else { k_cpu_idle(); } /* calculating milliseconds per tick*/ tms += sys_clock_us_per_tick / USEC_PER_MSEC; if (k_uptime_get_32() < tms) { return TC_FAIL; } } return TC_PASS; }
/**
 *
 * @brief Fatal error handler
 *
 * This routine implements the corrective action to be taken when the system
 * detects a fatal error.
 *
 * This sample implementation attempts to abort the current thread and allow
 * the system to continue executing, which may permit the system to continue
 * functioning with degraded capabilities.
 *
 * System designers may wish to enhance or substitute this sample
 * implementation to take other actions, such as logging error (or debug)
 * information to a persistent repository and/or rebooting the system.
 *
 * @param reason fatal error reason
 * @param pEsf pointer to exception stack frame
 *
 * @return This function does not return.
 */
void __weak z_SysFatalErrorHandler(unsigned int reason,
				   const NANO_ESF *pEsf)
{
	ARG_UNUSED(pEsf);

#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
#ifdef CONFIG_STACK_SENTINEL
	/* A stack check failure means the thread state may be corrupt:
	 * don't try to abort the thread, just stop the system.
	 */
	if (reason == _NANO_ERR_STACK_CHK_FAIL) {
		goto hang_system;
	}
#endif
	/* An explicit kernel panic always hangs the system. */
	if (reason == _NANO_ERR_KERNEL_PANIC) {
		goto hang_system;
	}
	/* ISRs and essential threads cannot be aborted; the system
	 * cannot recover from a fault in either, so spin instead.
	 */
	if (k_is_in_isr() || z_is_thread_essential()) {
		printk("Fatal fault in %s! Spinning...\n",
		       k_is_in_isr() ? "ISR" : "essential thread");
		goto hang_system;
	}
	/* Non-essential thread context: abort just that thread and let
	 * the rest of the system keep running.
	 */
	printk("Fatal fault in thread %p! Aborting.\n", _current);
	k_thread_abort(_current);

	return;

hang_system:
#else
	ARG_UNUSED(reason);
#endif

	/* Unrecoverable: idle the CPU forever. With the simple fatal
	 * error handler configured, every reason lands here.
	 */
	for (;;) {
		k_cpu_idle();
	}
	CODE_UNREACHABLE;
}
/** * * @brief Test the k_cpu_idle() routine * * This tests the k_cpu_idle() routine. The first thing it does is align to * a tick boundary. The only source of interrupts while the test is running is * expected to be the tick clock timer which should wake the CPU. Thus after * each call to k_cpu_idle(), the tick count should be one higher. * * @return TC_PASS on success * @return TC_FAIL on failure */ static int test_kernel_cpu_idle(int atomic) { int tms, tms2;; /* current time in millisecond */ int i; /* loop variable */ /* Align to a "ms boundary". */ tms = k_uptime_get_32(); while (tms == k_uptime_get_32()) { } tms = k_uptime_get_32(); for (i = 0; i < 5; i++) { /* Repeat the test five times */ if (atomic) { unsigned int key = irq_lock(); k_cpu_atomic_idle(key); } else { k_cpu_idle(); } /* calculating milliseconds per tick*/ tms += sys_clock_us_per_tick / USEC_PER_MSEC; tms2 = k_uptime_get_32(); if (tms2 < tms) { TC_ERROR("Bad ms per tick value computed, got %d which is less than %d\n", tms2, tms); return TC_FAIL; } } return TC_PASS; }
/*
 * Common thread entry point function (used by all threads)
 *
 * This routine invokes the actual thread entry point function and passes
 * it three arguments. It also handles graceful termination of the thread
 * if the entry point function ever returns.
 *
 * This routine does not return, and is marked as such so the compiler won't
 * generate preamble code that is only used by functions that actually return.
 */
FUNC_NORETURN void _thread_entry(void (*entry)(void *, void *, void *),
				 void *p1, void *p2, void *p3)
{
	entry(p1, p2, p3);

#ifdef CONFIG_MULTITHREADING
	/* An essential thread returning from its entry point is a fatal
	 * error: the system is not expected to function without it.
	 */
	if (_is_thread_essential()) {
		_NanoFatalErrorHandler(_NANO_ERR_INVALID_TASK_EXIT,
				       &_default_esf);
	}

	/* Normal graceful termination: the thread aborts itself. */
	k_thread_abort(_current);
#else
	/* Without multithreading there is no other context to switch to,
	 * so just idle the CPU forever.
	 */
	for (;;) {
		k_cpu_idle();
	}
#endif

	/*
	 * Compiler can't tell that k_thread_abort() won't return and issues a
	 * warning unless we tell it that control never gets this far.
	 */
	CODE_UNREACHABLE;
}
/*
 * Enter the power-saving idle state for the given number of ticks.
 *
 * Optionally suppresses timer interrupts (tickless idle) and gives the SoC
 * power-management hook a chance to enter a low power state; otherwise falls
 * back to the ordinary k_cpu_idle() path. Entered with interrupts disabled.
 *
 * @param ticks number of ticks the kernel expects to remain idle,
 *              or K_FOREVER
 */
static void _sys_power_save_idle(int32_t ticks __unused)
{
#if defined(CONFIG_TICKLESS_IDLE)
	if ((ticks == K_FOREVER) || ticks >= _sys_idle_threshold_ticks) {
		/*
		 * Stop generating system timer interrupts until it's time for
		 * the next scheduled kernel timer to expire.
		 */
		_timer_idle_enter(ticks);
	}
#endif /* CONFIG_TICKLESS_IDLE */

	set_kernel_idle_time_in_ticks(ticks);
#if (defined(CONFIG_SYS_POWER_LOW_POWER_STATE) || \
	defined(CONFIG_SYS_POWER_DEEP_SLEEP))
	_sys_pm_idle_exit_notify = 1;

	/*
	 * Call the suspend hook function of the soc interface to allow
	 * entry into a low power state. The function returns
	 * SYS_PM_NOT_HANDLED if low power state was not entered, in which
	 * case, kernel does normal idle processing.
	 *
	 * This function is entered with interrupts disabled. If a low power
	 * state was entered, then the hook function should enable interrupts
	 * before exiting. This is because the kernel does not do its own idle
	 * processing in those cases i.e. skips k_cpu_idle(). The kernel's
	 * idle processing re-enables interrupts which is essential for
	 * the kernel's scheduling logic.
	 */
	if (_sys_soc_suspend(ticks) == SYS_PM_NOT_HANDLED) {
		_sys_pm_idle_exit_notify = 0;
		k_cpu_idle();
	}
#else
	k_cpu_idle();
#endif
}
/*
 * Reboot the system.
 *
 * Masks interrupts, stops the system clock, then hands control to the
 * architecture-specific reboot hook. Should that hook ever return, the
 * failure is reported and the CPU is idled forever.
 *
 * @param type reboot type, forwarded to sys_arch_reboot()
 */
void sys_reboot(int type)
{
	/* Nothing may preempt the reboot sequence. */
	(void)irq_lock();
	sys_clock_disable();

	sys_arch_reboot(type);

	/* sys_arch_reboot() is expected not to return; if it does, all
	 * that is left to do is report the failure and park the CPU.
	 */
	printk("Failed to reboot: spinning endlessly...\n");
	while (1) {
		k_cpu_idle();
	}
}