FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr) { struct _x86_syscall_stack_frame *ssf = (struct _x86_syscall_stack_frame *)ssf_ptr; NANO_ESF oops_esf = { .eip = ssf->eip, .cs = ssf->cs, .eflags = ssf->eflags }; if (oops_esf.cs == USER_CODE_SEG) { oops_esf.esp = ssf->esp; } _NanoFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, &oops_esf); } #ifdef CONFIG_X86_KERNEL_OOPS /* The reason code gets pushed onto the stack right before the exception is * triggered, so it would be after the nano_esf data */ struct oops_esf { NANO_ESF nano_esf; unsigned int reason; }; FUNC_NORETURN void _do_kernel_oops(const struct oops_esf *esf) { _NanoFatalErrorHandler(esf->reason, &esf->nano_esf); }
/* * Common thread entry point function (used by all threads) * * This routine invokes the actual thread entry point function and passes * it three arguments. It also handles graceful termination of the thread * if the entry point function ever returns. * * This routine does not return, and is marked as such so the compiler won't * generate preamble code that is only used by functions that actually return. */ FUNC_NORETURN void _thread_entry(void (*entry)(void *, void *, void *), void *p1, void *p2, void *p3) { entry(p1, p2, p3); #ifdef CONFIG_MULTITHREADING if (_is_thread_essential()) { _NanoFatalErrorHandler(_NANO_ERR_INVALID_TASK_EXIT, &_default_esf); } k_thread_abort(_current); #else for (;;) { k_cpu_idle(); } #endif /* * Compiler can't tell that k_thread_abort() won't return and issues a * warning unless we tell it that control never gets this far. */ CODE_UNREACHABLE; }
/**
 * @brief Catch-all CPU exception handler
 *
 * Prints the exception vector number — and the hardware-pushed error code,
 * for those vectors that supply one — then reports a fatal CPU exception.
 *
 * @param vector Exception vector number
 * @param pEsf   Exception stack frame captured at entry
 */
static FUNC_NORETURN void generic_exc_handle(unsigned int vector,
					     const NANO_ESF *pEsf)
{
	printk("***** CPU exception %d\n", vector);

	/*
	 * Use an unsigned shift: (1 << 31) left-shifts a signed int into the
	 * sign bit, which is undefined behavior, and vector 31 is a valid
	 * exception slot. CPU exception vectors are all < 32, so the shift
	 * amount itself is in range.
	 */
	if (((1U << vector) & _EXC_ERROR_CODE_FAULTS) != 0U) {
		printk("***** Exception code: 0x%x\n", pEsf->errorCode);
	}
	_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
}
/* * @brief Get first element from lifo and panic if NULL * * Get the first element from the specified lifo but generate a fatal error * if the element is NULL. * * @param lifo LIFO from which to receive. * * @return Pointer to first element in the list * * \NOMANUAL */ void *_nano_fiber_lifo_get_panic(struct nano_lifo *lifo) { void *element; element = nano_fiber_lifo_get(lifo); if (element == NULL) { _NanoFatalErrorHandler(_NANO_ERR_ALLOCATION_FAIL, &_default_esf); } return element; }
FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr) { struct _x86_syscall_stack_frame *ssf = (struct _x86_syscall_stack_frame *)ssf_ptr; NANO_ESF oops = { .eip = ssf->eip, .cs = ssf->cs, .eflags = ssf->eflags }; if (oops.cs == USER_CODE_SEG) { oops.esp = ssf->esp; } _NanoFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, &oops); } #ifdef CONFIG_X86_KERNEL_OOPS FUNC_NORETURN void _do_kernel_oops(const NANO_ESF *esf) { u32_t *stack_ptr = (u32_t *)esf->esp; _NanoFatalErrorHandler(*stack_ptr, esf); }
/**
 * @brief Catch-all CPU exception handler
 *
 * Prints a human-readable name for a few well-known vectors (falling back
 * to the raw vector number), dumps the hardware error code for vectors
 * that push one, then reports a fatal CPU exception.
 *
 * @param vector Exception vector number
 * @param pEsf   Exception stack frame captured at entry
 */
static FUNC_NORETURN void generic_exc_handle(unsigned int vector,
					     const NANO_ESF *pEsf)
{
	printk("***** ");
	if (vector == IV_GENERAL_PROTECTION) {
		printk("General Protection Fault\n");
	} else if (vector == IV_DEVICE_NOT_AVAILABLE) {
		printk("Floating point unit not enabled\n");
	} else {
		printk("CPU exception %d\n", vector);
	}

	/* Only some vectors push an error code onto the stack */
	if ((BIT(vector) & _EXC_ERROR_CODE_FAULTS) != 0) {
		printk("***** Exception code: 0x%x\n", pEsf->errorCode);
	}
	_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
}
/**
 * @brief Fatal handler for a software-triggered kernel oops
 *
 * Forwards the reason code found in the first saved register (r0) of the
 * exception stack frame to the fatal error handler.
 *
 * NOTE(review): presumably the oops entry stub places the reason code in
 * r0 before triggering the exception — confirm against the asm caller.
 *
 * @param esf Exception stack frame
 */
void _do_kernel_oops(const NANO_ESF *esf)
{
	_NanoFatalErrorHandler(esf->r0, esf);
}