/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);

	if (!user_mode(regs))
		bug_type = report_bug(regs->pc, regs);
	if (bug_type != BUG_TRAP_TYPE_NONE)
		str = "Oops - BUG";

	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
			      unsigned int esr, struct pt_regs *regs)
{
#if defined(CONFIG_HTC_DEBUG_RTB)
	static int enable_logk_die = 1;
#endif

	if (fixup_exception(regs))
		return;

#if defined(CONFIG_HTC_DEBUG_RTB)
	if (enable_logk_die) {
		uncached_logk(LOGK_DIE, (void *)regs->pc);
		uncached_logk(LOGK_DIE, (void *)regs->regs[30]);
		uncached_logk(LOGK_DIE, (void *)addr);
		msm_rtb_disable();
		enable_logk_die = 0;
	}
#endif

	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
			      unsigned int esr, struct pt_regs *regs)
{
#if defined(CONFIG_HTC_DEBUG_RTB)
	static int enable_logk_die = 1;
#endif

	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

#if defined(CONFIG_HTC_DEBUG_RTB)
	if (enable_logk_die) {
		uncached_logk(LOGK_DIE, (void *)regs->pc);
		uncached_logk(LOGK_DIE, (void *)regs->regs[30]);
		uncached_logk(LOGK_DIE, (void *)addr);
		/* Disable RTB here to avoid weird recursive spinlock/printk behaviors */
		msm_rtb_disable();
		enable_logk_die = 0;
	}
#endif

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
	       smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);

	/*
	 * If we are in kernel we are probably nested up pretty bad
	 * and might as well get out now while we still can:
	 */
	//if (!user_mode_vm(regs)) {
	//	current->thread.trap_no = 2;
	//	crash_kexec(regs);
	//}

	bust_spinlocks(0);
	do_exit(SIGSEGV);
}
asmlinkage void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
{
	/*
	 * Since current-> is always on the stack, and we always switch
	 * the stack NMI-atomically, it's safe to use smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();
	u8 wdt, tmp;

	wdt = WDCTR & ~WDCTR_WDCNE;
	WDCTR = wdt;
	tmp = WDCTR;
	NMICR = NMICR_WDIF;

	nmi_count(cpu)++;
	kstat_this_cpu.irqs[NMIIRQ]++;
	sum = irq_stat[cpu].__irq_count;

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		watchdog_alert_counter++;
		if (watchdog_alert_counter == 5 * watchdog_hz) {
			spin_lock(&watchdog_print_lock);

			/*
			 * We are in trouble anyway, lets at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk(KERN_ERR
			       "NMI Watchdog detected LOCKUP on CPU%d,"
			       " pc %08lx, registers:\n",
			       cpu, regs->pc);
			show_registers(regs);
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&watchdog_print_lock);
			bust_spinlocks(0);
#ifdef CONFIG_GDBSTUB
			if (gdbstub_busy)
				gdbstub_exception(regs, excep);
			else
				gdbstub_intercept(regs, excep);
#endif
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		watchdog_alert_counter = 0;
	}

	WDCTR = wdt | WDCTR_WDRST;
	tmp = WDCTR;
	WDCTR = wdt | WDCTR_WDCNE;
	tmp = WDCTR;
}
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
void __noreturn die(const char *str, struct pt_regs *regs, long err,
		    unsigned long addr)
{
	static int die_counter;

	oops_enter();
	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);

	pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n",
	       str, err & 0xffff, trap_name(err & 0xffff), addr, ++die_counter);

	print_modules();
	show_regs(regs);

	pr_err("Process: %s (pid: %d, stack limit = %p)\n",
	       current->comm, task_pid_nr(current),
	       task_stack_page(current) + THREAD_SIZE);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	spin_unlock_irq(&die_lock);
	oops_exit();
	do_exit(SIGSEGV);
}
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
NORET_TYPE void panic(const char * fmt, ...)
{
	static char buf[1024];
	va_list args;
#if defined(CONFIG_ARCH_S390)
	unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif

	bust_spinlocks(1);
	va_start(args, fmt);
	vsprintf(buf, fmt, args);
	va_end(args);
	printk(KERN_EMERG "Kernel panic: %s\n", buf);
	if (in_interrupt())
		printk(KERN_EMERG "In interrupt handler - not syncing\n");
	else if (!current->pid)
		printk(KERN_EMERG "In idle task - not syncing\n");
	else
		sys_sync();
	bust_spinlocks(0);

#ifdef CONFIG_SMP
	smp_send_stop();
#endif

	notifier_call_chain(&panic_notifier_list, 0, NULL);

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked..
		 */
		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
		mdelay(panic_timeout * 1000);
		/*
		 * Should we run the reboot notifier? For the moment I'm
		 * choosing not to. It might crash, be corrupt or do
		 * more harm than good for other reasons.
		 */
		machine_restart(NULL);
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press L1-A */
		stop_a_enabled = 1;
		printk("Press L1-A to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_ARCH_S390)
	disabled_wait(caller);
#endif
	sti();
	for (;;) {
		CHECK_EMERGENCY_SYNC
	}
}
void nmi_watchdog_tick (struct pt_regs * regs)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();

	sum = irq_stat[cpu].apic_timer_irqs;

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, lets at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk("NMI Watchdog detected LOCKUP on CPU%d, eip %08lx, registers:\n", cpu, regs->eip);
			show_registers(regs);
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			bust_spinlocks(0);
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
			/*
			 * Only P6 based Pentium M need to re-unmask
			 * the apic vector but it doesn't hurt
			 * other P6 variant
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
	}
}
static int panic_exit(struct notifier_block *self, unsigned long unused1,
		      void *unused2)
{
	/* Flush console output, record a failing exit code and dump core. */
	bust_spinlocks(1);
	bust_spinlocks(0);
	uml_exitcode = 1;
	os_dump_core();
	return 0;
}
static int panic_exit(struct notifier_block *self, unsigned long unused1,
		      void *unused2)
{
	bust_spinlocks(1);
	show_regs(&(current->thread.regs));
	bust_spinlocks(0);
	uml_exitcode = 1;
	machine_halt();
	return(0);
}
void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx\n", str, err & 0xffff);
	show_registers(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
void nmi_watchdog_tick (struct pt_regs * regs)
{
	/*
	 * Since current-> is always on the stack, and we always switch
	 * the stack NMI-atomically, it's safe to use smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();

	sum = apic_timer_irqs[cpu];

#if defined(CONFIG_KGDB) && defined(CONFIG_SMP)
	if (atomic_read(&kgdb_lock)) {
		/*
		 * The machine is in kgdb, hold this cpu if already
		 * not held.
		 */
		if (!procindebug[cpu] && atomic_read(&kgdb_lock) != (cpu + 1)) {
			gdb_wait(regs);
		}
		alert_counter[cpu] = 0;
	} else
#endif
	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			CHK_REMOTE_DEBUG(2,SIGSEGV,0,regs,)
			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, lets at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk("NMI Watchdog detected LOCKUP on CPU%d, eip %08lx, registers:\n", cpu, regs->eip);
			show_registers(regs);
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			bust_spinlocks(0);
			do_exit(SIGSEGV);
		}
void NORET_TYPE die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk(KERN_ALERT "Oops: %s, sig: %ld [#%d]\n",
	       str, err, ++die_counter);
	printk(KERN_EMERG);

#ifdef CONFIG_PREEMPT
	printk(KERN_CONT "PREEMPT ");
#endif
#ifdef CONFIG_FRAME_POINTER
	printk(KERN_CONT "FRAME_POINTER ");
#endif
	if (current_cpu_data.features & AVR32_FEATURE_OCD) {
		unsigned long did = ocd_read(DID);
		printk(KERN_CONT "chip: 0x%03lx:0x%04lx rev %lu\n",
		       (did >> 1) & 0x7ff,
		       (did >> 12) & 0x7fff,
		       (did >> 28) & 0xf);
	} else {
unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	trace_hw_branch_oops();

	oops_enter();

	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	/* notify the hw-branch tracer so it may disable tracing and
	   add the last trace to the trace buffer -
	   the earlier this happens, the more useful the trace. */
	trace_hw_branch_oops();

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 */
	rewind_stack_do_exit(signr);
}
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	gr_handle_kernel_exploit();

	do_group_exit(signr);
}
void oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	spin_unlock_irqrestore(&die_lock, flags);
	if (panic_on_oops)
		panic("Oops");
}
void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
{
	int sum, cpu;

	cpu = safe_smp_processor_id();
	sum = read_pda(apic_timer_irqs);

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_BAD) {
				alert_counter[cpu] = 0;
				return;
			}
			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, lets at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
			show_registers(regs);
			if (panic_on_timeout || panic_on_oops)
				panic("nmi watchdog");
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			bust_spinlocks(0);
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr)
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
void nmi_watchdog_tick (struct pt_regs * regs)
{
	/*
	 * Since current-> is always on the stack, and we always switch
	 * the stack NMI-atomically, it's safe to use smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();

	sum = apic_timer_irqs[cpu];

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, lets at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk("NMI Watchdog detected LOCKUP on CPU%d, eip %08lx, registers:\n", cpu, regs->eip);
			show_registers(regs);
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			bust_spinlocks(0);
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr)
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
void die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);

	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	print_modules();
	show_regs(regs);

	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + 1);

	if (!user_mode(regs) || in_interrupt())
		dump_mem("Stack: ", regs->regs[15],
			 THREAD_SIZE + (unsigned long)task_stack_page(current));

	notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	oops_exit();

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(SIGSEGV);
}
/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
			      unsigned int esr, struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		return;

	console_verbose();
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
	       smp_processor_id(), regs->tpc);
	show_regs(regs);
	dump_stack();
	bust_spinlocks(0);

	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");

	local_irq_enable();
	do_exit(SIGBUS);
}
/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code :
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *======================================================================*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address)
{
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(" printing bpc:\n");
	printk(KERN_ALERT "bpc = %08lx\n", regs->bpc);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

#ifdef CONFIG_HUAWEI_PRINTK_CTRL
	printk_level_setup(LOGLEVEL_DEBUG);
#endif

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
#ifdef CONFIG_HISI_BB
	set_exception_info(instruction_pointer(regs));
#endif
	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);

#ifdef CONFIG_HUAWEI_PRINTK_CTRL
	printk_level_setup(sysctl_printk_level);
#endif
}
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

#ifdef CCI_KLOG_CRASH_SIZE
#if CCI_KLOG_CRASH_SIZE
	set_fault_state(FAULT_LEVEL_DIE, err, str);
#endif // #if CCI_KLOG_CRASH_SIZE
#endif // #ifdef CCI_KLOG_CRASH_SIZE

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	/* keep preemption/irq disabled in KE flow to prevent context switch */
	/*raw_spin_unlock_irq(&die_lock);*/
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
void notrace nmi_watchdog_tick (struct pt_regs * regs)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int cpu = smp_processor_id();

	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

	profile_tick(CPU_PROFILING, regs);
	if (nmi_show_regs[cpu]) {
		nmi_show_regs[cpu] = 0;
		spin_lock(&nmi_print_lock);
		printk("NMI show regs on CPU#%d:\n", cpu);
		show_regs(regs);
		spin_unlock(&nmi_print_lock);
	}

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] && !(alert_counter[cpu] % (5*nmi_hz))) {
			int i;

			bust_spinlocks(1);
			spin_lock(&nmi_print_lock);
			printk("NMI watchdog detected lockup on CPU#%d (%d/%d)\n",
				cpu, alert_counter[cpu], 5*nmi_hz);
			show_regs(regs);
			spin_unlock(&nmi_print_lock);

			for_each_online_cpu(i)
				if (i != cpu)
					nmi_show_regs[i] = 1;
			for_each_online_cpu(i)
				while (nmi_show_regs[i] == 1)
					barrier();

			die_nmi(regs, "NMI Watchdog detected LOCKUP");
		}