static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	struct pt_regs *regs = args->regs;

	switch (cmd) {
	case DIE_NMI:
		if (atomic_read(&kgdb_active) != -1) {
			/* KGDB CPU roundup */
			kgdb_nmicallback(raw_smp_processor_id(), regs);
			was_in_debug_nmi[raw_smp_processor_id()] = 1;
			touch_nmi_watchdog();
			return NOTIFY_STOP;
		}
		return NOTIFY_DONE;

	case DIE_NMI_IPI:
		/* Just ignore, we will handle the roundup on DIE_NMI. */
		return NOTIFY_DONE;

	case DIE_NMIUNKNOWN:
		if (was_in_debug_nmi[raw_smp_processor_id()]) {
			was_in_debug_nmi[raw_smp_processor_id()] = 0;
			return NOTIFY_STOP;
		}
		return NOTIFY_DONE;

	case DIE_NMIWATCHDOG:
		if (atomic_read(&kgdb_active) != -1) {
			/* KGDB CPU roundup: */
			kgdb_nmicallback(raw_smp_processor_id(), regs);
			return NOTIFY_STOP;
		}
		/* Enter debugger: */
		break;

	case DIE_DEBUG:
		if (atomic_read(&kgdb_cpu_doing_single_step) ==
		    raw_smp_processor_id()) {
			if (user_mode(regs))
				return single_step_cont(regs, args);
			break;
		} else if (test_thread_flag(TIF_SINGLESTEP))
			/*
			 * This means a user thread is single stepping
			 * a system call which should be ignored
			 */
			return NOTIFY_DONE;
		/* fall through */
	default:
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs))
		return NOTIFY_DONE;

	/* Must touch watchdog before return to normal operation */
	touch_nmi_watchdog();
	return NOTIFY_STOP;
}
/*
 * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
 * then try to fall into the debugger.
 */
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
			    void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	int trap = (regs->cp0_cause & 0x7c) >> 2;

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_setting_breakpoint))
		if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
			regs->cp0_epc += 4;

	/* In SMP mode, __flush_cache_all() does the IPI */
	local_irq_enable();
	__flush_cache_all();

	return NOTIFY_STOP;
}
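For context, a die notifier such as kgdb_mips_notify() only runs after it has been hooked into the die notifier chain. A minimal sketch of that wiring follows, assuming the usual struct notifier_block plus register_die_notifier() pattern; the kgdb_notifier name and the placement inside kgdb_arch_init() are illustrative rather than taken from the listing above.

/*
 * Sketch: wiring the notifier into the die chain. Only
 * register_die_notifier() is the established kernel API here; the
 * notifier_block name and kgdb_arch_init() placement are assumptions.
 */
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_mips_notify,
};

int kgdb_arch_init(void)
{
	/* Receive die() events (traps, breakpoints) via the notifier chain. */
	return register_die_notifier(&kgdb_notifier);
}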
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	/* CPU roundup */
	if (atomic_read(&kgdb_active) != -1) {
		kgdb_nmicallback(smp_processor_id(), args->regs);
		return NOTIFY_STOP;
	}

	if (user_mode(args->regs))
		return NOTIFY_DONE;

	if (kgdb_handle_exception(args->trapnr & 0xff, args->signr, args->err,
				  args->regs))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), NULL);
}
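These hooks take a void * argument because they are typically passed to smp_call_function() when the CPU that hit the breakpoint rounds up the other CPUs. A minimal sketch of that driver follows, assuming the classic per-arch kgdb_roundup_cpus(unsigned long flags) form seen in several ports; treat it as an illustrative shape, not the exact code of any one architecture.

/*
 * Sketch: how the hook above is usually driven. Several arch ports
 * implement kgdb_roundup_cpus() roughly like this (assumed form).
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	/* IPI every other CPU; each parks itself via kgdb_nmicallback(). */
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}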
void kgdb_passive_cpu_callback(void *info)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
void nlm_kgdb_call_nmi_hook(void)
{
	kgdb_nmicallback(raw_smp_processor_id(), NULL);
}
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
	kgdb_nmicallback(raw_smp_processor_id(), regs);
	return 0;
}
void debugger_nmi_interrupt(struct pt_regs *regs, enum exception_code code)
{
	kgdb_nmicallback(arch_smp_processor_id(), regs);
	debugger_local_cache_flushinv();
}