/* * kgdb_handle_exception() - main entry point from a kernel exception * * Locking hierarchy: * interface locks, if any (begin_session) * kgdb lock (kgdb_active) */ int kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) { struct kgdb_state kgdb_var; struct kgdb_state *ks = &kgdb_var; int ret = 0; if (arch_kgdb_ops.enable_nmi) arch_kgdb_ops.enable_nmi(0); if (unlikely(signo != SIGTRAP && !break_on_exception)) return 1; ks->cpu = raw_smp_processor_id(); ks->ex_vector = evector; ks->signo = signo; ks->err_code = ecode; ks->kgdb_usethreadid = 0; ks->linux_regs = regs; if (kgdb_reenter_check(ks)) goto out; /* Ouch, double exception ! */ if (kgdb_info[ks->cpu].enter_kgdb != 0) goto out; ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER); out: if (arch_kgdb_ops.enable_nmi) arch_kgdb_ops.enable_nmi(1); return ret; }
/* * kgdb_handle_exception() - main entry point from a kernel exception * * Locking hierarchy: * interface locks, if any (begin_session) * kgdb lock (kgdb_active) */ int kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) { struct kgdb_state kgdb_var; struct kgdb_state *ks = &kgdb_var; ks->cpu = raw_smp_processor_id(); ks->ex_vector = evector; ks->signo = signo; ks->err_code = ecode; ks->kgdb_usethreadid = 0; ks->linux_regs = regs; if (kgdb_reenter_check(ks)) return 0; /* Ouch, double exception ! */ if (kgdb_info[ks->cpu].enter_kgdb != 0) return 0; return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER); }
/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret;

	/* Snapshot the exception context for the debugger core. */
	ks->cpu = raw_smp_processor_id();
	ks->ex_vector = evector;
	ks->signo = signo;
	ks->err_code = ecode;
	ks->kgdb_usethreadid = 0;
	ks->linux_regs = regs;

	if (kgdb_reenter_check(ks))
		return 0; /* Ouch, double exception ! */

	/*
	 * Flag this CPU as wanting to be the master before entering the
	 * core; presumably other CPUs observe exception_state to decide
	 * master/slave roles -- the set/clear ordering around
	 * kgdb_cpu_enter() looks deliberate, so do not reorder.
	 * (NOTE(review): exact rendezvous protocol lives in
	 * kgdb_cpu_enter(), not visible here.)
	 */
	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
	ret = kgdb_cpu_enter(ks, regs);
	/* Drop both master and slave markers on the way out. */
	kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);

	return ret;
}
/* * kgdb_handle_exception() - main entry point from a kernel exception * * Locking hierarchy: * interface locks, if any (begin_session) * kgdb lock (kgdb_active) */ int kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) { struct kgdb_state kgdb_var; struct kgdb_state *ks = &kgdb_var; int ret = 0; if (arch_kgdb_ops.enable_nmi) arch_kgdb_ops.enable_nmi(0); /* * Avoid entering the debugger if we were triggered due to an oops * but panic_timeout indicates the system should automatically * reboot on panic. We don't want to get stuck waiting for input * on such systems, especially if its "just" an oops. */ if (signo != SIGTRAP && panic_timeout) return 1; memset(ks, 0, sizeof(struct kgdb_state)); ks->cpu = raw_smp_processor_id(); ks->ex_vector = evector; ks->signo = signo; ks->err_code = ecode; ks->linux_regs = regs; if (kgdb_reenter_check(ks)) goto out; /* Ouch, double exception ! */ if (kgdb_info[ks->cpu].enter_kgdb != 0) goto out; ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER); out: if (arch_kgdb_ops.enable_nmi) arch_kgdb_ops.enable_nmi(1); return ret; }