/*
 * kgdb_reenter_check - detect and handle recursive entry into the debugger.
 *
 * Returns 0 when this is not a recursive entry (or when recovery from a
 * user-planted breakpoint succeeded and normal entry should proceed as if
 * nothing happened), 1 when the exception was consumed here and the caller
 * should bail out.  Panics if the debugger recursed more than one level.
 */
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	/* Not a re-entry unless this CPU already owns the debugger. */
	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the break point removed ok at the place exception
	 * occurred, try to recover and print a warning to the end
	 * user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		/* Recovered: drop the recursion level and resume normally. */
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
			addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	/*
	 * No breakpoint of ours at the fault address: genuine re-entry.
	 * Kill all breakpoints so we cannot trap again on the way out.
	 */
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	/* Pure-gdbstub builds get no second chance: stop the machine. */
	dump_stack();
	panic("Recursive entry to debugger");

	/* NOTREACHED when the panic above fires; kept for the compiler. */
	return 1;
}
/*
 * kgdb_cpu_enter - main entry/rendezvous point for a CPU taking a debug trap.
 *
 * Exactly one CPU becomes the "master" (holds kgdb_active) and runs the
 * kdb/gdbstub session; all other CPUs spin as slaves in the cpu_loop below
 * until released.  Runs with interrupts disabled for the whole session.
 * Returns the I/O/stub result code recorded in kgdb_info[cpu].ret_state.
 */
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
{
	unsigned long flags;
	int sstep_tries = 100;	/* bounded retries when deferring to the single-stepping CPU */
	int error;
	int i, cpu;
	int trace_on = 0;
acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
	/*
	 * Make sure the above info reaches the primary CPU before
	 * our cpu_in_kgdb[] flag setting does:
	 */
	atomic_inc(&cpu_in_kgdb[cpu]);

	/* Re-entry (exception_level == 1): go straight back to the stub. */
	if (exception_level == 1)
		goto cpu_master_loop;

	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			/* Hand-off from the previous master (cpu switch). */
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			/*
			 * NOTE(review): the cmpxchg installs this cpu when
			 * kgdb_active was -1 but only breaks on the NEXT pass,
			 * when the read-back equals cpu.  Looks intentional but
			 * worth confirming against upstream debug_core history.
			 */
			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
				break;
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			/* Slave: spin until the master drops our wait flag. */
			if (!atomic_read(&passive_cpu_wait[cpu]))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			atomic_dec(&cpu_in_kgdb[cpu]);
			/* Avoid spurious watchdog fires after the long stall. */
			touch_softlockup_watchdog_sync();
			clocksource_touch_watchdog();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		/* Wrong CPU for the step: release the lock and retry. */
		atomic_set(&kgdb_active, -1);
		touch_softlockup_watchdog_sync();
		clocksource_touch_watchdog();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	kgdb_disable_hw_debug(ks->linux_regs);

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step) {
		for (i = 0; i < NR_CPUS; i++)
			atomic_inc(&passive_cpu_wait[i]);
	}

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	for_each_online_cpu(i) {
		while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
			cpu_relax();
	}

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	/* Suspend ftrace while the debugger owns the machine. */
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			/* Toggle between kdb and the gdbstub front end. */
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			/* Hand mastership to another CPU and rejoin as slave. */
			dbg_cpu_switch(cpu, dbg_switch_cpu);
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	atomic_dec(&cpu_in_kgdb[ks->cpu]);

	if (!kgdb_single_step) {
		/* Release the slaves in reverse order of acquisition. */
		for (i = NR_CPUS-1; i >= 0; i--)
			atomic_dec(&passive_cpu_wait[i]);
		/*
		 * Wait till all the CPUs have quit from the debugger,
		 * but allow a CPU that hit an exception and is
		 * waiting to become the master to remain in the debug
		 * core.
		 */
		for_each_online_cpu(i) {
			while (kgdb_do_roundup &&
			       atomic_read(&cpu_in_kgdb[i]) &&
			       !(kgdb_info[i].exception_state &
				 DCPU_WANT_MASTER))
				cpu_relax();
		}
	}

kgdb_restore:
	/* Remember which pid was stepping so the retry filter above works. */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (trace_on)
		tracing_on();
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}
/*
 * kdb_stub - glue between the kgdb debug core and the kdb shell.
 *
 * Classifies why the debugger was entered (breakpoint, single step, cpu
 * switch, keyboard, oops), sets up the kdb global state, runs the kdb main
 * loop, then translates the requested continue state back into gdbstub
 * pseudo-packets ("s"/"c"/"e") so the core can resume the system.
 *
 * Returns DBG_PASS_EVENT to hand control to the gdbstub,
 * DBG_SWITCH_CPU_EVENT to move mastership to another CPU, or the
 * ret_state recorded for this cpu.
 */
int kdb_stub(struct kgdb_state *ks)
{
	int error = 0;
	kdb_bp_t *bp;
	unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	kdb_reason_t reason = KDB_REASON_OOPS;	/* default until proven otherwise */
	kdb_dbtrap_t db_result = KDB_DB_NOBPT;
	int i;

	if (KDB_STATE(REENTRY)) {
		/* Coming back after a DBG_SWITCH_CPU_EVENT round trip. */
		reason = KDB_REASON_SWITCH;
		KDB_STATE_CLEAR(REENTRY);
		addr = instruction_pointer(ks->linux_regs);
	}
	ks->pass_exception = 0;
	if (atomic_read(&kgdb_setting_breakpoint))
		reason = KDB_REASON_KEYBOARD;

	/* Did we stop on one of kdb's own enabled breakpoints? */
	for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
		if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
			reason = KDB_REASON_BREAK;
			db_result = KDB_DB_BPT;
			/* Rewind the PC to the breakpoint address if needed. */
			if (addr != instruction_pointer(ks->linux_regs))
				kgdb_arch_set_pc(ks->linux_regs, addr);
			break;
		}
	}
	if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
		/* Mark a matching slot so the breakpoint is re-armed later. */
		for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
			if (bp->bp_free)
				continue;
			if (bp->bp_addr == addr) {
				bp->bp_delay = 1;
				bp->bp_delayed = 1;
				/*
				 * SSBPT is set when the kernel debugger must single step a
				 * task in order to re-establish an instruction breakpoint
				 * which uses the instruction replacement mechanism.  It is
				 * cleared by any action that removes the need to single-step
				 * the breakpoint.
				 */
				reason = KDB_REASON_BREAK;
				db_result = KDB_DB_BPT;
				KDB_STATE_SET(SSBPT);
				break;
			}
		}
	}
	/* A SIGTRAP on vector 0 with no breakpoint match is a single step. */
	if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
	    ks->signo == SIGTRAP) {
		reason = KDB_REASON_SSTEP;
		db_result = KDB_DB_BPT;
	}
	/* Set initial kdb state variables */
	KDB_STATE_CLEAR(KGDB_TRANS);
	kdb_initial_cpu = ks->cpu;
	kdb_current_task = kgdb_info[ks->cpu].task;
	kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
	/* Remove any breakpoints as needed by kdb and clear single step */
	kdb_bp_remove();
	KDB_STATE_CLEAR(DOING_SS);
	KDB_STATE_CLEAR(DOING_SSB);
	KDB_STATE_SET(PAGER);
	/* zero out any offline cpu data */
	for_each_present_cpu(i) {
		if (!cpu_online(i)) {
			kgdb_info[i].debuggerinfo = NULL;
			kgdb_info[i].task = NULL;
		}
	}
	if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
		/* Fatal entry: pass the exception on after the session. */
		ks->pass_exception = 1;
		KDB_FLAG_SET(CATASTROPHIC);
	}
	/* NOTE(review): kdb_initial_cpu was already set above; this repeat
	 * looks redundant but is preserved verbatim. */
	kdb_initial_cpu = ks->cpu;
	if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
		/* Step taken only to re-arm a breakpoint: skip the shell. */
		KDB_STATE_CLEAR(SSBPT);
		KDB_STATE_CLEAR(DOING_SS);
	} else {
		/* Start kdb main loop */
		error = kdb_main_loop(KDB_REASON_ENTER, reason,
				      ks->err_code, db_result, ks->linux_regs);
	}
	/*
	 * Upon exit from the kdb main loop setup break points and restart
	 * the system based on the requested continue state
	 */
	kdb_initial_cpu = -1;
	kdb_current_task = NULL;
	kdb_current_regs = NULL;
	KDB_STATE_CLEAR(PAGER);
	kdbnearsym_cleanup();
	if (error == KDB_CMD_KGDB) {
		if (KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)) {
			/*
			 * This is interface glue which allows kdb to transition into
			 * the gdb stub.  In order to do this the '?' or '' gdb serial
			 * packet response is processed here.  And then control is
			 * passed to the gdbstub.
			 */
			if (KDB_STATE(DOING_KGDB))
				gdbstub_state(ks, "?");
			else
				gdbstub_state(ks, "");
			KDB_STATE_CLEAR(DOING_KGDB);
			KDB_STATE_CLEAR(DOING_KGDB2);
		}
		return DBG_PASS_EVENT;
	}
	kdb_bp_install(ks->linux_regs);
	dbg_activate_sw_breakpoints();
	/* Set the exit state to a single step or a continue */
	if (KDB_STATE(DOING_SS))
		gdbstub_state(ks, "s");
	else
		gdbstub_state(ks, "c");
	KDB_FLAG_CLEAR(CATASTROPHIC);
	/* Invoke arch specific exception handling prior to system resume */
	kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
	if (ks->pass_exception)
		kgdb_info[ks->cpu].ret_state = 1;
	if (error == KDB_CMD_CPU) {
		KDB_STATE_SET(REENTRY);
		/*
		 * Force clear the single step bit because kdb emulates this
		 * differently vs the gdbstub
		 */
		kgdb_single_step = 0;
		dbg_deactivate_sw_breakpoints();
		return DBG_SWITCH_CPU_EVENT;
	}
	return kgdb_info[ks->cpu].ret_state;
}
/*
 * kgdb_cpu_enter - master/slave rendezvous for a CPU entering the debug core
 * (raw-spinlock variant: dbg_master_lock serializes the master role and
 * dbg_slave_lock pins the slaves).
 *
 * @exception_state: DCPU_WANT_MASTER for the CPU that took the trap, or a
 * slave state for CPUs rounded up by the master.  Runs with interrupts off
 * for the whole session and returns kgdb_info[cpu].ret_state.
 */
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;	/* bounded retries when yielding to the stepping CPU */
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

#ifdef CONFIG_KGDB_KDB
	/* Vendor hack: a prior session requested a forced panic; don't
	 * re-enter the debugger, let the notifier chain continue. */
	if (force_panic) /* Force panic in previous KDB, so skip this time */
		return NOTIFY_DONE;
#endif

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	/* Count ourselves so the roundup wait below can see everyone. */
	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		/* Re-entry: try to claim mastership and go run the stub. */
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			/* Hand-off from the previous master (cpu switch). */
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			/* Slave spins until the master drops dbg_slave_lock. */
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			/* Order the state clears before the counter drop. */
			smp_mb__before_atomic_dec();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		/* Wrong CPU for the pending step: release and retry. */
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	/* Suspend ftrace while the debugger owns the machine. */
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			/* -1 from kdb_stub means "re-run the stub". */
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			/* Toggle between kdb and the gdbstub front end. */
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			/* Nominate the target CPU and rejoin the wait loop. */
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	/* Remember which pid was stepping so the retry filter above works. */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();
	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	/* Order the state clears before the counter drop. */
	smp_mb__before_atomic_dec();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

#ifdef CONFIG_KGDB_KDB
	/* If no user input, force trigger kernel panic here */
	if (force_panic) {
		/* NOTE(review): message typo ("Kernal") is a runtime string,
		 * left as-is here; the NULL write below deliberately crashes
		 * the machine to force a panic/dump. */
		printk("KDB : Force Kernal Panic ! \n");
		do {
			*(volatile int *)0 = 0;
		} while (1);
	}
#endif

	return kgdb_info[cpu].ret_state;
}