/*
 * Variant of panic() that carries a reason code and a saved machine
 * context into the debugger (DebuggerWithContext) instead of the plain
 * Debugger() call.  Formats the caller-supplied message onto the
 * console first.  The prologue/epilogue calls serialize panicking cpus;
 * statement order here is load-bearing.
 */
void panic_context(unsigned int reason, void *ctx, const char *str, ...)
{
	va_list listp;
	spl_t s;

	/* panic_caller is initialized to 0. If set, don't change it */
	if ( ! panic_caller )
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	/* str may legitimately be NULL: print only the header in that case. */
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	DebuggerWithContext(reason, ctx, "panic");
	panic_epilogue(s);
}
/*
 * Display one breakpoint table entry: its type, slot number, target
 * address/symbol, enable state and the raw bookkeeping fields.
 */
static void kdb_printbp(kdb_bp_t *bp, int i)
{
	if (bp->bp_forcehw)
		kdb_printf("Forced ");

	if (bp->bp_template.bph_free)
		kdb_printf("Instruction(i) ");
	else
		kdb_printf("%s ", kdba_bptype(&bp->bp_template));

	kdb_printf("BP #%d at ", i);
	kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);

	if (!bp->bp_enabled) {
		kdb_printf("\n is disabled");
	} else {
		kdba_printbp(bp);
		if (!bp->bp_global)
			kdb_printf(" on cpu %d", bp->bp_cpu);
		else
			kdb_printf(" globally");
		if (bp->bp_adjust)
			kdb_printf(" adjust %d", bp->bp_adjust);
	}

	/* Raw state dump for debugging the debugger itself. */
	kdb_printf("\taddr at %016lx, hardtype=%d, forcehw=%d, installed=%d, hard=%p\n",
		   bp->bp_addr, bp->bp_hardtype, bp->bp_forcehw,
		   bp->bp_installed, bp->bp_hard);
	kdb_printf("\n");
}
/*
 * Deactivate an installed breakpoint.  Hardware breakpoints are pulled
 * out of the debug register; software breakpoints have the original
 * instruction written back over the trap opcode.  Returns 0 on
 * success, 1 if the instruction stream could not be restored.
 */
int kdba_removebp(kdb_bp_t *bp)
{
	if (KDB_DEBUG(BP))
		kdb_printf("kdba_removebp bp_installed %d\n", bp->bp_installed);

	if (!bp->bp_installed)
		return 0;

	if (bp->bp_hardtype) {
		if (KDB_DEBUG(BP))
			kdb_printf("kdb: removing hardware reg %ld at " kdb_bfd_vma_fmt "\n",
				   bp->bp_hard->bph_reg, bp->bp_addr);
		kdba_removedbreg(bp);
	} else {
		if (KDB_DEBUG(BP))
			kdb_printf("kdb: restoring instruction 0x%x at " kdb_bfd_vma_fmt "\n",
				   bp->bp_inst, bp->bp_addr);
		if (kdb_putword(bp->bp_addr, bp->bp_inst, 1))
			return 1;	/* could not write the instruction back */
	}

	bp->bp_installed = 0;
	return 0;
}
/*
 * Walk the zone allocator's zone list and print the name and current
 * size of every zone consuming more than 1MB, followed by a few fixed
 * memory consumers (kernel stacks, page tables, kalloc-large).  Gated
 * on panic_include_zprint.  Every zone struct is snapshotted through
 * ml_nofault_copy so a corrupted list cannot trigger a nested panic.
 */
__private_extern__ void panic_display_zprint()
{
	if(panic_include_zprint == TRUE) {

		unsigned int	i;
		struct zone	zone_copy;

		if(first_zone!=NULL) {
			/* Copy each zone locally before touching any of its fields. */
			if(ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					/* Only zones larger than 1MB are interesting at panic time. */
					if(zone_copy.cur_size > (1024*1024)) {
						kdb_printf("%.20s:%lu\n",zone_copy.zone_name,(uintptr_t)zone_copy.cur_size);
					}

					if(zone_copy.next_zone == NULL) {
						break;
					}

					/* Follow the chain; bail if the next element is unreadable. */
					if(ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("Kernel Stacks:%lu\n",(uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
		kdb_printf("PageTables:%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("Kalloc.Large:%lu\n",(uintptr_t)kalloc_large_total);
	}
}
/*
 * Prints the backtrace most suspected of being a leaker, if we paniced in the zone allocator.
 * top_ztrace and panic_include_ztrace comes from osfmk/kern/zalloc.c
 */
__private_extern__ void panic_display_ztrace(void)
{
	if(panic_include_ztrace == TRUE) {
		unsigned int i = 0;
		boolean_t keepsyms = FALSE;

		/* The "keepsyms" boot-arg controls whether symbol names accompany addresses. */
		PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
		struct ztrace top_ztrace_copy;

		/* Make sure not to trip another panic if there's something wrong with memory */
		if(ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
			kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
			/* Print the backtrace addresses */
			for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH) ; i++) {
				kdb_printf("%p ", top_ztrace_copy.zt_stack[i]);
				if (keepsyms) {
					panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]);
				}
				kdb_printf("\n");
			}
			/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
			kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
		}
		else {
			kdb_printf("\nCan't access top_ztrace...\n");
		}
		kdb_printf("\n");
	}
}
/**
 * panic_display_time
 *
 * Dump epoch timestamps to the panic log: boot time, last sleep/wake
 * times, and the current calendar time, each as a sec/usec pair.
 *
 * Fix: the final call passed &secs for BOTH arguments of
 * clock_get_calendar_microtime(), clobbering secs with the usec value
 * and printing a stale usecs left over from the boot-time query.
 */
void panic_display_time(void)
{
	clock_sec_t secs;
	clock_usec_t usecs;

	/*
	 * Header.
	 */
	kdb_printf("Epoch Time: sec usec\n");

	/*
	 * Boot.
	 */
	clock_get_boottime_nanotime(&secs, &usecs);
	kdb_printf(" Boot : 0x%08x 0x%08x\n", secs, usecs);
	kdb_printf(" Sleep : 0x%08x 0x%08x\n", gIOLastSleepTime.tv_sec, gIOLastSleepTime.tv_usec);
	kdb_printf(" Wake : 0x%08x 0x%08x\n", gIOLastWakeTime.tv_sec, gIOLastWakeTime.tv_usec);

	/*
	 * Calendar (current wall-clock time).
	 */
	clock_get_calendar_microtime(&secs, &usecs);
	kdb_printf(" Calendar: 0x%08x 0x%08x\n\n", secs, usecs);

	return;
}
/*
 * The kernel's fatal-error entry point.  Records the caller's return
 * address (first panicker wins), serializes panicking cpus via
 * panic_prologue(), prints the formatted message unredacted, then
 * drops into the debugger before unwinding with panic_epilogue().
 * Statement order is load-bearing throughout.
 */
void panic(const char *str, ...)
{
	va_list listp;
	spl_t s;
	boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;

	/* panic_caller is initialized to 0. If set, don't change it */
	if ( ! panic_caller )
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);

	s = panic_prologue(str);

	/* Never hide pointers from panic logs. */
	doprnt_hide_pointers = FALSE;

	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	/* str may legitimately be NULL: print only the header in that case. */
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	Debugger("panic");

	/* Restore the pointer-hiding policy saved on entry. */
	doprnt_hide_pointers = old_doprnt_hide_pointers;
	panic_epilogue(s);
}
static void panic_display_kernel_aslr(void) { #if defined(__x86_64__) || (__arm__) if (0) { kdb_printf("Kernel slide: 0x%016lx\n", vm_kernel_slide); kdb_printf("Kernel text base: %p\n", (void *) vm_kernel_stext); } #endif }
/*
 * Demo command: greet the optional first argument, or the world.
 * (kdb's argc excludes the command name, so argv[1] is the first
 * user-supplied argument.)
 */
static int kdb_hello_cmd(int argc, const char **argv)
{
	if (argc > 1)
		return KDB_ARGCOUNT;

	if (argc == 0)
		kdb_printf("Hello world!\n");
	else
		kdb_printf("Hello %s.\n", argv[1]);

	return 0;
}
/*
 * Dump one machine-check bank's logged error: the raw STATUS register,
 * the decoded MCA and model-specific error codes, threshold-based
 * status (when the cpu reports it), the individual status bits, and
 * the ADDR/MISC registers when their valid bits are set.
 */
static void mca_dump_bank(mca_state_t *state, int i)
{
	mca_mci_bank_t		*bank;
	ia32_mci_status_t	status;

	bank = &state->mca_error_bank[i];
	status = bank->mca_mci_status;
	kdb_printf(" IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
		i, IA32_MCi_STATUS(i), status.u64, IF(!status.bits.val, "in"));
	/* Nothing further to decode if the bank logged no valid error. */
	if (!status.bits.val)
		return;

	kdb_printf(" MCA error code: 0x%04x\n",
		status.bits.mca_error);
	kdb_printf(" Model specific error code: 0x%04x\n",
		status.bits.model_specific_error);
	if (!mca_threshold_status_present) {
		kdb_printf(" Other information: 0x%08x\n",
			status.bits.other_information);
	} else {
		/* Threshold-based reporting: decode via lookup table unless UC set. */
		int	threshold = status.bits_tes_p.threshold;
		kdb_printf(" Other information: 0x%08x\n"
			   " Threshold-based status: %s\n",
			status.bits_tes_p.other_information,
			(status.bits_tes_p.uc == 0) ?
				mca_threshold_status[threshold] :
				"Undefined");
	}
	if (mca_threshold_status_present &&
	    mca_sw_error_recovery_present) {
		kdb_printf(" Software Error Recovery:\n%s%s",
			IF(status.bits_tes_p.ar, " Recovery action reqd\n"),
			IF(status.bits_tes_p.s, " Signaling UCR error\n"));
	}
	kdb_printf(" Status bits:\n%s%s%s%s%s%s",
		IF(status.bits.pcc, " Processor context corrupt\n"),
		IF(status.bits.addrv, " ADDR register valid\n"),
		IF(status.bits.miscv, " MISC register valid\n"),
		IF(status.bits.en, " Error enabled\n"),
		IF(status.bits.uc, " Uncorrected error\n"),
		IF(status.bits.over, " Error overflow\n"));
	/* ADDR/MISC only hold meaningful data when their valid bits are set. */
	if (status.bits.addrv)
		kdb_printf(" IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
			i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
	if (status.bits.miscv)
		kdb_printf(" IA32_MC%d_MISC(0x%x): 0x%016qx\n",
			i, IA32_MCi_MISC(i), bank->mca_mci_misc);
}
/* Log the cpu's identification: family/model/stepping, microcode
 * revision and the brand string, as returned by cpuid_info(). */
static void mca_report_cpu_info(void)
{
	i386_cpu_info_t *info = cpuid_info();

	kdb_printf(" family: %d model: %d stepping: %d microcode: %d\n",
		   info->cpuid_family,
		   info->cpuid_model,
		   info->cpuid_stepping,
		   info->cpuid_microcode_version);
	kdb_printf(" %s\n", info->cpuid_brand_string);
}
/*
 * Arch-specific tail of the breakpoint listing: report the enable
 * state and, for hardware breakpoints, the debug-register details and
 * watch length.
 */
void kdba_printbp(kdb_bp_t *bp)
{
	kdb_printf("\n is enabled");

	if (!bp->bp_hardtype)
		return;

	kdba_printbpreg(bp->bp_hard);
	/* Mode 0 is an execute breakpoint; others watch a byte range. */
	if (bp->bp_hard->bph_mode != 0)
		kdb_printf(" for %d bytes", bp->bp_hard->bph_length+1);
}
/*
 * Print the kernel's UUID string.  On non-ARM targets the UUID is
 * snapshotted through ml_nofault_copy first, so a corrupted pointer
 * cannot re-enter panic; ARM prints it directly.
 */
static void panic_display_kernel_uuid(void)
{
#ifdef __arm__
	kdb_printf("Kernel UUID: %s\n", kernel_uuid);
#else
	char uuid_copy[sizeof(kernel_uuid)];

	if (ml_nofault_copy((vm_offset_t) &kernel_uuid,
			    (vm_offset_t) &uuid_copy,
			    sizeof(kernel_uuid)) != sizeof(kernel_uuid))
		return;

	/* An empty string means the UUID was never populated. */
	if (uuid_copy[0] != '\0')
		kdb_printf("Kernel UUID: %s\n", uuid_copy);
#endif
}
int kdb_help(int argc, const char **argv, const char **envp, struct pt_regs *regs) { kdbtab_t *kt; kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description"); kdb_printf("----------------------------------------------------------\n"); for(kt=kdb_commands; kt->cmd_name; kt++) { kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name, kt->cmd_usage, kt->cmd_help); } return 0; }
static void kdb_cmderror(int diag) { if (diag >= 0) { kdb_printf("no error detected\n"); return; } diag = -diag; if (diag >= __nkdb_err) { kdb_printf("Illegal diag %d\n", -diag); return; } kdb_printf("diag: %d: %s\n", diag, kdb_messages[diag]); }
/*
 * Print the system model name.  On non-ARM targets the string is
 * snapshotted through ml_nofault_copy and force-terminated, so a
 * corrupted model_name cannot re-enter panic; ARM prints it directly.
 */
static void panic_display_model_name(void)
{
#ifdef __arm__
	kdb_printf("System model name: %s\n", model_name);
#else
	char name_copy[sizeof(model_name)];

	if (ml_nofault_copy((vm_offset_t) &model_name,
			    (vm_offset_t) &name_copy,
			    sizeof(model_name)) != sizeof(model_name))
		return;

	/* Guarantee NUL termination even if the source was corrupt. */
	name_copy[sizeof(name_copy) - 1] = '\0';

	if (name_copy[0] != 0)
		kdb_printf("System model name: %s\n", name_copy);
#endif
}
/*
 * 'ss'/'ssb' commands: single-step one instruction ('ss') or step
 * until the next branch ('ssb').  Arms the trace flag in the saved
 * registers and returns a command code telling the kdb main loop to
 * resume execution.
 */
static int kdb_ss(int argc, const char **argv)
{
	struct pt_regs *regs = get_irq_regs();
	/* Which spelling was used decides plain step vs step-until-branch. */
	int ssb = (strcmp(argv[0], "ssb") == 0);

	if (argc != 0)
		return KDB_ARGCOUNT;

	if (regs == NULL) {
		kdb_printf("%s: pt_regs not available\n", __FUNCTION__);
		return KDB_BADREG;
	}

	/*
	 * Set trace flag and go.
	 */
	KDB_STATE_SET(DOING_SS);
	if (ssb)
		KDB_STATE_SET(DOING_SSB);

	kdba_setsinglestep(regs);	/* Enable single step */

	return ssb ? KDB_CMD_SSB : KDB_CMD_SS;
}
/*
 * Resolve a symbol name to its address via kallsyms.  On success the
 * symtab's sym_start is filled in and 1 is returned; on failure the
 * symtab is left zeroed and 0 is returned.
 */
int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
{
	if (KDB_DEBUG(AR))
		kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n",
			   symname, symtab);

	memset(symtab, 0, sizeof(*symtab));

	symtab->sym_start = kallsyms_lookup_name(symname);
	if (symtab->sym_start) {
		if (KDB_DEBUG(AR))
			kdb_printf("kdbgetsymval: returns 1, symtab->sym_start=0x%lx\n",
				   symtab->sym_start);
		return 1;
	}

	if (KDB_DEBUG(AR))
		kdb_printf("kdbgetsymval: returns 0\n");
	return 0;
}
/*
 * 'ps' command: list every task with its struct address, pid, parent
 * pid, cpu bindings, TSS address and command name.
 *
 * NOTE(review): printing the task pointer and &p->tss with "0x%8.8x"
 * assumes 32-bit pointers; on a 64-bit build this is a printf format
 * mismatch — confirm against this kdb port's target architecture.
 */
int kdb_ps(int argc, const char **argv, const char **envp, struct pt_regs *regs)
{
	struct task_struct *p;

	kdb_printf("Task Addr Pid Parent cpu lcpu Tss Command\n");
	for_each_task(p) {
		kdb_printf("0x%8.8x %10.10d %10.10d %4.4d %4.4d 0x%8.8x %s\n",
			p, p->pid, p->p_pptr->pid, p->processor,
			p->last_processor, &p->tss, p->comm);
	}
	return 0;
}
static int kdbm_vm(int argc, const char **argv) { unsigned long addr; long offset = 0; int nextarg; int diag; int verbose_flg = 0; if (argc == 2) { if (strcmp(argv[1], "-v") != 0) { return KDB_ARGCOUNT; } verbose_flg = 1; } else if (argc != 1) { return KDB_ARGCOUNT; } if (strcmp(argv[0], "vmp") == 0) { struct task_struct *g, *tp; struct vm_area_struct *vp; pid_t pid; if ((diag = kdbgetularg(argv[argc], (unsigned long *) &pid))) return diag; kdb_do_each_thread(g, tp) { if (tp->pid == pid) { if (tp->mm != NULL) { if (verbose_flg) kdb_printf ("vm_area_struct "); kdb_printf ("vm_start vm_end vm_flags\n"); vp = tp->mm->mmap; while (vp != NULL) { kdbm_print_vmp(vp, verbose_flg); vp = vp->vm_next; } } return 0; } } kdb_while_each_thread(g, tp); kdb_printf("No process with pid == %d found\n", pid); } else {
/* Report the number of corrected ECC errors, but only when at least
 * one correction has been logged. */
__private_extern__ void panic_display_ecc_errors()
{
	uint32_t corrections = ecc_log_get_correction_count();

	if (corrections > 0)
		kdb_printf("ECC Corrections:%u\n", corrections);
}
/*
 * Instruction-breakpoint trap handler.  Scans the breakpoint table for
 * an entry whose address matches the faulting rip (minus the per-bp
 * adjust) and, on a hit, rewinds rip, reports the breakpoint, and
 * schedules the single-step needed to re-arm it.  Returns KDB_DB_BPT
 * on a hit, KDB_DB_NOBPT otherwise.
 */
kdb_dbtrap_t kdba_bp_trap(struct pt_regs *regs, int error_unused)
{
	int i;
	kdb_dbtrap_t rv;
	kdb_bp_t *bp;

	if (KDB_NULL_REGS(regs))
		return KDB_DB_NOBPT;

	/*
	 * Determine which breakpoint was encountered.
	 */
	if (KDB_DEBUG(BP))
		kdb_printf("kdba_bp_trap: rip=0x%lx (not adjusted) "
			   "eflags=0x%lx ef=0x%p rsp=0x%lx\n",
			   regs->rip, regs->eflags, regs, regs->rsp);

	rv = KDB_DB_NOBPT;	/* Cause kdb() to return */

	for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
		if (bp->bp_free)
			continue;
		/* Skip breakpoints bound to a different cpu. */
		if (!bp->bp_global && bp->bp_cpu != smp_processor_id())
			continue;
		if ((void *)bp->bp_addr == (void *)(regs->rip - bp->bp_adjust)) {
			/* Hit this breakpoint. */
			regs->rip -= bp->bp_adjust;
			kdb_printf("Instruction(i) breakpoint #%d at 0x%lx (adjusted)\n",
				   i, regs->rip);
			kdb_id1(regs->rip);
			rv = KDB_DB_BPT;
			bp->bp_delay = 1;
			/* SSBPT is set when the kernel debugger must single
			 * step a task in order to re-establish an instruction
			 * breakpoint which uses the instruction replacement
			 * mechanism.  It is cleared by any action that removes
			 * the need to single-step the breakpoint.
			 */
			KDB_STATE_SET(SSBPT);
			break;
		}
	}
	return rv;
}
/*
 * Panic-notifier callback: echo the panic message when running in kdb
 * mode, then trap into the debugger via kgdb_breakpoint().
 */
static int kgdb_panic_event(struct notifier_block *nb,
			    unsigned long event, void *msg)
{
	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)msg);

	kgdb_breakpoint();

	return NOTIFY_DONE;
}
/*
 * Print the kernel's UUID string.  The string is snapshotted through
 * ml_nofault_copy first, so a corrupted source cannot re-enter panic.
 */
static void panic_display_kernel_uuid(void)
{
	char uuid_copy[sizeof(kernel_uuid_string)];

	if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string,
			    (vm_offset_t) &uuid_copy,
			    sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string))
		return;

	/* An empty string means the UUID was never populated. */
	if (uuid_copy[0] != '\0')
		kdb_printf("Kernel UUID: %s\n", uuid_copy);
}
int kdb_cpu(int argc, const char **argv, const char **envp, struct pt_regs *regs) { unsigned long cpunum; int diag; if (argc == 0) { int i; kdb_printf("Currently on cpu %d\n", smp_processor_id()); kdb_printf("Available cpus: "); for (i=0; i<NR_CPUS; i++) { if (test_bit(i, &cpu_online_map)) { if (i) kdb_printf(", "); kdb_printf("%d", i); } } kdb_printf("\n"); return 0; } if (argc != 1) return KDB_ARGCOUNT; diag = kdbgetularg(argv[1], &cpunum); if (diag) return diag; /* * Validate cpunum */ if ((cpunum > NR_CPUS) || !test_bit(cpunum, &cpu_online_map)) return KDB_BADCPUNUM; kdb_new_cpu = cpunum; /* * Switch to other cpu */ return KDB_CPUSWITCH; }
/*
 * Install one breakpoint if it is not already active.  Delayed
 * breakpoints (waiting on a single-step to re-arm) are handed to
 * kdb_handle_bp() instead of being written now.  Software breakpoints
 * go through dbg_set_sw_break(); hardware breakpoints through the
 * arch kgdb ops.  Returns 0 on success (or successful deferral),
 * 1 if the breakpoint was already installed or installation failed.
 */
static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
{
	int ret;
	/*
	 * Install the breakpoint, if it is not already installed.
	 */
	if (KDB_DEBUG(BP))
		kdb_printf("%s: bp_installed %d\n",
			   __func__, bp->bp_installed);
	/* No single-step pending: cancel any leftover delay request. */
	if (!KDB_STATE(SSBPT))
		bp->bp_delay = 0;
	if (bp->bp_installed)
		return 1;
	if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) {
		if (KDB_DEBUG(BP))
			kdb_printf("%s: delayed bp\n", __func__);
		kdb_handle_bp(regs, bp);
		return 0;
	}
	/* bp_type 0 denotes a software breakpoint; anything else is hw. */
	if (!bp->bp_type)
		ret = dbg_set_sw_break(bp->bp_addr);
	else
		ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr,
						      bp->bph_length,
						      bp->bp_type);
	if (ret == 0) {
		bp->bp_installed = 1;
	} else {
		kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
			   __func__, bp->bp_addr);
#ifdef CONFIG_DEBUG_RODATA
		if (!bp->bp_type) {
			kdb_printf("Software breakpoints are unavailable.\n"
				   " Change the kernel CONFIG_DEBUG_RODATA=n\n"
				   " OR use hw breaks: help bph\n");
		}
#endif
		return 1;
	}
	return 0;
}
static int kdbm_print_vmp(struct vm_area_struct *vp, int verbose_flg) { struct __vmflags *tp; if (verbose_flg) { kdb_printf("0x%lx: ", (unsigned long) vp); } kdb_printf("0x%p 0x%p ", (void *) vp->vm_start, (void *) vp->vm_end); for (tp = vmflags; tp->mask; tp++) { if (vp->vm_flags & tp->mask) { kdb_printf(" %s", tp->name); } } kdb_printf("\n"); return 0; }
/*
 * Print the system model name.  The string is snapshotted through
 * ml_nofault_copy and force-terminated, so a corrupted model_name
 * cannot re-enter panic.
 */
void panic_display_model_name(void)
{
	char name_copy[sizeof(model_name)];

	if (ml_nofault_copy((vm_offset_t) &model_name,
			    (vm_offset_t) &name_copy,
			    sizeof(model_name)) != sizeof(model_name))
		return;

	/* Guarantee NUL termination even if the source was corrupt. */
	name_copy[sizeof(name_copy) - 1] = '\0';

	if (name_copy[0] != 0)
		kdb_printf("System model name: %s\n", name_copy);
}
/*
 * Walk every machine-check error-reporting bank and dump it.  Bank 8
 * is special-cased: it is dumped for the local package via
 * mca_dump_bank_mc8(), then for every other package, waiting up to
 * LockTimeOut for each remote package to publish its mca_state.
 */
static void mca_dump_error_banks(mca_state_t *state)
{
	unsigned int 		i;

	kdb_printf("MCA error-reporting registers:\n");
	for (i = 0; i < mca_error_bank_count; i++ ) {
		if (i == 8) {
			/*
			 * Fatal Memory Error
			 */

			/* Dump MC8 for local package */
			kdb_printf(" Package %d logged:\n",
				   x86_package()->ppkg_num);
			mca_dump_bank_mc8(state, 8);

			/* If there's other packages, report their MC8s */
			x86_pkg_t	*pkg;
			uint64_t	deadline;
			for (pkg = x86_pkgs; pkg != NULL; pkg = pkg->next) {
				if (pkg == x86_package())
					continue;
				/* Bounded spin: give the remote package time to post state. */
				deadline = mach_absolute_time() + LockTimeOut;
				while (pkg->mca_state == NULL &&
				       mach_absolute_time() < deadline)
					cpu_pause();
				if (pkg->mca_state) {
					kdb_printf(" Package %d logged:\n",
						   pkg->ppkg_num);
					mca_dump_bank_mc8(pkg->mca_state, 8);
				} else {
					kdb_printf(" Package %d timed out!\n",
						   pkg->ppkg_num);
				}
			}
			continue;
		}
		mca_dump_bank(state, i);
	}
}
/*
 * Print the one-shot system-configuration section of the panic log:
 * boot-args, OS and kernel versions, kernel UUID, platform info,
 * model name, uptime, zone usage and (with CONFIG_ZLEAKS) zone-leak
 * traces.  The OSCompareAndSwap on config_displayed guarantees the
 * body runs at most once even if several cpus panic concurrently.
 */
__private_extern__ void panic_display_system_configuration(void)
{

	panic_display_process_name();
	if (OSCompareAndSwap(0, 1, &config_displayed)) {
		char buf[256];
		/* Boot args are copied locally before printing. */
		if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
			kdb_printf("Boot args: %s\n", buf);
		kdb_printf("\nMac OS version:\n%s\n",
			(osversion[0] != 0) ? osversion : "Not yet set");
		kdb_printf("\nKernel version:\n%s\n",version);
		panic_display_kernel_uuid();
		panic_display_pal_info();
		panic_display_model_name();
		panic_display_uptime();
		panic_display_zprint();
#if CONFIG_ZLEAKS
		panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
		kext_dump_panic_lists(&kdb_log);
	}
}