static void save_context_stack(struct stack_trace *trace,
	struct task_struct *tsk, struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		unsigned long stack_page =
			(unsigned long)task_stack_page(tsk);
		if (stack_page && sp >= stack_page &&
		    sp <= stack_page + THREAD_SIZE - 32)
			save_raw_context_stack(trace, sp);
		return;
	}
	do {
		if (trace->skip > 0)
			trace->skip--;
		else
			trace->entries[trace->nr_entries++] = pc;
		if (trace->nr_entries >= trace->max_entries)
			break;
		pc = unwind_stack(tsk, &sp, pc, &ra);
	} while (pc);
#else
	save_raw_context_stack(trace, sp);
#endif
}
/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
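/*
 * Hedged usage sketch, not part of the original source: get_wchan() is the
 * kind of helper consumed by /proc/<pid>/wchan-style reporting.  The
 * report_wchan() function below is hypothetical and only illustrates
 * resolving the returned address to a symbol via the standard %pS printk
 * format.
 */
static void report_wchan(struct task_struct *task)
{
	unsigned long wchan = get_wchan(task);

	if (wchan)
		pr_info("%s[%d] blocked in %pS\n",
			task->comm, task_pid_nr(task), (void *)wchan);
	else
		pr_info("%s[%d] is running or has no saved context\n",
			task->comm, task_pid_nr(task));
}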
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state state;

	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
	state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
	state.registers[15] = ai->pc;

	do {
		EMSG_RAW(" pc 0x%08x", state.registers[15]);
	} while (unwind_stack(&state));
}
static void perf_callchain_kernel(struct pt_regs *regs,
				  struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->pc);

	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)current_stack_pointer;

	unwind_stack(current, NULL, sp, &save_stack_ops, trace);

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *sp = (unsigned long *)tsk->thread.sp;

	unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
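/*
 * Hedged caller sketch, not part of the original sources: with the older
 * (pre-5.2) struct stack_trace API, a caller supplies its own entries
 * buffer and save_stack_trace() fills it, while save_stack_trace_tsk()
 * does the same for another task.  dump_current_trace() and
 * MAX_TRACE_DEPTH below are hypothetical names used only for illustration.
 */
#define MAX_TRACE_DEPTH 16

static void dump_current_trace(void)
{
	unsigned long entries[MAX_TRACE_DEPTH];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= MAX_TRACE_DEPTH,
		.entries	= entries,
		.skip		= 1,	/* skip dump_current_trace() itself */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}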
/*
 * jsonwr_close
 * Closes a JSON writer.
 */
void jsonwr_close(JSONWR_T* jsonwr)
{
  unwind_stack(jsonwr);
  linklst_destroy_all(jsonwr->stack, free);
  str_destroy(jsonwr->value_buf, FALSE);
  str_destroy(jsonwr->line_buf, FALSE);
  memset(jsonwr, 0, sizeof(JSONWR_T));
  free(jsonwr);
}
static void __print_stack_unwind(struct abort_info *ai)
{
	struct unwind_state state;

	memset(&state, 0, sizeof(state));
	state.pc = ai->regs->elr;
	state.fp = ai->regs->x29;

	do {
		EMSG_RAW("pc 0x%016" PRIx64, state.pc);
	} while (unwind_stack(&state));
}
void stack_trace(void)
{
	void *end;
	symbol_t *end_sym = get_trace_symbol("__end");

	if (end_sym == NULL) {
		end = (void *)0x200000;	/* provide some sentinel protection */
	} else {
		end = end_sym->addr;
	}

	kprintf("Call trace (from top of stack):\n");
	unwind_stack((void **)READ_EBP(), end);	/* kernel */
}
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	if (regs && user_mode(regs))
		return;

	printk("\nCall trace:\n");

	unwind_stack(tsk, regs, sp, &print_trace_ops, "");

	printk("\n");

	if (!tsk)
		tsk = current;

	debug_show_held_locks(tsk);
}
void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long *stackaddr;

	/*
	 * Paranoia - clip max depth as we could get lost in the weeds.
	 */
	if (depth > backtrace_limit)
		depth = backtrace_limit;

	stackaddr = (unsigned long *)kernel_stack_pointer(regs);
	if (!user_mode(regs)) {
		if (depth)
			unwind_stack(NULL, regs, stackaddr,
				     &backtrace_ops, &depth);
		return;
	}

	while (depth-- && (stackaddr != NULL))
		stackaddr = user_backtrace(stackaddr, regs);
}
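/*
 * Hedged registration sketch, an assumption rather than a quote of the
 * original source: sh_backtrace() is the kind of callback an oprofile
 * arch layer hooks into struct oprofile_operations.  Returning -ENODEV
 * lets oprofile fall back to timer-based sampling when no hardware
 * performance counters are available.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	/* Hook the architecture backtrace helper into oprofile. */
	ops->backtrace = sh_backtrace;

	return -ENODEV;
}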
/**
 *
 * @brief Kernel fatal error handler
 *
 * This routine is called when a fatal error condition is detected by either
 * hardware or software.
 *
 * The caller is expected to always provide a usable ESF. In the event that the
 * fatal error does not have a hardware generated ESF, the caller should either
 * create its own or use a pointer to the global default ESF <_default_esf>.
 *
 * @param reason the reason that the handler was called
 * @param pEsf pointer to the exception stack frame
 *
 * @return This function does not return.
 */
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
					  const NANO_ESF *pEsf)
{
	_debug_fatal_hook(pEsf);

#ifdef CONFIG_PRINTK
	/* Display diagnostic information about the error */
	switch (reason) {
	case _NANO_ERR_CPU_EXCEPTION:
		break;

	case _NANO_ERR_SPURIOUS_INT: {
		int vector = _irq_controller_isr_vector_get();

		printk("***** Unhandled interrupt vector ");
		if (vector >= 0) {
			printk("%d ", vector);
		}
		printk("*****\n");
		break;
	}
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL) || \
	defined(CONFIG_HW_STACK_PROTECTION) || \
	defined(CONFIG_USERSPACE)
	case _NANO_ERR_STACK_CHK_FAIL:
		printk("***** Stack Check Fail! *****\n");
		break;
#endif /* CONFIG_STACK_CANARIES */

	case _NANO_ERR_KERNEL_OOPS:
		printk("***** Kernel OOPS! *****\n");
		break;

	case _NANO_ERR_KERNEL_PANIC:
		printk("***** Kernel Panic! *****\n");
		break;

	case _NANO_ERR_ALLOCATION_FAIL:
		printk("**** Kernel Allocation Failure! ****\n");
		break;

	default:
		printk("**** Unknown Fatal Error %d! ****\n", reason);
		break;
	}

	printk("Current thread ID = %p\n"
	       "eax: 0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x\n"
	       "esi: 0x%08x, edi: 0x%08x, ebp: 0x%08x, esp: 0x%08x\n"
	       "eflags: 0x%08x cs: 0x%04x\n"
#ifdef CONFIG_EXCEPTION_STACK_TRACE
	       "call trace:\n"
#endif
	       "eip: 0x%08x\n",
	       k_current_get(),
	       pEsf->eax, pEsf->ebx, pEsf->ecx, pEsf->edx,
	       pEsf->esi, pEsf->edi, pEsf->ebp, pEsf->esp,
	       pEsf->eflags, pEsf->cs & 0xFFFF, pEsf->eip);
#ifdef CONFIG_EXCEPTION_STACK_TRACE
	unwind_stack(pEsf->ebp);
#endif

#endif /* CONFIG_PRINTK */

	/*
	 * Error was fatal to a kernel task or a thread; invoke the system
	 * fatal error handling policy defined for the platform.
	 */
	_SysFatalErrorHandler(reason, pEsf);
}