void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if ((i % STACKSLOTS_PER_LINE) == 0) {
			if (i != 0)
				pr_cont("\n");
			printk("%s %08lx", log_lvl, *stack++);
		} else
			pr_cont(" %08lx", *stack++);
		touch_nmi_watchdog();
	}
	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
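/*
 * Every walker below terminates on kstack_end(). For reference, the
 * generic fallback (from include/linux/sched.h, used when an arch does
 * not define __HAVE_ARCH_KSTACK_END) tests whether the pointer has run
 * off the THREAD_SIZE-aligned stack region, with a pointer's worth of
 * slack for misaligned stacks:
 */
#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr + sizeof(void *) - 1) &
		 (THREAD_SIZE - sizeof(void *)));
}
#endif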
void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 4) == 0))
			printk("\n ");
		printk("%08lx ", *stack++);
	}

	printk("\n");
	show_trace(task, sp);
}
/* displays a short stack trace */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	unsigned long addr, *stack;
	int i;

	if (esp == NULL)
		esp = (unsigned long *)&esp;

	stack = esp;

	printk("Stack dump [0x%08lx]:\n", (unsigned long)esp);
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (__get_user(addr, stack)) {
			/* This message matches "failing address" marked
			   s390 in ksymoops, so lines containing it will
			   not be filtered out by ksymoops.  */
			printk("Failing address 0x%lx\n", (unsigned long)stack);
			break;
		}
		stack++;

		printk("sp + %02d: 0x%08lx\n", i * 4, addr);
	}
	printk("\n");

	show_trace(task, esp);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *) ((struct thread_info *)
						(task->stack))->cpu_context.r1;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;

	printk(KERN_INFO "\nStack:\n ");

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(sp))
			break;
		if (i && ((i % 8) == 0))
			printk("\n ");
		printk("%08lx ", *sp++);
	}
	printk("\n");

	show_trace(task, stack);
}
void show_trace(struct task_struct *task, unsigned long *stack)
{
	unsigned long addr;

	if (!stack)
		stack = (unsigned long *)&stack;

	printk(KERN_NOTICE "Call Trace: ");
#ifdef CONFIG_KALLSYMS
	printk(KERN_NOTICE "\n");
#endif
	while (!kstack_end(stack)) {
		addr = *stack++;
		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk(KERN_NOTICE "\n");

	if (!task)
		task = current;

	debug_show_held_locks(task);
}
static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
			break;
		/* treat each stack word as a candidate return address;
		 * back up one instruction to land on the call site */
		pc = (*ksp++) - 0x4;
	}
}
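/*
 * A minimal sketch of how walk_stackframe() is typically driven,
 * modeled on the riscv print_trace_address() user; the callback and
 * wrapper below are illustrative, not lifted from the source:
 */
static bool print_trace_address(unsigned long pc, void *arg)
{
	print_ip_sym(pc);	/* "[<address>] symbol+offset/length" */
	return false;		/* returning true would stop the walk */
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, NULL);
}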
static void show_trace(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long addr;

	printk("\nCall Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		addr = *sp++;
		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}
/* This recently started being used in arch-independent code too, as in
 * kernel/sched.c.*/
void show_stack(struct task_struct *task, unsigned long *esp)
{
	unsigned long *stack;
	int i;

	if (esp == NULL) {
		if (task != current && task != NULL)
			esp = (unsigned long *)KSTK_ESP(task);
		else
			esp = (unsigned long *)&esp;
	}

	stack = esp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 8) == 0))
			printk("\n ");
		printk("%08lx ", *stack++);
	}

	printk("Call Trace: \n");
	show_trace(task, esp);
}
void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) -
				      IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				/*
				 * The last slot of the IRQ stack holds a
				 * pointer back to the interrupted task
				 * stack; follow it and mark the switch.
				 */
				stack = (unsigned long *)(irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
			if (kstack_end(stack))
				break;
		}
		if ((i % STACKSLOTS_PER_LINE) == 0) {
			if (i != 0)
				pr_cont("\n");
			printk("%s %016lx", log_lvl, *stack++);
		} else
			pr_cont(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	preempt_enable();

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
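/*
 * Both show_stack_log_lvl() variants above are normally reached through
 * a thin wrapper. A minimal sketch, with an empty log level and no
 * frame pointer, matching how older x86 kernels invoked it:
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, 0, "");
}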
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
	unsigned long ret_addr;
	int cnt = LOOP_TIMES, graph = 0;

	pr_emerg("Call Trace:\n");
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		/* no frame pointers: scan every stack word for text addresses */
		while (!kstack_end(base_reg)) {
			ret_addr = *base_reg++;
			if (__kernel_text_address(ret_addr)) {
				get_real_ret_addr(&ret_addr, tsk, &graph);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
		}
	} else {
		/* frame pointers available: follow the saved-fp chain */
		while (!kstack_end((void *)base_reg) &&
		       !((unsigned long)base_reg & 0x3) &&
		       ((unsigned long)base_reg >= TASK_SIZE)) {
			unsigned long next_fp;
			/* the frame layout differs between the two ABIs */
#if !defined(NDS32_ABI_2)
			ret_addr = base_reg[0];
			next_fp = base_reg[1];
#else
			ret_addr = base_reg[-1];
			next_fp = base_reg[FP_OFFSET];
#endif
			if (__kernel_text_address(ret_addr)) {
				get_real_ret_addr(&ret_addr, tsk, &graph);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
			base_reg = (unsigned long *)next_fp;
		}
	}
	pr_emerg("\n");
}
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	unsigned long addr;

	if (!stack)
		stack = (unsigned long *)&stack;

	printk("Call Trace: ");
	while (!kstack_end(stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr))
			printk("[<%08lx>] %pSR\n", addr, (void *)addr);
	}
	printk("\n");
}
void show_trace(struct task_struct *task, unsigned long *stack)
{
	unsigned long addr;

	if (!stack)
		stack = (unsigned long *)&stack;

	printk("Call Trace: ");
	while (!kstack_end(stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			printk("[<%08lx>] ", addr);
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}
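/*
 * print_symbol() was later removed from the kernel in favor of the
 * %pS/%ps printk formats (the %pSR variant appears in the preceding
 * function). Under that API the loop body collapses to a single call;
 * a hedged modernization, not the original code:
 *
 *	if (__kernel_text_address(addr))
 *		printk("[<%08lx>] %pS\n", addr, (void *)addr);
 */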
/*
 * Save stack-backtrace addresses into a stack_trace buffer:
 */
static void save_raw_context_stack(struct stack_trace *trace,
	unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)reg29;
	unsigned long addr;

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (__kernel_text_address(addr)) {
			if (trace->skip > 0)
				trace->skip--;
			else
				trace->entries[trace->nr_entries++] = addr;
			if (trace->nr_entries >= trace->max_entries)
				break;
		}
	}
}
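/*
 * A hedged usage sketch for save_raw_context_stack(): gather up to 16
 * return addresses starting at a given stack pointer, skipping the two
 * innermost hits. demo_save_trace() is illustrative, not kernel code;
 * struct stack_trace is the pre-5.2 interface from
 * include/linux/stacktrace.h.
 */
static void demo_save_trace(unsigned long sp)
{
	static unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.nr_entries	= 0,
		.skip		= 2,	/* drop the innermost frames */
	};

	save_raw_context_stack(&trace, sp);
}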
void stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
		       unsigned long *sp, const struct stacktrace_ops *ops,
		       void *data)
{
	struct thread_info *context;
	int graph = 0;

	/* thread_info lives at the base of the THREAD_SIZE-aligned stack */
	context = (struct thread_info *)
		((unsigned long)sp & (~(THREAD_SIZE - 1)));

	while (!kstack_end(sp)) {
		unsigned long addr = *sp++;

		if (__kernel_text_address(addr)) {
			ops->address(data, addr, 1);

			print_ftrace_graph_addr(addr, data, ops,
						context, &graph);
		}
	}
}
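/*
 * A minimal sketch of an ops->address() consumer for stack_reader_dump().
 * The (data, address, reliable) signature matches the call above (this
 * particular walker always passes 1); the callback itself is
 * illustrative, loosely following other arch dumpers:
 */
static void demo_print_address(void *data, unsigned long addr, int reliable)
{
	/* flag addresses not known to be real return sites with '?' */
	printk("%s[<%08lx>] %pS\n", reliable ? " " : " ? ",
	       addr, (void *)addr);
}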