unsigned long
print_context_stack_bp(struct task_struct *task,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
		       unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long *retp = &frame->return_address;

	while (valid_stack_ptr(task, retp, sizeof(*retp), end)) {
		unsigned long addr = *retp;
		unsigned long real_addr;

		if (!__kernel_text_address(addr))
			break;

		real_addr = ftrace_graph_ret_addr(task, graph, addr, retp);
		if (ops->address(data, real_addr, 1))
			break;

		frame = frame->next_frame;
		retp = &frame->return_address;
	}

	return (unsigned long)frame;
}
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return ftrace_graph_ret_addr(state->task, &state->graph_idx,
				     *state->sp, state->sp);
}
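/*
 * Hedged sketch, not taken from the sources above: every call site in this
 * listing relies on the same contract from ftrace_graph_ret_addr() -- if the
 * address read off the stack is the ftrace trampoline return_to_handler,
 * translate it back to the original return address that the function graph
 * tracer stashed on the task's shadow ret_stack; otherwise return it
 * unchanged.  The simplified illustration below ignores the index/"graph_idx"
 * bookkeeping of the real helper, and field names such as ret_stack[].retp
 * may differ between kernel versions.
 */
static unsigned long graph_ret_addr_sketch(struct task_struct *task,
					   unsigned long ret,
					   unsigned long *retp)
{
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;			/* not a traced frame */

	/* find the shadow-stack entry whose return slot matches retp */
	for (i = task->curr_ret_stack; i >= 0; i--)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;				/* fall back to the handler address */
}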
unsigned long
print_context_stack(struct task_struct *task,
		    unsigned long *stack, unsigned long bp,
		    const struct stacktrace_ops *ops, void *data,
		    unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	/*
	 * If we overflowed the stack into a guard page, jump back to the
	 * bottom of the usable stack.
	 */
	if ((unsigned long)task_stack_page(task) - (unsigned long)stack <
	    PAGE_SIZE)
		stack = (unsigned long *)task_stack_page(task);

	while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
		unsigned long addr = *stack;

		if (__kernel_text_address(addr)) {
			unsigned long real_addr;
			int reliable = 0;

			if ((unsigned long) stack == bp + sizeof(long)) {
				reliable = 1;
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			}

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler). In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, graph, addr,
							  stack);
			if (real_addr != addr)
				ops->address(data, addr, 0);
			ops->address(data, real_addr, reliable);
		}
		stack++;
	}
	return bp;
}
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	unsigned long addr;
	unsigned long *addr_p = unwind_get_return_address_ptr(state);

	if (unwind_done(state))
		return 0;

	if (state->regs && user_mode(state->regs))
		return 0;

	addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
				     addr_p);

	return __kernel_text_address(addr) ? addr : 0;
}
static void notrace walk_stackframe(struct task_struct *task,
				    struct pt_regs *regs,
				    bool (*fn)(unsigned long, void *),
				    void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		fp = frame->fp;
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
					   (unsigned long *)(fp - 8));
	}
}
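/*
 * Hedged usage sketch, not taken from the source above: walk_stackframe() is
 * typically driven by a small callback that records each PC until its buffer
 * fills.  Returning true from the callback stops the walk (see the
 * "fn(pc, arg)" check in the loop above).  'pc_buffer' and its fields are
 * made up for illustration.
 */
struct pc_buffer {
	unsigned long *entries;
	unsigned int nr, max;
};

static bool collect_pc(unsigned long pc, void *arg)
{
	struct pc_buffer *buf = arg;

	if (buf->nr >= buf->max)
		return true;			/* buffer full: stop the walk */
	buf->entries[buf->nr++] = pc;
	return false;				/* keep unwinding */
}

/* e.g.: walk_stackframe(current, NULL, collect_pc, &buf); */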
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	unsigned long addr;
	unsigned long *addr_p = unwind_get_return_address_ptr(state);

	if (unwind_done(state))
		return 0;

	if (state->regs && user_mode(state->regs))
		return 0;

	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
				     addr_p);

	if (!__kernel_text_address(addr)) {
		printk_deferred_once(KERN_WARNING
			"WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n",
			(void *)addr, addr_p, state->task->comm,
			state->task->pid);
		return 0;
	}

	return addr;
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 *
	 * x86-32 can have up to three stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 */
	for (regs = NULL; stack; stack = stack_info.next_sp) {
		const char *stack_name;

		/*
		 * If we overflowed the task stack into a guard page, jump back
		 * to the bottom of the usable stack.
		 */
		if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
			stack = task_stack_page(task);

		if (get_stack_info(stack, task, &stack_info, &visit_mask))
			break;

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		/*
		 * Scan the stack, printing any text addresses we find. At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by __show_regs() below.
			 */
			if (regs && stack == &regs->ip) {
				unwind_next_frame(&state);
				continue;
			}

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler). In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

			/*
			 * Get the next frame from the unwinder. No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state);
			if (regs)
				__show_regs(regs, 0);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}
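/*
 * Hedged sketch, not taken from the source above: the "prefixed with '?'"
 * behaviour described in show_trace_log_lvl() comes from its printing helper.
 * A helper of that shape looks roughly like this; the exact body in
 * arch/x86/kernel/dumpstack.c may differ between kernel versions.
 */
static void printk_stack_address_sketch(unsigned long address, int reliable,
					char *log_lvl)
{
	touch_nmi_watchdog();
	/* unreliable scan hits get a leading "? ", unwinder-confirmed ones do not */
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}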
static bool update_stack_state(struct unwind_state *state,
			       unsigned long *next_bp)
{
	struct stack_info *info = &state->stack_info;
	enum stack_type prev_type = info->type;
	struct pt_regs *regs;
	unsigned long *frame, *prev_frame_end, *addr_p, addr;
	size_t len;

	if (state->regs)
		prev_frame_end = (void *)state->regs + regs_size(state->regs);
	else
		prev_frame_end = (void *)state->bp + FRAME_HEADER_SIZE;

	/* Is the next frame pointer an encoded pointer to pt_regs? */
	regs = decode_frame_pointer(next_bp);
	if (regs) {
		frame = (unsigned long *)regs;
		len = regs_size(regs);
		state->got_irq = true;
	} else {
		frame = next_bp;
		len = FRAME_HEADER_SIZE;
	}

	/*
	 * If the next bp isn't on the current stack, switch to the next one.
	 *
	 * We may have to traverse multiple stacks to deal with the possibility
	 * that info->next_sp could point to an empty stack and the next bp
	 * could be on a subsequent stack.
	 */
	while (!on_stack(info, frame, len))
		if (get_stack_info(info->next_sp, state->task, info,
				   &state->stack_mask))
			return false;

	/* Make sure it only unwinds up and doesn't overlap the prev frame: */
	if (state->orig_sp && state->stack_info.type == prev_type &&
	    frame < prev_frame_end)
		return false;

	/* Move state to the next frame: */
	if (regs) {
		state->regs = regs;
		state->bp = NULL;
	} else {
		state->bp = next_bp;
		state->regs = NULL;
	}

	/* Save the return address: */
	if (state->regs && user_mode(state->regs))
		state->ip = 0;
	else {
		addr_p = unwind_get_return_address_ptr(state);
		addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
		state->ip = ftrace_graph_ret_addr(state->task,
						  &state->graph_idx,
						  addr, addr_p);
	}

	/* Save the original stack pointer for unwind_dump(): */
	if (!state->orig_sp)
		state->orig_sp = frame;

	return true;
}
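/*
 * Hedged sketch, not taken from the source above: update_stack_state() asks
 * decode_frame_pointer() whether the saved bp is really an encoded pointer to
 * the pt_regs pushed at kernel entry.  Assuming, as the x86 frame-pointer
 * unwinder does, that such frames are flagged by setting the low bit of the
 * saved bp, the decode step looks roughly like this; the real helper and its
 * encoding macro vary by architecture and kernel version.
 */
static struct pt_regs *decode_frame_pointer_sketch(unsigned long *bp)
{
	unsigned long regs = (unsigned long)bp;

	if (!(regs & 0x1))
		return NULL;			/* ordinary frame pointer */

	return (struct pt_regs *)(regs & ~0x1UL);	/* strip the tag bit */
}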
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;

	/*
	 * The last frame (unwinding first) may not yet have saved
	 * its LR onto the stack.
	 */
	int firstframe = 1;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as setup by copy_thread().
		 *
		 * Note that stack backlinks are not properly setup by
		 * copy_thread() and thus, a forked task() will have
		 * an unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return 1;
	}

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		/* sanity check: ABI requires SP to be aligned 16 bytes. */
		if (sp & 0xF)
			return 1;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return 1;
		}

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return 1;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return 1; /* invalid backlink, too far up. */
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe && !__kernel_text_address(ip))
			return 1;
		firstframe = 0;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return 1;
#endif

		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;

		if (newsp == stack_end)
			break;

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;

		sp = newsp;
	}
	return 0;
}
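/*
 * Hedged usage sketch, not taken from the source above: a caller of
 * save_stack_trace_tsk_reliable() (for example a consistency check that needs
 * trustworthy unwinds) fills a fixed buffer and treats any non-zero return as
 * "this task's stack cannot be trusted".  The buffer size and wrapper name
 * here are arbitrary.
 */
static int task_stack_is_reliable_sketch(struct task_struct *tsk)
{
	unsigned long entries[64];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	/* 0 on success; 1 for an unreliable stack, -E2BIG if the buffer is too small */
	return save_stack_trace_tsk_reliable(tsk, &trace);
}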