/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;

	/*
	 * Synthesize a minimal pt_regs from the caller-captured registers
	 * (pc/lr/sp/r52) and hand it to the generic iterator init.
	 * FIX: the address-of operator on "regs" had been corrupted to a
	 * garbled "(R)" character; restored to "&regs".
	 */
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}
/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;

	/*
	 * Backtrace another task via its saved state; for the running
	 * task (or a NULL task) capture our own registers instead.
	 */
	if (task && task != current)
		KBacktraceIterator_init(&kbt, task, NULL);
	else
		KBacktraceIterator_init_current(&kbt);

	/* Print the walk; 0 = no extra header lines. */
	tile_show_stack(&kbt, 0);
}
void tile_backtrace(struct pt_regs *const regs, unsigned int depth) { struct KBacktraceIterator kbt; unsigned int i; /* * Get the address just after the "jalr" instruction that * jumps to the handler for a syscall. When we find this * address in a backtrace, we silently ignore it, which gives * us a one-step backtrace connection from the sys_xxx() * function in the kernel to the xxx() function in libc. * Otherwise, we lose the ability to properly attribute time * from the libc calls to the kernel implementations, since * oprofile only considers PCs from backtraces a pair at a time. */ unsigned long handle_syscall_pc = handle_syscall_link_address(); KBacktraceIterator_init(&kbt, NULL, regs); kbt.profile = 1; /* * The sample for the pc is already recorded. Now we are adding the * address of the callsites on the stack. Our iterator starts * with the frame of the (already sampled) call site. If our * iterator contained a "return address" field, we could have just * used it and wouldn't have needed to skip the first * frame. That's in effect what the arm and x86 versions do. * Instead we peel off the first iteration to get the equivalent * behavior. */ if (KBacktraceIterator_end(&kbt)) return; KBacktraceIterator_next(&kbt); for (i = 0; i < depth; ++i) { int is_kernel; unsigned long pc; if (KBacktraceIterator_end(&kbt)) break; pc = kbt.it.pc; is_kernel = (pc >= PAGE_OFFSET && !is_arch_mappable_range(pc, 1)); if (pc != handle_syscall_pc) oprofile_add_pc(pc, is_kernel, 0); KBacktraceIterator_next(&kbt); } }
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) { struct KBacktraceIterator kbt; int skip = trace->skip; int i = 0; if (task == NULL || task == current) KBacktraceIterator_init_current(&kbt); else KBacktraceIterator_init(&kbt, task, NULL); for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) { if (skip) { --skip; continue; } if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET) break; trace->entries[i++] = kbt.it.pc; } trace->nr_entries = i; }
/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;

	/* Walk the stack described by the given register state. */
	KBacktraceIterator_init(&kbt, NULL, regs);

	/* Print it; 1 = include the header lines. */
	tile_show_stack(&kbt, 1);
}