static inline void __save_stack_trace_user(struct stack_trace *trace) { const struct pt_regs *regs = task_pt_regs(current); const void __user *fp = (const void __user *)regs->bp; if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = regs->ip; while (trace->nr_entries < trace->max_entries) { struct stack_frame_user frame; frame.next_fp = NULL; frame.ret_addr = 0; if (!copy_stack_frame(fp, &frame)) break; if ((unsigned long)fp < regs->sp) break; if (frame.ret_addr) { trace->entries[trace->nr_entries++] = frame.ret_addr; } if (fp == frame.next_fp) break; fp = frame.next_fp; } }
/*
 * NOTE(review): this function looks inconsistent with the rest of the
 * file -- copy_stack_frame() is called with two arguments
 * (fp, &frame) everywhere else, and TRUE is not a standard C
 * constant.  Confirm this block belongs here and that it compiles
 * against the real copy_stack_frame() prototype before relying on it.
 */
void copy_stack_frame_sp(void)
{
	copy_stack_frame();	/* presumably snapshots the current frame -- TODO confirm */
	new_prof_frame(TRUE);	/* presumably opens a new profiler frame -- TODO confirm */
}
static void timer_notify(struct pt_regs *regs, int cpu) { struct trace_array_cpu *data; struct stack_frame_user frame; struct trace_array *tr; const void __user *fp; int is_user; int i; if (!regs) return; tr = sysprof_trace; data = tr->data[cpu]; is_user = user_mode(regs); if (!current || current->pid == 0) return; if (is_user && current->state != TASK_RUNNING) return; __trace_special(tr, data, 0, 0, current->pid); if (!is_user) i = trace_kernel(regs, tr, data); else i = 0; /* * Trace user stack if we are not a kernel thread */ if (current->mm && i < sample_max_depth) { regs = (struct pt_regs *)current->thread.sp0 - 1; fp = (void __user *)regs->bp; __trace_special(tr, data, 2, regs->ip, 0); while (i < sample_max_depth) { frame.next_fp = NULL; frame.return_address = 0; if (!copy_stack_frame(fp, &frame)) break; if ((unsigned long)fp < regs->sp) break; __trace_special(tr, data, 2, frame.return_address, (unsigned long)fp); fp = frame.next_fp; i++; } } /* * Special trace entry if we overflow the max depth: */ if (i == sample_max_depth) __trace_special(tr, data, -1, -1, -1); __trace_special(tr, data, 3, current->pid, i); }