/*
 * Callback invoked by the stack walker for each return address found.
 *
 * @data:     points at the remaining-depth counter (unsigned int)
 * @addr:     the return address discovered on the stack
 * @reliable: walker's confidence in the address; unused here
 *
 * Records @addr in the oprofile trace buffer until the depth budget
 * is exhausted.
 */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	/*
	 * Test before decrementing: the previous "if ((*depth)--)" form
	 * still decremented when *depth was already 0, wrapping the
	 * unsigned counter to UINT_MAX and re-enabling tracing for all
	 * subsequent frames of the walk.
	 */
	if (*depth) {
		(*depth)--;
		oprofile_add_trace(addr);
	}
}
/*
 * avr32_backtrace - record a backtrace sample for oprofile.
 * @regs:  register state at the sample point; r7 holds the frame pointer
 * @depth: maximum number of frames to record
 */
void avr32_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	/* First frame pointer of the sampled context */
	struct frame_head *frame = (struct frame_head *)(regs->r7);

	if (user_mode(regs)) {
		/* Assume we have frame pointers in user mode process */
		while (depth-- && frame)
			frame = dump_user_backtrace(frame);
		return;
	}

#ifdef CONFIG_FRAME_POINTER
	/*
	 * Traverse the kernel stack from frame to frame up to
	 * "depth" steps, as long as the pointer stays on the
	 * current task's stack.
	 */
	for (; depth && valid_stack_ptr(task_thread_info(current),
					(unsigned long)frame); depth--) {
		oprofile_add_trace(frame->lr);
		/* frame pointers must move towards higher addresses */
		if (frame->fp <= frame)
			break;
		frame = frame->fp;
	}
#endif
}
static unsigned int user_getsp32(unsigned int sp, int is_first) { unsigned int stack_frame[2]; void __user *p = compat_ptr(sp); if (!access_ok(VERIFY_READ, p, sizeof(stack_frame))) return 0; /* * The most likely reason for this is that we returned -EFAULT, * which means that we've done all that we can do from * interrupt context. */ if (__copy_from_user_inatomic(stack_frame, p, sizeof(stack_frame))) return 0; if (!is_first) oprofile_add_trace(STACK_LR32(stack_frame)); /* * We do not enforce increasing stack addresses here because * we may transition to a different stack, eg a signal handler. */ return STACK_SP(stack_frame); }
/*
 * Walk an s390 kernel stack starting at @sp within [low, high],
 * emitting up to *depth trace entries.  Each entry is the saved
 * register at gprs[8] of a struct stack_frame, masked with
 * PSW_ADDR_INSN (presumably the return address — confirm against
 * the s390 stack-frame layout).  Returns the stack pointer at
 * which the walk stopped.
 */
static unsigned long
__show_trace(unsigned int *depth, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (*depth) {
		/* Strip the address-mode bits before using sp. */
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		(*depth)--;
		oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);

		/* Follow the backchain. */
		while (*depth) {
			/* Backchain must ascend strictly above the last frame. */
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			(*depth)--;
			oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
		}

		if (*depth == 0)
			break;

		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		(*depth)--;
		oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
		/* Resume the walk on the interrupted stack (gprs[15] = sp). */
		low = sp;
		sp = regs->gprs[15];
	}
	return sp;
}
/*
 * Per-address callback for the s390 stack walker: record @address
 * until the depth budget pointed to by @data is used up.
 * Returns nonzero to stop the walk.
 */
static int __s390_backtrace(void *data, unsigned long address)
{
	unsigned int *remaining = data;

	if (!*remaining)
		return 1;

	oprofile_add_trace(address);
	--*remaining;
	return 0;
}
/*
 * Stack-walk callback: record one PC per frame until the depth
 * budget in @d runs out; a nonzero return stops the walk.
 */
static int report_trace(struct stackframe *frame, void *d)
{
	unsigned int *remaining = d;

	if (*remaining != 0) {
		oprofile_add_trace(frame->pc);
		--*remaining;
	}

	return *remaining == 0;
}
/*
 * Unwind the kernel stack by address, recording the return address
 * of each frame, up to @depth frames or until the PC chain ends.
 */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth)
{
	for (; depth > 0 && frame->pc; depth--) {
		frame->pc = unwind_stack_by_address(low_addr,
						    &frame->sp,
						    frame->pc,
						    &frame->ra);
		oprofile_add_trace(frame->ra);
	}
}
/*
 * Record the return address of @head and hand back the next frame.
 * Returns NULL when the chain stops ascending.
 */
static struct frame_head *dump_backtrace(struct frame_head *head)
{
	oprofile_add_trace(head->ret);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses); anything else ends the walk.
	 */
	return (head < head->ebp) ? head->ebp : NULL;
}
/*
 * Unwind a user-mode stack, recording up to @depth return addresses.
 * Stops on an unwind failure or when the stack pointer leaves the
 * [low_addr, low_addr + THREAD_SIZE] window.
 */
static inline void do_user_backtrace(unsigned long low_addr,
				     struct stackframe *frame,
				     unsigned int depth)
{
	const unsigned int max_instr_check = 512;
	const unsigned long high_addr = low_addr + THREAD_SIZE;

	while (depth--) {
		if (unwind_user_frame(frame, max_instr_check))
			break;
		oprofile_add_trace(frame->ra);
		if (frame->sp < low_addr || frame->sp > high_addr)
			break;
	}
}
static unsigned long user_getsp64(unsigned long sp, int is_first) { unsigned long stack_frame[3]; if (!access_ok(VERIFY_READ, (void __user *)sp, sizeof(stack_frame))) return 0; if (__copy_from_user_inatomic(stack_frame, (void __user *)sp, sizeof(stack_frame))) return 0; if (!is_first) oprofile_add_trace(STACK_LR64(stack_frame)); return STACK_SP(stack_frame); }
/*
 * Fetch one user-space frame, record its saved link register, and
 * return the next frame pointer.  Returns NULL on a fault or when
 * the frame chain stops moving up the stack.
 */
static struct frame_head *dump_user_backtrace(struct frame_head *head)
{
	/* Copy this frame plus the one beyond it in a single read. */
	struct frame_head copy[2];

	if (!access_ok(VERIFY_READ, head, sizeof(copy)))
		return NULL;
	if (__copy_from_user_inatomic(copy, head, sizeof(copy)))
		return NULL;

	oprofile_add_trace(copy[0].lr);

	/* Frame pointers must strictly ascend. */
	return (copy[0].fp > head) ? copy[0].fp : NULL;
}
/*
 * Read the kernel-stack frame at @sp, record its saved LR (skipped
 * for the first frame), and return the next stack pointer.
 * Returns 0 when @sp fails validation.
 */
static unsigned long kernel_getsp(unsigned long sp, int is_first)
{
	unsigned long *frame = (unsigned long *)sp;

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;

	if (!is_first)
		oprofile_add_trace(STACK_LR(frame));

	/*
	 * Increasing stack addresses are deliberately not enforced:
	 * we might be transitioning from an interrupt stack to a
	 * kernel stack, and validate_sp() is designed to understand
	 * that, so just rely on it.
	 */
	return STACK_SP(frame);
}
/*
 * Fetch one user frame with an NMI-safe copy, record its return
 * address, and return the next frame pointer (NULL on failure or a
 * non-ascending chain).
 */
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
	/* Also read one struct stack_frame beyond the current one. */
	struct stack_frame bufhead[2];
	unsigned long bytes;

	/* bytes is the amount actually copied; bail on a short copy. */
	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	oprofile_add_trace(bufhead[0].return_address);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (head >= bufhead[0].next_frame)
		return NULL;

	return bufhead[0].next_frame;
}
/*
 * Fetch one user frame, record its return address, and return the
 * next frame pointer.  NULL on a fault or a non-ascending chain.
 */
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
{
	/* Read this frame plus one struct stack_frame beyond it. */
	struct stack_frame copy[2];

	if (!access_ok(VERIFY_READ, head, sizeof(copy)))
		return NULL;
	if (__copy_from_user_inatomic(copy, head, sizeof(copy)))
		return NULL;

	oprofile_add_trace(copy[0].return_address);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	return (copy[0].next_frame > head) ? copy[0].next_frame : NULL;
}
/*
 * Fetch one user-space frame tail, record its saved lr, and return
 * a pointer to the next frame tail.  The saved fp points one entry
 * above the tail structure, hence the -1 on return.  NULL on fault
 * or when the chain stops ascending.
 */
static struct frame_tail *user_backtrace(struct frame_tail *tail)
{
	/* Read this frame tail plus one struct frame_tail beyond. */
	struct frame_tail copy[2];

	if (!access_ok(VERIFY_READ, tail, sizeof(copy)))
		return NULL;
	if (__copy_from_user_inatomic(copy, tail, sizeof(copy)))
		return NULL;

	oprofile_add_trace(copy[0].lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (copy[0].fp <= tail + 1)
		return NULL;

	return copy[0].fp - 1;
}
/*
 * NMI-safe fetch of one user frame: record its return address and
 * hand back the next frame pointer, or NULL on failure or a
 * non-ascending chain.
 */
static struct frame_head *dump_user_backtrace(struct frame_head *head)
{
	/* Also read one struct frame_head beyond the current frame. */
	struct frame_head copy[2];
	unsigned long copied;

	copied = copy_from_user_nmi(copy, head, sizeof(copy));
	if (copied != sizeof(copy))
		return NULL;

	oprofile_add_trace(copy[0].ret);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	return (copy[0].bp > head) ? copy[0].bp : NULL;
}
/*
 * avr32_backtrace - record a backtrace sample for oprofile.
 * @regs:  register state at the sample point; r7 holds the frame pointer
 * @depth: maximum number of frames to record
 */
void avr32_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	/* Get first frame pointer */
	struct frame_head *head = (struct frame_head *)(regs->r7);

	if (!user_mode(regs)) {
#ifdef CONFIG_FRAME_POINTER
		/*
		 * Traverse the kernel stack from frame to frame up to
		 * "depth" steps, as long as the frame pointer stays on
		 * the current task's stack.
		 */
		while (depth-- && valid_stack_ptr(task_thread_info(current),
						  (unsigned long)head)) {
			oprofile_add_trace(head->lr);
			/* frame pointers must move towards higher addresses */
			if (head->fp <= head)
				break;
			head = head->fp;
		}
#endif
	} else {
		/* Assume we have frame pointers in user mode process */
		while (depth-- && head)
			head = dump_user_backtrace(head);
	}
}
/*
 * Unwind-callback that records an ia64 backtrace sample: skip past
 * the interrupt/oprofile frames, then emit up to bt->depth PCs.
 */
static void do_ia64_backtrace(struct unw_frame_info *info, void *vdata)
{
	ia64_backtrace_t *bt = vdata;
	struct switch_stack *sw;
	int count = 0;
	u_long pc, sp;

	sw = (struct switch_stack *)(info+1);
	/* padding from unw_init_running */
	sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15);

	unw_init_frame_info(&bt->frame, current, sw);

	/* skip over interrupt frame and oprofile calls */
	do {
		unw_get_sp(&bt->frame, &sp);
		if (sp >= (u_long)bt->regs)
			break;
		if (!next_frame(bt))
			return;
	} while (count++ < 200);	/* bounded — don't loop forever */

	/* finally, grab the actual samples */
	while (bt->depth-- && next_frame(bt)) {
		unw_get_ip(&bt->frame, &pc);
		oprofile_add_trace(pc);
		if (unw_is_intr_frame(&bt->frame)) {
			/*
			 * Interrupt received on the kernel stack; stop
			 * here rather than emit spurious cycles (see the
			 * sibling variant of this function for details).
			 */
			/* TODO: split rather than drop the 2nd half */
			break;
		}
	}
}
/*
 * Unwind-callback that records an ia64 backtrace sample: skip past
 * the interrupt/oprofile frames, then emit up to bt->depth PCs.
 */
static void do_ia64_backtrace(struct unw_frame_info *info, void *vdata)
{
	ia64_backtrace_t *bt = vdata;
	struct switch_stack *sw;
	int count = 0;
	u_long pc, sp;

	sw = (struct switch_stack *)(info+1);
	/* padding from unw_init_running */
	sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15);

	unw_init_frame_info(&bt->frame, current, sw);

	/* skip over interrupt frame and oprofile calls */
	do {
		unw_get_sp(&bt->frame, &sp);
		if (sp >= (u_long)bt->regs)
			break;
		if (!next_frame(bt))
			return;
	} while (count++ < 200);	/* bounded — don't loop forever */

	/* finally, grab the actual sample */
	while (bt->depth-- && next_frame(bt)) {
		unw_get_ip(&bt->frame, &pc);
		oprofile_add_trace(pc);
		if (unw_is_intr_frame(&bt->frame)) {
			/*
			 * Interrupt received on kernel stack; this can
			 * happen when timer interrupt fires while processing
			 * a softirq from the tail end of a hardware interrupt
			 * which interrupted a system call. Don't laugh, it
			 * happens! Splice the backtrace into two parts to
			 * avoid spurious cycles in the gprof output.
			 */
			/* TODO: split rather than drop the 2nd half */
			break;
		}
	}
}
/*
 * Read one word from the user stack at @stackaddr, record it as a
 * return address, and return the address of the next word.  NULL on
 * fault or when the value fails the alignment sanity check.
 */
static unsigned long * user_backtrace(unsigned long *stackaddr,
				      struct pt_regs *regs)
{
	unsigned long buf_stack;

	/* Also check accessibility of the address */
	if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long)))
		return NULL;
	if (__copy_from_user_inatomic(&buf_stack, stackaddr,
				      sizeof(unsigned long)))
		return NULL;

	/* Quick paranoia check: reject values not 4-byte aligned */
	if (buf_stack & 3)
		return NULL;

	oprofile_add_trace(buf_stack);

	stackaddr++;
	return stackaddr;
}
/*
 * Read one word from the user stack at @stackaddr, record it as a
 * return address, and return the address of the next word.  NULL on
 * fault or when the value fails the alignment sanity check.
 */
static unsigned long *user_backtrace(unsigned long *stackaddr,
				     struct pt_regs *regs)
{
	unsigned long value;

	/* Also check accessibility of address */
	if (!access_ok(VERIFY_READ, stackaddr, sizeof(value)))
		return NULL;
	if (__copy_from_user_inatomic(&value, stackaddr, sizeof(value)))
		return NULL;

	/* Quick paranoia check */
	if (value & 3)
		return NULL;

	oprofile_add_trace(value);

	return stackaddr + 1;
}
/*
 * NMI-safe fetch of one ia32 compat user frame: record its return
 * address and return the (widened) next frame pointer, or NULL on
 * failure or a non-ascending chain.
 */
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
	/* Also read one struct stack_frame_ia32 beyond the current one. */
	struct stack_frame_ia32 bufhead[2];
	struct stack_frame_ia32 *fp;
	unsigned long bytes;

	/* bytes is the amount actually copied; bail on a short copy. */
	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
	if (bytes != sizeof(bufhead))
		return NULL;

	/* Widen the 32-bit compat frame pointer to a kernel pointer. */
	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);

	oprofile_add_trace(bufhead[0].return_address);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (head >= fp)
		return NULL;

	return fp;
}
/*
 * Fetch one ia32 compat user frame, record its return address, and
 * return the (widened) next frame pointer.  NULL on fault or a
 * non-ascending frame chain.
 */
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
	/* Also check accessibility of one struct frame_head beyond */
	struct stack_frame_ia32 copy[2];
	struct stack_frame_ia32 *next;

	if (!access_ok(VERIFY_READ, head, sizeof(copy)))
		return NULL;
	if (__copy_from_user_inatomic(copy, head, sizeof(copy)))
		return NULL;

	/* Widen the 32-bit compat frame pointer to a kernel pointer. */
	next = (struct stack_frame_ia32 *) compat_ptr(copy[0].next_frame);

	oprofile_add_trace(copy[0].return_address);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	return (next > head) ? next : NULL;
}
/*
 * NMI-safe variant: fetch one ia32 compat user frame, record its
 * return address, and return the (widened) next frame pointer.
 * NULL on failure or a non-ascending frame chain.
 */
static struct stack_frame_ia32 *
dump_user_backtrace_32(struct stack_frame_ia32 *head)
{
	/* Also check accessibility of one struct frame_head beyond: */
	struct stack_frame_ia32 copy[2];
	struct stack_frame_ia32 *next;
	unsigned long copied;

	copied = copy_from_user_nmi(copy, head, sizeof(copy));
	if (copied != sizeof(copy))
		return NULL;

	/* Widen the 32-bit compat frame pointer to a kernel pointer. */
	next = (struct stack_frame_ia32 *) compat_ptr(copy[0].next_frame);

	oprofile_add_trace(copy[0].return_address);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	return (next > head) ? next : NULL;
}