static void aed_get_bt(struct task_struct *tsk, struct aee_process_bt *bt)
{
        struct stackframe frame;
        unsigned long stack_address;    /* unsigned long, not int: ALIGN() of an
                                         * unsigned long sp must not be truncated */

        bt->nr_entries = 0;

        memset(&frame, 0, sizeof(struct stackframe));
        if (tsk != current) {
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = thread_saved_pc(tsk);
                frame.pc = 0xffffffff;
        } else {
                register unsigned long current_sp asm("sp");

                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)aed_get_bt;
        }

        stack_address = ALIGN(frame.sp, THREAD_SIZE);
        if ((stack_address >= (PAGE_OFFSET + THREAD_SIZE)) &&
            virt_addr_valid(stack_address)) {
                aed_walk_stackframe(&frame, bt, stack_address);
        } else {
                LOGD("%s: Invalid sp value %lx\n", __func__, frame.sp);
        }
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;

        if (tsk != current) {
                data.no_sched_functions = 1;
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.pc = thread_saved_pc(tsk);
        } else {
                register unsigned long current_sp asm("sp");

                data.no_sched_functions = 0;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }

        walk_stackframe(&frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
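/*
 * For context: every save_stack_trace_tsk() variant in this collection funnels
 * into walk_stackframe() with a save_trace() callback. A minimal sketch of that
 * pair, modelled on the mainline ARM implementation; the arm64 variants further
 * down pass tsk as an extra first argument to walk_stackframe(), and the field
 * names of struct stack_trace_data are inferred from the usage above.
 */
struct stack_trace_data {
        struct stack_trace *trace;
        unsigned int no_sched_functions;
        unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
        struct stack_trace_data *data = d;
        struct stack_trace *trace = data->trace;
        unsigned long addr = frame->pc;

        if (data->no_sched_functions && in_sched_functions(addr))
                return 0;       /* keep walking, hide scheduler internals */
        if (data->skip) {
                data->skip--;
                return 0;       /* caller asked us to drop the first N frames */
        }

        trace->entries[trace->nr_entries++] = addr;

        return trace->nr_entries >= trace->max_entries; /* non-zero stops the walk */
}

void notrace walk_stackframe(struct stackframe *frame,
                             int (*fn)(struct stackframe *, void *), void *data)
{
        while (1) {
                if (fn(frame, data))
                        break;                  /* callback said stop */
                if (unwind_frame(frame) < 0)
                        break;                  /* ran off the stack */
        }
}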
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, lr;
        unsigned long stack_start, stack_end;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
                stack_start = (unsigned long)end_of_stack(p);
                stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;

                fp = thread_saved_fp(p);
                do {
                        if (fp < stack_start || fp > stack_end)
                                return 0;
                        lr = ((unsigned long *)fp)[0];
                        if (!in_sched_functions(lr))
                                return lr;
                        fp = *(unsigned long *)(fp + 4);
                } while (count++ < 16);
        }
        return 0;
}
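/*
 * get_wchan() reports the first return address outside the scheduler (the
 * task's "wait channel"). For reference, the in_sched_functions() test it
 * relies on is just a text-range check, per kernel/sched/core.c (exact body
 * may vary slightly by kernel version):
 */
int in_sched_functions(unsigned long addr)
{
        return in_lock_functions(addr) ||
                (addr >= (unsigned long)__sched_text_start
                && addr < (unsigned long)__sched_text_end);
}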
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;

        if (tsk != current) {
                data.no_sched_functions = 1;
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.pc = thread_saved_pc(tsk);
        } else {
                data.no_sched_functions = 0;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_stack_pointer;
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = tsk->curr_ret_stack;
#endif

        walk_stackframe(tsk, &frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;

        if (tsk != current) {
#ifdef CONFIG_SMP
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_START] */
#if 1
                /*
                 * What guarantees do we have here that 'tsk' is not
                 * running on another CPU? For now, ignore it as we
                 * can't guarantee we won't explode.
                 */
                if (trace->nr_entries < trace->max_entries)
                        trace->entries[trace->nr_entries++] = ULONG_MAX;
                return;
#else /* old */
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_END] */
                /*
                 * What guarantees do we have here that 'tsk' is not
                 * running on another CPU? For now, ignore it as we
                 * can't guarantee we won't explode.
                 */
                BUG();
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_START] */
#endif
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_END] */
#else
                data.no_sched_functions = 1;
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = 0;           /* recovered from the stack */
                frame.pc = thread_saved_pc(tsk);
#endif
        } else {
                register unsigned long current_sp asm("sp");

                data.no_sched_functions = 0;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }

        walk_stackframe(&frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
static struct pt_regs *unwind_get_regs(struct task_struct *tsk)
{
        struct stackframe frame;
        register unsigned long current_sp asm("sp");
        int found = 0;

        if (!tsk)
                tsk = current;
        /* Note: tsk->mm is dereferenced unconditionally here; a kernel
         * thread (mm == NULL) would oops on the pgd print. */
        printk("tsk = %p, comm=%s, pid=%d, pgd=0x%p\n",
               tsk, tsk->comm, tsk->pid, tsk->mm->pgd);

        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)unwind_get_regs;
        } else {
                /* task blocked in __switch_to */
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                /*
                 * The function calling __switch_to cannot be a leaf function
                 * so LR is recovered from the stack.
                 */
                frame.lr = 0;
                frame.pc = thread_saved_pc(tsk);
        }

        /* Unwind until we reach the syscall entry frame. */
        while (1) {
                int urc;

                urc = unwind_frame(&frame);
                if (urc < 0)
                        break;
                if (frame.pc == (unsigned long)ret_fast_syscall) {
                        found = 1;
                        break;
                }
        }

        if (!found)
                return NULL;

#if 0   /* debug: identify the syscall the task is blocked in */
        {
                unsigned long sc = *((unsigned long *)(frame.sp - 4));

                if (sc >= (unsigned long)&_text && sc < (unsigned long)&_end)
                        print_symbol("sys call=%s\n", sc);
        }
#endif

        return (struct pt_regs *)(frame.sp + 8);        /* +8 skips r4, r5
                                                         * pushed as the fifth
                                                         * and sixth args */
}
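/*
 * A hypothetical caller (not from the original source): given the pt_regs
 * recovered above, print the blocked task's registers at syscall entry. The
 * helper name show_blocked_regs() is made up for illustration; the ARM_lr and
 * ARM_sp accessors are the standard arm32 pt_regs fields.
 */
static void show_blocked_regs(struct task_struct *tsk)
{
        struct pt_regs *regs = unwind_get_regs(tsk);

        if (regs)
                printk("pc=%08lx lr=%08lx sp=%08lx\n",
                       instruction_pointer(regs), regs->ARM_lr, regs->ARM_sp);
}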
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;

        if (tsk != current) {
/* CORE-HC-ANR_Kernel_Stack-00*[ */
/*#ifdef CONFIG_SMP*/
#if defined(CONFIG_SMP) && !defined(CONFIG_FIH_DUMP_KERNEL_STACK)
                /*
                 * What guarantees do we have here that 'tsk' is not
                 * running on another CPU? For now, ignore it as we
                 * can't guarantee we won't explode.
                 */
                if (trace->nr_entries < trace->max_entries)
                        trace->entries[trace->nr_entries++] = ULONG_MAX;
                return;
#else
                pr_info("save_stack_trace_tsk: %s[%d] %s[%d]\r\n",
                        current->comm, smp_processor_id(),
                        tsk->comm, task_thread_info(tsk)->cpu);
                data.no_sched_functions = 1;
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = 0;           /* recovered from the stack */
                frame.pc = thread_saved_pc(tsk);
#endif
/* CORE-HC-ANR_Kernel_Stack-00*] */
        } else {
                register unsigned long current_sp asm("sp");

                data.no_sched_functions = 0;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }

        walk_stackframe(&frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        int skip;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.pc = (unsigned long)dump_backtrace;
        } else {
                /*
                 * task blocked in __switch_to
                 */
                frame.fp = thread_saved_fp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = 0;
#endif

        skip = !!regs;
        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(frame.pc);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * Mostly, this is the case where this function is
                         * called in panic/abort. As exception handler's
                         * stack frame does not contain the corresponding pc
                         * at which an exception has taken place, use regs->pc
                         * instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
        } while (!unwind_frame(tsk, &frame));

        put_task_stack(tsk);
}
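/*
 * For reference, the dump_backtrace_entry() used above is essentially a
 * one-line symbolic printk in mainline arm64 of this era (a sketch; the exact
 * format string varies across kernel versions):
 */
static void dump_backtrace_entry(unsigned long where)
{
        printk(" %pS\n", (void *)where);        /* e.g. " do_sys_open+0x1c/0xf0" */
}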
/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
        struct stack_trace *trace, unsigned int nosched)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;
        data.no_sched_functions = nosched;

        if (tsk != current) {
/* Bright Lee, 20130322, enable stack trace when android hang in debug stage { */
/* #ifdef CONFIG_SMP */
#if 0
/* } Bright Lee, 20130322 */
                /*
                 * What guarantees do we have here that 'tsk' is not
                 * running on another CPU? For now, ignore it as we
                 * can't guarantee we won't explode.
                 */
                if (trace->nr_entries < trace->max_entries)
                        trace->entries[trace->nr_entries++] = ULONG_MAX;
                return;
#else
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = 0;           /* recovered from the stack */
                frame.pc = thread_saved_pc(tsk);
#endif
        } else {
                register unsigned long current_sp asm("sp");

                /* We don't want this function nor the caller */
                data.skip += 2;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)__save_stack_trace;
        }

        walk_stackframe(&frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, lr;
        unsigned long stack_page;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = 4096 + (unsigned long)p;
        fp = thread_saved_fp(p);
        do {
                if (fp < stack_page || fp > 4092 + stack_page)
                        return 0;
                lr = pc_pointer(((unsigned long *)fp)[-1]);
                if (!in_sched_functions(lr))
                        return lr;
                fp = *(unsigned long *)(fp - 12);
        } while (count++ < 16);
        return 0;
}
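/*
 * The [-1] and -12 offsets above assume the classic ARM APCS frame layout,
 * where fp points at the saved-pc slot (a sketch for orientation; pc_pointer()
 * masks the PSR bits out of a 26-bit-compatible return address):
 *
 *      fp[ 0]  saved pc
 *      fp[-1]  saved lr        <- return address read by the loop
 *      fp[-2]  saved sp (ip)
 *      fp[-3]  caller's fp     <- the "fp - 12" load
 */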
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;

        if (tsk != current) {
/* enable call stack in smp arch */
#if defined(CONFIG_SMP) && !defined(CONFIG_HUAWEI_KERNEL)
                /*
                 * What guarantees do we have here that 'tsk' is not
                 * running on another CPU? For now, ignore it as we
                 * can't guarantee we won't explode.
                 */
                if (trace->nr_entries < trace->max_entries)
                        trace->entries[trace->nr_entries++] = ULONG_MAX;
                return;
#else
                data.no_sched_functions = 1;
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = 0;           /* recovered from the stack */
                frame.pc = thread_saved_pc(tsk);
#endif
        } else {
                register unsigned long current_sp asm("sp");

                data.no_sched_functions = 0;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }

        walk_stackframe(&frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void aee_dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        const register unsigned long current_sp asm("sp");

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (!tsk)
                tsk = current;

        if (regs) {
                frame.fp = regs->regs[29];
                frame.sp = regs->sp;
                frame.pc = regs->pc;
        } else if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.pc = (unsigned long)aee_dump_backtrace;
        } else {
                /*
                 * task blocked in __switch_to
                 */
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }

        aee_sram_fiq_log("Call trace:\n");
        while (1) {
                unsigned long where = frame.pc;
                int ret;

                ret = unwind_frame(&frame);
                if (ret < 0)
                        break;
                dump_backtrace_entry(where, frame.sp);
        }
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;

        if (tsk != current) {
#ifdef CONFIG_SMP
                /*
                 * What guarantees do we have here that 'tsk'
                 * is not running on another CPU?
                 */
                /* 20101006 [email protected] temporary block because of reset */
                /* BUG(); */
                return;
#else
                data.no_sched_functions = 1;
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = 0;           /* recovered from the stack */
                frame.pc = thread_saved_pc(tsk);
#endif
        } else {
                register unsigned long current_sp asm("sp");

                data.no_sched_functions = 0;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }

        walk_stackframe(&frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
        struct stack_trace *trace, unsigned int nosched)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;
        data.no_sched_functions = nosched;

        if (tsk != current) {
#ifdef CONFIG_SMP
                if (trace->nr_entries < trace->max_entries)
                        trace->entries[trace->nr_entries++] = ULONG_MAX;
                return;
#else
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = 0;           /* recovered from the stack */
                frame.pc = thread_saved_pc(tsk);
#endif
        } else {
                register unsigned long current_sp asm("sp");

                /* We don't want this function nor the caller */
                data.skip += 2;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
                frame.pc = (unsigned long)__save_stack_trace;
        }

        walk_stackframe(&frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        unsigned long irq_stack_ptr;
        int skip;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        /*
         * Switching between stacks is valid when tracing current and in
         * non-preemptible context.
         */
        if (tsk == current && !preemptible())
                irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
        else
                irq_stack_ptr = 0;

        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_stack_pointer;
                frame.pc = (unsigned long)dump_backtrace;
        } else {
                /*
                 * task blocked in __switch_to
                 */
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = tsk->curr_ret_stack;
#endif

        skip = !!regs;
        printk("Call trace:\n");
        while (1) {
                unsigned long where = frame.pc;
                unsigned long stack;
                int ret;

                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(where);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * Mostly, this is the case where this function is
                         * called in panic/abort. As exception handler's
                         * stack frame does not contain the corresponding pc
                         * at which an exception has taken place, use regs->pc
                         * instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
                ret = unwind_frame(tsk, &frame);
                if (ret < 0)
                        break;
                stack = frame.sp;
                if (in_exception_text(where)) {
                        /*
                         * If we switched to the irq_stack before calling this
                         * exception handler, then the pt_regs will be on the
                         * task stack. The easiest way to tell is if the large
                         * pt_regs would overlap with the end of the irq_stack.
                         */
                        if (stack < irq_stack_ptr &&
                            (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
                                stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

                        dump_mem("", "Exception stack", stack,
                                 stack + sizeof(struct pt_regs));
                }
        }

        put_task_stack(tsk);
}
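/*
 * Typical entry point: mainline arm64 of this era wires dump_backtrace() up to
 * the generic show_stack() hook roughly like this (a sketch for context):
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}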