Example No. 1
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	pc = thread_saved_pc(&p->thread);
	if (pc == (unsigned long) interruptible_sleep_on
	    || pc == (unsigned long) sleep_on) {
		schedule_frame = ((unsigned long *)p->thread.reg30)[9];
		return ((unsigned long *)schedule_frame)[15];
	}
	if (pc == (unsigned long) interruptible_sleep_on_timeout
	    || pc == (unsigned long) sleep_on_timeout) {
		schedule_frame = ((unsigned long *)p->thread.reg30)[9];
		return ((unsigned long *)schedule_frame)[16];
	}
	if (pc >= first_sched && pc < last_sched) {
		printk(KERN_DEBUG "Bug in %s\n", __FUNCTION__);
	}

	return pc;
}
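For context (a hedged addition, not part of the listing above): the address that get_wchan() resolves is exported to userspace as the task's "wait channel", readable from /proc/<pid>/wchan. A minimal reader sketch, assuming a Linux /proc filesystem:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], wchan[128];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	/* /proc/<pid>/wchan holds the symbol the task is blocked in, or "0" if runnable */
	snprintf(path, sizeof(path), "/proc/%s/wchan", argv[1]);
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(wchan, sizeof(wchan), f))
		printf("pid %s wchan: %s\n", argv[1], wchan);
	fclose(f);
	return 0;
}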
Example No. 2
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
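A hedged usage sketch (not taken from the source file above): how a caller of this pre-5.2 stacktrace API might capture a blocked task's backtrace. The struct stack_trace field names follow the listing; example_dump_task() is an illustrative name, and print_stack_trace() is the companion printing helper from the same era.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

static void example_dump_task(struct task_struct *tsk)
{
	unsigned long entries[32];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,	/* keep all leading frames */
	};

	save_stack_trace_tsk(tsk, &trace);	/* fills entries[], terminated with ULONG_MAX if room */
	print_stack_trace(&trace, 0);
}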
Example No. 3
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.pc = thread_saved_pc(tsk);
	} else {
		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = tsk->curr_ret_stack;
#endif

	walk_stackframe(tsk, &frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example No. 4
/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
Example No. 5
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP

/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_START] */
#if 1 
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else // old
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_END] */

		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		BUG();

/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_START] */
#endif
/* 20110630, [email protected], Merge Black DCM, Power Management. [UB_END] */

#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example No. 6
static struct pt_regs * unwind_get_regs(struct task_struct *tsk)
{
	struct stackframe frame;
	register unsigned long current_sp asm ("sp");
	int found = 0;
	//unsigned long sc;

	if (!tsk)
		tsk = current;

	printk("tsk = %p,comm=%s,pid=%d,pgd=0x%p\n", tsk, tsk->comm, tsk->pid, tsk->mm->pgd);
	if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)unwind_get_regs;
	} else {
		/* task blocked in __switch_to */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		/*
		 * The function calling __switch_to cannot be a leaf function
		 * so LR is recovered from the stack.
		 */
		frame.lr = 0;
		frame.pc = thread_saved_pc(tsk);
	}

	while (1) {
		int urc;
		//unsigned long where = frame.pc;

		urc = unwind_frame(&frame);
		if (urc < 0)
			break;
		//dump_backtrace_entry(where, frame.pc, frame.sp - 4);
		if (frame.pc == (unsigned long)ret_fast_syscall) {
			found = 1;
			break;
		}
	}
	if (!found)
		return NULL;
#if 0
	//printk("FRAME:sp=0x%lx,pc=0x%lx,lr=0x%lx,fp=0x%lx\n", frame.sp, frame.pc, frame.lr, frame.fp);
	//rk28_printk_mem((unsigned int *)(frame.sp - sizeof(struct pt_regs)), 2 * sizeof(struct pt_regs) / 4 + 8, NULL);
	sc = *((unsigned long *)(frame.sp - 4));
	if (sc >= (unsigned long)&_text && sc < (unsigned long)&_end)
		print_symbol("sys call=%s\n", sc);
#endif
	return (struct pt_regs *)(frame.sp + 8);	/* 8 bytes for r4, r5 passed as the fifth and sixth args */
}
Example No. 7
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {

/* CORE-HC-ANR_Kernel_Stack-00*[ */
/*#ifdef CONFIG_SMP*/
#if defined(CONFIG_SMP) && !defined(CONFIG_FIH_DUMP_KERNEL_STACK)
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		
		pr_info("save_stack_trace_tsk: %s[%d] %s[%d]\r\n",
			current->comm, 
			smp_processor_id(), 
			tsk->comm, 
			task_thread_info(tsk)->cpu);
		
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
/* CORE-HC-ANR_Kernel_Stack-00*] */
	} else {
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example No. 8
static unsigned long get_wchan(struct task_struct *p)
{
    if (!p || p == current || p->state == TASK_RUNNING)
        return 0;
#if defined(__i386__)
    {
        unsigned long ebp, eip;
        unsigned long stack_page;
        int count = 0;

        stack_page = p->kernel_stack_page;
        if (!stack_page)
            return 0;
        ebp = p->tss.ebp;
        do {
            if (ebp < stack_page || ebp >= 4092+stack_page)
                return 0;
            eip = *(unsigned long *) (ebp+4);
            if ((void *)eip != sleep_on &&
                    (void *)eip != interruptible_sleep_on)
                return eip;
            ebp = *(unsigned long *) ebp;
        } while (count++ < 16);
    }
#elif defined(__alpha__)
    /*
     * This one depends on the frame size of schedule().  Do a
     * "disass schedule" in gdb to find the frame size.  Also, the
     * code assumes that sleep_on() follows immediately after
     * interruptible_sleep_on() and that add_timer() follows
     * immediately after interruptible_sleep().  Ugly, isn't it?
     * Maybe adding a wchan field to task_struct would be better,
     * after all...
     */
    {
        unsigned long schedule_frame;
        unsigned long pc;

        pc = thread_saved_pc(&p->tss);
        if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
            schedule_frame = ((unsigned long *)p->tss.ksp)[6];
            return ((unsigned long *)schedule_frame)[12];
        }
        return pc;
    }
#endif
    return 0;
}
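The __i386__ branch above walks the saved %ebp chain: each frame keeps the caller's frame pointer at offset 0 and the return address one word above it. A userspace analogue of that walk, assuming an x86-style frame layout and a build with -O0 -fno-omit-frame-pointer (an illustration only, not kernel code):

#include <stdio.h>
#include <stdint.h>

static void backtrace_fp(void)
{
	/* [fp] = caller's frame pointer, [fp + 1 word] = return address;
	 * the kernel version additionally bounds-checks fp against the
	 * task's stack page before each dereference. */
	uintptr_t *fp = __builtin_frame_address(0);
	int count = 0;

	while (fp && count++ < 16) {
		uintptr_t ret = fp[1];
		if (!ret)
			break;
		printf("frame %2d: return address %#lx\n", count, (unsigned long)ret);
		fp = (uintptr_t *)fp[0];	/* follow the saved frame-pointer chain */
	}
}

static void leaf(void)  { backtrace_fp(); }
static void inner(void) { leaf(); }

int main(void)
{
	inner();
	return 0;
}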
Example No. 9
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	int skip;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = 0;
#endif

	skip = !!regs;
	printk("Call trace:\n");
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}
Example No. 10
/* get_wchan - a maintenance nightmare ...  */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long frame, pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	pc = thread_saved_pc(&p->thread);
	if (pc < first_sched || pc >= last_sched)
		goto out;

	if (pc >= (unsigned long) sleep_on_timeout)
		goto schedule_timeout_caller;
	if (pc >= (unsigned long) sleep_on)
		goto schedule_caller;
	if (pc >= (unsigned long) interruptible_sleep_on_timeout)
		goto schedule_timeout_caller;
	if (pc >= (unsigned long)interruptible_sleep_on)
		goto schedule_caller;
	goto schedule_timeout_caller;

schedule_caller:
	frame = ((unsigned long *)p->thread.reg30)[10];
	pc    = ((unsigned long *)frame)[7];
	goto out;

schedule_timeout_caller:
	/* Must be schedule_timeout ...  */
	pc    = ((unsigned long *)p->thread.reg30)[11];
	frame = ((unsigned long *)p->thread.reg30)[10];

	/* The schedule_timeout frame ...  */
	pc    = ((unsigned long *)frame)[9];
	frame = ((unsigned long *)frame)[8];

	if (pc >= first_sched && pc < last_sched) {
		/* schedule_timeout called by interruptible_sleep_on_timeout */
		pc    = ((unsigned long *)frame)[7];
		frame = ((unsigned long *)frame)[6];
	}

out:
	if (current->thread.mflags & MF_32BIT)	/* Kludge for 32-bit ps  */
		pc &= 0xffffffff;

	return pc;
}
Example No. 11
/* get_wchan - a maintenance nightmare^W^Wpain in the ass ...  */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page;
	unsigned long pc;
#ifdef CONFIG_KALLSYMS
	unsigned long frame;
#endif

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	if (!stack_page || !mfinfo_num)
		return 0;

	pc = thread_saved_pc(p);
#ifdef CONFIG_KALLSYMS
	if (!in_sched_functions(pc))
		return pc;

	frame = p->thread.reg29 + schedule_frame->frame_size;
	do {
		int i;

		if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32)
			return 0;

		for (i = mfinfo_num - 1; i >= 0; i--) {
			if (pc >= (unsigned long) mfinfo[i].func)
				break;
		}
		if (i < 0)
			break;

		if (mfinfo[i].pc_offset < 0)
			break;
		pc = ((unsigned long *)frame)[mfinfo[i].pc_offset];
		if (!mfinfo[i].frame_size)
			break;
		frame += mfinfo[i].frame_size;
	} while (in_sched_functions(pc));
#endif

	return pc;
}
Example No. 12
/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
/* Bright Lee, 20130322, enable stack trace when android hang in debug stage { */
// #ifdef CONFIG_SMP
#if 0
/* } Bright Lee, 20130322 */
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		/* We don't want this function nor the caller */
		data.skip += 2;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)__save_stack_trace;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example No. 13
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
		/* enable call stack in smp arch */
#if defined(CONFIG_SMP) && !defined(CONFIG_HUAWEI_KERNEL)
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example No. 14
void aee_dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	const register unsigned long current_sp asm("sp");

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (regs) {
		frame.fp = regs->regs[29];
		frame.sp = regs->sp;
		frame.pc = regs->pc;
	} else if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.pc = (unsigned long)aee_dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}

	aee_sram_fiq_log("Call trace:\n");
	while (1) {
		unsigned long where = frame.pc;
		int ret;

		ret = unwind_frame(&frame);
		if (ret < 0)
			break;
		dump_backtrace_entry(where, frame.sp);
	}
}
Example No. 15
/* get_wchan - a maintenance nightmare^W^Wpain in the ass ...  */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page;
	unsigned long frame, pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p->thread_info;
	if (!stack_page || !mips_frame_info_initialized)
		return 0;

	pc = thread_saved_pc(p);
	if (!in_sched_functions(pc))
		return pc;

	frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
	do {
		int i;

		if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32)
			return 0;

		for (i = ARRAY_SIZE(mfinfo) - 1; i >= 0; i--) {
			if (pc >= (unsigned long) mfinfo[i].func)
				break;
		}
		if (i < 0)
			break;

		if (mfinfo[i].omit_fp)
			break;
		pc = ((unsigned long *)frame)[mfinfo[i].pc_offset];
		frame = ((unsigned long *)frame)[mfinfo[i].frame_offset];
	} while (in_sched_functions(pc));

	return pc;
}
Example No. 16
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk'
		 * is not running on another CPU?
		 */
		/* 20101006 [email protected] temporary block because of reset */
		/* BUG(); */
		return;
#else
		data.no_sched_functions = 1;
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		data.no_sched_functions = 0;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)save_stack_trace_tsk;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example No. 17
/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
#ifdef CONFIG_SMP
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = ULONG_MAX;
		return;
#else
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		register unsigned long current_sp asm ("sp");

		/* We don't want this function nor the caller */
		data.skip += 2;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)__save_stack_trace;
	}

	walk_stackframe(&frame, save_trace, &data);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example No. 18
unsigned long
get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	/*
	 * This one depends on the frame size of schedule().  Do a
	 * "disass schedule" in gdb to find the frame size.  Also, the
	 * code assumes that sleep_on() follows immediately after
	 * interruptible_sleep_on() and that add_timer() follows
	 * immediately after interruptible_sleep().  Ugly, isn't it?
	 * Maybe adding a wchan field to task_struct would be better,
	 * after all...
	 */

	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
		return ((unsigned long *)schedule_frame)[12];
	}
	return pc;
}
Example No. 19
static unsigned long get_wchan(struct task_struct *p)
{
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
#if defined(__i386__)
	{
		unsigned long ebp, esp, eip;
		unsigned long stack_page;
		int count = 0;

		stack_page = (unsigned long)p;
		esp = p->tss.esp;
		if (!stack_page || esp < stack_page || esp >= 8188+stack_page)
			return 0;
		/* include/asm-i386/system.h:switch_to() pushes ebp last. */
		ebp = *(unsigned long *) esp;
		do {
			if (ebp < stack_page || ebp >= 8188+stack_page)
				return 0;
			eip = *(unsigned long *) (ebp+4);
			if (eip < first_sched || eip >= last_sched)
				return eip;
			ebp = *(unsigned long *) ebp;
		} while (count++ < 16);
	}
#elif defined(__alpha__)
	/*
	 * This one depends on the frame size of schedule().  Do a
	 * "disass schedule" in gdb to find the frame size.  Also, the
	 * code assumes that sleep_on() follows immediately after
	 * interruptible_sleep_on() and that add_timer() follows
	 * immediately after interruptible_sleep().  Ugly, isn't it?
	 * Maybe adding a wchan field to task_struct would be better,
	 * after all...
	 */
	{
	    unsigned long schedule_frame;
	    unsigned long pc;

	    pc = thread_saved_pc(&p->tss);
	    if (pc >= first_sched && pc < last_sched) {
		schedule_frame = ((unsigned long *)p->tss.ksp)[6];
		return ((unsigned long *)schedule_frame)[12];
	    }
	    return pc;
	}
#elif defined(__mips__)
	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	{
		unsigned long schedule_frame;
		unsigned long pc;

		pc = thread_saved_pc(&p->tss);
		if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
			schedule_frame = ((unsigned long *)(long)p->tss.reg30)[16];
			return (unsigned long)((unsigned long *)schedule_frame)[11];
		}
		return pc;
	}
#elif defined(__mc68000__)
	{
	    unsigned long fp, pc;
	    unsigned long stack_page;
	    int count = 0;

	    stack_page = (unsigned long)p;
	    fp = ((struct switch_stack *)p->tss.ksp)->a6;
	    do {
		    if (fp < stack_page+sizeof(struct task_struct) ||
			fp >= 8184+stack_page)
			    return 0;
		    pc = ((unsigned long *)fp)[1];
		/* FIXME: This depends on the order of these functions. */
		    if (pc < first_sched || pc >= last_sched)
		      return pc;
		    fp = *(unsigned long *) fp;
	    } while (count++ < 16);
	}
#elif defined(__powerpc__)
	{
		unsigned long ip, sp;
		unsigned long stack_page = (unsigned long) p;
		int count = 0;

		sp = p->tss.ksp;
		do {
			sp = *(unsigned long *)sp;
			if (sp < stack_page || sp >= stack_page + 8188)
				return 0;
			if (count > 0) {
				ip = *(unsigned long *)(sp + 4);
				if (ip < first_sched || ip >= last_sched)
					return ip;
			}
		} while (count++ < 16);
	}
#elif defined(__arm__)
	{
		unsigned long fp, lr;
		unsigned long stack_page;
		int count = 0;

		stack_page = 4096 + (unsigned long)p;
		fp = get_css_fp (&p->tss);
		do {
			if (fp < stack_page || fp > 4092+stack_page)
				return 0;
			lr = pc_pointer (((unsigned long *)fp)[-1]);
			if (lr < first_sched || lr > last_sched)
				return lr;
			fp = *(unsigned long *) (fp - 12);
		} while (count ++ < 16);
	}
#elif defined (__sparc__)
	{
		unsigned long pc, fp, bias = 0;
		unsigned long task_base = (unsigned long) p;
		struct reg_window *rw;
		int count = 0;

#ifdef __sparc_v9__
		bias = STACK_BIAS;
#endif
		fp = p->tss.ksp + bias;
		do {
			/* Bogus frame pointer? */
			if (fp < (task_base + sizeof(struct task_struct)) ||
			    fp >= (task_base + (2 * PAGE_SIZE)))
				break;
			rw = (struct reg_window *) fp;
			pc = rw->ins[7];
			if (pc < first_sched || pc >= last_sched)
				return pc;
			fp = rw->ins[6] + bias;
		} while (++count < 16);
	}
#elif defined (__s390__)
        {
                unsigned long ksp, backchain, ip;
                unsigned long stack_page;
                int count = 0;

                stack_page = (unsigned long)p;
                ksp = p->tss.ksp;
                if (!stack_page || ksp < stack_page || ksp >= 8188+stack_page)
                        return 0;
                backchain = (*(unsigned long *) ksp) & 0x7fffffff;
                do {
                        if (backchain < stack_page || backchain >= 8188+stack_page)
                                return 0;
                        ip = (*(unsigned long *) (backchain+56)) & 0x7fffffff;
                        if (ip < first_sched || ip >= last_sched)
                                return ip;
                        backchain = (*(unsigned long *) backchain) & 0x7fffffff;
                } while (count++ < 16);
        }
#endif

	return 0;
}
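Every per-architecture branch above does the same job: follow saved frames until the return address falls outside the scheduler's text range. The closest userspace counterpart is glibc's backtrace(3); a minimal hedged sketch, assuming glibc and a link with -rdynamic so symbol names resolve:

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void show_trace(void)
{
	void *addrs[16];
	int n = backtrace(addrs, 16);			/* collect return addresses */
	char **syms = backtrace_symbols(addrs, n);	/* resolve them to names */

	if (!syms)
		return;
	for (int i = 0; i < n; i++)
		printf("%s\n", syms[i]);
	free(syms);
}

int main(void)
{
	show_trace();
	return 0;
}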
Example No. 20
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	unsigned long irq_stack_ptr;
	int skip;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	/*
	 * Switching between stacks is valid when tracing current and in
	 * non-preemptible context.
	 */
	if (tsk == current && !preemptible())
		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
	else
		irq_stack_ptr = 0;

	if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = tsk->curr_ret_stack;
#endif

	skip = !!regs;
	printk("Call trace:\n");
	while (1) {
		unsigned long where = frame.pc;
		unsigned long stack;
		int ret;

		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(where);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
		ret = unwind_frame(tsk, &frame);
		if (ret < 0)
			break;
		stack = frame.sp;
		if (in_exception_text(where)) {
			/*
			 * If we switched to the irq_stack before calling this
			 * exception handler, then the pt_regs will be on the
			 * task stack. The easiest way to tell is if the large
			 * pt_regs would overlap with the end of the irq_stack.
			 */
			if (stack < irq_stack_ptr &&
			    (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
				stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

			dump_mem("", "Exception stack", stack,
				 stack + sizeof(struct pt_regs));
		}
	}

	put_task_stack(tsk);
}