static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;
	char aee_str[40];
	unsigned long long t1;

	t1 = sched_clock();
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
        printk("spin time: %llu ns(start:%llu ns, lpj:%lu, HZ:%d)", sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ);
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
			debug_show_all_locks();
			snprintf(aee_str, sizeof(aee_str), "Spinlock lockup:%s\n", current->comm);
			aee_kernel_exception(aee_str, "spinlock debugger\n");
		}
	}
}
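The spin_dump() used above is the stock reporter from lib/spinlock_debug.c. A rough sketch of what it prints, paraphrased from that file (the owner/magic fields belong to the debug variant of raw_spinlock_t; treat the exact format as illustrative):

/* Sketch, paraphrased from lib/spinlock_debug.c: report which task is
 * spinning and which task the debug fields record as the lock owner. */
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
	       msg, raw_smp_processor_id(),
	       current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
	       lock, lock->magic,
	       owner ? owner->comm : "<none>",
	       owner ? task_pid_nr(owner) : -1,
	       lock->owner_cpu);
	dump_stack();
}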
Example #2
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
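The put_task_struct() above drops a reference taken by wake_oom_reaper(). As a sketch of that producer side, assuming the oom_reaper_list/oom_reaper_lock/oom_reaper_wait plumbing from mm/oom_kill.c of the same era (details vary between kernel versions):

/* Sketch of the producer (mm/oom_kill.c, same era): pin the victim
 * with the reference that oom_reap_task() later drops, queue it, and
 * wake the reaper kthread. */
static void wake_oom_reaper(struct task_struct *tsk)
{
	/* Already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}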
Example #3
static void ShowStatus(void)
{
	struct task_struct *t, *p;
	struct pid *pid;
	int count = 0;

	InDumpAllStack = 1;

	/* Dump kernel backtraces of every thread in init. */
	LOGE("[Hang_Detect] dump init all thread bt\n");
	if (init_pid) {
		pid = find_get_pid(init_pid);
		t = p = get_pid_task(pid, PIDTYPE_PID);
		if (p) {
			do {
				sched_show_task_local(t);
			} while_each_thread(p, t);
			put_task_struct(p);
		}
		put_pid(pid);
	}

	/* Dump kernel backtraces of every thread in surfaceflinger. */
	LOGE("[Hang_Detect] dump surfaceflinger all thread bt\n");
	if (surfaceflinger_pid) {
		pid = find_get_pid(surfaceflinger_pid);
		t = p = get_pid_task(pid, PIDTYPE_PID);
		count = 0;
		if (p) {
			do {
				sched_show_task_local(t);
				/* Pause periodically so the log buffer can drain. */
				if ((++count) % 5 == 4)
					msleep(20);
			} while_each_thread(p, t);
			put_task_struct(p);
		}
		put_pid(pid);
	}
	msleep(100);

	/* Dump kernel backtraces of every thread in system_server. */
	LOGE("[Hang_Detect] dump system_server all thread bt\n");
	if (system_server_pid) {
		pid = find_get_pid(system_server_pid);
		t = p = get_pid_task(pid, PIDTYPE_PID);
		count = 0;
		if (p) {
			do {
				sched_show_task_local(t);
				if ((++count) % 5 == 4)
					msleep(20);
			} while_each_thread(p, t);
			put_task_struct(p);
		}
		put_pid(pid);
	}
	msleep(100);

	/* Dump every thread stuck in uninterruptible (D) state. */
	LOGE("[Hang_Detect] dump all D thread bt\n");
	show_state_filter_local(TASK_UNINTERRUPTIBLE);
	debug_show_all_locks();

	system_server_pid = 0;
	surfaceflinger_pid = 0;
	init_pid = 0;
	InDumpAllStack = 0;
	msleep(10);
}
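The do { ... } while_each_thread(p, t) idiom walks every thread of the process starting at p; newer kernels provide for_each_thread() instead. A minimal sketch of the same per-process dump using that helper (assumes a kernel that has for_each_thread(), and uses the generic sched_show_task() in place of the vendor sched_show_task_local()):

/* Minimal sketch: dump a kernel backtrace for every thread of @p.
 * @p must be pinned by a task reference; RCU protects the list walk. */
static void dump_all_threads(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();
	for_each_thread(p, t)
		sched_show_task(t);
	rcu_read_unlock();
}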
Example #4
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
	struct task_struct *task;

	if (!waiter->deadlock_lock || !debug_locks)
		return;

	rcu_read_lock();
	task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
	if (!task) {
		rcu_read_unlock();
		return;
	}

	if (!debug_locks_off()) {
		rcu_read_unlock();
		return;
	}

	pr_warn("\n");
	pr_warn("============================================\n");
	pr_warn("WARNING: circular locking deadlock detected!\n");
	pr_warn("%s\n", print_tainted());
	pr_warn("--------------------------------------------\n");
	printk("%s/%d is deadlocking current task %s/%d\n\n",
	       task->comm, task_pid_nr(task),
	       current->comm, task_pid_nr(current));

	printk("\n1) %s/%d is trying to acquire this lock:\n",
	       current->comm, task_pid_nr(current));
	printk_lock(waiter->lock, 1);

	printk("\n2) %s/%d is blocked on this lock:\n",
		task->comm, task_pid_nr(task));
	printk_lock(waiter->deadlock_lock, 1);

	debug_show_held_locks(current);
	debug_show_held_locks(task);

	printk("\n%s/%d's [blocked] stackdump:\n\n",
		task->comm, task_pid_nr(task));
	show_stack(task, NULL);
	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, task_pid_nr(current));
	dump_stack();
	debug_show_all_locks();
	rcu_read_unlock();

	printk("[ turning off deadlock detection."
	       "Please report this trace. ]\n\n");
}
Example #5
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
	struct task_struct *task;

	if (!waiter->deadlock_lock || !rt_trace_on)
		return;

	rcu_read_lock();
	task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
	if (!task) {
		rcu_read_unlock();
		return;
	}

	TRACE_OFF_NOLOCK();

	printk("\n============================================\n");
	printk(  "[ BUG: circular locking deadlock detected! ]\n");
	printk(  "--------------------------------------------\n");
	printk("%s/%d is deadlocking current task %s/%d\n\n",
	       task->comm, task_pid_nr(task),
	       current->comm, task_pid_nr(current));

	printk("\n1) %s/%d is trying to acquire this lock:\n",
	       current->comm, task_pid_nr(current));
	printk_lock(waiter->lock, 1);

	printk("\n2) %s/%d is blocked on this lock:\n",
		task->comm, task_pid_nr(task));
	printk_lock(waiter->deadlock_lock, 1);

	debug_show_held_locks(current);
	debug_show_held_locks(task);

	printk("\n%s/%d's [blocked] stackdump:\n\n",
		task->comm, task_pid_nr(task));
	show_stack(task, NULL);
	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, task_pid_nr(current));
	dump_stack();
	debug_show_all_locks();
	rcu_read_unlock();

	printk("[ turning off deadlock detection."
	       "Please report this trace. ]\n\n");
	local_irq_disable();
}
Example #6
static void panic_print_sys_info(void)
{
	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}
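The panic_print bits correspond to the kernel's panic_print= command-line bitmask (documented in admin-guide/kernel-parameters.txt). The values below follow the order the flags are tested above; cross-check against kernel/panic.c in your tree:

/* Assumed bit layout, matching the documented panic_print= bitmask: */
#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010

Booting with, for example, panic_print=0x8 would then make a panic dump all held locks via debug_show_all_locks().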
Example #7
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
	struct task_struct *task;

	if (!waiter->deadlock_lock || !debug_locks)
		return;

	task = find_task_by_pid(waiter->deadlock_task_pid);
	if (!task)
		return;

	if (!debug_locks_off())
		return;

	printk("\n============================================\n");
	printk(  "[ BUG: circular locking deadlock detected! ]\n");
	printk(  "--------------------------------------------\n");
	printk("%s/%d is deadlocking current task %s/%d\n\n",
	       task->comm, task->pid, current->comm, current->pid);

	printk("\n1) %s/%d is trying to acquire this lock:\n",
	       current->comm, current->pid);
	printk_lock(waiter->lock, 1);

	printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
	printk_lock(waiter->deadlock_lock, 1);

	debug_show_held_locks(current);
	debug_show_held_locks(task);

	printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
	show_stack(task, NULL);
	printk("\n%s/%d's [current] stackdump:\n\n",
	       current->comm, current->pid);
	dump_stack();
	debug_show_all_locks();

	printk("[ turning off deadlock detection."
	       "Please report this trace. ]\n\n");
}
Example #8
static void sysrq_handle_showlocks(int key)
{
	debug_show_all_locks();
}
Example #9
static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
{
	debug_show_all_locks();
}
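Both handlers do the same job; the extra struct tty_struct * parameter in the second one is just the older sysrq callback signature. Either way the handler is wired into the sysrq key table through a struct sysrq_key_op, roughly as below (modeled on drivers/tty/sysrq.c, where the real entry sits under the 'd' key; the message strings here are illustrative):

/* Illustrative sysrq hookup; in-tree, this lives in the static
 * sysrq_key_table[] of drivers/tty/sysrq.c. */
static const struct sysrq_key_op sysrq_showlocks_op = {
	.handler	= sysrq_handle_showlocks,
	.help_msg	= "show-all-locks(d)",
	.action_msg	= "Show Locks Held",
};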
Example #10
static bool __oom_reap_task(struct task_struct *tsk)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct mm_struct *mm = NULL;
	struct task_struct *p;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task		exit_mm
	 *   atomic_inc_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	/*
	 * Make sure we find the associated mm_struct even when the particular
	 * thread has already terminated and cleared its mm.
	 * We might race with the exit path, so consider our work done if there
	 * is no mm.
	 */
	p = find_lock_task_mm(tsk);
	if (!p)
		goto unlock_oom;
	mm = p->mm;
	atomic_inc(&mm->mm_users);
	task_unlock(p);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto unlock_oom;
	}

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * This task can be safely ignored because we cannot do much more
	 * to release its memory.
	 */
	set_bit(MMF_OOM_REAPED, &mm->flags);
unlock_oom:
	mutex_unlock(&oom_lock);
	/*
	 * Drop our reference, but make sure the mmput slow path is called from
	 * a different context, because we must not risk getting stuck there
	 * and putting the oom_reaper out of the way.
	 */
	if (mm)
		mmput_async(mm);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
		schedule_timeout_idle(HZ/10);

	if (attempts > MAX_OOM_REAP_RETRIES) {
		pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
				task_pid_nr(tsk), tsk->comm);
		debug_show_all_locks();
	}

	/*
	 * Clear TIF_MEMDIE because the task either is no longer sitting on
	 * reasonably reclaimable memory, or it is not a good candidate for
	 * the oom victim right now because it cannot release its memory
	 * itself nor have it released by the oom reaper.
	 */
	tsk->oom_reaper_list = NULL;
	exit_oom_victim(tsk);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
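oom_reap_task() is driven by a dedicated kthread. A sketch of that consumer loop, assuming the same oom_reaper_list/oom_reaper_lock/oom_reaper_wait plumbing as in mm/oom_kill.c of this era (details vary between kernel versions):

/* Sketch of the reaper kthread main loop: sleep until a victim is
 * queued by wake_oom_reaper(), dequeue it, and reap it. */
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}
	return 0;
}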
Example #11
static void __spin_lock_debug(raw_spinlock_t *lock)
{
#ifdef CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;
	char aee_str[50];
	unsigned long long t1, t2;

	t1 = sched_clock();
	t2 = t1;
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		if (sched_clock() - t2 < WARNING_TIME)
			continue;
		t2 = sched_clock();
		if (oops_in_progress != 0)	/* in an oops flow, printk may itself trip spinlock errors */
			continue;
		/* lockup suspected: */
		printk("spin time: %llu ns (start: %llu ns, lpj: %lu, LPHZ: %d), value: 0x%08x\n",
		       sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ,
		       lock->raw_lock.slock);
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
			debug_show_all_locks();
			snprintf(aee_str, sizeof(aee_str), "Spinlock lockup:%s\n", current->comm);
			aee_kernel_warning_api(__FILE__, __LINE__,
					       DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
					       aee_str, "spinlock debugger\n");

#ifdef CONFIG_PANIC_ON_DEBUG_SPINLOCK
			panic("Please check this spin_lock bug warning! If it is okay, disable CONFIG_PANIC_ON_DEBUG_SPINLOCK and ignore this warning!\n");
#endif
		}
	}
#else /* CONFIG_MTK_MUTATION */
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
#endif /* CONFIG_MTK_MUTATION */
}
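In the vanilla branch, the final arch_spin_lock() hands the livelocked CPU back to the arch code after the one-time warning. For orientation, this whole function is reached from do_raw_spin_lock(), roughly as in lib/spinlock_debug.c (sketch; debug_spin_lock_before()/after() just sanity-check the lock's debug fields):

/* Sketch of the caller, following lib/spinlock_debug.c: the fast path
 * is a trylock; only a contended lock falls into the watchdog loop. */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}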