static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}
Example #2
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

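	/*
	 * Retry the trylock for roughly one second (loops_per_jiffy * HZ
	 * __delay(1) iterations), then report the suspected lockup once and
	 * keep spinning.
	 */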
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %ps\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			printk(KERN_EMERG "%s: loops_per_jiffy=%lu, HZ=%d, loops=%llu\n", __func__, loops_per_jiffy, HZ, loops);	/* Added by HTC */
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
Example #3
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = (loops_per_jiffy * HZ) >> 4;

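	/*
	 * Retry the trylock for roughly 1/16 of a second, then report the
	 * suspected lockup once and fall back to arch_spin_lock() below.
	 */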
	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}
Example #4
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;
    char aee_str[40];
    unsigned long long t1;
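    /* Record when we started spinning so the printk below can report the total spin time. */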
    t1 = sched_clock();
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
        printk("spin time: %llu ns(start:%llu ns, lpj:%lu, HZ:%d)", sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ);
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
            debug_show_all_locks();
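            /* Report the suspected lockup through the AEE (Android Exception Engine) facility. */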
            sprintf( aee_str, "Spinlock lockup:%s\n", current->comm);
            aee_kernel_exception( aee_str,"spinlock debugger\n");
		}
	}
}
Example #5
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
    char aee_str[40];
//	if (!debug_locks_off())
//		return;
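	/* debug_locks_off() is commented out so the dump and AEE warning below are emitted even after lock debugging has been turned off. */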

	spin_dump(lock, msg);
    snprintf(aee_str, sizeof(aee_str), "Spinlock %s :%s\n", current->comm, msg);
    aee_kernel_warning( aee_str,"spinlock debugger\n");
}
Example #6
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
    char aee_str[50];
//	if (!debug_locks_off())
//		return;

	spin_dump(lock, msg);
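    /* Build a short tag and raise an AEE warning with the DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE options. */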
    snprintf( aee_str, 50, "Spinlock %s :%s\n", current->comm, msg);
    aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,"spinlock debugger\n");
}
Example #7
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
    char aee_str[50];
    spin_dump(lock, msg);
    snprintf( aee_str, 50, "Spinlock %s :%s\n", current->comm, msg);
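    /* "bad magic" typically means the lock was never initialized (no spin_lock_init()) or its memory was corrupted, so force a kernel exception (KE) via BUG_ON(). */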
    if(!strcmp(msg,"bad magic")){
        printk("[spin lock debug] bad magic:%08x, should be %08x, may use an un-initial spin_lock or mem corrupt\n", lock->magic, SPINLOCK_MAGIC);
        printk(">>>>>>>>>>>>>> Let's KE <<<<<<<<<<<<<<\n");
        BUG_ON(1);
    }
    aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,"spinlock debugger\n");
    #ifdef CONFIG_PANIC_ON_DEBUG_SPINLOCK
    panic("Please check this spin bug warning! if it is okay, disable CONFIG_PANIC_ON_DEBUG_SPINLOCK and ignore this warning!\n");
    #endif
}
Example #8
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
    char aee_str[50];
//  if (!debug_locks_off())
//      return;

    spin_dump(lock, msg);
    snprintf( aee_str, 50, "Spinlock %s :%s\n", current->comm, msg);
    if(!strcmp(msg,"bad magic")){
        printk("[spin lock debug] bad magic:%08x, should be %08x, may use an un-initial spin_lock or mem corrupt\n", lock->magic, SPINLOCK_MAGIC);
        printk(">>>>>>>>>>>>>> Let's KE <<<<<<<<<<<<<<\n");
        BUG_ON(1);
    }
    aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,"spinlock debugger\n");
}
Example #9
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

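	/*
	 * Retry the trylock for roughly one second at a time; the warning is
	 * printed only once, but the loop keeps spinning with no fallback to
	 * arch_spin_lock().
	 */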
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
Example #10
static void __spin_lock_debug(raw_spinlock_t *lock)
{
#ifdef CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;
    char aee_str[50];
    unsigned long long t1,t2;
    t1 = sched_clock();
    t2 = t1;
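    /* t1 marks when we started spinning; t2 tracks the last report so the diagnostic printk below is rate-limited to one per WARNING_TIME. */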
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		if(sched_clock() - t2 < WARNING_TIME) continue;
		t2 = sched_clock();
		if(oops_in_progress != 0) continue;  // in an exception/oops path, printk itself may hit spinlock errors
		/* lockup suspected: */
        printk("spin time: %llu ns(start:%llu ns, lpj:%lu, LPHZ:%d), value: 0x%08x\n", sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ, lock->raw_lock.slock);
		if (print_once) {
			print_once = 0;
	        spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
            debug_show_all_locks();
            snprintf( aee_str, 50, "Spinlock lockup:%s\n", current->comm);
            aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,"spinlock debugger\n");

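            /* Optionally escalate the suspected lockup to a panic when CONFIG_PANIC_ON_DEBUG_SPINLOCK is enabled. */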
            #ifdef CONFIG_PANIC_ON_DEBUG_SPINLOCK
            panic("Please check this spin_lock bug warning! if it is okay, disable CONFIG_PANIC_ON_DEBUG_SPINLOCK and ignore this warning!\n");
            #endif
		}
	}
#else //CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
#endif //CONFIG_MTK_MUTATION
}