Example no. 1
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}
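
Every example in this collection gates its report on debug_locks_off(). The helper atomically clears the global debug_locks flag and returns non-zero only for the caller that actually turned debugging off, so the first detector gets to print its report while any later or re-entrant detectors return silently. A paraphrased sketch of the generic helper, modeled on lib/debug_locks.c (details vary across kernel versions):

int debug_locks = 1;		/* cleared once lock state can no longer be trusted */
int debug_locks_silent;		/* set by the lock self-tests to suppress output */

static inline int __debug_locks_off(void)
{
	/* Atomic exchange: only the first caller sees the old value 1. */
	return xchg(&debug_locks, 0);
}

int debug_locks_off(void)
{
	if (debug_locks && __debug_locks_off()) {
		if (!debug_locks_silent) {
			console_verbose();
			return 1;
		}
	}
	return 0;
}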
Example no. 2
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p (%s)\n",
		msg, raw_smp_processor_id(), current->comm,
		current->pid, lock, print_tainted());
	dump_stack();
}
Example no. 3
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
		__machine_kdump(data);
#endif
	__do_machine_kexec(data);
}
Example no. 4
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
#ifdef CONFIG_PANIC_ON_DEBUG_SPINLOCK
	panic("Please check this rwlock bug warning! If it is okay, disable CONFIG_PANIC_ON_DEBUG_SPINLOCK and ignore this warning!\n");
#endif
}
Example no. 5
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	char aee_str[40];

	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
	/* snprintf() bounds the report so a long task name cannot overflow aee_str */
	snprintf(aee_str, sizeof(aee_str), "rwlock %s:%s\n", msg, current->comm);
	aee_kernel_exception(aee_str, "spinlock debugger\n");
}
Example no. 6
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
	struct task_struct *task;

	if (!waiter->deadlock_lock || !debug_locks)
		return;

	rcu_read_lock();
	task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
	if (!task) {
		rcu_read_unlock();
		return;
	}

	if (!debug_locks_off()) {
		rcu_read_unlock();
		return;
	}

	pr_warn("\n");
	pr_warn("============================================\n");
	pr_warn("WARNING: circular locking deadlock detected!\n");
	pr_warn("%s\n", print_tainted());
	pr_warn("--------------------------------------------\n");
	printk("%s/%d is deadlocking current task %s/%d\n\n",
	       task->comm, task_pid_nr(task),
	       current->comm, task_pid_nr(current));

	printk("\n1) %s/%d is trying to acquire this lock:\n",
	       current->comm, task_pid_nr(current));
	printk_lock(waiter->lock, 1);

	printk("\n2) %s/%d is blocked on this lock:\n",
		task->comm, task_pid_nr(task));
	printk_lock(waiter->deadlock_lock, 1);

	debug_show_held_locks(current);
	debug_show_held_locks(task);

	printk("\n%s/%d's [blocked] stackdump:\n\n",
		task->comm, task_pid_nr(task));
	show_stack(task, NULL);
	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, task_pid_nr(current));
	dump_stack();
	debug_show_all_locks();
	rcu_read_unlock();

	printk("[ turning off deadlock detection."
	       "Please report this trace. ]\n\n");
}
Example no. 7
static void __machine_kexec(void *data)
{
	struct kimage *image = data;

	pfault_fini();
	tracing_off();
	debug_locks_off();
	if (image->type == KEXEC_TYPE_CRASH) {
		lgr_info_log();
		s390_reset_system(__do_machine_kdump, data);
	} else {
		s390_reset_system(__do_machine_kexec, data);
	}
	disabled_wait((unsigned long) __builtin_return_address(0));
}
Example no. 8
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
	msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
	BUG();
#endif
	dump_stack();
}
Example no. 9
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
		lgr_info_log();
		s390_reset_system(setup_regs, __do_machine_kdump, data);
	} else
#endif
		s390_reset_system(NULL, __do_machine_kexec, data);
	disabled_wait((unsigned long) __builtin_return_address(0));
}
Example no. 10
void rt_mutex_deadlock_account_unlock(struct task_struct *task)
{
#ifdef CONFIG_DEBUG_PREEMPT
	if (!task->lock_count) {
		if (!debug_locks_off())
			return;
		printk("BUG: %s/%d: lock count underflow!\n",
			task->comm, task->pid);
		dump_stack();
		return;
	}
	task->lock_count--;
#ifdef CONFIG_PREEMPT_RT
	task->owned_lock[task->lock_count] = NULL;
#endif
#endif
}
Example no. 11
/*
 * Reset system and call either kdump or normal kexec
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;

	smp_send_stop();
	pfault_fini();
	tracing_off();
	debug_locks_off();
	if (image->type == KEXEC_TYPE_CRASH) {
		lgr_info_log();
		s390_reset_system(__do_machine_kdump, image);
	} else {
		s390_reset_system(__do_machine_kexec, image);
	}
	disabled_wait((unsigned long) __builtin_return_address(0));
}
Example no. 12
void
rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
{
#ifdef CONFIG_DEBUG_PREEMPT
	if (task->lock_count >= MAX_LOCK_STACK) {
		if (!debug_locks_off())
			return;
		printk("BUG: %s/%d: lock count overflow!\n",
			task->comm, task->pid);
		dump_stack();
		return;
	}
#ifdef CONFIG_PREEMPT_RT
	task->owned_lock[task->lock_count] = lock;
#endif
	task->lock_count++;
#endif
}
Example no. 13
static void spin_bug(spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (!debug_locks_off())
		return;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
	dump_stack();
}
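
For context, spin_bug() is not called directly by lock users; it is reached through the SPIN_BUG_ON() consistency checks that the debug spinlock code runs around each lock operation. A sketch of the pre-acquire checks, modeled on kernel/locking/spinlock_debug.c (macro and field names vary across kernel versions):

#define SPIN_BUG_ON(cond, lock, msg)				\
	do { if (unlikely(cond)) spin_bug(lock, msg); } while (0)

static inline void debug_spin_lock_before(raw_spinlock_t *lock)
{
	/* Uninitialized or corrupted lock structure? */
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	/* The current task already holds this lock. */
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	/* This CPU already holds this lock. */
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
		    lock, "cpu recursion");
}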
Example no. 14
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	/*
	 * This variant drops the stock spin_bug() diagnostics and instead
	 * panics outright when lock corruption is detected.
	 */
	BUG_ON(PANIC_CORRUPTION);
	dump_stack();
}
Example no. 15
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
	struct task_struct *task;

	if (!waiter->deadlock_lock || !debug_locks)
		return;

	task = find_task_by_pid(waiter->deadlock_task_pid);
	if (!task)
		return;

	if (!debug_locks_off())
		return;

	printk("\n============================================\n");
	printk(  "[ BUG: circular locking deadlock detected! ]\n");
	printk(  "--------------------------------------------\n");
	printk("%s/%d is deadlocking current task %s/%d\n\n",
	       task->comm, task->pid, current->comm, current->pid);

	printk("\n1) %s/%d is trying to acquire this lock:\n",
	       current->comm, current->pid);
	printk_lock(waiter->lock, 1);

	printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
	printk_lock(waiter->deadlock_lock, 1);

	debug_show_held_locks(current);
	debug_show_held_locks(task);

	printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
	show_stack(task, NULL);
	printk("\n%s/%d's [current] stackdump:\n\n",
	       current->comm, current->pid);
	dump_stack();
	debug_show_all_locks();

	printk("[ turning off deadlock detection."
	       "Please report this trace. ]\n\n");
}
Example no. 16
static ssize_t mt_pvlk_write(struct file *filp, const char *ubuf, size_t cnt, loff_t *data)
{
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;
	if (val == 0) {
		debug_locks_off();
	} else if (val == 2) {
		/*
		 * Take the locks as a->b->c, then as c->a->b: the c->a edge
		 * closes the cycle a->b->c->a that lockdep should report.
		 */
		pr_err("==== circular lock test =====\n");
		mutex_lock(&mtx_a);
		mutex_lock(&mtx_b);
		mutex_lock(&mtx_c);
		mutex_unlock(&mtx_c);
		mutex_unlock(&mtx_b);
		mutex_unlock(&mtx_a);

		mutex_lock(&mtx_c);
		mutex_lock(&mtx_a);
		mutex_lock(&mtx_b);
		mutex_unlock(&mtx_b);
		mutex_unlock(&mtx_a);
		mutex_unlock(&mtx_c);

	}
	pr_err("[MT prove locking] debug_locks = %d\n", debug_locks);
	return cnt;
}
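
The write handler above is only half of the picture: the code that exposes it as a file is not part of this example. A minimal sketch of how such a handler could be registered through procfs on kernels of this era; the file name, mode, and init hook below are assumptions for illustration, not taken from the original:

#include <linux/module.h>
#include <linux/proc_fs.h>

/* Hypothetical registration; the original file name/path is not shown. */
static const struct file_operations mt_pvlk_fops = {
	.owner	= THIS_MODULE,
	.write	= mt_pvlk_write,
};

static int __init mt_pvlk_init(void)
{
	proc_create("mt_pvlk", 0200, NULL, &mt_pvlk_fops);
	return 0;
}
late_initcall(mt_pvlk_init);

Writing "0" to such a file would then turn lock debugging off, and writing "2" would trigger the circular-locking test above.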
Example no. 17
/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		printk_safe_flush_on_panic();
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * works in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	/* Call flush even twice; it tries harder with a single online CPU */
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic();

	panic_print_sys_info();

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}
Example no. 18
/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first time
 * then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all this
 * to ensure that oopses don't scroll off the screen.  It has the side-effect
 * of preventing later-oopsing CPUs from mucking up the display, too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for the
 * right duration, whereas all the other CPUs pause for twice as long: once in
 * oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	debug_locks_off(); /* can't trust the integrity of the kernel anymore */
	do_oops_enter_exit();
}
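
A simplified illustration of the pausing scheme the comment describes (this is a sketch of the idea, not the kernel's actual do_oops_enter_exit()): the first oopsing CPU claims the right to print, and every other oopsing CPU delays for pause_on_oops seconds so that its output cannot scroll the first report off the screen.

/* Illustration only; the real logic lives in do_oops_enter_exit(). */
static int pause_on_oops;	/* seconds, from the pause_on_oops= boot option */
static atomic_t oops_owner = ATOMIC_INIT(-1);

static void pause_on_oops_sketch(void)
{
	int cpu = raw_smp_processor_id();

	if (!pause_on_oops)
		return;

	/* The first oopsing CPU claims the console; the rest pause. */
	if (atomic_cmpxchg(&oops_owner, -1, cpu) != -1 &&
	    atomic_read(&oops_owner) != cpu)
		mdelay(pause_on_oops * 1000);
}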