Code example #1
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_bug(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}
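
The shape of example #1 -- a bounded trylock loop, a one-time warning with backtraces, then a blocking acquire so the system can still make progress if the lock ever frees up -- is a general debugging pattern rather than anything kernel-specific. As a point of comparison, here is a minimal user-space sketch of the same idea using POSIX spinlocks; the iteration bound, function name, and message are arbitrary assumptions, not taken from the kernel code:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical debug wrapper: spin with trylock for a bounded number of
 * iterations, warn once, then fall back to the normal blocking acquire. */
static void debug_spin_lock(pthread_spinlock_t *lock)
{
	long i;

	for (i = 0; i < 1000000L; i++) {	/* arbitrary bound, like loops_per_jiffy * HZ */
		if (pthread_spin_trylock(lock) == 0)
			return;
	}
	/* lockup suspected: */
	fprintf(stderr, "lockup suspected on spinlock %p\n", (void *)lock);
	pthread_spin_lock(lock);	/* same end result as the non-debug path */
}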
Code example #2
File: spinlock_debug.c  Project: Snuzzo/funky_dna_old
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %ps\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			printk(KERN_EMERG "%s: loops_per_jiffy=%lu, HZ=%d, loops=%llu\n", __func__, loops_per_jiffy, HZ, loops);	/* Added by HTC */
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
Code example #3
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;
	char aee_str[40];
	unsigned long long t1;

	t1 = sched_clock();
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		printk("spin time: %llu ns(start:%llu ns, lpj:%lu, HZ:%d)",
			sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ);
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
			debug_show_all_locks();
			sprintf(aee_str, "Spinlock lockup:%s\n", current->comm);
			aee_kernel_exception(aee_str, "spinlock debugger\n");
		}
	}
}
Code example #4
static void __spin_lock_debug(spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			dump_stack();
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
Code example #5
static void dump_softlock_debug(unsigned long data)
{
	int i, reboot;
	u64 system[NR_CPUS], num_jifs;

	memset(system, 0, NR_CPUS*sizeof(u64));

	num_jifs = jiffies - beattime;
	for_each_possible_cpu(i) {
		system[i] = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM] -
				heartbeats[i];
	}

	reboot = 0;

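	/*
	 * A CPU counts as wedged when nearly all jiffies elapsed since the
	 * last heartbeat were spent in system time, i.e. its non-system
	 * share of the interval is below roughly 10ms.
	 */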
	for_each_possible_cpu(i) {
		if ((num_jifs - cputime_to_jiffies(system[i])) <
						msecs_to_jiffies(10)) {
			WARN(1, "cpu %d wedged\n", i);
			smp_call_function_single(i, smp_dumpstack, NULL, 1);
			reboot = 1;
		}
	}

	if (reboot) {
		panic_timeout = 10;
		trigger_all_cpu_backtrace();
		panic("Soft lock on CPUs\n");
	}
}
Code example #6
/* Called from the FIQ asm handler */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;
	struct pt_regs context_regs;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();
	flush_cache_all();
	outer_flush_all();
 pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x", __func__, msm_dump_cpu_ctx.usr_r0);
	pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x usr_r1:0x%x usr_r2:0x%x usr_r3:0x%x usr_r4:0x%x usr_r5:0x%x usr_r6:0x%x usr_r7:0x%x usr_r8:0x%x usr_r9:0x%x usr_r10:0x%x usr_r11:0x%x usr_r12:0x%x usr_r13:0x%x usr_r14:0x%x irq_spsr:0x%x irq_r13:0x%x irq_r14:0x%x svc_spsr:0x%x svc_r13:0x%x svc_r14:0x%x abt_spsr:0x%x abt_r13:0x%x abt_r14:0x%x und_spsr:0x%x und_r13:0x%x und_r14:0x%x fiq_spsr:0x%x fiq_r8:0x%x fiq_r9:0x%x fiq_r10:0x%x fiq_r11:0x%x fiq_r12:0x%x fiq_r13:0x%x fiq_r14:0x%x\n",__func__, msm_dump_cpu_ctx.usr_r0,msm_dump_cpu_ctx.usr_r1,msm_dump_cpu_ctx.usr_r2,msm_dump_cpu_ctx.usr_r3, msm_dump_cpu_ctx.usr_r4, msm_dump_cpu_ctx.usr_r5, msm_dump_cpu_ctx.usr_r6, msm_dump_cpu_ctx.usr_r7, msm_dump_cpu_ctx.usr_r8, msm_dump_cpu_ctx.usr_r9, msm_dump_cpu_ctx.usr_r10, msm_dump_cpu_ctx.usr_r11, msm_dump_cpu_ctx.usr_r12, msm_dump_cpu_ctx.usr_r13, msm_dump_cpu_ctx.usr_r14, msm_dump_cpu_ctx.irq_spsr, msm_dump_cpu_ctx.irq_r13, msm_dump_cpu_ctx.irq_r14, msm_dump_cpu_ctx.svc_spsr, msm_dump_cpu_ctx.svc_r13, msm_dump_cpu_ctx.svc_r14, msm_dump_cpu_ctx.abt_spsr,msm_dump_cpu_ctx.abt_r13, msm_dump_cpu_ctx.abt_r14, msm_dump_cpu_ctx.und_spsr,msm_dump_cpu_ctx.und_r13, msm_dump_cpu_ctx.und_r14, msm_dump_cpu_ctx.fiq_spsr,msm_dump_cpu_ctx.fiq_r8, msm_dump_cpu_ctx.fiq_r9, msm_dump_cpu_ctx.fiq_r10, msm_dump_cpu_ctx.fiq_r11, msm_dump_cpu_ctx.fiq_r12, msm_dump_cpu_ctx.fiq_r13, msm_dump_cpu_ctx.fiq_r14);
	context_regs.ARM_sp = msm_dump_cpu_ctx.svc_r13;
	context_regs.ARM_lr = msm_dump_cpu_ctx.svc_r14;
	context_regs.ARM_fp = msm_dump_cpu_ctx.usr_r11; /* SVC-mode r11 is the same as USR-mode r11 */
	context_regs.ARM_pc = msm_dump_cpu_ctx.svc_r14;
	/* dump_stack(); */
	unwind_backtrace(&context_regs, current);
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	return;
}
Code example #7
/* warning interrupt handler */
static irqreturn_t watchdog_warning_interrupt(int irq, void *dev_id)
{
	pr_warn("[SHTDWN] %s, WATCHDOG TIMEOUT!\n", __func__);

	/* Let's reset the platform after dumping some data */
	trigger_all_cpu_backtrace();
	panic("Kernel Watchdog");

	/* This code should not be reached */
	return IRQ_HANDLED;
}
Code example #8
static void sysrq_handle_showallcpus(int key)
{
	if (!trigger_all_cpu_backtrace()) {
		struct pt_regs *regs = get_irq_regs();

		if (regs) {
			printk(KERN_INFO "CPU%d:\n", smp_processor_id());
			show_regs(regs);
		}
		schedule_work(&sysrq_showallcpus);
	}
}
Code example #9
static void sysrq_handle_showallcpus(int key)
{
	/*
	 * Fall back to the workqueue based printing if the
	 * backtrace printing did not succeed or the
	 * architecture has no support for it:
	 */
	if (!trigger_all_cpu_backtrace()) {
		struct pt_regs *regs = get_irq_regs();

		if (regs) {
			printk(KERN_INFO "CPU%d:\n", smp_processor_id());
			show_regs(regs);
		}
		schedule_work(&sysrq_showallcpus);
	}
}
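
Examples #8 and #9 fall back to a workqueue-based dump precisely because trigger_all_cpu_backtrace() reports whether the architecture could deliver NMI backtraces at all. On kernels of this vintage the wrapper in include/linux/nmi.h looked roughly like the following; this is a sketch from memory of the 3.x-era header, so consult the actual tree before relying on it:

/* include/linux/nmi.h (approximate 3.x-era form) */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace();	/* arch-specific NMI IPI to all CPUs */
	return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;	/* no arch support: caller must provide a fallback */
}
#endif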
Code example #10
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
Code example #11
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflowed.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic) {
			trigger_all_cpu_backtrace();
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		}
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
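
The block comment above describes the detection idea: the NMI-driven perf callback fires on a cycle-counter overflow and checks whether the per-CPU hrtimer interrupt count has advanced since the last sample. For context, is_hardlockup() in kernel/watchdog.c of the same era was essentially the function below; treat it as a from-memory sketch rather than an exact quote:

static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	/* the timer interrupt has not fired since the last NMI sample: stuck */
	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}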
Code example #12
File: spinlock_debug.c  Project: SelfImp/m75
static void __spin_lock_debug(raw_spinlock_t *lock)
{
#ifdef CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;
	char aee_str[50];
	unsigned long long t1, t2;

	t1 = sched_clock();
	t2 = t1;
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		if (sched_clock() - t2 < WARNING_TIME)
			continue;
		t2 = sched_clock();
		/* while an oops is in progress, printk itself may trip spinlock errors */
		if (oops_in_progress != 0)
			continue;
		/* lockup suspected: */
		printk("spin time: %llu ns(start:%llu ns, lpj:%lu, LPHZ:%d), value: 0x%08x\n",
			sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ,
			lock->raw_lock.slock);
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
			debug_show_all_locks();
			snprintf(aee_str, 50, "Spinlock lockup:%s\n", current->comm);
			aee_kernel_warning_api(__FILE__, __LINE__,
				DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,
				"spinlock debugger\n");

#ifdef CONFIG_PANIC_ON_DEBUG_SPINLOCK
			panic("Please check this spin_lock bug warning! if it is okay, disable CONFIG_PANIC_ON_DEBUG_SPINLOCK and ignore this warning!\n");
#endif
		}
	}
#else /* CONFIG_MTK_MUTATION */
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
#endif /* CONFIG_MTK_MUTATION */
}
Code example #13
static long iTCO_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	int new_options, retval = -EINVAL;
	int new_heartbeat;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.options =		WDIOF_SETTIMEOUT |
					WDIOF_KEEPALIVEPING |
					WDIOF_MAGICCLOSE,
		.firmware_version =	0,
		.identity =		DRV_NAME,
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
	{
		if (get_user(new_options, p))
			return -EFAULT;

		if (new_options & WDIOS_DISABLECARD) {
			iTCO_wdt_stop();
			retval = 0;
		}
		if (new_options & WDIOS_ENABLECARD) {
			iTCO_wdt_keepalive();
			iTCO_wdt_start();
			retval = 0;
		}
		return retval;
	}
	case WDIOC_KEEPALIVE:
		iTCO_wdt_keepalive();
		return 0;

	case WDIOC_SETTIMEOUT:
	{
		if (get_user(new_heartbeat, p))
			return -EFAULT;
		if (iTCO_wdt_set_heartbeat(new_heartbeat))
			return -EINVAL;
		iTCO_wdt_keepalive();
		/* fall through */
	}
	case WDIOC_GETTIMEOUT:
		return put_user(heartbeat, p);
	case WDIOC_GETTIMELEFT:
	{
		int time_left;
		if (iTCO_wdt_get_timeleft(&time_left))
			return -EINVAL;
		return put_user(time_left, p);
	}
	default:
		return -ENOTTY;
	}
}

/*
 *	Kernel Interfaces
 */

static const struct file_operations iTCO_wdt_fops = {
	.owner =		THIS_MODULE,
	.llseek =		no_llseek,
	.write =		iTCO_wdt_write,
	.unlocked_ioctl =	iTCO_wdt_ioctl,
	.open =			iTCO_wdt_open,
	.release =		iTCO_wdt_release,
};

static struct miscdevice iTCO_wdt_miscdev = {
	.minor =	WATCHDOG_MINOR,
	.name =		"watchdog",
	.fops =		&iTCO_wdt_fops,
};

/*
 *	Reboot notifier
 */

static int TCO_reboot_notifier(struct notifier_block *this,
			   unsigned long code,
			   void *another_unused)
{
	if (code == SYS_HALT || code == SYS_POWER_OFF) {
		iTCO_wdt_set_reset_type(TCO_POLICY_HALT);
	}

	iTCO_wdt_last_kick(5);

#ifdef CONFIG_DEBUG_FS
	if (iTCO_wdt_private.panic_reboot_notifier) {
		BUG();
	}
#endif

	return NOTIFY_DONE;
}

static irqreturn_t tco_irq_handler(int irq, void *arg)
{
	pr_warn("[SHTDWN] %s, WATCHDOG TIMEOUT HANDLER!\n", __func__);

	/* reduce the timeout to the minimum, but sufficient for tracing */
	bypass_keepalive = false;
	iTCO_wdt_last_kick(15);

	trigger_all_cpu_backtrace();

	/* Let the watchdog reset the board */
	panic_timeout = 0;
	panic("Kernel Watchdog");

	/* This code should not be reached */

	return IRQ_HANDLED;
}


static ssize_t shutdown_ongoing_store(struct device *dev,
			struct device_attribute *attr, const char *buf, size_t size)
{
	iTCO_wdt_set_reset_type(TCO_POLICY_HALT);
	return size;
}
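
The ioctl table in example #13 implements the standard Linux watchdog character-device ABI from linux/watchdog.h, so it can be driven by any generic watchdog daemon. A minimal user-space sketch follows; the device path and the 30-second timeout are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;	/* assumed timeout, in seconds */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* handled by WDIOC_SETTIMEOUT above */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* handled by WDIOC_KEEPALIVE above */
	write(fd, "V", 1);			/* magic close, honored via WDIOF_MAGICCLOSE */
	close(fd);
	return 0;
}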
Code example #14
static long iTCO_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	int new_options, retval = -EINVAL;
	int new_heartbeat;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.options =		WDIOF_SETTIMEOUT |
					WDIOF_KEEPALIVEPING |
					WDIOF_MAGICCLOSE,
		.firmware_version =	0,
		.identity =		DRV_NAME,
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
	{
		if (get_user(new_options, p))
			return -EFAULT;

		if (new_options & WDIOS_DISABLECARD) {
			iTCO_wdt_stop();
			retval = 0;
		}
		if (new_options & WDIOS_ENABLECARD) {
			iTCO_wdt_keepalive();
			iTCO_wdt_start();
			retval = 0;
		}
		return retval;
	}
	case WDIOC_KEEPALIVE:
		iTCO_wdt_keepalive();
		return 0;

	case WDIOC_SETTIMEOUT:
	{
		if (get_user(new_heartbeat, p))
			return -EFAULT;
		if (iTCO_wdt_set_heartbeat(new_heartbeat))
			return -EINVAL;
		iTCO_wdt_keepalive();
		/* fall through */
	}
	case WDIOC_GETTIMEOUT:
		return put_user(heartbeat, p);
	case WDIOC_GETTIMELEFT:
	{
		int time_left;
		if (iTCO_wdt_get_timeleft(&time_left))
			return -EINVAL;
		return put_user(time_left, p);
	}
	default:
		return -ENOTTY;
	}
}

/*
 *	Kernel Interfaces
 */

static const struct file_operations iTCO_wdt_fops = {
	.owner =		THIS_MODULE,
	.llseek =		no_llseek,
	.write =		iTCO_wdt_write,
	.unlocked_ioctl =	iTCO_wdt_ioctl,
	.open =			iTCO_wdt_open,
	.release =		iTCO_wdt_release,
};

static struct miscdevice iTCO_wdt_miscdev = {
	.minor =	WATCHDOG_MINOR,
	.name =		"watchdog",
	.fops =		&iTCO_wdt_fops,
};

/*
 *	Reboot notifier
 */

static int TCO_reboot_notifier(struct notifier_block *this,
			   unsigned long code,
			   void *another_unused)
{
	if (code == SYS_HALT || code == SYS_POWER_OFF) {
		iTCO_wdt_set_reset_type(TCO_POLICY_HALT);
	}

	iTCO_wdt_last_kick(5);

#ifdef CONFIG_DEBUG_FS
	if (iTCO_wdt_private.panic_reboot_notifier) {
		BUG();
	}
#endif

	return NOTIFY_DONE;
}

static irqreturn_t tco_irq_handler(int irq, void *arg)
{
	pr_warn("[SHTDWN] %s, WATCHDOG TIMEOUT HANDLER!\n", __func__);

	/* reduce the timeout to the minimum, but sufficient for tracing */
	iTCO_wdt_set_heartbeat(15);
	iTCO_wdt_keepalive();

	trigger_all_cpu_backtrace();
	panic("Kernel Watchdog");

	/* This code should not be reached */

	return IRQ_HANDLED;
}

/*
 *	Init & exit routines
 */

static int iTCO_wdt_init(struct pci_dev *pdev,
		const struct pci_device_id *ent, struct platform_device *dev)
{
	int ret;
	u32 base_address;
	unsigned long val32;

	/*
	 *      Find the ACPI/PM base I/O address which is the base
	 *      for the TCO registers (TCOBASE=ACPIBASE + 0x60)
	 *      ACPIBASE is bits [15:7] from 0x40-0x43
	 */
	pci_read_config_dword(pdev, 0x40, &base_address);
	base_address &= 0x0000ff80;
	if (base_address == 0x00000000) {
		/* Something's wrong here, ACPIBASE has to be set */
		pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
		return -ENODEV;
	}
	iTCO_wdt_private.iTCO_version =
			iTCO_chipset_info[ent->driver_data].iTCO_version;
	iTCO_wdt_private.ACPIBASE = base_address;
	iTCO_wdt_private.pdev = pdev;

	pci_read_config_dword(pdev, 0x44, &pmc_base_address);
	pmc_base_address &= 0xFFFFFE00;

	/*
	 * Disable watchdog on command-line demand
	 */
	if (strstr(saved_command_line, "disable_kernel_watchdog=1")) {
		pr_warn("disable_kernel_watchdog=1 watchdog will not be started\n");
		iTCO_wdt_private.enable = false;
		/* Set the NO_REBOOT bit to prevent later reboots */
		iTCO_wdt_set_NO_REBOOT_bit();
		/* Make sure the watchdog is stopped in case IAFW started it */
		iTCO_wdt_stop();
	} else {
		iTCO_wdt_private.enable = true;
		/* Check chipset's NO_REBOOT bit */
		if (iTCO_wdt_unset_NO_REBOOT_bit()) {
			pr_err("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
			ret = -ENODEV;	/* Cannot reset NO_REBOOT bit */
			goto out;
		}
	}

	/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
	if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
		pr_err("I/O address 0x%04lx already in use, device disabled\n",
		       SMI_EN);
		ret = -EIO;
		goto out;
	}
	if (!request_region(SMI_STS, 4,	"iTCO_wdt")) {
		pr_err("I/O address 0x%04lx already in use, device disabled\n",
				SMI_STS);
		ret = -EIO;
		goto unreg_smi_en;	/* only SMI_EN has been requested so far */
	}

	if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
		/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
		val32 = inl(SMI_EN);
		val32 &= ~TCO_EN_BIT;	/* Turn off TCO watchdog timer */
		outl(val32, SMI_EN);
	}

	/* The TCO I/O registers reside in a 32-byte range pointed to
	   by the TCOBASE value */
	if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
		pr_err("I/O address 0x%04lx already in use, device disabled\n",
		       TCOBASE);
		ret = -EIO;
		goto unreg_smi_sts;	/* release SMI_STS, then SMI_EN */
	}

	pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n",
		iTCO_chipset_info[ent->driver_data].name,
		iTCO_chipset_info[ent->driver_data].iTCO_version,
		TCOBASE);

	/* Check that the heartbeat value is within its range;
	   if not, reset to the default */
	if (iTCO_wdt_set_heartbeat(heartbeat)) {
		iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
		pr_info("timeout value out of range, using %d\n", heartbeat);
	}

	ret = misc_register(&iTCO_wdt_miscdev);
	if (ret != 0) {
		pr_err("cannot register miscdev on minor=%d (err=%d)\n",
		       WATCHDOG_MINOR, ret);
		goto unreg_region;
	}

	pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
		heartbeat, nowayout);

	/* Reset OS policy */
	iTCO_wdt_set_reset_type(TCO_POLICY_NORM);

	ret = acpi_register_gsi(NULL, TCO_WARNING_IRQ,
				ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH);
	if (ret < 0) {
		pr_err("failed to configure TCO warning IRQ %d\n", TCO_WARNING_IRQ);
		goto misc_unreg;
	}
	ret = request_irq(TCO_WARNING_IRQ, tco_irq_handler, 0, "tco_watchdog", NULL);
	if (ret < 0) {
		pr_err("failed to request TCO warning IRQ %d\n", TCO_WARNING_IRQ);
		goto gsi_unreg;
	}

	/* Clear old TCO timeout status */
	val32 = TCO_TIMEOUT_BIT | SECOND_TO_STS_BIT;
	outl(val32, TCO1_STS);
	/* Clear the SMI status */
	outl(TCO_STS_BIT, SMI_STS);

	/* Enable SMI for TCO */
	val32 = inl(SMI_EN);
	val32 |= TCO_EN_BIT;
	outl(val32, SMI_EN);
	/* then ensure that PMC is ready to handle next SMI */
	val32 |= EOS_BIT;
	outl(val32, SMI_EN);

	reboot_notifier.notifier_call = TCO_reboot_notifier;
	reboot_notifier.priority = 1;
	ret = register_reboot_notifier(&reboot_notifier);
	if (ret)
		/* We continue, as the reboot notifier is not critical
		 * for the watchdog */
		pr_err("cannot register reboot notifier %d\n", ret);

	return 0;

gsi_unreg:
	acpi_unregister_gsi(TCO_WARNING_IRQ);
misc_unreg:
	misc_deregister(&iTCO_wdt_miscdev);
unreg_region:
	release_region(TCOBASE, 0x20);
unreg_smi_sts:
	release_region(SMI_STS, 4);
unreg_smi_en:
	release_region(SMI_EN, 4);
out:
	iTCO_wdt_private.ACPIBASE = 0;
	return ret;
}
Code example #15
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* test for hardlockups on the next cpu */
	watchdog_check_hardlockup_other_cpu();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic) {
			trigger_all_cpu_backtrace();
			panic("softlockup: hung tasks");
		}
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
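
For completeness, the is_softlockup() helper that produces the duration checked above was, in kernels of this era, essentially the following; again a from-memory sketch, not an exact quote:

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;	/* seconds since the watchdog was touched */

	return 0;
}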