Example #1
0
/*
 * Pre-timeout callback for the IPMI watchdog.  Depending on the
 * configured pre-op, either panic (at most once, guarded by
 * preop_panic_excl) or flag data as readable and notify any waiters.
 */
static void ipmi_wdog_pretimeout_handler(void *handler_data)
{
	if (preaction_val != WDOG_PRETIMEOUT_NONE) {
		if (preop_val == WDOG_PREOP_GIVE_DATA) {
			/* Make the pre-timeout event visible to readers. */
			spin_lock(&ipmi_read_lock);
			data_to_read = 1;
			wake_up_interruptible(&read_q);
			kill_fasync(&fasync_q, SIGIO, POLL_IN);

			spin_unlock(&ipmi_read_lock);
		} else if (preop_val == WDOG_PREOP_PANIC) {
			/* Only the first caller to bump the counter panics. */
			if (atomic_inc_and_test(&preop_panic_excl))
				panic("Watchdog pre-timeout");
		}
	}

	/* On some machines, the heartbeat will give
	   an error and not work unless we re-enable
	   the timer.   So do so. */
	pretimeout_since_last_heartbeat = 1;
}
Example #2
0
/*
 * NMI handler for the IPMI watchdog pre-timeout.  An NMI arriving here
 * is neither a memory nor an I/O error; without sending a message (which
 * locking makes nearly impossible from NMI context) we cannot be sure it
 * came from IPMI, so we claim it only when a pre-timeout is plausible.
 */
static int
ipmi_nmi(unsigned int val, struct pt_regs *regs)
{
	/* An NMI fired while we were probing the NMI path: record success. */
	if (testing_nmi) {
		testing_nmi = 2;
		return NMI_HANDLED;
	}

	/*
	 * Not expecting a timeout, or not configured for NMI pre-timeout:
	 * this NMI is not ours.
	 */
	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE ||
	    preaction_val != WDOG_PRETIMEOUT_NMI)
		return NMI_DONE;

	/* Assume the IPMI watchdog produced this NMI. */
	if (preop_val == WDOG_PREOP_PANIC) {
		/* On some machines, the heartbeat will give
		   an error and not work unless we re-enable
		   the timer.   So do so. */
		pretimeout_since_last_heartbeat = 1;
		/* Panic at most once across all CPUs. */
		if (atomic_inc_and_test(&preop_panic_excl))
			panic(PFX "pre-timeout");
	}

	return NMI_HANDLED;
}
Example #3
0
/*
 * Re-enable hardware interrupts for the given ath9k HW instance.
 *
 * Does nothing unless the global-interrupt bit is set in ah->imask.
 * Uses ah->intr_ref_cnt as a disable-nesting counter: the enable is only
 * performed when the increment brings the counter to zero (i.e. this call
 * balances the last outstanding disable); otherwise only a debug message
 * is logged.  The exact REG_WRITE ordering (IER first, then async, then
 * sync enables/masks) is hardware-mandated — do not reorder.
 */
void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;
	u32 async_mask;

	/* Interrupts globally masked off: nothing to enable. */
	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	/* Still more disables than enables outstanding: defer. */
	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	/* These chip revisions must not take the HOST1_FATAL sync interrupt. */
	if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
	    AR_SREV_9561(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	async_mask = AR_INTR_MAC_IRQ;

	/* MCI (BT coexistence) also raises async interrupts when enabled. */
	if (ah->imask & ATH9K_INT_MCI)
		async_mask |= AR_INTR_ASYNC_MASK_MCI;

	ath_dbg(common, INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	/* AR9100 has no separate async/sync interrupt-cause registers. */
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
Example #4
0
/*
 * Die-notifier variant of the IPMI watchdog NMI handler.  Claims an NMI
 * (NOTIFY_STOP) only when it plausibly came from the watchdog
 * pre-timeout; otherwise lets other notifiers see it (NOTIFY_OK).
 */
static int
ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;

	/* Ignore anything that is not an NMI, or that reports a
	 * memory/IO parity error (err bits 0xc0) — not ours. */
	if (val != DIE_NMI || (args->err & 0xc0))
		return NOTIFY_OK;

	/* An NMI fired while probing the NMI path: record success. */
	if (testing_nmi) {
		testing_nmi = 2;
		return NOTIFY_STOP;
	}

	/* Not expecting a timeout, or not configured for NMI
	 * pre-timeout: pass the NMI along. */
	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE ||
	    preaction_val != WDOG_PRETIMEOUT_NMI)
		return NOTIFY_OK;

	/* Assume the IPMI watchdog produced this NMI. */
	if (preop_val == WDOG_PREOP_PANIC) {
		/* Re-enable the timer; some machines need this for the
		 * heartbeat to keep working after a pre-timeout. */
		pretimeout_since_last_heartbeat = 1;
		/* Panic at most once across all CPUs. */
		if (atomic_inc_and_test(&preop_panic_excl))
			panic(PFX "pre-timeout");
	}

	return NOTIFY_STOP;
}
Example #5
0
/* DIV4 */
/*
 * Kick the FRQCR update for a DIV4 clock.
 *
 * frqcr_lock is an atomic counter used as a cheap exclusive lock:
 * atomic_inc_and_test() succeeds (returns true) only when the increment
 * brings the counter to zero — presumably it idles at -1; TODO confirm
 * against its initializer.  If another holder is present the WARN fires
 * and the kick is skipped, but the counter is decremented on both paths
 * to restore the idle value.
 */
static void div4_kick(struct clk *clk)
{
	if (!WARN(!atomic_inc_and_test(&frqcr_lock), "FRQCR* lock broken!\n"))
		frqcr_kick_do(clk);
	atomic_dec(&frqcr_lock);
}
Example #6
0
/*
 * Bump the page's map count.
 *
 * Returns non-zero (true) only when the increment takes _mapcount from
 * -1 to 0, i.e. this is the first mapping of the page.
 */
static inline int gnt_map(struct page *page)
{
    /* return true if the transition is from -1 to 0 */
    return atomic_inc_and_test(&page->_mapcount);
}