Example 1
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @regs:	pointer to a register structure
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
			     struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	trace_irq_entry(irq, regs);

	handle_dynamic_tick(action);

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	trace_irq_exit(irq, retval);

	return retval;
}
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;
	struct pt_regs *regs = get_irq_regs();

	trace_mark(kernel_irq_entry, "irq_id %u kernel_mode %u", irq,
		(regs)?(!user_mode(regs)):(1));

	handle_dynamic_tick(action);

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	trace_mark(kernel_irq_exit, MARK_NOARGS);

	return retval;
}
Example 3
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	handle_dynamic_tick(action);

	// If the first handler in the chain does not have IRQF_DISABLED set,
	// enable interrupts (on the current CPU) with local_irq_enable_in_hardirq().
	// In other words, the handler may itself be interrupted by other IRQs;
	// depending on the flow type, the IRQ currently being handled may stay masked.
	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();
	// When the IRQ is shared, walk every handler on the action chain.
	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	// If IRQF_SAMPLE_RANDOM is set for this IRQ, call add_interrupt_randomness()
	// so the timing of the event feeds the entropy pool
	// (interrupts that occur at random times are an ideal source).
	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	// Disable interrupts. Enabling and disabling do not nest, so this is
	// independent of whether interrupts were enabled when processing began:
	// handle_IRQ_event is entered with interrupts disabled and is still
	// expected to have them disabled when it returns.
	local_irq_disable();

	return retval;
}
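For context, a hedged sketch (not part of the example above) of how handlers end up on the chain that handle_IRQ_event() walks: each request_irq() call appends an irqaction, and the flags tested above (IRQF_DISABLED, IRQF_SAMPLE_RANDOM) are set at registration time. my_irq_handler, my_device_setup_irq, "my_device" and my_dev are placeholder names.

/* Hypothetical registration sketch; handler and device names are placeholders. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* hardirq-context work for the device goes here */
	return IRQ_HANDLED;		/* IRQ_NONE if our device did not interrupt */
}

static int my_device_setup_irq(unsigned int irq, void *my_dev)
{
	/*
	 * IRQF_SHARED lets several irqaction entries share the line (the
	 * do/while loop above walks them all); IRQF_SAMPLE_RANDOM is the
	 * flag that makes handle_IRQ_event() call add_interrupt_randomness().
	 */
	return request_irq(irq, my_irq_handler,
			   IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "my_device", my_dev);
}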
Example 4
/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0.  All other
 * cases return error -- caller may then invoke ide_error().
 *
 * This routine should get fixed to not hog the cpu during extra long waits..
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half second intervals thereafter,
 * until timeout is achieved, before timing out.
 */
int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
		    unsigned long timeout, u8 *rstat)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	unsigned long flags;
	bool irqs_threaded = force_irqthreads;
	int i;
	u8 stat;

	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */
	stat = tp_ops->read_status(hwif);

	if (stat & ATA_BUSY) {
		if (!irqs_threaded) {
			local_save_flags(flags);
			local_irq_enable_in_hardirq();
		}
		timeout += jiffies;
		while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
			if (time_after(jiffies, timeout)) {
				/*
				 * One last read after the timeout in case
				 * heavy interrupt load made us not make any
				 * progress during the timeout..
				 */
				stat = tp_ops->read_status(hwif);
				if ((stat & ATA_BUSY) == 0)
					break;

				if (!irqs_threaded)
					local_irq_restore(flags);
				*rstat = stat;
				return -EBUSY;
			}
		}
		if (!irqs_threaded)
			local_irq_restore(flags);
	}
	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		stat = tp_ops->read_status(hwif);

		if (OK_STAT(stat, good, bad)) {
			*rstat = stat;
			return 0;
		}
	}
	*rstat = stat;
	return -EFAULT;
}
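In-tree, this helper is normally used through a thin wrapper that converts a failure into an ide_error() call. The following is only a sketch of that caller pattern, inferred from the return convention visible above (-EBUSY on timeout, -EFAULT on a bad status byte); my_wait_stat is a placeholder name.

/* Sketch of a caller; assumes only the return convention shown above. */
static int my_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive,
			u8 good, u8 bad, unsigned long timeout)
{
	u8 stat;
	int err = __ide_wait_stat(drive, good, bad, timeout, &stat);

	if (err) {
		const char *s = (err == -EBUSY) ? "status timeout"
						: "status error";
		*startstop = ide_error(drive, s, stat);
	}
	return err;
}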
Example 5
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (endflag == 0)
		mb();
}
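A hedged sketch of how this helper is driven: the NMI watchdog self-test kicks nmi_cpu_busy() onto the other CPUs, waits a while, then releases them by setting endflag (the module-level flag polled in the loop above). The four-argument smp_call_function() form matches kernels of the same vintage as this example; the delay value and the omitted per-CPU count checks are purely illustrative.

/* Illustrative caller sketch; the delay and the surrounding checks are simplified. */
static volatile int endflag __initdata = 0;	/* flag polled by nmi_cpu_busy() */

static void __init my_check_nmi_watchdog(void)
{
	/* keep every other CPU busy so its performance counter keeps ticking */
	smp_call_function(nmi_cpu_busy, NULL, 0, 0);

	mdelay(200);		/* give the watchdog a few ticks to fire */

	/* ... per-CPU NMI counts would be compared here ... */

	endflag = 1;		/* let the busy loops in nmi_cpu_busy() exit */
}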
/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}
/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *task = &hwif->task;
	struct ide_taskfile *tf = &task->tf;
	int custom = (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
	u8 stat;

	local_irq_enable_in_hardirq();

	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
			break;
		udelay(10);
	};

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		if (custom && tf->command == ATA_CMD_SET_MULTI) {
			drive->mult_req = drive->mult_count = 0;
			drive->special.b.recalibrate = 1;
			(void)ide_dump_status(drive, __func__, stat);
			return ide_stopped;
		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
				ide_set_handler(drive, &task_no_data_intr,
						WAIT_WORSTCASE, NULL);
				return ide_started;
			}
		}
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}

	if (!custom)
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));
	else if (tf->command == ATA_CMD_IDLEIMMEDIATE) {
		hwif->tp_ops->tf_read(drive, task);
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, tf);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));
	} else if (tf->command == ATA_CMD_SET_MULTI)
		drive->mult_count = drive->mult_req;

	return ide_stopped;
}
Example 8
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
	u8 stat;

	local_irq_enable_in_hardirq();

	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
			break;
		udelay(10);
	};

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		if (custom && tf->command == ATA_CMD_SET_MULTI) {
			drive->mult_req = drive->mult_count = 0;
			drive->special_flags |= IDE_SFLAG_RECALIBRATE;
			(void)ide_dump_status(drive, __func__, stat);
			return ide_stopped;
		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
				ide_set_handler(drive, &task_no_data_intr,
						WAIT_WORSTCASE);
				return ide_started;
			}
		}
		return ide_error(drive, "task_no_data_intr", stat);
	}

	if (custom && tf->command == ATA_CMD_SET_MULTI)
		drive->mult_count = drive->mult_req;

	if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
	    tf->command == ATA_CMD_CHK_POWER) {
		struct request *rq = hwif->rq;

		if (blk_pm_request(rq))
			ide_complete_pm_rq(drive, rq);
		else
			ide_finish_cmd(drive, cmd, stat);
	}

	return ide_stopped;
}
/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		drive->mult_count = drive->mult_req;
	else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}
Example 10
/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_task_t *args	= HWGROUP(drive)->rq->special;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = ide_read_status(drive);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */

	if (args)
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));

	return ide_stopped;
}
/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *args = hwif->hwgroup->rq->special;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */

	if (args)
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));

	return ide_stopped;
}
Example 12
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	/*
	 * avoid a warning, on PREEMPT_RT this wont run in hardirq context:
	 */
#ifndef CONFIG_PREEMPT_RT
	local_irq_enable_in_hardirq();
#endif
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		barrier();
}
int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
		    unsigned long timeout, u8 *rstat)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	unsigned long flags;
	int i;
	u8 stat;

	udelay(1);	
	stat = tp_ops->read_status(hwif);

	if (stat & ATA_BUSY) {
		local_save_flags(flags);
		local_irq_enable_in_hardirq();
		timeout += jiffies;
		while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
			if (time_after(jiffies, timeout)) {
				stat = tp_ops->read_status(hwif);
				if ((stat & ATA_BUSY) == 0)
					break;

				local_irq_restore(flags);
				*rstat = stat;
				return -EBUSY;
			}
		}
		local_irq_restore(flags);
	}
	for (i = 0; i < 10; i++) {
		udelay(1);
		stat = tp_ops->read_status(hwif);

		if (OK_STAT(stat, good, bad)) {
			*rstat = stat;
			return 0;
		}
	}
	*rstat = stat;
	return -EFAULT;
}
/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int retries = 5;
	u8 stat;

	local_irq_enable_in_hardirq();

	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & BUSY_STAT) == 0 || retries-- == 0)
			break;
		udelay(10);
	};

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
Example 15
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	struct timeval tv;

	do_gettimeofday(&tv);
	irq_history_int[irq_history_idx] = irq;
	irq_history_fp[irq_history_idx] = action->handler;
	irq_history_us[irq_history_idx] = tv.tv_usec;
	irq_history_idx = (irq_history_idx+1) % IRQ_HIST_NUM;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
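The IRQ_WAKE_THREAD branch above is only taken for actions registered with a thread function; a minimal hedged sketch of such a registration follows (all names are placeholders).

/* Hypothetical threaded-IRQ registration; all names are placeholders. */
static irqreturn_t my_hardirq_part(int irq, void *dev_id)
{
	/* quick hardirq work: quiesce the device, then defer the rest */
	return IRQ_WAKE_THREAD;	/* handle_IRQ_event() wakes action->thread */
}

static irqreturn_t my_thread_part(int irq, void *dev_id)
{
	/* heavy lifting, runs in the handler thread (process context) */
	return IRQ_HANDLED;
}

static int my_device_setup_irq(unsigned int irq, void *my_dev)
{
	return request_threaded_irq(irq, my_hardirq_part, my_thread_part,
				    0, "my_device", my_dev);
}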
Example 16
irqreturn_t ide_intr (int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;

	if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != hwif->host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (!ide_ack_intr(hwif))
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->chipset != ide_pci)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped) {
		BUG_ON(hwif->handler);
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}

	return irq_ret;
}
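ide_intr() above only dispatches if an earlier command path armed hwif->handler; a hedged sketch of that arming pattern, reusing the four-argument ide_set_handler() form seen in the task_no_data_intr example (my_issue_cmd is a placeholder name).

/* Hypothetical command-issue path; my_issue_cmd is a placeholder name. */
static ide_startstop_t my_issue_cmd(ide_drive_t *drive)
{
	/* taskfile registers would be programmed and the command written here */

	/* arm the completion handler that ide_intr() will invoke on the IRQ */
	ide_set_handler(drive, &task_no_data_intr, WAIT_WORSTCASE, NULL);

	return ide_started;	/* the interrupt, not this path, completes the command */
}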
Example 17
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
#ifdef CONFIG_TIVO_PLOG
		plog_intr_enter(irq, current_thread_info()->regs->cp0_epc);
#endif
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);
#ifdef CONFIG_TIVO_PLOG
		plog_intr_leave(irq);
#endif

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
Example 18
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	while (endflag == 0)
		mb();
}