Example #1
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void fastcall
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	kstat_cpu(cpu).irqs[irq]++;

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		if (desc->chip->mask)
			desc->chip->mask(irq);
		desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
		desc->status |= IRQ_PENDING;
		goto out_unlock;
	}

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}
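As the kernel-doc above notes, simple IRQs are typically raised by a demultiplexing handler rather than by flow-control hardware. The sketch below illustrates that arrangement against the 2.6-era genirq API; the expander chip, the IRQ numbers and expander_read_pending() are made-up placeholders, and generic_handle_irq() only exists in later 2.6 kernels (older trees call desc->handle_irq() directly).

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>

/* Hypothetical expander: the child lines need no hardware flow control. */
#define EXPANDER_IRQ_BASE	200
#define EXPANDER_NR_IRQS	8

extern unsigned long expander_read_pending(void);	/* made-up register read */

static struct irq_chip expander_irq_chip = {
	.name = "expander",
};

/* Chained handler on the parent line: decode in software and re-dispatch. */
static void expander_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = expander_read_pending();

	while (pending) {
		unsigned int bit = __ffs(pending);

		pending &= ~(1UL << bit);
		generic_handle_irq(EXPANDER_IRQ_BASE + bit);
	}
}

static void __init expander_init_irqs(unsigned int parent_irq)
{
	int i;

	for (i = 0; i < EXPANDER_NR_IRQS; i++)
		set_irq_chip_and_handler(EXPANDER_IRQ_BASE + i,
					 &expander_irq_chip, handle_simple_irq);

	set_irq_chained_handler(parent_irq, expander_demux_handler);
}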
Example #2
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	The interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		mask_ack_irq(desc, irq);
		goto out_unlock;
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	if (desc->chip->ack)
		desc->chip->ack(irq);

	/* Mark the IRQ currently in progress. */
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		if (unlikely(!action)) {
			mask_irq(desc, irq);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely((desc->status &
			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
			unmask_irq(desc, irq);
		}

		desc->status &= ~IRQ_PENDING;
		raw_spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		raw_spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	raw_spin_unlock(&desc->lock);
}
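handle_edge_irq above relies on mask_ack_irq(), mask_irq() and unmask_irq(), static helpers from kernel/irq/chip.c that are not shown in these examples. Roughly, in the 2.6.3x kernels this snippet corresponds to (details vary a little between versions), they wrap the chip callbacks and keep the IRQ_MASKED status bit in sync:

static inline void mask_ack_irq(struct irq_desc *desc, int irq)
{
	/* Prefer a combined mask_ack callback if the chip has one. */
	if (desc->chip->mask_ack)
		desc->chip->mask_ack(irq);
	else {
		desc->chip->mask(irq);
		if (desc->chip->ack)
			desc->chip->ack(irq);
	}
	desc->status |= IRQ_MASKED;
}

static inline void mask_irq(struct irq_desc *desc, int irq)
{
	if (desc->chip->mask) {
		desc->chip->mask(irq);
		desc->status |= IRQ_MASKED;
	}
}

static inline void unmask_irq(struct irq_desc *desc, int irq)
{
	if (desc->chip->unmask) {
		desc->chip->unmask(irq);
		desc->status &= ~IRQ_MASKED;
	}
}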
Example #3
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
    struct irqaction *action;
    irqreturn_t action_ret;

    spin_lock(&desc->lock);

    if (unlikely(desc->status & IRQ_INPROGRESS))
        goto out_unlock;
    desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    action = desc->action;
    if (unlikely(!action || (desc->status & IRQ_DISABLED)))
        goto out_unlock;

    desc->status |= IRQ_INPROGRESS;
    spin_unlock(&desc->lock);

    action_ret = handle_IRQ_event(irq, action);
    if (!noirqdebug)
        note_interrupt(irq, desc, action_ret);

    spin_lock(&desc->lock);
    desc->status &= ~IRQ_INPROGRESS;
out_unlock:
    spin_unlock(&desc->lock);
}
Example #4
static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs)
{
    struct irqServer * iServer;
    struct irqDescriptor *desc = &irq_desc[irq];

    {
        unsigned int status;
        mask_and_ack_8259A(irq);
        status = desc->id_status & ~(IRQ_REPLAY | IRQ_WAITING);
        iServer = NULL;
        if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
            iServer = desc->id_server;
            status |= IRQ_INPROGRESS;
        }
        desc->id_status = status;
    }

    /* Exit early if we had no action or it was disabled */
    if (!iServer)
        return;

    handle_IRQ_event(irq, regs, iServer);

    {
        unsigned int status = desc->id_status & ~IRQ_INPROGRESS;
        desc->id_status = status;
        if (!(status & IRQ_DISABLED))
            enable_8259A_irq(irq);
    }
}
Example #5
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);
	/*
	 * Call the mask_ack method of the irq chip that owns this line.
	 * If the chip does not provide one, mask() is called first and
	 * then ack(). For a level-triggered interrupt this masks the
	 * line and acknowledges the current interrupt; on ARM, for
	 * example, it clears the pending bits in the SRCPND/SRCINTPND
	 * registers.
	 */
	mask_ack_irq(desc, irq);
	desc = irq_remap_to_desc(irq, desc);
	/*
	 * On an SMP system another CPU may already be handling this
	 * interrupt and will have set IRQ_INPROGRESS. If this CPU also
	 * entered the handler for the same line, it simply bails out
	 * here.
	 */
	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	action = desc->action;
	/*
	 * If no action is installed on this line, exit right away.
	 * Likewise, if the line is marked IRQ_DISABLED (it is supposed
	 * to be masked) but an interrupt arrived anyway, exit as well;
	 * the line stays masked.
	 */
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;
	/*
	 * Mark the interrupt as in progress to avoid races between CPUs.
	 * This is still done under desc->lock; the lock is dropped just
	 * below.
	 */
	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	/* The handler has finished; update the line status, again under the lock. */
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If the line is not disabled and the chip has an unmask method,
	 * unmask the line again: the level flow handler masked it with
	 * mask_ack_irq() at the beginning.
	 */
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
out_unlock:
	spin_unlock(&desc->lock);
}
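Which flow handler runs for a given line is decided at setup time, typically by SoC or platform interrupt-controller code. A minimal registration sketch under the same 2.6-era API (the chip and the IRQ numbers are hypothetical placeholders):

#include <linux/init.h>
#include <linux/irq.h>

#define IRQ_UART0		32	/* made-up line numbers */
#define IRQ_GPIO_BUTTON		33

extern struct irq_chip soc_irq_chip;	/* hypothetical chip with mask/unmask/ack */

static void __init soc_irq_init(void)
{
	/* Level-triggered line: masked+acked on entry, unmasked again
	 * after the action has run (see handle_level_irq above). */
	set_irq_chip_and_handler(IRQ_UART0, &soc_irq_chip, handle_level_irq);

	/* Edge-triggered line: only acked, re-run while IRQ_PENDING is
	 * set (see handle_edge_irq above). */
	set_irq_chip_and_handler(IRQ_GPIO_BUTTON, &soc_irq_chip,
				 handle_edge_irq);
}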
Example #6
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
    struct irqaction *action;
    irqreturn_t action_ret;

    raw_spin_lock(&desc->lock);

    if (unlikely(desc->status & IRQ_INPROGRESS))
        goto out;

    desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    /*
     * If it's disabled or no action available
     * then mask it and get out of here:
     */
    action = desc->action;
    if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
        desc->status |= IRQ_PENDING;
        mask_irq(desc);
        goto out;
    }

    if (desc->status & IRQ_ONESHOT)
        mask_irq(desc);

    desc->status |= IRQ_INPROGRESS;
    desc->status &= ~IRQ_PENDING;
    raw_spin_unlock(&desc->lock);

    action_ret = handle_IRQ_event(irq, action);
    if (!noirqdebug)
        note_interrupt(irq, desc, action_ret);

    raw_spin_lock(&desc->lock);
    desc->status &= ~IRQ_INPROGRESS;
#ifdef CONFIG_IPIPE
    if (!(desc->status & IRQ_MASKED))
        desc->irq_data.chip->irq_unmask(&desc->irq_data);
out:
#else
out:
    desc->irq_data.chip->irq_eoi(&desc->irq_data);
#endif

    raw_spin_unlock(&desc->lock);
}
Example #7
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void fastcall
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	irqreturn_t action_ret;

	kstat_this_cpu.irqs[irq]++;

	if (desc->chip->ack)
		desc->chip->ack(irq);

	action_ret = handle_IRQ_event(irq, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	if (desc->chip->eoi)
		desc->chip->eoi(irq);
}
Example #8
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	irqreturn_t action_ret;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (desc->irq_data.chip->irq_ack)
		desc->irq_data.chip->irq_ack(&desc->irq_data);

	action_ret = handle_IRQ_event(irq, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	if (desc->irq_data.chip->irq_eoi)
		desc->irq_data.chip->irq_eoi(&desc->irq_data);
}
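Because handle_percpu_irq never takes desc->lock, it is only suitable for interrupts that are delivered to a single CPU per descriptor (local timers, IPIs and the like). A sketch of how such a line might be wired up under the same 2.6-era API; the IRQ number and chip are hypothetical, and some architectures set these flags through helpers rather than touching desc->status directly:

#include <linux/init.h>
#include <linux/irq.h>

#define IRQ_LOCALTIMER	29			/* made-up per-CPU interrupt */

extern struct irq_chip localtimer_chip;		/* hypothetical per-CPU chip */

static void __init localtimer_irq_init(void)
{
	struct irq_desc *desc = irq_to_desc(IRQ_LOCALTIMER);

	/* Mark the descriptor per-CPU and keep autoprobing away from it;
	 * the flow handler relies on there being no cross-CPU races. */
	desc->status |= IRQ_PER_CPU | IRQ_NOPROBE;

	set_irq_chip_and_handler(IRQ_LOCALTIMER, &localtimer_chip,
				 handle_percpu_irq);
}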
Example #9
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
    irqreturn_t action_ret;

    kstat_incr_irqs_this_cpu(irq, desc);

    if (desc->chip->ack)
        desc->chip->ack(irq);

    action_ret = handle_IRQ_event(irq, desc->action);
    if (!noirqdebug)
        note_interrupt(irq, desc, action_ret);

    if (desc->chip->eoi) {
        desc->chip->eoi(irq);
        desc = irq_remap_to_desc(irq, desc);
    }
}
Example #10
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void fastcall
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If it's disabled or no action available
	 * then mask it and get out of here:
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
		if (desc->chip->mask)
			desc->chip->mask(irq);
		goto out;
	}

	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out:
	desc->chip->eoi(irq);

	spin_unlock(&desc->lock);
}
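Note the unconditional desc->chip->eoi(irq) at the out: label: any chip used with the fasteoi flow must provide an ->eoi() callback (on x86 this is the flow used for IO-APIC interrupts). A registration sketch; the chip is a stand-in, while set_irq_chip_and_handler_name() is the 2.6-era helper that also sets the flow name shown in /proc/interrupts:

#include <linux/irq.h>

extern struct irq_chip eoi_capable_chip;	/* hypothetical, must supply .eoi */

static void setup_fasteoi_line(unsigned int irq)
{
	set_irq_chip_and_handler_name(irq, &eoi_capable_chip,
				      handle_fasteoi_irq, "fasteoi");
}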
Example #11
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);
	mask_ack_irq(desc, irq);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action available
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
	{
		printk(KERN_ERR "IRQ %d enter bad status!\n", irq);
		goto out_unlock;
	}

	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

	if (unlikely(desc->status & IRQ_ONESHOT))
		desc->status |= IRQ_MASKED;
	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
out_unlock:
	spin_unlock(&desc->lock);
}
Example #12
static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);

	BUG_ON(desc->status & IRQ_INPROGRESS);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_mask;

	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);

	/* XXX: There is no direct way to access noirqdebug, so check
	 * unconditionally for spurious irqs...
	 * Maybe this function should go to kernel/irq/chip.c? */
	note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

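	/*
	 * Note: the out_mask label below sits inside the if body, so both
	 * the early "goto out_mask" above (no action or IRQ_DISABLED) and
	 * an IRQ that got disabled while its handler ran end up masking
	 * the line here before the unconditional ack.
	 */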
	if (desc->status & IRQ_DISABLED)
out_mask:
		desc->chip->mask(irq);

	/* ack unconditionally to unmask lower prio irqs */
	desc->chip->ack(irq);

	spin_unlock(&desc->lock);
}
Example #13
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
    struct irqaction *action;
    irqreturn_t action_ret;

    raw_spin_lock(&desc->lock);
#ifndef CONFIG_IPIPE
    mask_ack_irq(desc);
#endif

    if (unlikely(desc->status & IRQ_INPROGRESS))
        goto out_unlock;
    desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    /*
     * If it's disabled or no action available
     * keep it masked and get out of here
     */
    action = desc->action;
    if (unlikely(!action || (desc->status & IRQ_DISABLED)))
        goto out_unlock;

    desc->status |= IRQ_INPROGRESS;
    raw_spin_unlock(&desc->lock);

    action_ret = handle_IRQ_event(irq, action);
    if (!noirqdebug)
        note_interrupt(irq, desc, action_ret);

    raw_spin_lock(&desc->lock);
    desc->status &= ~IRQ_INPROGRESS;

    if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
        unmask_irq(desc);
out_unlock:
    raw_spin_unlock(&desc->lock);
}
Example #14
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);
	mask_ack_irq(desc, irq);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If it's disabled or no action available
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
out_unlock:
	spin_unlock(&desc->lock);
}
Example #15
/*
 * PIIX4-8259 master/virtual functions to handle interrupt requests
 * from legacy devices: floppy, parallel, serial, rtc.
 *
 * None of these get Cobalt APIC entries, neither do they have IDT
 * entries. These interrupts are purely virtual and distributed from
 * the 'master' interrupt source: CO_IRQ_8259.
 *
 * When the 8259 interrupts its handler figures out which of these
 * devices is interrupting and dispatches to its handler.
 *
 * CAREFUL: devices see the 'virtual' interrupt only. Thus disable/
 * enable_irq gets the right irq. This 'master' irq is never directly
 * manipulated by any driver.
 */
static irqreturn_t piix4_master_intr(int irq, void *dev_id)
{
	int realirq;
	irq_desc_t *desc;
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);

	/* Find out what's interrupting in the PIIX4 master 8259 */
	outb(0x0c, 0x20);		/* OCW3 Poll command */
	realirq = inb(0x20);

	/*
	 * Bit 7 == 0 means invalid/spurious
	 */
	if (unlikely(!(realirq & 0x80)))
		goto out_unlock;

	realirq &= 7;

	if (unlikely(realirq == 2)) {
		outb(0x0c, 0xa0);
		realirq = inb(0xa0);

		if (unlikely(!(realirq & 0x80)))
			goto out_unlock;

		realirq = (realirq & 7) + 8;
	}

	/* mask and ack interrupt */
	cached_irq_mask |= 1 << realirq;
	if (unlikely(realirq > 7)) {
		inb(0xa1);
		outb(cached_slave_mask, 0xa1);
		outb(0x60 + (realirq & 7), 0xa0);
		outb(0x60 + 2, 0x20);
	} else {
		inb(0x21);
		outb(cached_master_mask, 0x21);
		outb(0x60 + realirq, 0x20);
	}

	spin_unlock_irqrestore(&i8259A_lock, flags);

	desc = irq_desc + realirq;

	/*
	 * handle this 'virtual interrupt' as a Cobalt one now.
	 */
	kstat_cpu(smp_processor_id()).irqs[realirq]++;

	if (likely(desc->action != NULL))
		handle_IRQ_event(realirq, desc->action);

	if (!(desc->status & IRQ_DISABLED))
		enable_8259A_irq(realirq);

	return IRQ_HANDLED;

out_unlock:
	spin_unlock_irqrestore(&i8259A_lock, flags);
	return IRQ_NONE;
}
Example #16
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
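__do_IRQ() is not an entry point by itself; each architecture's low-level interrupt entry calls it after setting up the interrupt context. A hedged sketch of such a wrapper, roughly following the pattern used by architectures of that era (the function name is a stand-in and details differ per architecture):

#include <linux/linkage.h>
#include <linux/hardirq.h>
#include <linux/irq.h>
#include <asm/irq_regs.h>

/* Called from the low-level assembly entry with the decoded irq number. */
asmlinkage void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();		/* accounting; marks hardirq context */
	__do_IRQ(irq);		/* the all-in-one handler shown above */
	irq_exit();		/* may run pending softirqs */

	set_irq_regs(old_regs);
}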
Example #17
/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	The interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	spin_lock(&desc->lock);
	/*
	 * In contrast to the level flow handler, mask_ack is not called
	 * here, so the interrupt line is not masked up front.
	 */
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	/*
	 * When an edge-triggered interrupt fires, the handler marks the
	 * line IRQ_INPROGRESS. If IRQ_INPROGRESS is already set here,
	 * another interrupt arrived on the same line while the first one
	 * is still being handled; mark it IRQ_PENDING so it gets picked
	 * up later.
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		/*
		 * An interrupt is now pending on this line, so as a special
		 * case the line is masked and acknowledged; the IRQ_MASKED
		 * bit set above records exactly that.
		 */
		mask_ack_irq(desc, irq);
		desc = irq_remap_to_desc(irq, desc);
		goto out_unlock;
	}
	kstat_incr_irqs_this_cpu(irq, desc);
	/*
	 * In the common case only ack() is called below; note the
	 * difference from the level-triggered flow, which uses mask_ack.
	 */
	/* Start handling the irq */
	if (desc->chip->ack)
		desc->chip->ack(irq);
	desc = irq_remap_to_desc(irq, desc);

	/* Mark the IRQ currently in progress. */
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;
		/* If no action is installed on this line, mask it and bail out. */
		if (unlikely(!action)) {
			desc->chip->mask(irq);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely((desc->status &
			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
			/*
			 * As seen above, when another interrupt arrived while one
			 * was being handled, it was marked pending and the line was
			 * masked; unmask the line again here.
			 */
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		/* The lock is dropped; run the installed handler(s). */
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		/* The handler finished; re-take the lock and check for new arrivals. */
		spin_lock(&desc->lock);
		/*
		 * For an edge-triggered interrupt another one may have fired
		 * during handling; IRQ_PENDING is then set and the loop runs
		 * the handler once more.
		 */
	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}
Example #18
/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	int ok = 0, work = 0;

	spin_lock(&desc->lock);
	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		if (desc->action && (desc->action->flags & IRQF_SHARED))
			desc->status |= IRQ_PENDING;
		spin_unlock(&desc->lock);
		return ok;
	}
	/* Honour the normal IRQ locking */
	desc->status |= IRQ_INPROGRESS;
	action = desc->action;
	spin_unlock(&desc->lock);

	while (action) {
		/* Only shared IRQ handlers are safe to call */
		if (action->flags & IRQF_SHARED) {
			if (action->handler(irq, action->dev_id) ==
				IRQ_HANDLED)
				ok = 1;
		}
		action = action->next;
	}
	local_irq_disable();
	/* Now clean up the flags */
	spin_lock(&desc->lock);
	action = desc->action;

	/*
	 * While we were looking for a fixup someone queued a real
	 * IRQ clashing with our walk:
	 */
	while ((desc->status & IRQ_PENDING) && action) {
		/*
		 * Perform real IRQ processing for the IRQ we deferred
		 */
		work = 1;
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, action);
		spin_lock(&desc->lock);
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If we did actual work for the real IRQ line we must let the
	 * IRQ controller clean up too
	 */
	if (work && desc->chip && desc->chip->end)
		desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return ok;
}
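try_one_irq() is driven by the misrouted/spurious IRQ poll code in kernel/irq/spurious.c. Roughly, in kernels of the same vintage (a sketch; exact details differ between versions), a stray interrupt causes every other descriptor to be probed with it:

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;	/* skip IRQ 0, the timer */
		if (i == irq)
			continue;	/* the misrouted irq itself was already handled */
		if (try_one_irq(i, desc))
			ok = 1;
	}
	/* Nonzero if at least one shared handler claimed the stray interrupt. */
	return ok;
}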
Example #19
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 * @regs:	pointer to a register structure
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
fastcall notrace unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irq_desc *desc = irq_desc + irq;
	struct irqaction *action;
	unsigned int status;

	kstat_this_cpu.irqs[irq]++;
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		action_ret = handle_IRQ_event(irq, regs, desc->action);
		desc->chip->end(irq);
		return 1;
	}
	/*
	 * If the task is currently running in user mode, don't
	 * detect soft lockups.  If CONFIG_DETECT_SOFTLOCKUP is not
	 * configured, this should be optimized out.
	 */
	if (user_mode(regs))
		touch_softlockup_watchdog();

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * hardirq redirection to the irqd process context:
	 */
	if (redirect_hardirq(desc))
		goto out_no_end;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, regs, action);

		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, regs);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
out_no_end:
	spin_unlock(&desc->lock);

	return 1;
}
Example #20
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	kstat.irqs[cpu][irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	   REPLAY is when Linux resends an IRQ that was dropped earlier
	   WAITING is used by probe to mark irqs that are being tested
	   */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	   Since we set PENDING, if another processor is handling
	   a different instance of this same irq, the other processor
	   will take care of it.
	 */
	if (!action) {
		goto out;
	}

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	if (softirq_pending(cpu))
		do_softirq();
	return 1;
}