Example #1
/*
 * do_IRQ() handles all normal I/O device IRQs (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	s390_idle_check();
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
		}
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
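
Nearly every handler in this collection follows the same envelope: save the interrupted register context, enter hard-IRQ context, dispatch, leave IRQ context, restore. A minimal sketch of that shared skeleton is below; it assumes a generic IRQ number and uses generic_handle_irq() for dispatch, where the real per-architecture code varies.

#include <linux/hardirq.h>	/* irq_enter(), irq_exit() */
#include <linux/irq.h>		/* generic_handle_irq() */
#include <asm/irq_regs.h>	/* set_irq_regs() */

/*
 * Sketch only: the canonical top-half envelope shared by the
 * handlers in these examples. irq_enter() bumps the preempt count
 * so in_irq() is true while the handler runs; irq_exit() drops it
 * and runs any pending softirqs. set_irq_regs() stashes the
 * interrupted register state for code (e.g. profiling) that needs
 * it, and is restored on the way out because interrupts can nest.
 */
void sketch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);	/* dispatch to the registered handler */
	irq_exit();
	set_irq_regs(old_regs);
}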
Example #2
static void __irq_entry indy_buserror_irq(void)
{
	int irq = SGI_BUSERR_IRQ;

	irq_enter();
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
	ip22_be_interrupt(irq);
	irq_exit();
}
Example #3
File: irq.c Project: 1youhun1/linux
/*
 * "C" entry point for any ARC ISR, called from the low-level vector handler.
 * @irq is the vector number read from the ICAUSE reg of the on-chip intc.
 */
void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
}
Example #4
/*==========================================================================*
 * Name:         smp_ipi_timer_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	old_regs = set_irq_regs(regs);
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
Example #5
void indy_buserror_irq(struct pt_regs *regs)
{
	int irq = SGI_BUSERR_IRQ;

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	ip22_be_interrupt(irq, regs);
	irq_exit();
}
Example #6
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
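
Note that the Xen handlers here have the irqreturn_t handler signature rather than being low-level entry points; handlers of this shape are installed through the IRQ core. A hedged sketch of wiring one up with request_irq() follows — the real Xen code binds IPIs through its event-channel layer, and the IRQ number, flags, and name below are purely illustrative.

#include <linux/init.h>
#include <linux/interrupt.h>	/* request_irq(), irqreturn_t */
#include <linux/kernel.h>

static irqreturn_t my_ipi_interrupt(int irq, void *dev_id)
{
	/* ... handle the event ... */
	return IRQ_HANDLED;
}

static int __init my_ipi_init(void)
{
	/* IRQ number and flags are placeholders for illustration. */
	int err = request_irq(42, my_ipi_interrupt,
			      IRQF_PERCPU | IRQF_NOBALANCING,
			      "my-ipi", NULL);

	if (err)
		pr_err("my-ipi: request_irq failed: %d\n", err);
	return err;
}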
Example #7
static void indy_buserror_irq(void)
{
    int irq = SGI_BUSERR_IRQ;

    irq_enter();
    kstat_this_cpu.irqs[irq]++;
    ip22_be_interrupt(irq);
    irq_exit();
}
Example #8
void indy_r4k_timer_interrupt(struct pt_regs *regs)
{
	int irq = SGI_TIMER_IRQ;

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	timer_interrupt(irq, NULL, regs);
	irq_exit();
}
Example #9
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt-style processing.
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal interrupt handling path as a real
			 * interrupt handler would have, passing NULL for
			 * pt_regs since there is no interrupted context here.
			 * This could probably share code with the regular path.
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
Example #10
void indy_r4k_timer_interrupt(void)
{
	int irq = SGI_TIMER_IRQ;

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	timer_interrupt(irq, NULL);
	irq_exit();
}
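
The examples also span several generations of per-CPU interrupt accounting: the oldest use kstat.irqs[cpu][irq]++, later code kstat_this_cpu.irqs[irq]++ or kstat_cpu(smp_processor_id()).irqs[irq]++, and newer code the kstat_incr_irqs_this_cpu() helper seen in Example #2. A small sketch of the newer form, assuming the two-argument variant of the helper that Example #2 uses (its signature has changed across kernel versions):

#include <linux/irq.h>		/* irq_to_desc() */
#include <linux/kernel_stat.h>	/* kstat_incr_irqs_this_cpu() */

/* Sketch: bump this CPU's count for @irq via the helper
 * instead of poking the kstat arrays directly. */
static void account_this_irq(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}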
Example #11
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}
Example #12
/*
 * handle_IRQ handles all hardware IRQs.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

#ifdef CONFIG_HISI_RDR
#ifdef CONFIG_HISI_RDR_SWITCH
	unsigned int old_int_num = curr_int_num;

	curr_int_num = irq;

	if (NULL != int_switch_hook) {/*exc int hook func*/
		int_switch_hook(0, old_int_num, curr_int_num);
		int_switch_flag = 1;
	}
#endif
#else
	unsigned int old_int_num = curr_int_num;

	curr_int_num = irq;

	if (NULL != int_switch_hook) {/*exc int hook func*/
		int_switch_hook(0, old_int_num, curr_int_num);
		int_switch_flag = 1;
	}
#endif


	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		pr_warn_ratelimited("Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();

#ifdef CONFIG_HISI_RDR
#ifdef CONFIG_HISI_RDR_SWITCH
	/*call exception interrupt hook func*/
	if ((NULL != int_switch_hook) && (0 != int_switch_flag))
		int_switch_hook(1, old_int_num, curr_int_num);
#endif
#else
	/*call exception interrupt hook func*/
	if ((NULL != int_switch_hook) && (0 != int_switch_flag))
		int_switch_hook(1, old_int_num, curr_int_num);
#endif

	set_irq_regs(old_regs);
}
Example #13
File: cio.c Project: AllenDou/linux
/*
 * do_IRQ() handles all normal I/O device IRQs (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 */
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
			else
				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		} else
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
Example #14
void indy_buserror_irq(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int irq = SGI_BUSERR_IRQ;

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	be_ip22_interrupt(irq, regs);
	irq_exit(cpu, irq);
}
Example #15
asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
{
	struct pt_regs *oldregs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();

	set_irq_regs(oldregs);
}
Example #16
asmlinkage void smp_threshold_interrupt(void)
{
	irq_enter();
	exit_idle();
	inc_irq_stat(irq_threshold_count);
	mce_threshold_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
Example #17
asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
{
	struct pt_regs *oldregs = set_irq_regs(regs);

	irq_enter();
	__do_IRQ(irq);
	irq_exit();

	set_irq_regs(oldregs);
}
Example #18
asmlinkage void smp_threshold_interrupt(struct pt_regs *regs)
{
	irq_enter();
	msa_start_irq(THRESHOLD_APIC_VECTOR);
	exit_idle();
	inc_irq_stat(irq_threshold_count);
	mce_threshold_vector();
	msa_irq_exit(THRESHOLD_APIC_VECTOR, regs->cs != __KERNEL_CS);
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
Example #19
void TAUException(struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	irq_enter();
	tau[cpu].interrupts++;

	TAUupdate(cpu);

	irq_exit();
}
Example #20
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	irq_enter();

	__DO_IRQ_SMTC_HOOK();
	__do_IRQ(irq, regs);

	irq_exit();

	return 1;
}
Example #21
/*
 * do_IRQ() handles all normal I/O device IRQs (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	irq_enter ();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		/*
		 * Make sure that the I/O interrupt did not "overtake"
		 * the last HZ timer interrupt.
		 */
		account_ticks(regs);
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO();
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (sch)
			spin_lock(&sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch (tpi_info->irq, irb) == 0 && sch) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(&sch->dev);
		}
		if (sch)
			spin_unlock(&sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit ();
}
Example #22
void doorbell_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	smp_ipi_demux();

	irq_exit();
	set_irq_regs(old_regs);
}
Example #23
asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
	struct pt_regs *oldregs = set_irq_regs(regs);
	int irq;

	irq_enter();
	irq = irq_find_mapping(NULL, hwirq);
	generic_handle_irq(irq);
	irq_exit();

	set_irq_regs(oldregs);
}
Example #24
void timer_handler(int sig, union uml_pt_regs *regs)
{
	local_irq_disable();
	irq_enter();
	update_process_times(CHOOSE_MODE(
	                     (UPT_SC(regs) && user_context(UPT_SP(regs))),
			     (regs)->skas.is_user));
	irq_exit();
	local_irq_enable();
	if(current_thread->cpu == 0)
		timer_irq(regs);
}
Example #25
void indy_8254timer_irq(void)
{
        int cpu = smp_processor_id();
        int irq = 4;

        irq_enter(cpu);
        kstat.irqs[0][irq]++;
        printk("indy_8254timer_irq: Whoops, should not have gotten this IRQ\n");
        prom_getchar();
        prom_imode();
        irq_exit(cpu);
}
Example #26
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	struct irq_desc *desc = irq_desc + irq;
#ifndef CONFIG_IPIPE
	unsigned short pending, other_ints;
#endif
	old_regs = set_irq_regs(regs);

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			dump_stack();
			printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
					" only %ld bytes free\n",
				__func__, irq, sp - sizeof(struct thread_info));
		}
	}
#endif
	generic_handle_irq(irq);

#ifndef CONFIG_IPIPE
	/*
	 * If we're the only interrupt running (ignoring IRQ15 which
	 * is for syscalls), lower our priority to IRQ14 so that
	 * softirqs run at that level.  If there's another,
	 * lower-level interrupt, irq_exit will defer softirqs to
	 * that. If the interrupt pipeline is enabled, we are already
	 * running at IRQ14 priority, so we don't need this code.
	 */
	CSYNC();
	pending = bfin_read_IPEND() & ~0x8000;
	other_ints = pending & (pending - 1);
	if (other_ints == 0)
		lower_to_irq14();
#endif /* !CONFIG_IPIPE */
	irq_exit();

	set_irq_regs(old_regs);
}
Example #27
asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
    ltt_ev_irq_entry(irq, !user_mode(regs));
    irq_enter();

    __do_IRQ((irq), (regs));

    ltt_ev_irq_exit();
    irq_exit();

    return 1;
}
Example #28
void rt_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int cpuA = ((cputoslice(cpu)) == 0);
	int irq = IP27_TIMER_IRQ;

	irq_enter(cpu, irq);
	write_lock(&xtime_lock);

again:
	LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
	ct_cur[cpu] += CYCLES_PER_JIFFY;
	LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

	if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
		goto again;

	kstat.irqs[cpu][irq]++;		/* kstat only for bootcpu? */

	if (cpu == 0)
		do_timer(regs);

#ifdef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif /* CONFIG_SMP */

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to when a second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660) {
		if (xtime.tv_usec >= 1000000 - ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec + 1) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				last_rtc_update = xtime.tv_sec - 600;
		} else if (xtime.tv_usec <= ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				last_rtc_update = xtime.tv_sec - 600;
		}
	}

	write_unlock(&xtime_lock);
	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #29
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	return IRQ_HANDLED;
}
Example #30
/* Generic SGI handler for (spurious) 8254 interrupts */
void indy_8254timer_irq(void)
{
	int irq = SGI_8254_0_IRQ;
	ULONG cnt;
	char c;

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
	ArcRead(0, &c, 1, &cnt);
	ArcEnterInteractiveMode();
	irq_exit();
}