Example #1
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code  */
	unsigned vector = ~regs->orig_rax;
	unsigned irq;

	irq_show_regs_callback(smp_processor_id(), regs);

	exit_idle();
	irq_enter();
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_EVENT_TRACE
	if (irq == trace_user_trigger_irq)
		user_trace_start();
#endif
	trace_special(regs->rip, irq, 0);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	if (likely(irq < NR_IRQS))
		generic_handle_irq(irq);
	else {
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
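The vector recovery above relies on the interrupt entry stubs storing the bitwise complement of the vector number in orig_rax (the "high bit" the comment mentions), so the handler only has to complement it back. A minimal user-space sketch of that round trip, with 0x31 as a purely illustrative vector rather than a value taken from the code above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vector = 0x31;                    /* example vector only */
	uint64_t orig_rax = ~vector;               /* what the entry stub stores */
	unsigned recovered = (unsigned)~orig_rax;  /* what do_IRQ computes */

	printf("orig_rax=%#llx recovered=%#x\n",
	       (unsigned long long)orig_rax, recovered);
	return 0;
}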
Example #2
void mips_timer_interrupt(struct pt_regs *regs)
{
	int irq = 63;
	unsigned long count;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

#ifdef CONFIG_PM
	printk(KERN_ERR "Unexpected CP0 interrupt\n");
	regs->cp0_status &= ~IE_IRQ5; /* disable CP0 interrupt */
	return;
#endif

	if (r4k_offset == 0)
		goto null;

	do {
		count = read_c0_count();
		timerhi += (count < timerlo);   /* Wrap around */
		timerlo = count;

		kstat.irqs[0][irq]++;
		do_timer(regs);
		r4k_cur += r4k_offset;
		ack_r4ktimer(r4k_cur);

	} while (((unsigned long)read_c0_count()
	         - r4k_cur) < 0x7fffffff);

	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
	return;

null:
	ack_r4ktimer(0);
}
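The line timerhi += (count < timerlo) extends the 32-bit CP0 count register to 64 bits in software: the high word is bumped exactly when the freshly read low word is smaller than the previous one, which can only happen after a wrap. A standalone sketch of the same trick, with names local to the sketch and the assumption that it is called at least once per wrap period:

#include <stdint.h>

static uint32_t timerhi, timerlo;   /* software-maintained high/low words */

static uint64_t extend_counter(uint32_t count)
{
	timerhi += (count < timerlo);   /* low word went backwards => wrapped */
	timerlo = count;
	return ((uint64_t)timerhi << 32) | timerlo;
}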
Example #3
/* Drop into the prom, with the chance to continue with the 'go'
 * prom command.
 */
void
prom_cmdline(void)
{
    unsigned long flags;

    __save_and_cli(flags);

#ifdef CONFIG_SUN_CONSOLE
    if(!serial_console && prom_palette)
        prom_palette (1);
#endif

    /* We always arrive here via a serial interrupt.
     * So in order for everything to work reliably, even
     * on SMP, we need to drop the IRQ locks we hold.
     */
#ifdef CONFIG_SMP
    irq_exit(smp_processor_id(), 0);
    smp_capture();
#else
    local_irq_count(smp_processor_id())--;
#endif

    p1275_cmd ("enter", P1275_INOUT(0,0));

#ifdef CONFIG_SMP
    smp_release();
    irq_enter(smp_processor_id(), 0);
    spin_unlock_wait(&__br_write_locks[BR_GLOBALIRQ_LOCK].lock);
#else
    local_irq_count(smp_processor_id())++;
#endif

#ifdef CONFIG_SUN_CONSOLE
    if(!serial_console && prom_palette)
        prom_palette (0);
#endif

    __restore_flags(flags);
}
Example #4
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);

    perf_mon_interrupt_in();
    irq_enter();

    if (unlikely(irq >= nr_irqs)) {
        if (printk_ratelimit())
            printk(KERN_WARNING "Bad IRQ%u\n", irq);
        ack_bad_irq(irq);
    } else {
        generic_handle_irq(irq);
    }


    irq_finish(irq);

    irq_exit();
    set_irq_regs(old_regs);
    perf_mon_interrupt_out();
}
Example #5
File: irq.c Project: nhanh0/hah
/* FIXME: SMP, flags, bottom halves, rest */
void do_irq(struct irqaction *action, int irq, struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

#ifdef DEBUG_IRQ
	if (irq != TIMER_IRQ)
#endif
	DBG_IRQ("do_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq));
	if (action->handler == NULL)
		printk(KERN_ERR "No handler for interrupt %d !\n", irq);

	for(; action && action->handler; action = action->next) {
		action->handler(irq, action->dev_id, regs);
	}
	
	irq_exit(cpu, irq);

	/* don't need to care about unmasking and stuff */
	do_softirq();
}
Example #6
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	smp_local_timer_interrupt(regs);
	irq_exit();
}
Example #7
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * Disable interrupt in oops progress to avoid
	 * panic over panic in ISR.
	 */
	if (oops_in_progress && !smp_processor_id()) {
		set_irq_regs(old_regs);
		local_irq_disable();
		printk(KERN_ERR "In oops,"
			" interrupt is disabled on IRQ%u\n", irq);
		return;
	}

	perf_mon_interrupt_in();
	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	/* AT91 specific workaround */
	irq_finish(irq);

	irq_exit();
	set_irq_regs(old_regs);
	perf_mon_interrupt_out();
}
Example #8
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction *action;
	int do_random, cpu;
	int ret, retval = 0;

	cpu = smp_processor_id();
	irq_enter();
	kstat_cpu(cpu).irqs[irq - FIRST_IRQ]++;
	action = irq_action[irq - FIRST_IRQ];

	if (action) {
		if (!(action->flags & SA_INTERRUPT))
			local_irq_enable();
		do_random = 0;
		do {
			ret = action->handler(irq, action->dev_id, regs);
			if (ret == IRQ_HANDLED)
				do_random |= action->flags;
			retval |= ret;
			action = action->next;
		} while (action);

		if (retval != 1) {
			if (retval) {
				printk("irq event %d: bogus retval mask %x\n",
					irq, retval);
			} else {
				printk("irq %d: nobody cared\n", irq);
			}
		}

		if (do_random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		local_irq_disable();
	}
	irq_exit();
}
Example #9
void do_multiple_IRQ(struct pt_regs* regs)
{
	int bit;
	unsigned masked;
	unsigned mask;
	unsigned ethmask = 0;

	
	mask = masked = *R_VECT_MASK_RD;

	
	mask &= ~(IO_MASK(R_VECT_MASK_RD, timer0));

	if (mask &
	    (IO_STATE(R_VECT_MASK_RD, dma0, active) |
	     IO_STATE(R_VECT_MASK_RD, dma1, active))) {
		ethmask = (IO_MASK(R_VECT_MASK_RD, dma0) |
			   IO_MASK(R_VECT_MASK_RD, dma1));
	}

	
	*R_VECT_MASK_CLR = (mask | ethmask);

	irq_enter();

	
	for (bit = 2; bit < 32; bit++) {
		if (masked & (1 << bit)) {
			do_IRQ(bit, regs);
		}
	}

	
	irq_exit();

	
	*R_VECT_MASK_SET = (masked | ethmask);
}
Example #10
/*
 * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'
 */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
#ifdef CONFIG_BCM_KNLLOG_IRQ
	struct irq_desc *desc;
#endif

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
#ifdef CONFIG_BCM_KNLLOG_IRQ
		desc = irq_desc + irq;
		if (gKnllogIrqSchedEnable & KNLLOG_IRQ)
			KNLLOG("in  [%d] (0x%x)\n", irq, (int)desc);
#endif
		generic_handle_irq(irq);
	}

#ifdef CONFIG_BCM_KNLLOG_IRQ
	if (gKnllogIrqSchedEnable & KNLLOG_IRQ)
		KNLLOG("out [%d] (0x%x)\n", irq, (int)desc);
#endif

	/* AT91 specific workaround */
	irq_finish(irq);

	irq_exit();
	set_irq_regs(old_regs);
}
Example #11
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
		irq_exit();
	} else {
		msa_start_irq(irq);
		generic_handle_irq(irq);
		msa_irq_exit(irq, user_mode(regs));
	}

	set_irq_regs(old_regs);
}
Example #12
void do_extint(struct pt_regs *regs, unsigned short code)
{
        ext_int_info_t *p;
        int index;

	irq_enter();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		/**
		 * Make sure that the i/o interrupt did not "overtake"
		 * the last HZ timer interrupt.
		 */
		account_ticks(regs);
	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
        index = code & 0xff;
	for (p = ext_int_hash[index]; p; p = p->next) {
		if (likely(p->code == code)) {
			if (likely(p->handler))
				p->handler(regs, code);
		}
	}
	irq_exit();
}
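do_extint() dispatches by hashing the low byte of the interruption code into ext_int_hash and walking the chained entries. A hedged sketch of the registration side implied by that layout; the structure fields mirror what the loop above dereferences, and register_ext_handler is an assumed helper for illustration, not a verbatim kernel API:

typedef struct ext_int_info {
	struct ext_int_info *next;                           /* hash chain */
	__u16 code;                                          /* interruption code */
	void (*handler)(struct pt_regs *regs, __u16 code);
} ext_int_info_t;

static ext_int_info_t *ext_int_hash[256];

static void register_ext_handler(ext_int_info_t *p)
{
	int index = p->code & 0xff;       /* same bucket rule as do_extint() */

	p->next = ext_int_hash[index];
	ext_int_hash[index] = p;
}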
Example #13
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	////printk(KERN_ALERT "asm_do_IRQ. irq=%d\n", irq); // NCTUSS

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
}
Example #14
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	check_stack_overflow(irq);

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		handle_bad_irq(&bad_irq_desc);
	else
		generic_handle_irq(irq);

	maybe_lower_to_irq14();

	irq_exit();

	set_irq_regs(old_regs);
}
Example #15
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	unsigned int irq;
	struct pt_regs *old_regs = set_irq_regs(regs);
	trace_hardirqs_off();

	irq_enter();
	irq = xintc_get_irq();
next_irq:
	BUG_ON(!irq);
	generic_handle_irq(irq);

	irq = xintc_get_irq();
	if (irq != -1U) {
		pr_debug("next irq: %d\n", irq);
		++concurrent_irq;
		goto next_irq;
	}

	irq_exit();
	set_irq_regs(old_regs);
	trace_hardirqs_on();
}
Example #16
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	/* AT91 specific workaround */
	irq_finish(irq);

	irq_exit();
	set_irq_regs(old_regs);
}
Example #17
asmlinkage void do_IRQ(int level, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned int irq;
	unsigned long status_reg;

	local_irq_disable();

	old_regs = set_irq_regs(regs);

	irq_enter();

	irq = intc_readl(&intc0, INTCAUSE0 - 4 * level);
	generic_handle_irq(irq);

	status_reg = sysreg_read(SR);
	status_reg &= ~(SYSREG_BIT(I0M) | SYSREG_BIT(I1M)
			| SYSREG_BIT(I2M) | SYSREG_BIT(I3M));
	sysreg_write(SR, status_reg);

	irq_exit();

	set_irq_regs(old_regs);
}
Example #18
asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
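The handler above is one half of a handshake: the initiating CPU publishes call_data, raises the IPI, then spins on the started counter (and on finished when wait is set) before the structure can go out of scope. A hedged sketch of that initiating side, with the layout inferred from the fields the handler touches and send_call_function_ipi() assumed as a placeholder for raising the vector:

struct call_data_struct {
	void (*func)(void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;     /* read by the IPI handler above */

static void run_on_other_cpus(struct call_data_struct *data, int ncpus)
{
	call_data = data;                      /* publish before the IPI */
	mb();
	send_call_function_ipi();              /* assumed helper */

	while (atomic_read(&data->started) != ncpus)
		cpu_relax();                   /* every CPU copied the data */

	if (data->wait)
		while (atomic_read(&data->finished) != ncpus)
			cpu_relax();           /* every CPU ran func() */
}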
Example #19
void
handle_irq(int irq)
{	
	/* 
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	static unsigned int illegal_count=0;
	struct irq_desc *desc = irq_to_desc(irq);
	
	if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
	    illegal_count < MAX_ILLEGAL_IRQS)) {
		irq_err_count++;
		illegal_count++;
		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
		       irq);
		return;
	}

	/*
	 * From here we must proceed with IPL_MAX. Note that we do not
	 * explicitly enable interrupts afterwards - some MILO PALcode
	 * (namely LX164 one) seems to have severe problems with RTI
	 * at IPL 0.
	 */
	local_irq_disable();
	irq_enter();
	generic_handle_irq_desc(irq, desc);
	irq_exit();
}
Example #20
asmlinkage void do_IRQ(int irq)
{
    irq_enter();
    generic_handle_irq(irq);
    irq_exit();
}
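This is the bare minimum; most of the fuller handlers in this collection wrap the same three calls with set_irq_regs() bookkeeping and a range check. A composite sketch of that common shape, assembled from the examples above rather than taken from any single architecture:

asmlinkage void do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);  /* remember interrupted regs */

	irq_enter();                     /* enter hardirq accounting context */

	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);        /* spurious or unmapped interrupt */
	} else {
		generic_handle_irq(irq); /* run the flow handler and actions */
	}

	irq_exit();                      /* leave hardirq context, may run softirqs */
	set_irq_regs(old_regs);
}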
Example #21
/* Pretty sick eh? */
int prom_callback(long *args)
{
	struct console *cons, *saved_console = NULL;
	unsigned long flags;
	char *cmd;
	extern spinlock_t prom_entry_lock;

	if (!args)
		return -1;
	if (!(cmd = (char *)args[0]))
		return -1;

	/*
	 * The callback can be invoked on the cpu that first dropped 
	 * into prom_cmdline after taking the serial interrupt, or on 
	 * a slave processor that was smp_captured() if the 
	 * administrator has done a switch-cpu inside obp. In either 
	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
	 */
	irq_exit();

	/* XXX Revisit the locking here someday.  This is a debugging
	 * XXX feature so it isn't all that critical.  -DaveM
	 */
	local_irq_save(flags);

	spin_unlock(&prom_entry_lock);
	cons = console_drivers;
	while (cons) {
		unregister_console(cons);
		cons->flags &= ~(CON_PRINTBUFFER);
		cons->next = saved_console;
		saved_console = cons;
		cons = console_drivers;
	}
	register_console(&prom_console);
	if (!strcmp(cmd, "sync")) {
		prom_printf("PROM `%s' command...\n", cmd);
		show_free_areas();
		if (current->pid != 0) {
			local_irq_enable();
			sys_sync();
			local_irq_disable();
		}
		args[2] = 0;
		args[args[1] + 3] = -1;
		prom_printf("Returning to PROM\n");
	} else if (!strcmp(cmd, "va>tte-data")) {
		unsigned long ctx, va;
		unsigned long tte = 0;
		long res = PROM_FALSE;

		ctx = args[3];
		va = args[4];
		if (ctx) {
			/*
			 * Find process owning ctx, lookup mapping.
			 */
			struct task_struct *p;
			struct mm_struct *mm = NULL;
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			for_each_process(p) {
				mm = p->mm;
				if (CTX_HWBITS(mm->context) == ctx)
					break;
			}
			if (!mm ||
			    CTX_HWBITS(mm->context) != ctx)
				goto done;

			pgdp = pgd_offset(mm, va);
			if (pgd_none(*pgdp))
				goto done;
			pmdp = pmd_offset(pgdp, va);
			if (pmd_none(*pmdp))
				goto done;

			/* Preemption implicitly disabled by virtue of
			 * being called from inside OBP.
			 */
			ptep = pte_offset_map(pmdp, va);
			if (pte_present(*ptep)) {
				tte = pte_val(*ptep);
				res = PROM_TRUE;
			}
			pte_unmap(ptep);
			goto done;
		}

		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			/*
			 * Locked down tlb entry.
			 */

			if (tlb_type == spitfire)
				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);

			res = PROM_TRUE;
			goto done;
		}
Example #22
File: p4.c Project: kzlin129/tt-gpl
fastcall void smp_thermal_interrupt(struct pt_regs *regs)
{
	irq_enter();
	vendor_thermal_interrupt(regs);
	irq_exit();
}
Example #23
static void
common_shutdown_1(void *generic_ptr)
{
	struct halt_info *how = (struct halt_info *)generic_ptr;
	struct percpu_struct *cpup;
	unsigned long *pflags, flags;
	int cpuid = smp_processor_id();

	/* No point in taking interrupts anymore. */
	local_irq_disable();

	cpup = (struct percpu_struct *)
			((unsigned long)hwrpb + hwrpb->processor_offset
			 + hwrpb->processor_size * cpuid);
	pflags = &cpup->flags;
	flags = *pflags;

	/* Clear reason to "default"; clear "bootstrap in progress". */
	flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
	/* Secondaries halt here. */
	if (cpuid != boot_cpuid) {
		flags |= 0x00040000UL; /* "remain halted" */
		*pflags = flags;
		set_cpu_present(cpuid, false);
		set_cpu_possible(cpuid, false);
		halt();
	}
#endif

	if (how->mode == LINUX_REBOOT_CMD_RESTART) {
		if (!how->restart_cmd) {
			flags |= 0x00020000UL; /* "cold bootstrap" */
		} else {
			/* For SRM, we could probably set environment
			   variables to get this to work.  We'd have to
			   delay this until after srm_paging_stop unless
			   we ever got srm_fixup working.

			   At the moment, SRM will use the last boot device,
			   but the file and flags will be the defaults, when
			   doing a "warm" bootstrap.  */
			flags |= 0x00030000UL; /* "warm bootstrap" */
		}
	} else {
		flags |= 0x00040000UL; /* "remain halted" */
	}
	*pflags = flags;

#ifdef CONFIG_SMP
	/* Wait for the secondaries to halt. */
	set_cpu_present(boot_cpuid, false);
	set_cpu_possible(boot_cpuid, false);
	while (cpumask_weight(cpu_present_mask))
		barrier();
#endif

	/* If booted from SRM, reset some of the original environment. */
	if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
		/* If we've gotten here after SysRq-b, leave interrupt
		   context before taking over the console. */
		if (in_interrupt())
			irq_exit();
		/* This has the effect of resetting the VGA video origin.  */
		take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
#endif
		pci_restore_srm_config();
		set_hae(srm_hae);
	}

	if (alpha_mv.kill_arch)
		alpha_mv.kill_arch(how->mode);

	if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
		/* Unfortunately, since MILO doesn't currently understand
		   the hwrpb bits above, we can't reliably halt the 
		   processor and keep it halted.  So just loop.  */
		return;
	}

	if (alpha_using_srm)
		srm_paging_stop();

	halt();
}
Example #24
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
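tile_dev_intr() drains its pending mask by repeatedly taking the lowest set bit with __ffs() and clearing it before dispatch. A standalone sketch of just that iteration pattern; lowest_bit() stands in for the kernel's __ffs() and handle_one() is a placeholder callback, neither taken from the snippet:

#include <stdio.h>

static unsigned long lowest_bit(unsigned long word)
{
	return __builtin_ctzl(word);     /* index of lowest set bit, word != 0 */
}

static void handle_one(unsigned long irq)
{
	printf("dispatch irq %lu\n", irq);
}

static void drain_pending(unsigned long pending)
{
	while (pending) {
		unsigned long irq = lowest_bit(pending);
		pending &= ~(1UL << irq);    /* mark this one handled */
		handle_one(irq);
	}
}

int main(void)
{
	drain_pending(0x29UL);   /* bits 0, 3 and 5 dispatched in that order */
	return 0;
}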
Example #25
/*
 * handle_IRQ handles all hardware IRQ's.  Decoded IRQs should
 * not come via this function.  Instead, they should provide their
 * own 'handler'.  Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

#ifdef CONFIG_HISI_RDR
#ifndef CONFIG_HISI_RDR_SWITCH
	unsigned int old_int_num = curr_int_num;

	curr_int_num = irq;

	if (NULL != int_switch_hook) {/*exc int hook func*/
		int_switch_hook(0, old_int_num, curr_int_num);
		int_switch_flag = 1;
	}
#endif
#endif

    /*y00145322*/

	unsigned int old_int_num = curr_int_num;

	curr_int_num = irq;

	if (NULL != int_switch_hook) {/*exc int hook func*/
		int_switch_hook(0, old_int_num, curr_int_num);
		int_switch_flag = 1;
	}
	

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();

#ifdef CONFIG_HISI_RDR
#ifndef CONFIG_HISI_RDR_SWITCH
	/*call exception interrupt hook func*/
	if ((NULL != int_switch_hook) && (0 != int_switch_flag))
		int_switch_hook(1, old_int_num, curr_int_num);
#endif
#endif

    /*y00145322*/
	/*call exception interrupt hook func*/
	if ((NULL != int_switch_hook) && (0 != int_switch_flag))
		int_switch_hook(1, old_int_num, curr_int_num);


	set_irq_regs(old_regs);
}
Example #26
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (time_after(jiffies, last_time + 5 * HZ))
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
Example #27
static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}
Example #28
asmlinkage __visible void smp_reboot_interrupt(void)
{
	ipi_entering_ack_irq();
	stop_this_cpu(NULL);
	irq_exit();
}
Example #29
void hv_message_intr(struct pt_regs *regs, int intnum)
{
	/*
	 * We enter with interrupts disabled and leave them disabled,
	 * to match expectations of called functions (e.g.
	 * do_ccupdate_local() in mm/slab.c).  This is also consistent
	 * with normal call entry for device interrupts.
	 */

	int message[HV_MAX_MESSAGE_SIZE/sizeof(int)];
	HV_RcvMsgInfo rmi;
	int nmsgs = 0;

	/* Track time spent here in an interrupt context */
	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("hv_message_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	while (1) {
		rmi = hv_receive_message(__get_cpu_var(msg_state),
					 (HV_VirtAddr) message,
					 sizeof(message));
#ifdef CONFIG_DATAPLANE
		if (dataplane_debug(smp_processor_id()))
			pr_warn("cpu %d: dataplane recv msg len %d, src %d\n",
				smp_processor_id(), rmi.msglen, rmi.source);
#endif

		if (rmi.msglen == 0)
			break;

		if (rmi.msglen < 0)
			panic("hv_receive_message failed: %d", rmi.msglen);

		++nmsgs;

		if (rmi.source == HV_MSG_TILE) {
			int tag;

			/* we just send tags for now */
			BUG_ON(rmi.msglen != sizeof(int));

			tag = message[0];
#ifdef CONFIG_SMP
			evaluate_message(message[0]);
#else
			panic("Received IPI message %d in UP mode", tag);
#endif
		} else if (rmi.source == HV_MSG_INTR) {
			HV_IntrMsg *him = (HV_IntrMsg *)message;
			struct hv_driver_cb *cb =
				(struct hv_driver_cb *)him->intarg;
			cb->callback(cb, him->intdata);
			__get_cpu_var(irq_stat).irq_hv_msg_count++;
		}
	}

	/*
	 * We shouldn't have gotten a message downcall with no
	 * messages available.
	 */
	if (nmsgs == 0)
		panic("Message downcall invoked with no messages!");

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
Example #30
File: irq.c Project: doniexun/xen
/* Dispatch an interrupt */
void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    perfc_incr(irqs);

    ASSERT(irq >= 16); /* SGIs do not come down this path */

    if (irq < 32)
        perfc_incr(ppis);
    else
        perfc_incr(spis);

    /* TODO: this_cpu(irq_count)++; */

    irq_enter();

    spin_lock(&desc->lock);
    desc->handler->ack(desc);

    if ( !desc->action )
    {
        printk("Unknown %s %#3.3x\n",
               is_fiq ? "FIQ" : "IRQ", irq);
        goto out;
    }

    if ( test_bit(_IRQ_GUEST, &desc->status) )
    {
        struct irq_guest *info = irq_get_guest_info(desc);

        perfc_incr(guest_irqs);
        desc->handler->end(desc);

        set_bit(_IRQ_INPROGRESS, &desc->status);

        /*
         * The irq cannot be a PPI, we only support delivery of SPIs to
         * guests.
         */
        vgic_vcpu_inject_spi(info->d, info->virq);
        goto out_no_end;
    }

    set_bit(_IRQ_PENDING, &desc->status);

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( test_bit(_IRQ_DISABLED, &desc->status) ||
         test_bit(_IRQ_INPROGRESS, &desc->status) )
        goto out;

    set_bit(_IRQ_INPROGRESS, &desc->status);

    while ( test_bit(_IRQ_PENDING, &desc->status) )
    {
        struct irqaction *action;

        clear_bit(_IRQ_PENDING, &desc->status);
        action = desc->action;

        spin_unlock_irq(&desc->lock);

        do
        {
            action->handler(irq, action->dev_id, regs);
            action = action->next;
        } while ( action );

        spin_lock_irq(&desc->lock);
    }

    clear_bit(_IRQ_INPROGRESS, &desc->status);

out:
    desc->handler->end(desc);
out_no_end:
    spin_unlock(&desc->lock);
    irq_exit();
}