/*
 * Alpha low-level interrupt dispatcher: "type" selects an interprocessor
 * interrupt (0), the timer (1), a machine check (2), an I/O device
 * interrupt (3) or a performance counter interrupt (4).
 */
asmlinkage void
do_entInt(unsigned long type, unsigned long vector,
	  unsigned long la_ptr, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	switch (type) {
	case 0:
#ifdef CONFIG_SMP
		handle_ipi(regs);
		return;
#else
		irq_err_count++;
		printk(KERN_CRIT "Interprocessor interrupt? "
		       "You must be kidding!\n");
#endif
		break;
	case 1:
		old_regs = set_irq_regs(regs);
#ifdef CONFIG_SMP
	  {
		long cpu;

		local_irq_disable();
		smp_percpu_timer_interrupt(regs);
		cpu = smp_processor_id();
		if (cpu != boot_cpuid) {
		        kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ));
		} else {
			handle_irq(RTC_IRQ);
		}
	  }
#else
		handle_irq(RTC_IRQ);
#endif
		set_irq_regs(old_regs);
		return;
	case 2:
		old_regs = set_irq_regs(regs);
		alpha_mv.machine_check(vector, la_ptr);
		set_irq_regs(old_regs);
		return;
	case 3:
		old_regs = set_irq_regs(regs);
		alpha_mv.device_interrupt(vector);
		set_irq_regs(old_regs);
		return;
	case 4:
		perf_irq(la_ptr, regs);
		return;
	default:
		printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
		       type, vector);
	}
	printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
}
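The perf_irq() dispatched in case 4 above is not a fixed function: on Alpha it is a function pointer that profiling code can replace at runtime. A minimal sketch of that hook, assuming the default-handler arrangement used in arch/alpha/kernel/irq_alpha.c:

/*
 * Sketch of the Alpha perf_irq hook (assumed layout): a function pointer
 * that defaults to a dummy handler until a profiling driver installs its
 * own.  The dummy just counts the stray interrupt and complains.
 */
static void
dummy_perf(unsigned long la_ptr, struct pt_regs *regs)
{
	irq_err_count++;
	printk(KERN_CRIT "Performance counter interrupt!\n");
}

void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
EXPORT_SYMBOL(perf_irq);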
Example #2
/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}
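The perf_irq() consulted here is the MIPS-wide hook rather than a local helper. A minimal sketch of its declaration and default, assuming the arrangement in arch/mips/kernel/time.c:

/*
 * Sketch of the MIPS perf_irq hook (assumed layout): profiling code swaps
 * in its own handler; the default reports that nothing was handled, so
 * handle_perf_irq() above never suppresses the timer check by mistake.
 */
int null_perf_irq(void)
{
	return 0;		/* != IRQ_HANDLED: no overflow serviced */
}

int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);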
Example #3
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to timer interrupts: before R2 we cannot see
	 * whether one is pending.  But with the above we now know that the
	 * reason we got here must be a timer interrupt.  Being the
	 * paranoiacs we are, we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}
#ifdef CONFIG_OPROFILE_WASP
	/*
	 * pkamath: oprofile-0.9.2 does not support interrupt-based profiling,
	 * so check for a profiling event on every timer interrupt.  Note that
	 * this may impact the accuracy of the profile.
	 */
	if (!r2 || (read_c0_cause() & (1 << 26)))
		perf_irq();
#endif

out:
	return IRQ_HANDLED;
}
Example #4
static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
{
	perf_irq(get_irq_regs());
	return IRQ_HANDLED;
}
Example #5
irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
{
	return perf_irq();
}
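A handler with this signature only makes sense when the performance counter overflow has its own interrupt line, i.e. cp0_perfcount_irq >= 0; otherwise perf_irq() is polled from the timer path as in the examples above. A hypothetical registration sketch (the IRQ flags and device name are assumptions, not taken from the source):

/*
 * Hypothetical setup sketch: wire mips_perf_interrupt() to a dedicated
 * perf counter IRQ line when one exists.
 */
if (cp0_perfcount_irq >= 0 &&
    request_irq(cp0_perfcount_irq, mips_perf_interrupt,
		IRQF_PERCPU, "perf counter", NULL))
	printk(KERN_ERR "Unable to request perf counter IRQ\n");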
Example #6
irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 *  In an SMTC system, one Count/Compare set exists per VPE.
	 *  Which TC within a VPE gets the interrupt is essentially
	 *  random - we only know that it shouldn't be one with
	 *  IXMT set. Whichever TC gets the interrupt needs to
	 *  send special interprocessor interrupts to the other
	 *  TCs to make sure that they schedule, etc.
	 *
	 *  That code is specific to the SMTC kernel, not to
	 *  a particular platform, so it's invoked from
	 *  the general MIPS timer_interrupt routine.
	 */

	int vpflags;

	/*
	 * We could be here due to timer interrupt,
	 * perf counter overflow, or both.
	 */
	if (read_c0_cause() & (1 << 26))
		perf_irq();

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare(read_c0_count() - 1);
		/*
		 * DVPE is necessary so long as cross-VPE interrupts
		 * are done via read-modify-write of Cause register.
		 */
		vpflags = dvpe();
		clear_c0_cause(CPUCTR_IMASKBIT);
		evpe(vpflags);
		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system.   One TC of each VPE will take
		 * the actual timer interrupt.  The others will get
		 * timer broadcast IPIs. We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
			scroll_display_message();
		} else {
			write_c0_compare(read_c0_count() +
			                 (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}
#else /* CONFIG_MIPS_MT_SMTC */
	int r2 = cpu_has_mips_r2;

	if (cpu == 0) {
		/*
		 * CPU 0 handles the global timer interrupt and process
		 * accounting, and resets the count/compare registers to
		 * trigger the next timer interrupt.
		 */
		if (!r2 || (read_c0_cause() & (1 << 26)))
			if (perf_irq())
				goto out;

		/* we keep interrupts disabled all the time */
		if (!r2 || (read_c0_cause() & (1 << 30)))
			timer_interrupt(irq, NULL);

		scroll_display_message();
	} else {
		/*
		 * Everyone else needs to reset the timer interrupt here,
		 * as ll_local_timer_interrupt doesn't.
		 *
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPUs.
		 */
		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));

		/*
		 * Other CPUs should do profiling and process accounting
		 */
		local_timer_interrupt(irq, dev_id);
	}
out:
#endif /* CONFIG_MIPS_MT_SMTC */
	return IRQ_HANDLED;
}
Example #7
void
PerformanceMonitorException(struct pt_regs *regs)
{
	perf_irq(regs);
}
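Taken together, the call sites above share one pattern: perf_irq is a per-architecture hook invoked from the low-level interrupt path, and a profiler installs itself by replacing the pointer. A sketch of that installation on MIPS, assuming the save/restore pattern of arch/mips/oprofile/op_model_mipsxx.c (the init/exit wrapper names here are hypothetical):

/*
 * Sketch of hook installation (assumed pattern): remember the previous
 * handler, install our own, and restore the old one on teardown.
 */
static int (*save_perf_irq)(void);

static int perf_hook_init(void)		/* hypothetical name */
{
	save_perf_irq = perf_irq;
	perf_irq = mipsxx_perfcount_handler;
	return 0;
}

static void perf_hook_exit(void)	/* hypothetical name */
{
	perf_irq = save_perf_irq;
}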