Ejemplo n.º 1
0
/*
 * Local APIC timer interrupt entry point (i386 SMP).
 *
 * Entered from the low-level APIC timer vector stub with interrupts
 * disabled.  Bumps the per-CPU timer-irq statistic, ACKs the local
 * APIC, and runs the local timer tick inside an irq_enter()/irq_exit()
 * pair.  The statement order below is deliberate — do not reorder.
 */
fastcall notrace void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	/*
	 * the NMI deadlock-detector uses this.
	 */
	irq_stat[cpu].apic_timer_irqs++;

	/* Record the interrupted EIP for the event tracer. */
        trace_special(regs->eip, 0, 0);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter();
	/*
	 * NOTE(review): presumably the VST (variable scheduling timeouts)
	 * wakeup hook for idle CPUs — confirm against vst_wakeup()'s
	 * definition.
	 */
	vst_wakeup(regs, 1);
	smp_local_timer_interrupt(regs);
	irq_exit();
}
Ejemplo n.º 2
0
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * regs->orig_rax carries the (bit-inverted) hardware vector number
 * pushed by the low-level entry stub; it is translated to an IRQ
 * number via the per-CPU vector_irq[] table below.
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	/* Publish the interrupted register state for handlers. */
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code  */
	unsigned vector = ~regs->orig_rax;
	unsigned irq;

	irq_show_regs_callback(smp_processor_id(), regs);

	/* NOTE(review): presumably ends idle-state accounting before the
	 * irq_enter() bookkeeping — confirm against exit_idle(). */
	exit_idle();
	irq_enter();
	/* Translate hardware vector -> IRQ number for this CPU. */
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_EVENT_TRACE
	/* Start user-requested tracing when the designated trigger
	 * IRQ fires. */
	if (irq == trace_user_trigger_irq)
		user_trace_start();
#endif
	/* Record the interrupted RIP and the IRQ number for the tracer. */
	trace_special(regs->rip, irq, 0);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	if (likely(irq < NR_IRQS))
		generic_handle_irq(irq);
	else {
		/*
		 * Stray/unmapped vector: ACK it anyway (unless the APIC
		 * is disabled) so the controller does not wedge, and
		 * complain, rate-limited.
		 */
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	/* Restore whatever set_irq_regs() replaced (nested interrupt). */
	set_irq_regs(old_regs);
	return 1;
}
Ejemplo n.º 3
0
/*
 * Reschedule call back. Trigger a reschedule pass so that
 * RT-overload balancing can pass tasks around.
 */
fastcall notrace void smp_reschedule_interrupt(struct pt_regs *regs)
{
	/* Record the interrupted EIP for the event tracer. */
	trace_special(regs->eip, 0, 0);
	ack_APIC_irq();
	/*
	 * Flag the current task for rescheduling; no context switch is
	 * done here.  NOTE(review): relies on the interrupt-return path
	 * honoring the need_resched flag — confirm for this arch.
	 */
	set_tsk_need_resched(current);
}