/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Returns 1 unconditionally (the entry stub ignores unhandled-IRQ status
 * here; errors are only logged).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	/* Publish this frame as the current irq regs; restore on exit. */
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * The entry stub stores the vector one's-complemented:
	 * high bit used in ret_from_ code.
	 */
	unsigned vector = ~regs->orig_rax;
	unsigned irq;

	exit_idle();
	irq_enter();

	/* Translate hardware vector -> Linux irq via the per-CPU table. */
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	if (likely(irq < NR_IRQS))
		generic_handle_irq(irq);
	else {
		/*
		 * Unknown vector: still ACK the local APIC (unless APIC use
		 * is disabled) so the ISR bit is cleared, then complain.
		 */
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
/*
 * Xen event-channel upcall: demultiplex pending event channels and
 * dispatch each one either to its bound Linux irq (via do_IRQ) or to
 * the /dev/evtchn device.
 *
 * NB. Interrupts are disabled on entry.
 */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;		/* level-1 selector / level-2 word */
	unsigned int l1i, l2i, port;
	int irq, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	/* Re-arm the master pending flag before scanning. */
	vcpu_info->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master pending flag /before/ clearing selector flag. */
	rmb();
#endif
	/* Atomically grab-and-clear the per-VCPU selector word. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		/* Re-read the level-2 word each pass: new events may arrive. */
		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			/* Event-channel port = word index * word size + bit. */
			port = (l1i * BITS_PER_LONG) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1)
				do_IRQ(irq, regs);
			else {
				/* Unbound port: deliver to the evtchn device. */
				exit_idle();
				evtchn_device_upcall(port);
			}
		}
	}
}
/*
 * This interrupt should never happen with our APIC/SMP architecture
 *
 * Reads and clears the APIC Error Status Register and logs the error
 * bits; the read order below is deliberate.
 */
asmlinkage void smp_error_interrupt(void)
{
	unsigned int v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);	/* status accumulated so far */
	apic_write(APIC_ESR, 0);	/* writing latches fresh status... */
	v1 = apic_read(APIC_ESR);	/* ...which we read back here */
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
		smp_processor_id(), v , v1);
	irq_exit();
}
/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 *
 * Spurious-interrupt handler: ACK the local APIC only if the spurious
 * vector's ISR bit is actually set (a genuinely spurious interrupt must
 * not be ACKed).
 *
 * Fix: dropped the long-dead "#if 0" rate-limited-warning block that was
 * commented out with static locals inside it — dead code, never compiled.
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;

	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one. Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	/* Index the ISR register bank by vector: 32 vectors per 16-byte reg. */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	irq_exit();
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	// Save the previous per-CPU irq regs pointer (restored on exit)
	struct pt_regs *old_regs = set_irq_regs(regs);

	// Recover the interrupt vector number
	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	// Leave the idle loop (if the current task was the idle task)
	exit_idle();

	// Enter interrupt context: increments the hardirq nesting counter,
	// which lives in the preempt_count field of the current thread_info
	irq_enter();

	// Map the vector to a Linux irq number; the mapping table is the
	// per-CPU variable vector_irq, populated by the system
	irq = __get_cpu_var(vector_irq)[vector];

	// handle_irq runs the interrupt service routine for this irq
	if (!handle_irq(irq, regs)) {
		// No handler: ACK the local APIC (EOI) anyway
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	// Leave interrupt context: decrements the hardirq counter and may
	// run pending softirqs
	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Always returns 1; a vector with no registered handler is ACKed and
 * logged (rate-limited) rather than reported to the caller.
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *prev_regs = set_irq_regs(regs);
	/* high bit used in ret_from_ code */
	unsigned vec = ~regs->orig_ax;
	unsigned irq_nr;

	irq_enter();
	exit_idle();

	/* Per-CPU vector -> irq translation. */
	irq_nr = __this_cpu_read(vector_irq[vec]);

	if (handle_irq(irq_nr, regs))
		goto out;

	/* Nobody claimed this vector: EOI the APIC and complain. */
	ack_APIC_irq();
	if (printk_ratelimit())
		pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
			__func__, smp_processor_id(), vec, irq_nr);

out:
	irq_exit();
	set_irq_regs(prev_regs);
	return 1;
}
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();
	/* This CPU's registered clockevent device. */
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	/* Dispatch to the clockevent framework's handler (e.g. tick). */
	evt->event_handler(evt);
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	irq_enter();
	exit_idle();

	/* Per-CPU vector -> irq translation. */
	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		/* No handler: EOI the APIC so the vector doesn't wedge. */
		ack_APIC_irq();

		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			/*
			 * Stale retrigger marker: this vector was freed and
			 * retriggered once; now clear the slot for reuse.
			 */
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
/*
 * MCE threshold interrupt: account it and run the machine-check
 * threshold vector. The local APIC is ACKed last on purpose.
 */
asmlinkage void smp_threshold_interrupt(void)
{
	irq_enter();
	exit_idle();
	inc_irq_stat(irq_threshold_count);
	mce_threshold_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
/*
 * MCE threshold interrupt, instrumented variant: brackets the handler
 * with msa_start_irq()/msa_irq_exit() accounting hooks.
 *
 * NOTE(review): there is no explicit irq_exit() pairing the irq_enter()
 * here — presumably msa_irq_exit() performs it internally; verify
 * against the msa implementation.
 */
asmlinkage void smp_threshold_interrupt(struct pt_regs *regs)
{
	irq_enter();
	msa_start_irq(THRESHOLD_APIC_VECTOR);
	exit_idle();
	inc_irq_stat(irq_threshold_count);
	mce_threshold_vector();
	/* Second arg: whether we interrupted user mode (non-kernel CS). */
	msa_irq_exit(THRESHOLD_APIC_VECTOR, regs->cs != __KERNEL_CS);
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
/*
 * Local APIC timer interrupt entry: ACK immediately (timer handling can
 * be slow), then dispatch to the common local timer handler inside
 * irq_enter()/irq_exit().
 */
void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();

	set_irq_regs(old_regs);
}
/*
 * Hyper-V VMbus callback vector: account the interrupt and invoke the
 * registered vmbus handler, if any.
 */
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	exit_idle();

	inc_irq_stat(irq_hv_callback_count);
	/* vmbus_handler is set by the VMbus driver at init; may be NULL. */
	if (vmbus_handler)
		vmbus_handler();

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * Hyper-V VMbus callback vector: route the hardware callback into the
 * generic irq layer via the descriptor for vmbus_irq.
 */
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	irq_enter();
	exit_idle();

	/* Look up the descriptor; it may not exist if vmbus isn't set up. */
	desc = irq_to_desc(vmbus_irq);

	if (desc)
		generic_handle_irq_desc(vmbus_irq, desc);

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 *
 * ACK only if the spurious vector's ISR bit is genuinely set; a truly
 * spurious interrupt must not be ACKed.
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;

	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one. Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	/* Index the ISR register bank by vector: 32 vectors per 16-byte reg. */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	irq_exit();
}
/*
 * Thermal monitor interrupt: read IA32_THERM_STATUS and, when a
 * throttling event is recognized, log it to the MCE subsystem.
 */
asmlinkage void smp_thermal_interrupt(void)
{
	__u64 msr_val;

	ack_APIC_irq();
	exit_idle();
	irq_enter();
	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	/* Bit 0 = thermal status; therm_throt_process decides if we log. */
	if (therm_throt_process(msr_val & 1))
		mce_log_therm_throt_event(smp_processor_id(), msr_val);
	add_pda(irq_thermal_count, 1);
	irq_exit();
}
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 *
 * Only ACKs and accounts the IPI: the actual injected interrupts are
 * consumed by the guest/VMX machinery, not here.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	inc_irq_stat(kvm_posted_intr_ipis);
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * Hyper-V VMbus callback vector, instrumented variant: wraps the
 * generic-irq dispatch with msa_start_irq()/msa_irq_exit() accounting.
 *
 * NOTE(review): no explicit irq_exit() pairs the irq_enter() here —
 * presumably msa_irq_exit() performs it internally; verify against the
 * msa implementation.
 */
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	irq_enter();
	msa_start_irq(vmbus_irq);
	exit_idle();

	desc = irq_to_desc(vmbus_irq);

	if (desc)
		generic_handle_irq_desc(vmbus_irq, desc);

	/* Second arg: whether we interrupted user mode (non-kernel CS). */
	msa_irq_exit(vmbus_irq, regs->cs != __KERNEL_CS);
	set_irq_regs(old_regs);
}
/*
 * APIC error interrupt: latch-and-read the Error Status Register and
 * log both the accumulated and the freshly-latched values.
 */
void smp_error_interrupt(struct pt_regs *regs)
{
	u32 v, v1;

	exit_idle();
	irq_enter();
	v = apic_read(APIC_ESR);	/* status accumulated so far */
	apic_write(APIC_ESR, 0);	/* write latches fresh status... */
	v1 = apic_read(APIC_ESR);	/* ...read back here */
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	pr_debug("APIC error on CPU%d: %02x(%02x)\n",
		smp_processor_id(), v , v1);
	irq_exit();
}
/*
 * Spurious APIC interrupt: ACK only if the spurious vector's ISR bit is
 * actually set (a genuinely spurious interrupt must not be ACKed), then
 * account and report it.
 */
void smp_spurious_interrupt(struct pt_regs *regs)
{
	u32 v;

	exit_idle();
	irq_enter();
	/* Index the ISR register bank by vector: 32 vectors per 16-byte reg. */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	inc_irq_stat(irq_spurious_count);

	pr_info("spurious APIC interrupt on CPU#%d, "
		"should never happen.\n", smp_processor_id());
	irq_exit();
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	/* Save the stack-frame pointer into the global __irq_regs;
	 * old_regs preserves the previous __irq_regs value. */
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* The vector number is stored one's-complemented by the entry
	 * stub, so complement it again to recover it. */
	unsigned vector = ~regs->orig_ax;	/* interrupt vector number */
	unsigned irq;

	/* Bump the hardirq counter (kept in preempt_count). */
	irq_enter();	/* scheduling is disabled from here: preempt_count != 0 */

	/* Leave the idle loop (if the current task was the idle task). */
	exit_idle();

	/* Translate the vector number into a Linux irq number. */
	irq = __this_cpu_read(vector_irq[vector]);

	/* handle_irq is the main dispatch into the interrupt service
	 * routines for this irq. */
	if (!handle_irq(irq, regs)) {
		/* EOI-mode acknowledgement to the local APIC. */
		ack_APIC_irq();

		/* This irq was not a pending retrigger marker. */
		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			/* Mark this vector's slot as undefined again. */
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	/* Drop the hardirq counter and run pending softirqs if any
	 * (softirqs execute with interrupts enabled). */
	irq_exit();	/* scheduling is allowed again from here */

	/* Restore the previous __irq_regs value. */
	set_irq_regs(old_regs);
	return 1;
}
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 *
 * Accounts the IPI and invokes the platform callback, if one has been
 * registered.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	inc_irq_stat(x86_platform_ipis);
	/* Callback is installed by platform code; may be NULL. */
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * Generic platform interrupt: accounts the interrupt and invokes the
 * registered extension handler, if any.
 */
void smp_generic_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	exit_idle();

	irq_enter();

	inc_irq_stat(generic_irqs);

	/* Extension hook is installed elsewhere; may be NULL. */
	if (generic_interrupt_extension)
		generic_interrupt_extension();

	irq_exit();

	set_irq_regs(old_regs);
}
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 *
 * Instrumented variant: brackets the callback with
 * msa_start_irq()/msa_irq_exit() accounting hooks.
 *
 * NOTE(review): no explicit irq_exit() pairs the irq_enter() here —
 * presumably msa_irq_exit() performs it internally; verify against the
 * msa implementation.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	msa_start_irq(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	/* Callback is installed by platform code; may be NULL. */
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	/* Second arg: whether we interrupted user mode (non-kernel CS). */
	msa_irq_exit(X86_PLATFORM_IPI_VECTOR, regs->cs != __KERNEL_CS);
	set_irq_regs(old_regs);
}
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	/* Common per-CPU timer path (tick accounting, clockevents). */
	local_apic_timer_interrupt();
	irq_exit();

	set_irq_regs(old_regs);
}
// Watchdog timer ISR: toggle the green LED each tick and leave
// low-power idle once the countdown reaches its timeout.
void watchdog_irq(void)
{
	LED_GREEN::toggle();

	if (!TIMEOUT::count_down())
		return;

	exit_idle();
}
// USCI transmit-complete ISR: wake the main loop when the UART driver
// reports the TX interrupt needs servicing.
void usci_tx_irq(void)
{
	if (!UART::handle_tx_irq())
		return;

	exit_idle();
}
/*
 * Xen event-channel upcall with round-robin fairness: scans pending
 * event channels starting just after the last port serviced (stored in
 * per-CPU current_l1i/current_l2i) so a busy low-numbered port cannot
 * starve higher ones. Timer VIRQ events are handled first so all
 * hardirq handlers see up-to-date time.
 *
 * NB. Interrupts are disabled on entry.
 */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;			/* selector word / evtchn word */
	unsigned long masked_l1, masked_l2;	/* words with done-bits masked */
	unsigned int l1i, l2i, start_l1i, start_l2i, port, count, i;
	int irq;
	unsigned int cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	exit_idle();
	irq_enter();

	do {
		/* Avoid a callback storm when we reenable delivery. */
		vcpu_info->evtchn_upcall_pending = 0;

		/* Nested invocations bail immediately. */
		if (unlikely(per_cpu(upcall_count, cpu)++))
			break;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif

		/*
		 * Handle timer interrupts before all others, so that all
		 * hardirq handlers see an up-to-date system time even if we
		 * have just woken from a long idle period.
		 */
		if ((irq = __get_cpu_var(virq_to_irq)[VIRQ_TIMER]) != -1) {
			port = evtchn_from_irq(irq);
			l1i = port / BITS_PER_LONG;
			l2i = port % BITS_PER_LONG;
			if (active_evtchns(cpu, s, l1i) & (1ul<<l2i))
				do_IRQ(irq, regs);
		}

		/* Atomically grab-and-clear the per-VCPU selector word. */
		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

		/* Resume the scan where the previous upcall left off. */
		start_l1i = l1i = per_cpu(current_l1i, cpu);
		start_l2i = per_cpu(current_l2i, cpu);

		for (i = 0; l1 != 0; i++) {
			/* Only consider selector bits at/after position l1i. */
			masked_l1 = l1 & ((~0UL) << l1i);
			/* If we masked out all events, wrap to beginning. */
			if (masked_l1 == 0) {
				l1i = l2i = 0;
				continue;
			}
			l1i = __ffs(masked_l1);

			l2 = active_evtchns(cpu, s, l1i);
			l2i = 0; /* usually scan entire word from start */
			if (l1i == start_l1i) {
				/* We scan the starting word in two parts. */
				if (i == 0)
					/* 1st time: start in the middle */
					l2i = start_l2i;
				else
					/* 2nd time: mask bits done already */
					l2 &= (1ul << start_l2i) - 1;
			}

			do {
				/* Bits at/after l2i that are still pending. */
				masked_l2 = l2 & ((~0UL) << l2i);
				if (masked_l2 == 0)
					break;
				l2i = __ffs(masked_l2);

				/* process port */
				port = (l1i * BITS_PER_LONG) + l2i;
				if ((irq = evtchn_to_irq[port]) != -1)
					do_IRQ(irq, regs);
				else
					evtchn_device_upcall(port);

				l2i = (l2i + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				per_cpu(current_l1i, cpu) =
					l2i ? l1i : (l1i + 1) % BITS_PER_LONG;
				per_cpu(current_l2i, cpu) = l2i;

			} while (l2i != 0);

			/* Scan start_l1i twice; all others once. */
			if ((l1i != start_l1i) || (i != 0))
				l1 &= ~(1UL << l1i);

			l1i = (l1i + 1) % BITS_PER_LONG;
		}

		/* If there were nested callbacks then we have more to do. */
		count = per_cpu(upcall_count, cpu);
		per_cpu(upcall_count, cpu) = 0;
	} while (unlikely(count != 1));

	irq_exit();
}
// USART1 ISR: service the UART and wake the main loop only when the
// driver says there is work for it.
void USART1_IRQHandler(void)
{
	if (!UART::handle_irq())
		return;

	exit_idle();
}
// SPI1 ISR: service the SPI peripheral and wake the main loop only
// when the driver says there is work for it.
void SPI1_IRQHandler(void)
{
	if (!SPI::handle_irq())
		return;

	exit_idle();
}
// RTC ISR: always service the real-time clock, then leave low-power
// idle once the countdown reaches its timeout.
void RTC_IRQHandler(void)
{
	RTCLK::handle_irq();

	if (!TIMEOUT::count_down())
		return;

	exit_idle();
}