/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ trap at vector %02x\n", irq);
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But don't ack when the APIC is disabled. -AK
	 */
	if (!disable_apic)
		ack_APIC_irq();
}
asmlinkage void smp_thermal_interrupt(void)
{
	__u64 msr_val;

	ack_APIC_irq();

	exit_idle();
	irq_enter();

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	if (therm_throt_process(msr_val & 1))
		mce_log_therm_throt_event(smp_processor_id(), msr_val);

	add_pda(irq_thermal_count, 1);
	irq_exit();
}
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	inc_irq_stat(kvm_posted_intr_ipis);
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;

	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();
	irq_exit();
}
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	trace_irq_entry(sender, regs, NULL);

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * its staying as a return
	 *
	 * BUG();
	 */
	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
	trace_irq_exit(IRQ_HANDLED);
}
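/*
 * A minimal, standalone sketch of the negated-vector convention used
 * above: the IRQ entry stubs store the bitwise complement of the
 * vector in orig_ax, so complementing it again recovers the vector.
 * The vector value and the 0xf0 base below are illustrative
 * assumptions, not the real INVALIDATE_TLB_VECTOR_START.
 */
#include <assert.h>

int main(void)
{
	unsigned long vector = 0xfd;		/* hypothetical example vector */
	unsigned long orig_ax = ~vector;	/* what the entry stub stores */

	assert(~orig_ax == vector);		/* the handler recovers the vector */
	assert(~orig_ax - 0xf0 == 0xd);		/* ...and the sender slot, for a 0xf0 base */
	return 0;
}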
/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned long v;

	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
	       smp_processor_id());
}
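/*
 * A minimal sketch of the in-service test performed above (a
 * hypothetical helper, assuming the standard xAPIC ISR layout:
 * 256 bits split across eight 32-bit registers spaced 0x10 apart).
 */
static inline int apic_vector_in_service(unsigned int vector)
{
	/*
	 * (vector & ~0x1f) rounds the vector down to its group of 32;
	 * >> 1 turns that multiple of 0x20 into the 0x10-byte register
	 * offset from APIC_ISR.  The low 5 bits select the bit within
	 * that register.
	 */
	u32 v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));

	return v & (1U << (vector & 0x1f));
}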
asmregparm unsigned int do_IRQ(struct pt_regs *regs)
{
	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	irq_enter();
	irq = vector_irq[vector];

	if (!handle_irq(irq, regs))
		ack_APIC_irq();

	irq_exit();

	/* Give the scheduler a chance to run */
	if (irq == 0)
		kt_sched_tick();

	return 1;
}
void xcall_slave(void)
{
	cnt_ipi1++;

	xcall_slave2();
	smp_mb();

	/***********************************************/
	/*   We want to call ack_APIC_irq, but we      */
	/*   inline the expansion because of the GPL   */
	/*   symbol issue.                             */
	/*   Once we do this, we can have more IPI     */
	/*   interrupts arrive (but not until we exit  */
	/*   the interrupt routine and re-enable       */
	/*   interrupts).                              */
	/***********************************************/

	/***********************************************/
	/*   Go direct to the assembler instruction.   */
	/*   The APIC interface changed too much over  */
	/*   the course of Linux kernel evolution,     */
	/*   and some bits became GPL. There may be a  */
	/*   price on non-standard APIC hardware, or   */
	/*   paravirt kernels, but this seems to work  */
	/*   for now.                                  */
	/***********************************************/
	dtrace_ack_apic();

#if 0
	/***********************************************/
	/*   Lots of ways to ack the APIC, but they    */
	/*   all have problems.                        */
	/***********************************************/
	//native_apic_mem_write(APIC_EOI, 0);
	// *((volatile u32 *) (APIC_BASE + APIC_EOI)) = 0;
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
	ack_APIC_irq();
# else
	x_apic->write(APIC_EOI, 0);
# endif
#endif
}
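/*
 * A minimal sketch of the kind of direct EOI write the workaround
 * above relies on (an assumption about what dtrace_ack_apic() does,
 * mirroring the commented-out variant in the #if 0 block): in xAPIC
 * mode, writing zero to the memory-mapped EOI register signals
 * end-of-interrupt without going through any exported GPL symbol.
 */
static inline void raw_apic_eoi(void)
{
	/* APIC_EOI is the 0xB0 register offset; any write completes the EOI. */
	*(volatile u32 *)(APIC_BASE + APIC_EOI) = 0;
}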
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	if (cpu_has_apic)
		ack_APIC_irq();
#endif
}
void smp_error_interrupt(struct pt_regs *regs)
{
	u32 v, v1;

	exit_idle();
	irq_enter();
	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	pr_debug("APIC error on CPU%d: %02x(%02x)\n",
		 smp_processor_id(), v, v1);
	irq_exit();
}
void smp_spurious_interrupt(struct pt_regs *regs)
{
	u32 v;

	exit_idle();
	irq_enter();
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();
	inc_irq_stat(irq_spurious_count);

	pr_info("spurious APIC interrupt on CPU#%d, should never happen.\n",
		smp_processor_id());
	irq_exit();
}
/* Ping-pong interrupt handler */
void smp_popcorn_ipi_latency_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	//printk("Ping-pong IPI received!\n");
#if 0
	if (my_cpu) {
		//printk("Sending IPI back to CPU 0...\n");
		/* send IPI back to sender */
		apic->send_IPI_mask(cpumask_of(0), POPCORN_IPI_LATENCY_VECTOR);
	} else {
		//printk("Received ping-pong; reading end timestamp...\n");
		rdtscll(tsc);
		done = 1;
	}
#endif
	return;
}
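/*
 * A minimal sketch of how the disabled ping-pong path above could be
 * driven from CPU 0 (a hypothetical helper; tsc and done are assumed
 * to be the globals referenced in the #if 0 block): record a start
 * timestamp, ping the remote CPU, and spin until this CPU's handler
 * has taken the end timestamp from the returning pong.
 */
static u64 popcorn_measure_ipi_latency(unsigned int remote_cpu)
{
	u64 start;

	done = 0;
	rdtscll(start);		/* timestamp just before the ping */
	apic->send_IPI_mask(cpumask_of(remote_cpu), POPCORN_IPI_LATENCY_VECTOR);
	while (!done)		/* our handler sets done on the pong */
		cpu_relax();

	return tsc - start;	/* round trip in TSC cycles */
}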
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	msa_start_irq(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	msa_irq_exit(X86_PLATFORM_IPI_VECTOR, regs->cs != __KERNEL_CS);
	set_irq_regs(old_regs);
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;
	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;

	/*
	 * NB: Unlike exception entries, IRQ entries do not reliably
	 * handle context tracking in the low-level entry code.  This is
	 * because syscall entries execute briefly with IRQs on before
	 * updating context tracking state, so we can take an IRQ from
	 * kernel mode with CONTEXT_USER.  The low-level entry code only
	 * updates the context if we came from user mode, so we won't
	 * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
	 * code is cleaned up enough that we can cleanly defer enabling
	 * IRQs.
	 */

	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
	return 1;
}
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	irq_exit();
	set_irq_regs(old_regs);
}
void smp_generic_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	exit_idle();
	irq_enter();
	inc_irq_stat(generic_irqs);
	if (generic_interrupt_extension)
		generic_interrupt_extension();
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	/* Save the stack frame pointer in the global __irq_regs;
	 * old_regs keeps the previous __irq_regs value. */
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* Recover the vector number: the entry code stored it
	 * complemented, so complement it again. */
	unsigned vector = ~regs->orig_ax;	/* interrupt vector number */
	unsigned irq;

	/* Bump the hardirq counter, which lives in preempt_count;
	 * scheduling is disabled from here on because preempt_count
	 * is now non-zero. */
	irq_enter();
	/* Leave the idle state (if the current task is the idle task). */
	exit_idle();

	/* Map the vector number to an irq number. */
	irq = __this_cpu_read(vector_irq[vector]);

	/* handle_irq() does the real work of running the interrupt
	 * service routine. */
	if (!handle_irq(irq, regs)) {
		/* Acknowledge in EOI mode. */
		ack_APIC_irq();

		/* This vector was not merely retriggered. */
		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			/* Mark this vector's vector_irq entry as undefined. */
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	/* Drop the hardirq counter and check for pending softirqs; if
	 * any are pending, they run with interrupts enabled.  Scheduling
	 * is allowed again after this. */
	irq_exit();

	/* Restore the previous __irq_regs value. */
	set_irq_regs(old_regs);
	return 1;
}
/*
 * Reschedule call back. KVM uses this interrupt to force a cpu out of
 * guest mode.
 */
__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	kvm_set_cpu_l1tf_flush_l1d();

	if (trace_resched_ipi_enabled()) {
		/*
		 * scheduler_ipi() might call irq_enter() as well, but
		 * nested calls are fine.
		 */
		irq_enter();
		trace_reschedule_entry(RESCHEDULE_VECTOR);
		scheduler_ipi();
		trace_reschedule_exit(RESCHEDULE_VECTOR);
		irq_exit();
		return;
	}

	scheduler_ipi();
}
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();

	set_irq_regs(old_regs);
}
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	/*
	 * the NMI deadlock-detector uses this.
	 */
	add_pda(apic_timer_irqs, 1);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter();
	smp_local_timer_interrupt(regs);
	irq_exit();
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_rax;
	unsigned irq;

	irq_show_regs_callback(smp_processor_id(), regs);

	exit_idle();
	irq_enter();
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_EVENT_TRACE
	if (irq == trace_user_trigger_irq)
		user_trace_start();
#endif
	trace_special(regs->rip, irq, 0);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	if (likely(irq < NR_IRQS))
		generic_handle_irq(irq);
	else {
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
			       __func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * each architecture has to answer this themselves, it doesn't deserve
	 * a generic callback i think.
	 */
#ifdef CONFIG_X86
	printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
#endif
}
/* P4/Xeon Thermal transition interrupt handler */
static void intel_thermal_interrupt(struct pt_regs *regs)
{
	u32 l, h;
	unsigned int cpu = smp_processor_id();
	static unsigned long next[NR_CPUS];

	ack_APIC_irq();

	if (time_after(next[cpu], jiffies))
		return;

	next[cpu] = jiffies + HZ*5;
	rdmsr(MSR_IA32_THERM_STATUS, l, h);
	if (l & 0x1) {
		printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
		printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
		       cpu);
		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
	}
}
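/*
 * A minimal sketch of the rate-limiting test above: time_after(a, b)
 * is wraparound-safe because it subtracts the two jiffies values in
 * unsigned arithmetic and tests the sign of the result, so it stays
 * correct when the jiffies counter overflows.  Roughly:
 */
#define sketch_time_after(a, b)	((long)((b) - (a)) < 0)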
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * its staying as a return
	 *
	 * BUG();
	 */
	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
/*
 * Spurious interrupts should _never_ happen with our APIC/SMP architecture.
 */
void spurious_interrupt(struct cpu_user_regs *regs)
{
	/*
	 * Check if this is a vectored interrupt (most likely, as this is
	 * probably a request to dump local CPU state).  Vectored interrupts
	 * are ACKed; spurious interrupts are not.
	 */
	if (apic_isr_read(SPURIOUS_APIC_VECTOR)) {
		ack_APIC_irq();
		if (this_cpu(state_dump_pending)) {
			this_cpu(state_dump_pending) = 0;
			dump_execstate(regs);
			goto out;
		}
	}

	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should "
	       "never happen.\n", smp_processor_id());
out:
	;
}
void APIC_error_interrupt(void)		// called from "error_apic"
{
	unsigned long v, v1;

	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printf("APIC error: %02lx(%02lx): \"", v, v1);
	if (v1 & 0x01) printf("Send CS error");
	if (v1 & 0x02) printf("Receive CS error");
	if (v1 & 0x04) printf("Send accept error");
	if (v1 & 0x08) printf("Receive accept error");
	if (v1 & 0x10) printf("Reserved");
	if (v1 & 0x20) printf("Send illegal vector");
	if (v1 & 0x40) printf("Received illegal vector");
	if (v1 & 0x80) printf("Illegal register address");
	printf("\" on CPU%d\n", get_processor_id());

	sys_panic("APIC error on CPU%d\n", get_processor_id());
}
asmlinkage void smp_call_function_interrupt(void)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	(*func)(info);
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
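/*
 * A minimal sketch of the initiating side of the handshake above
 * (a hypothetical helper, not the kernel's actual smp_call_function()):
 * the sender must keep call_data alive until every target has bumped
 * ->started, and, when wait == 1, until every target has also bumped
 * ->finished after running the function.
 */
static void wait_for_call_targets(struct call_data_struct *data, int ncpus)
{
	/* Targets inc ->started once they have copied func/info/wait. */
	while (atomic_read(&data->started) != ncpus)
		cpu_relax();

	/* With wait == 1, targets also inc ->finished after the call. */
	if (data->wait)
		while (atomic_read(&data->finished) != ncpus)
			cpu_relax();
}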
static void intel_thermal_interrupt(struct cpu_user_regs *regs)
{
	uint64_t msr_content;
	unsigned int cpu = smp_processor_id();
	static DEFINE_PER_CPU(s_time_t, next);

	ack_APIC_irq();

	if (NOW() < per_cpu(next, cpu))
		return;

	per_cpu(next, cpu) = NOW() + MILLISECS(5000);
	rdmsrl(MSR_IA32_THERM_STATUS, msr_content);
	if (msr_content & 0x1) {
		printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
		printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
		       cpu);
		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
	}
}
/* P4/Xeon Thermal transition interrupt handler */
static void intel_thermal_interrupt(struct cpu_user_regs *regs)
{
	u32 l, h;
	unsigned int cpu = smp_processor_id();
	static s_time_t next[NR_CPUS];

	ack_APIC_irq();

	/* Rate-limit reporting: bail out while the next report time is
	 * still in the future. */
	if (NOW() < next[cpu])
		return;

	next[cpu] = NOW() + MILLISECS(5000);
	rdmsr(MSR_IA32_THERM_STATUS, l, h);
	if (l & 0x1) {
		printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
		printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
		       cpu);
		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
	}
}
void error_interrupt(struct cpu_user_regs *regs)
{
	unsigned long v, v1;

	/* First tickle the hardware, only then report what went on. -- REW */
	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
	       smp_processor_id(), v, v1);
}