/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}
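/*
 * For reference: the entering_*()/exiting_*() helpers used throughout
 * these handlers pair irq_enter()/irq_exit() bookkeeping with the local
 * APIC EOI.  The sketch below is illustrative only; the exact
 * definitions live in asm/apic.h and have varied across kernel versions.
 */
static inline void entering_irq(void)
{
	irq_enter();		/* enter hardirq context; RCU starts watching */
}

static inline void entering_ack_irq(void)
{
	entering_irq();
	ack_APIC_irq();		/* EOI the local APIC */
}

static inline void exiting_irq(void)
{
	irq_exit();		/* leave hardirq context; may run softirqs */
}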
__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_irq();
	inc_irq_stat(irq_hv_callback_count);
	if (vmbus_handler)
		vmbus_handler();

	exiting_irq();
	set_irq_regs(old_regs);
}
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_irq();
	inc_irq_stat(irq_hv_callback_count);
	if (vmbus_handler)
		vmbus_handler();

	/*
	 * When the hypervisor recommends against relying on auto-EOI,
	 * the guest must EOI the local APIC explicitly.
	 */
	if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
		ack_APIC_irq();

	exiting_irq();
	set_irq_regs(old_regs);
}
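/*
 * vmbus_handler above is a function pointer that the VMBus driver
 * installs at init time.  A minimal sketch of the registration hook,
 * assuming the historical hv_setup_vmbus_irq() interface:
 */
static void (*vmbus_handler)(void);

void hv_setup_vmbus_irq(void (*handler)(void))
{
	vmbus_handler = handler;	/* invoked from hyperv_vector_handler() */
}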
__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
{
	/*
	 * Need to call irq_enter() before calling the trace point.
	 * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
	 * scheduler_ipi()).  This is OK, since those functions are
	 * allowed to nest.
	 */
	ipi_entering_ack_irq();
	trace_reschedule_entry(RESCHEDULE_VECTOR);
	__smp_reschedule_interrupt();
	trace_reschedule_exit(RESCHEDULE_VECTOR);
	exiting_irq();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode.
	 */
}
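/*
 * The nesting the comment above refers to: __smp_reschedule_interrupt()
 * ends up in scheduler_ipi(), which may do its own irq_enter()/irq_exit()
 * pair.  Illustrative sketch of the inner helper (not the exact source):
 */
static inline void __smp_reschedule_interrupt(void)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();	/* may nest irq_enter()/irq_exit() */
}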
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;

	/*
	 * NB: Unlike exception entries, IRQ entries do not reliably
	 * handle context tracking in the low-level entry code.  This is
	 * because syscall entries execute briefly with IRQs on before
	 * updating context tracking state, so we can take an IRQ from
	 * kernel mode with CONTEXT_USER.  The low-level entry code only
	 * updates the context if we came from user mode, so we won't
	 * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
	 * code is cleaned up enough that we can cleanly defer enabling
	 * IRQs.
	 */
	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
	return 1;
}
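/*
 * Why ~regs->orig_ax recovers the vector: the IRQ entry stubs push the
 * bitwise NOT of the vector number, so all the high bits are set and
 * the ret_from_* code can tell an IRQ frame from a syscall frame by the
 * sign bit.  For example, for vector 0x31 the stub stores ~0x31 in
 * orig_ax, and ~(~0x31) == 0x31 here.  (Illustrative explanation; the
 * exact stub encoding varies across entry-code versions.)
 */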
__visible void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_call_function_single_interrupt();
	exiting_irq();
}
void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	smp_entering_irq();
	__smp_call_function_single_interrupt();
	exiting_irq();
}
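/*
 * smp_entering_irq() is the older spelling of ipi_entering_ack_irq()
 * used in the variant above.  Illustrative sketch, assuming the
 * historical helper (note it acks before irq_enter()):
 */
static inline void smp_entering_irq(void)
{
	ack_APIC_irq();		/* EOI first ... */
	irq_enter();		/* ... then enter hardirq context */
}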
__visible void smp_irq_work_interrupt(struct pt_regs *regs)
{
	irq_work_entering_irq();
	__smp_irq_work_interrupt();
	exiting_irq();
}
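/*
 * Completing the pattern: the inner helper bumps the per-cpu stat and
 * runs the queued irq_work callbacks.  Illustrative sketch (the real
 * code lives in arch/x86/kernel/irq_work.c):
 */
static inline void __smp_irq_work_interrupt(void)
{
	inc_irq_stat(apic_irq_work_irqs);
	irq_work_run();		/* execute callbacks queued via irq_work_queue() */
}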