/*
 * do_cio_interrupt() handles all normal I/O device IRQ's
 */
static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	set_cpu_flag(CIF_NOHZ_DELAY);
	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
	trace_s390_cio_interrupt(tpi_info);
	irb = this_cpu_ptr(&cio_irb);
	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
	if (!sch) {
		/* Clear pending interrupt condition. */
		inc_irq_stat(IRQIO_CIO);
		tsch(tpi_info->schid, irb);
		return IRQ_HANDLED;
	}
	spin_lock(sch->lock);
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) == 0) {
		/* Keep subchannel information word up to date. */
		memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
		/* Call interrupt handler if there is one. */
		if (sch->driver && sch->driver->irq)
			sch->driver->irq(sch);
		else
			inc_irq_stat(IRQIO_CIO);
	} else
		inc_irq_stat(IRQIO_CIO);
	spin_unlock(sch->lock);

	return IRQ_HANDLED;
}
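/*
 * Illustrative sketch, not part of the snippet above: inc_irq_stat(), used
 * throughout these handlers, is a per-cpu statistics helper. On x86 it is a
 * macro over struct irq_stat members, on s390 an inline over an
 * interruption-class array. Exact definitions vary by kernel version, so
 * treat these as hedged approximations.
 */

/* arch/x86/include/asm/hardirq.h (approximate) */
#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)

/* arch/s390/include/asm/hardirq.h (approximate) */
static inline void inc_irq_stat(enum interruption_class irq)
{
	__this_cpu_inc(irq_stat.irqs[irq]);
}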
/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;

			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}
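/*
 * Hedged sketch of the sender side (loosely modeled on
 * native_flush_tlb_others() in mainline; the helper name is hypothetical,
 * field names follow struct flush_tlb_info used above): the initiating CPU
 * fills in the flush range and runs flush_tlb_func() on every CPU in the
 * mask via an IPI.
 */
static void flush_tlb_others_sketch(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct flush_tlb_info info = {
		.flush_mm	= mm,
		.flush_start	= start,
		.flush_end	= end,
	};

	/* Runs flush_tlb_func(&info) on each CPU in cpumask and waits. */
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}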
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}
/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
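/*
 * Hedged sketch of how Xen wires this up (loosely modeled on
 * xen_smp_intr_init(); the helper name is hypothetical and the flags and
 * resched_name buffer vary by version): each per-CPU IPI is bound to an
 * event channel with an irqreturn_t handler like xen_reschedule_interrupt()
 * above.
 */
static int xen_bind_resched_ipi_sketch(unsigned int cpu, char *resched_name)
{
	return bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
				      xen_reschedule_interrupt,
				      IRQF_PERCPU | IRQF_NOBALANCING,
				      resched_name, NULL);
}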
static void timing_alert_interrupt(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	inc_irq_stat(IRQEXT_TLA);
	if (param32 & 0x00038000)
		stp_timing_alert((struct stp_irq_parm *) &param32);
}
/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
				    unsigned int alert, unsigned long unused)
{
	struct cpu_cf_events *cpuhw;

	if (!(alert & CPU_MF_INT_CF_MASK))
		return;

	inc_irq_stat(IRQEXT_CMC);
	cpuhw = this_cpu_ptr(&cpu_cf_events);

	/* Measurement alerts are shared and might happen when the PMU
	 * is not reserved.  Ignore these alerts in this case. */
	if (!(cpuhw->flags & PMU_F_RESERVED))
		return;

	/* counter authorization change alert */
	if (alert & CPU_MF_INT_CF_CACA)
		qctri(&cpuhw->info);

	/* loss of counter data alert */
	if (alert & CPU_MF_INT_CF_LCDA)
		pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());

	/* loss of MT counter data alert */
	if (alert & CPU_MF_INT_CF_MTDA)
		pr_warn("CPU[%i] MT counter data was lost\n",
			smp_processor_id());

	/* store alert for special handling by in-kernel users */
	atomic64_or(alert, &cpuhw->alert);
}
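/*
 * Hedged sketch of the registration path (loosely modeled on the s390
 * cpum_cf init code; the helper name is hypothetical and exact call sites
 * differ between versions): measurement alerts arrive as external
 * interrupts, so the handler above is hooked into the external-interrupt
 * dispatch and its interrupt subclass is enabled.
 */
static int cpumf_alert_register_sketch(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_MEASUREMENT_ALERT,
				   cpumf_measurement_alert);
	if (rc)
		return rc;
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	return 0;
}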
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
		if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
	inc_irq_stat(irq_tlb_count);
}
/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status had been pending. Called with the subchannel's lock held.
 */
void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = this_cpu_ptr(&cio_irb);
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		local_bh_disable();
		irq_enter();
	}
	kstat_incr_irq_this_cpu(IO_INTERRUPT);
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		inc_irq_stat(IRQIO_CIO);
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}
static void clock_comparator_interrupt(struct ext_code ext_code,
				       unsigned int param32,
				       unsigned long param64)
{
	inc_irq_stat(IRQEXT_CLK);
	if (S390_lowcore.clock_comparator == -1ULL)
		set_clock_comparator(S390_lowcore.clock_comparator);
}
void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	//l4/ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}
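/*
 * Hedged sketch of the sender (loosely modeled on
 * native_smp_send_reschedule() in mainline; the helper name is
 * hypothetical): the scheduler kicks a remote CPU by raising
 * RESCHEDULE_VECTOR, which lands in smp_reschedule_interrupt() above.
 */
static void smp_send_reschedule_sketch(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}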
/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
{
	ipi_entering_ack_irq();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
	exiting_irq();
}
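/*
 * Hedged sketch of the sending side (loosely modeled on
 * native_send_call_func_single_ipi() in mainline; the helper name is
 * hypothetical): the generic smp_call_function_single() path ends up
 * raising the vector serviced by the handler above.
 */
static void send_call_func_single_ipi_sketch(int cpu)
{
	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}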
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}
asmlinkage void smp_threshold_interrupt(void)
{
	irq_enter();
	exit_idle();
	inc_irq_stat(irq_threshold_count);
	mce_threshold_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
asmlinkage void smp_threshold_interrupt(struct pt_regs *regs)
{
	irq_enter();
	msa_start_irq(THRESHOLD_APIC_VECTOR);
	exit_idle();
	inc_irq_stat(irq_threshold_count);
	mce_threshold_vector();
	msa_irq_exit(THRESHOLD_APIC_VECTOR, regs->cs != __KERNEL_CS);
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_irq();
	inc_irq_stat(irq_hv_callback_count);
	if (vmbus_handler)
		vmbus_handler();

	exiting_irq();
	set_irq_regs(old_regs);
}
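/*
 * Hedged sketch of how vmbus_handler gets populated (loosely modeled on
 * hv_setup_vmbus_irq() in arch/x86/kernel/cpu/mshyperv.c; vector setup is
 * omitted and details vary by version): the VMBus driver installs its ISR,
 * and hyperv_vector_handler() above dispatches to it.
 */
void hv_setup_vmbus_irq_sketch(void (*handler)(void))
{
	vmbus_handler = handler;
}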
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}
static int perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
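/*
 * Hedged sketch of the registration (loosely modeled on the AMD IBS init
 * code; the helper name and the "perf_ibs" name string are assumptions):
 * NMI handlers like the one above are chained onto the local-CPU NMI via
 * register_nmi_handler(), and a non-zero return value tells the NMI core
 * the event was handled.
 */
static int perf_ibs_register_nmi_sketch(void)
{
	return register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0,
				    "perf_ibs");
}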
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	/* LITMUS^RT needs this interrupt to properly reschedule
	 * on this cpu */
	set_tsk_need_resched(current);
	inc_irq_stat(irq_resched_count);
	TS_SEND_RESCHED_END;
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();

#ifdef CONFIG_X86_32
	if (current->active_mm)
		load_user_cs_desc(cpu, current->active_mm);
#endif

	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_irq();
	inc_irq_stat(irq_hv_callback_count);
	if (vmbus_handler)
		vmbus_handler();

	if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
		ack_APIC_irq();

	exiting_irq();
	set_irq_regs(old_regs);
}
u32 native_safe_apic_wait_icr_idle(void)
{
	u32 send_status;
	int timeout;

	timeout = 0;
	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		if (!send_status)
			break;
		inc_irq_stat(icr_read_retry_count);
		udelay(100);
	} while (timeout++ < 1000);

	return send_status;
}
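/*
 * Hedged sketch of a caller in the IPI send path (loosely modeled on
 * __default_send_IPI_shortcut(); the helper name is hypothetical): senders
 * wait for the previous ICR command to be accepted before writing a new
 * one. The "safe" variant above bounds the wait and counts retries in
 * icr_read_retry_count instead of spinning forever.
 */
static void send_ipi_shortcut_sketch(unsigned int shortcut, int vector,
				     unsigned int dest)
{
	if (native_safe_apic_wait_icr_idle())
		return;	/* ICR still busy after the bounded wait; give up */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(shortcut, vector, dest));
}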
asmlinkage void smp_thermal_interrupt(void)
{
	__u64 msr_val;

	ack_APIC_irq();
	exit_idle();
	irq_enter();
	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	if (therm_throt_process(msr_val & 1))
		mce_log_therm_throt_event(smp_processor_id(), msr_val);
	inc_irq_stat(irq_thermal_count);
	irq_exit();
}
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	irq_enter();
	exit_idle();
	inc_irq_stat(kvm_posted_intr_ipis);
	irq_exit();
	set_irq_regs(old_regs);
}
static int perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
static void chsc_subchannel_irq(struct subchannel *sch)
{
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	struct chsc_request *request = private->request;
	struct irb *irb = this_cpu_ptr(&cio_irb);

	CHSC_LOG(4, "irb");
	CHSC_LOG_HEX(4, irb, sizeof(*irb));
	inc_irq_stat(IRQIO_CSC);

	/* Copy irb to provided request and set done. */
	if (!request) {
		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
			 sch->schid.ssid, sch->schid.sch_no);
		return;
	}
	private->request = NULL;
	/* Hand the irb to the waiter and drop the device reference. */
	memcpy(&request->irb, irb, sizeof(*irb));
	cio_update_schib(sch);
	complete(&request->completion);
	put_device(&sch->dev);
}
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	if (!evt->event_handler) {
		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	inc_irq_stat(apic_timer_irqs);

	evt->event_handler(evt);
}
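/*
 * Hedged sketch of the setup side (loosely modeled on setup_APIC_timer();
 * the helper name is hypothetical, and lapic_clockevent plus the exact
 * registration details are assumptions that vary by version): each CPU
 * registers its LAPIC clockevent, after which the tick core installs
 * evt->event_handler, and the interrupt handler above simply dispatches
 * to it.
 */
static void setup_APIC_timer_sketch(void)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(levt);
}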