static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	u64 pend0;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];		/* Pick intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
		generic_handle_irq(__ffs(pend0) + IP27_HUB_IRQ_BASE);

	LOCAL_HUB_L(PI_INT_PEND0);
}
void titan_mailbox_irq(void)
{
	int cpu = smp_processor_id();
	unsigned long status;

	switch (cpu) {
	case 0:
		status = OCD_READ(RM9000x2_OCD_INTP0STATUS3);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR3, status);

		if (status & 0x2)
			smp_call_function_interrupt();
		if (status & 0x4)
			scheduler_ipi();
		break;

	case 1:
		status = OCD_READ(RM9000x2_OCD_INTP1STATUS3);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR3, status);

		if (status & 0x2)
			smp_call_function_interrupt();
		if (status & 0x4)
			scheduler_ipi();
		break;
	}
}
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg_local(&info->messages, 0);

#ifdef __BIG_ENDIAN
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
			generic_smp_call_function_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
			scheduler_ipi();
		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
			generic_smp_call_function_single_interrupt();
		if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
			debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
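/*
 * Context for the handler above: in the x86 Xen tree, per-CPU IPI handlers
 * like xen_reschedule_interrupt are bound through bind_ipi_to_irqhandler().
 * A minimal sketch of that registration follows; the exact flag set and the
 * error handling are assumptions, not a verbatim copy of the kernel source.
 */
static int xen_bind_resched_ipi(unsigned int cpu)
{
	int rc;

	/* One event channel per CPU; IRQF_PERCPU marks it as per-CPU only. */
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU | IRQF_NOBALANCING,
				    "resched", NULL);
	return rc < 0 ? rc : 0;
}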
/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
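/*
 * For reference, the IPI_MESSAGE() selector used by the demux loops above:
 * each message type owns one byte of the packed "messages" word, highest
 * byte first. This mirrors the definition in arch/powerpc/kernel/smp.c.
 */
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))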
void ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
	set_irq_regs(old_regs);
}
void smp_resched_interrupt(void)
{
	irq_enter();
	scheduler_ipi();
	local_cpu_data().irq_resched_count++;
	irq_exit();
	/* re-schedule routine called by interrupt return code. */
}
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
/*
 * Reschedule call back.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	//l4/ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
	if (aprp_hook)
		aprp_hook();
#endif

	scheduler_ipi();

	return IRQ_HANDLED;
}
/*
 * Reschedule call back. KVM uses this interrupt to force a cpu out of
 * guest mode
 */
__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	kvm_set_cpu_l1tf_flush_l1d();

	if (trace_resched_ipi_enabled()) {
		/*
		 * scheduler_ipi() might call irq_enter() as well, but
		 * nested calls are fine.
		 */
		irq_enter();
		trace_reschedule_entry(RESCHEDULE_VECTOR);
		scheduler_ipi();
		trace_reschedule_exit(RESCHEDULE_VECTOR);
		irq_exit();
		return;
	}

	scheduler_ipi();
}
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);

	exynos_ss_irq(ipinr, handle_IPI, irqs_disabled(), ESS_FLAG_IN);

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu, regs);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_WAKEUP:
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	exynos_ss_irq(ipinr, handle_IPI, irqs_disabled(), ESS_FLAG_OUT);
	set_irq_regs(old_regs);
}
/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		generic_smp_call_function_single_interrupt();
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		mt_trace_ISR_start(ipinr);
		ipi_cpu_stop(cpu);
		mt_trace_ISR_end(ipinr);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}
static void do_ext_call_interrupt(unsigned int ext_int_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;

	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	/* Check if we've been told to flush the icache */
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}
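/*
 * The sending side that pairs with mailbox_interrupt() above: raising a bit
 * in the target core's mailbox SET register triggers the mailbox interrupt
 * there. A sketch modelled on the Octeon SMP code; the logical-to-physical
 * core mapping shown here is an assumption.
 */
static void octeon_send_resched_ipi(int cpu)
{
	unsigned int coreid = cpu_logical_map(cpu);

	/* Writing the SET register asserts the mailbox interrupt on that core. */
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), SMP_RESCHEDULE_YOURSELF);
}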
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNC_SINGLE))
			generic_smp_call_function_single_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned int all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
	struct ipi_data *bfin_ipi_data;
	unsigned int cpu = smp_processor_id();
	unsigned long pending;
	unsigned long msg;

	platform_clear_ipi(cpu, IRQ_SUPPLE_1);

	bfin_ipi_data = &__get_cpu_var(bfin_ipi);

	smp_mb();
	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
		msg = 0;
		do {
			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
			switch (msg) {
			case BFIN_IPI_TIMER:
				ipi_timer();
				break;
			case BFIN_IPI_RESCHEDULE:
				scheduler_ipi();
				break;
			case BFIN_IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case BFIN_IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;
			case BFIN_IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;
			}
		} while (msg < BITS_PER_LONG);

		smp_mb();
	}
	return IRQ_HANDLED;
}
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
	struct ipi_data *bfin_ipi_data;
	unsigned int cpu = smp_processor_id();
	unsigned long pending;
	unsigned long msg;

	platform_clear_ipi(cpu, IRQ_SUPPLE_1);

	smp_rmb();
	bfin_ipi_data = this_cpu_ptr(&bfin_ipi);
	while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
		msg = 0;
		do {
			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
			switch (msg) {
			case BFIN_IPI_TIMER:
				ipi_timer();
				break;
			case BFIN_IPI_RESCHEDULE:
				scheduler_ipi();
				break;
			case BFIN_IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case BFIN_IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;
			default:
				goto out;
			}
			atomic_dec(&bfin_ipi_data->count);
		} while (msg < BITS_PER_LONG);
	}
out:
	return IRQ_HANDLED;
}
void handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		BUG();
		break;
	}
}
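/*
 * A matching sender for the minimal demux above. send_ipi_message() is a
 * hypothetical per-arch helper (named here for illustration only) that sets
 * the message bit for each CPU in the mask and raises the IPI line.
 */
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}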
static irqreturn_t xen_resched_handler(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}
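/*
 * On MIPS, a handler like the one above is traditionally wired up through a
 * static irqaction marked IRQF_PERCPU. A sketch of that registration; the
 * exact name string is an assumption.
 */
static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched",
};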
static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
irqreturn_t __irq_entry ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
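/*
 * The parisc sender that pairs with ipi_interrupt() above sets the pending
 * bit under the same per-CPU lock and pokes the target CPU's interrupt
 * register. A sketch following the pattern in arch/parisc/kernel/smp.c;
 * the gsc_writel() operand details are assumptions.
 */
static inline void ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;			/* seen by ipi_interrupt() */
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);	/* raise the IPI */
	spin_unlock_irqrestore(lock, flags);
}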
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
/*
 * Reschedule call back.
 */
static inline void __smp_reschedule_interrupt(void)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
}
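/*
 * In the kernel version that split the body out as __smp_reschedule_interrupt()
 * above, the actual entry point acked the APIC first and then called the
 * common helper. A sketch of that wrapper, assuming the split-handler layout
 * of the x86 tree of that era.
 */
__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	__smp_reschedule_interrupt();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}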