void hv_message_intr(struct pt_regs *regs, int intnum)
{
        /*
         * We enter with interrupts disabled and leave them disabled,
         * to match expectations of called functions (e.g.
         * do_ccupdate_local() in mm/slab.c).  This is also consistent
         * with normal call entry for device interrupts.
         */
        int message[HV_MAX_MESSAGE_SIZE/sizeof(int)];
        HV_RcvMsgInfo rmi;
        int nmsgs = 0;

        /* Track time spent here in an interrupt context */
        struct pt_regs *old_regs = set_irq_regs(regs);
        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: less than 1/8th stack free? */
        {
                long sp = stack_pointer - (long) current_thread_info();
                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                        pr_emerg("hv_message_intr: "
                                 "stack overflow: %ld\n",
                                 sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

        while (1) {
                HV_MsgState *state = this_cpu_ptr(&msg_state);
                rmi = hv_receive_message(*state, (HV_VirtAddr) message,
                                         sizeof(message));
                if (rmi.msglen == 0)
                        break;

                if (rmi.msglen < 0)
                        panic("hv_receive_message failed: %d", rmi.msglen);

                ++nmsgs;

                if (rmi.source == HV_MSG_TILE) {
                        int tag;

                        /* we just send tags for now */
                        BUG_ON(rmi.msglen != sizeof(int));

                        tag = message[0];
#ifdef CONFIG_SMP
                        evaluate_message(message[0]);
#else
                        panic("Received IPI message %d in UP mode", tag);
#endif
                } else if (rmi.source == HV_MSG_INTR) {
                        HV_IntrMsg *him = (HV_IntrMsg *)message;
                        struct hv_driver_cb *cb =
                                (struct hv_driver_cb *)him->intarg;
                        cb->callback(cb, him->intdata);
                        __get_cpu_var(irq_stat).irq_hv_msg_count++;
                }
        }

        /*
         * We shouldn't have gotten a message downcall with no
         * messages available.
         */
        if (nmsgs == 0)
                panic("Message downcall invoked with no messages!");

        /*
         * Track time spent against the current process again and
         * process any softirqs if they are waiting.
         */
        irq_exit();
        set_irq_regs(old_regs);
}
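/*
 * For HV_MSG_INTR messages the handler above simply trampolines into a
 * driver-supplied callback: the hypervisor hands back the intarg pointer
 * the driver registered, and it is reinterpreted as a struct hv_driver_cb.
 * The sketch below shows roughly what the driver side could look like.
 * The embedding structure, the field names other than "callback", and the
 * callback argument types are assumptions for illustration, inferred from
 * the cb->callback(cb, him->intdata) call site rather than quoted from the
 * real hv_driver.h definitions.
 */
struct my_dev {                         /* hypothetical driver state */
        struct hv_driver_cb cb;         /* handed to the HV as intarg */
        u32 last_intdata;               /* hypothetical bookkeeping */
};

/* Runs from hv_message_intr() in hard-irq context, so it must not sleep. */
static void my_dev_intr(struct hv_driver_cb *cb, u32 intdata)
{
        struct my_dev *dev = container_of(cb, struct my_dev, cb);

        dev->last_intdata = intdata;    /* interpret intdata as the driver wishes */
}

static void my_dev_setup(struct my_dev *dev)
{
        dev->cb.callback = my_dev_intr;
        /*
         * &dev->cb would then be passed to the hypervisor as the interrupt
         * argument when the device interrupt is requested, so that it comes
         * back as him->intarg in hv_message_intr() above.
         */
}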
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
        int depth = __get_cpu_var(irq_depth)++;
        unsigned long original_irqs;
        unsigned long remaining_irqs;
        struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
        /*
         * Pending interrupts are listed in an SPR.  We might be
         * nested, so be sure to only handle irqs that weren't already
         * masked by a previous interrupt.  Then, mask out the ones
         * we're going to handle.
         */
        unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
        original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
        __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
        /*
         * Hypervisor performs the equivalent of the Gx code above and
         * then puts the pending interrupt mask into a system save reg
         * for us to find.
         */
        original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
        remaining_irqs = original_irqs;

        /* Track time spent here in an interrupt context. */
        old_regs = set_irq_regs(regs);
        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: less than 1/8th stack free? */
        {
                long sp = stack_pointer - (long) current_thread_info();
                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                        pr_emerg("tile_dev_intr: "
                                 "stack overflow: %ld\n",
                                 sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif
        while (remaining_irqs) {
                unsigned long irq = __ffs(remaining_irqs);
                remaining_irqs &= ~(1UL << irq);

                /* Count device irqs; Linux IPIs are counted elsewhere. */
                if (irq != IRQ_RESCHEDULE)
                        __get_cpu_var(irq_stat).irq_dev_intr_count++;

                generic_handle_irq(irq);
        }

        /*
         * If we weren't nested, turn on all enabled interrupts,
         * including any that were reenabled during interrupt
         * handling.
         */
        if (depth == 0)
                unmask_irqs(~__get_cpu_var(irq_disable_mask));

        __get_cpu_var(irq_depth)--;

        /*
         * Track time spent against the current process again and
         * process any softirqs if they are waiting.
         */
        irq_exit();
        set_irq_regs(old_regs);
}
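/*
 * unmask_irqs() above hides the same CHIP_HAS_IPI() split that the body of
 * tile_dev_intr() spells out explicitly: on TILE-Gx the per-cpu interrupt
 * mask lives in SPRs, while on TILEPro it is manipulated through hypervisor
 * calls.  A sketch of how the mask/unmask helpers could be defined along
 * those lines; the SPR_IPI_MASK_RESET_K / SPR_IPI_EVENT_RESET_K names and
 * the hv_enable_intr()/hv_disable_intr()/hv_clear_intr() calls are
 * assumptions based on the pattern above, not a quote of arch/tile.
 */
#if CHIP_HAS_IPI()
/* TILE-Gx: set or clear bits in the per-cpu IPI mask and event SPRs. */
#define mask_irqs(irq_mask)     __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask)   __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask)    __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* TILEPro: the hypervisor emulates the interrupt controller for us. */
#define mask_irqs(irq_mask)     hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask)   hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask)    hv_clear_intr(irq_mask)
#endif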
/*
 * That's where the IVT branches when we get an external
 * interrupt.  This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long saved_tpr;

#if IRQ_DEBUG
        {
                unsigned long bsp, sp;

                /*
                 * Note: if the interrupt happened while executing in
                 * the context switch routine (ia64_switch_to), we may
                 * get a spurious stack overflow here.  This is
                 * because the register and the memory stack are not
                 * switched atomically.
                 */
                bsp = ia64_getreg(_IA64_REG_AR_BSP);
                sp = ia64_getreg(_IA64_REG_SP);

                if ((sp - bsp) < 1024) {
                        static unsigned char count;
                        static long last_time;

                        if (time_after(jiffies, last_time + 5 * HZ))
                                count = 0;
                        if (++count < 5) {
                                last_time = jiffies;
                                printk("ia64_handle_irq: DANGER: less than "
                                       "1KB of free stack space!!\n"
                                       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
                        }
                }
        }
#endif /* IRQ_DEBUG */

        /*
         * Always set TPR to limit maximum interrupt nesting depth to
         * 16 (without this, it would be ~240, which could easily lead
         * to kernel stack overflows).
         */
        irq_enter();
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                int irq = local_vector_to_irq(vector);
                struct irq_desc *desc = irq_to_desc(irq);

                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
                        kstat_incr_irqs_this_cpu(irq, desc);
                } else if (unlikely(IS_RESCHEDULE(vector))) {
                        kstat_incr_irqs_this_cpu(irq, desc);
                } else {
                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();

                        if (unlikely(irq < 0)) {
                                printk(KERN_ERR "%s: Unexpected interrupt "
                                       "vector %d on CPU %d is not mapped "
                                       "to any IRQ!\n", __func__, vector,
                                       smp_processor_id());
                        } else
                                generic_handle_irq(irq);

                        /*
                         * Disable interrupts and send EOI:
                         */
                        local_irq_disable();
                        ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
                }
                ia64_eoi();
                vector = ia64_get_ivr();
        }
        /*
         * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
         * handler needs to be able to wait for further keyboard interrupts, which can't
         * come through until ia64_eoi() has been done.
         */
        irq_exit();
        set_irq_regs(old_regs);
}
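/*
 * The static count/last_time pair in the IRQ_DEBUG block above is a
 * hand-rolled rate limiter: at most a handful of warnings per five-second
 * window.  Roughly the same behaviour is available from the generic helper
 * in <linux/ratelimit.h>; a sketch of the equivalent check (same message,
 * same 5 * HZ window, burst of 5):
 */
        if ((sp - bsp) < 1024) {
                static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

                if (__ratelimit(&ratelimit))
                        printk("ia64_handle_irq: DANGER: less than "
                               "1KB of free stack space!!\n"
                               "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
        }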