/*
 * Remove an irq from the disabled mask. If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
void enable_percpu_irq(unsigned int irq)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
	if (__get_cpu_var(irq_depth) == 0)
		unmask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}
/*
 * Remove an irq from the disabled mask. If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
	if (__get_cpu_var(irq_depth) == 0)
		unmask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}
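/*
 * For reference, the disable path is the mirror image of the enable
 * path above: set the bit in irq_disable_mask and mask the interrupt
 * immediately so it cannot fire. This is only a sketch; it assumes a
 * mask_irqs() helper symmetric to unmask_irqs(), and the name
 * disable_percpu_irq() is taken from the comment on the eoi handler
 * below.
 */
void disable_percpu_irq(unsigned int irq)
{
	get_cpu_var(irq_disable_mask) |= (1UL << irq);
	mask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}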
void __cpuinit setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	arch_local_irq_unmask(INT_IPI_K);
#endif
}
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR. We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt. Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: "
				 "stack overflow: %ld\n",
				 sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
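/*
 * A sketch of what the mask_irqs()/unmask_irqs() helpers used above
 * could look like, following the same CHIP_HAS_IPI() split as
 * tile_dev_intr(): write the IPI mask SPRs directly where IPI
 * hardware exists, and fall back to hypervisor calls elsewhere. The
 * SPR_IPI_MASK_SET_K name mirrors the one used in tile_dev_intr();
 * the SPR_IPI_MASK_RESET_K and hv_*_intr() names are assumptions
 * here, not verified interfaces.
 */
#if CHIP_HAS_IPI()
#define mask_irqs(irq_mask)	__insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask)	__insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#else
#define mask_irqs(irq_mask)	hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask)	hv_enable_intr(irq_mask)
#endif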
/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
		unmask_irqs(1UL << d->irq);
}
/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
	unmask_irqs(1UL << d->irq);
}
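/*
 * The handlers above are meant to be plugged into a struct irq_chip
 * and attached to each device irq. A minimal sketch, listing only the
 * callbacks shown in this file; a full chip would also supply the
 * mask/ack/disable counterparts:
 */
static struct irq_chip tile_irq_chip = {
	.name = "tile_irq_chip",
	.irq_enable = tile_irq_chip_enable,
	.irq_eoi = tile_irq_chip_eoi,
	.irq_unmask = tile_irq_chip_unmask,
};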