/*
 * While we ack the interrupt interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 *
 * Acknowledge a CPU interrupt: clear the corresponding software-interrupt
 * bit in the CP0 Cause register, then mask the line.
 *
 * NOTE(review): another definition of mips_cpu_irq_ack appears later in this
 * file with a different body — presumably two kernel versions were
 * concatenated; confirm only one is compiled into the final TU.
 */
static void mips_cpu_irq_ack(unsigned int irq)
{
	/*
	 * Only necessary for soft interrupts: CPU IRQs 0 and 1 are the
	 * software interrupts, whose pending bits live at Cause[9:8].
	 * Written as 0x100 << n for consistency with mips_mt_cpu_irq_ack.
	 */
	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));

	mask_mips_irq(irq);
}
/*
 * While we ack the interrupt interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 *
 * MT (multithreading) variant: all virtual processing elements must be
 * quiesced with dvpe() before touching CP0 Cause, and re-enabled with
 * evpe() afterwards.
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
	unsigned int vpe_flags;

	/* Stop all VPEs while we clear the pending soft-interrupt bit. */
	vpe_flags = dvpe();
	clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
	evpe(vpe_flags);

	mask_mips_irq(d);
}
/*
 * Disable a CPU interrupt line.  Masking must not race with an interrupt
 * arriving on this CPU, so local interrupts are held off around the mask.
 */
static void mips_cpu_irq_disable(unsigned int irq)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	mask_mips_irq(irq);
	local_irq_restore(irq_state);
}
/* * While we ack the interrupt interrupts are disabled and thus we don't need * to deal with concurrency issues. Same for mips_cpu_irq_end. */ /* NOTE(review): duplicate definition — an earlier mips_cpu_irq_ack in this file also clears the Cause soft-interrupt bit; presumably these come from different kernel versions and only one should be compiled. This variant only masks the line. */ static void mips_cpu_irq_ack(unsigned int irq) { mask_mips_irq(irq); }