/*
 * Acknowledge a CPU interrupt line.
 *
 * While we ack the interrupt, interrupts are disabled, so there are
 * no concurrency issues to deal with here (same for mips_cpu_irq_end).
 * Multi-VPE execution is paused (dvpe/evpe) around the Cause
 * read-modify-write.
 */
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
	unsigned int prev_vpe_state = dvpe();

	/* Drop this line's pending bit in the CP0 Cause register. */
	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
	evpe(prev_vpe_state);
	mask_mips_mt_irq(irq);
}
/*
 * SMTC timer interrupt handler.
 *
 * One Count/Compare pair exists per VPE; whichever TC of a VPE takes the
 * tick de-asserts the timer and then either runs the full timer service
 * (on VPE 0) or only local accounting, broadcasting IPIs either way so
 * the other TCs get to schedule.
 *
 * NOTE(review): bit 30 of Cause is presumably the timer-pending (TI)
 * bit and 0x100<<7 the IP7 mask on this platform — confirm against the
 * CPU's CP0 layout.
 */
irqreturn_t smtc_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int vpflags;

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare (read_c0_count() - 1);
		/* Cross-VPE Cause RMW requires multi-VPE execution paused. */
		vpflags = dvpe();
		clear_c0_cause(0x100<<7);
		evpe(vpflags);
		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system. One TC of each VPE will take
		 * the actual timer interrupt. The others will get
		 * timer broadcast IPIs. We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		} else {
			/* Re-arm Compare for the next tick ourselves. */
			write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}

	return IRQ_HANDLED;
}
/*
 * Acknowledge a CPU interrupt (irq_data-based variant).
 *
 * While we ack the interrupt, interrupts are disabled, so no extra
 * locking is needed here (same applies to mips_cpu_irq_end).
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
	unsigned int vpe_state = dvpe();
	unsigned int pending_mask = 0x100 << (d->irq - MIPS_CPU_IRQ_BASE);

	/* Clear the pending bit in Cause, then mask the line. */
	clear_c0_cause(pending_mask);
	evpe(vpe_state);
	mask_mips_irq(d);
}
/*
 * Acknowledge a CPU interrupt line.
 *
 * While we ack the interrupt, interrupts are disabled and thus we don't
 * need to deal with concurrency issues. Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
	unsigned int saved_mt = dvpe();

	/* Knock down the pending bit for this line in Cause. */
	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
	evpe(saved_mt);
	mask_mips_mt_irq(irq);
}
/*
 * First-enable hook for a CPU interrupt line (irq_data-based variant):
 * flush any stale pending bit from Cause, then unmask the line.
 */
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
	unsigned int saved = dvpe();
	unsigned int stale = 0x100 << (d->irq - MIPS_CPU_IRQ_BASE);

	clear_c0_cause(stale);
	evpe(saved);
	unmask_mips_irq(d);

	return 0;
}
/*
 * First-enable hook for a CPU interrupt line: discard any left-over
 * pending bit before the line is enabled for the first time.
 */
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
	unsigned int prev_vpe = dvpe();

	/* Clear a possibly stale pending bit in Cause. */
	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
	evpe(prev_vpe);
	mips_mt_cpu_irq_enable(irq);

	return 0;
}
/*
 * Startup a CPU interrupt line: make sure no stale pending bit is
 * latched in Cause, then unmask the line.
 */
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
	unsigned int mt_flags = dvpe();

	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
	evpe(mt_flags);
	unmask_mips_mt_irq(irq);

	return 0;
}
/*
 * Raise software interrupt 0 (C_SW0) on TC1's VPE.
 *
 * NOTE(review): evpe() is intentionally called with EVPE_ENABLE rather
 * than the value dvpe() returned, unconditionally re-enabling multi-VPE
 * execution — confirm this is the intended semantics at all call sites.
 */
void _interrupt_sp(void)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	dvpe();
	settc(1);
	/* Set the SW0 cause bit on the target VPE to latch the interrupt. */
	write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
	evpe(EVPE_ENABLE);
	local_irq_restore(irq_state);
}
/*
 * Mask a CPU interrupt line in the CP0 Status register.
 *
 * The two SI_SWINT*_INT1 software interrupts map to hardware line 1;
 * everything else maps to line 0.  The Status read-modify-write is
 * bracketed by dvpe()/evpe().
 */
static inline void mask_mips_mt_irq(unsigned int irq)
{
	unsigned int saved_vpe = dvpe();
	int hw_line = ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1)) ? 1 : 0;

	clear_c0_status(0x100 << hw_line);
	irq_disable_hazard();
	evpe(saved_vpe);
}
/*
 * Acknowledge a CPU interrupt (software-interrupt aware variant).
 *
 * While we ack the interrupt, interrupts are disabled, so no locking is
 * required (same for mips_cpu_irq_end).  SI_SWINT*_INT1 lines map to
 * hardware line 1, everything else to line 0.
 */
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
	unsigned int saved_vpe = dvpe();
	int hw_line = ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1)) ? 1 : 0;

	/* Clear the pending bit for the mapped hardware line. */
	clear_c0_cause(0x100 << hw_line);
	evpe(saved_vpe);
	mask_mips_mt_irq(irq);
}
/* Interrupt handler may be called before rtlx_init has otherwise had a chance to run. */ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) { int i; unsigned int flags, vpeflags; /* Ought not to be strictly necessary for SMTC builds */ local_irq_save(flags); vpeflags = dvpe(); set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); irq_enable_hazard(); evpe(vpeflags); local_irq_restore(flags); for (i = 0; i < RTLX_CHANNELS; i++) { wake_up(&channel_wqs[i].lx_queue); wake_up(&channel_wqs[i].rt_queue); } return IRQ_HANDLED; }
/*
 * Startup a CPU interrupt line: unmask it in the external interrupt
 * controller's mask register (CR_INTC_IMR), clear any stale pending
 * bit in Cause, and unmask the CPU-side line.
 *
 * NOTE(review): the IMR bit position uses (irq - 1), i.e. the
 * controller's bits appear to be numbered from 1 — confirm against the
 * platform's intc documentation.  The SI_SWINT_INT* lines also enable
 * their paired SI_SWINT1_INT* bit.
 */
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
	unsigned int vpflags = dvpe();
	/* Software interrupts SI_SWINT*_INT1 map to CPU line 1, else 0. */
	int cpu_irq = 0;
	if ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1))
		cpu_irq = 1;
	/* Unmask this source in the external controller. */
	VPint(CR_INTC_IMR) |= (1 << (irq-1));
	if (irq == SI_SWINT_INT0)
		VPint(CR_INTC_IMR) |= (1 << (SI_SWINT1_INT0-1));
	else if (irq == SI_SWINT_INT1)
		VPint(CR_INTC_IMR) |= (1 << (SI_SWINT1_INT1-1));
	/* Drop any stale pending bit for the mapped CPU line. */
	clear_c0_cause(0x100 << cpu_irq);
	evpe(vpflags);
	unmask_mips_mt_irq(irq);
	return 0;
}
/* * Interrupt handler may be called before rtlx_init has otherwise had * a chance to run. */ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) { unsigned int vpeflags; unsigned long flags; int i; local_irq_save(flags); vpeflags = dvpe(); set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); irq_enable_hazard(); evpe(vpeflags); local_irq_restore(flags); for (i = 0; i < RTLX_CHANNELS; i++) { wake_up(&channel_wqs[i].lx_queue); wake_up(&channel_wqs[i].rt_queue); } return IRQ_HANDLED; }
/*
 * clockevents "set next event" hook: program Compare to fire 'delta'
 * Count cycles from now.
 *
 * Returns -ETIME if Count has already passed the programmed value by
 * the time we re-read it (the event was requested too close), else 0.
 *
 * Under SMTC the Count/Compare access is done with interrupts off and
 * multi-VPE execution paused; note the brace opened inside the first
 * #ifdef is closed inside the second — the block scope only exists in
 * SMTC builds.
 */
static int mips_next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;
#ifdef CONFIG_MIPS_MT_SMTC
	{
		unsigned long flags, vpflags;
		local_irq_save(flags);
		vpflags = dvpe();
#endif
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		/* If Count already passed cnt, the interrupt was missed. */
		res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
#ifdef CONFIG_MIPS_MT_SMTC
		evpe(vpflags);
		local_irq_restore(flags);
	}
#endif
	return res;
}
/*
 * Simulator platform timer interrupt handler.
 *
 * UP builds just forward to timer_interrupt().  On SMP, CPU 0 runs the
 * global timer service while the others only re-arm Compare; on SMTC,
 * whichever TC took the tick de-asserts the timer, re-enables the mask,
 * and broadcasts IPIs so the other TCs schedule.
 */
irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();

	/*
	 * CPU 0 handles the global timer interrupt job
	 * resets count/compare registers to trigger next timer int.
	 */
#ifndef CONFIG_MIPS_MT_SMTC
	if (cpu == 0) {
		timer_interrupt(irq, dev_id);
	} else {
		/* Everyone else needs to reset the timer int here as
		   ll_local_timer_interrupt doesn't */
		/*
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPU's
		 */
		write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
	}
#else /* SMTC */
	/*
	 * In SMTC system, one Count/Compare set exists per VPE.
	 * Which TC within a VPE gets the interrupt is essentially
	 * random - we only know that it shouldn't be one with
	 * IXMT set. Whichever TC gets the interrupt needs to
	 * send special interprocessor interrupts to the other
	 * TCs to make sure that they schedule, etc.
	 *
	 * That code is specific to the SMTC kernel, not to
	 * the simulation platform, so it's invoked from
	 * the general MIPS timer_interrupt routine.
	 *
	 * We have a problem in that the interrupt vector code
	 * had to turn off the timer IM bit to avoid redundant
	 * entries, but we may never get to mips_cpu_irq_end
	 * to turn it back on again if the scheduler gets
	 * involved. So we clear the pending timer here,
	 * and re-enable the mask...
	 */
	int vpflags = dvpe();
	/* De-assert the timer, clear pending, and re-enable the IM bit. */
	write_c0_compare (read_c0_count() - 1);
	clear_c0_cause(0x100 << cp0_compare_irq);
	set_c0_status(0x100 << cp0_compare_irq);
	irq_enable_hazard();
	evpe(vpflags);

	/* VPE 0's TC runs the full timer service; others just re-arm. */
	if (cpu_data[cpu].vpe_id == 0)
		timer_interrupt(irq, dev_id);
	else
		write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
	smtc_timer_broadcast(cpu_data[cpu].vpe_id);
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * every CPU should do profiling and process accounting
	 */
	local_timer_interrupt (irq, dev_id);

	return IRQ_HANDLED;
#else
	return timer_interrupt (irq, dev_id);
#endif
}
/*
 * Platform timer interrupt handler.
 *
 * May be entered for a timer tick, a performance-counter overflow, or
 * both (they can share the interrupt line).  On SMTC builds the tick
 * is de-asserted and broadcast to the other TCs; on non-SMTC SMP, CPU 0
 * runs the global timer service while the others only re-arm Compare.
 *
 * NOTE(review): Cause bit 26 is presumably the perf-counter pending bit
 * and bit 30 the timer (TI) bit on this CPU — confirm against the CP0
 * register layout.
 */
irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * In an SMTC system, one Count/Compare set exists per VPE.
	 * Which TC within a VPE gets the interrupt is essentially
	 * random - we only know that it shouldn't be one with
	 * IXMT set. Whichever TC gets the interrupt needs to
	 * send special interprocessor interrupts to the other
	 * TCs to make sure that they schedule, etc.
	 *
	 * That code is specific to the SMTC kernel, not to
	 * the a particular platform, so it's invoked from
	 * the general MIPS timer_interrupt routine.
	 */
	int vpflags;

	/*
	 * We could be here due to timer interrupt,
	 * perf counter overflow, or both.
	 */
	if (read_c0_cause() & (1 << 26))
		perf_irq();

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare (read_c0_count() - 1);
		/*
		 * DVPE is necessary so long as cross-VPE interrupts
		 * are done via read-modify-write of Cause register.
		 */
		vpflags = dvpe();
		clear_c0_cause(CPUCTR_IMASKBIT);
		evpe(vpflags);
		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system. One TC of each VPE will take
		 * the actual timer interrupt. The others will get
		 * timer broadcast IPIs. We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
			scroll_display_message();
		} else {
			/* Re-arm Compare for the next tick locally. */
			write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}
#else /* CONFIG_MIPS_MT_SMTC */
	int r2 = cpu_has_mips_r2;

	if (cpu == 0) {
		/*
		 * CPU 0 handles the global timer interrupt job and process
		 * accounting resets count/compare registers to trigger next
		 * timer int.
		 */
		/* On R2, Cause tells us which source fired; pre-R2 we can't tell. */
		if (!r2 || (read_c0_cause() & (1 << 26)))
			if (perf_irq())
				goto out;

		/* we keep interrupt disabled all the time */
		if (!r2 || (read_c0_cause() & (1 << 30)))
			timer_interrupt(irq, NULL);

		scroll_display_message();
	} else {
		/* Everyone else needs to reset the timer int here as
		   ll_local_timer_interrupt doesn't */
		/*
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPU's
		 */
		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));

		/*
		 * Other CPUs should do profiling and process accounting
		 */
		local_timer_interrupt(irq, dev_id);
	}
out:
#endif /* CONFIG_MIPS_MT_SMTC */
	return IRQ_HANDLED;
}