/*
 * SMTC per-VPE timer interrupt handler.
 *
 * In an SMTC kernel one Count/Compare pair exists per VPE, and whichever
 * TC happens to take the interrupt must de-assert it and fan it out to
 * the sibling TCs via broadcast IPIs.  Only VPE 0 runs the full
 * timer_interrupt(); other VPEs do local accounting only.
 *
 * Returns IRQ_HANDLED unconditionally (a spurious entry with the timer
 * bit clear is simply ignored).
 */
irqreturn_t smtc_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int vpflags;

	/*
	 * Cause bit 30 — presumably the TI (timer interrupt pending) bit;
	 * TODO confirm for this core revision.
	 */
	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare (read_c0_count() - 1);
		/*
		 * Cross-VPE Cause updates are read-modify-write, so all
		 * VPEs must be quiesced (dvpe) around the clear.
		 *
		 * NOTE(review): 0x100<<7 hard-codes Cause.IP7; other
		 * handlers in this file use 0x100 << cp0_compare_irq for
		 * the same operation — consider unifying if the symbol
		 * is available here.
		 */
		vpflags = dvpe();
		clear_c0_cause(0x100<<7);
		evpe(vpflags);
		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system. One TC of each VPE will take
		 * the actual timer interrupt. The others will get
		 * timer broadcast IPIs. We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		} else {
			/*
			 * Non-zero VPEs rearm their own Compare for the
			 * next tick, then do local profiling/accounting.
			 */
			write_c0_compare(read_c0_count() +
					 (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}
	return IRQ_HANDLED;
}
/*
 * Low-level entry point for a local (per-CPU) timer interrupt.
 *
 * Brackets the real work in irq_enter()/irq_exit(), bumps the per-CPU
 * interrupt statistics for this IRQ line, runs local_timer_interrupt()
 * for profiling/accounting, and finally drains any softirqs that were
 * raised along the way.  Interrupts stay disabled throughout.
 */
asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();

	irq_enter(this_cpu, irq);

	/* Per-CPU, per-IRQ counter shown in /proc interrupt statistics. */
	kstat.irqs[this_cpu][irq]++;

	/* we keep interrupt disabled all the time */
	local_timer_interrupt(irq, NULL, regs);

	irq_exit(this_cpu, irq);

	/* Run any softirqs raised while we were in interrupt context. */
	if (softirq_pending(this_cpu))
		do_softirq();
}
/*
 * Default (weak) system timer interrupt handler.
 *
 * Acknowledges the hardware timer, advances the global jiffies/xtime
 * state under xtime_lock, then performs local per-CPU work.  Declared
 * __weak so a platform can override it with its own handler.
 *
 * Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t __weak timer_interrupt(int irq, void *dev_id)
{
	/* ack timer interrupt and try to set next interrupt */
	avr32_timer_ack();

	/*
	 * Call the generic timer interrupt handler.  do_timer() updates
	 * jiffies/xtime, so it must run under the xtime seqlock writer
	 * side.
	 */
	write_seqlock(&xtime_lock);
	do_timer(1);
	write_sequnlock(&xtime_lock);

	/*
	 * In UP mode, we call local_timer_interrupt() to do profiling
	 * and process accounting.
	 *
	 * SMP is not supported yet.
	 */
	local_timer_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}
/*
 * Timer interrupt handler for the MIPS simulation platform.
 *
 * On UP kernels this simply forwards to timer_interrupt().  On SMP,
 * CPU 0 runs the global tick while the other CPUs only rearm their
 * Compare register.  On SMTC the interrupt must additionally be
 * de-asserted, re-enabled, and broadcast to sibling TCs.
 *
 * Returns IRQ_HANDLED (or whatever timer_interrupt() returns on UP).
 */
irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();

	/*
	 * CPU 0 handles the global timer interrupt job
	 * resets count/compare registers to trigger next timer int.
	 */
#ifndef CONFIG_MIPS_MT_SMTC
	if (cpu == 0) {
		timer_interrupt(irq, dev_id);
	} else {
		/* Everyone else needs to reset the timer int here as
		   ll_local_timer_interrupt doesn't */
		/*
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPU's
		 */
		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
	}
#else /* SMTC */
	/*
	 * In SMTC system, one Count/Compare set exists per VPE.
	 * Which TC within a VPE gets the interrupt is essentially
	 * random - we only know that it shouldn't be one with
	 * IXMT set. Whichever TC gets the interrupt needs to
	 * send special interprocessor interrupts to the other
	 * TCs to make sure that they schedule, etc.
	 *
	 * That code is specific to the SMTC kernel, not to
	 * the simulation platform, so it's invoked from
	 * the general MIPS timer_interrupt routine.
	 *
	 * We have a problem in that the interrupt vector code
	 * had to turn off the timer IM bit to avoid redundant
	 * entries, but we may never get to mips_cpu_irq_end
	 * to turn it back on again if the scheduler gets
	 * involved. So we clear the pending timer here,
	 * and re-enable the mask...
	 */
	int vpflags = dvpe();
	/* De-assert: make Compare lag Count so the line drops. */
	write_c0_compare(read_c0_count() - 1);
	/* Clear pending IP bit in Cause, re-unmask it in Status. */
	clear_c0_cause(0x100 << cp0_compare_irq);
	set_c0_status(0x100 << cp0_compare_irq);
	/* Barrier: CP0 writes above must land before evpe(). */
	irq_enable_hazard();
	evpe(vpflags);

	/* VPE 0 runs the full tick; other VPEs just rearm Compare. */
	if (cpu_data[cpu].vpe_id == 0)
		timer_interrupt(irq, dev_id);
	else
		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
	smtc_timer_broadcast(cpu_data[cpu].vpe_id);
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * every CPU should do profiling and process accounting
	 */
	local_timer_interrupt(irq, dev_id);

	return IRQ_HANDLED;
#else
	return timer_interrupt(irq, dev_id);
#endif
}
/*
 * Board timer interrupt handler (MIPS, SMTC and non-SMTC variants).
 *
 * SMTC: whichever TC took the interrupt de-asserts it under dvpe(),
 * then either runs the full tick (VPE 0) or just local accounting,
 * broadcasting IPIs to sibling TCs either way.
 *
 * Non-SMTC: CPU 0 runs the global tick (after letting the performance
 * counter handler claim the interrupt first); other CPUs rearm their
 * Compare register and do local accounting.
 *
 * Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * In an SMTC system, one Count/Compare set exists per VPE.
	 * Which TC within a VPE gets the interrupt is essentially
	 * random - we only know that it shouldn't be one with
	 * IXMT set. Whichever TC gets the interrupt needs to
	 * send special interprocessor interrupts to the other
	 * TCs to make sure that they schedule, etc.
	 *
	 * That code is specific to the SMTC kernel, not to
	 * a particular platform, so it's invoked from
	 * the general MIPS timer_interrupt routine.
	 */
	int vpflags;

	/*
	 * We could be here due to timer interrupt,
	 * perf counter overflow, or both.
	 *
	 * Cause bit 26 — presumably the PCI (perf counter) bit,
	 * bit 30 the TI (timer) bit; TODO confirm for this core.
	 */
	if (read_c0_cause() & (1 << 26))
		perf_irq();

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare (read_c0_count() - 1);
		/*
		 * DVPE is necessary so long as cross-VPE interrupts
		 * are done via read-modify-write of Cause register.
		 */
		vpflags = dvpe();
		clear_c0_cause(CPUCTR_IMASKBIT);
		evpe(vpflags);
		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system. One TC of each VPE will take
		 * the actual timer interrupt. The others will get
		 * timer broadcast IPIs. We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
			scroll_display_message();
		} else {
			/* Rearm Compare for the next tick on this VPE. */
			write_c0_compare(read_c0_count() +
			                 (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}
#else /* CONFIG_MIPS_MT_SMTC */
	int r2 = cpu_has_mips_r2;

	if (cpu == 0) {
		/*
		 * CPU 0 handles the global timer interrupt job and process
		 * accounting resets count/compare registers to trigger next
		 * timer int.
		 */
		/*
		 * On pre-R2 cores the perf and timer interrupts share a
		 * line, so always poll perf_irq() there; on R2 check the
		 * Cause bit first.  If perf_irq() claims the interrupt,
		 * skip the tick entirely.
		 */
		if (!r2 || (read_c0_cause() & (1 << 26)))
			if (perf_irq())
				goto out;

		/* we keep interrupt disabled all the time */
		if (!r2 || (read_c0_cause() & (1 << 30)))
			timer_interrupt(irq, NULL);

		scroll_display_message();
	} else {
		/* Everyone else needs to reset the timer int here as
		   ll_local_timer_interrupt doesn't */
		/*
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPU's
		 */
		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));

		/*
		 * Other CPUs should do profiling and process accounting
		 */
		local_timer_interrupt(irq, dev_id);
	}
out:
#endif /* CONFIG_MIPS_MT_SMTC */

	return IRQ_HANDLED;
}
/*
 * Handle a timer-broadcast IPI on this CPU: run the local timer work
 * inside an irq_enter()/irq_exit() pair so it is accounted as
 * interrupt context.
 */
static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}
/*
 * ipi_interrupt()
 *	Handle an Interprocessor Interrupt.
 *
 * Drains this CPU's pending-IPI word in a loop (new ops may be posted
 * while we process the current batch) and dispatches each op.  Must
 * tolerate spurious invocations with no ops pending — see the
 * smp_needs_ipi comment below.
 *
 * Returns IRQ_HANDLED, or IRQ_NONE on an unrecognized op.
 */
static irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	int cpuid = smp_processor_id();
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
	unsigned long ops;

	/*
	 * Count this now; we may make a call that never returns.
	 */
	p->ipi_count++;

	/*
	 * We are about to process all ops.  If another cpu has stated
	 * that we need an IPI, we will have already processed it.  By
	 * clearing our smp_needs_ipi, and processing all ops,
	 * we reduce the number of IPI interrupts.  However, this introduces
	 * the possibility that smp_needs_ipi will be clear and the soft irq
	 * will have gone off; so we need to make the get_affinity() path
	 * tolerant of spurious interrupts.
	 */
	spin_lock(&smp_ipi_lock);
	smp_needs_ipi &= ~(1 << p->tid);
	spin_unlock(&smp_ipi_lock);

	for (;;) {
		/*
		 * Read the set of IPI commands we should handle.
		 * Grab-and-clear under the per-CPU lock so ops posted
		 * concurrently are picked up by the next loop pass.
		 */
		spinlock_t *lock = &per_cpu(ipi_lock, cpuid);
		spin_lock(lock);
		ops = p->ipi_pending;
		p->ipi_pending = 0;
		spin_unlock(lock);

		/*
		 * If we have no IPI commands to execute, break out.
		 */
		if (!ops) {
			break;
		}

		/*
		 * Execute the set of commands in the ops word, one command
		 * at a time in no particular order.  Strip of each command
		 * as we execute it.
		 */
		while (ops) {
			/* ffz(~ops) == index of lowest set bit in ops. */
			unsigned long which = ffz(~ops);
			ops &= ~(1 << which);

			BUG_ON(!irqs_disabled());

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_NOP\n", cpuid);
				break;

			case IPI_RESCHEDULE:
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				smp_debug(200, KERN_INFO "cpu[%d]: "
					  "IPI_RESCHEDULE\n", cpuid);
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC\n", cpuid);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC_SINGLE\n", cpuid);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_STOP\n", cpuid);
				smp_halt_processor();
				break;

#if !defined(CONFIG_LOCAL_TIMERS)
			case IPI_CPU_TIMER:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_TIMER\n", cpuid);
#if defined(CONFIG_GENERIC_CLOCKEVENTS)
				local_timer_interrupt();
#else
				update_process_times(user_mode(get_irq_regs()));
				profile_tick(CPU_PROFILING);
#endif
#endif
				/*
				 * NOTE: this break sits outside the #if
				 * above, so with CONFIG_LOCAL_TIMERS set it
				 * follows IPI_CPU_STOP's break (harmless).
				 */
				break;

			default:
				printk(KERN_CRIT "cpu[%d]: "
					  "Unknown IPI: %lu\n", cpuid, which);

				return IRQ_NONE;
			}
		}
	}
	return IRQ_HANDLED;
}
/*
 * high-level timer interrupt service routines. This function
 * is set as irqaction->handler and is invoked through do_IRQ.
 *
 * Rearms the CP0 cycle counter (when present), advances jiffies via
 * do_timer(), writes the system time back to the RTC roughly every
 * 11 minutes when NTP-synchronized, and runs per-CPU profiling and
 * process accounting.
 */
void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	if (mips_cpu.options & MIPS_CPU_COUNTER) {
		unsigned int count;

		/*
		 * The cycle counter is only 32 bit which is good for about
		 * a minute at current count rates of upto 150MHz or so.
		 * Track wraps in timerhi so gettimeoffset stays monotonic.
		 */
		count = read_32bit_cp0_register(CP0_COUNT);
		timerhi += (count < timerlo);	/* Wrap around */
		timerlo = count;

		/*
		 * set up for next timer interrupt - no harm if the machine
		 * is using another timer interrupt source.
		 * Note that writing to COMPARE register clears the interrupt
		 */
		write_32bit_cp0_register(CP0_COMPARE,
					 count + cycles_per_jiffy);
	}

	/*
	 * call the generic timer interrupt handling
	 */
	do_timer(regs);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	read_lock(&xtime_lock);
	if ((time_status & STA_UNSYNC) == 0
	    && xtime.tv_sec > last_rtc_update + 660
	    && xtime.tv_usec >= 500000 - ((unsigned) tick) / 2
	    && xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
		if (rtc_set_time(xtime.tv_sec) == 0) {
			last_rtc_update = xtime.tv_sec;
		} else {
			/* RTC write failed; retry in about a minute. */
			last_rtc_update = xtime.tv_sec - 600;	/* do it again in 60 s */
		}
	}
	read_unlock(&xtime_lock);

	/*
	 * If jiffies has overflowed in this timer_interrupt we must
	 * update the timer[hi]/[lo] to make fast gettimeoffset funcs
	 * quotient calc still valid. -arca
	 */
	if (!jiffies) {
		timerhi = timerlo = 0;
	}

#if !defined(CONFIG_SMP)
	/*
	 * In UP mode, we call local_timer_interrupt() to do profiling
	 * and process accounting.
	 *
	 * In SMP mode, local_timer_interrupt() is invoked by appropriate
	 * low-level local timer interrupt handler.
	 */
	local_timer_interrupt(0, NULL, regs);
#else	/* CONFIG_SMP */
	if (emulate_local_timer_interrupt) {
		/*
		 * this is the place where we send out inter-process
		 * interrupts and let each CPU do its own profiling
		 * and process accounting.
		 *
		 * Obviously we need to call local_timer_interrupt() for
		 * the current CPU too.
		 */
		panic("Not implemented yet!!!");
	}
#endif	/* CONFIG_SMP */
}