static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotony of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() +
				   local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
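/*
 * The handler above never compares counter values with plain < or >:
 * the ITC is a free-running counter that eventually wraps, so all
 * deadline checks use the signed-difference idiom behind the kernel's
 * time_after()/time_after_eq() macros. A minimal user-space sketch of
 * that idiom; itc_time_after() is a stand-in for this illustration,
 * not the kernel macro itself.
 */
#include <stdio.h>

static int itc_time_after(unsigned long a, unsigned long b)
{
	/* a is "after" b iff the signed distance from b to a is positive */
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long near_wrap = ~0UL - 10;	/* counter just before wrapping */
	unsigned long wrapped = 5;		/* counter just after wrapping */

	/* numerically 5 < ~0UL - 10, but the wrapped value is "later" */
	printf("%d\n", itc_time_after(wrapped, near_wrap));	/* prints 1 */
	return 0;
}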
void
pcpu_initclock(void)
{

	PCPU_SET(clockadj, 0);
	PCPU_SET(clock, ia64_get_itc());
	ia64_set_itm(PCPU_GET(clock) + ia64_clock_reload);
	ia64_set_itv(CLOCK_VECTOR);	/* highest priority class */
	ia64_srlz_d();
}
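/*
 * pcpu_initclock() assumes ia64_clock_reload already holds the number
 * of ITC cycles per hardclock tick. A plausible derivation, rounding
 * to the nearest cycle -- a sketch under the assumption that the ITC
 * frequency and hz are known inputs, not FreeBSD's actual
 * initialization code:
 */
static unsigned long
compute_clock_reload(unsigned long itc_frequency, unsigned long hz)
{
	return (itc_frequency + hz / 2) / hz;
}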
/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}
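/*
 * The shift formula staggers ticks by successive binary subdivision:
 * CPU 1 fires half a period after CPU 0, CPUs 2 and 3 at the quarter
 * points, CPUs 4-7 at the odd eighths, and so on. A user-space sketch
 * that prints the offsets; msb() is a portable stand-in for ia64_fls()
 * (index of the most significant set bit), and delta is an assumed
 * period:
 */
#include <stdio.h>

static unsigned long msb(unsigned long x)
{
	unsigned long pos = 0;

	while (x >>= 1)
		pos++;
	return pos;
}

int main(void)
{
	unsigned long delta = 1000000;	/* assumed ITC cycles per tick */
	int cpu;

	for (cpu = 0; cpu < 8; cpu++) {
		unsigned long shift = 0;

		if (cpu) {
			unsigned long hi = 1UL << msb(cpu);
			shift = (2*(cpu - hi) + 1) * delta/hi/2;
		}
		/* prints 0, 4, 2, 6, 1, 5, 3, 7 eighths of a period */
		printf("cpu %d: shift = %lu/8 of a period\n",
		       cpu, 8*shift/delta);
	}
	return 0;
}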
static void __init
check_sal_cache_flush (void)
{
	unsigned long flags;
	int cpu;
	u64 vector;

	cpu = get_cpu();
	local_irq_save(flags);

	/*
	 * Schedule a timer interrupt, wait until it's reported, and see if
	 * SAL_CACHE_FLUSH drops it.
	 */
	ia64_set_itv(IA64_TIMER_VECTOR);
	ia64_set_itm(ia64_get_itc() + 1000);

	while (!ia64_get_irr(IA64_TIMER_VECTOR))
		cpu_relax();

	ia64_sal_cache_flush(3);

	if (ia64_get_irr(IA64_TIMER_VECTOR)) {
		vector = ia64_get_ivr();
		ia64_eoi();
		WARN_ON(vector != IA64_TIMER_VECTOR);
	} else {
		sal_cache_flush_drops_interrupts = 1;
		printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
		       "PAL_CACHE_FLUSH will be used instead\n");
		ia64_eoi();
	}

	local_irq_restore(flags);
	put_cpu();
}
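/*
 * The ia64_get_irr() test above reads a pending bit out of the IRR,
 * which the architecture exposes as four 64-bit registers covering
 * vectors 0-255: register vector/64, bit vector%64. A self-contained
 * sketch of that mapping; irr_regs[] merely simulates cr.irr0-cr.irr3
 * and the vector number is an assumed example value:
 */
#include <stdio.h>

static unsigned long irr_regs[4];	/* stand-in for cr.irr0..cr.irr3 */

static int vector_pending(unsigned int v)
{
	return (irr_regs[v >> 6] >> (v & 63)) & 1;
}

int main(void)
{
	unsigned int vec = 239;		/* assumed timer vector */

	irr_regs[vec >> 6] |= 1UL << (vec & 63);	/* mark pending */
	printf("pending: %d\n", vector_pending(vec));	/* prints 1 */
	return 0;
}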
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (unlikely(cpu_is_offline(smp_processor_id()))) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	if (paravirt_do_steal_accounting(&new_itm))
		goto skip_process_time_accounting;

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id) {
			/*
			 * Here we are in the timer irq handler. We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU. We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(1);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

skip_process_time_accounting:

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * do_timer() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotony of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() +
				   local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long new_itm;

	if (unlikely(cpu_is_offline(smp_processor_id()))) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id, regs);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING, regs);

	while (1) {
#ifdef CONFIG_SMP
		/*
		 * For UP, this is done in do_timer().  Weird, but
		 * fixing that would require updates to all
		 * platforms.
		 */
		update_process_times(user_mode(regs));
#endif
		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == TIME_KEEPER_ID) {
			/*
			 * Here we are in the timer irq handler. We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU. We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(regs);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * do_timer() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotony of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() +
				   local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
void
interrupt(u_int64_t vector, struct trapframe *framep)
{
	struct thread *td;
	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;

	td = curthread;
	atomic_add_int(&td->td_intr_nesting_level, 1);

	/*
	 * Handle ExtINT interrupts by generating an INTA cycle to
	 * read the vector.
	 */
	if (vector == 0) {
		vector = ib->ib_inta;
		printf("ExtINT interrupt: vector=%ld\n", vector);
	}

	if (vector == 255) {	/* clock interrupt */
		/* CTR0(KTR_INTR, "clock interrupt"); */
		cnt.v_intr++;
#ifdef EVCNT_COUNTERS
		clock_intr_evcnt.ev_count++;
#else
		intrcnt[INTRCNT_CLOCK]++;
#endif
		critical_enter();
#ifdef SMP
		clks[PCPU_GET(cpuid)]++;
		/* Only the BSP runs the real clock */
		if (PCPU_GET(cpuid) == 0) {
#endif
			handleclock(framep);
			/* divide hz (1024) by 8 to get stathz (128) */
			if ((++schedclk2 & 0x7) == 0)
				statclock((struct clockframe *)framep);
#ifdef SMP
		} else {
			ia64_set_itm(ia64_get_itc() + itm_reload);
			mtx_lock_spin(&sched_lock);
			hardclock_process(curthread, TRAPF_USERMODE(framep));
			if ((schedclk2 & 0x7) == 0)
				statclock_process(curkse, TRAPF_PC(framep),
				    TRAPF_USERMODE(framep));
			mtx_unlock_spin(&sched_lock);
		}
#endif
		critical_exit();
#ifdef SMP
	} else if (vector == ipi_vector[IPI_AST]) {
		asts[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
		rdvs[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
		smp_rendezvous_action();
	} else if (vector == ipi_vector[IPI_STOP]) {
		u_int32_t mybit = PCPU_GET(cpumask);

		CTR1(KTR_SMP, "IPI_STOP, cpuid=%d", PCPU_GET(cpuid));
		savectx(PCPU_GET(pcb));
		stopped_cpus |= mybit;
		while ((started_cpus & mybit) == 0)
			/* spin */;
		started_cpus &= ~mybit;
		stopped_cpus &= ~mybit;
		if (PCPU_GET(cpuid) == 0 && cpustop_restartfunc != NULL) {
			void (*f)(void) = cpustop_restartfunc;
			cpustop_restartfunc = NULL;
			(*f)();
		}
	} else if (vector == ipi_vector[IPI_TEST]) {
		CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid));
		mp_ipi_test++;
#endif
	} else {
		ints[PCPU_GET(cpuid)]++;
		ia64_dispatch_intr(framep, vector);
	}

	atomic_subtract_int(&td->td_intr_nesting_level, 1);
}
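/*
 * The schedclk2 logic above is a power-of-two clock divider: with hz
 * at 1024, taking the statclock branch only when the incremented count
 * masked with 0x7 is zero fires it on every eighth hardclock, giving
 * stathz = 128. A standalone sketch of the same idiom, with
 * illustrative names:
 */
#include <stdio.h>

int main(void)
{
	int schedclk = 0, tick, stat_ticks = 0;

	/* simulate one second of hardclock at hz = 1024 */
	for (tick = 0; tick < 1024; tick++) {
		if ((++schedclk & 0x7) == 0)	/* every 8th tick */
			stat_ticks++;
	}
	printf("stathz = %d\n", stat_ticks);	/* prints 128 */
	return 0;
}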