static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();

	/* acknowledge the timer irq */
#ifdef USE_CASCADE_TIMERS
	*R_TIMER_CTRL =
		IO_FIELD(R_TIMER_CTRL, timerdiv1, 0) |
		IO_FIELD(R_TIMER_CTRL, timerdiv0, 0) |
		IO_STATE(R_TIMER_CTRL, i1, clr) |
		IO_STATE(R_TIMER_CTRL, tm1, run) |
		IO_STATE(R_TIMER_CTRL, clksel1, cascade0) |
		IO_STATE(R_TIMER_CTRL, i0, clr) |
		IO_STATE(R_TIMER_CTRL, tm0, run) |
		IO_STATE(R_TIMER_CTRL, clksel0, c6250kHz);
#else
	*R_TIMER_CTRL = r_timer_ctrl_shadow | IO_STATE(R_TIMER_CTRL, i0, clr);
#endif

	/* reset watchdog otherwise it resets us! */
	reset_watchdog();

	/* Update statistics. */
	update_process_times(user_mode(regs));

	/* call the real timer interrupt handler */
	xtime_update(1);

	cris_do_profile(regs); /* Save profiling information */
	return IRQ_HANDLED;
}

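The IO_FIELD()/IO_STATE() macros above build the timer control word one bitfield at a time: each expands to a value shifted into its field's bit position, so the pieces can simply be OR-ed together. A minimal user-space sketch of the pattern, with hypothetical field offsets rather than the real R_TIMER_CTRL layout:

#include <stdio.h>

/* Hypothetical field layout, for illustration only. */
#define TIMERDIV0_SHIFT	0	/* 8-bit clock divider */
#define I0_SHIFT	8	/* interrupt-clear strobe */
#define TM0_SHIFT	9	/* 2-bit timer mode */

#define IO_FIELD(shift, val)	((unsigned long)(val) << (shift))
#define TM_RUN			1

int main(void)
{
	unsigned long ctrl =
		IO_FIELD(TIMERDIV0_SHIFT, 0) |	/* divider = 0 */
		IO_FIELD(I0_SHIFT, 1) |		/* clear pending irq */
		IO_FIELD(TM0_SHIFT, TM_RUN);	/* keep timer running */

	printf("ctrl = 0x%lx\n", ctrl);		/* prints 0x300 */
	return 0;
}
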
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);

#ifdef CONFIG_HEARTBEAT
	/* use power LED as a heartbeat instead -- much more useful
	   for debugging -- based on the version for PReP by Cort */
	/* acts like an actual heart beat -- ie thump-thump-pause... */
	if (mach_heartbeat) {
		static unsigned cnt = 0, period = 0, dist = 0;

		if (cnt == 0 || cnt == dist)
			mach_heartbeat(1);
		else if (cnt == 7 || cnt == dist + 7)
			mach_heartbeat(0);

		if (++cnt > period) {
			cnt = 0;
			/* The hyperbolic function below modifies the
			 * heartbeat period length in dependency of the
			 * current (5min) load.  It goes through the points
			 * f(0)=126, f(1)=86, f(5)=51, f(inf)->30.
			 */
			period = ((672 << FSHIFT) / (5 * avenrun[0] +
						     (7 << FSHIFT))) + 30;
			dist = period / 4;
		}
	}
#endif /* CONFIG_HEARTBEAT */
	return IRQ_HANDLED;
}

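The period formula in the heartbeat branch is easy to sanity-check in isolation. Below is a small standalone program (not kernel code) that evaluates the same fixed-point expression; FSHIFT = 11 matches the kernel's load-average fixed point, and `load` stands in for avenrun[0]:

#include <stdio.h>

#define FSHIFT 11	/* kernel load-average fixed point: 1.0 == 1 << FSHIFT */

/* Same expression as the heartbeat code, with `load` standing in
 * for the fixed-point 5-minute load average avenrun[0]. */
static unsigned period_for(unsigned long load)
{
	return ((672 << FSHIFT) / (5 * load + (7 << FSHIFT))) + 30;
}

int main(void)
{
	/* Expect f(0)=126, f(1)=86, f(5)=51, approaching 30 as load grows. */
	printf("f(0)   = %u\n", period_for(0));
	printf("f(1)   = %u\n", period_for(1UL << FSHIFT));
	printf("f(5)   = %u\n", period_for(5UL << FSHIFT));
	printf("f(100) = %u\n", period_for(100UL << FSHIFT));
	return 0;
}
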
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	unsigned long next;

	next = get_linux_timer();

again:
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
#endif

		xtime_update(1); /* Linux handler in kernel/time/timekeeping */

		/* Note that writing CCOMPARE clears the interrupt. */

		next += CCOUNT_PER_JIFFY;
		set_linux_timer(next);
	}

	/* Allow platform to do something useful (Wdog). */

	platform_heartbeat();

	/* Make sure we didn't miss any tick... */

	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	return IRQ_HANDLED;
}

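The `(signed long)(get_ccount() - next) > 0` test relies on modular arithmetic: the unsigned subtraction wraps, and the cast to signed turns the difference into a sign check that stays correct across counter rollover. A minimal user-space sketch of the idiom, using fixed 32-bit types in place of the xtensa CCOUNT register:

#include <stdint.h>
#include <stdio.h>

/* True if cycle counter `now` has passed `deadline`, even when the
 * 32-bit counter wrapped between the two samples. */
static int deadline_passed(uint32_t now, uint32_t deadline)
{
	return (int32_t)(now - deadline) > 0;
}

int main(void)
{
	/* Deadline just before wrap, "now" sampled before and after it. */
	uint32_t deadline = 0xfffffff0u;

	printf("%d\n", deadline_passed(0xffffffe0u, deadline));	/* 0: not yet */
	printf("%d\n", deadline_passed(0x00000010u, deadline));	/* 1: passed */
	return 0;
}
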
/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now;
	unsigned long next_tick;
	unsigned long ticks_elapsed = 0;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the old expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Calculate how many ticks have elapsed. */
	do {
		++ticks_elapsed;
		next_tick += cpt;
		now = mfctl(16);
	} while (next_tick - now > cpt);

	/* Store (in CR16 cycles) up to when we are accounting right now. */
	cpuinfo->it_value = next_tick;

	/* Go do system housekeeping. */
	if (cpu == 0)
		xtime_update(ticks_elapsed);

	update_process_times(user_mode(get_irq_regs()));

	/* Skip clockticks on purpose if we know we would miss those.
	 * The new CR16 must be "later" than current CR16 otherwise
	 * itimer would not fire until CR16 wrapped - e.g. 4 seconds
	 * later on a 1 GHz processor.  We'll account for the missed
	 * ticks on the next timer interrupt.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 *
	 * "next_tick - now" will always give the difference regardless
	 * if one or the other wrapped.  If "now" is "bigger" we'll end up
	 * with a very large unsigned number.
	 */
	while (next_tick - mfctl(16) > cpt)
		next_tick += cpt;

	/* Program the IT when to deliver the next interrupt.
	 * Only bottom 32-bits of next_tick are writable in CR16!
	 * Timer interrupt will be delivered at least a few hundred cycles
	 * after the IT fires, so if we are too close (<= 500 cycles) to the
	 * next cycle, simply skip it.
	 */
	if (next_tick - mfctl(16) <= 500)
		next_tick += cpt;
	mtctl(next_tick, 16);

	return IRQ_HANDLED;
}

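The `next_tick - now > cpt` guards work for the same reason as the xtensa test above: unsigned subtraction is modular, so the difference is right even if CR16 wrapped between the two samples, and a next_tick that is already in the past shows up as a huge unsigned value. A small user-space sketch of the skip loop, with a plain unsigned long standing in for CR16:

#include <stdio.h>

int main(void)
{
	unsigned long cpt = 1000;		/* cycles per clocktick */
	unsigned long now = 42;			/* counter just wrapped past 0 */
	unsigned long next_tick = -600UL;	/* 600 cycles before the wrap */

	/* The elapsed time across the wrap is still small and correct: */
	printf("now - next_tick = %lu\n", now - next_tick);	/* 642 */

	/* The skip loop: while next_tick is behind `now`, the difference
	 * next_tick - now underflows to a huge value and the loop advances
	 * next_tick; it exits once next_tick lands within one clocktick
	 * ahead of now. */
	while (next_tick - now > cpt)
		next_tick += cpt;
	printf("programmed next_tick = now + %lu\n", next_tick - now);
	return 0;
}
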
/*
 * Kernel system timer support.
 */
void timer_tick(void)
{
	profile_tick(CPU_PROFILING);
	xtime_update(1);
#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() +
				   local_cpu_data->itm_delta / 2))
			new_itm += local_cpu_data->itm_delta;

		ia64_set_itm(new_itm);

		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));

	return IRQ_HANDLED;
}

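time_after() and time_after_eq() are the wrap-safe comparison macros from include/linux/jiffies.h; time_after(a, b) is ((long)((b) - (a)) < 0). The safety-margin loop can be exercised in user space with assumed numbers, a free-running counter itc and a tick period delta:

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long delta = 1000;	/* itm_delta: cycles per tick */
	unsigned long itc = 5000;	/* current cycle counter */
	unsigned long new_itm = 4200;	/* match value already in the past */

	/* Same guard as the ia64 handler: push the match register more
	 * than half a tick ahead so it cannot fire before we are done. */
	while (!time_after(new_itm, itc + delta / 2))
		new_itm += delta;

	printf("program match at itc + %lu\n", new_itm - itc);	/* 1200 */
	return 0;
}
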
void advance_ticks(int ticks, int frac, int repeat)
{
	int i;

	if (test_params.nohz) {
		for (i = 0; i < repeat; i++) {
			/* Advance the simulated TSC in fixed point, carrying
			 * the fractional cycles so none are lost over time. */
			simtsc_frac += (ticks * test_params.clock_freq
					<< FRAC_BITS) / (HZ * frac);
			simtsc += simtsc_frac >> FRAC_BITS;
			simtsc_frac -= simtsc_frac >> FRAC_BITS << FRAC_BITS;
			xtime_update(ticks);
		}
	} else {
		for (i = 0; i < repeat * ticks / frac; i++) {

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
irqreturn_t arch_timer_interrupt(int irq, void *dummy)
{
	if (current->pid)
		profile_tick(CPU_PROFILING);

	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));

	return IRQ_HANDLED;
}

irqreturn_t timer_interrupt(int irq, void *dummy)
{
	xtime_update(1);

#ifdef CONFIG_IPIPE
	update_root_process_times(get_irq_regs());
#else
	update_process_times(user_mode(get_irq_regs()));
#endif
	profile_tick(CPU_PROFILING);

	return IRQ_HANDLED;
}

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* Clear the interrupt condition */
	outw(0, timer_membase + ALTERA_TIMER_STATUS_REG);
	nios2_timer_count += NIOS2_TIMER_PERIOD;

	profile_tick(CPU_PROFILING);
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));

	return IRQ_HANDLED;
}

static irqreturn_t timer_interrupt(int dummy, void *dev_id)
{
#ifndef CONFIG_SMP
	profile_tick(CPU_PROFILING);
#endif
	clear_clock_irq();

	xtime_update(1);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	return IRQ_HANDLED;
}

static irqreturn_t sun3_int5(int irq, void *dev_id)
{
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	*sun3_intreg |= (1 << irq);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	if (!(kstat_cpu(0).irqs[irq] % 20))
		sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
	return IRQ_HANDLED;
}

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
	profile_tick(CPU_PROFILING);

	xtime_update(1);

#ifdef CONFIG_HEARTBEAT
	static unsigned short n;
	n++;
	__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */

	update_process_times(user_mode(get_irq_regs()));

	return IRQ_HANDLED;
}

static irqreturn_t sun3_int5(int irq, void *dev_id)
{
	unsigned int cnt;

#ifdef CONFIG_SUN3
	intersil_clear();
#endif
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	cnt = kstat_irqs_cpu(irq, 0);
	if (!(cnt % 20))
		sun3_leds(led_pattern[cnt % 160 / 20]);
	return IRQ_HANDLED;
}

/*
 * timer_tick()
 * Kernel system timer support.  Needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick.
 */
static irqreturn_t timer_tick(int irq, void *dummy)
{
	int ticks;

	BUG_ON(!irqs_disabled());

	ticks = timer_reset(timervector, frequency);
	xtime_update(ticks);
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);

#if defined(CONFIG_SMP)
	smp_send_timer_all();
#endif
	return IRQ_HANDLED;
}

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
	profile_tick(CPU_PROFILING);
#endif
	xtime_update(1);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	/* As we return to user mode fire off the other CPU schedulers..
	   this is basically because we don't yet share IRQ's around.
	   This message is rigged to be safe on the 386 - basically it's
	   a hack, so don't look closely for now.. */
#ifdef CONFIG_SMP
	smp_local_timer_interrupt();
	smp_send_timer();
#endif
	return IRQ_HANDLED;
}

static irqreturn_t sun3_int5(int irq, void *dev_id)
{
	unsigned long flags;
	unsigned int cnt;

	local_irq_save(flags);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	sun3_disable_irq(5);
	sun3_enable_irq(5);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	cnt = kstat_irqs_cpu(irq, 0);
	if (!(cnt % 20))
		sun3_leds(led_pattern[cnt % 160 / 20]);
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

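In all three sun3_int5() variants the LED update advances one pattern every 20 interrupts and cycles through eight patterns: `cnt % 160 / 20` maps the interrupt count onto the indices 0..7. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int cnt;

	/* Print the led_pattern index at each 20-interrupt boundary;
	 * it walks 0..7 and wraps every 160 interrupts. */
	for (cnt = 0; cnt <= 320; cnt += 20)
		printf("cnt=%3u -> index %u\n", cnt, cnt % 160 / 20);
	return 0;
}
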
static unsigned long consider_steal_time(unsigned long new_itm)
{
	unsigned long stolen, blocked;
	unsigned long delta_itm = 0, stolentick = 0;
	int cpu = smp_processor_id();
	struct vcpu_runstate_info runstate;
	struct task_struct *p = current;

	get_runstate_snapshot(&runstate);

	/*
	 * Check for the vcpu migration effect.
	 * In this case, the itc value is reversed, which would produce a
	 * huge stolen value.  This function just checks for and rejects
	 * that effect.
	 */
	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
			   per_cpu(xen_blocked_time, cpu)))
		blocked = 0;
	else
		/* nanoseconds spent blocked since the last snapshot */
		blocked = runstate.time[RUNSTATE_blocked] -
			  per_cpu(xen_blocked_time, cpu);

	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
			   runstate.time[RUNSTATE_offline],
			   per_cpu(xen_stolen_time, cpu)))
		stolen = 0;
	else
		/* nanoseconds stolen (runnable or offline) since then */
		stolen = runstate.time[RUNSTATE_runnable] +
			 runstate.time[RUNSTATE_offline] -
			 per_cpu(xen_stolen_time, cpu);

	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
		stolentick = ia64_get_itc() - new_itm;

	do_div(stolentick, NS_PER_TICK);
	stolentick++;

	do_div(stolen, NS_PER_TICK);
	if (stolen > stolentick)
		stolen = stolentick;

	stolentick -= stolen;
	do_div(blocked, NS_PER_TICK);
	if (blocked > stolentick)
		blocked = stolentick;

	if (stolen > 0 || blocked > 0) {
		account_steal_ticks(stolen);
		account_idle_ticks(blocked);
		run_local_timers();

		rcu_check_callbacks(cpu, user_mode(get_irq_regs()));

		scheduler_tick();
		run_posix_cpu_timers(p);

		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);

		if (cpu == time_keeper_id)
			xtime_update(stolen + blocked);

		local_cpu_data->itm_next = delta_itm + new_itm;

		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
	}
	return delta_itm;
}

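do_div() is the kernel's in-place 64-bit division helper (asm-generic/div64.h): it divides its first argument by the second and returns the remainder; the code above keeps only the quotients. A user-space stand-in showing the stolen-tick clamping with assumed numbers (HZ = 250, so NS_PER_TICK is 4,000,000 ns):

#include <stdio.h>
#include <stdint.h>

/* User-space stand-in for the kernel's do_div(): divide n in place,
 * return the remainder (GCC statement-expression). */
#define do_div(n, base) ({			\
	uint64_t __rem = (n) % (base);		\
	(n) /= (base);				\
	__rem;					\
})

int main(void)
{
	uint64_t ns_per_tick = 4000000;		/* assumed: HZ = 250 */
	uint64_t stolentick = 5 * ns_per_tick;	/* handler is ~5 ticks late */
	uint64_t stolen = 13000000;		/* 13 ms (3.25 ticks) stolen */

	do_div(stolentick, ns_per_tick);	/* quotient: 5 */
	stolentick++;				/* round up: 6 */

	do_div(stolen, ns_per_tick);		/* quotient: 3 */
	if (stolen > stolentick)		/* clamp, as in the snippet */
		stolen = stolentick;

	printf("account %llu stolen ticks of %llu elapsed\n",
	       (unsigned long long)stolen, (unsigned long long)stolentick);
	return 0;
}
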