Example #1
irqreturn_t timer_interrupt(int irq, void *dummy)
{
	xtime_update(1);	/* advance jiffies and wall time under xtime_lock */

#ifdef CONFIG_IPIPE
	/* I-pipe: account the tick to the root (Linux) domain */
	update_root_process_times(get_irq_regs());
#else
	update_process_times(user_mode(get_irq_regs()));
#endif
	profile_tick(CPU_PROFILING);

	return IRQ_HANDLED;
}
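
A handler like this is typically installed at boot through the generic IRQ layer. The sketch below is hypothetical: TIMER_IRQ stands in for the platform's tick interrupt line, and the flags follow the conventions of this kernel generation (IRQF_TIMER marks the handler as the system tick source).

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static struct irqaction timer_irqaction = {
	.name    = "timer tick",
	.flags   = IRQF_DISABLED | IRQF_TIMER,
	.handler = timer_interrupt,
};

void __init my_time_init(void)
{
	/* TIMER_IRQ is a placeholder for the platform's tick line */
	setup_irq(TIMER_IRQ, &timer_irqaction);
}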
Example #2
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check whether jiffies needs an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call when we are not in irq context and have
	 * no valid regs pointer.
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on completely idle SMP systems while
		 * waiting at the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account for too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_root_process_times(regs);
		profile_tick(CPU_PROFILING);
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
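
The rearming pattern above, hrtimer_forward() plus returning HRTIMER_RESTART, is the standard way to build a periodic hrtimer. A minimal sketch of the setup side, assuming a period of one tick (NSEC_PER_SEC / HZ) rather than the real tick_period:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;	/* hypothetical periodic timer */

static enum hrtimer_restart demo_fn(struct hrtimer *timer)
{
	/* push the expiry one period into the future, then stay armed */
	hrtimer_forward(timer, hrtimer_cb_get_time(timer),
			ktime_set(0, NSEC_PER_SEC / HZ));
	return HRTIMER_RESTART;
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_fn;
	hrtimer_start(&demo_timer, ktime_set(0, NSEC_PER_SEC / HZ),
		      HRTIMER_MODE_REL);
}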
Example #3
/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	update_root_process_times(get_irq_regs());
	profile_tick(CPU_PROFILING);
}
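
do_timer() runs under write_seqlock(&xtime_lock) so that lock-free readers can detect a concurrent update and retry. A sketch of the matching reader side, modeled on the getnstimeofday()-style readers of this kernel generation:

#include <linux/seqlock.h>
#include <linux/time.h>

static struct timespec xtime_sample(void)
{
	struct timespec ts;
	unsigned long seq;

	do {
		/* snapshot xtime; retry if a writer raced with us */
		seq = read_seqbegin(&xtime_lock);
		ts = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return ts;
}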
Example #4
irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* last time the CMOS clock got updated */
	static long last_rtc_update;

	write_seqlock(&xtime_lock);
#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
	/*
	 * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is
	 * enabled.
	 */
	if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
#endif
		do_timer(1);

		/*
		 * If we have an externally synchronized Linux clock, then update
		 * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has
		 * to be called as close as possible to 500 ms before the new
		 * second starts.
		 */
		if (ntp_synced() &&
		    xtime.tv_sec > last_rtc_update + 660 &&
		    (xtime.tv_nsec / NSEC_PER_USEC) >=
			500000 - ((unsigned)TICK_SIZE) / 2 &&
		    (xtime.tv_nsec / NSEC_PER_USEC) <=
			500000 + ((unsigned)TICK_SIZE) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				/* Do it again in 60s. */
				last_rtc_update = xtime.tv_sec - 600;
		}
#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
		set_gptimer_status(0, TIMER_STATUS_TIMIL0);
	}
#endif
	write_sequnlock(&xtime_lock);

#ifdef CONFIG_IPIPE
	update_root_process_times(get_irq_regs());
#else
	update_process_times(user_mode(get_irq_regs()));
#endif
	profile_tick(CPU_PROFILING);

	return IRQ_HANDLED;
}
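
The window test on xtime.tv_nsec confines the RTC write to half a tick on either side of the 500 ms mark, since set_rtc_mmss() wants to run as close as possible to 500 ms before the next second. A standalone illustration of the same arithmetic, assuming HZ = 100 so that the tick is 10 000 µs:

#include <stdbool.h>

#define TICK_USEC 10000U	/* tick length in microseconds, assuming HZ = 100 */

/* true when 'usec' (microseconds past the second) falls within
 * half a tick of the 500 ms mark */
static bool in_rtc_window(unsigned int usec)
{
	return usec >= 500000U - TICK_USEC / 2 &&
	       usec <= 500000U + TICK_USEC / 2;	/* i.e. 495000..505000 */
}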
Example #5
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check whether jiffies needs an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on completely idle SMP systems while
	 * waiting at the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account for too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_root_process_times(regs);
	profile_tick(CPU_PROFILING);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}
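
The closing loop exists because the expiry computed from a stale 'now' may already lie in the past by the time the event device is programmed; in that case jiffies are brought up to date and the reprogramming is retried with a fresh timestamp. A generic sketch of that retry pattern, with program_next_event() as a hypothetical stand-in for tick_nohz_reprogram():

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* hypothetical: returns nonzero when 'expires' already lies in the past */
extern int program_next_event(ktime_t expires);

static void reprogram_until_future(ktime_t now, ktime_t period)
{
	ktime_t expires = ktime_add(now, period);

	while (program_next_event(expires)) {
		/* we lost the race against the clock: take a fresh
		 * timestamp and aim one period later */
		now = ktime_get();
		expires = ktime_add(now, period);
	}
}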
Example #6
irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* last time the CMOS clock got updated */
	static long last_rtc_update;

	write_raw_seqlock(&xtime_lock);
	do_timer(1);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has to
	 * be called as close as possible to 500 ms before the new second starts.
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / NSEC_PER_USEC) >=
		500000 - ((unsigned)TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / NSEC_PER_USEC) <=
		500000 + ((unsigned)TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			/* Do it again in 60s. */
			last_rtc_update = xtime.tv_sec - 600;
	}
	write_raw_sequnlock(&xtime_lock);

#ifdef CONFIG_IPIPE
	update_root_process_times(get_irq_regs());
#else
	update_process_times(user_mode(get_irq_regs()));
#endif
	profile_tick(CPU_PROFILING);

	return IRQ_HANDLED;
}
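
All of the CONFIG_IPIPE branches above route the tick into update_root_process_times() instead of calling update_process_times() directly. Loosely, the wrapper charges the tick to Linux only when the interrupt was taken on behalf of the root domain. The sketch below is a rough approximation under that assumption, using the I-pipe predicate ipipe_root_domain_p; it is not the actual I-pipe implementation:

#include <linux/ipipe.h>
#include <linux/sched.h>
#include <asm/ptrace.h>

/* rough sketch only: the real update_root_process_times() differs */
static void update_root_process_times_sketch(struct pt_regs *regs)
{
	/* a head (real-time) domain intercepted this tick; do not
	 * charge it to Linux process accounting */
	if (!ipipe_root_domain_p)
		return;

	update_process_times(user_mode(regs));
}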