Example #1
void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now = mfctl(16);
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	/* initialize next_tick to time at last clocktick */

	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#endif
		if (cpu == 0) {
			extern int pc_in_user_space;
			write_lock(&xtime_lock);
#ifndef CONFIG_SMP
			if (!user_mode(regs))
				parisc_do_profile(regs->iaoq[0]);
			else
				parisc_do_profile(&pc_in_user_space);
#endif
			do_timer(regs);
			write_unlock(&xtime_lock);
		}
	}
    
#ifdef CONFIG_CHASSIS_LCD_LED
	/* Only schedule the led tasklet on cpu 0, and only if it
	 * is enabled.
	 */
	if (cpu == 0 && !atomic_read(&led_tasklet.count))
		tasklet_schedule(&led_tasklet);
#endif

	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);
}
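
The heart of the handler above is the catch-up loop: CR16 (read with
mfctl(16)) keeps counting while interrupts are masked or ticks are missed,
so the handler walks next_tick forward in clocktick steps and counts how
many ticks it has to account for.  Below is a minimal user-space sketch of
that loop; the names catch_up_ticks, CLOCKTICK and HALFTICK are
illustrative (the real values come from the PA-RISC clock setup), but the
signed-difference trick is the same one the comment in the handler insists
on.

#include <stdio.h>

#define CLOCKTICK 250000L               /* counter cycles per tick (assumed value) */
#define HALFTICK  (CLOCKTICK / 2)

/* Advance *next_tick until it is at least half a tick ahead of now and
 * return how many ticks elapsed.  The subtraction is *signed*, so the
 * result stays correct across a counter wraparound as long as we are
 * less than half the counter range late. */
static long catch_up_ticks(long now, long *next_tick)
{
	long nticks = 0;

	while ((*next_tick - now) < HALFTICK) {
		*next_tick += CLOCKTICK;
		nticks++;
	}
	return nticks;
}

int main(void)
{
	long next = 1000000L;                        /* time of the last programmed tick */
	long now  = next + 3 * CLOCKTICK + 100;      /* the interrupt arrived three ticks late */

	printf("ticks to account for: %ld\n", catch_up_ticks(now, &next));
	printf("next programmed tick: %ld\n", next);
	return 0;
}

With the values above the sketch reports four ticks (the three that were
missed plus the one that fired), which is exactly how many times the
driver would run its while (nticks--) body.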
Example #2
static void
timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long new_itm;

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	while (1) {
		/*
		 * Do kernel PC profiling here.  We multiply the instruction number by
		 * four so that we can use a prof_shift of 2 to get instruction-level
		 * instead of just bundle-level accuracy.
		 */
		if (!user_mode(regs))
			do_profile(regs->cr_iip + 4*ia64_psr(regs)->ri);

#ifdef CONFIG_SMP
		smp_do_timer(regs);
#endif
		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == 0) {
			/*
			 * Here we are in the timer irq handler. We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU. We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_lock(&xtime_lock);
			do_timer(regs);
			local_cpu_data->itm_next = new_itm;
			write_unlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;
	}

	do {
		/*
		 * If we're too close to the next clock tick for comfort, we increase the
		 * safety margin by intentionally dropping the next tick(s).  We do NOT update
		 * itm_next because that would force us to call do_timer() which in turn would
		 * let our clock run too fast (with the potentially devastating effect of
		 * losing monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
}
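
Every comparison in example #2 goes through time_after()/time_after_eq()
rather than a plain "<".  The kernel's real macros live in
<linux/jiffies.h>; the sketch below is a simplified user-space restatement
of the idea, showing why the signed-difference form keeps working when the
counter wraps around while a naive unsigned compare does not.

#include <stdio.h>

/* simplified form of the kernel's time_after(): "a is later than b" */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long before_wrap = (unsigned long)-10;  /* just before the counter wraps */
	unsigned long after_wrap  = 5;                   /* just after it wrapped */

	/* A plain unsigned compare says "no"; the signed difference says "yes". */
	printf("naive compare: %d\n", after_wrap > before_wrap);
	printf("time_after   : %d\n", time_after(after_wrap, before_wrap));
	return 0;
}

The same property is what lets the final do/while loop keep pushing
new_itm forward in whole itm_delta steps until it is safely ahead of the
cycle counter, instead of ever programming a match value that has already
passed.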
Example #3
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);
	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}
    
	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}
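
Compared with example #1, this third version drops the profiling #ifdef
maze in favour of profile_tick() and, more importantly, protects
do_timer() with write_seqlock()/write_sequnlock() instead of a plain
rwlock.  For context, a reader of the wall-clock time pairs with that
writer roughly as follows; this is a sketch of the generic 2.6-era reader
idiom (compare do_gettimeofday()), not code taken from this driver:

#include <linux/time.h>
#include <linux/seqlock.h>

/* declared by the kernel; repeated here so the sketch is self-contained */
extern seqlock_t xtime_lock;
extern struct timespec xtime;

static struct timespec read_wall_time(void)
{
	struct timespec ts;
	unsigned long seq;

	do {
		/* snapshot the sequence counter, then copy the data */
		seq = read_seqbegin(&xtime_lock);
		ts = xtime;
		/* retry if a writer (e.g. do_timer() above) got in meanwhile */
	} while (read_seqretry(&xtime_lock, seq));

	return ts;
}

Readers never block the interrupt handler and never spin on a lock; at
worst they re-copy xtime, which is why the seqlock is a better fit here
than the rwlock used in example #1.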