/*
 * handle_timer_tick() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
void handle_timer_tick(void)
{
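	/* pid 0 is the idle task; don't profile the idle loop. */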
	if (current->pid)
		profile_tick(CPU_PROFILING);

#ifdef CONFIG_HEARTBEAT
	if (sh_mv.mv_heartbeat != NULL)
		sh_mv.mv_heartbeat();
#endif

	/*
	 * Here we are in the timer irq handler. We have irqs locally
	 * disabled, but we don't know if the timer_bh is running on
	 * another CPU, so we need to avoid an SMP race with it. NOTE:
	 * we don't need the irq version of write_seqlock because, as
	 * just said, we have irqs locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);
	do_timer(1);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the RTC accordingly every ~11 minutes. rtc_sh_set_time() has to
	 * be called as close as possible to 500 ms before the new second
	 * starts.
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (rtc_sh_set_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			/* do it again in 60s */
			last_rtc_update = xtime.tv_sec - 600;
	}
	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
}
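
The window test above fires only when the tick lands within half a tick of 500 ms into the current second, so the RTC write completes just ahead of the second boundary. A minimal user-space sketch of the same predicate (should_sync_rtc and tick_usec are stand-ins for the kernel's state, not real kernel API):

#include <time.h>

/* Sketch only: mirrors the ~11 minute / 500 ms window test above (the
 * kernel additionally requires ntp_synced()). tick_usec plays the role
 * of TICK_SIZE, i.e. one tick in microseconds. */
static int should_sync_rtc(struct timespec now, long last_rtc_update,
			   unsigned long tick_usec)
{
	long usec = now.tv_nsec / 1000;

	return now.tv_sec > last_rtc_update + 660 &&	/* ~11 minutes   */
	       usec >= 500000 - (long)tick_usec / 2 &&	/* within half a */
	       usec <= 500000 + (long)tick_usec / 2;	/* tick of 500 ms */
}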
Example #2
inline void smp_local_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	x86_do_profile(regs);

	if (--per_cpu(prof_counter, cpu) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
		if (per_cpu(prof_counter, cpu) !=
					per_cpu(prof_old_multiplier, cpu)) {
			__setup_APIC_LVTT(
					calibration_result/
					per_cpu(prof_counter, cpu));
			per_cpu(prof_old_multiplier, cpu) =
						per_cpu(prof_counter, cpu);
		}

#ifdef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif
	}

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock / irq lock).
	 *
	 * We might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance-wise):
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */
}
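
The counter/multiplier dance above reprograms the APIC timer only when the profiling multiplier (settable through /proc/profile) actually changed since the last reload. A stripped-down sketch of that reload-and-compare pattern, with the per-CPU variables collapsed into a plain struct (prof_state, prof_tick and reprogram are illustrative names, not kernel API):

/* Sketch: a down-counter reloaded from a user-settable multiplier; the
 * timer hardware is reprogrammed only when the multiplier changed. */
struct prof_state {
	int counter;		/* ticks left until the next event */
	int multiplier;		/* desired ticks per profiling event */
	int old_multiplier;	/* last value programmed into the timer */
};

static int prof_tick(struct prof_state *s, void (*reprogram)(int divisor))
{
	if (--s->counter > 0)
		return 0;			/* event not due yet */
	s->counter = s->multiplier;		/* reload */
	if (s->counter != s->old_multiplier) {	/* multiplier changed? */
		reprogram(s->counter);		/* cf. __setup_APIC_LVTT() */
		s->old_multiplier = s->counter;
	}
	return 1;				/* event due this tick */
}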
Example #3
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
	unsigned long flags;
	unsigned int cnt;

	local_irq_save(flags);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
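	/* Toggling the level-5 enable clears the latched interrupt condition. */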
	sun3_disable_irq(5);
	sun3_enable_irq(5);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
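	/* Every 20 ticks, step through the eight entries of led_pattern[]. */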
	cnt = kstat_irqs_cpu(irq, 0);
	if (!(cnt % 20))
		sun3_leds(led_pattern[cnt % 160 / 20]);
	local_irq_restore(flags);
	return IRQ_HANDLED;
}
Example #4
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
#ifndef CONFIG_SMP
	profile_tick(CPU_PROFILING, regs);
#endif
	do_timer(regs);

#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif
	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has
	 * to be called as close as possible to 500 ms before the new second
	 * starts.
	 */
	write_seqlock(&xtime_lock);
	if (ntp_synced()
		&& xtime.tv_sec > last_rtc_update + 660
		&& (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
		&& (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
	{
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else	/* do it again in 60 s */
			last_rtc_update = xtime.tv_sec - 600;
	}
	write_sequnlock(&xtime_lock);
	/* As we return to user mode, fire off the other CPU schedulers...
	   this is basically because we don't yet share IRQs around.
	   This message is rigged to be safe on the 386 - basically it's
	   a hack, so don't look too closely for now. */

#ifdef CONFIG_SMP
	smp_local_timer_interrupt(regs);
	smp_send_timer();
#endif

	return IRQ_HANDLED;
}
Example #5
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
	profile_tick(CPU_PROFILING);
#endif
	xtime_update(1);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	/* As we return to user mode, fire off the other CPU schedulers...
	   this is basically because we don't yet share IRQs around.
	   This message is rigged to be safe on the 386 - basically it's
	   a hack, so don't look too closely for now. */

#ifdef CONFIG_SMP
	smp_local_timer_interrupt();
	smp_send_timer();
#endif

	return IRQ_HANDLED;
}
Example #6
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	unsigned long next;

	next = get_linux_timer();

again:
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
#endif

		write_seqlock(&xtime_lock);

		do_timer(1); /* Linux handler in kernel/timer.c */

		/* Note that writing CCOMPARE clears the interrupt. */

		next += CCOUNT_PER_JIFFY;
		set_linux_timer(next);

		write_sequnlock(&xtime_lock);
	}

	/* Allow platform to do something useful (Wdog). */

	platform_heartbeat();

	/* Make sure we didn't miss any tick... */

	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	return IRQ_HANDLED;
}
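
The (signed long)(get_ccount() - next) > 0 test above (and the recheck after platform_heartbeat()) is the standard wraparound-safe way to ask whether a free-running counter has passed a deadline. A small stand-alone illustration of why subtract-then-sign-check works (it assumes the usual two's-complement representation the kernel relies on):

#include <stdio.h>

/* Wraparound-safe "counter has passed deadline", valid as long as the
 * two values are less than half the counter range apart. */
static int counter_after(unsigned long count, unsigned long deadline)
{
	return (long)(count - deadline) > 0;
}

int main(void)
{
	unsigned long deadline = (unsigned long)-10;	/* near wraparound */

	printf("%d\n", counter_after(deadline - 5, deadline));	/* 0: not there yet */
	printf("%d\n", counter_after(deadline + 15, deadline));	/* 1: passed, even after wrap */
	return 0;
}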
Example #7
irqreturn_t timer_interrupt(int irq, void *dummy)
{
    /* last time the cmos clock got updated */
    static long last_rtc_update;

    write_seqlock(&xtime_lock);

    do_timer(1);

    profile_tick(CPU_PROFILING);

    /*
     * If we have an externally synchronized Linux clock, then update
     * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has
     * to be called as close as possible to 500 ms before the new second
     * starts.
     */

    if (ntp_synced() &&
        xtime.tv_sec > last_rtc_update + 660 &&
        (xtime.tv_nsec / NSEC_PER_USEC) >=
        500000 - ((unsigned)TICK_SIZE) / 2
        && (xtime.tv_nsec / NSEC_PER_USEC) <=
        500000 + ((unsigned)TICK_SIZE) / 2) {
        if (set_rtc_mmss(xtime.tv_sec) == 0)
            last_rtc_update = xtime.tv_sec;
        else
            /* Do it again in 60s. */
            last_rtc_update = xtime.tv_sec - 600;
    }
    write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
    update_process_times(user_mode(get_irq_regs()));
#endif

    return IRQ_HANDLED;
}
Example #8
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	
	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	if (regs) {
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);

		if ((rq_info.init == 1) && (tick_do_timer_cpu == cpu)) {

			update_rq_stats();

			wakeup_user();
		}
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
Example #9
/*
 * There are a lot of conceptually broken versions of the MIPS timer interrupt
 * handler floating around.  This one is rather different, but the algorithm
 * is probably more robust.
 */
void mips_timer_interrupt(struct pt_regs *regs)
{
	int irq = 7;	/* FIXME */

	if (r4k_offset == 0)
		goto null;

	do {
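		/* Replay each elapsed tick, re-arming the compare register every time. */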
		kstat_this_cpu.irqs[irq]++;
		do_timer(regs);
#ifndef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif
		r4k_cur += r4k_offset;
		ack_r4ktimer(r4k_cur);

	} while (((unsigned long)read_c0_count() - r4k_cur) < 0x7fffffff);
	return;

null:
	ack_r4ktimer(0);
}
Example #10
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update = 0;

	/* Clear the interrupt condition */
	outw(0, timer_membase + ALTERA_TIMER_STATUS_REG);
	nios2_timer_count += NIOS2_TIMER_PERIOD;

	write_seqlock(&xtime_lock);

	do_timer(1);
	profile_tick(CPU_PROFILING);
	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has
	 * to be called as close as possible to 500 ms before the new second
	 * starts.
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;	/* do it again in 60 s */
	}

	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif

	return IRQ_HANDLED;
}
Example #11
static void gt64120_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned int irq_src, int_high_src, irq_src_mask, int_high_src_mask;
	int handled = 0;

	irq_src = GT_READ(GT_INTRCAUSE_OFS);
	irq_src_mask = GT_READ(GT_INTRMASK_OFS);
	int_high_src = GT_READ(GT_HINTRCAUSE_OFS);
	int_high_src_mask = GT_READ(GT_HINTRMASK_OFS);
	irq_src = irq_src & irq_src_mask;
	int_high_src = int_high_src & int_high_src_mask;

	if (irq_src & 0x00000800) {	/* Check for timer interrupt */
		handled = 1;
		irq_src &= ~0x00000800;
		do_timer(regs);
#ifndef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif
	}

	/* Writing zero to the cause registers acknowledges the interrupts. */
	GT_WRITE(GT_INTRCAUSE_OFS, 0);
	GT_WRITE(GT_HINTRCAUSE_OFS, 0);
}
Example #12
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themself to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too much ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);

#ifdef CONFIG_MTK_SCHED_RQAVG_US
		if ((rq_info.init == 1) && (tick_do_timer_cpu == cpu)) {

			/*
			 * update run queue statistics
			 */
			update_rq_stats();

#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
			/*
			 * wakeup user if needed
			 */
			wakeup_user();
#endif /* CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ */
		}
#endif /* CONFIG_MTK_SCHED_RQAVG_US */
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
Example #13
void rt_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int cpuA = ((cputoslice(cpu)) == 0);
	int irq = 7;				/* XXX Assign number */

	write_lock(&xtime_lock);

again:
	LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
	ct_cur[cpu] += CYCLES_PER_JIFFY;
	LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

	if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
		goto again;

	kstat.irqs[cpu][irq]++;		/* kstat only for bootcpu? */

	if (cpu == 0)
		do_timer(regs);

#ifdef CONFIG_SMP
	{
		int user = user_mode(regs);

		/*
		 * update_process_times() expects us to have done irq_enter().
		 * Besides, if we don't, timer interrupts ignore the global
		 * interrupt lock, which is the WrongThing (tm) to do.
		 * Picked up from the i386 code.
		 */
		irq_enter(cpu, 0);
		update_process_times(user);
		irq_exit(cpu, 0);
	}
#endif /* CONFIG_SMP */
	
	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the RTC accordingly every ~11 minutes. set_rtc_mmss() has to be
	 * called as close as possible to when a second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660) {
		if (xtime.tv_usec >= 1000000 - ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec + 1) == 0)
				last_rtc_update = xtime.tv_sec;
			else    
				last_rtc_update = xtime.tv_sec - 600;
		} else if (xtime.tv_usec <= ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else    
				last_rtc_update = xtime.tv_sec - 600;
		}
	}

	write_unlock(&xtime_lock);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #14
/*
 * We rearm the timer until we get disabled by the idle code
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct hrtimer_cpu_base *base = timer->base->cpu_base;
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themself to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == -1))
		tick_do_timer_cpu = cpu;
#endif

	/* Check if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too much ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		/*
		 * update_process_times() might take tasklist_lock, hence
		 * drop the base lock. sched-tick hrtimers are per-CPU and
		 * never accessible by userspace APIs, so this is safe to do.
		 */
		spin_unlock(&base->lock);
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
		spin_lock(&base->lock);
	}

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
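
The unlock/relock around update_process_times() above is a general pattern: drop a fine-grained lock before calling out to code that may take other locks, then reacquire it; the in-code comment notes this is safe here because the sched-tick hrtimer is per-CPU and never reachable from userspace. A toy pthreads sketch of the same shape (base_lock, tick and callout are illustrative, not kernel API):

#include <pthread.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

/* Sketch: release base_lock around a callout that may acquire other
 * locks, so no lock-ordering constraint is created between them. */
static void tick(void (*callout)(void))
{
	pthread_mutex_lock(&base_lock);
	/* ... touch state protected by base_lock ... */
	pthread_mutex_unlock(&base_lock);

	callout();	/* may take unrelated locks, cf. tasklist_lock */

	pthread_mutex_lock(&base_lock);
	/* ... state must be revalidated after reacquiring ... */
	pthread_mutex_unlock(&base_lock);
}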
Example #15
static irqreturn_t
timer_interrupt(int irq, void *dev_id)
{
    unsigned long new_itm;

    if (unlikely(cpu_is_offline(smp_processor_id()))) {
        return IRQ_HANDLED;
    }

    platform_timer_interrupt(irq, dev_id);

    new_itm = local_cpu_data->itm_next;

    if (!time_after(ia64_get_itc(), new_itm))
        printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
               ia64_get_itc(), new_itm);

    profile_tick(CPU_PROFILING);

    while (1) {
        update_process_times(user_mode(get_irq_regs()));

        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == time_keeper_id) {
            /*
             * Here we are in the timer irq handler. We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU, so we avoid an SMP race by acquiring the
             * xtime_lock.
             */
            write_seqlock(&xtime_lock);
            do_timer(1);
            local_cpu_data->itm_next = new_itm;
            write_sequnlock(&xtime_lock);
        } else
            local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;

        /*
         * Allow IPIs to interrupt the timer loop.
         */
        local_irq_enable();
        local_irq_disable();
    }

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s).  We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing the monotonicity of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    return IRQ_HANDLED;
}
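
The closing do/while above implements the safety margin it describes: any tick that would land within half a period of "now" is skipped rather than delivered late, without forcing extra do_timer() calls. A compact user-space sketch of that catch-up rule (catch_up, now and delta are stand-ins for ia64_get_itc() and itm_delta):

/* Sketch: advance the next match value until it is safely past "now"
 * plus half a period, skipping whole ticks, as the margin loop above
 * does before ia64_set_itm(). */
static unsigned long catch_up(unsigned long new_itm, unsigned long now,
			      unsigned long delta)
{
	/* (long)(a - b) > 0 is the time_after(a, b) analogue */
	while (!((long)(new_itm - (now + delta / 2)) > 0))
		new_itm += delta;
	return new_itm;
}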
Example #16
/*
 * local_timer_interrupt() does profiling and process accounting on a
 * per-CPU basis.
 *
 * In UP mode, it is invoked from the (global) timer_interrupt.
 */
void local_timer_interrupt(int irq, void *dev_id)
{
	if (current->pid)
		profile_tick(CPU_PROFILING);
	update_process_times(user_mode(get_irq_regs()));
}
Example #17
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 * We set it up to overflow again in 1/HZ seconds.
 */
void timer_interrupt(struct pt_regs *regs)
{
	int next_dec;
	unsigned long cpu = smp_processor_id();
	unsigned jiffy_stamp = last_jiffy_stamp(cpu);
	extern void do_IRQ(struct pt_regs *);

	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);

	MARK(kernel_trap_entry, "%d struct pt_regs %p", regs->trap, regs);

	irq_enter();

	while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
		jiffy_stamp += tb_ticks_per_jiffy;
		
		profile_tick(CPU_PROFILING, regs);
		update_process_times(user_mode(regs));

		if (smp_processor_id())
			continue;

		/* We are in an interrupt, no need to save/restore flags */
		write_seqlock(&xtime_lock);
		tb_last_stamp = jiffy_stamp;
#ifdef CONFIG_LTT
		ltt_reset_timestamp();
#endif /* CONFIG_LTT */
		do_timer(regs);

		/*
		 * update the rtc when needed, this should be performed on the
	 * right fraction of a second. Half or full second?
		 * Full second works on mk48t59 clocks, others need testing.
		 * Note that this update is basically only used through
		 * the adjtimex system calls. Setting the HW clock in
		 * any other way is a /dev/rtc and userland business.
		 * This is still wrong by -0.5/+1.5 jiffies because of the
		 * timer interrupt resolution and possible delay, but here we
		 * hit a quantization limit which can only be solved by higher
		 * resolution timers and decoupling time management from timer
		 * interrupts. This is also wrong on the clocks
		 * which require being written at the half second boundary.
		 * We should have an rtc call that only sets the minutes and
	 * seconds, as on Intel, to avoid problems with non-UTC clocks.
		 */
		if ( ppc_md.set_rtc_time && ntp_synced() &&
		     xtime.tv_sec - last_rtc_update >= 659 &&
		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
		     jiffies - wall_jiffies == 1) {
		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Try again one minute later */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}
	if (!disarm_decr[smp_processor_id()])
		set_dec(next_dec);
	last_jiffy_stamp(cpu) = jiffy_stamp;

	if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
		ppc_md.heartbeat();

	irq_exit();

 	trace_kernel_trap_exit();
	MARK(kernel_trap_exit, MARK_NOARGS);
}
Example #18
static irqreturn_t
timer_interrupt(int irq, void *dev_id)
{
    unsigned long new_itm;

    if (cpu_is_offline(smp_processor_id())) {
        return IRQ_HANDLED;
    }

    platform_timer_interrupt(irq, dev_id);

    new_itm = local_cpu_data->itm_next;

    if (!time_after(ia64_get_itc(), new_itm))
        printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
               ia64_get_itc(), new_itm);

    profile_tick(CPU_PROFILING);

    if (paravirt_do_steal_accounting(&new_itm))
        goto skip_process_time_accounting;

    while (1) {
        update_process_times(user_mode(get_irq_regs()));

        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == time_keeper_id)
            xtime_update(1);

        local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;

        /*
         * Allow IPIs to interrupt the timer loop.
         */
        local_irq_enable();
        local_irq_disable();
    }

skip_process_time_accounting:

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s).  We do NOT
         * update itm.next because that would force us to call
         * xtime_update() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing the monotonicity of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    return IRQ_HANDLED;
}
Example #19
/*
 * ipi_interrupt()
 *	Handle an Interprocessor Interrupt.
 */
static irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	int cpuid = smp_processor_id();
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
	unsigned long ops;

	/*
	 * Count this now; we may make a call that never returns.
	 */
	p->ipi_count++;

	/*
	 * We are about to process all ops.  If another cpu has stated
	 * that we need an IPI, we will have already processed it.  By
	 * clearing our smp_needs_ipi, and processing all ops,
	 * we reduce the number of IPI interrupts.  However, this introduces
	 * the possibility that smp_needs_ipi will be clear and the soft irq
	 * will have gone off; so we need to make the get_affinity() path
	 * tolerant of spurious interrupts.
	 */
	spin_lock(&smp_ipi_lock);
	smp_needs_ipi &= ~(1 << p->tid);
	spin_unlock(&smp_ipi_lock);

	for (;;) {
		/*
		 * Read the set of IPI commands we should handle.
		 */
		spinlock_t *lock = &per_cpu(ipi_lock, cpuid);
		spin_lock(lock);
		ops = p->ipi_pending;
		p->ipi_pending = 0;
		spin_unlock(lock);

		/*
		 * If we have no IPI commands to execute, break out.
		 */
		if (!ops) {
			break;
		}

		/*
		 * Execute the set of commands in the ops word, one command
		 * at a time and in no particular order, stripping off each
		 * command as we execute it.
		 */
		while (ops) {
			unsigned long which = ffz(~ops);
			ops &= ~(1 << which);

			BUG_ON(!irqs_disabled());
			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_NOP\n", cpuid);
				break;

			case IPI_RESCHEDULE:
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				smp_debug(200, KERN_INFO "cpu[%d]: "
					  "IPI_RESCHEDULE\n", cpuid);
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC\n", cpuid);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC_SINGLE\n", cpuid);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_STOP\n", cpuid);
				smp_halt_processor();
				break;

#if !defined(CONFIG_LOCAL_TIMERS)
			case IPI_CPU_TIMER:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_TIMER\n", cpuid);
#if defined(CONFIG_GENERIC_CLOCKEVENTS)
				local_timer_interrupt();
#else
				update_process_times(user_mode(get_irq_regs()));
				profile_tick(CPU_PROFILING);
#endif
#endif
				break;

			default:
				printk(KERN_CRIT "cpu[%d]: "
					  "Unknown IPI: %lu\n", cpuid, which);

				return IRQ_NONE;
			}
		}
	}
	return IRQ_HANDLED;
}
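
In the dispatch loop above, ffz(~ops) finds the lowest set bit of ops (the lowest zero bit of its complement), and ops &= ~(1 << which) strips it before dispatching. A stand-alone equivalent, using GCC's __builtin_ctzl in place of the kernel's ffz():

#include <stdio.h>

int main(void)
{
	unsigned long ops = 0x2c;	/* pending commands: bits 2, 3, 5 */

	while (ops) {
		/* Lowest set bit of ops == ffz(~ops) in the kernel code. */
		unsigned long which = __builtin_ctzl(ops);

		ops &= ~(1UL << which);	/* strip it, then dispatch */
		printf("handling command %lu\n", which);
	}
	return 0;
}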