Example #1
asmlinkage void do_softirq()
{
	int cpu = smp_processor_id();
	__u32 pending;
	long flags;
	__u32 mask;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = softirq_pending(cpu);

	if (pending) {
		struct softirq_action *h;

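		/*
		 * Remember which softirqs were NOT pending on entry: only
		 * newly raised softirqs may restart the loop below, while
		 * re-raised ones are handed off to ksoftirqd instead.
		 */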
		mask = ~pending;
		local_bh_disable();
restart:
		/* Reset the pending bitmask before enabling irqs */
		softirq_pending(cpu) = 0;

		local_irq_enable();

		h = softirq_vec;

		do {
			if (pending & 1)
				h->action(h);
			h++;
			pending >>= 1;
		} while (pending);

		local_irq_disable();

		pending = softirq_pending(cpu);
		if (pending & mask) {
			mask &= ~pending;
			goto restart;
		}
		__local_bh_enable();

		if (pending)
			wakeup_softirqd(cpu);
	}

	local_irq_restore(flags);
}
Example #2
void mwait_idle_with_hints(unsigned int eax, unsigned int ecx)
{
    unsigned int cpu = smp_processor_id();
    s_time_t expires = per_cpu(timer_deadline, cpu);

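    /*
     * On CPUs with the CLFLUSH-before-MONITOR erratum, flush the
     * monitored cache line first or MONITOR may not arm reliably.
     */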
    if ( boot_cpu_has(X86_FEATURE_CLFLUSH_MONITOR) )
    {
        mb();
        clflush((void *)&mwait_wakeup(cpu));
        mb();
    }

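    /* Arm the monitor address, then re-check for work before sleeping. */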
    __monitor((void *)&mwait_wakeup(cpu), 0, 0);
    smp_mb();

    /*
     * Timer deadline passing is the event on which we will be woken via
     * cpuidle_mwait_wakeup. So check it now that the location is armed.
     */
    if ( (expires > NOW() || expires == 0) && !softirq_pending(cpu) )
    {
        cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
        __mwait(eax, ecx);
        cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
    }

    if ( expires <= NOW() && expires > 0 )
        raise_softirq(TIMER_SOFTIRQ);
}
Example #3
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction *action;
	int do_random, cpu;

	cpu = smp_processor_id();
	irq_enter(cpu);
	kstat.irqs[cpu][irq]++;

	action = irq_action[irq];
	if (action) {
		/* Slow handlers run with interrupts re-enabled. */
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		action = irq_action[irq];
		do_random = 0;
		do {
			do_random |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		if (do_random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();
	}
	irq_exit(cpu);

	if (softirq_pending(cpu))
		do_softirq();

	/* Unmasking and bottom half handling is done magically for us. */
}
Example #4
void smp_apic_timer_interrupt(struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	/*
	 * the NMI deadlock-detector uses this.
	 */
	apic_timer_irqs[cpu]++;

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter(cpu, 0);
	smp_local_timer_interrupt(regs);
	irq_exit(cpu, 0);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #5
asmlinkage void do_softirq(void)
{
    unsigned int i, cpu;
    unsigned long pending;

    /* SCHEDULE_SOFTIRQ may move us to another processor. */
    while ( 1 )
    {
        cpu = smp_processor_id();

        if ( (pending = softirq_pending(cpu)) == 0 )
            break;

        i = find_first_set_bit(pending);
        clear_bit(i, &softirq_pending(cpu));
        (*softirq_handlers[i])();
    }
}
Example #6
void indy_r4k_timer_interrupt (struct pt_regs *regs)
{
	static const int INDY_R4K_TIMER_IRQ = 7;
	int cpu = smp_processor_id();

	r4k_timer_interrupt (INDY_R4K_TIMER_IRQ, NULL, regs);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #7
void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
{
    int cpu;

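    /*
     * Mark the softirq pending on every CPU in the mask, but only IPI
     * the CPUs that did not already have it pending.
     */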
    for_each_cpu_mask(cpu, mask)
        if ( test_and_set_bit(nr, &softirq_pending(cpu)) )
            cpu_clear(cpu, mask);

    smp_send_event_check_mask(&mask);
}
Example #8
void
LinuxMode::doSoftIRQ(uval disabled)
{
    uval old = mode;
    if (disabled) Scheduler::Enable();
    while (softirq_pending(cpu)) {
	mode = Undefined;  // Thread mode is undefined to allow
			   // do_softirq to set thread mode via a
			   // local_bh_disable

	// bhDisabled should not be set --- it could be set accidentally
	// by syscalls on the loopback device.
	flags &= ~LinuxMode::bhDisabled;

	TraceOSLinuxBH(cpu, softirq_pending(cpu), 0);
	// Now we know we're the only thread running on this thing
	do_softirq();
    }
    mode = old;
    if (disabled) Scheduler::Disable();
}
Example #9
void rt_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int cpuA = ((cputoslice(cpu)) == 0);
	int irq = IP27_TIMER_IRQ;

	irq_enter(cpu, irq);
	write_lock(&xtime_lock);

again:
	LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
	ct_cur[cpu] += CYCLES_PER_JIFFY;
	LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

	if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
		goto again;

	kstat.irqs[cpu][irq]++;		/* kstat only for bootcpu? */

	if (cpu == 0)
		do_timer(regs);

#ifdef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif /* CONFIG_SMP */

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to when a second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660) {
		if (xtime.tv_usec >= 1000000 - ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec + 1) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				last_rtc_update = xtime.tv_sec - 600;
		} else if (xtime.tv_usec <= ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				last_rtc_update = xtime.tv_sec - 600;
		}
	}

	write_unlock(&xtime_lock);
	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #10
void
LinuxMode::freeCPU()
{
    TraceOSLinuxEnd(cpu, softirq_pending(cpu), 0);

    uval local;
    do {
	local = availCPU;
    } while (!CompareAndStoreSynced(&availCPU, local, local | (1ULL<<cpu)));

    cpuControl->V();
}
Example #11
static int ksoftirqd(void * __bind_cpu)
{
	int bind_cpu = (int) (long) __bind_cpu;
	int cpu = cpu_logical_map(bind_cpu);

	daemonize();
	current->nice = 19;
	sigfillset(&current->blocked);

	/* Migrate to the right CPU */
	current->cpus_allowed = 1UL << cpu;
	while (smp_processor_id() != cpu)
		schedule();

	sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);

	__set_current_state(TASK_INTERRUPTIBLE);
	mb();

	ksoftirqd_task(cpu) = current;

	for (;;) {
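		/*
		 * State is TASK_INTERRUPTIBLE here, so a wakeup that races
		 * with the pending check below is not lost across schedule().
		 */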
		if (!softirq_pending(cpu))
			schedule();

		__set_current_state(TASK_RUNNING);

		while (softirq_pending(cpu)) {
			do_softirq();
			if (current->need_resched)
				schedule();
		}

		__set_current_state(TASK_INTERRUPTIBLE);
	}
}
Example #12
asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	/* we keep interrupt disabled all the time */
	timer_interrupt(irq, NULL, regs);
	
	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #13
asmlinkage void do_softirq(void)
{
    unsigned int i, cpu;
    unsigned long pending;

    for ( ; ; )
    {
        /*
         * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ may move
         * us to another processor.
         */
        cpu = smp_processor_id();

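        /* Process any due RCU callbacks before checking for softirq work. */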
        if ( rcu_pending(cpu) )
            rcu_check_callbacks(cpu);

        if ( (pending = softirq_pending(cpu)) == 0 )
            break;

        i = find_first_set_bit(pending);
        clear_bit(i, &softirq_pending(cpu));
        (*softirq_handlers[i])();
    }
}
Example #14
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction *action;
	int do_random, cpu;
	int retval = 0;

	cpu = smp_processor_id();
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;

	action = irq_action[irq];
	if (action) {
		if (!(action->flags & SA_INTERRUPT))
			local_irq_enable();
		action = irq_action[irq];
		do_random = 0;
		do {
			do_random |= action->flags;
			retval |= action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);

		if (retval != 1) {
			if (retval) {
				printk("irq event %d: bogus retval mask %x\n",
					irq, retval);
			} else {
				printk("irq %d: nobody cared\n", irq);
			}
		}

		if (do_random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		local_irq_disable();
	}
	irq_exit();

	if (softirq_pending(cpu))
		do_softirq();

	/* Unmasking and bottom half handling is done magically for us. */
}
Example #15
void mips_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int irq = MIPS_CPU_TIMER_IRQ;

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	timer_interrupt(irq, NULL, regs);

	if ((timer_tick_count++ % HZ) == 0) {
		mips_display_message(&display_string[display_count++]);
		if (display_count == MAX_DISPLAY_COUNT)
			display_count = 0;
	}

	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #16
void mips_timer_interrupt(struct pt_regs *regs)
{
	int irq = 63;
	unsigned long count;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

#ifdef CONFIG_PM
	printk(KERN_ERR "Unexpected CP0 interrupt\n");
	regs->cp0_status &= ~IE_IRQ5; /* disable CP0 interrupt */
	return;
#endif

	if (r4k_offset == 0)
		goto null;

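	/* Reprogram the compare register until the next tick is in the future. */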
	do {
		count = read_c0_count();
		timerhi += (count < timerlo);   /* Wrap around */
		timerlo = count;

		kstat.irqs[0][irq]++;
		do_timer(regs);
		r4k_cur += r4k_offset;
		ack_r4ktimer(r4k_cur);

	} while (((unsigned long)read_c0_count()
	         - r4k_cur) < 0x7fffffff);

	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
	return;

null:
	ack_r4ktimer(0);
}
Example #17
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct hvm_emulate_ctxt hvmemul_ctxt;
    struct segment_register *sreg;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    unsigned long intr_info;
    unsigned int emulations = 0;

    /* Get-and-clear VM_ENTRY_INTR_INFO. */
    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, 0);

    hvm_emulate_prepare(&hvmemul_ctxt, regs);

    if ( vio->io_state == HVMIO_completed )
        realmode_emulate_one(&hvmemul_ctxt);

    /* Only deliver interrupts into emulated real mode. */
    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
         (intr_info & INTR_INFO_VALID_MASK) )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
        intr_info = 0;
    }

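    /*
     * Keep emulating until an I/O operation starts, a softirq needs
     * servicing on this CPU, or the guest state is safe to run natively.
     */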
    curr->arch.hvm_vmx.vmx_emulate = 1;
    while ( curr->arch.hvm_vmx.vmx_emulate &&
            !softirq_pending(smp_processor_id()) &&
            (vio->io_state == HVMIO_none) )
    {
        /*
         * Check for pending interrupts only every 16 instructions, because
         * hvm_local_events_need_delivery() is moderately expensive, and only
         * in real mode, because we don't emulate protected-mode IDT vectoring.
         */
        if ( unlikely(!(++emulations & 15)) &&
             curr->arch.hvm_vmx.vmx_realmode && 
             hvm_local_events_need_delivery(curr) )
            break;

        realmode_emulate_one(&hvmemul_ctxt);

        /* Stop emulating unless our segment state is not safe */
        if ( curr->arch.hvm_vmx.vmx_realmode )
            curr->arch.hvm_vmx.vmx_emulate = 
                (curr->arch.hvm_vmx.vm86_segment_mask != 0);
        else
            curr->arch.hvm_vmx.vmx_emulate = 
                 ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
                  || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
    }

    /* Need to emulate next time if we've started an IO operation */
    if ( vio->io_state != HVMIO_none )
        curr->arch.hvm_vmx.vmx_emulate = 1;

    if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
    {
        /*
         * Cannot enter protected mode with bogus selector RPLs and DPLs.
         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
         * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
         */
        sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
        sreg->attr.fields.dpl = sreg->sel & 3;
        hvmemul_ctxt.seg_reg_dirty |=
            (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
            (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
    }

    hvm_emulate_writeback(&hvmemul_ctxt);

    /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, intr_info);
}
Example #18
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	kstat.irqs[cpu][irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	   REPLAY is when Linux resends an IRQ that was dropped earlier
	   WAITING is used by probe to mark irqs that are being tested
	   */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	   Since we set PENDING, if another processor is handling
	   a different instance of this same irq, the other processor
	   will take care of it.
	 */
	if (!action) {
		goto out;
	}

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	if (softirq_pending(cpu))
		do_softirq();
	return 1;
}
Example #19
/*
 * do_IRQ handles all normal device IRQ's
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
    struct irqdesc * desc;
    struct irqaction * action;
    int cpu;
#ifdef CONFIG_ILATENCY
    {
        extern void interrupt_overhead_start(void);

        interrupt_overhead_start();
    }
#endif /* CONFIG_ILATENCY */

    irq = fixup_irq(irq);

    /*
     * Some hardware gives randomly wrong interrupts.  Rather
     * than crashing, do something sensible.
     */
    if (irq >= NR_IRQS)
        goto bad_irq;

    /* this is called recursively in some cases, so... */
    if (!in_irq())
        preempt_lock_start(-99);

    desc = irq_desc + irq;

    TRACE_IRQ_ENTRY(irq, !(user_mode(regs)));

    spin_lock(&irq_controller_lock);
    desc->mask_ack(irq);
    spin_unlock(&irq_controller_lock);

    cpu = smp_processor_id();
    irq_enter(cpu, irq);
    kstat.irqs[cpu][irq]++;
    desc->triggered = 1;

    /* Return with this interrupt masked if no action */
    action = desc->action;

    if (action) {
        int status = 0;

        if (desc->nomask) {
            spin_lock(&irq_controller_lock);
            desc->unmask(irq);
            spin_unlock(&irq_controller_lock);
        }

        if (!(action->flags & SA_INTERRUPT))
            local_irq_enable();

#ifdef CONFIG_ILATENCY
        {
            extern void interrupt_overhead_stop(void);

            interrupt_overhead_stop();
        }
#endif /* CONFIG_ILATENCY */
        do {
            status |= action->flags;
            action->handler(irq, action->dev_id, regs);
            action = action->next;
        } while (action);

        if (status & SA_SAMPLE_RANDOM)
            add_interrupt_randomness(irq);
        local_irq_disable();

        if (!desc->nomask && desc->enabled) {
            spin_lock(&irq_controller_lock);
            desc->unmask(irq);
            spin_unlock(&irq_controller_lock);
        }
    }

    /*
     * Debug measure - hopefully we can continue if an
     * IRQ lockup problem occurs...
     */
    check_irq_lock(desc, irq, regs);

    irq_exit(cpu, irq);
    TRACE_IRQ_EXIT();

    if (!in_irq())
        preempt_lock_stop();

    if (softirq_pending(cpu))
        do_softirq();
#ifdef CONFIG_ILATENCY
    /*
     * until entry.S gets this call do it here.
     */
    intr_ret_from_exception();
#endif /* CONFIG_ILATENCY */
    return;

bad_irq:
    irq_err_count += 1;
    printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
    return;
}
Example #20
void rt_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int cpuA = ((cputoslice(cpu)) == 0);
	int irq = 7;				/* XXX Assign number */

	write_lock(&xtime_lock);

again:
	LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0);	/* Ack  */
	ct_cur[cpu] += CYCLES_PER_JIFFY;
	LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);

	if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
		goto again;

	kstat.irqs[cpu][irq]++;		/* kstat only for bootcpu? */

	if (cpu == 0)
		do_timer(regs);

#ifdef CONFIG_SMP
	{
		int user = user_mode(regs);

		/*
		 * update_process_times() expects us to have done irq_enter().
		 * Besides, if we don't, timer interrupts ignore the global
		 * interrupt lock, which is the WrongThing (tm) to do.
		 * Picked from i386 code.
		 */
		irq_enter(cpu, 0);
		update_process_times(user);
		irq_exit(cpu, 0);
	}
#endif /* CONFIG_SMP */
	
	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to when a second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660) {
		if (xtime.tv_usec >= 1000000 - ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec + 1) == 0)
				last_rtc_update = xtime.tv_sec;
			else    
				last_rtc_update = xtime.tv_sec - 600;
		} else if (xtime.tv_usec <= ((unsigned) tick) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else    
				last_rtc_update = xtime.tv_sec - 600;
		}
	}

	write_unlock(&xtime_lock);

	if (softirq_pending(cpu))
		do_softirq();
}
Example #21
void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
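    /* Send the wakeup IPI only if the softirq was not already pending. */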
    if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
        smp_send_event_check_cpu(cpu);
}
Example #22
void raise_softirq(unsigned int nr)
{
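    /* Mark pending on the local CPU; it runs on the next do_softirq() pass. */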
    set_bit(nr, &softirq_pending(smp_processor_id()));
}
Example #23
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 * We set it up to overflow again in 1/HZ seconds.
 */
int timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	unsigned long cpu = smp_processor_id();
	unsigned jiffy_stamp = last_jiffy_stamp(cpu);
	extern void do_IRQ(struct pt_regs *);

	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);

	hardirq_enter(cpu);

	while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) < 0) {
		jiffy_stamp += tb_ticks_per_jiffy;
		if (!user_mode(regs))
			ppc_do_profile(instruction_pointer(regs));
		if (unlikely(!heartbeat_count(cpu)--)
				&& heartbeat_reset(cpu)) {
			ppc_md.heartbeat();
			heartbeat_count(cpu) = heartbeat_reset(cpu);
		}
		if (cpu)
			continue;

		/* We are in an interrupt, no need to save/restore flags */
		write_lock(&xtime_lock);
		tb_last_stamp = jiffy_stamp;
		do_timer(regs);

		/*
		 * update the rtc when needed, this should be performed on the
		 * right fraction of a second. Half or full second ?
		 * Full second works on mk48t59 clocks, others need testing.
		 * Note that this update is basically only used through
		 * the adjtimex system calls. Setting the HW clock in
		 * any other way is a /dev/rtc and userland business.
		 * This is still wrong by -0.5/+1.5 jiffies because of the
		 * timer interrupt resolution and possible delay, but here we
		 * hit a quantization limit which can only be solved by higher
		 * resolution timers and decoupling time management from timer
		 * interrupts. This is also wrong on the clocks
		 * which require being written at the half second boundary.
		 * We should have an rtc call that only sets the minutes and
		 * seconds like on Intel to avoid problems with non UTC clocks.
		 */
		if ( ppc_md.set_rtc_time && (time_status & STA_UNSYNC) == 0 &&
		     xtime.tv_sec - last_rtc_update >= 659 &&
		     abs(xtime.tv_usec - (1000000-1000000/HZ)) < 500000/HZ &&
		     jiffies - wall_jiffies == 1) {
		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Try again one minute later */
				last_rtc_update += 60;
		}
		write_unlock(&xtime_lock);
	}
	if (!disarm_decr[cpu])
		set_dec(next_dec);
	last_jiffy_stamp(cpu) = jiffy_stamp;

#ifdef CONFIG_SMP
	smp_local_timer_interrupt(regs);
#endif /* CONFIG_SMP */

	hardirq_exit(cpu);

	if (softirq_pending(cpu))
		do_softirq();

	return 1; /* lets ret_from_int know we can do checks */
}