irqreturn_t smtc_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int vpflags;

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare(read_c0_count() - 1);

		vpflags = dvpe();
		clear_c0_cause(0x100 << 7);
		evpe(vpflags);

		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system.  One TC of each VPE will take
		 * the actual timer interrupt.  The others will get
		 * timer broadcast IPIs.  We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		} else {
			write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}

	return IRQ_HANDLED;
}
Example #2
int c0_compare_int_usable(void)
{
    unsigned int delta;
    unsigned int cnt;

#ifdef CONFIG_KVM_GUEST
    return 1;
#endif

    /*
     * IP7 already pending?	 Try to clear it by acking the timer.
     */
    if (c0_compare_int_pending()) {
        cnt = read_c0_count();
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        while (read_c0_count() < (cnt  + COMPARE_INT_SEEN_TICKS))
            if (!c0_compare_int_pending())
                break;
        if (c0_compare_int_pending())
            return 0;
    }

    for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
        cnt = read_c0_count();
        cnt += delta;
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        if ((int)(read_c0_count() - cnt) < 0)
            break;
        /* increase delta if the timer was already expired */
    }

    while ((int)(read_c0_count() - cnt) <= 0)
        ;	/* Wait for expiry  */

    while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
        if (c0_compare_int_pending())
            break;
    if (!c0_compare_int_pending())
        return 0;
    cnt = read_c0_count();
    write_c0_compare(cnt);
    back_to_back_c0_hazard();
    while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
        if (!c0_compare_int_pending())
            break;
    if (c0_compare_int_pending())
        return 0;

    /*
     * Feels like a real count / compare timer.
     */
    return 1;
}
Example #3

int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		cnt = read_c0_count();
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		while (read_c0_count() < (cnt  + COMPARE_INT_SEEN_TICKS))
			if (!c0_compare_int_pending())
				break;
		if (c0_compare_int_pending())
			return 0;
	}

	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
		    break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */

	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (c0_compare_int_pending())
			break;
	if (!c0_compare_int_pending())
		return 0;
	cnt = read_c0_count();
	write_c0_compare(cnt);
	back_to_back_c0_hazard();
	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (!c0_compare_int_pending())
			break;
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}
Example #4
static irqreturn_t cc_timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	u32 count;

	/*
	 * The cycle counter is only 32 bits wide, which is good for about
	 * a minute at current count rates of up to 150 MHz or so.
	 */
	count = read_c0_count();
	timerhi += (count < timerlo);	/* Wrap around */
	timerlo = count;

	write_c0_compare((u32) (count + cc_interval));
	kstat_this_cpu.irqs[irq]++;
	do_timer(regs);

	if (!jiffies) {
		/*
		 * If jiffies has overflowed in this timer_interrupt we must
		 * update the timer[hi]/[lo] to make do_fast_gettimeoffset()
		 * quotient calc still valid. -arca
		 */
		timerhi = timerlo = 0;
	}
	return IRQ_HANDLED;
}
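
The timerhi/timerlo pair above extends the 32-bit Count register into a 64-bit software cycle count. A minimal sketch of a reader for that pair (the helper name is hypothetical, and it assumes the caller cannot race with cc_timer_interrupt()):

/* Hypothetical helper: combine the software-maintained high word with the
 * last sampled low word into a single 64-bit cycle value. */
static inline u64 read_cycles64(void)
{
	return ((u64)timerhi << 32) | timerlo;
}
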
Example #5
void __init ip32_timer_setup (struct irqaction *irq)
{
	u64 crime_time;
	u32 cc_tick;

	write_c0_count(0);
	irq->handler = cc_timer_interrupt;

	printk("Calibrating system timer... ");

	crime_time = crime_read_64(CRIME_TIME) & CRIME_TIME_MASK;
	cc_tick = read_c0_count();

	while ((crime_read_64 (CRIME_TIME) & CRIME_TIME_MASK) - crime_time
		< WAIT_MS * 1000000 / CRIME_NS_PER_TICK)
		;
	cc_tick = read_c0_count() - cc_tick;
	cc_interval = cc_tick / HZ * (1000 / WAIT_MS);
	/*
	 * The round-off seems unnecessary; in testing, the error of the
	 * above procedure is < 100 ticks, which means it gets filtered
	 * out by the HZ adjustment.
	 */
	cc_interval = (cc_interval / PER_MHZ) * PER_MHZ;

	printk("%d MHz CPU detected\n", (int) (cc_interval / PER_MHZ));

	setup_irq (CLOCK_IRQ, irq);
#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
	/* Set ourselves up for future interrupts */
	write_c0_compare(read_c0_count() + cc_interval);
	change_c0_status(ST0_IM, ALLINTS);
	local_irq_enable();
}
Example #6
File: time.c  Project: ivucica/linux
void __init plat_timer_setup(struct irqaction *irq)
{
#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
		mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
	} else
#endif
	{
		if (cpu_has_vint)
			set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
		mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
	}

	/* we are using the cpu counter for timer interrupts */
	irq->handler = mips_timer_interrupt;	/* we use our own handler */
#ifdef CONFIG_MIPS_MT_SMTC
	setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
#else
	setup_irq(mips_cpu_timer_irq, irq);
#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_SMP
	/* irq_desc(riptor) is a global resource, when the interrupt overlaps
	   on separate cpu's the first one tries to handle the second interrupt.
	   The effect is that the int remains disabled on the second cpu.
	   Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
	irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
	set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif

	/* to generate the first timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
}
Example #7
void __init plat_time_init(void)
{
	unsigned long hz = 0;

	/*
	 * Use deterministic values for initial counter interrupt
	 * so that calibrate delay avoids encountering a counter wrap.
	 */
	write_c0_count(0);
	write_c0_compare(0xffff);

	switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
	case BCM47XX_BUS_TYPE_SSB:
		hz = ssb_cpu_clock(&bcm47xx_bus.ssb.mipscore) / 2;
		break;
#endif
#ifdef CONFIG_BCM47XX_BCMA
	case BCM47XX_BUS_TYPE_BCMA:
		hz = bcma_cpu_clock(&bcm47xx_bus.bcma.bus.drv_mips) / 2;
		break;
#endif
	}

	if (!hz)
		hz = 100000000;

	/* Set MIPS counter frequency for fixed_rate_gettimeoffset() */
	mips_hpt_frequency = hz;
}
Example #8
void __cpuinit synchronise_count_slave(int cpu)
{
	int i;
	unsigned int initcount;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	while (atomic_read(&count_start_flag) != cpu)
		mb();

	/* Count will be initialised to next expire for all CPU's */
	initcount = atomic_read(&count_reference);

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
Example #9

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}
Example #10
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}
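
For context, a handler like c0_compare_interrupt() is normally installed through a per-CPU irqaction. A rough sketch of that wiring (the exact flag set varies between kernel versions, so treat it as an assumption):

/* Sketch: register the handler above as the CP0 timer irqaction. */
static struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	.flags	 = IRQF_PERCPU | IRQF_TIMER,
	.name	 = "timer",
};

/* Illustrative wrapper: call once the timer irq number is known. */
static void mips_register_timer_irq(int irq)
{
	setup_irq(irq, &c0_compare_irqaction);
}
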
Example #11
int timer_init(void)
{
	/* Set up the timer for the first expiration. */
	write_c0_compare(read_c0_count() + CYCLES_PER_JIFFY);

	return 0;
}
Example #12
File: sync-r4k.c  Project: 020gzh/linux
void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
Example #13
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int pending;

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	pending = read_c0_cause() & read_c0_status();

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	if (pending & CAUSEF_IP7)	/* Cpu performance counter interrupt */
		sbprof_cpu_intr();
	else
#endif

	if (pending & CAUSEF_IP4)
		do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		bcm1480_mailbox_interrupt();
#endif

	else if (pending & CAUSEF_IP2)
		dispatch_ip2();
}
Example #14
File: clock.c  Project: TySag/project
static void reload_timer()
{
  uint32_t counter = read_c0_count();
  counter += TIMER0_INTERVAL;
  write_c0_compare(counter);
  next_compare_val = counter;
}
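
A hypothetical interrupt handler that pairs with reload_timer() above (the handler name and the tick hook are assumptions, not taken from the TySag project):

/* Hypothetical ISR: writing Compare via reload_timer() both schedules the
   next tick and de-asserts the pending timer interrupt. */
void timer0_irq_handler(void)
{
  reload_timer();
  /* ... periodic tick work goes here ... */
}
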
Example #15
/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}
Example #16
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	/*
	 * What a pain. We have to be really careful saving the upper 32 bits
	 * of any register across function calls if we don't want them
	 * trashed--since we're running in -o32, the calling routine never saves
	 * the full 64 bits of a register across a function call.  Being the
	 * interrupt handler, we're guaranteed that interrupts are disabled
	 * during this code so we don't have to worry about random interrupts
	 * blasting the high 32 bits.
	 */

	pending = read_c0_cause() & read_c0_status() & ST0_IM;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	if (pending & CAUSEF_IP7) /* Cpu performance counter interrupt */
		sbprof_cpu_intr();
	else
#endif

	if (pending & CAUSEF_IP4)
		sb1250_timer_interrupt();

#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		sb1250_mailbox_interrupt();
#endif

#ifdef CONFIG_KGDB
	else if (pending & CAUSEF_IP6)			/* KGDB (uart 1) */
		sb1250_kgdb_interrupt();
#endif

	else if (pending & CAUSEF_IP2) {
		unsigned long long mask;

		/*
		 * Default...we've hit an IP[2] interrupt, which means we've
		 * got to check the 1250 interrupt registers to figure out what
		 * to do.  Need to detect which CPU we're on, now that
		 * smp_affinity is supported.
		 */
		mask = __raw_readq(IOADDR(A_IMR_REGISTER(smp_processor_id(),
		                              R_IMR_INTERRUPT_STATUS_BASE)));
		if (mask)
			do_IRQ(fls64(mask) - 1);
		else
			spurious_interrupt();
	} else
		spurious_interrupt();
}
Example #17
File: time.c  Project: Cribstone/linino
void __init plat_timer_setup(struct irqaction *irq)
{
	/* we are using the cpu counter for timer interrupts */
	setup_irq(MIPS_CPU_TIMER_IRQ, irq);

	/* to generate the first timer interrupt */
	r4k_cur = (read_c0_count() + r4k_offset);
	write_c0_compare(r4k_cur);
}
Example #18
void __init mips_timer_setup(struct irqaction *irq)
{
	/* we are using the cpu counter for timer interrupts */
	irq->handler = no_action;     /* we use our own handler */
	setup_irq(MIPS_CPU_TIMER_IRQ, irq);

	/* to generate the first timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	set_c0_status(ALLINTS);
}
Example #19
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
		    break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry  */

	compare_change_hazard();
	if (!c0_compare_int_pending())
		return 0;

	write_c0_compare(read_c0_count());
	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}
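
In mainline kernels this probe gates clockevent setup. A simplified sketch of the call site (modelled on r4k_clockevent_init(); details differ between versions):

/* Sketch: how the probe above is consumed. */
int r4k_clockevent_init(void)
{
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/* ... configure the per-cpu clock_event_device and request the irq ... */
	return 0;
}
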
Example #20
void __init time_init(void)
{
	unsigned int est_freq;

	printk("calculating r4koff... ");
	r4k_offset = cal_r4koff();
	printk("%08lx(%d)\n", r4k_offset, (int) r4k_offset);

	//est_freq = 2*r4k_offset*HZ;
	est_freq = r4k_offset*HZ;
	est_freq += 5000;    /* round */
	est_freq -= est_freq%10000;
	printk("CPU frequency %d.%02d MHz\n", est_freq/1000000,
	       (est_freq%1000000)*100/1000000);
	set_au1x00_speed(est_freq);
	set_au1x00_lcd_clock(); // program the LCD clock
	r4k_cur = (read_c0_count() + r4k_offset);

	write_c0_compare(r4k_cur);

	/* no RTC on the pb1000 */
	xtime.tv_sec = 0;
	xtime.tv_usec = 0;

#ifdef CONFIG_PM
	/*
	 * setup counter 0, since it keeps ticking after a
	 * 'wait' instruction has been executed. The CP0 timer and
	 * counter 1 do NOT continue running after 'wait'
	 *
	 * It's too early to call request_irq() here, so we handle
	 * counter 0 interrupt as a special irq and it doesn't show
	 * up under /proc/interrupts.
	 */
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
	au_writel(0, SYS_TOYWRITE);
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);

	au_writel(au_readl(SYS_WAKEMSK) | (1<<8), SYS_WAKEMSK);
	au_writel(~0, SYS_WAKESRC);
	au_sync();
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);

	/* setup match20 to interrupt once every 10ms */
	last_pc0 = last_match20 = au_readl(SYS_TOYREAD);
	au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
	au_sync();
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
	startup_match20_interrupt();
#endif

	//set_c0_status(ALLINTS);
	au_sync();
}
Example #21
void mips_cpu_timer_enable(void)
{
	/* Unmask the CP0 timer interrupt: Status.IM7 (bit 15) */
	uint32_t sr = read_c0_status();
	sr |= ((0x1UL << 7) << 8);
	write_c0_status(sr);

	/* Clear Cause.DC (bit 27) so the Count register keeps counting */
	uint32_t cause = read_c0_cause();
	cause &= ~(0x1UL << 27);
	write_c0_cause(cause);

	/* Schedule the first compare interrupt one tick from now */
	write_c0_compare(read_c0_count() + COUNTER_TICK_COUNT);
}
Example #22
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
{
	unsigned int pending;

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	pending = read_c0_cause() & read_c0_status();

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	if (pending & CAUSEF_IP7)	/* Cpu performance counter interrupt */
		sbprof_cpu_intr(exception_epc(regs));
	else
#endif

	if (pending & CAUSEF_IP4)
		bcm1480_timer_interrupt(regs);

#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		bcm1480_mailbox_interrupt(regs);
#endif

#ifdef CONFIG_KGDB
	else if (pending & CAUSEF_IP6)
		bcm1480_kgdb_interrupt(regs);		/* KGDB (uart 1) */
#endif

	else if (pending & CAUSEF_IP2) {
		unsigned long long mask_h, mask_l;
		unsigned long base;

		/*
		 * Default...we've hit an IP[2] interrupt, which means we've
		 * got to check the 1480 interrupt registers to figure out what
		 * to do.  Need to detect which CPU we're on, now that
		 * smp_affinity is supported.
		 */
		base = A_BCM1480_IMR_MAPPER(smp_processor_id());
		mask_h = __raw_readq(
			IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
		mask_l = __raw_readq(
			IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));

		if (mask_h) {
			if (mask_h ^ 1)
				do_IRQ(fls64(mask_h) - 1, regs);
			else
				do_IRQ(63 + fls64(mask_l), regs);
		}
	}
}
Example #23
File: board.c  Project: xianjimli/misc
/**
 * This is the timer interrupt service routine.
 */
void rt_hw_timer_handler()
{
    unsigned int count;

    count = read_c0_compare();
    write_c0_compare(count);
    write_c0_count(0);

    /* increase an OS tick */
    rt_tick_increase();
}
Example #24

static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
	return res;
}
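
A set_next_event hook like mips_next_event() is consumed by the clockevents framework. Below is a minimal registration sketch (the helper name is hypothetical and the field values are illustrative, not taken from any one tree):

static void mips_clockevent_setup(unsigned int cpu, int irq)
{
	struct clock_event_device *cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name	   = "MIPS";
	cd->features	   = CLOCK_EVT_FEAT_ONESHOT;
	cd->rating	   = 300;
	cd->irq		   = irq;
	cd->cpumask	   = cpumask_of(cpu);
	cd->set_next_event = mips_next_event;

	/* derive mult/shift from mips_hpt_frequency and register the device */
	clockevents_config_and_register(cd, mips_hpt_frequency, 0x300, 0x7fffffff);
}
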
Example #25
void __init tx4938_timer_setup(struct irqaction *irq)
{
	u32 count;
	u32 c1;
	u32 c2;

	setup_irq(TX4938_IRQ_CPU_TIMER, irq);

	c1 = read_c0_count();
	count = c1 + (mips_hpt_frequency / HZ);
	write_c0_compare(count);
	c2 = read_c0_count();
}
Example #26
void __init it8172_timer_setup(struct irqaction *irq)
{
	puts("timer_setup\n");
	put32(NR_IRQS);
	puts("");
	/* we are using the cpu counter for timer interrupts */
	setup_irq(MIPS_CPU_TIMER_IRQ, irq);

	/* to generate the first timer interrupt */
	r4k_cur = (read_c0_count() + r4k_offset);
	write_c0_compare(r4k_cur);
	set_c0_status(ALLINTS);
}
Example #27
void __cpuinit synchronise_count_master(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
	       num_online_cpus());

	local_irq_save(flags);

	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, 1);
	smp_wmb();

	/* Count will be initialised to current timer for all CPU's */
	initcount = read_c0_count();

	nslaves = num_online_cpus()-1;
	for (i = 0; i < NR_LOOPS; i++) {
		/* wait for all slaves to reach the sync point */
		while (atomic_read(&count_count_start) != nslaves)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		while (atomic_read(&count_count_stop) != nslaves)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	printk("done.\n");
}
Example #28
void __cpuinit synchronise_count_slave(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	local_irq_save(flags);

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	while (!atomic_read(&count_start_flag))
		mb();

	/* Count will be initialised to next expire for all CPU's */
	initcount = atomic_read(&count_reference);

	ncpus = num_online_cpus();
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != ncpus)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != ncpus)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
Example #29
void __init  plat_timer_setup(struct irqaction *irq)
{
	/* Connect the timer interrupt */
	irq->dev_id = (void *) irq;
	setup_irq(BCM_LINUX_SYSTIMER_IRQ, irq);

#ifdef	CONFIG_MIPS_MT_SMTC 
	irq->handler = smtc_timer_interrupt;
	irq_desc[BCM_LINUX_SYSTIMER_IRQ].status &= ~IRQ_DISABLED;
	irq_desc[BCM_LINUX_SYSTIMER_IRQ].status |= IRQ_PER_CPU;
#endif

	/* Generate first timer interrupt */
	write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
}
Example #30
ulong get_timer(ulong base)
{
	unsigned int count;
	unsigned int expirelo = read_c0_compare();

	/* Check to see if we have missed any timestamps. */
	count = read_c0_count();
	while ((count - expirelo) < 0x7fffffff) {
		expirelo += CYCLES_PER_JIFFY;
		timestamp++;
	}
	write_c0_compare(expirelo);

	return timestamp - base;
}
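
For reference, a typical bounded wait built on top of get_timer(); some_condition() and TIMEOUT_TICKS are hypothetical, and the units are the ticks accumulated in timestamp above:

/* Illustrative only: poll some_condition() with a timeout. */
static int wait_for_condition(void)
{
	ulong start = get_timer(0);

	while (!some_condition()) {
		if (get_timer(start) > TIMEOUT_TICKS)
			return -1;	/* timed out */
	}
	return 0;
}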