/*
 * Measure the CP0 Count (high-precision timer) frequency on SEAD-3.
 *
 * The board status register at 0xbf000410 exposes a sampling-clock bit
 * (mask 0x2) that toggles every 10 ms.  We zero Count, busy-wait for 100
 * toggles (exactly one second) and read Count back: the result is the
 * counter frequency in Hz.  Interrupts are disabled for the whole
 * measurement so nothing perturbs the timing.
 */
static __init unsigned int sead3_measure_hpt_freq(void)
{
	void __iomem *status_reg = (void __iomem *)0xbf000410;
	unsigned int freq, orig, tick = 0;
	unsigned long flags;

	local_irq_save(flags);

	orig = readl(status_reg) & 0x2;		/* get original sample */
	/* wait for transition */
	while ((readl(status_reg) & 0x2) == orig)
		;
	orig = orig ^ 0x2;			/* flip the bit */

	/* Start counting from a clock-edge-aligned zero */
	write_c0_count(0);

	/* wait 1 second (the sampling clock transitions every 10ms) */
	while (tick < 100) {
		/* wait for transition */
		while ((readl(status_reg) & 0x2) == orig)
			;
		orig = orig ^ 0x2;		/* flip the bit */
		tick++;
	}

	/* Elapsed Count ticks over one second == frequency in Hz */
	freq = read_c0_count();

	local_irq_restore(flags);

	return freq;
}
/*
 * Slave side of the boot-time CP0 Count synchronisation handshake with
 * the master CPU (see synchronise_count_master()).  Runs NR_LOOPS
 * rendezvous rounds with the master (two participants, hence the
 * '!= 2' spins) and loads the shared reference count on the last round.
 *
 * NOTE(review): 'initcount' is not declared in this function - it is
 * presumably a file-scope variable the master fills in; confirm against
 * the full file.
 */
void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* Rendezvous entry: master + this slave both bump the flag */
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/* Rendezvous exit */
		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
/*
 * Determine the MIPS counter frequency for BCM47xx.  The CPU clock is
 * read from whichever system bus (SSB or BCMA) the SoC uses; Count is
 * driven at half that rate.
 */
void __init plat_time_init(void)
{
	unsigned long hz = 0;

	/* Start Count/Compare from known values before calibration runs */
	write_c0_count(0);
	write_c0_compare(0xffff);

	switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
	case BCM47XX_BUS_TYPE_SSB:
		hz = ssb_cpu_clock(&bcm47xx_bus.ssb.mipscore) / 2;
		break;
#endif
#ifdef CONFIG_BCM47XX_BCMA
	case BCM47XX_BUS_TYPE_BCMA:
		hz = bcma_cpu_clock(&bcm47xx_bus.bcma.bus.drv_mips) / 2;
		break;
#endif
	}

	if (!hz)
		hz = 100000000;		/* fall back to 100 MHz */

	/* Publish the counter frequency to the generic MIPS time code */
	mips_hpt_frequency = hz;
}
/*
 * Calibrate the CP0 timer against the free-running CRIME clock and
 * install the periodic timer interrupt (SGI IP32).
 */
void __init ip32_timer_setup (struct irqaction *irq)
{
	u64 crime_time;
	u32 cc_tick;

	write_c0_count(0);
	irq->handler = cc_timer_interrupt;

	printk("Calibrating system timer... ");
	/*
	 * Sample Count over exactly WAIT_MS milliseconds of CRIME time,
	 * then scale up to ticks per HZ interval.
	 */
	crime_time = crime_read_64(CRIME_TIME) & CRIME_TIME_MASK;
	cc_tick = read_c0_count();

	while ((crime_read_64 (CRIME_TIME) & CRIME_TIME_MASK) - crime_time < WAIT_MS * 1000000 / CRIME_NS_PER_TICK)
		;
	cc_tick = read_c0_count() - cc_tick;
	cc_interval = cc_tick / HZ * (1000 / WAIT_MS);
	/*
	 * The round-off seems unnecessary; in testing, the error of the
	 * above procedure is < 100 ticks, which means it gets filtered
	 * out by the HZ adjustment.
	 */
	cc_interval = (cc_interval / PER_MHZ) * PER_MHZ;

	printk("%d MHz CPU detected\n", (int) (cc_interval / PER_MHZ));

	setup_irq (CLOCK_IRQ, irq);

#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
	/* Set ourselves up for future interrupts */
	write_c0_compare(read_c0_count() + cc_interval);
	change_c0_status(ST0_IM, ALLINTS);
	local_irq_enable();
}
/*
 * Slave side of per-CPU counter synchronisation.  Waits until the
 * master signals this CPU's turn (count_start_flag == cpu), then runs
 * NR_LOOPS two-party rendezvous rounds with the master and loads the
 * published reference count on the final round.
 */
void __cpuinit synchronise_count_slave(int cpu)
{
	int i;
	unsigned int initcount;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */
	while (atomic_read(&count_start_flag) != cpu)
		mb();

	/* Count will be initialised to next expire for all CPU's */
	initcount = atomic_read(&count_reference);

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}

	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
/*
 * Calibrate mips_hpt_frequency against the CRIME timer (SGI IP32):
 * zero both counters, spin for WAIT_MS milliseconds of CRIME time,
 * then scale the elapsed Count ticks up to ticks per second.
 */
void __init plat_time_init(void)
{
	printk(KERN_INFO "Calibrating system timer... ");
	write_c0_count(0);
	crime->timer = 0;
	/* busy-wait WAIT_MS milliseconds on the CRIME clock */
	while (crime->timer < CRIME_MASTER_FREQ * WAIT_MS / 1000) ;
	mips_hpt_frequency = read_c0_count() * 1000 / WAIT_MS;
	/* the reported CPU clock is twice the Count rate */
	printk("%d MHz CPU detected\n", mips_hpt_frequency * 2 / 1000000);
}
/*
 * PNX8550 time initialisation: register clockevent/clocksource, start
 * hardware timers 1 and 2 (stop timer 3), and derive the counter
 * frequency from the PLL0 settings.
 */
__init void plat_time_init(void)
{
	unsigned int configPR;
	unsigned int n;
	unsigned int m;
	unsigned int p;
	unsigned int pow2p;

	/*
	 * NOTE(review): the clockevent is registered with cpu_none_mask,
	 * and before mips_hpt_frequency is computed below - verify this
	 * ordering/mask is intentional against the full driver.
	 */
	pnx8xxx_clockevent.cpumask = cpu_none_mask;
	clockevents_register_device(&pnx8xxx_clockevent);
	clocksource_register(&pnx_clocksource);

	/* Timer 1 start */
	configPR = read_c0_config7();
	configPR &= ~0x00000008;
	write_c0_config7(configPR);

	/* Timer 2 start */
	configPR = read_c0_config7();
	configPR &= ~0x00000010;
	write_c0_config7(configPR);

	/* Timer 3 stop */
	configPR = read_c0_config7();
	configPR |= 0x00000020;
	write_c0_config7(configPR);

	/* PLL0 sets MIPS clock (PLL1 <=> TM1, PLL6 <=> TM2, PLL5 <=> mem) */
	/* (but only if CLK_MIPS_CTL select value [bits 3:1] is 1: FIXME) */

	n = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_N_MASK) >> 16;
	m = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_M_MASK) >> 8;
	p = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_P_MASK) >> 2;
	pow2p = (1 << p);

	db_assert(m != 0 && pow2p != 0);

	/*
	 * Compute the frequency as in the PNX8550 User Manual 1.0, p.186
	 * (a.k.a. 8-10). Divide by HZ for a timer offset that results in
	 * HZ timer interrupts per second.
	 */
	mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p));
	cpj = (mips_hpt_frequency + HZ / 2) / HZ;
	write_c0_count(0);
	timer_ack();

	/* Setup Timer 2 */
	write_c0_count2(0);
	write_c0_compare2(0xffffffff);

	setup_irq(PNX8550_INT_TIMER1, &pnx8xxx_timer_irq);
	setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction);
}
/**
 * Timer interrupt service routine: acknowledge the CP0 compare
 * interrupt, restart the counter, and advance the OS tick.
 */
void rt_hw_timer_handler()
{
	unsigned int compare_val = read_c0_compare();

	/* Re-writing Compare acknowledges the pending timer interrupt */
	write_c0_compare(compare_val);
	/* Restart Count from zero for the next tick period */
	write_c0_count(0);

	/* increase a OS tick */
	rt_tick_increase();
}
/*
 * Generic MIPS time initialisation: run the platform hook (which sets
 * mips_hpt_frequency etc.), then register the R4k clocksource when the
 * Count register is safe to use.
 */
void __init time_init(void)
{
#ifdef CONFIG_HR_SCHED_CLOCK
	/*
	 * NOTE(review): mips_clockevent_init() is called here before
	 * plat_time_init() and again below - confirm the double call and
	 * the ordering relative to the platform hook are intended.
	 */
	if (!mips_clockevent_init() || !cpu_has_mfc0_count_bug())
		write_c0_count(0);
#endif
	plat_time_init();
	if (!mips_clockevent_init() || !cpu_has_mfc0_count_bug())
		init_mips_clocksource();
}
/*
 * Hook up the Broadcom system timer interrupt and schedule the first
 * tick.
 */
static __init void brcm_timer_setup(struct irqaction *irq)
{
	unsigned int count;

	/* Connect the timer interrupt */
	irq->dev_id = (void *) irq;
	setup_irq(BCM_LINUX_SYSTIMER_IRQ, irq);

	/* Generate first timer interrupt */
	count = read_c0_count();
	/*
	 * BUG FIX: this previously wrote count + 1000 into the Count
	 * register, which merely advances the free-running counter and
	 * cannot raise an interrupt.  The CP0 timer interrupt fires when
	 * Count reaches Compare, so the deadline belongs in Compare.
	 */
	write_c0_compare(count + 1000);
}
/*
 * Master side of boot-time CP0 Count synchronisation: publish a
 * reference count and the start flag, then run NR_LOOPS rendezvous
 * rounds with all online slaves so every CPU loads its counter at
 * (nearly) the same instant.
 */
void __cpuinit synchronise_count_master(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
	/* SMTC synchronises per VPE, not per CPU - nothing to do */
	return;
#endif

	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
	       num_online_cpus());

	local_irq_save(flags);

	/* Publish the reference value, then release the slaves */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, 1);
	smp_wmb();

	/* Count will be initialised to the current timer for all CPU's */
	initcount = read_c0_count();

	nslaves = num_online_cpus()-1;
	for (i = 0; i < NR_LOOPS; i++) {
		/* wait for every slave to reach the rendezvous */
		while (atomic_read(&count_count_start) != nslaves)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();
		/* this increment releases the slaves */
		atomic_inc(&count_count_start);
		/* everyone initialises Count in the last loop */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);
		/* wait for all slaves to leave the synchronisation point */
		while (atomic_read(&count_count_stop) != nslaves)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}

	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	printk("done.\n");
}
/*
 * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect.
 *
 * On SEAD the value is hardwired (no RTC available); on Atlas/Malta the
 * Count register is timed over exactly one RTC second.  The result is
 * doubled for cores whose Count ticks at half the pipeline clock, then
 * rounded to the nearest 10 kHz.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
	unsigned int prid = read_c0_prid() & 0xffff00;
	unsigned int count;

#ifdef CONFIG_MIPS_SEAD
	/*
	 * The SEAD board doesn't have a real time clock, so we can't
	 * really calculate the timer frequency
	 * For now we hardwire the SEAD board frequency to 12MHz.
	 */
	if ((prid == (PRID_COMP_MIPS | PRID_IMP_20KC)) ||
	    (prid == (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count = 12000000;
	else
		count = 6000000;
#endif
#if defined(CONFIG_MIPS_ATLAS) || defined(CONFIG_MIPS_MALTA)
	/*
	 * BUG FIX: 'flags' was declared unsigned int.  local_irq_save()
	 * stores the full interrupt state and requires an unsigned long;
	 * a 32-bit variable silently truncates it on 64-bit kernels.
	 */
	unsigned long flags;

	local_irq_save(flags);

	/* Start counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Start r4k counter. */
	write_c0_count(0);

	/* Read counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	count = read_c0_count();

	/* restore interrupts */
	local_irq_restore(flags);
#endif

	mips_hpt_frequency = count;
	/* Non-20KC/25KF cores clock Count at half rate, so double it */
	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count *= 2;

	count += 5000;    /* round */
	count -= count%10000;

	return count;
}
/*
 * Slave side of boot-time CP0 Count synchronisation: wait for the
 * master's start flag, then run NR_LOOPS rendezvous rounds with all
 * online CPUs and load the reference count on the final round.
 */
void __cpuinit synchronise_count_slave(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	local_irq_save(flags);

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */
	while (!atomic_read(&count_start_flag))
		mb();

	/* Count will be initialised to next expire for all CPU's */
	initcount = atomic_read(&count_reference);

	ncpus = num_online_cpus();
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != ncpus)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != ncpus)
			mb();
	}

	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
/*
 * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
 *
 * The live branch (#if 1) hardwires the frequency; the disabled #else
 * branch holds the original RTC-based calibration for reference.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
	unsigned int prid = read_c0_prid() & 0xffff00;
	unsigned int count;

#if 1
	/*
	 * hardwire the board frequency to 12MHz.
	 */
	if ((prid == (PRID_COMP_MIPS | PRID_IMP_20KC)) ||
	    (prid == (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count = 12000000;
	else
		count = 6000000;
#else
	/* Dead code: RTC-based calibration kept for reference only.
	 * NOTE(review): if ever re-enabled, 'flags' should be unsigned
	 * long for local_irq_save(). */
	unsigned int flags;

	local_irq_save(flags);

	/* Start counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Start r4k counter. */
	write_c0_count(0);

	/* Read counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	count = read_c0_count();

	/* restore interrupts */
	local_irq_restore(flags);
#endif

	mips_hpt_frequency = count;
	/* Non-20KC/25KF cores clock Count at half rate, so double it */
	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count *= 2;

	count += 5000;    /* round */
	count -= count%10000;

	return count;
}
/*
 * BCM947xx time initialisation: derive the counter frequency from the
 * silicon-backplane CPU clock, hook up the optional CPCI RTC, set the
 * watchdog/panic timeout from NVRAM, and map the UART MCR register
 * used for the status blink.
 */
void __init bcm947xx_time_init(void)
{
	unsigned int hz;
	extifregs_t *eir;

	/*
	 * Use deterministic values for initial counter interrupt
	 * so that calibrate delay avoids encountering a counter wrap.
	 */
	write_c0_count(0);
	write_c0_compare(0xffff);

	/* Query the backplane for the CPU clock; fall back to 100 MHz */
	if (!(hz = sb_mips_clock(sbh)))
		hz = 100000000;

#if defined(CONFIG_BCM94702_CPCI) || defined(CONFIG_BCM94704_CPCI)
	/* Init RTC */
	rtc17xx_tod_init();
	rtc17xx_tod_print();
	/* Use RTC from local bus */
	rtc_get_time = rtc17xx_get_time;
	rtc_set_time = rtc17xx_set_time;
#endif

	printk("CPU: BCM%04x rev %d at %d MHz\n", sb_chip(sbh), sb_chiprev(sbh),
	       (hz + 500000) / 1000000);

	/* Set MIPS counter frequency for fixed_rate_gettimeoffset() */
	mips_hpt_frequency = hz / 2;

	/* Set watchdog interval in ms */
	watchdog = simple_strtoul(nvram_safe_get("watchdog"), NULL, 0);

	/* Set panic timeout in seconds */
	panic_timeout = watchdog / 1000;

	/* Setup blink */
	if ((eir = sb_setcore(sbh, SB_EXTIF, 0))) {
		sbconfig_t *sb = (sbconfig_t *) ((unsigned int) eir + SBCONFIGOFF);
		unsigned long base = EXTIF_CFGIF_BASE(sb_base (readl((void *) (&sb->sbadmatch1))));
		/* Map just the single MCR byte of the UART */
		mcr = (u8 *) ioremap_nocache(base + UART_MCR, 1);
	}
}
int timer_init(void) { #ifdef CONFIG_MIPS_CPU_PR4450 unsigned int config7; /* enable and start counter */ config7 = read_c0_config7(); config7 &= ~0x00000008; write_c0_config7(config7); #endif // CONFIG_MIPS_CPU_PR4450 /* Set up the timer for the first expiration. */ timestamp = 0; write_c0_count(0); write_c0_compare(~0); return 0; }
/*
 * Determine the MIPS counter frequency from the SSB core clock.
 */
void __init bcm47xx_time_init(void)
{
	unsigned long rate;

	/*
	 * Use deterministic values for initial counter interrupt
	 * so that calibrate delay avoids encountering a counter wrap.
	 */
	write_c0_count(0);
	write_c0_compare(0xffff);

	/* Count ticks at half the CPU clock; fall back to 100 MHz */
	rate = ssb_cpu_clock(&ssb.mipscore) / 2;
	if (rate == 0)
		rate = 100000000;

	/* Set MIPS counter frequency for fixed_rate_gettimeoffset() */
	mips_hpt_frequency = rate;
}
/*
 * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect.
 *
 * The board's sampling-clock status bit (mask 0x2 in *status_reg, a
 * file-scope register mapping) toggles every 10 ms; Count is timed over
 * 100 toggles (one second), doubled for cores whose counter runs at
 * half rate, then rounded to the nearest 10 kHz.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
	unsigned int prid = read_c0_prid() & 0xffff00;
	unsigned int tick = 0;
	unsigned int freq;
	unsigned int orig;
	unsigned long flags;

	local_irq_save(flags);

	orig = readl(status_reg) & 0x2;		/* get original sample */
	/* wait for transition */
	while ((readl(status_reg) & 0x2) == orig)
		;
	orig = orig ^ 0x2;			/* flip the bit */

	write_c0_count(0);

	/* wait 1 second (the sampling clock transitions every 10ms) */
	while (tick < 100) {
		/* wait for transition */
		while ((readl(status_reg) & 0x2) == orig)
			;
		orig = orig ^ 0x2;		/* flip the bit */
		tick++;
	}

	/* Elapsed Count ticks over one second == frequency in Hz */
	freq = read_c0_count();

	local_irq_restore(flags);

	mips_hpt_frequency = freq;

	/* Adjust for processor */
	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
		freq *= 2;

	freq += 5000;	/* rounding */
	freq -= freq%10000;

	return freq ;
}
/*
 * Generic MIPS time initialisation with an optional MET early counter
 * reset, then the platform hook, then clocksource registration.
 */
void __init time_init(void)
{
#ifdef CONFIG_MET
#ifdef CONFIG_CPU_SUPPORTS_HR_SCHED_CLOCK
	/*
	 * NOTE(review): this pre-plat_time_init() condition negates
	 * mips_clockevent_init() while the one below tests '!= 0' - the
	 * clockevent halves of the two tests are opposite; confirm which
	 * polarity is intended for the MET case.
	 */
	if (!mips_clockevent_init() || !cpu_has_mfc0_count_bug())
		write_c0_count(0);
#endif
#endif
	plat_time_init();

	/*
	 * The use of the R4k timer as a clock event takes precedence;
	 * if reading the Count register might interfere with the timer
	 * interrupt, then we don't use the timer as a clock source.
	 * We may still use the timer as a clock source though if the
	 * timer interrupt isn't reliable; the interference doesn't
	 * matter then, because we don't use the interrupt.
	 */
	if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
		init_mips_clocksource();
}
/*
 * Figure out the r4k offset, the amount to increment the compare
 * register for each time tick.
 * Use the Programmable Counter 1 to do this.
 *
 * The RTC is trimmed to tick at 32768/16 Hz, Count is timed over half
 * a second of RTC time, and the UART baud base is derived from the
 * resulting CPU speed.
 */
unsigned long cal_r4koff(void)
{
	unsigned long count;
	unsigned long cpu_speed;
	unsigned long start, end;
	unsigned long counter;
	int trim_divide = 16;
	unsigned long flags;

	spin_lock_irqsave(&time_lock, flags);

	/* Enable programmable counter 1 and wait for it to stabilise */
	counter = au_readl(SYS_COUNTER_CNTRL);
	au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
	au_writel(trim_divide-1, SYS_RTCTRIM);	/* RTC now ticks at 32.768/16 kHz */
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);

	/* Zero the TOY counter and wait for the write to complete */
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
	au_writel (0, SYS_TOYWRITE);
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);

	start = au_readl(SYS_RTCREAD);
	start += 2;
	/* wait for the beginning of a new tick */
	while (au_readl(SYS_RTCREAD) < start);

	/* Start r4k counter. */
	write_c0_count(0);
	end = start + (32768 / trim_divide)/2; /* wait 0.5 seconds */

	while (end > au_readl(SYS_RTCREAD));

	count = read_c0_count();
	/* Count runs at half the CPU clock */
	cpu_speed = count * 2;
	mips_counter_frequency = count;
	/* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
	set_au1x00_uart_baud_base(((cpu_speed) / 4) / 16);
	spin_unlock_irqrestore(&time_lock, flags);
	return (cpu_speed / HZ);
}
/*
 * Slave-side CP0 Count synchronisation: wait for the master's start
 * flag, then run NR_LOOPS rendezvous rounds with every online CPU and
 * load the published reference count on the final round.
 */
void __cpuinit synchronise_count_slave(void)
{
	unsigned long irqflags;
	unsigned int ref_count;
	int online, loop;

#ifdef CONFIG_MIPS_MT_SMTC
	return;
#endif

	local_irq_save(irqflags);

	/* Spin until the master announces the reference count is valid */
	while (!atomic_read(&count_start_flag))
		mb();

	ref_count = atomic_read(&count_reference);
	online = num_online_cpus();

	for (loop = 0; loop < NR_LOOPS; loop++) {
		/* Rendezvous entry with the master and the other slaves */
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != online)
			mb();

		/* Only the final pass actually writes the counter */
		if (loop == NR_LOOPS-1)
			write_c0_count(ref_count);

		/* Rendezvous exit */
		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != online)
			mb();
	}

	/* Schedule a timer interrupt shortly after the sync point */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(irqflags);
}
//void __init bsp_timer_init(void) void __init plat_time_init(void) { unsigned int ocp; unsigned int cpu_freq_sel; /* set cp0_compare_irq and cp0_perfcount_irq */ #if 0 cp0_compare_irq = BSP_COMPARE_IRQ; //mark_bb , wana rm !! cp0_perfcount_irq = BSP_PERFCOUNT_IRQ; if (cp0_perfcount_irq == cp0_compare_irq) cp0_perfcount_irq = -1; #endif //write_c0_count(0); //mark_bb // mips_hpt_frequency = BSP_CPU0_FREQ / 2; cpu_freq_sel=GET_BITVAL(REG32(SYS_HW_STRAP), ST_CPU_FREQ_SEL_OFFSET, RANG4); ocp=cpu_clksel_table[cpu_freq_sel] * 1000000; mips_hpt_frequency = ocp / 2; write_c0_count(0); //need //mips_clockevent_init(cp0_compare_irq); // mark_bb , no need //mips_clocksource_init(); }
/*
 * Figure out the r4k offset, the amount to increment the compare
 * register for each time tick.
 * Use the RTC to calculate offset.
 *
 * Count is timed over exactly one RTC second (between two falling
 * edges of the update-in-progress flag), which yields the counter
 * frequency in Hz as a side effect (mips_hpt_frequency).
 */
static unsigned long __init cal_r4koff(void)
{
	/*
	 * BUG FIX: 'flags' was declared unsigned int.  local_irq_save()
	 * stores the full interrupt state and requires an unsigned long;
	 * a 32-bit variable silently truncates it on 64-bit kernels.
	 */
	unsigned long flags;

	local_irq_save(flags);

	/* Start counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Start r4k counter. */
	write_c0_count(0);

	/* Read counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Ticks elapsed in one RTC second == counter frequency in Hz */
	mips_hpt_frequency = read_c0_count();

	/* restore interrupts */
	local_irq_restore(flags);

	return (mips_hpt_frequency / HZ);
}
/*
 * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
 *
 * Depending on configuration the value is hardwired (SEAD), calibrated
 * against the RTC (Atlas/Malta), or read from the bootloader
 * environment (Avalanche).  The result is doubled for cores whose
 * Count ticks at half rate, then rounded to the nearest 10 kHz.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
	unsigned int prid = read_c0_prid() & 0xffff00;
	unsigned int count;

#ifdef CONFIG_MIPS_SEAD
	/*
	 * The SEAD board doesn't have a real time clock, so we can't
	 * really calculate the timer frequency
	 * For now we hardwire the SEAD board frequency to 12MHz.
	 */
	if ((prid == (PRID_COMP_MIPS | PRID_IMP_20KC)) ||
	    (prid == (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count = 12000000;
	else
		count = 6000000;
#endif
#if defined(CONFIG_MIPS_ATLAS) || defined(CONFIG_MIPS_MALTA)
	/*
	 * BUG FIX: 'flags' was declared unsigned int.  local_irq_save()
	 * stores the full interrupt state and requires an unsigned long;
	 * a 32-bit variable silently truncates it on 64-bit kernels.
	 */
	unsigned long flags;

	local_irq_save(flags);

	/* Start counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Start r4k counter. */
	write_c0_count(0);

	/* Read counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	count = read_c0_count();

	/* restore interrupts */
	local_irq_restore(flags);
#endif
#if defined(CONFIG_MIPS_AVALANCHE_SOC)
	{
		char *cpu_freq_ptr;

		/* Prefer the bootloader-provided frequency, else the
		   compile-time default */
		cpu_freq_ptr = prom_getenv("cpufrequency");
		if(!cpu_freq_ptr)
		{
			cpu_freq = CONFIG_CPU_FREQUENCY_AVALANCHE * 1000000 ;
		}
		else
		{
			cpu_freq = simple_strtol(cpu_freq_ptr,NULL,0);
		}
#ifdef CONFIG_HIGH_RES_TIMERS
		count = cpu_freq;
#else
		count = cpu_freq/2;
#endif
	}
#endif

	mips_hpt_frequency = count;
	/* Non-20KC/25KF cores clock Count at half rate, so double it */
	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count *= 2;

	count += 5000;    /* round */
	count -= count%10000;

	return count;
}
/*
 * We read the real processor speed from the PLL. This is important
 * because it is more accurate than computing it from the 32KHz
 * counter, if it exists. If we don't have an accurate processor
 * speed, all of the peripherals that derive their clocks based on
 * this advertised speed will introduce error and sometimes not work
 * properly. This function is futher convoluted to still allow configurations
 * to do that in case they have really, really old silicon with a
 * write-only PLL register, that we need the 32KHz when power management
 * "wait" is enabled, and we need to detect if the 32KHz isn't present
 * but requested......got it? :-) -- Dan
 */
unsigned long cal_r4koff(void)
{
	unsigned long cpu_speed;
	unsigned long flags;
	unsigned long counter;

	spin_lock_irqsave(&time_lock, flags);

	/* Power management cares if we don't have a 32KHz counter. */
	no_au1xxx_32khz = 0;
	counter = au_readl(SYS_COUNTER_CNTRL);
	if (counter & SYS_CNTRL_E0) {
		/* The 32 kHz oscillator is present */
		int trim_divide = 16;

		/* Enable counter 1 and wait for it to stabilise */
		au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);
		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
		/* RTC now ticks at 32.768/16 kHz */
		au_writel(trim_divide-1, SYS_RTCTRIM);
		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);

		/* Zero the TOY counter and wait for the write to land */
		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
		au_writel (0, SYS_TOYWRITE);
		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);

#if defined(CONFIG_AU1000_USE32K)
		{
			unsigned long start, end, count;

			start = au_readl(SYS_RTCREAD);
			start += 2;
			/* wait for the beginning of a new tick */
			while (au_readl(SYS_RTCREAD) < start);

			/* Start r4k counter. */
			write_c0_count(0);

			/* Wait 0.5 seconds. */
			end = start + (32768 / trim_divide)/2;

			while (end > au_readl(SYS_RTCREAD));

			count = read_c0_count();
			/* Count runs at half the CPU clock */
			cpu_speed = count * 2;
		}
#else
		/* Trust the PLL instead of the 32 kHz calibration */
		cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
#endif
	} else {
		/* The 32KHz oscillator isn't running, so assume there
		 * isn't one and grab the processor speed from the PLL.
		 * NOTE: some old silicon doesn't allow reading the PLL.
		 */
		cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
		no_au1xxx_32khz = 1;
	}
	mips_hpt_frequency = cpu_speed;
	// Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16)
	set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)&0x03) + 2) * 16));
	spin_unlock_irqrestore(&time_lock, flags);
	return (cpu_speed / HZ);
}
/*
 * Rearm the CPU timer: restart Count from zero and park Compare at the
 * far end so no expiration fires until it is reprogrammed.
 */
void reset_timer(void)
{
	/* Restart counting from zero... */
	write_c0_count(0);
	/* ...with the next expiration pushed out to the maximum */
	write_c0_compare(~0);
}
/*
 * Master side of boot-time CP0 Count synchronisation: publish a
 * reference count and release the slaves, then run NR_LOOPS rendezvous
 * rounds so every CPU writes its counter at (nearly) the same instant.
 */
void __cpuinit synchronise_count_master(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU
	 * ignore for now
	 */
	return;
#endif

	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
	       num_online_cpus());

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, 1);
	smp_wmb();

	/* Count will be initialised to current timer for all CPU's */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */
	nslaves = num_online_cpus()-1;
	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= ncpus' */
		while (atomic_read(&count_count_start) != nslaves)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != nslaves)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}

	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}
/*
 * Master side of per-CPU counter synchronisation with a single slave
 * (see synchronise_count_slave()).  Two participants only, hence the
 * '!= 1' spins.
 *
 * NOTE(review): 'initcount' is not declared here - presumably a
 * file-scope variable shared with the slave; confirm in the full file.
 */
void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave writes its count register */
		atomic_inc(&count_count_start);

		/* Count will be initialised to current timer */
		if (i == 1)
			initcount = read_c0_count();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}

	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}
/**
 * Initialise the OS tick timer: program Compare with one tick period
 * and start Count from zero.
 */
void rt_hw_timer_init()
{
	/* One OS tick worth of CP0 Count cycles */
	write_c0_compare(CPU_HZ/2/RT_TICK_PER_SECOND);
	/* Begin counting towards the first tick */
	write_c0_count(0);
}