/*
 * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
        unsigned int prid = read_c0_prid() & 0xffff00;
        unsigned int count;

#if 1
        /*
         * Hardwire the board frequency to 12MHz.
         */
        if ((prid == (PRID_COMP_MIPS | PRID_IMP_20KC)) ||
            (prid == (PRID_COMP_MIPS | PRID_IMP_25KF)))
                count = 12000000;
        else
                count = 6000000;
#else
        unsigned long flags;

        local_irq_save(flags);

        /* Start counter exactly on falling edge of update flag */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

        /* Start r4k counter. */
        write_c0_count(0);

        /* Read counter exactly on falling edge of update flag */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

        count = read_c0_count();

        /* restore interrupts */
        local_irq_restore(flags);
#endif

        mips_hpt_frequency = count;
        if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
            (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
                count *= 2;

        count += 5000;          /* round */
        count -= count % 10000;

        return count;
}
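/*
 * Illustration (not part of the original source): the tail of
 * estimate_cpu_frequency() doubles the measured rate because CP0 Count
 * ticks once every two CPU cycles on most MIPS cores, while the 20Kc
 * and 25Kf tick every cycle (hence the PRID check), and then rounds to
 * the nearest 10 kHz.  The rounding step in isolation:
 */
static unsigned int round_to_10khz(unsigned int freq)
{
        freq += 5000;           /* bias so truncation rounds to nearest */
        freq -= freq % 10000;   /* truncate to a 10 kHz boundary */
        return freq;            /* e.g. 199999400 -> 200000000 */
}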
static int vflash_block_markbad(int partition, int offset)
{
        ItcRpcMsg req;

        // Construct a request message
        memset((void *)&req, 0, sizeof(req));
        req.dev_func = DEV_FUNC(REMOTE_FLASH_DEVICE_ID,
                                REMOTE_BLOCK_MARKBAD, partition, 0);
        req.xid = read_c0_count();
        req.u0 = 0;
        req.u1 = offset;

#if DEBUG_DQM_IO
        printk("%s partition %d offset %08x\n", __func__, partition, offset);
#endif

        return do_rpc_io(&req);
}
static ssize_t ath_ioc_read(struct file *file, char *buf, size_t count,
                            loff_t *ppos)
{
        unsigned int c0, c1, ticks = (read_c0_count() - clocks_at_start);
        char str[256];
        unsigned int secs = ticks / mips_hpt_frequency;
        size_t len;

        read_cntrs(&c0, &c1);
        stop_cntrs();

        len = snprintf(str, sizeof(str), "%u secs (%#x) event0:%#x event1:%#x",
                       secs, ticks, c0, c1);
        if (len > count)
                len = count;
        if (copy_to_user(buf, str, len))
                return -EFAULT;

        return len;
}
int c0_compare_int_usable(void)
{
        unsigned int delta;
        unsigned int cnt;

        if (c0_compare_int_pending()) {
                cnt = read_c0_count();
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                        if (!c0_compare_int_pending())
                                break;
                if (c0_compare_int_pending())
                        return 0;
        }

        for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
                cnt = read_c0_count();
                cnt += delta;
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                if ((int)(read_c0_count() - cnt) < 0)
                        break;
        }

        while ((int)(read_c0_count() - cnt) <= 0)
                ;

        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (c0_compare_int_pending())
                        break;
        if (!c0_compare_int_pending())
                return 0;

        cnt = read_c0_count();
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (!c0_compare_int_pending())
                        break;
        if (c0_compare_int_pending())
                return 0;

        return 1;
}
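/*
 * A minimal sketch (assumed; the definition is not included in these
 * excerpts) of the c0_compare_int_pending() predicate used above: it
 * tests the Cause register bit that the Count/Compare timer asserts,
 * IP[cp0_compare_irq].
 */
static inline int c0_compare_int_pending(void)
{
        /* IP0 lives at Cause bit 8, so shift by the irq number and mask */
        return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}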
static int pnx8550_timers_read(char *page, char **start, off_t offset,
                               int count, int *eof, void *data)
{
        int len = 0;
        int configPR = read_c0_config7();

        if (offset == 0) {
                len += sprintf(&page[len],
                               "Timer: count, compare, tc, status\n");
                len += sprintf(&page[len], " 1: %11i, %8i, %1i, %s\n",
                               read_c0_count(), read_c0_compare(),
                               (configPR >> 6) & 0x1,
                               ((configPR >> 3) & 0x1) ? "off" : "on");
                len += sprintf(&page[len], " 2: %11i, %8i, %1i, %s\n",
                               read_c0_count2(), read_c0_compare2(),
                               (configPR >> 7) & 0x1,
                               ((configPR >> 4) & 0x1) ? "off" : "on");
                len += sprintf(&page[len], " 3: %11i, %8i, %1i, %s\n",
                               read_c0_count3(), read_c0_compare3(),
                               (configPR >> 8) & 0x1,
                               ((configPR >> 5) & 0x1) ? "off" : "on");
        }

        return len;
}
void handle_mips_systick(void)
{
        /* Clear EXL from status */
        uint32_t sr = read_c0_status();
        sr &= ~0x00000002;
        write_c0_status(sr);

        /* Call the interrupt entry routine */
        atomIntEnter();

        /* Call the OS system tick handler */
        atomTimerTick();

        /* Rearm the compare register for the next tick */
        write_c0_compare(read_c0_count() + COUNTER_TICK_COUNT);

        /* Call the interrupt exit routine */
        atomIntExit(TRUE);
}
/* Early setup - runs on TP1 after cache probe */
static void brcmstb_init_secondary(void)
{
#if defined(CONFIG_BMIPS4380)
        unsigned long cbr = BMIPS_GET_CBR();
        unsigned long old_vec = DEV_RD(cbr + BMIPS_RELO_VECTOR_CONTROL_1);

        /* make sure the NMI vector is in kseg0 now that we've booted */
        DEV_WR_RB(cbr + BMIPS_RELO_VECTOR_CONTROL_1, old_vec & ~0x20000000);
#elif defined(CONFIG_BMIPS5000)
        write_c0_brcm_bootvec(read_c0_brcm_bootvec() & ~0x20000000);
#endif
        brcmstb_ack_ipi(0);
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);

        /* hw irq lines 3+4 (gfap) go to tp0 (secondary thread) */
        set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5 | ST0_IE);
        irq_enable_hazard();
}
ulong get_timer(ulong base)
{
        unsigned int count;
        unsigned int expirelo = read_c0_compare();

        /* Check to see if we have missed any timestamps. */
        count = read_c0_count();
        asm("sync");
        while ((count - expirelo) < 0x7fffffff) {
                asm("sync");
                expirelo += CYCLES_PER_JIFFY;
                asm("sync");
                timestamp++;
        }
        asm("sync");
        write_c0_compare(expirelo);

        return (timestamp - base);
}
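/*
 * Illustration (not from the original source): the loop condition above
 * is a wrap-safe "count is at or past the deadline" test on 32-bit
 * unsigned values; the modular subtraction stays correct across Count
 * overflow as long as the two values are less than half the counter
 * range apart.
 */
static inline int count_reached(unsigned int count, unsigned int deadline)
{
        return (count - deadline) < 0x7fffffff;  /* modulo-2^32 compare */
}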
static int vflash_get_device_info(vflash_info_t *info)
{
        ItcRpcMsg req;

#if DEBUG_DQM_IO
        printk("-->%s info=%p\n", __func__, (void *)info);
#endif

        // Construct a request message
        memset((void *)&req, 0, sizeof(req));
        req.dev_func = DEV_FUNC(REMOTE_FLASH_DEVICE_ID,
                                REMOTE_GET_DEV_INFO,
                                LINUX_KERNEL_ROOTFS_PARTITION,
                                sizeof(struct vflash_info_t));
        req.xid = read_c0_count();
        req.u0 = (uint32)info;
        req.u1 = sizeof(struct vflash_info_t);

        /* Invalidate so the remote CPU's reply is read from memory,
           not from stale cache lines */
        bcm_cache_inv((uint32)info, sizeof(struct vflash_info_t));

        return do_rpc_io(&req);
}
/*
 * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
        unsigned int prid = read_c0_prid() & 0xffff00;
        unsigned int tick = 0;
        unsigned int freq;
        unsigned int orig;
        unsigned long flags;

        local_irq_save(flags);

        orig = readl(status_reg) & 0x2;         /* get original sample */
        /* wait for transition */
        while ((readl(status_reg) & 0x2) == orig)
                ;
        orig = orig ^ 0x2;                      /* flip the bit */

        write_c0_count(0);

        /* wait 1 second (the sampling clock transitions every 10ms) */
        while (tick < 100) {
                /* wait for transition */
                while ((readl(status_reg) & 0x2) == orig)
                        ;
                orig = orig ^ 0x2;              /* flip the bit */
                tick++;
        }

        freq = read_c0_count();

        local_irq_restore(flags);

        mips_hpt_frequency = freq;

        /* Adjust for processor */
        if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
            (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
                freq *= 2;

        freq += 5000;           /* rounding */
        freq -= freq % 10000;

        return freq;
}
static void stop_hardirq_count(void)
{
        unsigned int end_cnt = read_c0_count();
        struct kernel_stat_shadow *ks_shadow;

        ks_shadow = &per_cpu(kstat_shadow, smp_processor_id());
        ks_shadow->intrs++;

        if (end_cnt > ks_shadow->start_cnt)
                ks_shadow->accumulated_cnt += end_cnt - ks_shadow->start_cnt;
        else    // counter rolled over
                ks_shadow->accumulated_cnt +=
                        (UINT_MAX - ks_shadow->start_cnt) + end_cnt;

        if (cycles_per_tick == 0)
                cycles_per_tick = mips_hpt_frequency / HZ;

        // See if we have accumulated a whole tick
        if (ks_shadow->accumulated_cnt >= cycles_per_tick) {
                struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
                cputime64_t user_delta =
                        cpustat->user - ks_shadow->last_cpustat.user;
                cputime64_t system_delta =
                        cpustat->system - ks_shadow->last_cpustat.system;
                cputime64_t softirq_delta =
                        cpustat->softirq - ks_shadow->last_cpustat.softirq;
                cputime64_t idle_delta =
                        cpustat->idle - ks_shadow->last_cpustat.idle;

                // printk("TICK on %d in %d intrs!\n", smp_processor_id(),
                //        ks_shadow->intrs);
                cpustat->irq++;

                // subtract 1 tick from the field that has incremented the most
                if (user_delta > system_delta &&
                    user_delta > softirq_delta && user_delta > idle_delta)
                        cpustat->user--;
                else if (system_delta > user_delta &&
                         system_delta > softirq_delta &&
                         system_delta > idle_delta)
                        cpustat->system--;
                else if (softirq_delta > user_delta &&
                         softirq_delta > system_delta &&
                         softirq_delta > idle_delta)
                        cpustat->softirq--;
                else
                        cpustat->idle--;

                ks_shadow->accumulated_cnt -= cycles_per_tick;
                ks_shadow->intrs = 0;
                ks_shadow->last_cpustat = *cpustat;
        }
}
void __init mips_timer_setup(struct irqaction *irq)
{
#ifdef CONFIG_RALINK_EXTERNAL_TIMER
        u32 reg;
#endif

        /* we are using the cpu counter for timer interrupts */
        //irq->handler = no_action;     /* we use our own handler */
        setup_irq(RALINK_CPU_TIMER_IRQ, irq);

        /* to generate the first timer interrupt */
#ifndef CONFIG_RALINK_EXTERNAL_TIMER
        r4k_cur = (read_c0_count() + r4k_offset);
        write_c0_compare(r4k_cur);
#else
        r4k_cur = ((*((volatile u32 *)(RALINK_COUNT))) + r4k_offset);
        (*((volatile u32 *)(RALINK_COMPARE))) = r4k_cur;
        (*((volatile u32 *)(RALINK_MCNT_CFG))) = 3;
#endif

        set_c0_status(ALLINTS);
}
void dump_cp0(char *key)
{
        if (key == NULL)
                key = "";

        print_cp0(key, 0,  "INDEX   ", read_c0_index());
        print_cp0(key, 2,  "ENTRYLO0", read_c0_entrylo0());
        print_cp0(key, 3,  "ENTRYLO1", read_c0_entrylo1());
        print_cp0(key, 4,  "CONTEXT ", read_c0_context());
        print_cp0(key, 5,  "PAGEMASK", read_c0_pagemask());
        print_cp0(key, 6,  "WIRED   ", read_c0_wired());
        //print_cp0(key, 8, "BADVADDR", read_c0_badvaddr());
        print_cp0(key, 9,  "COUNT   ", read_c0_count());
        print_cp0(key, 10, "ENTRYHI ", read_c0_entryhi());
        print_cp0(key, 11, "COMPARE ", read_c0_compare());
        print_cp0(key, 12, "STATUS  ", read_c0_status());
        print_cp0(key, 13, "CAUSE   ", read_c0_cause() & 0xffff87ff);
        print_cp0(key, 16, "CONFIG  ", read_c0_config());
}
/*
 * Figure out the r4k offset, the amount to increment the compare
 * register for each time tick.
 * Use Programmable Counter 1 to do this.
 */
unsigned long cal_r4koff(void)
{
        unsigned long count;
        unsigned long cpu_speed;
        unsigned long start, end;
        unsigned long counter;
        int trim_divide = 16;
        unsigned long flags;

        spin_lock_irqsave(&time_lock, flags);

        counter = au_readl(SYS_COUNTER_CNTRL);
        au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);

        while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
        /* RTC now ticks at 32.768/16 kHz */
        au_writel(trim_divide - 1, SYS_RTCTRIM);
        while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);

        while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
        au_writel(0, SYS_TOYWRITE);
        while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);

        start = au_readl(SYS_RTCREAD);
        start += 2;
        /* wait for the beginning of a new tick */
        while (au_readl(SYS_RTCREAD) < start);

        /* Start r4k counter. */
        write_c0_count(0);
        end = start + (32768 / trim_divide) / 2;        /* wait 0.5 seconds */

        while (end > au_readl(SYS_RTCREAD));
        count = read_c0_count();
        cpu_speed = count * 2;
        mips_counter_frequency = count;

        set_au1x00_uart_baud_base(((cpu_speed) / 4) / 16);
        spin_unlock_irqrestore(&time_lock, flags);

        return (cpu_speed / HZ);
}
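/*
 * Worked numbers for cal_r4koff() above (illustrative, not from the
 * original source): after trimming, the RTC ticks at 32768/16 = 2048 Hz,
 * so the (32768 / trim_divide) / 2 = 1024 RTC ticks waited on spans
 * exactly 0.5 s, and doubling the Count delta sampled over that window
 * converts it to Count ticks per second.
 */
enum {
        CAL_RTC_HZ      = 32768 / 16,           /* RTC rate after trimming */
        CAL_WINDOW      = CAL_RTC_HZ / 2,       /* 1024 RTC ticks == 0.5 s */
};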
/*******************************************************************
 * vflash_write_buf:
 * create a request message and send it via do_rpc_io
 * do_rpc_io waits for response or timeout
 ******************************************************************/
static int vflash_write_buf(int partition, int offset, u_char *buffer,
                            int numbytes)
{
        ItcRpcMsg req;
        u_char *vmallocated_buf = NULL;
        int ret, is_buf_vmallocated;

        /* VMallocated (MMU-translated) memory can't be used by the eCos CPU */
        is_buf_vmallocated = KSEGX(buffer) == KSEG2;
        if (is_buf_vmallocated) {
                vmallocated_buf = buffer;
                buffer = kmalloc(numbytes, GFP_KERNEL);
                if (!buffer)
                        return -ENOMEM;
                memcpy(buffer, vmallocated_buf, numbytes);
        }

        // Construct a request message
        memset((void *)&req, 0, sizeof(req));
        req.dev_func = DEV_FUNC(REMOTE_FLASH_DEVICE_ID, REMOTE_WRITE,
                                partition, numbytes);
        req.xid = read_c0_count();
        req.u0 = (uint32)buffer;
        req.u1 = offset;

        bcm_cache_wback_inv((uint32)buffer, (uint32)numbytes);

#if DEBUG_DQM_IO
        printk("%s partition %d offset %08x buffer %p size %d\n",
               __func__, partition, offset, buffer, numbytes);
#endif

        ret = do_rpc_io(&req);

        if (is_buf_vmallocated)
                kfree(buffer);

        return ret;
}
void __cpuinit synchronise_count_slave(void)
{
        int i;
        unsigned long flags;
        unsigned int initcount;
        int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
        return;
#endif

        local_irq_save(flags);

        while (!atomic_read(&count_start_flag))
                mb();

        initcount = atomic_read(&count_reference);

        ncpus = num_online_cpus();
        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&count_count_start);
                while (atomic_read(&count_count_start) != ncpus)
                        mb();

                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                atomic_inc(&count_count_stop);
                while (atomic_read(&count_count_stop) != ncpus)
                        mb();
        }

        write_c0_compare(read_c0_count() + COUNTON);

        local_irq_restore(flags);
}
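/*
 * Sketch (an assumption - this side is not shown in the excerpts) of the
 * master-side release that synchronise_count_slave() waits for: the
 * master publishes a reference Count value, then raises count_start_flag
 * so the slaves fall out of their mb() spin and read it.
 */
static void synchronise_count_publish(void)
{
        atomic_set(&count_reference, read_c0_count());
        smp_wmb();      /* publish the reference before raising the flag */
        atomic_set(&count_start_flag, 1);
}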
/*
 * There are a lot of conceptually broken versions of the MIPS timer
 * interrupt handler floating around.  This one is rather different,
 * but the algorithm is probably more robust.
 */
void mips_timer_interrupt(struct pt_regs *regs)
{
        int irq = 7;                    /* FIX ME */

        if (r4k_offset == 0)
                goto null;

        do {
                kstat_this_cpu.irqs[irq]++;
                do_timer(regs);
#ifndef CONFIG_SMP
                update_process_times(user_mode(regs));
#endif
                r4k_cur += r4k_offset;
                ack_r4ktimer(r4k_cur);
        } while (((unsigned long)read_c0_count() - r4k_cur) < 0x7fffffff);

        return;

null:
        ack_r4ktimer(0);
}
/*
 * Figure out the r4k offset, the amount to increment the compare
 * register for each time tick.
 * Use the RTC to calculate the offset.
 */
static unsigned long __init cal_r4koff(void)
{
        unsigned long flags;

        local_irq_save(flags);

        /* Start counter exactly on falling edge of update flag */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

        /* Start r4k counter. */
        write_c0_count(0);

        /* Read counter exactly on falling edge of update flag */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

        mips_hpt_frequency = read_c0_count();

        /* restore interrupts */
        local_irq_restore(flags);

        return (mips_hpt_frequency / HZ);
}
uint64_t timestamp_get(void)
{
        return read_c0_count() / get_count_mhz_freq();
}
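/*
 * Hypothetical helper (an assumption - get_count_mhz_freq() is not shown
 * in these excerpts): timestamp_get() divides raw Count ticks by the
 * tick rate in MHz, yielding microseconds, so the helper presumably
 * reduces to something like this:
 */
static uint32_t get_count_mhz_freq(void)
{
        return mips_hpt_frequency / 1000000;    /* Count ticks per microsecond */
}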
irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
{
        int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * In an SMTC system, one Count/Compare set exists per VPE.
         * Which TC within a VPE gets the interrupt is essentially
         * random - we only know that it shouldn't be one with
         * IXMT set. Whichever TC gets the interrupt needs to
         * send special interprocessor interrupts to the other
         * TCs to make sure that they schedule, etc.
         *
         * That code is specific to the SMTC kernel, not to
         * a particular platform, so it's invoked from
         * the general MIPS timer_interrupt routine.
         */
        int vpflags;

        /*
         * We could be here due to timer interrupt,
         * perf counter overflow, or both.
         */
        if (read_c0_cause() & (1 << 26))
                perf_irq();

        if (read_c0_cause() & (1 << 30)) {
                /* If timer interrupt, make it de-assert */
                write_c0_compare(read_c0_count() - 1);
                /*
                 * DVPE is necessary so long as cross-VPE interrupts
                 * are done via read-modify-write of Cause register.
                 */
                vpflags = dvpe();
                clear_c0_cause(CPUCTR_IMASKBIT);
                evpe(vpflags);
                /*
                 * There are things we only want to do once per tick
                 * in an "MP" system. One TC of each VPE will take
                 * the actual timer interrupt. The others will get
                 * timer broadcast IPIs. We use whoever it is that takes
                 * the tick on VPE 0 to run the full timer_interrupt().
                 */
                if (cpu_data[cpu].vpe_id == 0) {
                        timer_interrupt(irq, NULL);
                        smtc_timer_broadcast(cpu_data[cpu].vpe_id);
                        scroll_display_message();
                } else {
                        write_c0_compare(read_c0_count() +
                                         (mips_hpt_frequency / HZ));
                        local_timer_interrupt(irq, dev_id);
                        smtc_timer_broadcast(cpu_data[cpu].vpe_id);
                }
        }
#else /* CONFIG_MIPS_MT_SMTC */
        int r2 = cpu_has_mips_r2;

        if (cpu == 0) {
                /*
                 * CPU 0 handles the global timer interrupt job and process
                 * accounting resets count/compare registers to trigger next
                 * timer int.
                 */
                if (!r2 || (read_c0_cause() & (1 << 26)))
                        if (perf_irq())
                                goto out;

                /* we keep interrupt disabled all the time */
                if (!r2 || (read_c0_cause() & (1 << 30)))
                        timer_interrupt(irq, NULL);

                scroll_display_message();
        } else {
                /* Everyone else needs to reset the timer int here as
                   ll_local_timer_interrupt doesn't */
                /*
                 * FIXME: need to cope with counter underflow.
                 * More support needs to be added to kernel/time for
                 * counter/timer interrupts on multiple CPU's
                 */
                write_c0_compare(read_c0_count() + (mips_hpt_frequency / HZ));

                /*
                 * Other CPUs should do profiling and process accounting
                 */
                local_timer_interrupt(irq, dev_id);
        }

out:
#endif /* CONFIG_MIPS_MT_SMTC */
        return IRQ_HANDLED;
}
static void reload_timer(void)
{
        uint32_t counter = read_c0_count();

        counter += TIMER0_INTERVAL;
        write_c0_compare(counter);
}
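/*
 * Design note with a sketch (not from the original source): rearming
 * relative to the current Count, as reload_timer() does, lets interrupt
 * latency accumulate as tick-to-tick drift.  A drift-free variant
 * advances from the previous deadline instead:
 */
static void reload_timer_absolute(void)
{
        static uint32_t next_deadline;  /* assumed seeded at timer start */

        next_deadline += TIMER0_INTERVAL;       /* advance from old deadline */
        write_c0_compare(next_deadline);
}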
/*
 * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
        unsigned int prid = read_c0_prid() & 0xffff00;
        unsigned int count;

#ifdef CONFIG_MIPS_SEAD
        /*
         * The SEAD board doesn't have a real time clock, so we can't
         * really calculate the timer frequency.
         * For now we hardwire the SEAD board frequency to 12MHz.
         */
        if ((prid == (PRID_COMP_MIPS | PRID_IMP_20KC)) ||
            (prid == (PRID_COMP_MIPS | PRID_IMP_25KF)))
                count = 12000000;
        else
                count = 6000000;
#endif
#if defined(CONFIG_MIPS_ATLAS) || defined(CONFIG_MIPS_MALTA)
        unsigned long flags;

        local_irq_save(flags);

        /* Start counter exactly on falling edge of update flag */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

        /* Start r4k counter. */
        write_c0_count(0);

        /* Read counter exactly on falling edge of update flag */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

        count = read_c0_count();

        /* restore interrupts */
        local_irq_restore(flags);
#endif
#if defined(CONFIG_MIPS_AVALANCHE_SOC)
        {
                char *cpu_freq_ptr;

                cpu_freq_ptr = prom_getenv("cpufrequency");
                if (!cpu_freq_ptr)
                        cpu_freq = CONFIG_CPU_FREQUENCY_AVALANCHE * 1000000;
                else
                        cpu_freq = simple_strtol(cpu_freq_ptr, NULL, 0);

#ifdef CONFIG_HIGH_RES_TIMERS
                count = cpu_freq;
#else
                count = cpu_freq / 2;
#endif
        }
#endif

        mips_hpt_frequency = count;
        if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
            (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
                count *= 2;

        count += 5000;          /* round */
        count -= count % 10000;

        return count;
}
static cycle_t c0_hpt_read(struct clocksource *cs)
{
        return read_c0_count();
}
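/*
 * Sketch of how such a read hook is typically registered (modelled on
 * the kernel's r4k clocksource; the exact names and call site outside
 * this excerpt are assumptions): Count is a free-running 32-bit counter
 * clocked at mips_hpt_frequency.
 */
static struct clocksource clocksource_mips = {
        .name   = "MIPS",
        .read   = c0_hpt_read,
        .mask   = CLOCKSOURCE_MASK(32),         /* 32-bit counter */
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init init_r4k_clocksource(void)
{
        if (!mips_hpt_frequency)
                return -ENXIO;
        return clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
}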
void __init plat_timer_setup(struct irqaction *irq)
{
        unsigned int est_freq;

        printk("calculating r4koff... ");
        r4k_offset = cal_r4koff();
        printk("%08lx(%d)\n", r4k_offset, (int)r4k_offset);

        //est_freq = 2*r4k_offset*HZ;
        est_freq = r4k_offset * HZ;
        est_freq += 5000;               /* round */
        est_freq -= est_freq % 10000;
        printk("CPU frequency %d.%02d MHz\n", est_freq / 1000000,
               (est_freq % 1000000) * 100 / 1000000);
        set_au1x00_speed(est_freq);
        set_au1x00_lcd_clock();         // program the LCD clock

        r4k_cur = (read_c0_count() + r4k_offset);
        write_c0_compare(r4k_cur);

#ifdef CONFIG_PM
        /*
         * Setup counter 0, since it keeps ticking after a
         * 'wait' instruction has been executed. The CP0 timer and
         * counter 1 do NOT continue running after 'wait'.
         *
         * It's too early to call request_irq() here, so we handle
         * counter 0 interrupt as a special irq and it doesn't show
         * up under /proc/interrupts.
         *
         * Check to ensure we really have a 32KHz oscillator before
         * we do this.
         */
        if (no_au1xxx_32khz) {
                unsigned int c0_status;

                printk("WARNING: no 32KHz clock found.\n");

                /* Ensure we get CP0 counter interrupts. */
                c0_status = read_c0_status();
                c0_status |= IE_IRQ5;
                write_c0_status(c0_status);
        } else {
                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
                au_writel(0, SYS_TOYWRITE);
                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);

                au_writel(au_readl(SYS_WAKEMSK) | (1 << 8), SYS_WAKEMSK);
                au_writel(~0, SYS_WAKESRC);
                au_sync();
                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);

                /* setup match20 to interrupt once every HZ */
                last_pc0 = last_match20 = au_readl(SYS_TOYREAD);
                au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
                au_sync();
                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
                startup_match20_interrupt(counter0_irq);

                /* We can use the real 'wait' instruction. */
                allow_au1k_wait = 1;
        }
#endif
}
irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();

        /*
         * CPU 0 handles the global timer interrupt job:
         * it resets the count/compare registers to trigger the next
         * timer int.
         */
#ifndef CONFIG_MIPS_MT_SMTC
        if (cpu == 0) {
                timer_interrupt(irq, dev_id);
        } else {
                /* Everyone else needs to reset the timer int here as
                   ll_local_timer_interrupt doesn't */
                /*
                 * FIXME: need to cope with counter underflow.
                 * More support needs to be added to kernel/time for
                 * counter/timer interrupts on multiple CPU's
                 */
                write_c0_compare(read_c0_count() + (mips_hpt_frequency / HZ));
        }
#else /* SMTC */
        /*
         * In an SMTC system, one Count/Compare set exists per VPE.
         * Which TC within a VPE gets the interrupt is essentially
         * random - we only know that it shouldn't be one with
         * IXMT set. Whichever TC gets the interrupt needs to
         * send special interprocessor interrupts to the other
         * TCs to make sure that they schedule, etc.
         *
         * That code is specific to the SMTC kernel, not to
         * the simulation platform, so it's invoked from
         * the general MIPS timer_interrupt routine.
         *
         * We have a problem in that the interrupt vector code
         * had to turn off the timer IM bit to avoid redundant
         * entries, but we may never get to mips_cpu_irq_end
         * to turn it back on again if the scheduler gets
         * involved.  So we clear the pending timer here,
         * and re-enable the mask...
         */
        int vpflags = dvpe();

        write_c0_compare(read_c0_count() - 1);
        clear_c0_cause(0x100 << cp0_compare_irq);
        set_c0_status(0x100 << cp0_compare_irq);
        irq_enable_hazard();
        evpe(vpflags);

        if (cpu_data[cpu].vpe_id == 0)
                timer_interrupt(irq, dev_id);
        else
                write_c0_compare(read_c0_count() + (mips_hpt_frequency / HZ));
        smtc_timer_broadcast(cpu_data[cpu].vpe_id);
#endif /* CONFIG_MIPS_MT_SMTC */

        /*
         * every CPU should do profiling and process accounting
         */
        local_timer_interrupt(irq, dev_id);

        return IRQ_HANDLED;
#else
        return timer_interrupt(irq, dev_id);
#endif
}
/*
 * We read the real processor speed from the PLL.  This is important
 * because it is more accurate than computing it from the 32KHz
 * counter, if it exists.  If we don't have an accurate processor
 * speed, all of the peripherals that derive their clocks based on
 * this advertised speed will introduce error and sometimes not work
 * properly.  This function is further convoluted to still allow
 * configurations to compute the speed from the 32KHz counter, in case
 * they have really, really old silicon with a write-only PLL register.
 * We need the 32KHz when power management "wait" is enabled, and we
 * need to detect if the 32KHz isn't present but requested......got
 * it? :-)  -- Dan
 */
unsigned long cal_r4koff(void)
{
        unsigned long cpu_speed;
        unsigned long flags;
        unsigned long counter;

        spin_lock_irqsave(&time_lock, flags);

        /* Power management cares if we don't have a 32KHz counter. */
        no_au1xxx_32khz = 0;
        counter = au_readl(SYS_COUNTER_CNTRL);
        if (counter & SYS_CNTRL_E0) {
                int trim_divide = 16;

                au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);

                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
                /* RTC now ticks at 32.768/16 kHz */
                au_writel(trim_divide - 1, SYS_RTCTRIM);
                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);

                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
                au_writel(0, SYS_TOYWRITE);
                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);

#if defined(CONFIG_AU1000_USE32K)
                {
                        unsigned long start, end, count;

                        start = au_readl(SYS_RTCREAD);
                        start += 2;
                        /* wait for the beginning of a new tick */
                        while (au_readl(SYS_RTCREAD) < start);

                        /* Start r4k counter. */
                        write_c0_count(0);

                        /* Wait 0.5 seconds. */
                        end = start + (32768 / trim_divide) / 2;

                        while (end > au_readl(SYS_RTCREAD));

                        count = read_c0_count();
                        cpu_speed = count * 2;
                }
#else
                cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) *
                        AU1000_SRC_CLK;
#endif
        } else {
                /*
                 * The 32KHz oscillator isn't running, so assume there
                 * isn't one and grab the processor speed from the PLL.
                 * NOTE: some old silicon doesn't allow reading the PLL.
                 */
                cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) *
                        AU1000_SRC_CLK;
                no_au1xxx_32khz = 1;
        }
        mips_hpt_frequency = cpu_speed;

        // Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16)
        set_au1x00_uart_baud_base(cpu_speed /
                (2 * ((int)(au_readl(SYS_POWERCTRL) & 0x03) + 2) * 16));
        spin_unlock_irqrestore(&time_lock, flags);

        return (cpu_speed / HZ);
}
static void paravirt_smp_finish(void)
{
        /* to generate the first CPU timer interrupt */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
        local_irq_enable();
}
static void start_hardirq_count(void)
{
        struct kernel_stat_shadow *ks_shadow =
                &per_cpu(kstat_shadow, smp_processor_id());

        ks_shadow->start_cnt = read_c0_count();
}
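/*
 * Usage sketch (an assumption, not from the original source): the shadow
 * accounting pair is meant to bracket hard-irq execution, so a dispatch
 * path would wrap each handler roughly like this (handler signature is
 * illustrative):
 */
static void dispatch_irq(int irq, void (*handler)(int))
{
        start_hardirq_count();  /* snapshot Count on entry */
        handler(irq);
        stop_hardirq_count();   /* accumulate elapsed cycles, credit ticks */
}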
static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
        return read_c0_count();
}
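/*
 * Sketch of the registration that usually accompanies this read-out
 * (modelled on the kernel's csrc-r4k setup; treat the exact call site as
 * an assumption): Count is 32 bits wide and runs at mips_hpt_frequency.
 */
static void __init r4k_sched_clock_init(void)
{
        sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
}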
void synchronise_count_master(int cpu)
{
        int i;
        unsigned long flags;

        printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

        local_irq_save(flags);

        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronised and
         * the master and slaves each set their cycle counters to a known
         * value all at once. This reduces the chance of having random offsets
         * between the processors, and guarantees that the maximum
         * delay between the cycle counters is never bigger than
         * the latency of information-passing (cachelines) between
         * two CPUs.
         */

        for (i = 0; i < NR_LOOPS; i++) {
                /* slaves loop on '!= 2' */
                while (atomic_read(&count_count_start) != 1)
                        mb();
                atomic_set(&count_count_stop, 0);
                smp_wmb();

                /* Let the slave write its count register */
                atomic_inc(&count_count_start);

                /* Count will be initialised to current timer */
                if (i == 1)
                        initcount = read_c0_count();

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                /*
                 * Wait for slave to leave the synchronization point:
                 */
                while (atomic_read(&count_count_stop) != 1)
                        mb();
                atomic_set(&count_count_start, 0);
                smp_wmb();
                atomic_inc(&count_count_stop);
        }

        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);

        local_irq_restore(flags);

        /*
         * i386 code reported the skew here, but the
         * count registers were almost certainly out of sync
         * so no point in alarming people
         */
        printk("done.\n");
}