/*
 * Initialize the system timer: optionally calibrate the CPU clock,
 * seed xtime from the platform RTC, and arm the first CCOMPARE tick.
 */
void __init time_init(void)
{
	time_t sec_o, sec_n = 0;

	/* The platform must provide a function to calibrate the processor
	 * speed for the CALIBRATE.
	 */
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	printk("Calibrating CPU frequency ");
	platform_calibrate_ccount();
	printk("%d.%02d MHz\n",
	       (int)ccount_per_jiffy/(1000000/HZ),
	       (int)(ccount_per_jiffy/(10000/HZ))%100);
#endif

	/* Set time from RTC (if provided) */
	/* NOTE(review): spins while platform_get_rtc_time() returns nonzero,
	 * breaking out once the reading differs from the first sample —
	 * presumably to start xtime near a second boundary; verify against
	 * the platform's RTC return-value convention. */
	if (platform_get_rtc_time(&sec_o) == 0)
		while (platform_get_rtc_time(&sec_n))
			if (sec_o != sec_n)
				break;

	/* Seed the software clock and remember the CCOUNT value that
	 * corresponds to it; do_gettimeofday() interpolates from here. */
	xtime.tv_nsec = 0;
	last_rtc_update = xtime.tv_sec = sec_n;
	last_ccount_stamp = get_ccount();

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	/* Initialize the linux timer interrupt. */
	setup_irq(LINUX_TIMER_INT, &timer_irqaction);
	set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}
/*
 * CCOMPARE timer interrupt handler: account one jiffy for every timer
 * period that has elapsed since the last programmed deadline.
 */
irqreturn_t timer_interrupt (int irq, void *dev_id)
{
	unsigned long next;

	next = get_linux_timer();

again:
	/* CCOUNT is a free-running wrapping counter; the signed difference
	 * is positive while the deadline 'next' lies in the past. */
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
#endif

		xtime_update(1); /* Linux handler in kernel/time/timekeeping */

		/* Note that writing CCOMPARE clears the interrupt. */

		next += CCOUNT_PER_JIFFY;
		set_linux_timer(next);
	}

	/* Allow platform to do something useful (Wdog). */
	platform_heartbeat();

	/* Make sure we didn't miss any tick... the heartbeat may have
	 * taken long enough for another period to elapse. */
	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	return IRQ_HANDLED;
}
/*
 * CCOMPARE timer interrupt handler: account one jiffy for every timer
 * period that has elapsed since the last programmed deadline.
 */
irqreturn_t timer_interrupt (int irq, void *dev_id)
{
	unsigned long next;

	next = get_linux_timer();

again:
	/* Signed difference handles CCOUNT wraparound: positive while the
	 * deadline 'next' lies in the past. */
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
#endif

		xtime_update(1); /* generic tick accounting */

		/* Writing CCOMPARE (via set_linux_timer) clears the
		 * pending timer interrupt. */
		next += CCOUNT_PER_JIFFY;
		set_linux_timer(next);
	}

	/* Give the platform a chance to do periodic work (e.g. watchdog). */
	platform_heartbeat();

	/* Re-check in case the heartbeat took long enough for another
	 * period to elapse — don't lose a tick. */
	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	return IRQ_HANDLED;
}
/*
 * CCOMPARE timer interrupt handler (xtime_lock era): advance the wall
 * clock one jiffy per elapsed CCOUNT period and periodically write the
 * system time back to the RTC while NTP keeps the clock disciplined.
 */
irqreturn_t timer_interrupt (int irq, void *dev_id)
{
	unsigned long next;

	next = get_linux_timer();

again:
	/* Signed difference handles CCOUNT wraparound: positive while the
	 * deadline 'next' lies in the past. */
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
		update_process_times(user_mode(get_irq_regs()));
#endif

		write_seqlock(&xtime_lock);

		/* Record the CCOUNT value that corresponds to this tick so
		 * do_gettimeofday() can interpolate from it. */
		last_ccount_stamp = next;
		next += CCOUNT_PER_JIFFY;
		do_timer (1); /* Linux handler in kernel/timer.c */

		/* Sync the RTC roughly every 11 minutes (659 s) when NTP is
		 * synchronized, aiming the write just before a second
		 * boundary.  NOTE(review): verify the nsec window math
		 * against the generic timekeeping code of this era. */
		if (ntp_synced() &&
		    xtime.tv_sec - last_rtc_update >= 659 &&
		    abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ) {

			if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Do it again in 60 s */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}

	/* NOTE: writing CCOMPAREn clears the interrupt. */
	set_linux_timer (next);

	/* Make sure we didn't miss any tick... */
	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	/* Allow platform to do something useful (Wdog). */
	platform_heartbeat();

	return IRQ_HANDLED;
}
/*
 * clock_event_device callback: program CCOMPARE to fire 'delta' cycles
 * from now.  Returns -ETIME if CCOUNT raced past the deadline before
 * the write took effect, so the core can retry with a larger delta.
 */
static int ccount_timer_set_next_event(unsigned long delta,
		struct clock_event_device *dev)
{
	unsigned long flags;
	unsigned long deadline;
	int rc = 0;

	local_irq_save(flags);

	deadline = get_ccount() + delta;
	set_linux_timer(deadline);

	/* If the remaining distance exceeds 'delta', CCOUNT has already
	 * wrapped past the deadline — the event was missed. */
	if (deadline - get_ccount() > delta)
		rc = -ETIME;

	local_irq_restore(flags);

	return rc;
}
/*
 * Set the wall-clock time.
 *
 * @tv: new time; tv_nsec must be in [0, NSEC_PER_SEC).
 * Returns 0 on success, -EINVAL for an out-of-range nanosecond value.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;
	unsigned long ccount;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/* This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	ccount = get_ccount();
	nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;

	/* Keep wall_to_monotonic consistent: it must always offset xtime
	 * back to the monotonic clock, so shift it by the same amount in
	 * the opposite direction. */
	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* The clock jumped; discard NTP's accumulated state. */
	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	return 0;
}
static int boot_secondary(unsigned int cpu, struct task_struct *ts) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); unsigned long ccount; int i; #ifdef CONFIG_HOTPLUG_CPU cpu_start_id = cpu; system_flush_invalidate_dcache_range( (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); #endif smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); for (i = 0; i < 2; ++i) { do ccount = get_ccount(); while (!ccount); cpu_start_ccount = ccount; while (time_before(jiffies, timeout)) { mb(); if (!cpu_start_ccount) break; } if (cpu_start_ccount) { smp_call_function_single(0, mx_cpu_stop, (void *)cpu, 1); cpu_start_ccount = 0; return -EIO; } } return 0; }
/*
 * Initialize the system timer: optionally calibrate the CPU clock,
 * register CCOUNT as a clocksource, and arm the first CCOMPARE tick.
 */
void __init time_init(void)
{
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	printk("Calibrating CPU frequency ");
	platform_calibrate_ccount();
	printk("%d.%02d MHz\n",
	       (int)ccount_per_jiffy/(1000000/HZ),
	       (int)(ccount_per_jiffy/(10000/HZ))%100);
#endif
	/* CCOUNT ticks at CCOUNT_PER_JIFFY * HZ counts per second; let the
	 * core compute mult/shift from that frequency. */
	clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);

	/* Initialize the linux timer interrupt. */
	setup_irq(LINUX_TIMER_INT, &timer_irqaction);
	set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}
/*
 * Read the wall-clock time with sub-jiffy resolution by interpolating
 * CCOUNT cycles elapsed since the last tick (last_ccount_stamp).
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long sec, usec, delta, seq;

	/* Seqlock read loop: retry if a timer tick updated xtime while we
	 * were sampling it. */
	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);

		delta = get_ccount() - last_ccount_stamp;
		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / NSEC_PER_USEC);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Add the fractional jiffy measured in CCOUNT cycles. */
	usec += (delta * CCOUNT_NSEC) / NSEC_PER_USEC;

	/* Normalize: carry whole seconds out of the microsecond field. */
	for (; usec >= 1000000; sec++, usec -= 1000000)
		;

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
/*
 * Kick a secondary CPU and wait for it to come up.
 *
 * cpu_start_id / cpu_start_ccount are shared with the secondary, which
 * clears cpu_start_ccount to acknowledge; hence the WRITE_ONCE/READ_ONCE
 * accesses and the barriers pairing with memw instructions in the
 * assembly startup path.
 *
 * Returns 0 on success, -EIO if the secondary never acknowledged within
 * the one-second timeout.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in the cpu_restart */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		/* CCOUNT == 0 is the "not started" sentinel; skip it. */
		do
			ccount = get_ccount();
		while (!ccount);

		WRITE_ONCE(cpu_start_ccount, ccount);

		do {
			/*
			 * Pairs with the first two memws in the
			 * .Lboot_secondary.
			 */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		/* Still set after the timeout: the secondary never ran. */
		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}
/*
 * Initialize the system timer: seed xtime from the persistent clock,
 * optionally calibrate the CPU clock, register the CCOUNT clocksource,
 * and arm the first CCOMPARE tick.
 */
void __init time_init(void)
{
	/* FIXME: xtime&wall_to_monotonic are set in timekeeping_init. */
	read_persistent_clock(&xtime);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	printk("Calibrating CPU frequency ");
	platform_calibrate_ccount();
	printk("%d.%02d MHz\n",
	       (int)ccount_per_jiffy/(1000000/HZ),
	       (int)(ccount_per_jiffy/(10000/HZ))%100);
#endif
	/* Derive mult from the CCOUNT frequency (CCOUNT_PER_JIFFY * HZ
	 * counts per second) before registering the clocksource. */
	ccount_clocksource.mult =
		clocksource_hz2mult(CCOUNT_PER_JIFFY * HZ,
				    ccount_clocksource.shift);
	clocksource_register(&ccount_clocksource);

	/* Initialize the linux timer interrupt. */
	setup_irq(LINUX_TIMER_INT, &timer_irqaction);
	set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}
/*
 * Read the wall-clock time with sub-jiffy resolution.  The fraction of
 * the current jiffy is derived from how far CCOUNT still is from the
 * next programmed CCOMPARE deadline (get_linux_timer()).
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	/* NOTE(review): volatile on locals looks redundant — the seqlock
	 * retry already forces a re-read — but is kept as-is; confirm
	 * before removing. */
	unsigned long volatile sec, usec, delta, seq;

	/* Seqlock read loop: retry if a timer tick updated xtime while we
	 * were sampling it. */
	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);

		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / NSEC_PER_USEC);

		/* Cycles remaining until the next tick fires. */
		delta = get_linux_timer() - get_ccount();

	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Elapsed fraction of this jiffy = full period minus remainder. */
	usec += (((unsigned long) CCOUNT_PER_JIFFY - delta)
		 * (unsigned long) NSEC_PER_CCOUNT) / NSEC_PER_USEC;

	/* Normalize: carry whole seconds out of the microsecond field. */
	for (; usec >= 1000000; sec++, usec -= 1000000)
		;

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
static cycle_t ccount_read(void) { return (cycle_t)get_ccount(); }
/* sched_clock source: raw CCOUNT cycles (notrace — called by ftrace). */
static u64 notrace ccount_sched_clock_read(void)
{
	u64 cycles = get_ccount();

	return cycles;
}
static cycle_t ccount_read(struct clocksource *cs) { return (cycle_t)get_ccount(); }