/**
 * Detects the local APIC reference bus clock. The only sure-fire way to do
 * this is to depend on some other absolute timing source. This function uses
 * the CPU's cycle counter and the previously detected CPU clock frequency.
 *
 * Method: program the APIC timer with a large initial count, then spin,
 * repeatedly sampling both the APIC current-count register and the TSC, until
 * either source has advanced by tick_count. The APIC bus frequency is then
 * derived from the ratio of APIC ticks elapsed to TSC cycles elapsed.
 *
 * Returns the detected APIC timer frequency in kHz.
 *
 * NOTE: This assumes that the CPU's clock frequency has already been detected
 * (i.e., cpu_info[cpu_id()].arch.tsc_khz has been initialized).
 */
unsigned int __init lapic_calibrate_timer(void)
{
	/* Minimum number of ticks (from either source) for a meaningful result */
	const unsigned int tick_count = 100000000;
	cycles_t tsc_start, tsc_now;
	uint32_t apic_start, apic_now;
	unsigned int apic_Hz;

	/* Start the APIC counter running for calibration.
	 * The large initial count keeps the (down-counting) timer from
	 * expiring during the measurement window. */
	lapic_set_timer_count(4000000000);

	/* Take the starting samples from both time sources */
	apic_start = apic_read(APIC_TMCCT);
	tsc_start  = get_cycles_sync();

	/* Spin until enough ticks for a meaningful result have elapsed.
	 * Exits as soon as EITHER source has advanced by tick_count.
	 * Note the APIC timer counts DOWN, hence (apic_start - apic_now). */
	do {
		apic_now = apic_read(APIC_TMCCT);
		tsc_now  = get_cycles_sync();
	} while ( ((tsc_now - tsc_start) < tick_count)
	          && ((apic_start - apic_now) < tick_count) );

	/* apic_ticks * tsc_Hz / tsc_cycles = APIC ticks per second (Hz).
	 * The 1000L multiplier converts tsc_khz to Hz and forces the
	 * arithmetic to be done in a wider (long) type to avoid overflow. */
	apic_Hz = (apic_start - apic_now) * 1000L
	          * cpu_info[this_cpu].arch.tsc_khz / (tsc_now - tsc_start);

	lapic_stop_timer();

	/* Convert Hz to kHz for the caller */
	return (apic_Hz / 1000);
}
/*
 * Atomically-ish sample the HPET counter and the TSC.
 *
 * Some platforms take periodic SMI interrupts with 5ms duration. Make sure
 * none occurs between the reads of the HPET & TSC: bracket the HPET read
 * with two TSC reads and accept the sample only if the bracket is tight
 * (delta below TICK_MIN cycles). Retry up to MAX_TRIES times; if every try
 * looks contaminated, fall back to the last pair taken.
 *
 * BUGFIX: the original loop broke out when (tsc2 - tsc1) > TICK_MIN, i.e.
 * it accepted precisely the sample during which something (an SMI) ran
 * between the reads, and retried on clean samples. The accept condition
 * must be a SMALL delta.
 *
 * @hpet: out - HPET counter value sampled between the two TSC reads
 * @tsc:  out - TSC value read immediately after the HPET read
 */
static void __init read_hpet_tsc(int *hpet, int *tsc)
{
	int tsc1, tsc2 = 0, hpet1 = 0, i;

	for (i = 0; i < MAX_TRIES; i++) {
		tsc1 = get_cycles_sync();
		hpet1 = hpet_readl(HPET_COUNTER);
		tsc2 = get_cycles_sync();
		/* Tight bracket => no SMI interrupted the pair; accept it. */
		if (tsc2 - tsc1 < TICK_MIN)
			break;
	}
	*hpet = hpet1;
	*tsc = tsc2;
}
/*
 * TSC-warp measurement loop running on both CPUs:
 *
 * Each participating CPU repeatedly takes sync_lock, reads its TSC, and
 * compares it with the last TSC value published (possibly by the other CPU).
 * If its own reading is ever BEHIND the previously published one, the TSCs
 * have "warped" relative to each other; the worst-case warp is accumulated
 * in max_warp and counted in nr_warps.
 */
static __cpuinit void check_tsc_warp(void)
{
	cycles_t start, now, prev, end;
	int i;

	start = get_cycles_sync();
	/*
	 * The measurement runs for 20 msecs:
	 * (cpu_khz cycles per millisecond * 20)
	 */
	end = start + cpu_khz * 20ULL;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		__raw_spin_lock(&sync_lock);
		prev = last_tsc;
		now = get_cycles_sync();
		last_tsc = now;
		__raw_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 100 million
		 * loops safety exit, so we dont lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 100000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}

		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			/* Re-take the lock: max_warp/nr_warps are shared. */
			__raw_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			nr_warps++;
			__raw_spin_unlock(&sync_lock);
		}
	}
}
/*
 * Mark it noinline so we make sure it is not unrolled.
 * Wait until value is reached.
 *
 * Rendezvous step for TSC synchronization: this CPU serializes its
 * instruction stream, decrements its own per-cpu wait_sync counter, then
 * spins until wait_cpu's counter has dropped to 'value' (presumably the
 * other CPU performs the mirror-image call so both arrive together --
 * NOTE(review): confirm against the caller). Once released, it records
 * its TSC in the per-cpu tsc_count for later comparison.
 *
 * @wait_cpu: CPU whose wait_sync counter we spin on
 * @value:    spin until that counter is <= value
 */
static noinline void tsc_barrier(long wait_cpu, int value)
{
	/* Serialize: make sure earlier instructions have retired before
	 * we signal arrival and sample the TSC. */
	sync_core();
	/* Announce this CPU's arrival at the barrier. */
	per_cpu(wait_sync, smp_processor_id())--;
	do {
		/* Compiler barrier only: forces wait_sync to be re-read
		 * each iteration of the spin loop. */
		barrier();
	} while (unlikely(per_cpu(wait_sync, wait_cpu) > value));
	/* Both sides released: sample this CPU's TSC as close to the
	 * release point as possible. */
	__get_cpu_var(tsc_count) = get_cycles_sync();
}
static __always_inline void do_vgettimeofday(struct timeval * tv) { long sequence, t; unsigned long sec, usec; do { sequence = read_seqbegin(&__xtime_lock); sec = __xtime.tv_sec; usec = (__xtime.tv_nsec / 1000) + (__jiffies - __wall_jiffies) * (1000000 / HZ); if (__vxtime.mode != VXTIME_HPET) { t = get_cycles_sync(); if (t < __vxtime.last_tsc) t = __vxtime.last_tsc; usec += ((t - __vxtime.last_tsc) * __vxtime.tsc_quot) >> 32; /* See comment in x86_64 do_gettimeofday. */ } else { usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) - __vxtime.last) * __vxtime.quot) >> 32; } } while (read_seqretry(&__xtime_lock, sequence));