/*
 * TSC-warp measurement loop running on both CPUs. This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
	WARN(!(now-start), "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
		now-start, end-start);

	return cur_max_warp;
}
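For illustration, here is a minimal user-space sketch of the same pattern: two threads swap TSC readings through a mutex-protected slot and flag any reading that is older than its predecessor. It assumes GCC/Clang on x86 Linux; the names warp_check and ITERATIONS (a fixed loop count standing in for the msec timeout) are hypothetical, and _mm_lfence() followed by __rdtsc() only approximates what rdtsc_ordered() does in the kernel.

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <x86intrin.h>

/* Shared state, analogous to last_tsc/max_warp under sync_lock above. */
static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_tsc;
static uint64_t max_warp;

/* Hypothetical fixed loop count, standing in for the msec timeout. */
#define ITERATIONS 1000000L

static void *warp_check(void *arg)
{
	(void)arg;
	for (long i = 0; i < ITERATIONS; i++) {
		/* Read the TSC and swap it with the previously stored value. */
		pthread_mutex_lock(&sync_lock);
		uint64_t prev = last_tsc;
		_mm_lfence();		/* rough stand-in for rdtsc_ordered() */
		uint64_t now = __rdtsc();
		last_tsc = now;
		pthread_mutex_unlock(&sync_lock);

		/* A reading older than its predecessor is a warp. */
		if (prev > now) {
			pthread_mutex_lock(&sync_lock);
			if (prev - now > max_warp)
				max_warp = prev - now;
			pthread_mutex_unlock(&sync_lock);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, warp_check, NULL);
	pthread_create(&b, NULL, warp_check, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("max observed TSC warp: %llu cycles\n",
	       (unsigned long long)max_warp);
	return 0;
}

Build with "gcc -O2 -pthread". For a meaningful cross-CPU test the two threads should be pinned to different CPUs (e.g. with pthread_setaffinity_np()); left unpinned, the scheduler may or may not spread them.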
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered();

	return now;
}
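msr_read_counter() reads the event's programmed MSR when event_base is set and otherwise falls back to the TSC. A rough user-space analog of that two-way read, assuming Linux's /dev/cpu/N/msr interface (msr.ko loaded, root privileges); read_counter is a hypothetical helper and the IA32_APERF address (0xE8) is just one example of a counter MSR.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <x86intrin.h>

/*
 * Read a counter MSR via /dev/cpu/N/msr (pread at offset == MSR
 * address returns the 64-bit value), falling back to the TSC when
 * no MSR address is given -- mirroring the event_base == 0 case.
 */
static uint64_t read_counter(int msr_fd, uint32_t msr_addr)
{
	uint64_t now;

	if (msr_addr) {
		if (pread(msr_fd, &now, sizeof(now), msr_addr) != sizeof(now))
			return 0;
	} else {
		now = __rdtsc();
	}
	return now;
}

int main(void)
{
	/* IA32_APERF (0xE8) on CPU 0; requires root and msr.ko. */
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd >= 0) {
		printf("aperf: %llu\n",
		       (unsigned long long)read_counter(fd, 0xE8));
		close(fd);
	}
	printf("tsc:   %llu\n", (unsigned long long)read_counter(-1, 0));
	return 0;
}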
/*
 * trace_clock_x86_tsc(): A clock that is just the cycle counter.
 *
 * Unlike the other clocks, this is not in nanoseconds.
 */
u64 notrace trace_clock_x86_tsc(void)
{
	return rdtsc_ordered();
}
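Because trace_clock_x86_tsc() returns raw cycles, a consumer that wants nanoseconds must scale by the TSC frequency: ns = cycles * 10^6 / tsc_khz. A minimal sketch of that conversion, assuming a known invariant TSC frequency of 3 GHz; cycles_to_ns and TSC_KHZ are hypothetical (the kernel itself uses a precomputed mult/shift pair rather than a runtime division), and unsigned __int128 is a GCC/Clang extension used here to avoid overflow.

#include <stdio.h>
#include <stdint.h>

#define TSC_KHZ 3000000ULL	/* assumed calibration result: 3 GHz */

static uint64_t cycles_to_ns(uint64_t cycles)
{
	/* ns = cycles * 1e6 / tsc_khz; widen to 128 bits before multiplying. */
	return (uint64_t)((unsigned __int128)cycles * 1000000 / TSC_KHZ);
}

int main(void)
{
	/* 3,000,000 cycles at 3 GHz is exactly 1 ms. */
	printf("%llu ns\n", (unsigned long long)cycles_to_ns(3000000));
	return 0;
}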