static unsigned long long vmware_sched_clock(void)
{
        unsigned long long ns;

        /* Scale the raw TSC to nanoseconds, then subtract the boot-time
         * offset so the clock starts near zero. */
        ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
                             vmware_cyc2ns.cyc2ns_shift);
        ns -= vmware_cyc2ns.cyc2ns_offset;
        return ns;
}
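mul_u64_u32_shr() is the fixed-point workhorse here: it computes (cycles * mul) >> shift with a 128-bit intermediate product, so a full 64-bit TSC value can be scaled without overflow. A minimal user-space sketch of the same arithmetic (GCC/Clang __int128; the factor values below are assumptions, chosen so a 1 GHz TSC maps one cycle to exactly one nanosecond):

#include <stdint.h>
#include <stdio.h>

/* Same operation as the kernel helper: the product is held in 128 bits,
 * so even a large 64-bit cycle count cannot overflow before the shift. */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, uint32_t shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
        /* Assumed factors for a 1 GHz TSC: one cycle is one nanosecond,
         * so mul = 1 << shift makes the conversion exact. */
        uint32_t mul = 1u << 31, shift = 31;
        uint64_t cycles = 5000000000ull; /* five seconds worth of cycles */

        printf("%llu ns\n",
               (unsigned long long)mul_u64_u32_shr(cycles, mul, shift));
        return 0;
}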
static void __init vmware_sched_clock_setup(void)
{
        struct cyc2ns_data *d = &vmware_cyc2ns;
        unsigned long long tsc_now = rdtsc();

        /* Compute a mul/shift pair converting TSC cycles to ns. */
        clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
                               vmware_tsc_khz, NSEC_PER_MSEC, 0);
        /* Record the current reading so sched_clock() starts near zero. */
        d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
                                           d->cyc2ns_shift);

        pv_time_ops.sched_clock = vmware_sched_clock;
        pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
}
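For a feel of the numbers: with NSEC_PER_MSEC (1 000 000) as the target unit, the pair must satisfy cycles * mul >> shift ≈ cycles * 1000000 / tsc_khz. The sketch below derives such a pair using a simplified stand-in for clocks_calc_mult_shift() (calc_mult_shift() here is a hypothetical helper; the real routine also bounds conversion error over a requested interval) and shows that, assuming a 2.496 GHz TSC, a reading taken at setup converts to 0 ns once the recorded offset is subtracted:

#include <stdint.h>
#include <stdio.h>

/* (a * mul) >> shift with a 128-bit intermediate, as in the sketch above. */
static uint64_t scale(uint64_t a, uint32_t mul, uint32_t shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

/* Simplified stand-in for clocks_calc_mult_shift(): take the largest
 * shift (capped at 31, which also matches the perf constraint quoted
 * below) for which mul = (1000000 << shift) / tsc_khz fits in 32 bits. */
static void calc_mult_shift(uint32_t *mul, uint32_t *shift, uint32_t tsc_khz)
{
        uint32_t s = 32;
        uint64_t m;

        do {
                s--;
                m = ((uint64_t)1000000 << s) / tsc_khz;
        } while (m > 0xffffffffull);

        *mul = (uint32_t)m;
        *shift = s;
}

int main(void)
{
        uint32_t mul, shift;
        uint64_t tsc_at_setup = 123456789012ull; /* pretend rdtsc() result */

        calc_mult_shift(&mul, &shift, 2496000); /* assumed 2.496 GHz TSC */

        /* One second worth of cycles converts to ~1e9 ns... */
        printf("mul=%u shift=%u, 1s -> %llu ns\n", mul, shift,
               (unsigned long long)scale(2496000000ull, mul, shift));

        /* ...and a reading taken at setup, minus the recorded offset,
         * reads as zero, which is where sched_clock starts. */
        uint64_t offset = scale(tsc_at_setup, mul, shift);
        printf("clock at setup = %llu ns\n",
               (unsigned long long)(scale(tsc_at_setup, mul, shift) - offset));
        return 0;
}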
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        struct cyc2ns_data data;
        unsigned long long ns;

        /* Take a consistent snapshot of this CPU's conversion factors. */
        cyc2ns_read_begin(&data);

        ns = data.cyc2ns_offset;
        ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

        cyc2ns_read_end();

        return ns;
}
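cyc2ns_read_begin()/cyc2ns_read_end() take that snapshot through a seqcount latch: the writer keeps two copies of the data and bumps a sequence counter around each update, so even an NMI that interrupts the writer mid-update can read the copy that is not being modified. A rough standalone sketch of the pattern, with C11 atomics and fences standing in for the kernel's seqcount primitives (latch_read()/latch_write() are illustrative names; the write side corresponds to the raw_write_seqcount_latch() pairs in the scale-setting code below):

#include <stdint.h>
#include <stdatomic.h>

struct cyc2ns_data {
        uint32_t cyc2ns_mul;
        uint32_t cyc2ns_shift;
        uint64_t cyc2ns_offset;
};

struct latch {
        atomic_uint seq;
        struct cyc2ns_data data[2];
};

/* Write side: each increment flips the copy readers select via seq & 1,
 * so the slot being rewritten is never the one a reader observing the
 * new sequence value will pick. */
static void latch_write(struct latch *l, struct cyc2ns_data fresh)
{
        atomic_fetch_add_explicit(&l->seq, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release); /* kernel: smp_wmb() */
        l->data[0] = fresh;
        atomic_fetch_add_explicit(&l->seq, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        l->data[1] = fresh;
}

/* Read side: take the copy selected by the low bit of seq and retry in
 * the unlikely case the writer moved underneath us. The plain struct
 * copy stands in for the kernel's per-CPU reads. */
static struct cyc2ns_data latch_read(struct latch *l)
{
        struct cyc2ns_data snap;
        unsigned int seq;

        do {
                seq = atomic_load_explicit(&l->seq, memory_order_acquire);
                snap = l->data[seq & 1];
                atomic_thread_fence(memory_order_acquire); /* kernel: smp_rmb() */
        } while (seq != atomic_load_explicit(&l->seq, memory_order_relaxed));

        return snap;
}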
static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
        unsigned long long ns_now;
        struct cyc2ns_data data;
        struct cyc2ns *c2n;

        ns_now = cycles_2_ns(tsc_now);

        /* Compute a new multiplier for the updated frequency. */
        clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
                               NSEC_PER_MSEC, 0);

        /*
         * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
         * not expected to be greater than 31 due to the original published
         * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
         * value) - refer perf_event_mmap_page documentation in perf_event.h.
         */
        if (data.cyc2ns_shift == 32) {
                data.cyc2ns_shift = 31;
                data.cyc2ns_mul >>= 1;
        }

        /* Pick the offset so the clock is continuous across the change. */
        data.cyc2ns_offset = ns_now -
                mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

        c2n = per_cpu_ptr(&cyc2ns, cpu);

        /* Publish through the seqcount latch: update each copy in turn. */
        raw_write_seqcount_latch(&c2n->seq);
        c2n->data[0] = data;
        raw_write_seqcount_latch(&c2n->seq);
        c2n->data[1] = data;
}

static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
        unsigned long flags;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();