/*
 * Convert a counter reading into nanoseconds of sched_clock time.
 *
 * NOTE(review): both parameters are ignored -- the counter is re-read
 * below after a consistent epoch has been captured, and the global
 * sched_clock_mask is used instead of @mask.  All visible callers pass
 * (read_sched_clock(), sched_clock_mask), so behavior matches, but
 * confirm this divergence from the upstream signature is intentional.
 */
static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	/* While suspended, report frozen time rather than a stale counter. */
	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	/* Sample the counter only after a consistent epoch was observed. */
	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}
/*
 * Re-seed the sched_clock epoch on resume and mark the clock running
 * again, but only for sources that participate in suspend handling.
 */
static void sched_clock_resume(void)
{
	if (!cd.needs_suspend)
		return;

	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}
/*
 * Scheduler clock entry point (CONFIG_SEC_DEBUG variant).
 *
 * Fix: the original placed an unconditional "return cyc_to_sched_clock(...)"
 * after #endif, which is unreachable dead code whenever CONFIG_SEC_DEBUG is
 * defined.  Restructured as #if/#else/#endif, matching the sibling
 * CONFIG_QC_ABNORMAL_DEBUG_CODE variant; behavior is unchanged in both
 * configurations.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
#ifdef CONFIG_SEC_DEBUG
	u64 local = cyc_to_sched_clock(cyc, sched_clock_mask);

	/* Record the most recent timestamp for post-mortem debugging. */
	sec_debug_save_last_ns(local);
	return local;
#else
	return cyc_to_sched_clock(cyc, sched_clock_mask);
#endif
}
/*
 * Scheduler clock entry point (CONFIG_QC_ABNORMAL_DEBUG_CODE variant).
 * When the debug option is enabled, the computed timestamp is also
 * published to last_ns before being returned.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();

#if defined(CONFIG_QC_ABNORMAL_DEBUG_CODE)
	u64 ns = cyc_to_sched_clock(cyc, sched_clock_mask);

	atomic64_set(&last_ns, ns);
	return ns;
#else
	return cyc_to_sched_clock(cyc, sched_clock_mask);
#endif
}
/*
 * Atomically update the sched_clock epoch.
 *
 * Fix: the reader (cyc_to_sched_clock) loads epoch_cyc, then epoch_ns,
 * then compares against epoch_cyc_copy.  For that comparison to detect
 * a torn update, the writer must store in the mirror order:
 * epoch_cyc_copy first, then epoch_ns, then epoch_cyc.  The previous
 * order (epoch_cyc first, epoch_cyc_copy last) let a reader pair a
 * stale epoch_cyc with a freshly written epoch_ns while epoch_cyc_copy
 * still matched the stale value, producing a bogus timestamp.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}
/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	/* Project the current time from the old epoch before publishing. */
	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock(): the stores below are the
	 * mirror image of that reader's load order (copy first, then ns,
	 * then cyc), so any reader that overlaps this update sees
	 * epoch_cyc != epoch_cyc_copy and retries.  The smp_wmb() calls
	 * pair with the reader's smp_rmb() calls to enforce the ordering.
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}
/* 32-bit counter flavour of sched_clock: sample and convert in one step. */
static unsigned long long notrace sched_clock_32(void)
{
	return cyc_to_sched_clock(read_sched_clock(), sched_clock_mask);
}