/*
 * sched_clock(): nanoseconds from a free-running 32-bit counter.
 * Returns 0 until the counter register pointer has been set up.
 */
unsigned long long notrace sched_clock(void)
{
	u32 ticks;

	if (!ctr)
		return 0;

	ticks = readl(ctr);
	return cyc_to_fixed_sched_clock(&cd, ticks, (u32)~0, SC_MULT, SC_SHIFT);
}
/*
 * sched_clock(): scheduler clock from the rk timer clocksource.
 * The hardware counter counts down, so the raw value is inverted to
 * yield an up-counting cycle count. Returns 0 before the timer base
 * has been mapped.
 */
unsigned long long notrace sched_clock(void)
{
	cycle_t count;

	if (!timer.cs_base)
		return 0;

	count = ~rk_timer_read_current_value(timer.cs_base);
	return cyc_to_fixed_sched_clock(&cd, count, MASK, MULT, SHIFT);
}
/*
 * sched_clock(): scheduler clock from the clocksource timer.
 * The counter counts down; inverting gives an up-counting value.
 */
unsigned long long notrace sched_clock(void)
{
	u32 count = ~RK_TIMER_READVALUE(TIMER_CLKSRC);

	return cyc_to_fixed_sched_clock(&cd, count, MASK, MULT, SHIFT);
}
/*
 * sched_clock(): scheduler clock from the OS timer count register.
 * The full 32-bit mask indicates the counter wraps at 2^32.
 */
unsigned long long notrace sched_clock(void)
{
	u32 count = OSCR;

	return cyc_to_fixed_sched_clock(&cd, count, (u32)~0, SC_MULT, SC_SHIFT);
}
/*
 * _omap_32k_sched_clock(): convert the current 32k clocksource
 * reading into nanoseconds for the scheduler clock.
 */
static inline unsigned long long notrace _omap_32k_sched_clock(void)
{
	u32 count = clocksource_32k.read(&clocksource_32k);

	return cyc_to_fixed_sched_clock(&cd, count, (u32)~0, SC_MULT, SC_SHIFT);
}
/*
 * sched_clock(): scheduler clock from the microsecond counter
 * register (TIMERUS_CNTR_1US), scaled to nanoseconds.
 */
unsigned long long notrace sched_clock(void)
{
	u32 count = timer_readl(TIMERUS_CNTR_1US);

	return cyc_to_fixed_sched_clock(&cd, count, (u32)~0, SC_MULT, SC_SHIFT);
}
/*
 * sched_clock(): scheduler clock from the general-purpose timer.
 */
unsigned long long notrace sched_clock(void)
{
	u32 count = __gptimer_read(NULL);

	return cyc_to_fixed_sched_clock(&cd, count, (u32)~0, SC_MULT, SC_SHIFT);
}
/*
 * sched_clock(): scheduler clock from timer E, using the mult/shift
 * factors of the timer E clocksource itself.
 */
unsigned long long notrace sched_clock(void)
{
	struct clocksource *src = &clocksource_timer_e;
	u32 count = cycle_read_timerE(NULL);

	return cyc_to_fixed_sched_clock(&cd, count, (u32)~0,
					src->mult, src->shift);
}
/*
 * sched_clock(): scheduler clock from the tegra microsecond counter,
 * scaled to nanoseconds.
 */
unsigned long long notrace sched_clock(void)
{
	u32 count = tegra_read_usec();

	return cyc_to_fixed_sched_clock(&cd, count, (u32)~0, SC_MULT, SC_SHIFT);
}
/*
 * sched_clock(): scheduler clock from the rk30 clocksource timer.
 * The counter counts down; inverting yields a monotonically
 * increasing cycle value.
 */
unsigned long long notrace sched_clock(void)
{
	u32 count = ~rk30_timer_read_current_value(TIMER_CLKSRC);

	return cyc_to_fixed_sched_clock(&cd, count, MASK, MULT, SHIFT);
}
unsigned long long sched_clock(void) { u32 cyc = clocksource_stc.read(&clocksource_stc); return cyc_to_fixed_sched_clock(&cd, cyc, clocksource_stc.mask, SC_MULT, SC_SHIFT); }