/*
 * sched_clock()
 */
unsigned long long sched_clock(void)
{
	cycle_t cyc = ixp4xx_get_cycles(NULL);
	struct clocksource *cs = &clocksource_ixp4xx;

	return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
}

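All of these snippets funnel through clocksource_cyc2ns(). For reference, it is a plain fixed-point multiply-and-shift; a sketch of the helper as it appears in include/linux/clocksource.h of this era:

static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}

In other words, mult and shift together encode the counter's rate as ns-per-cycle ~= mult / 2^shift.
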
/*
 * sched_clock()
 */
unsigned long long sched_clock(void)
{
	cycle_t cyc = cycle_read_timerE(NULL);
	struct clocksource *cs = &clocksource_timer_e;

	return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
}

unsigned long long notrace sched_clock(void)
{
	return clocksource_cyc2ns(clocksource_u300_1mhz.read(
					&clocksource_u300_1mhz),
				  clocksource_u300_1mhz.mult,
				  clocksource_u300_1mhz.shift);
}

/**
 * read_persistent_clock - Return time from a persistent clock.
 *
 * Reads the time from a source which isn't disabled during PM, the
 * 32k sync timer.  Converts the cycles elapsed since the last read
 * into nsecs and adds them to a monotonically increasing timespec.
 */
void read_persistent_clock(struct timespec *ts)
{
	struct omap_32k_sync_device *omap = thecs;
	unsigned long long nsecs;
	cycles_t delta;
	struct timespec *tsp;

	if (!omap) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
		return;
	}

	tsp = &omap->persistent_ts;

	omap->last_cycles = omap->cycles;
	omap->cycles = omap->cs.read(&omap->cs);
	delta = omap->cycles - omap->last_cycles;

	nsecs = clocksource_cyc2ns(delta, omap->cs.mult, omap->cs.shift);

	timespec_add_ns(tsp, nsecs);
	*ts = *tsp;
}

/*
 * Override the global weak sched_clock symbol with this
 * local implementation which uses the clocksource to get some
 * better resolution when scheduling the kernel.  We accept that
 * this wraps around for now, since it is just a relative time
 * stamp.  (Inspired by the OMAP implementation.)
 */
unsigned long long notrace sched_clock(void)
{
	return clocksource_cyc2ns(nmdk_clksrc.read(&nmdk_clksrc),
				  nmdk_clksrc.mult, nmdk_clksrc.shift);
}

static unsigned long __init boottime_get_time(void)
{
	return div_s64(clocksource_cyc2ns(clocksource_dbx500_prcmu.read(
						&clocksource_dbx500_prcmu),
					  clocksource_dbx500_prcmu.mult,
					  clocksource_dbx500_prcmu.shift),
		       1000);
}

/*
 * Returns current time from boot in nsecs.  It's OK for this to wrap
 * around for now, as it's just a relative time stamp.
 */
unsigned long long sched_clock(void)
{
	if (config.cs_module == TIMER_MODULE_NONE)
		return (unsigned long long)(jiffies - INITIAL_JIFFIES) *
			(NSEC_PER_SEC / HZ);
	else
		return clocksource_cyc2ns(bcm_read_timer(NULL),
					  bcm_clksrc.mult, bcm_clksrc.shift);
}

/*
 * Returns current time from boot in nsecs.  It's OK for this to wrap
 * around for now, as it's just a relative time stamp.
 */
unsigned long long sched_clock(void)
{
	struct omap_32k_sync_device *omap = thecs;

	if (!omap)
		return 0;

	return clocksource_cyc2ns(omap->cs.read(&omap->cs),
				  omap->cs.mult, omap->cs.shift);
}

unsigned long long notrace sched_clock(void)
{
	if (timer_initialized) {
		struct clocksource *cs = &clocksource_microblaze;
		/*
		 * cnt32_to_63() extends the 32-bit counter to 63 bits;
		 * the top bit of its result is bookkeeping state and
		 * must be masked off before use.
		 */
		cycle_t cyc = cnt32_to_63(cs->read(NULL)) & LLONG_MAX;

		return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
	}
	return 0;
}

/*
 * Returns current time from boot in nsecs.  It's OK for this to wrap
 * around for now, as it's just a relative time stamp.
 */
unsigned long long sched_clock(void)
{
	static int first = 1;
	static cycle_t saved_ticks;
	static int saved_ticks_valid;
	static unsigned long long base;
	static unsigned long long last_result;
	static cycle_t last_ticks;
	static unsigned long long result;
	unsigned long irq_flags;
	cycle_t ticks;

	local_irq_save(irq_flags);

	last_ticks = saved_ticks;
	saved_ticks = ticks = sec_sched_timer_read();

	if (!saved_ticks_valid) {
		saved_ticks_valid = 1;
		last_ticks = ticks;
		base -= clocksource_cyc2ns(ticks, clocksource_sec.mult,
					   clocksource_sec.shift);
	}

	if (ticks < last_ticks) {
		if (first)
			first = 0;
		else {
			/* counter wrapped: credit one full period (mask + 1) */
			base += clocksource_cyc2ns(clocksource_sec.mask,
						   clocksource_sec.mult,
						   clocksource_sec.shift);
			base += clocksource_cyc2ns(1, clocksource_sec.mult,
						   clocksource_sec.shift);
		}
	}

	last_result = result = clocksource_cyc2ns(ticks,
						  clocksource_sec.mult,
						  clocksource_sec.shift) + base;

	local_irq_restore(irq_flags);

	return result;
}

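Stripped of its bookkeeping, the snippet above is the classic wrap-extension pattern: remember the last raw counter value, and whenever the counter is seen to move backwards, credit one full counter period to an accumulated base. A minimal sketch with hypothetical names (wrap_base_ns, wrap_last, wrapping_sched_clock), assuming a 32-bit up-counting clocksource and callers that are already serialized:

static u64 wrap_base_ns;	/* nanoseconds accumulated over past wraps */
static cycle_t wrap_last;	/* counter value seen on the previous call */

static unsigned long long wrapping_sched_clock(struct clocksource *cs)
{
	cycle_t now = cs->read(cs);

	if (now < wrap_last) {
		/*
		 * Counter rolled over: credit one full period (mask + 1),
		 * converted in two pieces as the snippet above does.
		 */
		wrap_base_ns += clocksource_cyc2ns(cs->mask, cs->mult,
						   cs->shift);
		wrap_base_ns += clocksource_cyc2ns(1, cs->mult, cs->shift);
	}
	wrap_last = now;

	return wrap_base_ns + clocksource_cyc2ns(now, cs->mult, cs->shift);
}
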
/*
 * Returns current time from boot in nsecs.  It's OK for this to wrap
 * around for now, as it's just a relative time stamp.
 */
unsigned long long sched_clock(void)
{
	unsigned long irq_flags;
	cycle_t ticks, elapsed_ticks = 0;
	unsigned long long increment = 0;
	unsigned int overflow_cnt = 0;

	local_irq_save(irq_flags);

	if (likely(sched_timer_running)) {
		overflow_cnt = (s5p_sched_timer_overflows - old_overflows);
		ticks = s5p_sched_timer_read(&clocksource_s5p);

		if (overflow_cnt) {
			increment = (overflow_cnt - 1) *
				(clocksource_cyc2ns(clocksource_s5p.read(&clocksource_s5p),
						    clocksource_s5p.mult,
						    clocksource_s5p.shift));
			elapsed_ticks = (clocksource_s5p.mask - last_ticks) +
					ticks;
		} else {
			if (unlikely(last_ticks > ticks)) {
				/* wrapped before the overflow IRQ ran */
				pending_irq = 1;
				elapsed_ticks = (clocksource_s5p.mask -
						 last_ticks) + ticks;
				s5p_sched_timer_overflows++;
			} else {
				elapsed_ticks = (ticks - last_ticks);
			}
		}

		time_stamp += (clocksource_cyc2ns(elapsed_ticks,
						  clocksource_s5p.mult,
						  clocksource_s5p.shift) +
			       increment);

		old_overflows = s5p_sched_timer_overflows;
		last_ticks = ticks;
	}

	local_irq_restore(irq_flags);

	return time_stamp;
}

/*
 * sched_clock()
 */
unsigned long long sched_clock(void)
{
	static unsigned long last_timeE;
	static cycle_t cyc;
	struct clocksource *cs = &clocksource_timer_e;
	unsigned long cur;

	cur = cycle_read_timerE(NULL);
	cyc += cur - last_timeE;
	last_timeE = cur;

	return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
}

void read_persistent_clock(struct timespec *ts)
{
	unsigned long long nsecs;
	cycles_t delta;
	struct timespec *tsp = &persistent_ts;

	last_cycles = cycles;
	cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
	delta = cycles - last_cycles;

	nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);

	timespec_add_ns(tsp, nsecs);
	*ts = *tsp;
}

static void omap_read_persistent_clock64(struct timespec64 *ts)
{
	unsigned long long nsecs;
	cycles_t last_cycles;

	last_cycles = cycles;
	cycles = sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;

	nsecs = clocksource_cyc2ns(cycles - last_cycles,
				   persistent_mult, persistent_shift);

	timespec64_add_ns(&persistent_ts, nsecs);

	*ts = persistent_ts;
}

void read_persistent_clock(struct timespec *ts)
{
	unsigned long long nsecs;
	cycles_t delta;
	struct timespec *tsp = &persistent_ts;

	last_cycles = cycles;
	cycles = clocksource_32k.read(&clocksource_32k);
	delta = cycles - last_cycles;

	nsecs = clocksource_cyc2ns(delta,
				   clocksource_32k.mult, clocksource_32k.shift);

	timespec_add_ns(tsp, nsecs);
	*ts = *tsp;
}

void read_persistent_clock(struct timespec *ts)
{
	unsigned long long nsecs;
	cycles_t last_cycles;
	unsigned long flags;

	spin_lock_irqsave(&read_persistent_clock_lock, flags);

	last_cycles = cycles;
	cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;

	nsecs = clocksource_cyc2ns(cycles - last_cycles,
				   persistent_mult, persistent_shift);

	timespec_add_ns(&persistent_ts, nsecs);

	*ts = persistent_ts;

	spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
}

void read_persistent_clock(struct timespec *ts)
{
	unsigned long long nsecs;
	cycles_t delta;
	struct timespec *tsp = &persistent_ts;

	last_cycles = cycles;
	cycles = clocksource_32k.read(&clocksource_32k);
	delta = cycles - last_cycles;

	if (unlikely(cycles < last_cycles)) {
		pr_warning("%s: WRAP\n", __func__);
		delta = last_cycles - cycles;
	}

	nsecs = clocksource_cyc2ns(delta,
				   clocksource_32k.mult, clocksource_32k.shift);

	timespec_add_ns(tsp, nsecs);
	*ts = *tsp;
}

/*
 * sched_clock()
 * Returns current time in nanosecond units.
 *
 * Notes:
 * 1) This is an override for the weak alias in
 *    kernel/sched_clock.c.
 * 2) Do not use xtime_lock as this function is
 *    sometimes called with xtime_lock held.
 * 3) This approach allows us to perform sched_clock() calls with
 *    interrupts disabled, since our cycle counter moves forward
 *    no matter what.
 */
unsigned long long sched_clock(void)
{
	return clocksource_cyc2ns(timer_device_clockbase_read(&timer_device_clockbase),
				  timer_device_clockbase.mult,
				  timer_device_clockbase.shift);
}

/*
 * Rounds down to the nearest nsec.
 */
unsigned long long sec_ticks_to_nsecs(unsigned long ticks)
{
	return clocksource_cyc2ns(ticks,
				  clocksource_sec.mult, clocksource_sec.shift);
}

unsigned long long sched_clock(void)
{
	return clocksource_cyc2ns(tegra_clocksource.read(&tegra_clocksource),
				  tegra_clocksource.mult,
				  tegra_clocksource.shift);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 * Note that with LOCKDEP, this is called during lockdep_init(), and
 * we will claim that sched_clock() is zero for a little while, until
 * we run setup_clock(), above.
 */
unsigned long long sched_clock(void)
{
	return clocksource_cyc2ns(get_cycles(),
				  sched_clock_mult, SCHED_CLOCK_SHIFT);
}

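The mult/shift pairs used throughout are derived from the counter frequency, so that (cycles * mult) >> shift approximates cycles * NSEC_PER_SEC / rate. A sketch of that derivation using the stock clocksource_hz2mult() helper; the 100 MHz rate, the shift of 20, and all the example_* names here are made-up illustration values, not taken from any of the snippets above:

#include <linux/clocksource.h>

#define EXAMPLE_SHIFT	20	/* illustrative shift constant */

static u32 example_mult;

static void __init example_setup_clock(void)
{
	/* mult = (NSEC_PER_SEC << shift) / hz, rounded */
	example_mult = clocksource_hz2mult(100000000, EXAMPLE_SHIFT);

	/*
	 * From here on:
	 *   ns = clocksource_cyc2ns(cycles, example_mult, EXAMPLE_SHIFT)
	 */
}
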
/*
 * Returns current time from boot in nsecs.  It's OK for this to wrap
 * around for now, as it's just a relative time stamp.
 */
unsigned long long sched_clock(void)
{
	return clocksource_cyc2ns(clocksource_32k.read(&clocksource_32k),
				  clocksource_32k.mult, clocksource_32k.shift);
}

static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
				  bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

/*
 * Returns current time from boot in nsecs.  It's OK for this to wrap
 * around for now, as it's just a relative time stamp.
 */
unsigned long long sched_clock(void)
{
	return clocksource_cyc2ns(clocksource_timer.read(&clocksource_timer),
				  clocksource_timer.mult,
				  clocksource_timer.shift);
}

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
				  bfin_cs_gptimer0.mult,
				  bfin_cs_gptimer0.shift);
}