/*
 * sched_clock() - scheduler clock in nanoseconds.
 *
 * Samples the raw counter via read_sched_clock() and converts it with
 * the cached epoch state.  With CONFIG_QC_ABNORMAL_DEBUG_CODE, the
 * computed timestamp is also mirrored into last_ns (atomically) so
 * debug code can recover the last known scheduler time.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
#if defined(CONFIG_QC_ABNORMAL_DEBUG_CODE)
	u64 ns = cyc_to_sched_clock(cyc, sched_clock_mask);

	atomic64_set(&last_ns, ns);
	return ns;
#else
	return cyc_to_sched_clock(cyc, sched_clock_mask);
#endif
}
/*
 * sched_clock() - scheduler clock in nanoseconds.
 *
 * Samples the raw counter via read_sched_clock() and converts it with
 * the cached epoch state.  With CONFIG_SEC_DEBUG, the computed value
 * is also handed to sec_debug_save_last_ns() before being returned.
 *
 * Restructured with #else: the original fell through to a second
 * conversion/return after the #ifdef block, which was unreachable dead
 * code whenever CONFIG_SEC_DEBUG was enabled.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
#ifdef CONFIG_SEC_DEBUG
	u64 local = cyc_to_sched_clock(cyc, sched_clock_mask);

	sec_debug_save_last_ns(local);
	return local;
#else
	return cyc_to_sched_clock(cyc, sched_clock_mask);
#endif
}
/*
 * sched_clock() - scheduler clock derived from the PWM timer selected
 * as timer_source.
 *
 * Count registers for PWM0-3 sit at stride 0x0c starting at offset
 * 0x14; PWM4's count register is at 0x40.  The raw value is
 * bitwise-inverted before conversion (the hardware apparently counts
 * down).  An unknown source id is logged and reported as time 0.
 */
unsigned long long notrace sched_clock(void)
{
	unsigned long reg_off;
	u32 cyc;

	switch (timer_source.source_id) {
	case S5P_PWM0:
	case S5P_PWM1:
	case S5P_PWM2:
	case S5P_PWM3:
		reg_off = (timer_source.source_id * 0x0c) + 0x14;
		break;
	case S5P_PWM4:
		reg_off = 0x40;
		break;
	default:
		printk(KERN_ERR "Invalid Timer %d\n", timer_source.source_id);
		return 0;
	}

	cyc = ~__raw_readl(S3C_TIMERREG(reg_off));
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}
/*
 * sched_clock() - scheduler clock backed by the general-purpose timer.
 *
 * Returns 0 until the timer clocksource has been registered.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc;

	if (unlikely(!gpt_src))
		return 0;

	cyc = gptimer_clksrc_read(NULL);
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}
/*
 * sched_clock() - scheduler clock read from the S5P timer register.
 *
 * The raw value is bitwise-inverted before conversion (the counter
 * apparently counts down).  Returns 0 until the register mapping is
 * available.
 */
unsigned long long notrace sched_clock(void)
{
	void __iomem *base = s5p_timer_reg();
	u32 cyc;

	if (!base)
		return 0;

	cyc = ~__raw_readl(base);
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}
/*
 * sched_clock() - scheduler clock read from MTU timer 0.
 *
 * The raw value is negated (two's complement) before conversion — the
 * MTU apparently counts down.  Returns 0 until the MTU is mapped.
 */
unsigned long long notrace sched_clock(void)
{
	u32 ticks;

	if (unlikely(!mtu_base))
		return 0;

	ticks = -readl(mtu_base + MTU_VAL(0));
	return cyc_to_sched_clock(&cd, ticks, (u32)~0);
}
/*
 * sched_clock() - scheduler clock read from timer 1 under
 * sched_clock_lock.
 *
 * Both the register read and the cycle-to-ns conversion happen inside
 * the irq-safe spinlock so they form one consistent sample.  The raw
 * value is bitwise-inverted before conversion (the counter apparently
 * counts down).
 */
unsigned long long notrace sched_clock(void)
{
	unsigned long long ns;
	unsigned long flags;
	u32 cyc;

	spin_lock_irqsave(&sched_clock_lock, flags);
	cyc = ~readl(CFG_TIMER_VABASE + REG_TIMER1_VALUE);
	ns = cyc_to_sched_clock(&cd, cyc, (u32)~0);
	spin_unlock_irqrestore(&sched_clock_lock, flags);

	return ns;
}
/*
 * sched_clock() - scheduler clock read from the IXP4xx OSTS
 * (operating system timestamp) register.
 */
unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd, *IXP4XX_OSTS, (u32)~0);
}
/*
 * _omap_mpu_sched_clock() - scheduler clock read from OMAP MPU timer 1.
 *
 * The raw value is bitwise-inverted before conversion (the timer
 * apparently counts down).
 */
static inline unsigned long long notrace _omap_mpu_sched_clock(void)
{
	return cyc_to_sched_clock(&cd, ~omap_mpu_timer_read(1), (u32)~0);
}
/*
 * sched_clock() - scheduler clock read from the S3C2410 PWM source
 * timer's count observation register.
 *
 * The raw value is bitwise-inverted before conversion (the counter
 * apparently counts down); TCNT_MAX bounds the usable counter width.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = ~__raw_readl(S3C2410_TCNTO(PWM_SOURCE));

	return cyc_to_sched_clock(&cd, cyc, TCNT_MAX);
}
/*
 * sched_clock() - scheduler clock read through sched_clock_reg.
 *
 * The raw value is subtracted from the 32-bit maximum (the counter
 * apparently counts down).  Reports 0 cycles until the register
 * pointer has been set up.
 */
unsigned long long notrace sched_clock(void)
{
	cycle_t cyc = 0;

	if (sched_clock_reg)
		cyc = (u32)~0 - __raw_readl(sched_clock_reg);

	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}
/*
 * sched_clock() - scheduler clock delegating the raw read to the MXC
 * clocksource's own read callback.
 */
unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd, clocksource_mxc.read(&clocksource_mxc),
				  (u32)~0);
}
/*
 * sched_clock() - scheduler clock read from timer 1.
 *
 * The raw value is bitwise-inverted before conversion (the counter
 * apparently counts down).
 */
unsigned long long notrace sched_clock(void)
{
	u32 count = ~readl(CFG_TIMER_VABASE + REG_TIMER1_VALUE);

	return cyc_to_sched_clock(&cd, count, (u32)~0);
}
/*
 * sched_clock() - scheduler clock read from the Rockchip clocksource
 * timer.
 *
 * The raw value is bitwise-inverted before conversion (the counter
 * apparently counts down); MASK bounds the usable counter width.
 */
unsigned long long notrace sched_clock(void)
{
	u32 cnt = ~RK_TIMER_READVALUE(TIMER_CLKSRC);

	return cyc_to_sched_clock(&cd, cnt, MASK);
}
/*
 * sched_clock() - scheduler clock built on the platform's timer_read()
 * helper.
 */
unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd, timer_read(), (u32)~0);
}
/*
 * sched_clock() - scheduler clock read from the OS counter register
 * (OSCR).
 */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = OSCR;

	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}
/*
 * sched_clock_32() - 32-bit sched_clock backend.
 *
 * Samples the registered counter and converts it to nanoseconds with
 * the cached epoch state and mask.
 */
static unsigned long long notrace sched_clock_32(void)
{
	return cyc_to_sched_clock(read_sched_clock(), sched_clock_mask);
}
/*
 * sched_clock() - scheduler clock read from the U300 APP timer's GPT2
 * current-count register.
 */
unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd,
				  readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC),
				  (u32)~0);
}