/****************************************************************************
 *
 * Get the time from the Generic Timer Counter Register.
 *
 * @param	Xtime_Global: Pointer to the location to be updated with the
 *		current value of the virtual counter (CNTVCT).
 *
 * @return	None.
 *
 * @note	Also ensures the global timer is running before sampling it.
 *
 ****************************************************************************/
void XTime_GetTime(XTime *Xtime_Global)
{
	/* Start global timer counter, it will only be enabled if it is disabled */
	XTime_StartTimer();

	/* Sample the architected virtual counter into the caller's location. */
	*Xtime_Global = arch_counter_get_cntvct();
}
/*
 * Trap handler for userspace reads of CNTVCT: emulate the access by
 * writing the current virtual counter value into the destination
 * register encoded in the syndrome, then advance past the trapped
 * instruction.
 */
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	/* Destination register (Rt) taken from the ESR ISS field. */
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	/*
	 * pt_regs_write_reg() is expected to discard the write when Rt
	 * encodes XZR — TODO confirm against its definition.
	 */
	pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
	/* Skip the emulated MRS instruction. */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
/*
 * Trap handler for userspace reads of CNTVCT: emulate the access by
 * writing the current virtual counter value into the destination
 * register, then step the PC over the trapped instruction.
 */
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	/* Extract the destination register (Rt) from the ESR ISS field. */
	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;

	/*
	 * pt_regs_write_reg() is presumed to discard the write when Rt
	 * encodes XZR (rt == 31) — TODO confirm against its definition.
	 */
	pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
	/*
	 * NOTE(review): the instruction size is hard-coded as 4 bytes.
	 * Newer kernels use arm64_skip_faulting_instruction(regs,
	 * AARCH64_INSN_SIZE), which also keeps single-step state
	 * consistent — confirm whether that helper exists in this tree.
	 */
	regs->pc += 4;
}
/********************** export area *********************/
/*
 * Return the current value of the architected virtual counter
 * (CNTVCT) as a raw 64-bit tick count.
 */
u64 localtimer_get_phy_count(void)
{
	return arch_counter_get_cntvct();
}
/*
 * Trap handler for userspace reads of CNTVCT: emulate the access by
 * writing the current virtual counter value into the destination
 * register, then step the PC over the trapped instruction.
 */
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	/* Extract the destination register (Rt) from the ESR ISS field. */
	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;

	/*
	 * Barrier before sampling the counter — presumably to order the
	 * read against the context of the trapped instruction; confirm
	 * the intended ordering requirement.
	 */
	isb();
	/* rt == 31 encodes XZR: writes to it must be discarded. */
	if (rt != 31)
		regs->regs[rt] = arch_counter_get_cntvct();
	/* Skip the emulated 4-byte MRS instruction. */
	regs->pc += 4;
}
/*
 * set_next_event path carrying the Freescale erratum A-008585
 * workaround: the comparator (CVAL) is programmed with an absolute
 * deadline (now + evt) via a direct sysreg write, rather than going
 * through the generic register accessor for the relative TVAL —
 * presumably because the erratum makes some timer accesses
 * unreliable; confirm against the erratum description.
 */
static __always_inline void fsl_a008585_set_next_event(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	/* Absolute expiry: current virtual counter plus requested delta. */
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	/* Enable the timer and clear its interrupt-mask bit. */
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	/* Program the comparator for the selected (phys/virt) timer. */
	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	/* Commit the control word after the comparator is set. */
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
/*
 * Report the remaining time (in seconds) before the SBSA generic
 * watchdog fires, derived from the compare value (WCV) and the
 * current system counter.
 */
static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
{
	struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
	u64 timeleft = 0;

	/*
	 * In the single stage mode, if WS0 is deasserted
	 * (watchdog is in the first stage),
	 * timeleft = WOR + (WCV - system counter)
	 *
	 * NOTE(review): `action` is file-scope state not visible in this
	 * chunk — presumably the "panic action"/two-stage mode flag;
	 * confirm against the driver's module parameter.
	 */
	if (!action &&
	    !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
		timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);

	/* Ticks until the programmed compare value is reached. */
	timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
		    arch_counter_get_cntvct();

	/*
	 * Convert ticks to seconds; gwdt->clk presumably holds the
	 * counter frequency in Hz — confirm at probe time.
	 */
	do_div(timeleft, gwdt->clk);

	return timeleft;
}
/****************************************************************************
 *
 * Get the time from the Generic Timer Counter Register.
 *
 * @param	Xtime_Global: Pointer to the location to be updated with the
 *		current value of the virtual counter (CNTVCT).
 *
 * @return	None.
 *
 * @note	Assumes the counter is already running; this variant does
 *		not start the timer before sampling it.
 *
 ****************************************************************************/
void XTime_GetTime(XTime *Xtime_Global)
{
	/* Sample the architected virtual counter into the caller's location. */
	*Xtime_Global = arch_counter_get_cntvct();
}
/*
 * cyclecounter read callback: return the current virtual counter
 * value. The cyclecounter argument is unused.
 */
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	cycle_t cycles = arch_counter_get_cntvct();

	return cycles;
}
/*
 * clocksource read callback: return the current virtual counter
 * value. The clocksource argument is unused.
 */
static cycle_t arch_counter_read(struct clocksource *cs)
{
	cycle_t cycles = arch_counter_get_cntvct();

	return cycles;
}
/*
 * One-time registration of the architected timer: allocate per-CPU
 * clock_event_device state, register the clocksource/timecounter,
 * request the per-CPU timer PPIs (virtual, or secure+non-secure
 * physical), install the CPU hotplug notifier, and configure the
 * timer on the boot CPU. Returns 0 on success or a negative errno,
 * unwinding via the goto-cleanup ladder on failure.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	/* Reuse the clocksource's mult/shift for the cyclecounter. */
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntvct());

	if (arch_timer_use_virtual) {
		/* Virtual timer: a single per-CPU PPI. */
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		/* Physical timer: secure PPI, plus non-secure if present. */
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			/* Undo the secure PPI if the non-secure one failed. */
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	/* Release whichever PPIs were requested above. */
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
/*
 * Public accessor for the raw 64-bit virtual counter value.
 */
u64 arch_timer_read_counter(void)
{
	u64 cnt = arch_counter_get_cntvct();

	return cnt;
}
/*
 * Scheduler clock: virtual counter ticks scaled by the precomputed
 * multiplier. notrace keeps this out of the function tracer.
 */
unsigned long long notrace sched_clock(void)
{
	unsigned long long cycles = arch_counter_get_cntvct();

	return cycles * sched_clock_mult;
}