static void __init intcp_init_early(void)
{
	cm_map = syscon_regmap_lookup_by_compatible("arm,core-module-integrator");
	if (IS_ERR(cm_map))
		return;

	sched_clock_register(intcp_read_sched_clock, 32, 24000000);
}
static int __init integrator_clocksource_init(unsigned long inrate,
					      void __iomem *base)
{
	u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
	unsigned long rate = inrate;
	int ret;

	if (rate >= 1500000) {
		rate /= 16;
		ctrl |= TIMER_CTRL_DIV16;
	}

	writel(0xffff, base + TIMER_LOAD);
	writel(ctrl, base + TIMER_CTRL);

	ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
				    rate, 200, 16, clocksource_mmio_readl_down);
	if (ret)
		return ret;

	sched_clk_base = base;
	sched_clock_register(integrator_read_sched_clock, 16, rate);

	return 0;
}
static void __init bcm2708_timer_init(void)
{
	if (of_have_populated_dt()) {
		of_clk_init(NULL);
		clocksource_of_init();
		return;
	}

	/* init high res timer */
	bcm2708_clocksource_init();

	/*
	 * Make irqs happen for the system timer
	 */
	setup_irq(IRQ_TIMER3, &bcm2708_timer_irq);

	sched_clock_register(bcm2708_read_sched_clock, 32, STC_FREQ_HZ);

	timer0_clockevent.mult =
		div_sc(STC_FREQ_HZ, NSEC_PER_SEC, timer0_clockevent.shift);
	timer0_clockevent.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &timer0_clockevent);
	timer0_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &timer0_clockevent);

	timer0_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&timer0_clockevent);

	register_current_timer_delay(&bcm2708_delay_timer);
}
void __init iop_init_time(unsigned long tick_rate)
{
	u32 timer_ctl;

	sched_clock_register(iop_read_sched_clock, 32, tick_rate);

	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
	iop_tick_rate = tick_rate;

	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
		    IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;

	/*
	 * Set up interrupting clockevent timer 0.
	 */
	write_tmr0(timer_ctl & ~IOP_TMR_EN);
	write_tisr(1);
	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
	iop_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&iop_clockevent, tick_rate,
					0xf, 0xfffffffe);

	/*
	 * Set up free-running clocksource timer 1.
	 */
	write_trr1(0xffffffff);
	write_tcr1(0xffffffff);
	write_tmr1(timer_ctl);
	clocksource_register_hz(&iop_clocksource, tick_rate);
}
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;

		/* If the clocksource name is "arch_sys_counter" the
		 * VDSO will attempt to read the CP15-based counter.
		 * Ensure this does not happen when CP15-based
		 * counter is not available.
		 */
		clocksource_counter.name = "arch_mem_counter";
	}

	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
static void __init omap2_gptimer_clocksource_init(int gptimer_id,
						  const char *fck_source,
						  const char *property)
{
	int res;

	clksrc.id = gptimer_id;
	clksrc.errata = omap_dm_timer_get_errata();

	res = omap_dm_timer_init_one(&clksrc, fck_source, property,
				     &clocksource_gpt.name,
				     OMAP_TIMER_NONPOSTED);
	BUG_ON(res);

	__omap_dm_timer_load_start(&clksrc,
				   OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0,
				   OMAP_TIMER_NONPOSTED);
	sched_clock_register(dmtimer_read_sched_clock, 32, clksrc.rate);

	if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
		pr_err("Could not register clocksource %s\n",
		       clocksource_gpt.name);
	else
		pr_info("OMAP clocksource: %s at %lu Hz\n",
			clocksource_gpt.name, clksrc.rate);
}
static inline void setup_clksrc(u32 freq)
{
	struct clocksource *cs = &gpt_clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);
	struct timecounter *mt_timecounter;
	u64 start_count;

	pr_alert("setup_clksrc1: dev->base_addr=0x%lx GPT2_CON=0x%x\n",
		 (unsigned long)dev->base_addr, __raw_readl(dev->base_addr));

	cs->mult = clocksource_hz2mult(freq, cs->shift);
	sched_clock_register(mt_read_sched_clock, 32, freq);

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);

	clocksource_register(cs);

	start_count = mt_read_sched_clock();
	mt_cyclecounter.mult = cs->mult;
	mt_cyclecounter.shift = cs->shift;
	mt_timecounter = arch_timer_get_timecounter();
	timecounter_init(mt_timecounter, &mt_cyclecounter, start_count);

	pr_alert("setup_clksrc1: mt_cyclecounter.mult=0x%x mt_cyclecounter.shift=0x%x\n",
		 mt_cyclecounter.mult, mt_cyclecounter.shift);
	pr_alert("setup_clksrc2: dev->base_addr=0x%lx GPT2_CON=0x%x\n",
		 (unsigned long)dev->base_addr, __raw_readl(dev->base_addr));
}
static void __init tango_clocksource_init(struct device_node *np)
{
	struct clk *clk;
	int xtal_freq, ret;

	xtal_in_cnt = of_iomap(np, 0);
	if (xtal_in_cnt == NULL) {
		pr_err("%s: invalid address\n", np->full_name);
		return;
	}

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("%s: invalid clock\n", np->full_name);
		return;
	}

	xtal_freq = clk_get_rate(clk);
	delay_timer.freq = xtal_freq;
	delay_timer.read_current_timer = read_xtal_counter;

	ret = clocksource_register_hz(&tango_xtal, xtal_freq);
	if (ret != 0) {
		pr_err("%s: registration failed\n", np->full_name);
		return;
	}

	sched_clock_register(read_sched_clock, 32, xtal_freq);
	register_current_timer_delay(&delay_timer);
}
static int __init lpc32xx_clocksource_init(struct device_node *np)
{
	void __iomem *base;
	unsigned long rate;
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "timerclk");
	if (IS_ERR(clk)) {
		pr_err("clock get failed (%lu)\n", PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("clock enable failed (%d)\n", ret);
		goto err_clk_enable;
	}

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("unable to map registers\n");
		ret = -EADDRNOTAVAIL;
		goto err_iomap;
	}

	/*
	 * Disable and reset timer then set it to free running timer
	 * mode (CTCR) with no prescaler (PR) or match operations (MCR).
	 * After setup the timer is released from reset and enabled.
	 */
	writel_relaxed(LPC32XX_TIMER_TCR_CRST, base + LPC32XX_TIMER_TCR);
	writel_relaxed(0, base + LPC32XX_TIMER_PR);
	writel_relaxed(0, base + LPC32XX_TIMER_MCR);
	writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
	writel_relaxed(LPC32XX_TIMER_TCR_CEN, base + LPC32XX_TIMER_TCR);

	rate = clk_get_rate(clk);
	ret = clocksource_mmio_init(base + LPC32XX_TIMER_TC, "lpc3220 timer",
				    rate, 300, 32, clocksource_mmio_readl_up);
	if (ret) {
		pr_err("failed to init clocksource (%d)\n", ret);
		goto err_clocksource_init;
	}

	clocksource_timer_counter = base + LPC32XX_TIMER_TC;
	sched_clock_register(lpc32xx_read_sched_clock, 32, rate);

	return 0;

err_clocksource_init:
	iounmap(base);
err_iomap:
	clk_disable_unprepare(clk);
err_clk_enable:
	clk_put(clk);
	return ret;
}
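/*
 * The lpc32xx_read_sched_clock() callback registered above is not part of
 * this excerpt. A minimal sketch, assuming the free-running TC counter
 * cached in clocksource_timer_counter by the init function above:
 */
static u64 notrace lpc32xx_read_sched_clock(void)
{
	/* illustrative sketch; the actual driver code may differ */
	return readl(clocksource_timer_counter);
}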
/* initialize the kernel jiffy timer source */
static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
	unsigned long rate;
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Failed to get clock");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Failed to enable clock");
		return ret;
	}

	rate = clk_get_rate(clk);

	if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
		pr_err("Invalid clock rate");
		return -EINVAL;
	}

	sirfsoc_timer_base = of_iomap(np, 0);
	if (!sirfsoc_timer_base) {
		pr_err("unable to map timer cpu registers\n");
		return -ENXIO;
	}

	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);

	writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
		       sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);

	ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
	if (ret) {
		pr_err("Failed to register clocksource");
		return ret;
	}

	sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);

	ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
	if (ret) {
		pr_err("Failed to setup irq");
		return ret;
	}

	sirfsoc_clockevent_init();

	return 0;
}
static int __init csky_mptimer_init(struct device_node *np)
{
	int ret, cpu, cpu_rollback;
	struct timer_of *to = NULL;

	/*
	 * Csky_mptimer is designed for C-SKY SMP multi-processors and
	 * every core has its own private irq and regs for clkevt and
	 * clksrc.
	 *
	 * The regs are accessed by the cpu instructions mfcr/mtcr instead
	 * of an mmio map, so we don't need an mmio address in the dts, but
	 * we still need to give the clk and irq number.
	 *
	 * We use a private irq for the mptimer and the irq number is the
	 * same for every core. So we use request_percpu_irq() in
	 * timer_of_init.
	 */
	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
	if (csky_mptimer_irq <= 0)
		return -EINVAL;

	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
				 "csky_mp_timer", &csky_to);
	if (ret)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		to = per_cpu_ptr(&csky_to, cpu);
		ret = timer_of_init(np, to);
		if (ret)
			goto rollback;
	}

	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
				"clockevents/csky/timer:starting",
				csky_mptimer_starting_cpu,
				csky_mptimer_dying_cpu);
	if (ret)
		return -EINVAL;

	return 0;

rollback:
	for_each_possible_cpu(cpu_rollback) {
		if (cpu_rollback == cpu)
			break;

		to = per_cpu_ptr(&csky_to, cpu_rollback);
		timer_of_cleanup(to);
	}
	return -EINVAL;
}
static int __init pit_clocksource_init(unsigned long rate)
{
	/* set the max load value and start the clock source counter */
	__raw_writel(0, clksrc_base + PITTCTRL);
	__raw_writel(~0UL, clksrc_base + PITLDVAL);
	__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);

	sched_clock_register(pit_read_sched_clock, 32, rate);
	return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
				     300, 32, clocksource_mmio_readl_down);
}
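/*
 * pit_read_sched_clock() is not shown in this excerpt. A minimal sketch,
 * assuming the down-counting PITCVAL register set up above; the value is
 * inverted so that sched_clock still increases monotonically, matching the
 * clocksource_mmio_readl_down accessor used for the clocksource:
 */
static u64 notrace pit_read_sched_clock(void)
{
	/* illustrative sketch; the actual driver code may differ */
	return ~__raw_readl(clksrc_base + PITCVAL);
}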
static inline void setup_clksrc(void)
{
	struct clocksource *cs = &mt6582_gpt.clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	cs->mult = clocksource_hz2mult(SYS_CLK_RATE, cs->shift);
	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);
	sched_clock_register((void *)mt_read_sched_clock, 32, SYS_CLK_RATE);
}
static void __init versatile_sched_clock_init(struct device_node *node)
{
	void __iomem *base = of_iomap(node, 0);

	if (!base)
		return;

	versatile_sys_24mhz = base + SYS_24MHZ;

	sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
}
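/*
 * versatile_sys_24mhz_read() is not part of this excerpt. Given the
 * SYS_24MHZ register pointer cached above, a minimal sketch of the
 * read callback could be:
 */
static u64 notrace versatile_sys_24mhz_read(void)
{
	/* illustrative sketch; the actual driver code may differ */
	return readl(versatile_sys_24mhz);
}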
static void __init sun4i_timer_init(struct device_node *node)
{
	unsigned long rate = 0;
	struct clk *clk;
	int ret, irq;
	u32 val;

	timer_base = of_iomap(node, 0);
	if (!timer_base)
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");
	clk_prepare_enable(clk);

	rate = clk_get_rate(clk);

	writel(~0, timer_base + TIMER_INTVAL_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD |
	       TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_base + TIMER_CTL_REG(1));

	sched_clock_register(sun4i_timer_sched_read, 32, rate);
	clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
			      rate, 350, 32, clocksource_mmio_readl_down);

	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);

	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_base + TIMER_CTL_REG(0));

	/* Make sure timer is stopped before playing with interrupts */
	sun4i_clkevt_time_stop(0);

	ret = setup_irq(irq, &sun4i_timer_irq);
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);

	/* Enable timer0 interrupt */
	val = readl(timer_base + TIMER_IRQ_EN_REG);
	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);

	sun4i_clockevent.cpumask = cpu_possible_mask;
	sun4i_clockevent.irq = irq;

	clockevents_config_and_register(&sun4i_clockevent, rate,
					TIMER_SYNC_TICKS, 0xffffffff);
}
void __init time_init(void)
{
	unsigned long cr16_hz;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	start_cpu_itimer();	/* get CPU 0 started */

	cr16_hz = 100 * PAGE0->mem_10msec;  /* Hz */

	/* register as sched_clock source */
	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}
void __init mmp_timer_init(int irq, unsigned long rate)
{
	timer_config();

	sched_clock_register(mmp_read_sched_clock, 32, rate);

	ckevt.cpumask = cpumask_of(0);

	setup_irq(irq, &timer_irq);

	clocksource_register_hz(&cksrc, rate);
	clockevents_config_and_register(&ckevt, rate, MIN_DELTA, MAX_DELTA);
}
static void __init gt_clocksource_init(void)
{
	writel(0, gt_base + GT_CONTROL);
	writel(0, gt_base + GT_COUNTER0);
	writel(0, gt_base + GT_COUNTER1);
	/* enables timer on all the cores */
	writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);

#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
	sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
	clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
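/*
 * gt_sched_clock_read() and the underlying counter accessor are not shown
 * here. Because the global timer is a 64-bit counter exposed as two 32-bit
 * registers, the upper word has to be re-read until it is stable so the two
 * halves are consistent. A minimal sketch, assuming the GT_COUNTER0/1
 * registers set up above:
 */
static u64 gt_counter_read(void)
{
	u64 counter;
	u32 lower;
	u32 upper, old_upper;

	/* illustrative sketch; the actual driver code may differ */
	upper = readl_relaxed(gt_base + GT_COUNTER1);
	do {
		old_upper = upper;
		lower = readl_relaxed(gt_base + GT_COUNTER0);
		upper = readl_relaxed(gt_base + GT_COUNTER1);
	} while (upper != old_upper);

	counter = upper;
	counter <<= 32;
	counter |= lower;
	return counter;
}

static u64 notrace gt_sched_clock_read(void)
{
	return gt_counter_read();
}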
static void __init init_sched_clock(void)
{
	struct device_node *sched_timer;

	sched_timer = of_find_matching_node(NULL, sptimer_ids);
	if (sched_timer) {
		timer_get_base_and_rate(sched_timer, &sched_io_base,
					&sched_rate);
		of_node_put(sched_timer);
	}

	sched_clock_register(read_sched_clock, 32, sched_rate);
}
static int __init sun4i_timer_init(struct device_node *node)
{
	int ret;
	u32 val;

	ret = timer_of_init(node, &to);
	if (ret)
		return ret;

	writel(~0, timer_of_base(&to) + TIMER_INTVAL_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD |
	       TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_of_base(&to) + TIMER_CTL_REG(1));

	/*
	 * sched_clock_register does not have priorities, and on sun6i and
	 * later there is a better sched_clock registered by arm_arch_timer.c
	 */
	if (of_machine_is_compatible("allwinner,sun4i-a10") ||
	    of_machine_is_compatible("allwinner,sun5i-a13") ||
	    of_machine_is_compatible("allwinner,sun5i-a10s") ||
	    of_machine_is_compatible("allwinner,suniv-f1c100s"))
		sched_clock_register(sun4i_timer_sched_read, 32,
				     timer_of_rate(&to));

	ret = clocksource_mmio_init(timer_of_base(&to) + TIMER_CNTVAL_REG(1),
				    node->name, timer_of_rate(&to), 350, 32,
				    clocksource_mmio_readl_down);
	if (ret) {
		pr_err("Failed to register clocksource\n");
		return ret;
	}

	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_of_base(&to) + TIMER_CTL_REG(0));

	/* Make sure timer is stopped before playing with interrupts */
	sun4i_clkevt_time_stop(timer_of_base(&to), 0);

	/* clear timer0 interrupt */
	sun4i_timer_clear_interrupt(timer_of_base(&to));

	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
					TIMER_SYNC_TICKS, 0xffffffff);

	/* Enable timer0 interrupt */
	val = readl(timer_of_base(&to) + TIMER_IRQ_EN_REG);
	writel(val | TIMER_IRQ_EN(0), timer_of_base(&to) + TIMER_IRQ_EN_REG);

	return ret;
}
static int __init mxs_clocksource_init(struct clk *timer_clk)
{
	unsigned int c = clk_get_rate(timer_clk);

	if (timrot_is_v1())
		clocksource_register_hz(&clocksource_mxs, c);
	else {
		clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
				      "mxs_timer", c, 200, 32,
				      clocksource_mmio_readl_down);
		sched_clock_register(mxs_read_sched_clock_v2, 32, c);
	}

	return 0;
}
int __init arch_timer_arch_init(void)
{
	u32 arch_timer_rate = arch_timer_get_rate();

	if (arch_timer_rate == 0)
		return -ENXIO;

	arch_timer_delay_timer_register();

	/* Cache the sched_clock multiplier to save a divide in the hot path. */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);

	return 0;
}
void __init timer_init(int irq)
{
	timer_config();

	sched_clock_register(mmp_read_sched_clock, 32, MMP_CLOCK_FREQ);

	ckevt.cpumask = cpumask_of(0);

	setup_irq(irq, &timer_irq);

	clocksource_register_hz(&cksrc, MMP_CLOCK_FREQ);
	clockevents_config_and_register(&ckevt, MMP_CLOCK_FREQ,
					MIN_DELTA, MAX_DELTA);
}
static int __init fttmr010_timer_common_init(struct device_node *np)
{
	int irq;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("Can't remap registers");
		return -ENXIO;
	}

	/* IRQ for timer 1 */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("Can't parse IRQ");
		return -EINVAL;
	}

	/*
	 * Reset the interrupt mask and status
	 */
	writel(TIMER_INT_ALL_MASK, base + TIMER_INTR_MASK);
	writel(0, base + TIMER_INTR_STATE);
	writel(TIMER_DEFAULT_FLAGS, base + TIMER_CR);

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled.)
	 */
	writel(0, base + TIMER3_COUNT);
	writel(0, base + TIMER3_LOAD);
	writel(0, base + TIMER3_MATCH1);
	writel(0, base + TIMER3_MATCH2);
	clocksource_mmio_init(base + TIMER3_COUNT,
			      "fttmr010_clocksource", tick_rate,
			      300, 32, clocksource_mmio_readl_up);
	sched_clock_register(fttmr010_read_sched_clock, 32, tick_rate);

	/*
	 * Setup clockevent timer (interrupt-driven.)
	 */
	writel(0, base + TIMER1_COUNT);
	writel(0, base + TIMER1_LOAD);
	writel(0, base + TIMER1_MATCH1);
	writel(0, base + TIMER1_MATCH2);
	setup_irq(irq, &fttmr010_timer_irq);
	fttmr010_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&fttmr010_clockevent, tick_rate,
					1, 0xffffffff);

	return 0;
}
static void __init sun5i_timer_init(struct device_node *node)
{
	struct reset_control *rstc;
	unsigned long rate;
	struct clk *clk;
	int ret, irq;
	u32 val;

	timer_base = of_iomap(node, 0);
	if (!timer_base)
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");
	clk_prepare_enable(clk);
	rate = clk_get_rate(clk);

	rstc = of_reset_control_get(node, NULL);
	if (!IS_ERR(rstc))
		reset_control_deassert(rstc);

	writel(~0, timer_base + TIMER_INTVAL_LO_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
	       timer_base + TIMER_CTL_REG(1));

	sched_clock_register(sun5i_timer_sched_read, 32, rate);
	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
			      rate, 340, 32, clocksource_mmio_readl_down);

	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);

	ret = setup_irq(irq, &sun5i_timer_irq);
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);

	/* Enable timer0 interrupt */
	val = readl(timer_base + TIMER_IRQ_EN_REG);
	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);

	sun5i_clockevent.cpumask = cpu_possible_mask;
	sun5i_clockevent.irq = irq;

	clockevents_config_and_register(&sun5i_clockevent, rate,
					TIMER_SYNC_TICKS, 0xffffffff);
}
static int __init mxc_clocksource_init(struct clk *timer_clk)
{
	unsigned int c = clk_get_rate(timer_clk);
	void __iomem *reg = timer_base + (timer_is_v2() ? V2_TCN : MX1_2_TCN);

	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
				     clocksource_mmio_readl_up);
}
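/*
 * mxc_read_sched_clock() is not shown above. A minimal sketch, assuming the
 * counter register cached in sched_clock_reg by the init function, with a
 * guard in case the callback runs before the pointer has been set:
 */
static u64 notrace mxc_read_sched_clock(void)
{
	/* illustrative sketch; the actual driver code may differ */
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}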
static int __init meson6_timer_init(struct device_node *node)
{
	u32 val;
	int ret, irq;

	timer_base = of_io_request_and_map(node, 0, "meson6-timer");
	if (IS_ERR(timer_base)) {
		pr_err("Can't map registers\n");
		return -ENXIO;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("Can't parse IRQ\n");
		return -EINVAL;
	}

	/* Set 1us for timer E */
	val = readl(timer_base + TIMER_ISA_MUX);
	val &= ~TIMER_CSD_INPUT_MASK;
	val |= TIMER_CSD_UNIT_1US << TIMER_INPUT_BIT(CSD_ID);
	writel(val, timer_base + TIMER_ISA_MUX);

	sched_clock_register(meson6_timer_sched_read, 32, USEC_PER_SEC);
	clocksource_mmio_init(timer_base + TIMER_ISA_VAL(CSD_ID), node->name,
			      1000 * 1000, 300, 32, clocksource_mmio_readl_up);

	/* Timer A base 1us */
	val &= ~TIMER_CED_INPUT_MASK;
	val |= TIMER_CED_UNIT_1US << TIMER_INPUT_BIT(CED_ID);
	writel(val, timer_base + TIMER_ISA_MUX);

	/* Stop the timer A */
	meson6_clkevt_time_stop(CED_ID);

	ret = setup_irq(irq, &meson6_timer_irq);
	if (ret) {
		pr_warn("failed to setup irq %d\n", irq);
		return ret;
	}

	meson6_clockevent.cpumask = cpu_possible_mask;
	meson6_clockevent.irq = irq;

	clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
					1, 0xfffe);
	return 0;
}
void __init time_init(void)
{
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	printk("Calibrating CPU frequency ");
	platform_calibrate_ccount();
	printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
	       (int)(ccount_freq/10000)%100);
#else
	ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
	clocksource_register_hz(&ccount_clocksource, ccount_freq);
	local_timer_setup(0);
	setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
	clocksource_of_init();
}
int __init init_r4k_clocksource(void)
{
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	/* Calculate a somewhat reasonable rating value */
	clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);

#ifndef CONFIG_CPU_FREQ
	sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
#endif

	return 0;
}
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				 bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		/* Install and invoke hotplug callbacks */
		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
					"AP_QCOM_TIMER_STARTING",
					msm_local_timer_starting_cpu,
					msm_local_timer_dying_cpu);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);

	return res;
}