/*
 * Setup the local clock events for a CPU.
 */
static int __cpuinit generic_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;

	pr_info("[ca7_timer]%s entry\n", __func__);

	generic_timer_calibrate_rate();

	write_cntp_ctl(0x0);

	clk->name = "generic_timer";
	clk->features = CLOCK_EVT_FEAT_ONESHOT;
	clk->rating = 350;
	clk->set_mode = generic_timer_set_mode;
	clk->set_next_event = generic_timer_set_next_event;
	clk->irq = timer_ppi;

	this_cpu_clk = __this_cpu_ptr(timer_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, generic_timer_rate,
					0xf, 0x7fffffff);
	enable_percpu_irq(clk->irq, 0);

	return 0;
}

static __cpuinit int rk_timer_init_clockevent(struct clock_event_device *ce,
					      unsigned int cpu)
{
	struct irqaction *irq = &timer.ce_irq[cpu];
	void __iomem *base = timer.ce_base[cpu];

	if (!base)
		return 0;

	ce->name = timer.ce_name[cpu];
	ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ce->set_next_event = rk_timer_set_next_event;
	ce->set_mode = rk_timer_set_mode;
	ce->irq = irq->irq;
	ce->cpumask = cpumask_of(cpu);

	writel_relaxed(1, base + TIMER_INT_STATUS);
	rk_timer_disable(base);

	irq->dev_id = ce;
	irq_set_affinity(irq->irq, cpumask_of(cpu));
	setup_irq(irq->irq, irq);

	clockevents_config_and_register(ce, 24000000, 0xF, 0xFFFFFFFF);

	return 0;
}

static int integrator_clockevent_init(unsigned long inrate,
				      void __iomem *base, int irq)
{
	unsigned long rate = inrate;
	unsigned int ctrl = 0;
	int ret;

	clkevt_base = base;

	/* Calculate and program a divisor */
	if (rate > 0x100000 * HZ) {
		rate /= 256;
		ctrl |= TIMER_CTRL_DIV256;
	} else if (rate > 0x10000 * HZ) {
		rate /= 16;
		ctrl |= TIMER_CTRL_DIV16;
	}
	timer_reload = rate / HZ;
	writel(ctrl, clkevt_base + TIMER_CTRL);

	ret = setup_irq(irq, &integrator_timer_irq);
	if (ret)
		return ret;

	clockevents_config_and_register(&integrator_clockevent,
					rate, 1, 0xffffU);
	return 0;
}

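/*
 * Worked example of the divisor selection above, assuming HZ = 100 (HZ is
 * not defined in this snippet): a 40 MHz input exceeds 0x10000 * HZ
 * (6,553,600) but not 0x100000 * HZ (104,857,600), so divide-by-16 is
 * chosen, leaving rate = 2.5 MHz and timer_reload = 25,000 ticks per
 * jiffy; small enough to fit the 16-bit 0xffffU max_delta passed to
 * clockevents_config_and_register().
 */
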
void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq,
				     struct clk *clk, const char *name)
{
	struct clock_event_device *evt = &sp804_clockevent;
	long rate;

	if (!clk)
		clk = clk_get_sys("sp804", name);
	if (IS_ERR(clk)) {
		pr_err("sp804: %s clock not found: %d\n", name,
		       (int)PTR_ERR(clk));
		return;
	}

	rate = sp804_get_clock_rate(clk);
	if (rate < 0)
		return;

	clkevt_base = base;
	clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
	evt->name = name;
	evt->irq = irq;
	evt->cpumask = cpu_possible_mask;

	writel(0, base + TIMER_CTRL);

	setup_irq(irq, &sp804_timer_irq);
	clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
}

static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
					unsigned int irq)
{
	struct clock_event_device *clkevt;
	unsigned long rate;

	if (!irq)
		return -EINVAL;
	if (!base)
		return -ENOMEM;
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
	if (!clkevt)
		return -ENOMEM;

	rate = clk_get_rate(clock);

	/* Set Timer prescaler */
	writew(DIV_ROUND_CLOSEST(rate, HZ), base);

	clkevt->name = "clps711x-clockevent";
	clkevt->rating = 300;
	clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
	clkevt->cpumask = cpumask_of(0);
	clockevents_config_and_register(clkevt, HZ, 0, 0);

	return request_irq(irq, clps711x_timer_interrupt, IRQF_TIMER,
			   "clps711x-timer", clkevt);
}

/*
 * Setup the local clock events for a CPU.
 */
void __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	if (!twd_clk)
		twd_clk = twd_get_clock();

	if (!IS_ERR_OR_NULL(twd_clk))
		twd_timer_rate = clk_get_rate(twd_clk);
	else
		twd_calibrate_rate();

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;

	__get_cpu_var(twd_ce) = clk;

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);

	/* Make sure our local interrupt controller has this enabled */
	gic_enable_ppi(clk->irq);
}

static int __cpuinit mmp_percpu_timer_setup(struct clock_event_device *clk)
{
	u32 cpuid = hard_smp_processor_id();

	clk->features = CLOCK_EVT_FEAT_ONESHOT;
	clk->name = "apb_percpu_timer";
	clk->rating = 300;
	clk->irq = irq_map[cpuid].irq;
	clk->cpumask = cpumask_of(cpuid);
	clk->set_mode = percpu_timer_set_mode;
	clk->set_next_event = percpu_timer_set_next_event;
	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	irq_map[cpuid].irq_act.dev_id = clk;
	irq_set_affinity(clk->irq, clk->cpumask);

	clockevents_config_and_register(clk, CLOCK_TICK_RATE_32KHZ,
					MIN_DELTA, MAX_DELTA);
	enable_irq(clk->irq);

	/* the broadcast clockevent is no longer needed */
	if (cpuid == 0) {
		remove_irq(ckevt.irq, &timer_irq);
#ifndef CONFIG_TZ_HYPERVISOR
		/* reset APB and functional domain */
		__raw_writel(APBC_RST, APBC_PXA1088_TIMERS2);
#endif /* CONFIG_TZ_HYPERVISOR */
	}

	return 0;
}

static int msm_local_timer_setup(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	int err;

	evt->irq = msm_timer_irq;
	evt->name = "msm_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 200;
	evt->set_mode = msm_timer_set_mode;
	evt->set_next_event = msm_timer_set_next_event;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);

	if (msm_timer_has_ppi) {
		enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
	} else {
		err = request_irq(evt->irq, msm_timer_interrupt,
				  IRQF_TIMER | IRQF_NOBALANCING |
				  IRQF_TRIGGER_RISING, "gp_timer", evt);
		if (err)
			pr_err("request_irq failed\n");
	}

	return 0;
}

static int msm_local_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
	int err;

	evt->irq = msm_timer_irq;
	evt->name = "msm_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 200;
	evt->set_state_shutdown = msm_timer_shutdown;
	evt->set_state_oneshot = msm_timer_shutdown;
	evt->tick_resume = msm_timer_shutdown;
	evt->set_next_event = msm_timer_set_next_event;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);

	if (msm_timer_has_ppi) {
		enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
	} else {
		err = request_irq(evt->irq, msm_timer_interrupt,
				  IRQF_TIMER | IRQF_NOBALANCING |
				  IRQF_TRIGGER_RISING, "gp_timer", evt);
		if (err)
			pr_err("request_irq failed\n");
	}

	return 0;
}

/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_setup(struct clock_event_device *evt)
{
	u32 u;
	int cpu = smp_processor_id();

	u = readl(local_base + TIMER_CTRL_OFF);
	if (timer25Mhz)
		writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
	else
		writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_mode = armada_370_xp_clkevt_mode;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}

static void jz_clockevent_init(struct jz_timerevent *evt_dev)
{
	struct clock_event_device *cd = &evt_dev->clkevt;
	struct clk *ext_clk = clk_get(NULL, "ext1");

	spin_lock_init(&evt_dev->lock);

	evt_dev->rate = clk_get_rate(ext_clk) / CLKEVENT_DIV;
	clk_put(ext_clk);

	stoptimer();
	tcu_writel(CH_TCSR(CLKEVENT_CH), CSRDIV(CLKEVENT_DIV) | CSR_EXT_EN);

	evt_dev->evt_action.handler = jz_timer_interrupt;
	evt_dev->evt_action.thread_fn = NULL;
	evt_dev->evt_action.flags = IRQF_DISABLED | IRQF_TIMER;
	evt_dev->evt_action.name = "jz-timerirq";
	evt_dev->evt_action.dev_id = (void *)evt_dev;

	if (setup_irq(IRQ_TCU1, &evt_dev->evt_action) < 0) {
		pr_err("timer request irq error\n");
		BUG();
	}

	memset(cd, 0, sizeof(struct clock_event_device));
	cd->name = "jz-clockevent";
	cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	cd->shift = 10;
	cd->rating = 400;
	cd->set_mode = jz_set_mode;
	cd->set_next_event = jz_set_next_event;
	cd->irq = IRQ_TCU1;
	cd->cpumask = cpumask_of(0);

	clockevents_config_and_register(cd, evt_dev->rate, 4, 65536);

	pr_info("clockevents_config_and_register success.\n");
}

/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
	u32 clr = 0, set = 0;

	if (timer25Mhz)
		set = TIMER0_25MHZ;
	else
		clr = TIMER0_25MHZ;
	local_timer_ctrl_clrset(clr, set);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_state_shutdown = armada_370_xp_clkevt_shutdown;
	evt->set_state_periodic = armada_370_xp_clkevt_set_periodic;
	evt->set_state_oneshot = armada_370_xp_clkevt_shutdown;
	evt->tick_resume = armada_370_xp_clkevt_shutdown;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}

/*
 * ST (system timer) module supports both clockevents and clocksource.
 */
void __init at91rm9200_timer_init(void)
{
	/* For device tree enabled device: initialize here */
	of_at91rm9200_st_init();

	/* Disable all timer interrupts, and clear any pending ones */
	at91_st_write(AT91_ST_IDR,
		      AT91_ST_PITS | AT91_ST_WDOVF | AT91_ST_RTTINC | AT91_ST_ALMS);
	at91_st_read(AT91_ST_SR);

	/* Make IRQs happen for the system timer */
	setup_irq(at91rm9200_timer_irq.irq, &at91rm9200_timer_irq);

	/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
	 * directly for the clocksource and all clockevents, after adjusting
	 * its prescaler from the 1 Hz default.
	 */
	at91_st_write(AT91_ST_RTMR, 1);

	/* Setup timer clockevent, with minimum of two ticks (important!!) */
	clkevt.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
					2, AT91_ST_ALMV);

	/* register clocksource */
	clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
}

/*
 * clockevent setup for boot CPU
 */
static void __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	register_cpu_notifier(&arc_timer_cpu_nb);

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0)
		panic("clockevent: missing irq");

	ret = arc_get_timer_clk(node);
	if (ret)
		panic("clockevent: missing clk");

	evt->irq = arc_timer_irq;
	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret)
		panic("clockevent: unable to request irq\n");

	enable_percpu_irq(arc_timer_irq, 0);
}

/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_setup(struct clock_event_device *evt)
{
	u32 clr = 0, set = 0;
	int cpu = smp_processor_id();

	if (timer25Mhz)
		set = TIMER0_25MHZ;
	else
		clr = TIMER0_25MHZ;
	local_timer_ctrl_clrset(clr, set);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_mode = armada_370_xp_clkevt_mode;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}

static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}
	clk->cpumask = cpumask_of(smp_processor_id());

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	return 0;
}

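/*
 * The 0x7fffffff max_delta here (and in generic_timer_setup() above)
 * reflects the ARM architected timer's TVAL register being a signed
 * 32-bit downcounter: the largest programmable positive delta is
 * 2^31 - 1 ticks. Drivers whose counters are plain unsigned 32-bit
 * registers pass 0xffffffff instead.
 */
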
static void __init shmac_clockevent_init(struct device_node *np)
{
	void __iomem *base;
	u32 freq;
	int irq;

	base = of_iomap(np, 0);
	if (!base)
		panic("Can't remap registers");

	if (of_property_read_u32(np, "clock-frequency", &freq))
		panic("Can't read clock-frequency");

	ticks_per_jiffy = DIV_ROUND_UP(freq, HZ);

	irq = irq_of_parse_and_map(np, DEFAULT_TIMER);
	if (irq <= 0)
		panic("Can't parse IRQ");

	if (setup_irq(irq, &shmac_clock_event_irq))
		panic("Can't set up timer IRQ\n");

	clock_event_ddata.base = base;
	clockevents_config_and_register(&clock_event_ddata.evtdev,
					freq, 0xf, 0xffff);

	pr_info("SHMAC clockevent init done\n");
}

static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;
	int ret;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&p->pdev->dev, "used for clock events\n");

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n",
			p->irqaction.irq);
		return;
	}
}

static void __init omap2_gp_clockevent_init(int gptimer_id,
					    const char *fck_source,
					    const char *property)
{
	int res;

	clkev.id = gptimer_id;
	clkev.errata = omap_dm_timer_get_errata();

	/*
	 * For clock-event timers we never read the timer counter and
	 * so we are not impacted by errata i103 and i767. Therefore,
	 * we can safely ignore this errata for clock-event timers.
	 */
	__omap_dm_timer_override_errata(&clkev, OMAP_TIMER_ERRATA_I103_I767);

	res = omap_dm_timer_init_one(&clkev, fck_source, property,
				     &clockevent_gpt.name, OMAP_TIMER_POSTED);
	BUG_ON(res);

	omap2_gp_timer_irq.dev_id = &clkev;
	setup_irq(clkev.irq, &omap2_gp_timer_irq);

	__omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);

	clockevent_gpt.cpumask = cpu_possible_mask;
	clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
	clockevents_config_and_register(&clockevent_gpt, clkev.rate,
					3, /* Timer internal resynch latency */
					0xffffffff);

	pr_info("OMAP clockevent source: %s at %lu Hz\n",
		clockevent_gpt.name, clkev.rate);
}

static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;

	if (!twd_clk)
		twd_clk = twd_get_clock();

	if (!IS_ERR_OR_NULL(twd_clk))
		twd_timer_rate = clk_get_rate(twd_clk);
	else
		twd_calibrate_rate();

	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;

	this_cpu_clk = __this_cpu_ptr(twd_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
	enable_percpu_irq(clk->irq, 0);

	return 0;
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

void __init iop_init_time(unsigned long tick_rate)
{
	u32 timer_ctl;

	sched_clock_register(iop_read_sched_clock, 32, tick_rate);

	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
	iop_tick_rate = tick_rate;

	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
			IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;

	/*
	 * Set up interrupting clockevent timer 0.
	 */
	write_tmr0(timer_ctl & ~IOP_TMR_EN);
	write_tisr(1);
	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
	iop_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&iop_clockevent, tick_rate,
					0xf, 0xfffffffe);

	/*
	 * Set up free-running clocksource timer 1.
	 */
	write_trr1(0xffffffff);
	write_tcr1(0xffffffff);
	write_tmr1(timer_ctl);
	clocksource_register_hz(&iop_clocksource, tick_rate);
}

static int __init ftm_clockevent_init(unsigned long freq, int irq)
{
	int err;

	ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
	ftm_writel(~0u, priv->clkevt_base + FTM_MOD);

	ftm_reset_counter(priv->clkevt_base);

	err = setup_irq(irq, &ftm_timer_irq);
	if (err) {
		pr_err("ftm: setup irq failed: %d\n", err);
		return err;
	}

	ftm_clockevent.cpumask = cpumask_of(0);
	ftm_clockevent.irq = irq;

	clockevents_config_and_register(&ftm_clockevent,
					freq / (1 << priv->ps),
					1, 0xffff);

	ftm_counter_enable(priv->clkevt_base);

	return 0;
}

static void __init vt8500_timer_init(struct device_node *np)
{
	int timer_irq;

	regbase = of_iomap(np, 0);
	if (!regbase) {
		pr_err("%s: Missing iobase description in Device Tree\n",
		       __func__);
		return;
	}
	timer_irq = irq_of_parse_and_map(np, 0);
	if (!timer_irq) {
		pr_err("%s: Missing irq description in Device Tree\n",
		       __func__);
		return;
	}

	writel(1, regbase + TIMER_CTRL_VAL);
	writel(0xf, regbase + TIMER_STATUS_VAL);
	writel(~0, regbase + TIMER_MATCH_VAL);

	if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
		pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
		       __func__, clocksource.name);

	clockevent.cpumask = cpumask_of(0);

	if (setup_irq(timer_irq, &irq))
		pr_err("%s: setup_irq failed for %s\n", __func__,
		       clockevent.name);
	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
					MIN_OSCR_DELTA * 2, 0xf0000000);
}

void __init nmdk_timer_init(void __iomem *base, int irq)
{
	unsigned long rate;
	struct clk *clk0, *pclk0;

	mtu_base = base;

	pclk0 = clk_get_sys("mtu0", "apb_pclk");
	BUG_ON(IS_ERR(pclk0));
	BUG_ON(clk_prepare(pclk0) < 0);
	BUG_ON(clk_enable(pclk0) < 0);

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));
	BUG_ON(clk_prepare(clk0) < 0);
	BUG_ON(clk_enable(clk0) < 0);

	/*
	 * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
	 * for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
	 * At 32 MHz, the timer (with 32 bit counter) can be programmed
	 * to wake up at max 127 s ahead in time. Dividing a 2.4 MHz timer
	 * by 16 gives too low a timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		clk_prescale = MTU_CRn_PRESCALE_16;
	} else {
		clk_prescale = MTU_CRn_PRESCALE_1;
	}

	/* Cycles for periodic mode */
	nmdk_cycle = DIV_ROUND_CLOSEST(rate, HZ);

	/* Timer 0 is the free running clocksource */
	nmdk_clksrc_reset();

	if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
			rate, 200, 32, clocksource_mmio_readl_down))
		pr_err("timer: failed to initialize clock source %s\n",
		       "mtu_0");

#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
	setup_sched_clock(nomadik_read_sched_clock, 32, rate);
#endif

	/* Timer 1 is used for events, register irq and clockevents */
	setup_irq(irq, &nmdk_timer_irq);
	nmdk_clkevt.cpumask = cpumask_of(0);
	nmdk_clkevt.irq = irq;
	clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);

	mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
	mtu_delay_timer.freq = rate;
	register_current_timer_delay(&mtu_delay_timer);
}

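/*
 * Worked example for the prescale decision above, assuming HZ = 100 (not
 * stated in this snippet): a 100 MHz ux500 clock exceeds 32 MHz, so the
 * divide-by-16 path leaves rate = 6.25 MHz and nmdk_cycle =
 * DIV_ROUND_CLOSEST(6250000, 100) = 62,500 cycles per periodic tick.
 */
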
static __init void omap_init_mpu_timer(unsigned long rate)
{
	setup_irq(INT_TIMER1, &omap_mpu_timer1_irq);
	omap_mpu_timer_start(0, (rate / HZ) - 1, 1);

	clockevent_mpu_timer1.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_mpu_timer1, rate,
					1, -1);
}

static int __init epit_clockevent_init(struct clk *timer_clk)
{
	clockevent_epit.cpumask = cpumask_of(0);
	clockevents_config_and_register(&clockevent_epit,
					clk_get_rate(timer_clk),
					0x800, 0xfffffffe);

	return 0;
}

static void __init ttc_setup_clockevent(struct clk *clk,
					void __iomem *base, u32 irq)
{
	struct ttc_timer_clockevent *ttcce;
	int err;

	ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
	if (WARN_ON(!ttcce))
		return;

	ttcce->ttc.clk = clk;

	err = clk_prepare_enable(ttcce->ttc.clk);
	if (WARN_ON(err)) {
		kfree(ttcce);
		return;
	}

	ttcce->ttc.clk_rate_change_nb.notifier_call =
		ttc_rate_change_clockevent_cb;
	ttcce->ttc.clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(ttcce->ttc.clk,
				  &ttcce->ttc.clk_rate_change_nb))
		pr_warn("Unable to register clock notifier.\n");
	ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);

	ttcce->ttc.base_addr = base;
	ttcce->ce.name = "ttc_clockevent";
	ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ttcce->ce.set_next_event = ttc_set_next_event;
	ttcce->ce.set_mode = ttc_set_mode;
	ttcce->ce.rating = 200;
	ttcce->ce.irq = irq;
	ttcce->ce.cpumask = cpu_possible_mask;

	/*
	 * Setup the clock event timer to be an interval timer which
	 * is prescaled by 32 using the interval interrupt. Leave it
	 * disabled for now.
	 */
	__raw_writel(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
	__raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
		     ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
	__raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);

	err = request_irq(irq, ttc_clock_event_interrupt,
			  IRQF_DISABLED | IRQF_TIMER, ttcce->ce.name, ttcce);
	if (WARN_ON(err)) {
		kfree(ttcce);
		return;
	}

	clockevents_config_and_register(&ttcce->ce,
					ttcce->ttc.freq / PRESCALE,
					1, 0xfffe);
}

static int arc_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
	enable_percpu_irq(arc_timer_irq, 0);

	return 0;
}

void __init sunxi_timer_init(void)
{
	struct device_node *node;
	unsigned long rate = 0;
	struct clk *clk;
	int ret, irq;
	u32 val;

	node = of_find_matching_node(NULL, sunxi_timer_dt_ids);
	if (!node)
		panic("No sunxi timer node");

	timer_base = of_iomap(node, 0);
	if (!timer_base)
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	of_clk_init(NULL);

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");

	rate = clk_get_rate(clk);

	writel(rate / (TIMER_SCAL * HZ),
	       timer_base + TIMER0_INTVAL_REG);

	/* set clock source to HOSC, 16 pre-division */
	val = readl(timer_base + TIMER0_CTL_REG);
	val &= ~(0x07 << 4);
	val &= ~(0x03 << 2);
	val |= (4 << 4) | (1 << 2);
	writel(val, timer_base + TIMER0_CTL_REG);

	/* set mode to auto reload */
	val = readl(timer_base + TIMER0_CTL_REG);
	writel(val | TIMER0_CTL_AUTORELOAD, timer_base + TIMER0_CTL_REG);

	ret = setup_irq(irq, &sunxi_timer_irq);
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);

	/* Enable timer0 interrupt */
	val = readl(timer_base + TIMER_CTL_REG);
	writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);

	sunxi_clockevent.cpumask = cpumask_of(0);

	clockevents_config_and_register(&sunxi_clockevent, rate / TIMER_SCAL,
					0x1, 0xff);
}

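/*
 * A minimal sketch distilling the pattern every setup routine above
 * shares: populate a struct clock_event_device, then let
 * clockevents_config_and_register() derive mult/shift from the input
 * frequency and the min/max programmable delta (both in timer ticks).
 * The my_timer_* callbacks and MY_TIMER_HZ rate are hypothetical
 * placeholders, not taken from any driver in this collection.
 */
static struct clock_event_device my_clkevt = {
	.name			= "my_timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 200,
	.set_next_event		= my_timer_set_next_event,	/* program next tick */
	.set_state_shutdown	= my_timer_shutdown,		/* quiesce hardware */
};

static int __init my_timer_init(void)
{
	my_clkevt.cpumask = cpumask_of(0);	/* UP-style: tied to CPU 0 */

	/* frequency in Hz, then min and max delta in counter ticks */
	clockevents_config_and_register(&my_clkevt, MY_TIMER_HZ,
					2, 0xffffffff);
	return 0;
}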