/*
 * Register the MFGPT counter as a clocksource.  The MFGPT hardware does
 * not scale across CPUs, so on SMP configurations registration is
 * skipped and 0 (success) is returned anyway.
 */
int __init init_mfgpt_clocksource(void)
{
	/* Uniprocessor only: MFGPT does not scale! */
	if (num_possible_cpus() == 1)
		return clocksource_register_hz(&clocksource_mfgpt,
					       MFGPT_TICK_RATE);

	return 0;
}
/*
 * Probe and enable the SCx200 high-resolution timer and register it as
 * a clocksource.  The timer runs at either 1 MHz or 27 MHz depending on
 * the "mhz27" parameter; "ppm" applies a frequency correction.
 *
 * Returns 0 on success, -ENODEV when the configuration block is absent
 * or the timer I/O region cannot be reserved.
 */
static int __init init_hrt_clocksource(void)
{
	u32 hz;
	u8 tmcnfg = HR_TMEN;

	if (!scx200_cb_present())
		return -ENODEV;

	if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,
			    SCx200_TIMER_SIZE,
			    "NatSemi SCx200 High-Resolution Timer")) {
		pr_warn("unable to lock timer region\n");
		return -ENODEV;
	}

	/* Select the 27 MHz input clock when requested, then enable. */
	if (mhz27)
		tmcnfg |= HR_TMCLKSEL;
	outb(tmcnfg, scx200_cb_base + SCx200_TMCNFG_OFFSET);

	/* Base rate plus ppm correction, scaled by 27 in 27 MHz mode. */
	hz = HRT_FREQ + ppm;
	if (mhz27)
		hz *= 27;

	pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n",
		mhz27 ? "27" : "1", ppm);

	return clocksource_register_hz(&cs_hrt, hz);
}
/*
 * Register the architected timer counter as clocksource, cycle counter
 * and sched_clock source.
 *
 * @type: bitmask of available counter interfaces; ARCH_CP15_TIMER
 *        selects the coprocessor-based counter, otherwise the MMIO
 *        counter is used.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		arch_timer_read_counter = arch_counter_get_cntvct_cp15;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;

		/* If the clocksource name is "arch_sys_counter" the
		 * VDSO will attempt to read the CP15-based counter.
		 * Ensure this does not happen when CP15-based
		 * counter is not available.
		 */
		clocksource_counter.name = "arch_mem_counter";
	}

	/* Snapshot the counter before handing it to the timekeeping core. */
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	/* Reuse mult/shift computed during clocksource registration. */
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
/*
 * Set up timer channel @ch as the free-running system clocksource,
 * register it, and start it counting over its full range.
 * Always returns 0.
 */
static int __init timer_source_init(int ch)
{
	struct clocksource *cs = &tm_source_clk;
	struct timer_info *info = tm_source_info();

	info->channel = ch;
	info->irqno = -1;	/* clocksource path: no interrupt used */

	/* pick mux/prescaler for the requested source rate */
	timer_clock_select(info, TIMER_CLOCK_SOURCE_HZ);

	/*
	 * register timer source
	 */
	clocksource_register_hz(cs, info->rate);

	pr_debug("timer.%d: source shift =%u \n", ch, cs->shift);
	pr_debug("timer.%d: source mult =%u \n", ch, cs->mult);

	/*
	 * source timer run
	 */
	info->tcount = -1UL;	/* full-range count */

	/* reprogram and start the channel; order matters for the hardware */
	timer_reset(ch);
	timer_stop (ch, 0);
	timer_clock(ch, info->tmmux, info->prescale);
	timer_count(ch, info->tcount + 1);	/* -1UL + 1 wraps to 0 = full period */
	timer_start(ch, 0);

	/* stash clock-generator settings for later use by the system code */
	__timer_sys_mux_val = info->tmmux;
	__timer_sys_scl_val = info->prescale;
	__timer_sys_clk_clr = readl(TIMER_SYS_CLKGEN + CLKGEN_CLR);

	printk("timer.%d: source, %9lu(HZ:%d), mult:%u\n",
	       ch, info->rate, HZ, cs->mult);
	return 0;
}
/*
 * Register the Ralink system-tick counter as a clocksource at a fixed
 * 50 kHz rate.  Always reports success; a registration failure is
 * deliberately ignored (best-effort, as in the original setup).
 */
static int ra_systick_clocksource_init(void)
{
	int err;

	/* outrank most other clocksources */
	ra_systick_clocksource.rating = 350;

	err = clocksource_register_hz(&ra_systick_clocksource, 50000);
	(void)err;	/* intentionally ignored */

	return 0;
}
/*
 * Core IOP timer setup: sched_clock, interrupting clockevent on timer 0
 * and a free-running clocksource on timer 1, all driven at @tick_rate Hz.
 */
void __init iop_init_time(unsigned long tick_rate)
{
	u32 timer_ctl;

	sched_clock_register(iop_read_sched_clock, 32, tick_rate);

	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
	iop_tick_rate = tick_rate;

	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
			IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;

	/*
	 * Set up interrupting clockevent timer 0.
	 */
	write_tmr0(timer_ctl & ~IOP_TMR_EN);	/* program but keep disabled */
	write_tisr(1);				/* ack any stale timer interrupt */
	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
	iop_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&iop_clockevent, tick_rate,
					0xf, 0xfffffffe);

	/*
	 * Set up free-running clocksource timer 1.
	 */
	write_trr1(0xffffffff);		/* reload with full 32-bit range */
	write_tcr1(0xffffffff);
	write_tmr1(timer_ctl);		/* enable last, once programmed */
	clocksource_register_hz(&iop_clocksource, tick_rate);
}
/*
 * Start the Exynos4 MCT free-running counter from zero and register it
 * as a clocksource; a registration failure is fatal.
 */
static void __init exynos4_clocksource_init(void)
{
	int err;

	exynos4_mct_frc_start(0, 0);

	err = clocksource_register_hz(&mct_frc, clk_rate);
	if (err)
		panic("%s: can't register clocksource\n", mct_frc.name);
}
/*
 * Set up the CPU cycle counter as clocksource and the COMPARE register
 * as the tick clockevent.  The counter rate comes from the boot CPU
 * clock.  Failures are logged but boot continues.
 */
void __init time_init(void)
{
	unsigned long counter_hz;
	int ret;

	/* figure rate for counter */
	counter_hz = clk_get_rate(boot_cpu_data.clk);
	ret = clocksource_register_hz(&counter, counter_hz);
	if (ret)
		pr_debug("timer: could not register clocksource: %d\n", ret);

	/* setup COMPARE clockevent */
	comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
	comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
	/* +1 guards against the delta rounding down too far */
	comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
	comparator.cpumask = cpumask_of(0);

	sysreg_write(COMPARE, 0);	/* disarm until the first event is programmed */
	timer_irqaction.dev_id = &comparator;
	ret = setup_irq(0, &timer_irqaction);
	if (ret)
		pr_debug("timer: could not request IRQ 0: %d\n", ret);
	else {
		clockevents_register_device(&comparator);
		/* rate reported in MHz, rounded to the nearest kHz */
		pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
			((counter_hz + 500) / 1000) / 1000,
			((counter_hz + 500) / 1000) % 1000);
	}
}
/*
 * Probe the ARC local 64-bit RTC counter and register it as a
 * clocksource.  The counter is per-CPU, so it is rejected on SMP.
 *
 * Returns 0 on success, -ENXIO when the counter is absent, -EINVAL on
 * SMP, or the error from clock lookup.
 */
static int __init arc_cs_setup_rtc(struct device_node *node)
{
	struct bcr_timer bcr;
	int ret;

	READ_BCR(ARC_REG_TIMERS_BCR, bcr);
	if (!bcr.rtc) {
		pr_warn("Local-64-bit-Ctr clocksource not detected");
		return -ENXIO;
	}

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP)) {
		pr_warn("Local-64-bit-Ctr not usable in SMP");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	/* start the counter before handing it to the clocksource core */
	write_aux_reg(AUX_RTC_CTRL, 1);

	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
/*
 * Install the Au1x00 32.768 kHz counter1 (RTC) as clocksource and
 * rtcmatch2 as the tick clockevent.
 *
 * @m2int: interrupt number of the rtcmatch2 compare interrupt.
 * Returns 0 on success, -1 when the 32 kHz clock is absent or the
 * counter registers never become ready within the timeout.
 */
static int __init alchemy_time_init(unsigned int m2int)
{
	struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
	unsigned long t;

	au1x_rtcmatch2_clockdev.irq = m2int;

	/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
	 * has been detected. If so install the rtcmatch2 clocksource,
	 * otherwise don't bother. Note that both bits being set is by
	 * no means a definite guarantee that the counters actually work
	 * (the 32S bit seems to be stuck set to 1 once a single clock-
	 * edge is detected, hence the timeouts).
	 */
	if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
		goto cntr_err;

	/*
	 * setup counter 1 (RTC) to tick at full speed
	 */
	t = 0xffffff;	/* bounded busy-wait for T1S to clear */
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	au_writel(0, SYS_RTCTRIM);	/* 32.768 kHz */
	au_sync();

	t = 0xffffff;	/* wait for the trim write to settle (C1S clears) */
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	au_writel(0, SYS_RTCWRITE);	/* zero the counter */
	au_sync();

	t = 0xffffff;	/* wait for the counter write to settle */
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	/* register counter1 clocksource and event device */
	clocksource_register_hz(&au1x_counter1_clocksource, 32768);

	cd->shift = 32;
	cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
	cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(8, cd); /* ~0.25ms */
	clockevents_register_device(cd);
	setup_irq(m2int, &au1x_rtcmatch2_irqaction);

	printk(KERN_INFO "Alchemy clocksource installed\n");

	return 0;

cntr_err:
	return -1;
}
/*
 * Register the BCM2709 system timer (STC) as a clocksource; failure is
 * logged but not fatal.
 */
static void __init bcm2709_clocksource_init(void)
{
	int err = clocksource_register_hz(&clocksource_stc, STC_FREQ_HZ);

	if (err)
		printk(KERN_ERR "timer: failed to initialize clock "
		       "source %s\n", clocksource_stc.name);
}
/*
 * Set up both clocksource and clockevent support.
 */
static void __init at91sam926x_pit_init(void)
{
	unsigned long pit_rate;
	unsigned bits;

	/*
	 * Use our actual MCK to figure out how many MCK/16 ticks per
	 * 1/HZ period (instead of a compile-time constant LATCH).
	 */
	pit_rate = clk_get_rate(clk_get(NULL, "mck")) / 16;
	pit_cycle = (pit_rate + HZ/2) / HZ;	/* rounded ticks per jiffy */
	/* the per-jiffy count must fit the PIV field */
	WARN_ON(((pit_cycle - 1) & ~AT91_PIT_PIV) != 0);

	/* Initialize and enable the timer */
	at91sam926x_pit_reset();

	/*
	 * Register clocksource. The high order bits of PIV are unused,
	 * so this isn't a 32-bit counter unless we get clockevent irqs.
	 */
	bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */;
	pit_clk.mask = CLOCKSOURCE_MASK(bits);
	clocksource_register_hz(&pit_clk, pit_rate);

	/* Set up irq handler */
	setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);

	/* Set up and register clockevents */
	pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
	pit_clkevt.cpumask = cpumask_of(0);
	clockevents_register_device(&pit_clkevt);
}
/*
 * Probe the VT8500 timer from the device tree: map the registers, set
 * up the free-running clocksource and the interrupting clockevent.
 * Errors are logged; on missing DT resources the function bails out.
 */
static void __init vt8500_timer_init(struct device_node *np)
{
	int timer_irq;

	regbase = of_iomap(np, 0);
	if (!regbase) {
		pr_err("%s: Missing iobase description in Device Tree\n",
								__func__);
		return;
	}
	timer_irq = irq_of_parse_and_map(np, 0);
	if (!timer_irq) {
		pr_err("%s: Missing irq description in Device Tree\n",
								__func__);
		return;
	}

	/* NOTE(review): raw register bring-up sequence — presumably
	 * enable, ack status, and park the match value far away; confirm
	 * against the VT8500 timer documentation. */
	writel(1, regbase + TIMER_CTRL_VAL);
	writel(0xf, regbase + TIMER_STATUS_VAL);
	writel(~0, regbase + TIMER_MATCH_VAL);

	if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
		pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
					__func__, clocksource.name);

	clockevent.cpumask = cpumask_of(0);

	if (setup_irq(timer_irq, &irq))
		pr_err("%s: setup_irq failed for %s\n",
					__func__, clockevent.name);
	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
					4, 0xf0000000);
}
/*
 * Boot-time setup of the Meta generic timer: register the clocksource,
 * hook the trigger interrupt, configure the boot CPU's timer and
 * arrange for secondary CPUs via a CPU notifier.  Always returns 0.
 */
int __init metag_generic_timer_init(void)
{
	/*
	 * On Meta 2 SoCs, the actual frequency of the timer is based on the
	 * Meta core clock speed divided by an integer, so it is only
	 * approximately 1MHz. Calculating the real frequency here drastically
	 * reduces clock skew on these SoCs.
	 */
#ifdef CONFIG_METAG_META21
	hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
#endif
	pr_info("Timer frequency: %u Hz\n", hwtimer_freq);

	clocksource_register_hz(&clocksource_metag, hwtimer_freq);

	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);

	/* Configure timer on boot CPU */
	arch_timer_setup(smp_processor_id());

	/* Hook cpu boot to configure other CPU's timers */
	register_cpu_notifier(&arch_timer_cpu_nb);

	return 0;
}
/*
 * Initialise an OMAP dual-mode GP timer as the system clocksource and
 * sched_clock source.
 *
 * @gptimer_id: which GP timer instance to claim
 * @fck_source: name of the functional clock to drive it from
 * @property:   DT property used to select a suitable timer node
 */
static void __init omap2_gptimer_clocksource_init(int gptimer_id,
						  const char *fck_source,
						  const char *property)
{
	int res;

	clksrc.id = gptimer_id;
	clksrc.errata = omap_dm_timer_get_errata();

	/* AM43xx apparently needs explicit suspend/resume handling for
	 * this timer — hooks installed only on that SoC. */
	if (soc_is_am43xx()) {
		clocksource_gpt.suspend = omap2_gptimer_clksrc_suspend;
		clocksource_gpt.resume = omap2_gptimer_clksrc_resume;
	}

	res = omap_dm_timer_init_one(&clksrc, fck_source, property,
				     &clocksource_gpt.name,
				     OMAP_TIMER_NONPOSTED);
	BUG_ON(res);	/* no usable timer: the system cannot keep time */

	/* start free-running with auto-reload from 0 */
	__omap_dm_timer_load_start(&clksrc,
				   OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0,
				   OMAP_TIMER_NONPOSTED);
	sched_clock_register(dmtimer_read_sched_clock, 32, clksrc.rate);

	if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
		pr_err("Could not register clocksource %s\n",
		       clocksource_gpt.name);
	else
		pr_info("OMAP clocksource: %s at %lu Hz\n",
			clocksource_gpt.name, clksrc.rate);
}
/*
 * ST (system timer) module supports both clockevents and clocksource.
 */
void __init at91rm9200_timer_init(void)
{
	/* For device tree enabled device: initialize here */
	of_at91rm9200_st_init();

	/* Disable all timer interrupts, and clear any pending ones */
	at91_st_write(AT91_ST_IDR,
		AT91_ST_PITS | AT91_ST_WDOVF | AT91_ST_RTTINC | AT91_ST_ALMS);
	at91_st_read(AT91_ST_SR);	/* status read clears pending bits */

	/* Make IRQs happen for the system timer */
	setup_irq(at91rm9200_timer_irq.irq, &at91rm9200_timer_irq);

	/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
	 * directly for the clocksource and all clockevents, after adjusting
	 * its prescaler from the 1 Hz default.
	 */
	at91_st_write(AT91_ST_RTMR, 1);

	/* Setup timer clockevent, with minimum of two ticks (important!!) */
	clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
	clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
	clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
	clkevt.cpumask = cpumask_of(0);
	clockevents_register_device(&clkevt);

	/* register clocksource */
	clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
}
/*
 * Register the MIPS R4k CP0 count register as a clocksource.
 * Returns -ENXIO when the counter or its frequency is unavailable.
 */
int __init init_r4k_clocksource(void)
{
	if (!cpu_has_counter)
		return -ENXIO;
	if (!mips_hpt_frequency)
		return -ENXIO;

	/* Calculate a somewhat reasonable rating value */
	clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

#if defined(CONFIG_RALINK_SYSTICK_COUNTER)
	clocksource_register_hz(&clocksource_mips, 50000);
#else
	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
#endif

	return 0;
}
/*
 * Core IOP timer setup (legacy variant): sched_clock, interrupting
 * clockevent on timer 0 and a free-running clocksource on timer 1, all
 * driven at @tick_rate Hz.
 */
void __init iop_init_time(unsigned long tick_rate)
{
	u32 timer_ctl;

	setup_sched_clock(iop_read_sched_clock, 32, tick_rate);

	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
	iop_tick_rate = tick_rate;

	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
			IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;

	/*
	 * Set up interrupting clockevent timer 0.
	 */
	write_tmr0(timer_ctl & ~IOP_TMR_EN);	/* program but keep disabled */
	write_tisr(1);				/* ack any stale timer interrupt */
	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
	clockevents_calc_mult_shift(&iop_clockevent,
				    tick_rate, IOP_MIN_RANGE);
	iop_clockevent.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &iop_clockevent);
	iop_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &iop_clockevent);
	iop_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&iop_clockevent);

	/*
	 * Set up free-running clocksource timer 1.
	 */
	write_trr1(0xffffffff);		/* reload with full 32-bit range */
	write_tcr1(0xffffffff);
	write_tmr1(timer_ctl);		/* enable last, once programmed */
	clocksource_register_hz(&iop_clocksource, tick_rate);
}
/*
 * Probe an H8/300 16-bit timer channel from the device tree and
 * register it as a clocksource driven at clk/8.
 *
 * Returns 0 on success or a negative errno; resources are released on
 * every failure path via the goto cleanup chain.
 */
static int __init h8300_16timer_init(struct device_node *node)
{
	void __iomem *base[2];
	int ret, irq;
	/*
	 * Fix: previously 'ch' was left uninitialized and the return of
	 * of_property_read_u32() was ignored, so a missing
	 * "renesas,channel" property meant reading an indeterminate
	 * value (undefined behavior).  Default to channel 0 instead.
	 */
	unsigned int ch = 0;
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("failed to get clock for clocksource\n");
		return PTR_ERR(clk);
	}

	ret = -ENXIO;
	base[REG_CH] = of_iomap(node, 0);
	if (!base[REG_CH]) {
		pr_err("failed to map registers for clocksource\n");
		goto free_clk;
	}

	base[REG_COMM] = of_iomap(node, 1);
	if (!base[REG_COMM]) {
		pr_err("failed to map registers for clocksource\n");
		goto unmap_ch;
	}

	ret = -EINVAL;
	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		pr_err("failed to get irq for clockevent\n");
		goto unmap_comm;
	}

	of_property_read_u32(node, "renesas,channel", &ch);

	timer16_priv.mapbase = base[REG_CH];
	timer16_priv.mapcommon = base[REG_COMM];
	timer16_priv.enb = ch;
	timer16_priv.ovf = ch;
	timer16_priv.ovie = 4 + ch;	/* overflow interrupt-enable bit */

	ret = request_irq(irq, timer16_interrupt,
			  IRQF_TIMER, timer16_priv.cs.name, &timer16_priv);
	if (ret < 0) {
		pr_err("failed to request irq %d of clocksource\n", irq);
		goto unmap_comm;
	}

	/* counter ticks at the peripheral clock divided by 8 */
	clocksource_register_hz(&timer16_priv.cs, clk_get_rate(clk) / 8);

	return 0;

unmap_comm:
	iounmap(base[REG_COMM]);
unmap_ch:
	iounmap(base[REG_CH]);
free_clk:
	clk_put(clk);
	return ret;
}
/*
 * Program ColdFire DMA timer 0 as a free-running clocksource at the
 * system DMA clock divided by 16, and register it.
 */
static int __init init_cf_dt_clocksource(void)
{
	/* NOTE(review): raw register bring-up — extension/event registers
	 * cleared, reference register zeroed, then the timer enabled with
	 * the /16 clock divider; confirm bit meanings against the ColdFire
	 * DMA timer documentation. */
	__raw_writeb(0x00, DTXMR0);
	__raw_writeb(0x00, DTER0);
	__raw_writel(0x00000000, DTRR0);
	__raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0);
	return clocksource_register_hz(&clocksource_cf_dt, DMA_FREQ);
}
/*
 * Nomadik MTU setup: timer 0 becomes the free-running clocksource and
 * sched_clock source, timer 1 the one-shot clockevent.
 */
void __init nmdk_timer_init(void)
{
	unsigned long rate;
	struct clk *clk0;
	u32 cr = MTU_CRn_32BITS;

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));	/* no MTU clock: cannot keep time at all */

	clk_enable(clk0);

	/*
	 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
	 * for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
	 * At 32 MHz, the timer (with 32 bit counter) can be programmed
	 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
	 * with 16 gives too low timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		cr |= MTU_CRn_PRESCALE_16;
	} else {
		cr |= MTU_CRn_PRESCALE_1;
	}

	/* Timer 0 is the free running clocksource */
	writel(cr, mtu_base + MTU_CR(0));	/* program while disabled */
	writel(0, mtu_base + MTU_LR(0));
	writel(0, mtu_base + MTU_BGLR(0));
	writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));	/* then enable */

	/* Now the clock source is ready */
	nmdk_clksrc.read = nmdk_read_timer;

	if (clocksource_register_hz(&nmdk_clksrc, rate))
		pr_err("timer: failed to initialize clock source %s\n",
		       nmdk_clksrc.name);

	init_sched_clock(&cd, nomadik_update_sched_clock, 32, rate);

	/* Timer 1 is used for events */

	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);

	writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */

	nmdk_clkevt.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
	nmdk_clkevt.min_delta_ns =
		clockevent_delta2ns(0x00000002, &nmdk_clkevt);
	nmdk_clkevt.cpumask = cpumask_of(0);

	/* Register irq and clockevents */
	setup_irq(IRQ_MTU0, &nmdk_timer_irq);
	clockevents_register_device(&nmdk_clkevt);
}
/*
 * Set up channel 1 of a sun5i timer block as a free-running
 * clocksource.  A clk rate-change notifier keeps the registered rate in
 * sync with the parent clock.
 *
 * @node: DT node (used for the clocksource name)
 * @base: mapped register base
 * @clk:  parent clock (prepared/enabled here)
 * @irq:  unused by the clocksource path
 *
 * Returns 0 on success or a negative errno; all acquired resources are
 * released on failure via the goto chain.
 */
static int __init sun5i_setup_clocksource(struct device_node *node,
					  void __iomem *base,
					  struct clk *clk, int irq)
{
	struct sun5i_timer_clksrc *cs;
	unsigned long rate;
	int ret;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Couldn't enable parent clock\n");
		goto err_free;
	}

	rate = clk_get_rate(clk);

	cs->timer.base = base;
	cs->timer.clk = clk;
	/* track parent-clock rate changes for this clocksource */
	cs->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clksrc;
	cs->timer.clk_rate_cb.next = NULL;

	ret = clk_notifier_register(clk, &cs->timer.clk_rate_cb);
	if (ret) {
		pr_err("Unable to register clock notifier.\n");
		goto err_disable_clk;
	}

	/* channel 1: full interval, auto-reload, enabled */
	writel(~0, base + TIMER_INTVAL_LO_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
	       base + TIMER_CTL_REG(1));

	cs->clksrc.name = node->name;
	cs->clksrc.rating = 340;
	cs->clksrc.read = sun5i_clksrc_read;
	cs->clksrc.mask = CLOCKSOURCE_MASK(32);
	cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;

	ret = clocksource_register_hz(&cs->clksrc, rate);
	if (ret) {
		pr_err("Couldn't register clock source.\n");
		goto err_remove_notifier;
	}

	return 0;

err_remove_notifier:
	clk_notifier_unregister(clk, &cs->timer.clk_rate_cb);
err_disable_clk:
	clk_disable_unprepare(clk);
err_free:
	kfree(cs);
	return ret;
}
/*
 * initialize the kernel jiffy timer source
 *
 * Fix: the original returned early on several error paths while leaving
 * the clock reference held and prepared/enabled, and leaked the iomap.
 * Resources acquired before clocksource registration are now released
 * on failure.  Failures after the clocksource/sched_clock are live
 * cannot be safely unwound and keep the original behavior.
 */
static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
	unsigned long rate;
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Failed to get clock");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Failed to enable clock");
		goto err_put_clk;
	}

	rate = clk_get_rate(clk);
	/* the timer divider needs an exact multiple of the base frequency */
	if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
		pr_err("Invalid clock rate");
		ret = -EINVAL;
		goto err_disable_clk;
	}

	sirfsoc_timer_base = of_iomap(np, 0);
	if (!sirfsoc_timer_base) {
		pr_err("unable to map timer cpu registers\n");
		ret = -ENXIO;
		goto err_disable_clk;
	}

	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);

	/* divide down to PRIMA2_CLOCK_FREQ and clear the 64-bit counter */
	writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
		sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);

	ret = clocksource_register_hz(&sirfsoc_clocksource,
				      PRIMA2_CLOCK_FREQ);
	if (ret) {
		pr_err("Failed to register clocksource");
		goto err_unmap;
	}

	sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);

	ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
	if (ret) {
		pr_err("Failed to setup irq");
		/* clocksource/sched_clock are live; do not unwind them */
		return ret;
	}

	sirfsoc_clockevent_init();

	return 0;

err_unmap:
	iounmap(sirfsoc_timer_base);
err_disable_clk:
	clk_disable_unprepare(clk);
err_put_clk:
	clk_put(clk);
	return ret;
}
/*
 * Set up GP timer 0 and register it as a clocksource at the system
 * clock rate; registration failure is fatal.
 */
static int __init bfin_cs_gptimer0_init(void)
{
	int err;

	setup_gptimer0();

	err = clocksource_register_hz(&bfin_cs_gptimer0, get_sclk());
	if (err)
		panic("failed to register clocksource");

	return 0;
}
/**
 * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock
 *
 * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to
 * 1 / (27,000,000/8) seconds.
 */
static void __init powertv_tim_c_clocksource_init(void)
{
	const unsigned long tick_hz = 27000000 / 8;	/* TIM_C rate */

	clocksource_tim_c.rating = 200;

	clocksource_register_hz(&clocksource_tim_c, tick_hz);
	tim_c = (struct tim_c *)asic_reg_addr(tim_ch);
}
/*
 * Initialise the Tegra20 RTC from the device tree and register it as
 * the suspend-time clocksource (the RTC ticks at 1 kHz).
 */
static int __init tegra20_init_rtc(struct device_node *np)
{
	int err = timer_of_init(np, &suspend_rtc_to);

	if (err)
		return err;

	return clocksource_register_hz(&suspend_rtc_clocksource, 1000);
}
/*
 * Probe and set up the C-SKY per-CPU multiprocessor timer as both
 * clockevent and clocksource.
 *
 * Fixes: the rollback path for a failed timer_of_init() previously
 * leaked the per-CPU IRQ and discarded the real error code (always
 * returning -EINVAL); the IRQ is now released and @ret propagated.
 */
static int __init csky_mptimer_init(struct device_node *np)
{
	int ret, cpu, cpu_rollback;
	struct timer_of *to = NULL;

	/*
	 * Csky_mptimer is designed for C-SKY SMP multi-processors and
	 * every core has it's own private irq and regs for clkevt and
	 * clksrc.
	 *
	 * The regs is accessed by cpu instruction: mfcr/mtcr instead of
	 * mmio map style. So we needn't mmio-address in dts, but we still
	 * need to give clk and irq number.
	 *
	 * We use private irq for the mptimer and irq number is the same
	 * for every core. So we use request_percpu_irq() in timer_of_init.
	 */
	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
	if (csky_mptimer_irq <= 0)
		return -EINVAL;

	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
				 "csky_mp_timer", &csky_to);
	if (ret)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		to = per_cpu_ptr(&csky_to, cpu);
		ret = timer_of_init(np, to);
		if (ret)
			goto rollback;
	}

	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
				"clockevents/csky/timer:starting",
				csky_mptimer_starting_cpu,
				csky_mptimer_dying_cpu);
	if (ret)
		return -EINVAL;	/* clocksource is live: do not unwind */

	return 0;

rollback:
	/* undo every per-CPU timer initialised before the failure */
	for_each_possible_cpu(cpu_rollback) {
		if (cpu_rollback == cpu)
			break;

		to = per_cpu_ptr(&csky_to, cpu_rollback);
		timer_of_cleanup(to);
	}
	/* previously leaked: release the per-CPU timer interrupt too */
	free_percpu_irq(csky_mptimer_irq, &csky_to);
	return ret;
}
/*
 * Set up timer interrupt.
 */
void __init footbridge_timer_init(void)
{
	struct clock_event_device *ce = &ckevt_dc21285;
	unsigned long rate = (mem_fclk_21285 + 8) / 16;	/* fclk/16, rounded */

	/* free-running counter as the system clocksource */
	clocksource_register_hz(&cksrc_dc21285, rate);

	setup_irq(ce->irq, &footbridge_timer_irq);

	ce->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
}
/*
 * Set up a free-running timer as clocksource and sched_clock source at
 * a fixed 66666666 Hz (66.67 MHz) rate.
 */
static void __init ns2816_clocksource_init(void)
{
	/* setup timer 0 as free-running clocksource */
	/* NOTE(review): the comment above says "timer 0" but the register
	 * base is _timer3_va_base — confirm which timer this really is. */
	writel(0, _timer3_va_base + TIMERX_CTRL);	/* stop before reprogramming */
	writel(0xffffffff, _timer3_va_base + TIMERX_LOAD);	/* full range */
	writel(3, _timer3_va_base + TIMERX_CTRL);	/* re-enable */

	init_sched_clock(&cd, ns2816_update_sched_clock, 32, 66666666);
	clocksource_register_hz(&clocksource_ns2816, 66666666);
}
/*
 * Register the Ralink system-tick counter as a clocksource at a fixed
 * 50 kHz rate.  Kernels newer than 3.10.13 provide
 * clocksource_register_hz(); older ones fall back to the legacy
 * set_clock + register pair.  Always returns 0.
 */
int __init ra_systick_clocksource_init(void)
{
	/* outrank most other clocksources */
	ra_systick_clocksource.rating = 350;
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,13)
	clocksource_register_hz(&ra_systick_clocksource, 50000);
#else
	clocksource_set_clock(&ra_systick_clocksource, 50000);
	clocksource_register(&ra_systick_clocksource);
#endif
	return 0;
}