/*
 * omap_init_clocksource_32k - register the OMAP 32 kHz sync counter.
 *
 * Picks the per-SoC physical base of the 32K synchronized counter,
 * maps it, and registers it as an mmio clocksource and as the
 * sched_clock source.  Also precomputes mult/shift for the persistent
 * clock conversion.
 *
 * Returns 0 on success or when the CPU class is not handled at all;
 * -ENODEV when no base matches or the mapping fails.
 */
int __init omap_init_clocksource_32k(void)
{
	static char err[] __initdata =
		KERN_ERR "%s: can't register clocksource!\n";

	if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
		u32 pbase;
		unsigned long size = SZ_4K;
		void __iomem *base;
		struct clk *sync_32k_ick;

		/* Select the counter's physical base per SoC variant */
		if (cpu_is_omap16xx()) {
			pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
			size = SZ_1K;
		} else if (cpu_is_omap2420())
			pbase = OMAP2420_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap2430())
			pbase = OMAP2430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap34xx())
			pbase = OMAP3430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap44xx())
			pbase = OMAP4430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap54xx())
			pbase = OMAP54XX_32KSYNCT_BASE + 0x30;
		else
			return -ENODEV;

		/* For this to work we must have a static mapping in io.c for this area */
		base = ioremap(pbase, size);
		if (!base)
			return -ENODEV;

		/* Best-effort: enable the sync interface clock if present */
		sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
		if (!IS_ERR(sync_32k_ick))
			clk_enable(sync_32k_ick);

		timer_32k_base = base;

		/*
		 * 120000 rough estimate from the calculations in
		 * __clocksource_updatefreq_scale.
		 */
		clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
				       32768, NSEC_PER_SEC, 120000);

		if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
					  clocksource_mmio_readl_up))
			printk(err, "32k_counter");

		setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
	}
	return 0;
}
/* ****************************************************************** */

/*
 * __gptimer_clocksource_init - run SOURCE_TIMER as a free-running
 * down-counter and register it as an mmio clocksource named @name
 * ticking at @hz.
 */
static void __gptimer_clocksource_init(const char *name, unsigned long hz)
{
	/* disable irq since it's just a read source */
	__raw_writel(0, TIMER_INT(SOURCE_TIMER));

	/* Stop, load the full range, then restart in periodic mode */
	__gptimer_ctl(SOURCE_TIMER, TIMER_DISABLE, PERIOD_MODE);
	__raw_writel(ULONG_MAX, TIMER_LOAD(SOURCE_TIMER));
	__gptimer_ctl(SOURCE_TIMER, TIMER_ENABLE, PERIOD_MODE);

	clocksource_mmio_init(TIMER_VALUE(SOURCE_TIMER), name,
			      hz, 300, 32, clocksource_mmio_readl_down);
}
/*
 * tegra_init_timer - board-level timer setup for Tegra SoCs.
 *
 * Enables the timer and RTC clocks, runs the SoC-specific timer init,
 * registers the 1 MHz microsecond counter as sched_clock and as an
 * mmio clocksource, wires up the timer interrupt and registers the
 * clockevent device.  Any registration failure is fatal (BUG).
 */
static void __init tegra_init_timer(void)
{
	struct clk *clk;
	int ret;

	clk = clk_get_sys("timer", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);

	/*
	 * rtc registers are used by read_persistent_clock, keep the rtc clock
	 * enabled
	 */
	clk = clk_get_sys("rtc-tegra", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);

#ifdef CONFIG_HAVE_ARM_TWD
	twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
#endif

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	tegra2_init_timer(&system_timer, &tegra_timer_irq.irq);
#else
	tegra3_init_timer(&system_timer, &tegra_timer_irq.irq);
#endif

	/* sched_clock runs from the 1 MHz (1 us) counter */
	init_fixed_sched_clock(&cd, tegra_update_sched_clock, 32, 1000000,
			       SC_MULT, SC_SHIFT);

	if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
		printk(KERN_ERR "Failed to register clocksource\n");
		BUG();
	}

	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
	if (ret) {
		printk(KERN_ERR "Failed to register timer IRQ: %d\n", ret);
		BUG();
	}

	/* Hand-computed clockevent parameters for the 1 MHz timer */
	clockevents_calc_mult_shift(&tegra_clockevent, 1000000, 5);
	tegra_clockevent.max_delta_ns =
		clockevent_delta2ns(0x1fffffff, &tegra_clockevent);
	tegra_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &tegra_clockevent);
	tegra_clockevent.cpumask = cpu_all_mask;
	tegra_clockevent.irq = tegra_timer_irq.irq;
	clockevents_register_device(&tegra_clockevent);

	register_syscore_ops(&tegra_timer_syscore_ops);
}
/*
 * mxs_clocksource_init - register the MXS timrot counter as a
 * clocksource.  v1 hardware registers a pre-built struct clocksource,
 * later revisions use the generic mmio helper on running-count 1.
 *
 * Fix: previously both registration return values were discarded and
 * the function unconditionally returned 0; now any error from the
 * registration helpers is propagated to the caller.
 *
 * Returns 0 on success or a negative errno from registration.
 */
static int __init mxs_clocksource_init(struct clk *timer_clk)
{
	unsigned int c = clk_get_rate(timer_clk);
	int ret;

	if (timrot_is_v1())
		ret = clocksource_register_hz(&clocksource_mxs, c);
	else
		ret = clocksource_mmio_init(mxs_timrot_base +
					    HW_TIMROT_RUNNING_COUNTn(1),
					    "mxs_timer", c, 200, 32,
					    clocksource_mmio_readl_down);

	return ret;
}
/*
 * sun4i_timer_init - probe the Allwinner sun4i timer node.
 * Timer 1 is the free-running clocksource/sched_clock source,
 * timer 0 drives the clockevent device.  Setup failures panic.
 */
static void __init sun4i_timer_init(struct device_node *node)
{
	unsigned long rate = 0;
	struct clk *clk;
	int ret, irq;
	u32 val;

	timer_base = of_iomap(node, 0);
	if (!timer_base)
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");
	clk_prepare_enable(clk);

	rate = clk_get_rate(clk);

	/* Timer 1: auto-reload from the full interval, 24 MHz osc clocked */
	writel(~0, timer_base + TIMER_INTVAL_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD |
	       TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_base + TIMER_CTL_REG(1));

	sched_clock_register(sun4i_timer_sched_read, 32, rate);
	clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
			      rate, 350, 32, clocksource_mmio_readl_down);

	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);

	/* Timer 0 (clockevent): select the 24 MHz oscillator input */
	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_base + TIMER_CTL_REG(0));

	/* Make sure timer is stopped before playing with interrupts */
	sun4i_clkevt_time_stop(0);

	ret = setup_irq(irq, &sun4i_timer_irq);
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);

	/* Enable timer0 interrupt */
	val = readl(timer_base + TIMER_IRQ_EN_REG);
	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);

	sun4i_clockevent.cpumask = cpu_possible_mask;
	sun4i_clockevent.irq = irq;

	clockevents_config_and_register(&sun4i_clockevent, rate,
					TIMER_SYNC_TICKS, 0xffffffff);
}
/*
 * nmdk_timer_init - set up the Nomadik MTU block: timer 0 as the
 * clocksource (and optional sched_clock), timer 1 as the clockevent.
 */
void __init nmdk_timer_init(void)
{
	unsigned long rate;
	struct clk *clk0;
	int ret;

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));

	ret = clk_prepare_enable(clk0);
	BUG_ON(ret != 0);

	/*
	 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
	 * for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
	 * At 32 MHz, the timer (with 32 bit counter) can be programmed
	 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
	 * with 16 gives too low timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		clk_prescale = MTU_CRn_PRESCALE_16;
	} else {
		clk_prescale = MTU_CRn_PRESCALE_1;
	}

	/* Cycles per jiffy, rounded to nearest */
	nmdk_cycle = (rate + HZ/2) / HZ;


	/* Timer 0 is the free running clocksource */
	nmdk_clksrc_reset();

	if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
			rate, 200, 32, clocksource_mmio_readl_down))
		pr_err("timer: failed to initialize clock source %s\n",
		       "mtu_0");
#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
	setup_sched_clock(nomadik_read_sched_clock, 32, rate);
#endif
	/* Timer 1 is used for events, register irq and clockevents */
	setup_irq(IRQ_MTU0, &nmdk_timer_irq);
	nmdk_clkevt.cpumask = cpumask_of(0);
	clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
#ifdef ARCH_HAS_READ_CURRENT_TIMER
	/* Use the MTU counter for the calibrated delay loop */
	set_delay_fn(nmdk_timer_delay_loop);
#endif
}
/*
 * omap_init_clocksource - start MPU timer 2 free-running and register
 * it as the "mpu_timer2" down-counting clocksource and as the
 * sched_clock source at @rate.
 */
static void __init omap_init_clocksource(unsigned long rate)
{
	static char errmsg[] __initdata =
		KERN_ERR "%s: can't register clocksource!\n";
	omap_mpu_timer_regs_t __iomem *regs = omap_mpu_timer_base(1);

	/* Start the timer counting over its full range */
	omap_mpu_timer_start(1, ~0, 1);

	init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);

	if (clocksource_mmio_init(&regs->read_tim, "mpu_timer2", rate,
				  300, 32, clocksource_mmio_readl_down))
		printk(errmsg, "mpu_timer2");
}
/*
 * sun4i_timer_init - probe/init via the timer_of helper.  Timer 1 is
 * the free-running clocksource, timer 0 the clockevent device.
 * Returns 0 on success or a negative errno.
 */
static int __init sun4i_timer_init(struct device_node *node)
{
	int ret;
	u32 val;

	ret = timer_of_init(node, &to);
	if (ret)
		return ret;

	/* Timer 1: auto-reload from the full interval, 24 MHz osc clocked */
	writel(~0, timer_of_base(&to) + TIMER_INTVAL_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD |
	       TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_of_base(&to) + TIMER_CTL_REG(1));

	/*
	 * sched_clock_register does not have priorities, and on sun6i and
	 * later there is a better sched_clock registered by arm_arch_timer.c
	 */
	if (of_machine_is_compatible("allwinner,sun4i-a10") ||
	    of_machine_is_compatible("allwinner,sun5i-a13") ||
	    of_machine_is_compatible("allwinner,sun5i-a10s") ||
	    of_machine_is_compatible("allwinner,suniv-f1c100s"))
		sched_clock_register(sun4i_timer_sched_read, 32,
				     timer_of_rate(&to));

	ret = clocksource_mmio_init(timer_of_base(&to) + TIMER_CNTVAL_REG(1),
				    node->name, timer_of_rate(&to), 350, 32,
				    clocksource_mmio_readl_down);
	if (ret) {
		pr_err("Failed to register clocksource\n");
		return ret;
	}

	/* Timer 0 (clockevent): select the 24 MHz oscillator input */
	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
	       timer_of_base(&to) + TIMER_CTL_REG(0));

	/* Make sure timer is stopped before playing with interrupts */
	sun4i_clkevt_time_stop(timer_of_base(&to), 0);

	/* clear timer0 interrupt */
	sun4i_timer_clear_interrupt(timer_of_base(&to));

	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
					TIMER_SYNC_TICKS, 0xffffffff);

	/* Enable timer0 interrupt */
	val = readl(timer_of_base(&to) + TIMER_IRQ_EN_REG);
	writel(val | TIMER_IRQ_EN(0), timer_of_base(&to) + TIMER_IRQ_EN_REG);

	return ret;
}
/*
 * sun5i_timer_init - probe the Allwinner sun5i timer node.  Timer 1
 * is the free-running clocksource/sched_clock source, timer 0 the
 * clockevent device.  Setup failures panic.
 */
static void __init sun5i_timer_init(struct device_node *node)
{
	struct reset_control *rstc;
	unsigned long rate;
	struct clk *clk;
	int ret, irq;
	u32 val;

	timer_base = of_iomap(node, 0);
	if (!timer_base)
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");
	clk_prepare_enable(clk);
	rate = clk_get_rate(clk);

	/* Deassert the reset line when the DT describes one */
	rstc = of_reset_control_get(node, NULL);
	if (!IS_ERR(rstc))
		reset_control_deassert(rstc);

	/* Timer 1: free-running with auto-reload from the full interval */
	writel(~0, timer_base + TIMER_INTVAL_LO_REG(1));
	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
	       timer_base + TIMER_CTL_REG(1));

	sched_clock_register(sun5i_timer_sched_read, 32, rate);
	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
			      rate, 340, 32, clocksource_mmio_readl_down);

	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);

	ret = setup_irq(irq, &sun5i_timer_irq);
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);

	/* Enable timer0 interrupt */
	val = readl(timer_base + TIMER_IRQ_EN_REG);
	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);

	sun5i_clockevent.cpumask = cpu_possible_mask;
	sun5i_clockevent.irq = irq;

	clockevents_config_and_register(&sun5i_clockevent, rate,
					TIMER_SYNC_TICKS, 0xffffffff);
}
/*
 * fttmr010_timer_common_init - map registers, set timer 3 up as the
 * free-running clocksource/sched_clock and timer 1 as the
 * interrupt-driven clockevent device.
 * Returns 0 on success or a negative errno.
 */
static int __init fttmr010_timer_common_init(struct device_node *np)
{
	int irq;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("Can't remap registers");
		return -ENXIO;
	}

	/* IRQ for timer 1 */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("Can't parse IRQ");
		return -EINVAL;
	}

	/*
	 * Reset the interrupt mask and status
	 */
	writel(TIMER_INT_ALL_MASK, base + TIMER_INTR_MASK);
	writel(0, base + TIMER_INTR_STATE);
	writel(TIMER_DEFAULT_FLAGS, base + TIMER_CR);

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled.)
	 */
	writel(0, base + TIMER3_COUNT);
	writel(0, base + TIMER3_LOAD);
	writel(0, base + TIMER3_MATCH1);
	writel(0, base + TIMER3_MATCH2);
	clocksource_mmio_init(base + TIMER3_COUNT,
			      "fttmr010_clocksource", tick_rate,
			      300, 32, clocksource_mmio_readl_up);
	sched_clock_register(fttmr010_read_sched_clock, 32, tick_rate);

	/*
	 * Setup clockevent timer (interrupt-driven.)
	 */
	writel(0, base + TIMER1_COUNT);
	writel(0, base + TIMER1_LOAD);
	writel(0, base + TIMER1_MATCH1);
	writel(0, base + TIMER1_MATCH2);
	setup_irq(irq, &fttmr010_timer_irq);
	fttmr010_clockevent.cpumask = cpumask_of(0);
	clockevents_config_and_register(&fttmr010_clockevent, tick_rate,
					1, 0xffffffff);

	return 0;
}
/*
 * mxc_clocksource_init - register the i.MX GPT counter as the delay
 * timer, sched_clock source and "mxc_timer1" mmio clocksource.
 * Returns the clocksource registration result.
 */
static int __init mxc_clocksource_init(struct clk *timer_clk)
{
	unsigned int rate = clk_get_rate(timer_clk);
	void __iomem *counter;

	/* The counter register offset differs between timer revisions */
	if (timer_is_v2())
		counter = timer_base + V2_TCN;
	else
		counter = timer_base + MX1_2_TCN;

	/* Use the free-running counter for the calibrated delay loop */
	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = rate;
	register_current_timer_delay(&imx_delay_timer);

	sched_clock_reg = counter;
	sched_clock_register(mxc_read_sched_clock, 32, rate);

	return clocksource_mmio_init(counter, "mxc_timer1", rate, 200, 32,
				     clocksource_mmio_readl_up);
}
/*
 * meson6_timer_init - probe the Meson6 timer node: timer E (1 us
 * units) as sched_clock/clocksource, timer A as the clockevent.
 *
 * Fixes: the clocksource rate is now USEC_PER_SEC, consistent with the
 * sched_clock and clockevent rates in this function (was the literal
 * 1000 * 1000); the clocksource registration result is now checked and
 * propagated like every other failure path here.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init meson6_timer_init(struct device_node *node)
{
	u32 val;
	int ret, irq;

	timer_base = of_io_request_and_map(node, 0, "meson6-timer");
	if (IS_ERR(timer_base)) {
		pr_err("Can't map registers\n");
		return -ENXIO;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("Can't parse IRQ\n");
		return -EINVAL;
	}

	/* Set 1us for timer E */
	val = readl(timer_base + TIMER_ISA_MUX);
	val &= ~TIMER_CSD_INPUT_MASK;
	val |= TIMER_CSD_UNIT_1US << TIMER_INPUT_BIT(CSD_ID);
	writel(val, timer_base + TIMER_ISA_MUX);

	sched_clock_register(meson6_timer_sched_read, 32, USEC_PER_SEC);
	ret = clocksource_mmio_init(timer_base + TIMER_ISA_VAL(CSD_ID),
				    node->name, USEC_PER_SEC, 300, 32,
				    clocksource_mmio_readl_up);
	if (ret) {
		pr_err("Failed to register clocksource\n");
		return ret;
	}

	/* Timer A base 1us */
	val &= ~TIMER_CED_INPUT_MASK;
	val |= TIMER_CED_UNIT_1US << TIMER_INPUT_BIT(CED_ID);
	writel(val, timer_base + TIMER_ISA_MUX);

	/* Stop the timer A */
	meson6_clkevt_time_stop(CED_ID);

	ret = setup_irq(irq, &meson6_timer_irq);
	if (ret) {
		pr_warn("failed to setup irq %d\n", irq);
		return ret;
	}

	meson6_clockevent.cpumask = cpu_possible_mask;
	meson6_clockevent.irq = irq;

	clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
					1, 0xfffe);
	return 0;
}
/*
 * integrator_clocksource_init - register TIMER2 as a 16-bit periodic
 * down-counting clocksource.  Rates of 1.5 MHz and above are divided
 * by 16 in hardware (and in the reported rate).
 */
static void integrator_clocksource_init(u32 khz)
{
	void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
	u32 ctrl = TIMER_CTRL_ENABLE;

	if (khz >= 1500) {
		khz /= 16;
		/*
		 * Fix: OR in the divider bit.  The previous plain
		 * assignment (ctrl = TIMER_CTRL_DIV16) dropped
		 * TIMER_CTRL_ENABLE, leaving the timer disabled on the
		 * divided-clock path.  The sibling variants of this
		 * function use |= here.
		 */
		ctrl |= TIMER_CTRL_DIV16;
	}

	writel(ctrl, base + TIMER_CTRL);
	writel(0xffff, base + TIMER_LOAD);

	clocksource_mmio_init(base + TIMER_VALUE, "timer2",
			      khz * 1000, 200, 16,
			      clocksource_mmio_readl_down);
}
/*
 * sa1100_timer_init - set up the SA-1100 OS timer: OSCR as the
 * clocksource and sched_clock source, OSMR0 match as the clockevent.
 */
void __init sa1100_timer_init(void)
{
	/* Mask all OS-timer match interrupts, clear pending status */
	writel_relaxed(0, OIER);
	writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);

	setup_sched_clock(sa1100_read_sched_clock, 32, 3686400);

	clocksource_mmio_init(OSCR, "oscr", CLOCK_TICK_RATE, 200, 32,
			      clocksource_mmio_readl_up);

	/* OSMR0 match drives the per-CPU (UP) clockevent */
	ckevt_sa1100_osmr0.cpumask = cpumask_of(0);
	setup_irq(IRQ_OST0, &sa1100_timer_irq);
	clockevents_config_and_register(&ckevt_sa1100_osmr0, 3686400,
					MIN_OSCR_DELTA * 2, 0x7fffffff);
}
/*
 * sp804_clocksource_init - register an SP804 timer as a clocksource
 * named @name, running as a free-running 32-bit periodic down-counter.
 * Silently bails out if the clock rate cannot be determined.
 */
void __init sp804_clocksource_init(void __iomem *base, const char *name)
{
	long rate = sp804_get_clock_rate(name);

	if (rate < 0)
		return;

	/* setup timer 0 as free-running clocksource */
	writel(0, base + TIMER_CTRL);
	writel(0xffffffff, base + TIMER_LOAD);
	writel(0xffffffff, base + TIMER_VALUE);
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
	       base + TIMER_CTRL);

	clocksource_mmio_init(base + TIMER_VALUE, name,
			      rate, 200, 32, clocksource_mmio_readl_down);
}
/*
 * freerun_timer_init - program the timer at @base as a 32-bit periodic
 * down-counting clocksource (interrupt enabled, handled by
 * freerun_timer_irq on @timer_irq).
 */
void __init freerun_timer_init(void __iomem *base, unsigned int timer_irq)
{
	clksrc_base = base;

	/* setup timer 0 as free-running clocksource */
	writel(0, clksrc_base + TIMER_CTRL);
	writel(0xffffffff, clksrc_base + TIMER_LOAD);
	writel(0xffffffff, clksrc_base + TIMER_VALUE);
	writel(0xffffffff, clksrc_base + TIMER_BGLOAD);
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC |
	       TIMER_CTRL_IE, clksrc_base + TIMER_CTRL);

	setup_irq(timer_irq, &freerun_timer_irq);

	clocksource_mmio_init(clksrc_base + TIMER_VALUE, "free-run-timer",
			      CONFIG_DEFAULT_TIMERCLK, 200, 32,
			      clocksource_mmio_readl_down);
}
static void integrator_clocksource_init(unsigned long inrate) { void __iomem *base = (void __iomem *)TIMER2_VA_BASE; u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; unsigned long rate = inrate; if (rate >= 1500000) { rate /= 16; ctrl |= TIMER_CTRL_DIV16; } writel(0xffff, base + TIMER_LOAD); writel(ctrl, base + TIMER_CTRL); clocksource_mmio_init(base + TIMER_VALUE, "timer2", rate, 200, 16, clocksource_mmio_readl_down); }
/*
 * moxart_timer_init - probe the MOXA ART timer node: timer 2 is the
 * down-counting clocksource, timer 1 the clockevent.  All setup
 * failures are fatal (panic).
 */
static void __init moxart_timer_init(struct device_node *node)
{
	int ret, irq;
	unsigned long pclk;
	struct clk *clk;

	base = of_iomap(node, 0);
	if (!base)
		panic("%s: of_iomap failed\n", node->full_name);

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("%s: irq_of_parse_and_map failed\n", node->full_name);

	ret = setup_irq(irq, &moxart_timer_irq);
	if (ret)
		panic("%s: setup_irq failed\n", node->full_name);

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("%s: of_clk_get failed\n", node->full_name);

	pclk = clk_get_rate(clk);

	if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
				  "moxart_timer", pclk, 200, 32,
				  clocksource_mmio_readl_down))
		panic("%s: clocksource_mmio_init failed\n", node->full_name);

	clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);

	/* Start timer 2 counting down from the full 32-bit range */
	writel(~0, base + TIMER2_BASE + REG_LOAD);
	writel(TIMEREG_CR_2_ENABLE, base + TIMER_CR);

	moxart_clockevent.cpumask = cpumask_of(0);
	moxart_clockevent.irq = irq;

	/*
	 * documentation is not publicly available:
	 * min_delta / max_delta obtained by trial-and-error,
	 * max_delta 0xfffffffe should be ok because count
	 * register size is u32
	 */
	clockevents_config_and_register(&moxart_clockevent, pclk,
					0x4, 0xfffffffe);
}
/*
 * rps_clocksource_init - run the RPS timer at @base as a periodic
 * down-counter (reference clock prescaled by 16) and register it as a
 * clocksource and sched_clock source.  Panics on registration failure.
 */
static void __init rps_clocksource_init(void __iomem *base, ulong ref_rate)
{
	/* use prescale 16 */
	ulong rate = ref_rate / 16;

	/* Periodic down-counter over the full range, divide-by-16 */
	iowrite32(TIMER_MAX_VAL, base + TIMER_LOAD);
	iowrite32(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
		  base + TIMER_CTRL);

	if (clocksource_mmio_init(base + TIMER_CURR, "rps_clocksource_timer",
				  rate, 250, TIMER_BITS,
				  clocksource_mmio_readl_down))
		panic("can't register clocksource\n");

	setup_sched_clock(rps_read_sched_clock, TIMER_BITS, rate);
}
/*
 * pxa_timer_init - set up the PXA OS timer: OSCR as the clocksource
 * and sched_clock source, OSMR0 match as the clockevent device.
 */
void __init pxa_timer_init(void)
{
	unsigned long clock_tick_rate = get_clock_tick_rate();

	/* Mask all match interrupts and clear any pending status */
	writel_relaxed(0, OIER);
	writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);

	setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate);

	ckevt_pxa_osmr0.cpumask = cpumask_of(0);

	setup_irq(IRQ_OST0, &pxa_ost0_irq);

	clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32,
			      clocksource_mmio_readl_up);
	clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
					MIN_OSCR_DELTA * 2, 0x7fffffff);
}
/*
 * Set up timer interrupt
 *
 * Timer 0 runs the periodic tick (IRQ-driven, reload value LATCH);
 * TIMER_CLOCKSOURCE runs free over the full 32-bit range and is
 * registered as the "netx_timer" clocksource.  The clockevent
 * conversion factors are computed by hand.
 */
static void __init netx_timer_init(void)
{
	/* disable timer initially */
	writel(0, NETX_GPIO_COUNTER_CTRL(0));

	/* Reset the timer value to zero */
	writel(0, NETX_GPIO_COUNTER_CURRENT(0));

	writel(LATCH, NETX_GPIO_COUNTER_MAX(0));

	/* acknowledge interrupt */
	writel(COUNTER_BIT(0), NETX_GPIO_IRQ);

	/* Enable the interrupt in the specific timer
	 * register and start timer
	 */
	writel(COUNTER_BIT(0), NETX_GPIO_IRQ_ENABLE);
	writel(NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN,
	       NETX_GPIO_COUNTER_CTRL(0));

	setup_irq(NETX_IRQ_TIMER0, &netx_timer_irq);

	/* Setup timer one for clocksource */
	writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));
	writel(0, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE));
	writel(0xffffffff, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKSOURCE));
	writel(NETX_GPIO_COUNTER_CTRL_RUN,
	       NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));

	clocksource_mmio_init(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE),
			      "netx_timer", CLOCK_TICK_RATE, 200, 32,
			      clocksource_mmio_readl_up);

	/* Hand-computed mult; delta limits derive from it below */
	netx_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC,
				      netx_clockevent.shift);
	netx_clockevent.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &netx_clockevent);
	/* with max_delta_ns >= delta2ns(0x800) the system currently runs fine.
	 * Adding some safety ... */
	netx_clockevent.min_delta_ns =
		clockevent_delta2ns(0xa00, &netx_clockevent);
	netx_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&netx_clockevent);
}
/*
 * orion_time_init - set up the Orion bridge timers: timer 0 as a
 * free-running down-counting clocksource/sched_clock at @tclk,
 * timer 1 (via @irq) as the clockevent device.
 */
void __init orion_time_init(u32 _bridge_base, u32 _bridge_timer1_clr_mask,
			    unsigned int irq, unsigned int tclk)
{
	u32 u;

	/* Stash the bridge parameters used by the interrupt ack path */
	bridge_base = (void __iomem *)_bridge_base;
	bridge_timer1_clr_mask = _bridge_timer1_clr_mask;

	/* Cycles per jiffy, rounded to nearest */
	ticks_per_jiffy = (tclk + HZ/2) / HZ;

	/* sched_clock reads the free-running timer 0 counter */
	setup_sched_clock(orion_read_sched_clock, 32, tclk);

	/*
	 * Timer 0: full-range auto-reload, bridge interrupt masked,
	 * then enabled with reload, and registered as clocksource.
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
	u = readl(bridge_base + BRIDGE_MASK_OFF);
	writel(u & ~BRIDGE_INT_TIMER0, bridge_base + BRIDGE_MASK_OFF);
	u = readl(timer_base + TIMER_CTRL_OFF);
	writel(u | TIMER0_EN | TIMER0_RELOAD_EN, timer_base + TIMER_CTRL_OFF);
	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, "orion_clocksource",
			      tclk, 300, 32, clocksource_mmio_readl_down);

	/* Clockevent device with hand-computed conversion factors */
	setup_irq(irq, &orion_timer_irq);
	orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift);
	orion_clkevt.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &orion_clkevt);
	orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt);
	orion_clkevt.cpumask = cpumask_of(0);
	clockevents_register_device(&orion_clkevt);
}
/*
 * integrator_clocksource_init - set up the timer at @base as a 16-bit
 * periodic down-counting clocksource and sched_clock source.  Input
 * rates of 1.5 MHz and above are divided by 16 in hardware, and the
 * reported rate is scaled to match.
 */
static void integrator_clocksource_init(unsigned long inrate,
					void __iomem *base)
{
	u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
	unsigned long rate = inrate;

	if (rate >= 1500000) {
		rate /= 16;
		ctrl |= TIMER_CTRL_DIV16;
	}

	writel(0xffff, base + TIMER_LOAD);
	writel(ctrl, base + TIMER_CTRL);

	clocksource_mmio_init(base + TIMER_VALUE, "timer2",
			      rate, 200, 16, clocksource_mmio_readl_down);

	sched_clk_base = base;
	sched_clock_register(integrator_read_sched_clock, 16, rate);
}
/*
 * sa1100_timer_init - set up the SA-1100 OS timer (legacy init path):
 * OSCR as the clocksource and sched_clock source, OSMR0 match as the
 * clockevent.  The clockevent mult/shift and delta limits are computed
 * by hand here rather than via clockevents_config_and_register.
 */
static void __init sa1100_timer_init(void)
{
	/* Mask all match interrupts and clear any pending status */
	OIER = 0;
	OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;

	setup_sched_clock(sa1100_read_sched_clock, 32, 3686400);

	clockevents_calc_mult_shift(&ckevt_sa1100_osmr0, 3686400, 4);
	ckevt_sa1100_osmr0.max_delta_ns =
		clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0);
	/* +1 keeps min_delta above the raw conversion result */
	ckevt_sa1100_osmr0.min_delta_ns =
		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1;
	ckevt_sa1100_osmr0.cpumask = cpumask_of(0);

	setup_irq(IRQ_OST0, &sa1100_timer_irq);

	clocksource_mmio_init(&OSCR, "oscr", CLOCK_TICK_RATE, 200, 32,
			      clocksource_mmio_readl_up);

	clockevents_register_device(&ckevt_sa1100_osmr0);
}
/*
 * _clps711x_clksrc_init - register the CLPS711x 16-bit down-counter at
 * @base as a clocksource and sched_clock source running at @clock's
 * rate.  Returns 0 on success, -ENOMEM for a missing mapping or the
 * clock's error code.
 */
static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
{
	unsigned long hz;

	/* Validate the resources handed in by the platform code */
	if (!base)
		return -ENOMEM;
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	tcd = base;
	hz = clk_get_rate(clock);

	clocksource_mmio_init(tcd, "clps711x-clocksource", hz, 300, 16,
			      clocksource_mmio_readw_down);
	sched_clock_register(clps711x_sched_clock_read, 16, hz);

	return 0;
}
/*
 * tcc_clockevent_init - register the TCC 32-bit counter as the
 * "tcc_tc32" clocksource and set up the clockevent device with
 * hand-computed conversion factors.  Always returns 0.
 */
static int __init tcc_clockevent_init(struct clk *clock)
{
	unsigned int rate = clk_get_rate(clock);

	clocksource_mmio_init(timer_base + TC32MCNT_OFFS, "tcc_tc32",
			      rate, 200, 32, clocksource_mmio_readl_up);

	clockevent_tcc.cpumask = cpumask_of(0);
	/* mult must be set before the delta2ns() conversions below */
	clockevent_tcc.mult = div_sc(rate, NSEC_PER_SEC,
				     clockevent_tcc.shift);
	clockevent_tcc.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &clockevent_tcc);
	clockevent_tcc.min_delta_ns =
		clockevent_delta2ns(0xff, &clockevent_tcc);

	clockevents_register_device(&clockevent_tcc);

	return 0;
}
/*
 * s5p_clocksource_init - configure the source timer's clock tree
 * (tdiv at pclk/2, tin parented to tdiv), start the timer periodic
 * over its full range, and register it as sched_clock and clocksource.
 * Panics if the clocksource cannot be registered.
 */
static void __init s5p_clocksource_init(void)
{
	unsigned long pclk;
	unsigned long clock_rate;

	pclk = clk_get_rate(timerclk);

	clk_set_rate(tdiv_source, pclk / 2);
	clk_set_parent(tin_source, tdiv_source);

	clock_rate = clk_get_rate(tin_source);

	s5p_time_setup(timer_source.source_id, TCNT_MAX);
	s5p_time_start(timer_source.source_id, PERIODIC);

	setup_sched_clock(s5p_read_sched_clock, 32, clock_rate);

	if (clocksource_mmio_init(s5p_timer_reg(), "s5p_clocksource_timer",
				  clock_rate, 250, 32,
				  clocksource_mmio_readl_down))
		panic("s5p_clocksource_timer: can't register clocksource\n");
}
/*
 * ftm_clocksource_init - program the FlexTimer to count its full
 * 16-bit range, register it as sched_clock and the "fsl-ftm"
 * clocksource (both at @freq divided by the configured prescaler),
 * then start the counter.  Returns 0 or the mmio-init error.
 */
static int __init ftm_clocksource_init(unsigned long freq)
{
	unsigned long counter_hz = freq / (1 << priv->ps);
	int err;

	/* Count from 0 over the full modulo range */
	ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
	ftm_writel(~0u, priv->clksrc_base + FTM_MOD);
	ftm_reset_counter(priv->clksrc_base);

	sched_clock_register(ftm_read_sched_clock, 16, counter_hz);

	err = clocksource_mmio_init(priv->clksrc_base + FTM_CNT, "fsl-ftm",
				    counter_hz, 300, 16,
				    clocksource_mmio_readl_up);
	if (err) {
		pr_err("ftm: init clock source mmio failed: %d\n", err);
		return err;
	}

	ftm_counter_enable(priv->clksrc_base);

	return 0;
}
/*
 * pxa_timer_init - set up the PXA OS timer (legacy init path): OSCR
 * as the clocksource and sched_clock source, OSMR0 match as the
 * clockevent.  The clockevent mult/shift and delta limits are
 * computed by hand rather than via clockevents_config_and_register.
 */
static void __init pxa_timer_init(void)
{
	unsigned long clock_tick_rate = get_clock_tick_rate();

	/* Mask all match interrupts and clear any pending status */
	OIER = 0;
	OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;

	init_sched_clock(&cd, pxa_update_sched_clock, 32, clock_tick_rate);

	clockevents_calc_mult_shift(&ckevt_pxa_osmr0, clock_tick_rate, 4);
	ckevt_pxa_osmr0.max_delta_ns =
		clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
	/* +1 keeps min_delta above the raw conversion result */
	ckevt_pxa_osmr0.min_delta_ns =
		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1;
	ckevt_pxa_osmr0.cpumask = cpumask_of(0);

	setup_irq(IRQ_OST0, &pxa_ost0_irq);

	clocksource_mmio_init(&OSCR, "oscr0", clock_tick_rate, 200, 32,
			      clocksource_mmio_readl_up);

	clockevents_register_device(&ckevt_pxa_osmr0);
}
/*
 * nmdk_timer_init - set up the Nomadik MTU at @base: timer 0 as the
 * clocksource (and optional sched_clock), timer 1 as the clockevent.
 */
void __init nmdk_timer_init(void __iomem *base)
{
	unsigned long rate;
	struct clk *clk0;

	mtu_base = base;
	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));
	BUG_ON(clk_prepare(clk0) < 0);
	BUG_ON(clk_enable(clk0) < 0);

	/* Prescale by 16 when the input clock exceeds 32 MHz */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		clk_prescale = MTU_CRn_PRESCALE_16;
	} else {
		clk_prescale = MTU_CRn_PRESCALE_1;
	}

	/* Cycles per jiffy, rounded to nearest */
	nmdk_cycle = (rate + HZ/2) / HZ;

	/* Timer 0 is the free-running clocksource */
	nmdk_clksrc_reset();
	if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
			rate, 200, 32, clocksource_mmio_readl_down))
		pr_err("timer: failed to initialize clock source %s\n",
		       "mtu_0");
#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
	setup_sched_clock(nomadik_read_sched_clock, 32, rate);
#endif
	/* Timer 1 drives the clockevent device */
	setup_irq(IRQ_MTU0, &nmdk_timer_irq);
	nmdk_clkevt.cpumask = cpumask_of(0);
	clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
}