Example #1
/*
 * Clockevents init (sys timer)
 */
static void tick_tmr_init(void)
{
	volatile struct stm32_tim_regs	*tim;
	volatile u32			*rcc_enr, *rcc_rst;
	struct clock_event_device	*evt = &tick_tmr_clockevent;

	/* The target total timer divider (including the prescaler) */
	u32 div;
	/* The prescaler value will be (1 << psc_pwr) */
	int psc_pwr;

	/*
	 * If the timer is 16-bit, then (div >> psc_pwr) must not exceed
	 * (2**16 - 1).
	 */
	div = tick_tmr_clk / HZ;
	psc_pwr = ilog2(div) - TICK_TIM_COUNTER_BITWIDTH + 1;
	if (psc_pwr < 0)
		psc_pwr = 0;

	/*
	 * Setup reg bases
	 */
	tim = (struct stm32_tim_regs *)TICK_TIM_BASE;
	rcc_enr = (u32 *)TICK_TIM_RCC_ENR;
	rcc_rst = (u32 *)TICK_TIM_RCC_RST;

	/*
	 * Enable timer clock, and deinit registers
	 */
	*rcc_enr |= TICK_TIM_RCC_MSK;
	*rcc_rst |= TICK_TIM_RCC_MSK;
	*rcc_rst &= ~TICK_TIM_RCC_MSK;

	/*
	 * Select the counter mode:
	 * - upcounter;
	 * - auto-reload
	 */
	tim->cr1 = STM32_TIM_CR1_ARPE;
	tim->arr = (div >> psc_pwr);
	tim->psc = (1 << psc_pwr) - 1;
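	/*
	 * Worked example (illustrative values): with a 72 MHz timer clock,
	 * HZ = 100 and a 16-bit counter (TICK_TIM_COUNTER_BITWIDTH = 16),
	 * div = 720000 and ilog2(720000) = 19, so psc_pwr = 19 - 16 + 1 = 4.
	 * That gives PSC = 15 (divide by 16) and ARR = 45000, which fits in
	 * 16 bits and yields an update rate of roughly 72 MHz / 16 / 45000
	 * = 100 Hz = HZ.
	 */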

	/*
	 * Generate an update event to reload the Prescaler value immediately
	 */
	tim->egr = STM32_TIM_EGR_UG;

	/*
	 * Setup, and enable IRQ
	 */
	setup_irq(TICK_TIM_IRQ, &tick_tmr_irqaction);
	tim->dier |= STM32_TIM_DIER_UIE;

	/*
	 * For the system timer we don't provide a set_next_event method,
	 * so, I guess, setting mult, shift, max_delta_ns and min_delta_ns
	 * makes no sense (I verified that the kernel works well without
	 * these). Nevertheless, some periodic-only clockevent drivers do
	 * set them, so let's set them to some values too.
	 */
	clockevents_calc_mult_shift(evt, tick_tmr_clk / HZ, 5);
	evt->max_delta_ns = clockevent_delta2ns(0xFFFFFFF0, evt);
	evt->min_delta_ns = clockevent_delta2ns(0xF, evt);
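	/*
	 * For reference: mult/shift are the clockevents ns-to-cycles scale
	 * factors (cycles ~= (ns * mult) >> shift), and clockevent_delta2ns()
	 * performs the inverse conversion from a cycle delta to nanoseconds.
	 */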

	clockevents_register_device(evt);
}
/*
 * Setup the local clock events for a CPU.
 */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	struct kona_td kona_td;
	struct timer_ch_cfg config;

	/*
	 * TICK_TIMER_NAME can be either "aon-timer" or "slave-timer".
	 *
	 * We are currently using "slave-timer" at 1 MHz for better timer
	 * resolution and system performance
	 */
	kona_td = (struct kona_td)__get_cpu_var(percpu_kona_td);

	if (!kona_td.allocated) {
		kona_td.kona_timer =
		    kona_timer_request(TICK_TIMER_NAME,
				       TICK_TIMER_OFFSET + cpu);
		if (kona_td.kona_timer) {
			kona_td.allocated = true;
		} else {
			pr_err("%s: Failed to allocate %s channel %d as"
			       "CPU %d local tick device\n", __func__,
			       TICK_TIMER_NAME,
			       TICK_TIMER_OFFSET + cpu, cpu);
			return -ENXIO;
		}
	}

	/*
	 * In the future: the following should be a one-time configuration
	 */
	config.mode = MODE_ONESHOT;
	config.arg = evt;
	config.cb = kona_tick_interrupt_cb;
	kona_timer_config(kona_td.kona_timer, &config);

	irq_set_affinity(kona_td.kona_timer->irq, cpumask_of(cpu));

	evt->name = "local_timer";
	evt->cpumask = cpumask_of(cpu);
	evt->irq = kona_td.kona_timer->irq;
	evt->set_next_event = kona_tick_set_next_event;
	evt->set_mode = kona_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 250;
	evt->shift = 32;
	evt->mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, evt->shift);
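	/*
	 * div_sc(rate, NSEC_PER_SEC, shift) computes (rate << shift) /
	 * NSEC_PER_SEC, i.e. the mult factor used to turn a nanosecond
	 * delta into timer cycles: cycles = (ns * mult) >> shift.
	 */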
	evt->max_delta_ns = clockevent_delta2ns(MAX_KONA_COUNT_CLOCK, evt);
	/* There is a MIN_KONA_DELTA_CLOCK clock-cycle delay in the HUB Timer
	 * due to an ASIC limitation. When min_delta_ns is set to N, the load
	 * value actually requested by hrtimer becomes N - 1, so add 1 to keep
	 * the effective minimum at MIN_KONA_DELTA_CLOCK.
	 */
	evt->min_delta_ns = clockevent_delta2ns(MIN_KONA_DELTA_CLOCK + 1, evt);

	per_cpu(percpu_kona_td, cpu) = kona_td;

	clockevents_register_device(evt);

	return 0;
}
Example #3
static void __init tegra_init_timer(void)
{
	struct clk *clk;
	unsigned long rate = clk_measure_input_freq();
	int ret;

	clk = clk_get_sys("timer", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);

	/*
	 * rtc registers are used by read_persistent_clock, keep the rtc clock
	 * enabled
	 */
	clk = clk_get_sys("rtc-tegra", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);

#ifdef CONFIG_HAVE_ARM_TWD
	twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
#endif

	switch (rate) {
	case 12000000:
		timer_writel(0x000b, TIMERUS_USEC_CFG);
		break;
	case 13000000:
		timer_writel(0x000c, TIMERUS_USEC_CFG);
		break;
	case 19200000:
		timer_writel(0x045f, TIMERUS_USEC_CFG);
		break;
	case 26000000:
		timer_writel(0x0019, TIMERUS_USEC_CFG);
		break;
	default:
		WARN(1, "Unknown clock rate");
	}
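	/*
	 * The values written to TIMERUS_USEC_CFG above appear to encode a
	 * fractional M/N divider as ((M - 1) << 8) | (N - 1), scaling the
	 * input clock down to the 1 MHz microsecond counter: e.g. 0x045f
	 * is 5/96, and 19.2 MHz * 5 / 96 = 1 MHz.
	 */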

	init_fixed_sched_clock(&cd, tegra_update_sched_clock, 32,
			       1000000, SC_MULT, SC_SHIFT);

	if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
		printk(KERN_ERR "Failed to register clocksource\n");
		BUG();
	}

	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
	if (ret) {
		printk(KERN_ERR "Failed to register timer IRQ: %d\n", ret);
		BUG();
	}

	clockevents_calc_mult_shift(&tegra_clockevent, 1000000, 5);
	tegra_clockevent.max_delta_ns =
		clockevent_delta2ns(0x1fffffff, &tegra_clockevent);
	tegra_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &tegra_clockevent);
	tegra_clockevent.cpumask = cpu_all_mask;
	tegra_clockevent.irq = tegra_timer_irq.irq;
	clockevents_register_device(&tegra_clockevent);
}
/*
 * This sets up the system timers, clock source and clock event.
 */
static void __init u300_timer_init(void)
{
	struct clk *clk;
	unsigned long rate;

	/* Clock the interrupt controller */
	clk = clk_get_sys("apptimer", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);
	rate = clk_get_rate(clk);

	init_sched_clock(&cd, u300_update_sched_clock, 32, rate);

	/*
	 * Disable the "OS" and "DD" timers - these are designed for Symbian!
	 * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
	 */
	writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_CRC);
	writel(U300_TIMER_APP_ROST_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_ROST);
	writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_DOST);
	writel(U300_TIMER_APP_RDDT_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_RDDT);
	writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_DDDT);

	/* Reset the General Purpose timer 1. */
	writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_RGPT1);

	/* Set up the IRQ handler */
	setup_irq(IRQ_U300_TIMER_APP_GP1, &u300_timer_irq);

	/* Reset the General Purpose timer 2 */
	writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_RGPT2);
	/* Set this timer to run around forever */
	writel(0xFFFFFFFFU, U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2TC);
	/* Set continuous mode so it wraps around */
	writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_SGPT2M);
	/* Disable timer interrupts */
	writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2IE);
	/* Then enable the GP2 timer to use as a free running us counter */
	writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_EGPT2);

	/* Use general purpose timer 2 as clock source */
	if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC,
			"GPT2", rate, 300, 32, clocksource_mmio_readl_up))
		pr_err("timer: failed to initialize U300 clock source\n");

	clockevents_calc_mult_shift(&clockevent_u300_1mhz,
				    rate, APPTIMER_MIN_RANGE);
	/* 32bit counter, so 32bits delta is max */
	clockevent_u300_1mhz.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &clockevent_u300_1mhz);
	/* This timer is slow enough to set for 1 cycle == 1 MHz */
	clockevent_u300_1mhz.min_delta_ns =
		clockevent_delta2ns(1, &clockevent_u300_1mhz);
	clockevent_u300_1mhz.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_u300_1mhz);
	/*
	 * TODO: init and register the rest of the timers too, they can be
	 * used by hrtimers!
	 */
}
/*
 * Setup the local clock events for a CPU.
 */
static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;
	int cpu = smp_processor_id();

	/*
	 * If the basic setup for this CPU has been done before don't
	 * bother with the below.
	 */
	if (per_cpu(percpu_setup_called, cpu)) {
		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
		clockevents_register_device(*__this_cpu_ptr(twd_evt));
		enable_percpu_irq(clk->irq, 0);
		return 0;
	}
	per_cpu(percpu_setup_called, cpu) = true;

	/*
	 * This stuff only needs to be done once for the entire TWD cluster
	 * during the runtime of the system.
	 */
	if (!common_setup_called) {
		twd_clk = twd_get_clock();

		/*
		 * We use IS_ERR_OR_NULL() here, because if the clock stubs
		 * are active we will get a valid clk reference which is
		 * however NULL and will return the rate 0. In that case we
		 * need to calibrate the rate instead.
		 */
		if (!IS_ERR_OR_NULL(twd_clk))
			twd_timer_rate = clk_get_rate(twd_clk);
		else
			twd_calibrate_rate();

		common_setup_called = true;
	}

	/*
	 * The following is done once per CPU the first time .setup() is
	 * called.
	 */
	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;

#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP)
	printk(KERN_INFO "I-pipe, %lu.%03lu MHz timer\n",
	       twd_timer_rate / 1000000,
	       (twd_timer_rate % 1000000) / 1000);
	clk->ipipe_timer = __this_cpu_ptr(&twd_itimer);
	clk->ipipe_timer->irq = clk->irq;
	clk->ipipe_timer->ack = twd_ack;
	clk->ipipe_timer->min_delay_ticks = 0xf;
#endif

	this_cpu_clk = __this_cpu_ptr(twd_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
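	/*
	 * clockevents_config_and_register() derives mult/shift and the
	 * min/max_delta_ns limits from the timer rate and the min/max delta
	 * given in ticks (0xf and 0xffffffff here), so no manual div_sc()
	 * or clockevent_delta2ns() setup is needed.
	 */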
	enable_percpu_irq(clk->irq, 0);

	return 0;
}
void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());
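	/*
	 * The device registered below is this CPU's per-cpu clockevent, so
	 * this must run on the target CPU with preemption disabled; hence
	 * the BUG_ON(preemptible()) check above.
	 */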

	clockevents_register_device(&__get_cpu_var(xen_clock_events));
}
Example #7
void __init tegra_init_timer(struct device_node *np)
{
	struct clk *clk;
	int ret;
	unsigned long rate;
	struct resource res;

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("%s:No memory resources found\n", __func__);
		return;
	}

	timer_reg_base = ioremap(res.start, resource_size(&res));
	if (!timer_reg_base) {
		pr_err("%s:Can't map timer registers\n", __func__);
		BUG();
	}
	timer_reg_base_pa = res.start;

	tegra_timer_irq.irq = irq_of_parse_and_map(np, 0);
	if (tegra_timer_irq.irq <= 0) {
		pr_err("%s:Failed to map timer IRQ\n", __func__);
		BUG();
	}

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk))
		clk = clk_get_sys("timer", NULL);

	if (IS_ERR(clk)) {
		pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
		rate = 12000000;
	} else {
		clk_prepare_enable(clk);
		rate = clk_get_rate(clk);
	}

	switch (rate) {
	case 12000000:
		timer_writel(0x000b, TIMERUS_USEC_CFG);
		break;
	case 12800000:
		timer_writel(0x043F, TIMERUS_USEC_CFG);
		break;
	case 13000000:
		timer_writel(0x000c, TIMERUS_USEC_CFG);
		break;
	case 19200000:
		timer_writel(0x045f, TIMERUS_USEC_CFG);
		break;
	case 26000000:
		timer_writel(0x0019, TIMERUS_USEC_CFG);
		break;
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
	case 16800000:
		timer_writel(0x0453, TIMERUS_USEC_CFG);
		break;
	case 38400000:
		timer_writel(0x04BF, TIMERUS_USEC_CFG);
		break;
	case 48000000:
		timer_writel(0x002F, TIMERUS_USEC_CFG);
		break;
#endif
	default:
		if (tegra_platform_is_qt()) {
			timer_writel(0x000c, TIMERUS_USEC_CFG);
			break;
		}
		WARN(1, "Unknown clock rate");
	}


#ifdef CONFIG_PM_SLEEP
	hotplug_cpu_register(np);
#endif
	of_node_put(np);
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	tegra20_init_timer();
#else
	tegra30_init_timer();
#endif

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
		"timer_us", 1000000, 300, 32,
		clocksource_mmio_readl_up);
	if (ret) {
		pr_err("%s: Failed to register clocksource: %d\n",
			__func__, ret);
		BUG();
	}

	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
	if (ret) {
		pr_err("%s: Failed to register timer IRQ: %d\n",
			__func__, ret);
		BUG();
	}

	clockevents_calc_mult_shift(&tegra_clockevent, 1000000, 5);
	tegra_clockevent.max_delta_ns =
		clockevent_delta2ns(0x1fffffff, &tegra_clockevent);
	tegra_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &tegra_clockevent);
	tegra_clockevent.cpumask = cpu_all_mask;
	tegra_clockevent.irq = tegra_timer_irq.irq;
	clockevents_register_device(&tegra_clockevent);

#ifndef CONFIG_ARM64
#ifdef CONFIG_ARM_ARCH_TIMER
	/* Architectural timers take precedence over broadcast timers.
	   Only register a broadcast clockevent device if architectural
	   timers do not exist or cannot be initialized. */
	if (tegra_init_arch_timer())
#endif
		/* Architectural timers do not exist or cannot be initialized.
		   Fall back to using the broadcast timer as the sched clock. */
		setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
#endif

	register_syscore_ops(&tegra_timer_syscore_ops);

#ifndef CONFIG_ARM64
	late_time_init = tegra_init_late_timer;
#endif

	//arm_delay_ops.delay		= __tegra_delay;
	//arm_delay_ops.const_udelay	= __tegra_const_udelay;
	//arm_delay_ops.udelay		= __tegra_udelay;
}
Example #8
int __init tci6614_timer_init(void)
{
	struct device_node *node;
	struct clk *clk;
	static char err[] __initdata = KERN_ERR
		"%s: can't register clocksource!\n";
	void __iomem *base;
	int irqs[2];
	int i, error;
	u32 tgcr;

	node = of_find_matching_node(NULL, tci6614_timer_ids);
	if (!node) {
		pr_err("tci6614-timer: no matching node\n");
		return -ENODEV;
	}

	irqs[0]  = irq_of_parse_and_map(node, 0);
	irqs[1]  = irq_of_parse_and_map(node, 1);
	if (irqs[0] == NO_IRQ || irqs[1] == NO_IRQ) {
		pr_err("tci6614-timer: failed to map interrupts\n");
		return -ENODEV;
	}

	base = of_iomap(node, 0);
	if (!base) {
		pr_err("tci6614-timer: failed to map registers\n");
		return -ENODEV;
	}

	clk = of_clk_get(node, 0);
	if (!clk) {
		pr_err("tci6614-timer: failed to get clock\n");
		iounmap(base);
		return -ENODEV;
	}

	error = clk_prepare_enable(clk);
	if (error) {
		pr_err("tci6614-timer: failed to enable clock\n");
		iounmap(base);
		clk_put(clk);
		return -ENODEV;
	}

	/* Disabled, Internal clock source */
	__raw_writel(0, base + TCR);

	/* reset both timers, no pre-scaler for timer34 */
	tgcr = 0;
	__raw_writel(tgcr, base + TGCR);

	/* Set both timers to unchained 32-bit */
	tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
	__raw_writel(tgcr, base + TGCR);

	/* Unreset timers */
	tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
		(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
	__raw_writel(tgcr, base + TGCR);

	/* Init both counters to zero */
	__raw_writel(0, base + TIM12);
	__raw_writel(0, base + TIM34);

	/* Init of each timer as a 32-bit timer */
	for (i = 0; i < ARRAY_SIZE(timers); i++) {
		struct timer_s *t = &timers[i];

		t->base = base;
		t->irqaction.name = t->name;
		t->irqaction.dev_id = (void *)t;

		setup_irq(irqs[i], &t->irqaction);
	}

	tci6614_clock_tick_rate = clk_get_rate(clk);

	/* setup clocksource */
	tci6614_source.name = timers[TID_CLOCKSOURCE].name;
	if (clocksource_register_hz(&tci6614_source, tci6614_clock_tick_rate))
		printk(err, tci6614_source.name);

	setup_sched_clock(tci6614_read_sched_clock, 32,
			  tci6614_clock_tick_rate);

	/* setup clockevent */
	tci6614_event.name = timers[TID_CLOCKEVENT].name;
	tci6614_event.mult = div_sc(tci6614_clock_tick_rate, NSEC_PER_SEC,
					 tci6614_event.shift);
	tci6614_event.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &tci6614_event);
	tci6614_event.min_delta_ns = 50000; /* 50 usec */

	tci6614_event.cpumask = cpumask_of(0);
	clockevents_register_device(&tci6614_event);

	for (i = 0; i < ARRAY_SIZE(timers); i++)
		timer32_config(&timers[i]);

	pr_info("tci6614 clock @%d MHz\n", tci6614_clock_tick_rate);

	return 0;
}
void tick_setup_hrtimer_broadcast(void)
{
	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	bctimer.function = bc_handler;
	clockevents_register_device(&ce_broadcast_hrtimer);
}
Example #10
/*
 * The clock management driver isn't initialized at this point, so the
 * clocks need to be enabled here manually and then tagged as used in
 * the clock driver initialization
 */
static void __init lpc32xx_timer_init(void)
{
	u32 clkrate, pllreg;

	/* Enable timer clock */
	__raw_writel(LPC32XX_CLKPWR_TMRPWMCLK_TIMER0_EN |
		LPC32XX_CLKPWR_TMRPWMCLK_TIMER1_EN,
		LPC32XX_CLKPWR_TIMERS_PWMS_CLK_CTRL_1);

	/*
	 * The clock driver isn't initialized at this point. So determine if
	 * the SYSCLK is driven from the PLL397 or main oscillator and then use
	 * it to compute the PLL frequency and the PCLK divider to get the base
	 * timer rates. This rate is needed to compute the tick rate.
	 */
	if (clk_is_sysclk_mainosc() != 0)
		clkrate = LPC32XX_MAIN_OSC_FREQ;
	else
		clkrate = 397 * LPC32XX_CLOCK_OSC_FREQ;
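	/*
	 * Assuming LPC32XX_CLOCK_OSC_FREQ is the 32.768 kHz RTC oscillator,
	 * PLL397 multiplies it by 397, giving 397 * 32768 = 13008896 Hz,
	 * i.e. roughly 13 MHz.
	 */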

	/* Get ARM HCLKPLL register and convert it into a frequency */
	pllreg = __raw_readl(LPC32XX_CLKPWR_HCLKPLL_CTRL) & 0x1FFFF;
	clkrate = clk_get_pllrate_from_reg(clkrate, pllreg);

	/* Get PCLK divider and divide ARM PLL clock by it to get timer rate */
	clkrate = clkrate / clk_get_pclk_div();

	/* Initial timer setup */
	__raw_writel(0, LCP32XX_TIMER_TCR(LPC32XX_TIMER0_BASE));
	__raw_writel(LCP32XX_TIMER_CNTR_MTCH_BIT(0),
		LCP32XX_TIMER_IR(LPC32XX_TIMER0_BASE));
	__raw_writel(1, LCP32XX_TIMER_MR0(LPC32XX_TIMER0_BASE));
	__raw_writel(LCP32XX_TIMER_CNTR_MCR_MTCH(0) |
		LCP32XX_TIMER_CNTR_MCR_STOP(0) |
		LCP32XX_TIMER_CNTR_MCR_RESET(0),
		LCP32XX_TIMER_MCR(LPC32XX_TIMER0_BASE));

	/* Setup tick interrupt */
	setup_irq(IRQ_LPC32XX_TIMER0, &lpc32xx_timer_irq);

	/* Setup the clockevent structure. */
	lpc32xx_clkevt.mult = div_sc(clkrate, NSEC_PER_SEC,
		lpc32xx_clkevt.shift);
	lpc32xx_clkevt.max_delta_ns = clockevent_delta2ns(-1,
		&lpc32xx_clkevt);
	lpc32xx_clkevt.min_delta_ns = clockevent_delta2ns(1,
		&lpc32xx_clkevt) + 1;
	lpc32xx_clkevt.cpumask = cpumask_of(0);
	clockevents_register_device(&lpc32xx_clkevt);

	/* Use timer1 as clock source. */
	__raw_writel(LCP32XX_TIMER_CNTR_TCR_RESET,
		LCP32XX_TIMER_TCR(LPC32XX_TIMER1_BASE));
	__raw_writel(0, LCP32XX_TIMER_PR(LPC32XX_TIMER1_BASE));
	__raw_writel(0, LCP32XX_TIMER_MCR(LPC32XX_TIMER1_BASE));
	__raw_writel(LCP32XX_TIMER_CNTR_TCR_EN,
		LCP32XX_TIMER_TCR(LPC32XX_TIMER1_BASE));
	lpc32xx_clksrc.mult = clocksource_hz2mult(clkrate,
		lpc32xx_clksrc.shift);
	clocksource_register(&lpc32xx_clksrc);
}
Example #11
int __cpuinit mips_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

#ifdef CONFIG_MIPS_MT_SMTC
	setup_smtc_dummy_clockevent_device();

	/*
	 * On SMTC we only register VPE0's compare interrupt as clockevent
	 * device.
	 */
	if (cpu)
		return 0;
#endif

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
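	/*
	 * The maximum delta is kept within 31 bits: mips_next_event() sets
	 * Compare = Count + delta and then checks the signed difference, so
	 * a delta of 2^31 cycles or more would look as if it had already
	 * expired.
	 */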
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
#ifdef CONFIG_MIPS_MT_SMTC
	cd->cpumask		= CPU_MASK_ALL;
#else
	cd->cpumask		= cpumask_of_cpu(cpu);
#endif
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

#ifdef CONFIG_MIPS_MT_SMTC
#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
#else
	setup_irq(irq, &c0_compare_irqaction);
#endif

	return 0;
}
Example #12
void __init nmdk_timer_init(void)
{
    unsigned long rate;
    struct clk *clk0;
    struct clk *clk1;
    u32 cr;

    clk0 = clk_get_sys("mtu0", NULL);
    BUG_ON(IS_ERR(clk0));

    clk1 = clk_get_sys("mtu1", NULL);
    BUG_ON(IS_ERR(clk1));

    clk_enable(clk0);
    clk_enable(clk1);

    /*
     * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
     * use a divide-by-16 counter if it's more than 16MHz
     */
    cr = MTU_CRn_32BITS;
    rate = clk_get_rate(clk0);
    if (rate > 16 << 20) {
        rate /= 16;
        cr |= MTU_CRn_PRESCALE_16;
    } else {
        cr |= MTU_CRn_PRESCALE_1;
    }
    clocksource_calc_mult_shift(&nmdk_clksrc, rate, MTU_MIN_RANGE);

    /* Timer 0 is the free running clocksource */
    writel(cr, mtu_base + MTU_CR(0));
    writel(0, mtu_base + MTU_LR(0));
    writel(0, mtu_base + MTU_BGLR(0));
    writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));

    /* Now the scheduling clock is ready */
    nmdk_clksrc.read = nmdk_read_timer;

    if (clocksource_register(&nmdk_clksrc))
        pr_err("timer: failed to initialize clock source %s\n",
               nmdk_clksrc.name);

    /* Timer 1 is used for events, fix according to rate */
    cr = MTU_CRn_32BITS;
    rate = clk_get_rate(clk1);
    if (rate > 16 << 20) {
        rate /= 16;
        cr |= MTU_CRn_PRESCALE_16;
    } else {
        cr |= MTU_CRn_PRESCALE_1;
    }
    clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);

    writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */

    nmdk_clkevt.max_delta_ns =
        clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
    nmdk_clkevt.min_delta_ns =
        clockevent_delta2ns(0x00000002, &nmdk_clkevt);
    nmdk_clkevt.cpumask	= cpumask_of(0);

    /* Register irq and clockevents */
    setup_irq(IRQ_MTU0, &nmdk_timer_irq);
    clockevents_register_device(&nmdk_clkevt);
}
void __init sunxi_timer_init(void)
{
#ifdef CONFIG_OF
	struct device_node *node;
	struct clk *clk;
#endif
	unsigned long rate = 0;
	int ret, irq;
	u32 val;

#ifdef CONFIG_OF
	node = of_find_matching_node(NULL, sunxi_timer_dt_ids);
	if (!node)
		panic("No sunxi timer node");

	timer_base = of_iomap(node, 0);
	if (!timer_base)
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");

	rate = clk_get_rate(clk);
#else
	timer_base = (void __iomem *)SUNXI_TIMER_VBASE;
	irq = SUNXI_IRQ_TIMER0;
#if defined(CONFIG_ARCH_SUN8IW3P1) && defined(CONFIG_FPGA_V4_PLATFORM)
	rate = 32000; /* testing on the FPGA showed clk-src = 32000 with prescale = 1 */
#else
	rate = 24000000;
#endif
#endif

	writel(rate / (TIMER_SCAL * HZ),
	       timer_base + TIMER0_INTVAL_REG);
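	/*
	 * Illustration (assuming TIMER_SCAL = 16, matching the /16
	 * pre-division selected below, and HZ = 100): with a 24 MHz source
	 * the interval register is loaded with 24000000 / (16 * 100) = 15000
	 * ticks, i.e. a 10 ms periodic tick.
	 */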

	/* set clock source to HOSC, 16 pre-division */
	val = readl(timer_base + TIMER0_CTL_REG);
	val &= ~(0x07 << 4);
	val &= ~(0x03 << 2);
	val |= (4 << 4) | (1 << 2);
	writel(val, timer_base + TIMER0_CTL_REG);

	/* set mode to auto reload */
	val = readl(timer_base + TIMER0_CTL_REG);
	writel(val | TIMER0_CTL_AUTORELOAD, timer_base + TIMER0_CTL_REG);

	ret = setup_irq(irq, &sunxi_timer_irq);
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);

	/* Enable timer0 interrupt */
	val = readl(timer_base + TIMER_CTL_REG);
	writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);

	sunxi_clockevent.mult = div_sc(rate / TIMER_SCAL,
				NSEC_PER_SEC,
				sunxi_clockevent.shift);
	sunxi_clockevent.max_delta_ns = clockevent_delta2ns(0x7fffffff,
							    &sunxi_clockevent);
	sunxi_clockevent.min_delta_ns = clockevent_delta2ns(0x10,
							    &sunxi_clockevent);
	sunxi_clockevent.cpumask = cpumask_of(0);

	clockevents_register_device(&sunxi_clockevent);
}
Example #14
void __init nmdk_timer_init(void)
{
	unsigned long rate;
	struct clk *clk0;
	unsigned long min_delta_ticks;

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));

	clk_enable(clk0);

	/*
	 * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
	 * for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
	 * At 32 MHz, the timer (with a 32-bit counter) can be programmed
	 * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
	 * by 16 gives too low a timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		clk_prescale = MTU_CRn_PRESCALE_16;
	} else {
		clk_prescale = MTU_CRn_PRESCALE_1;
	}

	nmdk_cycle = (rate + HZ/2) / HZ;
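	/*
	 * (rate + HZ/2) / HZ rounds to the nearest whole number of timer
	 * ticks per jiffy rather than truncating.
	 */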


	/* Timer 0 is the free running clocksource */
	nmdk_clksrc_reset();

	if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
			rate, 200, 32, clocksource_mmio_readl_down))
		pr_err("timer: failed to initialize clock source %s\n",
		       "mtu_0");

#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
	setup_sched_clock(nomadik_read_sched_clock, 32, rate);
#endif

	/* Timer 1 is used for events */

	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);

	nmdk_clkevt.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &nmdk_clkevt);

	/* When the ULPPLL is disabled the timer runs on a 32 kHz clock. */
	min_delta_ticks = prcmu_is_ulppll_disabled() ? 0x5 : 0x2;
	nmdk_clkevt.min_delta_ns =
		clockevent_delta2ns(min_delta_ticks, &nmdk_clkevt);
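	/*
	 * For scale (illustrative numbers): at 32.768 kHz, 5 ticks is about
	 * 152 us; on the faster MTU clocks, 2 ticks is well under 1 us.
	 */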
	nmdk_clkevt.cpumask	= cpumask_of(0);

	/* Register irq and clockevents */
	setup_irq(IRQ_MTU0, &nmdk_timer_irq);
	clockevents_register_device(&nmdk_clkevt);
#ifdef ARCH_HAS_READ_CURRENT_TIMER
	if (!prcmu_is_ulppll_disabled())
		set_delay_fn(nmdk_timer_delay_loop);
#endif

}
Example #15
/**
 * dw_apb_clockevent_register() - register the clock with the generic layer
 *
 * @dw_ced:	The APB clock to register as a clock_event_device.
 */
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced)
{
	apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL);
	clockevents_register_device(&dw_ced->ced);
	apbt_enable_int(&dw_ced->timer);
}
Example #16
int __cpuinit r4k_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
#if defined(CONFIG_MACH_AR934x) || defined(CONFIG_MACH_AR7100)
		/*
		 * The above test seems to randomly fail on Wasp. This
		 * results in the timer ISR not getting registered. Later,
		 * when the cpu receives a timer interrupt and tries
		 * to handle it, the corresponding data structures are
		 * not initialized properly, resulting in a panic
		 */
		printk("%s: Ignoring int_usable failure\n", __func__);
#else
		return -ENXIO;
#endif

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}
Example #17
static void __init msm_timer_init(void)
{
	int i;
	int res;
	int global_offset = 0;

	if (cpu_is_msm7x01()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
	} else if (cpu_is_msm7x30()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE + 0x04;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x24;
	} else if (cpu_is_qsd8x50()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
	} else if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_TMR_BASE + 0x04;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_TMR_BASE + 0x24;

		/* Use CPU0's timer as the global timer. */
		global_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
	} else
		BUG();

#ifdef CONFIG_ARCH_MSM_SCORPIONMP
	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
#endif

	for (i = 0; i < ARRAY_SIZE(msm_clocks); i++) {
		struct msm_clock *clock = &msm_clocks[i];
		struct clock_event_device *ce = &clock->clockevent;
		struct clocksource *cs = &clock->clocksource;

		clock->local_counter = clock->regbase + TIMER_COUNT_VAL;
		clock->global_counter = clock->local_counter + global_offset;

		writel(0, clock->regbase + TIMER_ENABLE);
		writel(0, clock->regbase + TIMER_CLEAR);
		writel(~0, clock->regbase + TIMER_MATCH_VAL);

		ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
		/* allow at least 10 seconds to notice that the timer wrapped */
		ce->max_delta_ns =
			clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
		/* 4 gets rounded down to 3 */
		ce->min_delta_ns = clockevent_delta2ns(4, ce);
		ce->cpumask = cpumask_of(0);

		res = clocksource_register_hz(cs, clock->freq);
		if (res)
			printk(KERN_ERR "msm_timer_init: clocksource_register "
			       "failed for %s\n", cs->name);

		res = setup_irq(clock->irq.irq, &clock->irq);
		if (res)
			printk(KERN_ERR "msm_timer_init: setup_irq "
			       "failed for %s\n", cs->name);

		clockevents_register_device(ce);
	}
}
Example #18
static int __init l4x_timer_init_ret(void)
{
    int r;
    l4lx_thread_t thread;
    int irq;
    L4XV_V(f);

    timer_irq_cap = l4x_cap_alloc();
    if (l4_is_invalid_cap(timer_irq_cap)) {
        printk(KERN_ERR "l4timer: Failed to alloc\n");
        return -ENOMEM;
    }

    r = L4XV_FN_i(l4_error(l4_factory_create_irq(l4re_env()->factory,
                           timer_irq_cap)));
    if (r) {
        printk(KERN_ERR "l4timer: Failed to create irq: %d\n", r);
        goto out1;
    }

    if ((irq = l4x_register_irq(timer_irq_cap)) < 0) {
        r = -ENOMEM;
        goto out2;
    }

    printk("l4timer: Using IRQ%d\n", irq);

    setup_irq(irq, &l4timer_irq);

    L4XV_L(f);
    thread = l4lx_thread_create
             (timer_thread,                /* thread function */
              smp_processor_id(),          /* cpu */
              NULL,                        /* stack */
              &timer_irq_cap, sizeof(timer_irq_cap), /* data */
              l4x_cap_alloc(),             /* cap */
              PRIO_TIMER,                  /* prio */
              0,                           /* vcpup */
              "timer",                     /* name */
              NULL);
    L4XV_U(f);

    timer_srv = l4lx_thread_get_cap(thread);

    if (!l4lx_thread_is_valid(thread)) {
        printk(KERN_ERR "l4timer: Failed to create thread\n");
        r = -ENOMEM;
        goto out3;
    }


    l4timer_clockevent.irq = irq;
    l4timer_clockevent.mult =
        div_sc(1000000, NSEC_PER_SEC, l4timer_clockevent.shift);
    l4timer_clockevent.max_delta_ns =
        clockevent_delta2ns(0xffffffff, &l4timer_clockevent);
    l4timer_clockevent.min_delta_ns =
        clockevent_delta2ns(0xf, &l4timer_clockevent);
    l4timer_clockevent.cpumask = cpumask_of(0);
    clockevents_register_device(&l4timer_clockevent);

    return 0;

out3:
    l4x_unregister_irq(irq);
out2:
    L4XV_FN_v(l4_task_delete_obj(L4RE_THIS_TASK_CAP, timer_irq_cap));
out1:
    l4x_cap_free(timer_irq_cap);
    return r;
}
Example #19
void __init plat_time_init(void)
{
	struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
	unsigned long t;

	/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
	 * has been detected.  If so install the rtcmatch2 clocksource,
	 * otherwise don't bother.  Note that both bits being set is by
	 * no means a definite guarantee that the counters actually work
	 * (the 32S bit seems to be stuck set to 1 once a single clock-
	 * edge is detected, hence the timeouts).
	 */
	if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
		goto cntr_err;

	/*
	 * setup counter 1 (RTC) to tick at full speed
	 */
	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	au_writel(0, SYS_RTCTRIM);	/* 32.768 kHz */
	au_sync();

	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;
	au_writel(0, SYS_RTCWRITE);
	au_sync();

	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	/* register counter1 clocksource and event device */
	clocksource_set_clock(&au1x_counter1_clocksource, 32768);
	clocksource_register(&au1x_counter1_clocksource);

	cd->shift = 32;
	cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
	cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(8, cd);	/* ~0.25ms */
	clockevents_register_device(cd);
	setup_irq(AU1000_RTC_MATCH2_INT, &au1x_rtcmatch2_irqaction);

	printk(KERN_INFO "Alchemy clocksource installed\n");

	/* can now use 'wait' */
	allow_au1k_wait = 1;
	return;

cntr_err:
	/* counters unusable, use C0 counter */
	r4k_clockevent_init();
	init_r4k_clocksource();
	allow_au1k_wait = 0;
}
static void __init u300_timer_init(void)
{
	u300_enable_timer_clock();
	/*
	 * Disable the "OS" and "DD" timers - these are designed for Symbian!
	 * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
	 */
	writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_CRC);
	writel(U300_TIMER_APP_ROST_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_ROST);
	writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_DOST);
	writel(U300_TIMER_APP_RDDT_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_RDDT);
	writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_DDDT);

	/* Reset the General Purpose timer 1. */
	writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_RGPT1);

	/* Set up the IRQ handler */
	setup_irq(IRQ_U300_TIMER_APP_GP1, &u300_timer_irq);

	/* Reset the General Purpose timer 2 */
	writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_RGPT2);
	/* Set this timer to run around forever */
	writel(0xFFFFFFFFU, U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2TC);
	/* Set continuous mode so it wraps around */
	writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_SGPT2M);
	/* Disable timer interrupts */
	writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2IE);
	/* Then enable the GP2 timer to use as a free running us counter */
	writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
		U300_TIMER_APP_VBASE + U300_TIMER_APP_EGPT2);

	/* This is a pure microsecond clock source */
	clocksource_u300_1mhz.mult =
		clocksource_khz2mult(1000, clocksource_u300_1mhz.shift);
	if (clocksource_register(&clocksource_u300_1mhz))
		printk(KERN_ERR "timer: failed to initialize clock "
		       "source %s\n", clocksource_u300_1mhz.name);

	clockevent_u300_1mhz.mult =
		div_sc(1000000, NSEC_PER_SEC, clockevent_u300_1mhz.shift);
	/* 32bit counter, so 32bits delta is max */
	clockevent_u300_1mhz.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &clockevent_u300_1mhz);
	/* This timer is slow enough to set for 1 cycle == 1 MHz */
	clockevent_u300_1mhz.min_delta_ns =
		clockevent_delta2ns(1, &clockevent_u300_1mhz);
	clockevent_u300_1mhz.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_u300_1mhz);
	/*
	 * TODO: init and register the rest of the timers too, they can be
	 * used by hrtimers!
	 */
}
Example #21
/*
 * timer_device_alloc_event()
 * 	Allocate a timer device event.
 */
static int timer_device_alloc_event(const char *name, int cpuid, const cpumask_t *mask)
{
	struct clock_event_device *dev;
	struct irqaction *action;

	/*
	 * Are we out of configured timers?
	 */
	timer_device_lock_acquire();
	if (timer_device_next_timer >= MAX_TIMERS) {
		timer_device_lock_release();
		printk(KERN_WARNING "out of timer event entries\n");
		return -1;
	}
	dev = &timer_device_devs[timer_device_next_timer];
	action = &timer_device_irqs[timer_device_next_timer];
	timer_device_next_timer++;
	timer_device_lock_release();

	/*
	 * Now allocate a timer to ourselves.
	 */
	dev->irq = timer_alloc();
	if (dev->irq == -1) {
		timer_device_lock_acquire();
		timer_device_next_timer--;
		timer_device_lock_release();
		printk(KERN_WARNING "out of hardware timers\n");
		return -1;
	}

	/*
	 * Init the IRQ action structure.  Make sure
	 * this in place before you register the clock
	 * event device.
	 */
	action->name = name;
	action->flags = IRQF_DISABLED | IRQF_TIMER;
	action->handler = timer_device_event;
	action->dev_id = dev;
	setup_irq(dev->irq, action);
	irq_set_affinity(dev->irq, mask);
	pic_disable_vector(dev->irq);

	/*
	 * init clock dev structure.
	 *
	 * The max_delta_ns needs to be less than a full timer's
	 * resolution to ensure that with overhead, we will be able to
	 * service the timer.  The usual approach is to use 31 bits
	 * instead of 32 for a 32 bit timer.
	 *
	 * The min_delta_ns is chosen to ensure that setting next event
	 * will never be requested with too small of value.
	 */
	dev->name = name;
	dev->rating = timer_device_clockbase.rating;
	dev->shift = timer_device_clockbase.shift;
	dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	dev->set_mode = timer_device_set_mode;
	dev->set_next_event = timer_device_set_next_event;
	dev->mult = div_sc(frequency, NSEC_PER_SEC, dev->shift);
	dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, dev);
	dev->min_delta_ns = clockevent_delta2ns(0xf, dev);
	dev->cpumask = mask;
	printk(KERN_NOTICE "timer[%d]: %s - created\n", dev->irq, dev->name);

	/*
	 * Now register the device.
	 */
	clockevents_register_device(dev);
	return dev->irq;
}
/*
 * Setup the local clock events for a CPU.
 */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	struct kona_td kona_td;
	struct timer_ch_cfg config;

	pr_info("local_timer_setup called for %d\n", cpu);
	/* allocate an AON timer channel as local tick timer
	 */
	kona_td = (struct kona_td)__get_cpu_var(percpu_kona_td);

	if (!kona_td.allocated) {
		kona_td.kona_timer =
		    kona_timer_request(TICK_TIMER_NAME,
				       TICK_TIMER_OFFSET + cpu);
		if (kona_td.kona_timer) {
			kona_td.allocated = true;
		} else { /* kona_td.kona_timer is already allocated, so get
			    the existing pointer and reuse it. This ensures
			    we don't use an extra channel for timer events
			    before the local timer comes up.
			    */
			kona_td.kona_timer = get_timer_ptr(TICK_TIMER_NAME,
					TICK_TIMER_OFFSET + cpu);
			if (!kona_td.kona_timer) {
				pr_err("%s: Failed to allocate %s channel %d"
					"as CPU %d local tick device\n",
					__func__,
				       TICK_TIMER_NAME,
				       TICK_TIMER_OFFSET + cpu, cpu);
				return -ENXIO;
			} else {
				kona_td.allocated = true;
			}
		}
	}

	/*
	 * In the future: the following should be a one-time configuration
	 */
	config.mode = MODE_ONESHOT;
	config.arg = evt;
	config.cb = kona_tick_interrupt_cb;
	kona_timer_config(kona_td.kona_timer, &config);

	irq_set_affinity(kona_td.kona_timer->irq, cpumask_of(cpu));

	evt->name = "local_timer";
	evt->cpumask = cpumask_of(cpu);
	evt->irq = kona_td.kona_timer->irq;
	evt->set_next_event = kona_tick_set_next_event;
	evt->set_mode = kona_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 250;
	evt->shift = 32;
	evt->mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(MAX_KONA_COUNT_CLOCK, evt);
	/* There is a MIN_KONA_DELTA_CLOCK clock-cycle delay in the HUB Timer
	 * due to an ASIC limitation. When min_delta_ns is set to N, the load
	 * value actually requested by hrtimer becomes N - 1, so add 1 to keep
	 * the effective minimum at MIN_KONA_DELTA_CLOCK.
	 */
	evt->min_delta_ns = clockevent_delta2ns(MIN_KONA_DELTA_CLOCK + 1, evt);

	per_cpu(percpu_kona_td, cpu) = kona_td;

	clockevents_register_device(evt);
	return 0;
}