/* * Setup the local event timer for @cpu */ void arc_local_timer_setup(unsigned int cpu) { struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); clk->cpumask = cpumask_of(cpu); clockevents_config_and_register(clk, arc_get_core_freq(), 0, ARC_TIMER_MAX); /* * setup the per-cpu timer IRQ handler - for all cpus * For non boot CPU explicitly unmask at intc * setup_irq() -> .. -> irq_startup() already does this on boot-cpu */ if (!cpu) setup_irq(TIMER0_IRQ, &arc_timer_irq); else arch_unmask_irq(TIMER0_IRQ); }
/*
 * Setup the local event timer for @cpu
 *
 * N.B. weak so that some exotic ARC SoCs can completely override it.
 *
 * Fix: the previous open-coded setup called clockevents_calc_mult_shift()
 * and assigned max_delta_ns, but never initialized min_delta_ns /
 * min_delta_ticks, leaving the device half-configured.  Use
 * clockevents_config_and_register(), which computes mult/shift and both
 * delta bounds from the core frequency in one step.
 */
void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
{
	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);

	clk->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(clk, arc_get_core_freq(),
					0, ARC_TIMER_MAX);

	/*
	 * setup the per-cpu timer IRQ handler - for all cpus
	 * For non boot CPU explicitly unmask at intc
	 * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
	 */
	if (!cpu)
		setup_irq(TIMER0_IRQ, &arc_timer_irq);
	else
		arch_unmask_irq(TIMER0_IRQ);
}
/*
 * irq_chip unmask callback: forward the Linux IRQ number carried in
 * @data to the low-level arch unmask primitive.
 */
static void arc_unmask_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;

	arch_unmask_irq(irq);
}