/*
 * edison_clockevents_init() - set up the PIU timer as the clock event device.
 * @irq: hardware interrupt line of the timer.
 *
 * Computes mult/min/max deltas from the PIU timer clock (GLB_TIMER_FREQ_KHZ,
 * in kHz — the original comment notes a 12 MHz input), clears any pending
 * timer event, unmasks the interrupt at the GIC distributor set-enable
 * register, installs the handler and registers the device.
 */
void __init edison_clockevents_init(unsigned int irq)
{
	struct clock_event_device *evt = &clockevent_timer;
	unsigned long temp;

	evt->irq = (int)irq;
	/* Frequency is in kHz, hence NSEC_PER_MSEC in the scaled factor */
	evt->mult = div_sc((unsigned long)GLB_TIMER_FREQ_KHZ, NSEC_PER_MSEC,
			   (int)evt->shift);	/* PIU Timer FRE = 12Mhz */
	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);

	/* clear timer event flag */
	PERI_W(WDT_STATUS, FLAG_EVENT);

	/*
	 * Interrupt Set Enable Register.
	 * Use 1UL instead of the original (0x1 << irq): shifting a signed
	 * int left by 31 is undefined behaviour and anything >= 32 was
	 * silently wrong. NOTE(review): still assumes irq < BITS_PER_LONG,
	 * as the original code did.
	 */
	temp = PERI_R(GIC_DIST_SET_EANBLE);
	temp |= 1UL << irq;
	PERI_W(GIC_DIST_SET_EANBLE, temp);

	setup_irq(irq, &timer_irq);
	clockevents_register_device(evt);
}
/*
 * clockevents_config() - compute conversion factors and delta limits.
 * @dev:  clock event device to configure
 * @freq: event source frequency in Hz
 *
 * Only meaningful for oneshot-capable devices; periodic-only devices
 * need no mult/shift conversion.
 */
static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	/*
	 * do_div() requires a 64-bit dividend; the previous 'unsigned long'
	 * is only 32 bits wide on 32-bit architectures.
	 */
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
}
/*
 * integrator_clockevent_init() - set up timer1 as the clock event device.
 * @khz: timer input clock in kHz.
 *
 * Picks a hardware prescaler (/256 or /16) so that one jiffy's worth of
 * ticks fits the counter (the 0xffff max delta and 0x10000/0x100000
 * thresholds imply a 16-bit counter), programs the control register,
 * fills in mult/min/max deltas and registers the device.
 */
static void integrator_clockevent_init(u32 khz)
{
	struct clock_event_device *evt = &integrator_clockevent;
	unsigned int ctrl = 0;

	/*
	 * Select a divider so the per-jiffy reload fits the counter.
	 * NOTE(review): khz * 1000 can overflow u32 for inputs above
	 * ~4.29 GHz — presumably unreachable on this hardware; confirm.
	 */
	if (khz * 1000 > 0x100000 * HZ) {
		khz /= 256;
		ctrl |= TIMER_CTRL_DIV256;
	} else if (khz * 1000 > 0x10000 * HZ) {
		khz /= 16;
		ctrl |= TIMER_CTRL_DIV16;
	}
	timer_reload = khz * 1000 / HZ;
	writel(ctrl, clkevt_base + TIMER_CTRL);

	evt->irq = IRQ_TIMERINT1;
	/* khz is in kHz, hence NSEC_PER_MSEC in the scaled-math factor */
	evt->mult = div_sc(khz, NSEC_PER_MSEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);

	setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);
	clockevents_register_device(evt);
}
/*
 * pxa_timer_init() - PXA OS-timer setup: clocksource on OSCR0 and
 * clock event device on the OSMR0 match register.
 *
 * Masks the OS-timer interrupts, writes the match-status bits (presumably
 * write-1-to-clear — confirm against the PXA manual), computes conversion
 * factors, registers both framework devices and installs the OST0 handler.
 */
static void __init pxa_timer_init(void)
{
	unsigned long clock_tick_rate = get_clock_tick_rate();

	/* Mask all OS timer interrupts, then write all four match flags */
	OIER = 0;
	OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;

	set_oscr2ns_scale(clock_tick_rate);

	/* last arg: 4 seconds of guaranteed overflow-free conversion */
	clocksource_calc_mult_shift(&cksrc_pxa_oscr0, CLOCK_TICK_RATE, 4);
	clockevents_calc_mult_shift(&ckevt_pxa_osmr0, CLOCK_TICK_RATE, 4);
	ckevt_pxa_osmr0.max_delta_ns =
		clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
	/* +1 guards against the conversion rounding below the HW minimum */
	ckevt_pxa_osmr0.min_delta_ns =
		clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1;
	ckevt_pxa_osmr0.cpumask = cpumask_of(0);

	clocksource_register(&cksrc_pxa_oscr0);
	clockevents_register_device(&ckevt_pxa_osmr0);
	setup_irq(IRQ_OST0, &pxa_ost0_irq);

	rtc_calib_init();
}
/*
 * mv_init_timer() - Marvell Orion timer bring-up.
 *
 * Starts a free-running 32-bit down-counter as the clocksource, hooks the
 * bridge interrupt for the tick, and registers the clock event device.
 * Both conversion factors are derived from the TCLK rate
 * (mvBoardTclkGet()).
 */
static void mv_init_timer(void)
{
	/*
	 * Setup clocksource free running timer (no interrupt on reload)
	 */
	MV_REG_WRITE(CNTMR_VAL_REG(CLOCKSOURCE), 0xffffffff);
	MV_REG_WRITE(CNTMR_RELOAD_REG(CLOCKSOURCE), 0xffffffff);
	MV_REG_BIT_RESET(BRIDGE_INT_MASK_REG, BRIDGE_INT_TIMER(CLOCKSOURCE));
	MV_REG_BIT_SET(CNTMR_CTRL_REG, TIMER_RELOAD_EN(CLOCKSOURCE) |
		       TIMER_EN(CLOCKSOURCE));

	/*
	 * Register clocksource
	 */
	orion_clksrc.mult =
		clocksource_hz2mult(mvBoardTclkGet(), orion_clksrc.shift);
	clocksource_register(&orion_clksrc);

	/*
	 * Connect and enable tick handler
	 */
	setup_irq(IRQ_BRIDGE, &orion_timer_irq);

	/*
	 * Register clockevent
	 */
	orion_clkevt.mult =
		div_sc(mvBoardTclkGet(), NSEC_PER_SEC, orion_clkevt.shift);
	orion_clkevt.max_delta_ns =
		clockevent_delta2ns(0xfffffffe, &orion_clkevt);
	orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt);
	clockevents_register_device(&orion_clkevt);
}
/*
 * s5pv210_init_dynamic_tick_timer() - set up the tick timer for dynamic
 * (tickless) operation.
 * @rate: timer input clock in Hz.
 *
 * Stops the timer, restarts it with one jiffy's worth of ticks, fills in
 * the clock event conversion factors and registers the device, then logs
 * the resulting parameters.
 */
static void s5pv210_init_dynamic_tick_timer(unsigned long rate)
{
	tick_timer_mode = 1;

	s5pv210_tick_timer_stop();

	/* (rate / HZ) - 1 ticks per jiffy; second arg 1 presumably
	 * means auto-reload — confirm against the timer driver. */
	s5pv210_tick_timer_start((rate / HZ) - 1, 1);

	clockevent_tick_timer.mult = div_sc(rate, NSEC_PER_SEC,
					    clockevent_tick_timer.shift);
	/*
	 * NOTE(review): -1 converts to ULONG_MAX in clockevent_delta2ns();
	 * on a 64-bit build that is far larger than a 32-bit counter can
	 * hold — confirm an explicit 0xffffffff was intended.
	 */
	clockevent_tick_timer.max_delta_ns =
		clockevent_delta2ns(-1, &clockevent_tick_timer);
	clockevent_tick_timer.min_delta_ns =
		clockevent_delta2ns(1, &clockevent_tick_timer);
	clockevent_tick_timer.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_tick_timer);

	printk(KERN_INFO "mult[%u]\n", clockevent_tick_timer.mult);
	printk(KERN_INFO "max_delta_ns[%llu]\n",
	       clockevent_tick_timer.max_delta_ns);
	printk(KERN_INFO "min_delta_ns[%llu]\n",
	       clockevent_tick_timer.min_delta_ns);
	printk(KERN_INFO "rate[%lu]\n", rate);
	printk(KERN_INFO "HZ[%d]\n", HZ);
}
/*
 * Initialize the conversion factor and the min/max deltas of the clock event
 * structure and register the clock event source with the framework.
 *
 * Also routes the MFGPT0 comparator-2 output through the interrupt mapper
 * (MSR writes) and reads back the MFGPT base address before installing the
 * IRQ handler.
 */
void __init setup_mfgpt0_timer(void)
{
	u32 basehi;
	struct clock_event_device *cd = &mfgpt_clockevent;
	unsigned int cpu = smp_processor_id();

	cd->cpumask = cpumask_of(cpu);
	clockevent_set_clock(cd, MFGPT_TICK_RATE);
	/* 0xffff is the largest delta programmed here */
	cd->max_delta_ns = clockevent_delta2ns(0xffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0xf, cd);

	/* Enable MFGPT0 Comparator 2 Output to the Interrupt Mapper */
	_wrmsr(DIVIL_MSR_REG(MFGPT_IRQ), 0, 0x100);

	/* Enable Interrupt Gate 5 */
	_wrmsr(DIVIL_MSR_REG(PIC_ZSEL_LOW), 0, 0x50000);

	/* get MFGPT base address */
	_rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);

	clockevents_register_device(cd);

	setup_irq(CS5536_MFGPT_INTR, &irq5);
}
/*
 * time_init_deferred - called by start_kernel to set up timer/clock source
 *
 * Install the IRQ handler for the clock, setup timers.
 * This is done late, as that way, we can use ioremap().
 *
 * This runs just before the delay loop is calibrated, and
 * is used for delay calibration.
 */
void __init time_init_deferred(void)
{
	struct resource *resource = NULL;
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;

	ce_dev->cpumask = cpu_all_mask;

	if (!resource)
		resource = rtos_timer_device.resource;

	/* ioremap here means this has to run later, after paging init */
	rtos_timer = ioremap(resource->start, resource_size(resource));

	/*
	 * NOTE(review): on ioremap failure the region is released but the
	 * function continues with rtos_timer == NULL — confirm this path
	 * is intentionally non-fatal.
	 */
	if (!rtos_timer) {
		release_mem_region(resource->start, resource_size(resource));
	}
	clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);

	/* Note: the sim generic RTOS clock is apparently really 18750Hz */

	/*
	 * Last arg is some guaranteed seconds for which the conversion will
	 * work without overflow.
	 */
	clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);

	ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
	ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);

#ifdef CONFIG_SMP
	setup_percpu_clockdev();
#endif

	clockevents_register_device(ce_dev);
	setup_irq(ce_dev->irq, &rtos_timer_intdesc);
}
/*
 * bcm2709_timer_init() - bring up the BCM2709 system timer.
 *
 * Registers the clocksource and sched_clock backed by the system timer
 * counter (STC_FREQ_HZ), installs the TIMER3 interrupt, fills in the
 * clock event conversion factors and registers the event device, and
 * finally hooks the timer-based delay implementation.
 */
static void __init bcm2709_timer_init(void)
{
	struct clock_event_device *evt = &timer0_clockevent;

	/* init high res timer */
	bcm2709_clocksource_init();

	/*
	 * Make irqs happen for the system timer
	 */
	setup_irq(IRQ_TIMER3, &bcm2709_timer_irq);

	sched_clock_register(bcm2709_read_sched_clock, 32, STC_FREQ_HZ);

	evt->mult = div_sc(STC_FREQ_HZ, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
	evt->cpumask = cpumask_of(0);
	clockevents_register_device(evt);

	register_current_timer_delay(&bcm2709_delay_timer);
}
/*
 * mxs_clockevent_init() - register the timrot clock event device.
 * @timer_clk: clock feeding the timer; its rate drives the mult factor.
 *
 * The v1 timrot uses a dedicated set_next_event and a 0xfffe maximum
 * delta; later versions allow 0xfffffffe. The minimum delta (0xf) is
 * common to both. Always returns 0.
 */
static int __init mxs_clockevent_init(struct clk *timer_clk)
{
	struct clock_event_device *evt = &mxs_clockevent_device;
	unsigned int rate = clk_get_rate(timer_clk);

	evt->mult = div_sc(rate, NSEC_PER_SEC, evt->shift);
	evt->cpumask = cpumask_of(0);
	/* min delta is identical for both hardware revisions */
	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);

	if (timrot_is_v1()) {
		evt->set_next_event = timrotv1_set_next_event;
		evt->max_delta_ns = clockevent_delta2ns(0xfffe, evt);
	} else {
		evt->max_delta_ns = clockevent_delta2ns(0xfffffffe, evt);
	}

	clockevents_register_device(evt);

	return 0;
}
/*
 * Register clock_event_device to MSS Timer1
 *
 * @irq: interrupt line assigned to the timer.
 *
 * The mult/shift pair is sized for a guaranteed 5-second conversion
 * range; max_delta_ns is set to exactly that range rather than being
 * derived from a counter width.
 */
static void __init timer_1_clockevents_init(unsigned int irq)
{
	const u64 max_delay_in_sec = 5;

	timer_1_clockevent.irq = irq;

	/*
	 * Set the fields required for the set_next_event method
	 * (tickless kernel support)
	 */
	clockevents_calc_mult_shift(&timer_1_clockevent, timer_ref_clk,
				    max_delay_in_sec);
	timer_1_clockevent.max_delta_ns = max_delay_in_sec * NSEC_PER_SEC;
	timer_1_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &timer_1_clockevent);

	clockevents_register_device(&timer_1_clockevent);
}
/*
 * setup_tile_timer() - register this CPU's per-cpu tile timer as its
 * clock event device.
 *
 * min_delta_ns is not set here; it is left at whatever the device
 * structure already contains.
 */
void __cpuinit setup_tile_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(tile_timer);

	/* Fill in fields that are speed-specific. */
	clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
	evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt);

	/* Mark as being for this cpu only. */
	evt->cpumask = cpumask_of(smp_processor_id());

	/* Start out with timer not firing. */
	arch_local_irq_mask_now(INT_TILE_TIMER);

	/* Register tile timer. */
	clockevents_register_device(evt);
}
/*
 * sh_tmu_clock_event_start() - start the TMU channel as a clock event
 * source.
 * @p:        per-channel private data (rate must already be known).
 * @periodic: non-zero to program a periodic tick of one jiffy.
 *
 * min_delta_ns is a fixed 5 us rather than being derived from the
 * conversion factors.
 */
static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_tmu_enable(p);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
	ced->min_delta_ns = 5000;

	if (periodic) {
		/* ticks per jiffy, rounded to nearest */
		p->periodic = (p->rate + HZ/2) / HZ;
		sh_tmu_set_next(p, p->periodic, 1);
	}
}
/*
 * Setup the local event timer for @cpu
 * N.B. weak so that some exotic ARC SoCs can completely override it
 */
void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
{
	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);

	/* last arg: 5 seconds of guaranteed conversion range */
	clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);

	clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
	clk->cpumask = cpumask_of(cpu);

	clockevents_register_device(clk);

	/*
	 * setup the per-cpu timer IRQ handler - for all cpus
	 * For non boot CPU explicitly unmask at intc
	 * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
	 */
	if (!cpu)
		setup_irq(TIMER0_IRQ, &arc_timer_irq);
	else
		arch_unmask_irq(TIMER0_IRQ);
}
/*
 * msm_timer_init() - register every MSM timer in msm_clocks[] as both a
 * clocksource and a clock event device.
 *
 * For each timer: disable it, clear it, push the match value out of the
 * way (~0), compute conversion factors, then register clocksource, IRQ
 * handler and clockevent in that order. Registration failures are logged
 * but not fatal.
 */
static void __init msm_timer_init(void)
{
	int i;
	int res;

#ifdef CONFIG_ARCH_MSM8X60
	/* 8x60: run the DGT off a /4 divider */
	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
#endif

	for (i = 0; i < ARRAY_SIZE(msm_clocks); i++) {
		struct msm_clock *clock = &msm_clocks[i];
		struct clock_event_device *ce = &clock->clockevent;
		struct clocksource *cs = &clock->clocksource;

		writel(0, clock->regbase + TIMER_ENABLE);
		writel(0, clock->regbase + TIMER_CLEAR);
		writel(~0, clock->regbase + TIMER_MATCH_VAL);

		ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
		/* allow at least 10 seconds to notice that the timer wrapped */
		ce->max_delta_ns =
			clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
		/* 4 gets rounded down to 3 */
		ce->min_delta_ns = clockevent_delta2ns(4, ce);
		ce->cpumask = cpumask_of(0);

		cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
		res = clocksource_register(cs);
		if (res)
			printk(KERN_ERR "msm_timer_init: clocksource_register "
			       "failed for %s\n", cs->name);

		res = setup_irq(clock->irq.irq, &clock->irq);
		if (res)
			printk(KERN_ERR "msm_timer_init: setup_irq "
			       "failed for %s\n", cs->name);

		clockevents_register_device(ce);
	}
}
/*
 * txx9_clockevent_init() - set up a TXx9 timer block as the clock event
 * device.
 * @baseaddr: physical base of the timer register block.
 * @irq:      interrupt line of the timer.
 * @imbusclk: IM-bus clock rate, from which the timer clock is derived
 *            via TIMER_CLK().
 *
 * NOTE(review): the ioremap() result is used unchecked — confirm the
 * mapping cannot fail this early in boot.
 */
void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
				 unsigned int imbusclk)
{
	struct clock_event_device *cd = &txx9tmr_clock_event_device;
	struct txx9_tmr_reg __iomem *tmrptr;

	tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
	txx9tmr_stop_and_clear(tmrptr);
	/* program clock divider, mask timer interrupts */
	__raw_writel(TIMER_CCD, &tmrptr->ccdr);
	__raw_writel(0, &tmrptr->itmr);
	txx9_tmrptr = tmrptr;

	clockevent_set_clock(cd, TIMER_CLK(imbusclk));
	/* max delta limited by the TXX9_TIMER_BITS-wide counter */
	cd->max_delta_ns =
		clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
	cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
	cd->irq = irq;
	clockevents_register_device(cd);
	setup_irq(irq, &txx9tmr_irq);
	printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
	       baseaddr, irq);
}
/*
 * plat_time_init() - Alchemy Au1x00: use the 32.768 kHz counter1 as
 * clocksource and RTC match2 as clock event device, falling back to the
 * CP0 count/compare timer if the counter hardware does not come up.
 */
void __init plat_time_init(void)
{
	struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
	unsigned long t;

	/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
	 * has been detected.  If so install the rtcmatch2 clocksource,
	 * otherwise don't bother.  Note that both bits being set is by
	 * no means a definite guarantee that the counters actually work
	 * (the 32S bit seems to be stuck set to 1 once a single clock-
	 * edge is detected, hence the timeouts).
	 */
	if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
		goto cntr_err;

	/*
	 * setup counter 1 (RTC) to tick at full speed
	 */
	/* bounded busy-wait for the trim-register-busy bit to clear */
	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	au_writel(0, SYS_RTCTRIM);	/* 32.768 kHz */
	au_sync();

	/* wait for the counter-write-busy bit before and after writing */
	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;
	au_writel(0, SYS_RTCWRITE);
	au_sync();

	t = 0xffffff;
	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
		asm volatile ("nop");
	if (!t)
		goto cntr_err;

	/* register counter1 clocksource and event device */
	clocksource_set_clock(&au1x_counter1_clocksource, 32768);
	clocksource_register(&au1x_counter1_clocksource);

	cd->shift = 32;
	cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
	cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(8, cd);	/* ~0.25ms */
	clockevents_register_device(cd);
	setup_irq(AU1000_RTC_MATCH2_INT, &au1x_rtcmatch2_irqaction);

	printk(KERN_INFO "Alchemy clocksource installed\n");

	/* can now use 'wait' */
	allow_au1k_wait = 1;
	return;

cntr_err:
	/* counters unusable, use C0 counter */
	r4k_clockevent_init();
	init_r4k_clocksource();
	allow_au1k_wait = 0;
}
/*
 * msm_timer_init() - register the GPT and DGT timers as clocksources and
 * clock event devices, after picking the register layout for the detected
 * MSM variant. Unknown SoCs hit BUG().
 *
 * On 8x60/8960 CPU0's timer bank is also exposed as the global counter
 * via global_offset.
 */
static void __init msm_timer_init(void)
{
	int i;
	int res;
	int global_offset = 0;

	/* per-SoC register base selection */
	if (cpu_is_msm7x01()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
	} else if (cpu_is_msm7x30()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE + 0x04;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x24;
	} else if (cpu_is_qsd8x50()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_CSR_BASE;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_CSR_BASE + 0x10;
	} else if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		msm_clocks[MSM_CLOCK_GPT].regbase = MSM_TMR_BASE + 0x04;
		msm_clocks[MSM_CLOCK_DGT].regbase = MSM_TMR_BASE + 0x24;

		/* Use CPU0's timer as the global timer. */
		global_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
	} else
		BUG();

#ifdef CONFIG_ARCH_MSM_SCORPIONMP
	/* Scorpion MP: run the DGT off a /4 divider */
	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
#endif

	for (i = 0; i < ARRAY_SIZE(msm_clocks); i++) {
		struct msm_clock *clock = &msm_clocks[i];
		struct clock_event_device *ce = &clock->clockevent;
		struct clocksource *cs = &clock->clocksource;

		clock->local_counter = clock->regbase + TIMER_COUNT_VAL;
		clock->global_counter = clock->local_counter + global_offset;

		writel(0, clock->regbase + TIMER_ENABLE);
		writel(0, clock->regbase + TIMER_CLEAR);
		writel(~0, clock->regbase + TIMER_MATCH_VAL);

		ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
		/* allow at least 10 seconds to notice that the timer wrapped */
		ce->max_delta_ns =
			clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
		/* 4 gets rounded down to 3 */
		ce->min_delta_ns = clockevent_delta2ns(4, ce);
		ce->cpumask = cpumask_of(0);

		res = clocksource_register_hz(cs, clock->freq);
		if (res)
			printk(KERN_ERR "msm_timer_init: clocksource_register "
			       "failed for %s\n", cs->name);

		res = setup_irq(clock->irq.irq, &clock->irq);
		if (res)
			printk(KERN_ERR "msm_timer_init: setup_irq "
			       "failed for %s\n", cs->name);

		clockevents_register_device(ce);
	}
}
/*
 * r4k_clockevent_init() - register the MIPS CP0 count/compare timer as
 * this CPU's clock event device.
 *
 * Returns 0 on success, -ENXIO when the counter is absent/unusable.
 * The shared compare IRQ handler is installed only once
 * (cp0_timer_irq_installed).
 */
int __cpuinit r4k_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	/*
	 * NOTE(review): on AR934x/AR7100 the printk below is the body of
	 * this if — the failure is deliberately ignored (see comment);
	 * on all other platforms an unusable compare int is fatal.
	 */
	if (!c0_compare_int_usable())
#if defined(CONFIG_MACH_AR934x) || defined(CONFIG_MACH_AR7100)
		/*
		 * The above test seems to randomly fail on Wasp. This
		 * results in timer isr not getting registered. Later,
		 * when the cpu receives a timer interrupt and tries
		 * to handle it, the corresponding data structures are
		 * not initialzed properly resulting in a panic
		 */
		printk("%s: Ignoring int_usable failure\n", __func__);
#else
		return -ENXIO;
#endif

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of it's liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift = 32;
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);

	cd->rating = 300;
	cd->irq = irq;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = mips_next_event;
	cd->set_mode = mips_set_clock_mode;
	cd->event_handler = mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}
/*
 * timer_device_alloc_event()
 *	Allocate a timer device event.
 *
 * @name:  name used for both the irqaction and the clock event device.
 * @cpuid: unused in this function body.
 * @mask:  cpumask the event device (and its IRQ affinity) is bound to.
 *
 * Reserves one slot from the static timer_device_devs/timer_device_irqs
 * tables under the module lock, claims a hardware timer, wires up the
 * IRQ, fills in the clock event device and registers it.
 *
 * Returns the allocated IRQ number, or -1 if no slot or no hardware
 * timer is available (the table slot is rolled back in the latter case).
 */
static int timer_device_alloc_event(const char *name, int cpuid, const cpumask_t *mask)
{
	struct clock_event_device *dev;
	struct irqaction *action;

	/*
	 * Are we out of configured timers?
	 */
	timer_device_lock_acquire();
	if (timer_device_next_timer >= MAX_TIMERS) {
		timer_device_lock_release();
		printk(KERN_WARNING "out of timer event entries\n");
		return -1;
	}
	dev = &timer_device_devs[timer_device_next_timer];
	action = &timer_device_irqs[timer_device_next_timer];
	timer_device_next_timer++;
	timer_device_lock_release();

	/*
	 * Now allocate a timer to ourselves.
	 */
	dev->irq = timer_alloc();
	if (dev->irq == -1) {
		/* roll back the slot reservation taken above */
		timer_device_lock_acquire();
		timer_device_next_timer--;
		timer_device_lock_release();
		printk(KERN_WARNING "out of hardware timers\n");
		return -1;
	}

	/*
	 * Init the IRQ action structure.  Make sure
	 * this in place before you register the clock
	 * event device.
	 */
	action->name = name;
	action->flags = IRQF_DISABLED | IRQF_TIMER;
	action->handler = timer_device_event;
	action->dev_id = dev;
	setup_irq(dev->irq, action);
	irq_set_affinity(dev->irq, mask);
	/* keep the vector masked until the clockevent layer enables it */
	pic_disable_vector(dev->irq);

	/*
	 * init clock dev structure.
	 *
	 * The max_delta_ns needs to be less than a full timer's
	 * resolution to ensure that with overhead, we will be able to
	 * service the timer.  The usual approach is to use 31 bits
	 * instead of 32 for a 32 bit timer.
	 *
	 * The min_delta_ns is chosen to ensure that setting next event
	 * will never be requested with too small of value.
	 */
	dev->name = name;
	dev->rating = timer_device_clockbase.rating;
	dev->shift = timer_device_clockbase.shift;
	dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	dev->set_mode = timer_device_set_mode;
	dev->set_next_event = timer_device_set_next_event;
	dev->mult = div_sc(frequency, NSEC_PER_SEC, dev->shift);
	dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, dev);
	dev->min_delta_ns = clockevent_delta2ns(0xf, dev);
	dev->cpumask = mask;
	printk(KERN_NOTICE "timer[%d]: %s - created\n", dev->irq, dev->name);

	/*
	 * Now register the device.
	 */
	clockevents_register_device(dev);
	return dev->irq;
}
/*
 * calibrate_APIC_clock() - measure the APIC timer frequency.
 *
 * Counts APIC timer ticks over a known interval — 5 ms of PM timer when
 * available, otherwise a TSC-measured window — and derives the clock
 * event conversion factors from the result.
 *
 * Returns 0 on success, -1 if the measured frequency is implausibly low
 * (less than one tick per millisecond-scaled jiffy).
 */
static int __init calibrate_APIC_clock(void)
{
	unsigned apic, apic_start;
	unsigned long tsc, tsc_start;
	int result;
	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 *
	 * No interrupt enable !
	 */
	local_irq_disable();
	__setup_APIC_LVTT(250000000, 0, 0);
	/* APIC current-count register counts DOWN, hence start - now */
	apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
	if (apic_calibrate_pmtmr && pmtmr_ioport) {
		pmtimer_wait(5000);  /* 5ms wait */
		apic = apic_read(APIC_TMCCT);
		result = (apic_start - apic) * 1000L / 5;
	} else
#endif
	{
		rdtscll(tsc_start);

		do {
			apic = apic_read(APIC_TMCCT);
			rdtscll(tsc);
		} while ((tsc - tsc_start) < TICK_COUNT &&
				(apic_start - apic) < TICK_COUNT);

		result = (apic_start - apic) * 1000L * tsc_khz /
					(tsc - tsc_start);
	}

	local_irq_enable();

	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
		result / 1000 / 1000, result / 1000 % 1000);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
				       lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	calibration_result = result / HZ;

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (calibration_result < (1000000 / HZ)) {
		printk(KERN_WARNING
			"APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	return 0;
}
/*
 * ambarella_timer_resume() - restore timer state after suspend.
 * @level: non-zero re-enables the clock event timer IRQ at the end.
 *
 * Three phases, each duplicated per platform flavour (plain builds vs
 * the Hyacinth machines, which pick an AXI0/AXI1 register bank at
 * runtime):
 *  1. Stop the clock event (and, if configured, clocksource) timers and
 *     rewrite their saved status/reload/match registers; a never-started
 *     clock event timer (both saved values zero) gets a fresh one-jiffy
 *     status instead.
 *  2. If the saved timer clock differs from AMBARELLA_TIMER_FREQ,
 *     recompute conversion factors, re-program periodic mode when that
 *     was the active mode, and refresh clocksource mult and sched_clock.
 *  3. Restore the saved enable bits and optionally re-enable the IRQ.
 *
 * Always returns 0.
 */
u32 ambarella_timer_resume(u32 level)
{
	u32 timer_ctr_mask;

#if !defined(CONFIG_MACH_HYACINTH_0) && !defined(CONFIG_MACH_HYACINTH_1)
	/* Phase 1: stop timers, restore saved register state */
	timer_ctr_mask = AMBARELLA_CE_TIMER_CTR_MASK;
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
	timer_ctr_mask |= AMBARELLA_CS_TIMER_CTR_MASK;
#endif
	amba_clrbitsl(TIMER_CTR_REG, timer_ctr_mask);
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
	amba_writel(AMBARELLA_CS_TIMER_STATUS_REG,
		ambarella_timer_pm.timer_cs_status_reg);
	amba_writel(AMBARELLA_CS_TIMER_RELOAD_REG,
		ambarella_timer_pm.timer_cs_reload_reg);
	amba_writel(AMBARELLA_CS_TIMER_MATCH1_REG,
		ambarella_timer_pm.timer_cs_match1_reg);
	amba_writel(AMBARELLA_CS_TIMER_MATCH2_REG,
		ambarella_timer_pm.timer_cs_match2_reg);
#endif
	/* never-started clock event timer: seed one jiffy instead */
	if ((ambarella_timer_pm.timer_ce_status_reg == 0) &&
		(ambarella_timer_pm.timer_ce_reload_reg == 0)){
		amba_writel(AMBARELLA_CE_TIMER_STATUS_REG,
			AMBARELLA_TIMER_FREQ / HZ);
	} else {
		amba_writel(AMBARELLA_CE_TIMER_STATUS_REG,
			ambarella_timer_pm.timer_ce_status_reg);
	}
	amba_writel(AMBARELLA_CE_TIMER_RELOAD_REG,
		ambarella_timer_pm.timer_ce_reload_reg);
	amba_writel(AMBARELLA_CE_TIMER_MATCH1_REG,
		ambarella_timer_pm.timer_ce_match1_reg);
	amba_writel(AMBARELLA_CE_TIMER_MATCH2_REG,
		ambarella_timer_pm.timer_ce_match2_reg);
#else /* defined(CONFIG_MACH_HYACINTH_0) || defined(CONFIG_MACH_HYACINTH_1) */
	if (machine_is_hyacinth_0()) {
		timer_ctr_mask = AMBARELLA_CE_TIMER_AXI0_CTR_MASK;
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
		timer_ctr_mask |= AMBARELLA_CS_TIMER_AXI0_CTR_MASK;
#endif
		amba_clrbitsl(TIMER_CTR_REG, timer_ctr_mask);
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
		amba_writel(AMBARELLA_CS_TIMER_AXI0_STATUS_REG,
			ambarella_timer_pm.timer_cs_status_reg);
		amba_writel(AMBARELLA_CS_TIMER_AXI0_RELOAD_REG,
			ambarella_timer_pm.timer_cs_reload_reg);
		amba_writel(AMBARELLA_CS_TIMER_AXI0_MATCH1_REG,
			ambarella_timer_pm.timer_cs_match1_reg);
		amba_writel(AMBARELLA_CS_TIMER_AXI0_MATCH2_REG,
			ambarella_timer_pm.timer_cs_match2_reg);
#endif
		if ((ambarella_timer_pm.timer_ce_status_reg == 0) &&
			(ambarella_timer_pm.timer_ce_reload_reg == 0)){
			amba_writel(AMBARELLA_CE_TIMER_AXI0_STATUS_REG,
				AMBARELLA_TIMER_FREQ / HZ);
		} else {
			amba_writel(AMBARELLA_CE_TIMER_AXI0_STATUS_REG,
				ambarella_timer_pm.timer_ce_status_reg);
		}
		amba_writel(AMBARELLA_CE_TIMER_AXI0_RELOAD_REG,
			ambarella_timer_pm.timer_ce_reload_reg);
		amba_writel(AMBARELLA_CE_TIMER_AXI0_MATCH1_REG,
			ambarella_timer_pm.timer_ce_match1_reg);
		amba_writel(AMBARELLA_CE_TIMER_AXI0_MATCH2_REG,
			ambarella_timer_pm.timer_ce_match2_reg);
	} else if (machine_is_hyacinth_1()) {
		timer_ctr_mask = AMBARELLA_CE_TIMER_AXI1_CTR_MASK;
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
		timer_ctr_mask |= AMBARELLA_CS_TIMER_AXI1_CTR_MASK;
#endif
		amba_clrbitsl(TIMER_CTR_REG, timer_ctr_mask);
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
		amba_writel(AMBARELLA_CS_TIMER_AXI1_STATUS_REG,
			ambarella_timer_pm.timer_cs_status_reg);
		amba_writel(AMBARELLA_CS_TIMER_AXI1_RELOAD_REG,
			ambarella_timer_pm.timer_cs_reload_reg);
		amba_writel(AMBARELLA_CS_TIMER_AXI1_MATCH1_REG,
			ambarella_timer_pm.timer_cs_match1_reg);
		amba_writel(AMBARELLA_CS_TIMER_AXI1_MATCH2_REG,
			ambarella_timer_pm.timer_cs_match2_reg);
#endif
		if ((ambarella_timer_pm.timer_ce_status_reg == 0) &&
			(ambarella_timer_pm.timer_ce_reload_reg == 0)){
			amba_writel(AMBARELLA_CE_TIMER_AXI1_STATUS_REG,
				AMBARELLA_TIMER_FREQ / HZ);
		} else {
			amba_writel(AMBARELLA_CE_TIMER_AXI1_STATUS_REG,
				ambarella_timer_pm.timer_ce_status_reg);
		}
		amba_writel(AMBARELLA_CE_TIMER_AXI1_RELOAD_REG,
			ambarella_timer_pm.timer_ce_reload_reg);
		amba_writel(AMBARELLA_CE_TIMER_AXI1_MATCH1_REG,
			ambarella_timer_pm.timer_ce_match1_reg);
		amba_writel(AMBARELLA_CE_TIMER_AXI1_MATCH2_REG,
			ambarella_timer_pm.timer_ce_match2_reg);
	}
#endif /* defined(CONFIG_MACH_HYACINTH_0) || defined(CONFIG_MACH_HYACINTH_1) */

	/* Phase 2: timer clock changed across suspend — refresh factors */
	if (ambarella_timer_pm.timer_clk != AMBARELLA_TIMER_FREQ) {
		clockevents_calc_mult_shift(&ambarella_clkevt,
			AMBARELLA_TIMER_FREQ, 5);
		ambarella_clkevt.max_delta_ns =
			clockevent_delta2ns(0xffffffff, &ambarella_clkevt);
		ambarella_clkevt.min_delta_ns =
			clockevent_delta2ns(1, &ambarella_clkevt);
		switch (ambarella_clkevt.mode) {
		case CLOCK_EVT_MODE_PERIODIC:
			ambarella_ce_timer_set_periodic();
			break;
		case CLOCK_EVT_MODE_ONESHOT:
		case CLOCK_EVT_MODE_UNUSED:
		case CLOCK_EVT_MODE_SHUTDOWN:
		case CLOCK_EVT_MODE_RESUME:
			break;
		}
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
		/* drop rating to 0 while mult is being rewritten */
		clocksource_change_rating(&ambarella_cs_timer_clksrc, 0);
		ambarella_cs_timer_clksrc.mult = clocksource_hz2mult(
			AMBARELLA_TIMER_FREQ, ambarella_cs_timer_clksrc.shift);
		pr_debug("%s: mult = %u, shift = %u\n",
			ambarella_cs_timer_clksrc.name,
			ambarella_cs_timer_clksrc.mult,
			ambarella_cs_timer_clksrc.shift);
		clocksource_change_rating(&ambarella_cs_timer_clksrc,
			AMBARELLA_TIMER_RATING);
#if defined(CONFIG_HAVE_SCHED_CLOCK)
		setup_sched_clock(ambarella_read_sched_clock, 32,
			AMBARELLA_TIMER_FREQ);
#endif
#endif
	}

	/* Phase 3: restore saved enable bits, optionally unmask the IRQ */
#if !defined(CONFIG_MACH_HYACINTH_0) && !defined(CONFIG_MACH_HYACINTH_1)
	timer_ctr_mask = AMBARELLA_CE_TIMER_CTR_MASK;
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
	timer_ctr_mask |= AMBARELLA_CS_TIMER_CTR_MASK;
#endif
	amba_setbitsl(TIMER_CTR_REG,
		(ambarella_timer_pm.timer_ctr_reg & timer_ctr_mask));
	if (level)
		enable_irq(AMBARELLA_CE_TIMER_IRQ);
#else /* defined(CONFIG_MACH_HYACINTH_0) || defined(CONFIG_MACH_HYACINTH_1) */
	if (machine_is_hyacinth_0()) {
		timer_ctr_mask = AMBARELLA_CE_TIMER_AXI0_CTR_MASK;
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
		timer_ctr_mask |= AMBARELLA_CS_TIMER_AXI0_CTR_MASK;
#endif
		amba_setbitsl(TIMER_CTR_REG,
			(ambarella_timer_pm.timer_ctr_reg & timer_ctr_mask));
		if (level)
			enable_irq(AMBARELLA_CE_TIMER_AXI0_IRQ);
	} else if (machine_is_hyacinth_1()) {
		timer_ctr_mask = AMBARELLA_CE_TIMER_AXI1_CTR_MASK;
#if defined(CONFIG_AMBARELLA_SUPPORT_CLOCKSOURCE)
		timer_ctr_mask |= AMBARELLA_CS_TIMER_AXI1_CTR_MASK;
#endif
		amba_setbitsl(TIMER_CTR_REG,
			(ambarella_timer_pm.timer_ctr_reg & timer_ctr_mask));
		if (level)
			enable_irq(AMBARELLA_CE_TIMER_AXI1_IRQ);
	}
#endif /* defined(CONFIG_MACH_HYACINTH_0) || defined(CONFIG_MACH_HYACINTH_1) */

#if defined(CONFIG_SMP)
	//percpu_timer_update_rate(amb_get_axi_clock_frequency(HAL_BASE_VP));
#endif

	return 0;
}
return; } err = request_percpu_irq(clk->irq, twd_handler, "twd", twd_evt); if (err) { pr_err("twd: can't register interrupt %d (%d)\n", clk->irq, err); return; } } twd_calibrate_rate(); clk->name = "local_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; clk->rating = 350; clk->set_mode = twd_set_mode; clk->set_next_event = twd_set_next_event; clk->shift = 20; clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift); clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); clk->min_delta_ns = clockevent_delta2ns(0xf, clk); clockevents_register_device(clk); /* Make sure our local interrupt controller has this enabled */ gic_enable_ppi(clk->irq); }
/*
 * Setup the local clock events for a CPU.
 *
 * @evt: clock event device for the calling CPU.
 *
 * Allocates a channel of TICK_TIMER_NAME for this CPU (once; the
 * allocation is cached in percpu_kona_td), configures it for one-shot
 * operation with kona_tick_interrupt_cb, pins its interrupt to this CPU,
 * fills in the conversion factors and registers the device.
 *
 * Returns 0 on success, -ENXIO if no timer channel could be allocated.
 */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	struct kona_td kona_td;
	struct timer_ch_cfg config;

	/*
	 * TICK_TIMER_NAME can be either "aon-timer" or "slave-timer".
	 *
	 * We are currently using "slave-timer" at 1 MHz for better timer
	 * resolution and system performance
	 */
	kona_td = (struct kona_td)__get_cpu_var(percpu_kona_td);
	if (!kona_td.allocated) {
		kona_td.kona_timer =
			kona_timer_request(TICK_TIMER_NAME,
					   TICK_TIMER_OFFSET + cpu);
		if (kona_td.kona_timer) {
			kona_td.allocated = true;
		} else {
			/* Fixed string concatenation: "as" "CPU" used to
			 * render as "asCPU" in the log message. */
			pr_err("%s: Failed to allocate %s channel %d as "
			       "CPU %d local tick device\n", __func__,
			       TICK_TIMER_NAME, TICK_TIMER_OFFSET + cpu, cpu);
			return -ENXIO;
		}
	}

	/*
	 * In the future: The following codes should be one time configuration
	 */
	config.mode = MODE_ONESHOT;
	config.arg = evt;
	config.cb = kona_tick_interrupt_cb;
	kona_timer_config(kona_td.kona_timer, &config);

	irq_set_affinity(kona_td.kona_timer->irq, cpumask_of(cpu));

	evt->name = "local_timer";
	evt->cpumask = cpumask_of(cpu);
	evt->irq = kona_td.kona_timer->irq;
	evt->set_next_event = kona_tick_set_next_event;
	evt->set_mode = kona_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 250;
	evt->shift = 32;
	evt->mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(MAX_KONA_COUNT_CLOCK, evt);
	/* There is MIN_KONA_DELTA_CLOCK clock cycles delay in HUB Timer by
	 * ASIC limitation. When min_delta_ns set N, real requested load value
	 * in hrtimer becomes N - 1. So add 1 to be MIN_DELTA_CLOCK */
	evt->min_delta_ns = clockevent_delta2ns(MIN_KONA_DELTA_CLOCK + 1, evt);

	per_cpu(percpu_kona_td, cpu) = kona_td;

	clockevents_register_device(evt);

	return 0;
}
/*
 * u300_timer_init() - U300 APP timer block bring-up.
 *
 * Disables the Symbian-oriented "OS"/"DD" timers, resets GP timer 1 for
 * the tick (its IRQ handler is installed here), sets GP timer 2 running
 * as a free-running 1 MHz clocksource, then registers both framework
 * devices. Clocksource registration failure is logged but not fatal.
 */
static void __init u300_timer_init(void)
{
	u300_enable_timer_clock();
	/*
	 * Disable the "OS" and "DD" timers - these are designed for Symbian!
	 * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
	 */
	writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_CRC);
	writel(U300_TIMER_APP_ROST_TIMER_RESET,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_ROST);
	writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_DOST);
	writel(U300_TIMER_APP_RDDT_TIMER_RESET,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_RDDT);
	writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_DDDT);

	/* Reset the General Purpose timer 1. */
	writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_RGPT1);

	/* Set up the IRQ handler */
	setup_irq(IRQ_U300_TIMER_APP_GP1, &u300_timer_irq);

	/* Reset the General Purpose timer 2 */
	writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_RGPT2);

	/* Set this timer to run around forever */
	writel(0xFFFFFFFFU, U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2TC);

	/* Set continuous mode so it wraps around */
	writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_SGPT2M);

	/* Disable timer interrupts */
	writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2IE);

	/* Then enable the GP2 timer to use as a free running us counter */
	writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
	       U300_TIMER_APP_VBASE + U300_TIMER_APP_EGPT2);

	/* This is a pure microsecond clock source */
	clocksource_u300_1mhz.mult =
		clocksource_khz2mult(1000, clocksource_u300_1mhz.shift);
	if (clocksource_register(&clocksource_u300_1mhz))
		printk(KERN_ERR "timer: failed to initialize clock "
		       "source %s\n", clocksource_u300_1mhz.name);

	clockevent_u300_1mhz.mult =
		div_sc(1000000, NSEC_PER_SEC, clockevent_u300_1mhz.shift);
	/* 32bit counter, so 32bits delta is max */
	clockevent_u300_1mhz.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &clockevent_u300_1mhz);
	/* This timer is slow enough to set for 1 cycle == 1 MHz */
	clockevent_u300_1mhz.min_delta_ns =
		clockevent_delta2ns(1, &clockevent_u300_1mhz);
	clockevent_u300_1mhz.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_u300_1mhz);
	/*
	 * TODO: init and register the rest of the timers too, they can be
	 * used by hrtimers!
	 */
}
/*
 * Device-tree probe for the Tegra on-chip timer block: maps the registers,
 * programs the microsecond-counter prescaler for the detected input clock,
 * then registers clocksource, clockevent and timer IRQ.  Any failure to
 * obtain the basic resources is fatal (BUG).
 */
void __init tegra_init_timer(struct device_node *np)
{
	struct clk *clk;
	int ret;
	unsigned long rate;
	struct resource res;

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("%s:No memory resources found\n", __func__);
		return;
	}

	timer_reg_base = ioremap(res.start, resource_size(&res));
	if (!timer_reg_base) {
		pr_err("%s:Can't map timer registers\n", __func__);
		BUG();
	}
	/* Physical base is kept for suspend/resume and debug users */
	timer_reg_base_pa = res.start;

	tegra_timer_irq.irq = irq_of_parse_and_map(np, 0);
	if (tegra_timer_irq.irq <= 0) {
		pr_err("%s:Failed to map timer IRQ\n", __func__);
		BUG();
	}

	/* Prefer the DT-provided clock, fall back to the legacy name */
	clk = of_clk_get(np, 0);
	if (IS_ERR(clk))
		clk = clk_get_sys("timer", NULL);
	if (IS_ERR(clk)) {
		pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
		rate = 12000000;
	} else {
		clk_prepare_enable(clk);
		rate = clk_get_rate(clk);
	}

	/*
	 * TIMERUS_USEC_CFG holds a dividend/divisor pair that scales the
	 * input clock so the free-running counter ticks at exactly 1 MHz.
	 */
	switch (rate) {
	case 12000000:
		timer_writel(0x000b, TIMERUS_USEC_CFG);
		break;
	case 12800000:
		timer_writel(0x043F, TIMERUS_USEC_CFG);
		break;
	case 13000000:
		timer_writel(0x000c, TIMERUS_USEC_CFG);
		break;
	case 19200000:
		timer_writel(0x045f, TIMERUS_USEC_CFG);
		break;
	case 26000000:
		timer_writel(0x0019, TIMERUS_USEC_CFG);
		break;
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
	case 16800000:
		timer_writel(0x0453, TIMERUS_USEC_CFG);
		break;
	case 38400000:
		timer_writel(0x04BF, TIMERUS_USEC_CFG);
		break;
	case 48000000:
		timer_writel(0x002F, TIMERUS_USEC_CFG);
		break;
#endif
	default:
		/* Simulation/QT platforms run with an arbitrary rate */
		if (tegra_platform_is_qt()) {
			timer_writel(0x000c, TIMERUS_USEC_CFG);
			break;
		}
		WARN(1, "Unknown clock rate");
	}

#ifdef CONFIG_PM_SLEEP
	hotplug_cpu_register(np);
#endif
	of_node_put(np);

	/* Chip-generation specific timer setup */
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	tegra20_init_timer();
#else
	tegra30_init_timer();
#endif

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
				    "timer_us", 1000000, 300, 32,
				    clocksource_mmio_readl_up);
	if (ret) {
		pr_err("%s: Failed to register clocksource: %d\n",
		       __func__, ret);
		BUG();
	}

	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
	if (ret) {
		pr_err("%s: Failed to register timer IRQ: %d\n",
		       __func__, ret);
		BUG();
	}

	clockevents_calc_mult_shift(&tegra_clockevent, 1000000, 5);
	tegra_clockevent.max_delta_ns =
		clockevent_delta2ns(0x1fffffff, &tegra_clockevent);
	tegra_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &tegra_clockevent);
	tegra_clockevent.cpumask = cpu_all_mask;
	tegra_clockevent.irq = tegra_timer_irq.irq;
	clockevents_register_device(&tegra_clockevent);

#ifndef CONFIG_ARM64
#ifdef CONFIG_ARM_ARCH_TIMER
	/* Architectural timers take precedence over broadcast timers. Only
	   register a broadcast clockevent device if architectural timers do
	   not exist or cannot be initialized. */
	if (tegra_init_arch_timer())
#endif
		/* Architectural timers do not exist or cannot be initialized.
		   Fall back to using the broadcast timer as the sched clock. */
		setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
#endif

	register_syscore_ops(&tegra_timer_syscore_ops);

#ifndef CONFIG_ARM64
	late_time_init = tegra_init_late_timer;
#endif

	//arm_delay_ops.delay = __tegra_delay;
	//arm_delay_ops.const_udelay = __tegra_const_udelay;
	//arm_delay_ops.udelay = __tegra_udelay;
}
static struct device_t * ce_samsung_timer_probe(struct driver_t * drv, struct dtnode_t * n) { struct ce_samsung_timer_pdata_t * pdat; struct clockevent_t * ce; struct device_t * dev; virtual_addr_t virt = phys_to_virt(dt_read_address(n)); char * clk = dt_read_string(n, "clock-name", NULL); int irq = dt_read_int(n, "interrupt", -1); int channel = dt_read_int(n, "timer-channel", -1); u64_t rate; if(!search_clk(clk)) return NULL; if(!irq_is_valid(irq)) return NULL; if(channel < 0 || channel > 3) return NULL; pdat = malloc(sizeof(struct ce_samsung_timer_pdata_t)); if(!pdat) return NULL; ce = malloc(sizeof(struct clockevent_t)); if(!ce) { free(pdat); return NULL; } pdat->virt = virt; pdat->clk = strdup(clk); pdat->irq = irq; pdat->channel = channel; clk_enable(pdat->clk); rate = samsung_timer_calc_tin(pdat->virt, pdat->clk, pdat->channel, 107); clockevent_calc_mult_shift(ce, rate, 10); ce->name = alloc_device_name(dt_read_name(n), -1); ce->min_delta_ns = clockevent_delta2ns(ce, 0x1); ce->max_delta_ns = clockevent_delta2ns(ce, 0xffffffff); ce->next = ce_samsung_timer_next, ce->priv = pdat; if(!request_irq(pdat->irq, ce_samsung_timer_interrupt, IRQ_TYPE_NONE, ce)) { clk_disable(pdat->clk); free(pdat->clk); free(ce->priv); free(ce); return NULL; } samsung_timer_enable(pdat->virt, pdat->channel, 1); samsung_timer_count(pdat->virt, pdat->channel, 0); samsung_timer_stop(pdat->virt, pdat->channel); if(!register_clockevent(&dev, ce)) { samsung_timer_irq_clear(pdat->virt, pdat->channel); samsung_timer_stop(pdat->virt, pdat->channel); samsung_timer_disable(pdat->virt, pdat->channel); clk_disable(pdat->clk); free_irq(pdat->irq); free(pdat->clk); free_device_name(ce->name); free(ce->priv); free(ce); return NULL; } dev->driver = drv; return dev; }
/*
 * Legacy (non-DT) Tegra timer init: enables the timer and RTC clocks,
 * programs the microsecond-counter prescaler for the measured input clock,
 * then registers sched_clock, clocksource, timer IRQ and clockevent.
 */
static void __init tegra_init_timer(void)
{
	struct clk *clk;
	unsigned long rate = clk_measure_input_freq();
	int ret;

	clk = clk_get_sys("timer", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);

	/*
	 * rtc registers are used by read_persistent_clock, keep the rtc clock
	 * enabled
	 */
	clk = clk_get_sys("rtc-tegra", NULL);
	BUG_ON(IS_ERR(clk));
	clk_enable(clk);

#ifdef CONFIG_HAVE_ARM_TWD
	twd_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x600);
#endif

	/*
	 * TIMERUS_USEC_CFG scales the input clock so the free-running
	 * counter ticks at 1 MHz.
	 */
	switch (rate) {
	case 12000000:
		timer_writel(0x000b, TIMERUS_USEC_CFG);
		break;
	case 13000000:
		timer_writel(0x000c, TIMERUS_USEC_CFG);
		break;
	case 19200000:
		timer_writel(0x045f, TIMERUS_USEC_CFG);
		break;
	case 26000000:
		timer_writel(0x0019, TIMERUS_USEC_CFG);
		break;
	default:
		WARN(1, "Unknown clock rate");
	}

	init_fixed_sched_clock(&cd, tegra_update_sched_clock, 32, 1000000,
			       SC_MULT, SC_SHIFT);

	if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
		printk(KERN_ERR "Failed to register clocksource\n");
		BUG();
	}

	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
	if (ret) {
		printk(KERN_ERR "Failed to register timer IRQ: %d\n", ret);
		BUG();
	}

	clockevents_calc_mult_shift(&tegra_clockevent, 1000000, 5);
	tegra_clockevent.max_delta_ns =
		clockevent_delta2ns(0x1fffffff, &tegra_clockevent);
	tegra_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &tegra_clockevent);
	tegra_clockevent.cpumask = cpu_all_mask;
	tegra_clockevent.irq = tegra_timer_irq.irq;
	clockevents_register_device(&tegra_clockevent);
}
/*
 * Clockevents init (sys timer).
 *
 * Configures an STM32 basic/general-purpose timer as the periodic system
 * tick: computes a prescaler power-of-two so the auto-reload value fits in
 * the counter width, resets the peripheral via RCC, programs auto-reload
 * and prescaler, then enables the update interrupt and registers the
 * clockevent device.
 */
static void tick_tmr_init(void)
{
	volatile struct stm32_tim_regs *tim;
	volatile u32 *rcc_enr, *rcc_rst;
	struct clock_event_device *evt = &tick_tmr_clockevent;
	/* The target total timer divider (including the prescaler) */
	u32 div;
	/* The prescaler value will be (1 << psc_pwr) */
	int psc_pwr;

	/*
	 * If the timer is 16-bit, then (div >> psc_pwr) must not exceed
	 * (2**16 - 1).
	 */
	div = tick_tmr_clk / HZ;
	psc_pwr = ilog2(div) - TICK_TIM_COUNTER_BITWIDTH + 1;
	if (psc_pwr < 0)
		psc_pwr = 0;

	/*
	 * Setup reg bases
	 */
	tim = (struct stm32_tim_regs *)TICK_TIM_BASE;
	rcc_enr = (u32 *)TICK_TIM_RCC_ENR;
	rcc_rst = (u32 *)TICK_TIM_RCC_RST;

	/*
	 * Enable timer clock, and deinit registers
	 * (pulse the RCC reset bit: set then clear)
	 */
	*rcc_enr |= TICK_TIM_RCC_MSK;
	*rcc_rst |= TICK_TIM_RCC_MSK;
	*rcc_rst &= ~TICK_TIM_RCC_MSK;

	/*
	 * Select the counter mode:
	 * - upcounter;
	 * - auto-reload
	 */
	tim->cr1 = STM32_TIM_CR1_ARPE;
	tim->arr = (div >> psc_pwr);
	tim->psc = (1 << psc_pwr) - 1;

	/*
	 * Generate an update event to reload the Prescaler value immediately
	 */
	tim->egr = STM32_TIM_EGR_UG;

	/*
	 * Setup, and enable IRQ
	 */
	setup_irq(TICK_TIM_IRQ, &tick_tmr_irqaction);
	tim->dier |= STM32_TIM_DIER_UIE;

	/*
	 * For system timer we don't provide set_next_event method,
	 * so, I guess, setting mult, shift, max_delta_ns, min_delta_ns
	 * makes no sense (I verified that kernel works well without these).
	 * Nevertheless, some clocksource drivers with periodic-mode only do
	 * this. So, let's set them to some values too.
	 */
	clockevents_calc_mult_shift(evt, tick_tmr_clk / HZ, 5);
	evt->max_delta_ns = clockevent_delta2ns(0xFFFFFFF0, evt);
	evt->min_delta_ns = clockevent_delta2ns(0xF, evt);

	clockevents_register_device(evt);
}
/*
 * L4Linux timer setup: allocates an IRQ capability from the L4 factory,
 * registers it with Linux, spawns a dedicated timer thread that triggers
 * the IRQ, and registers the resulting 1 MHz clockevent device.
 *
 * Returns 0 on success or a negative errno; on failure, resources acquired
 * so far are released via the goto chain (out3 -> out2 -> out1).
 */
static int __init l4x_timer_init_ret(void)
{
	int r;
	l4lx_thread_t thread;
	int irq;
	L4XV_V(f);

	timer_irq_cap = l4x_cap_alloc();
	if (l4_is_invalid_cap(timer_irq_cap)) {
		printk(KERN_ERR "l4timer: Failed to alloc\n");
		return -ENOMEM;
	}

	r = L4XV_FN_i(l4_error(l4_factory_create_irq(l4re_env()->factory,
	                                             timer_irq_cap)));
	if (r) {
		printk(KERN_ERR "l4timer: Failed to create irq: %d\n", r);
		goto out1;
	}

	if ((irq = l4x_register_irq(timer_irq_cap)) < 0) {
		r = -ENOMEM;
		goto out2;
	}

	printk("l4timer: Using IRQ%d\n", irq);

	/* NOTE(review): setup_irq() is not undone on the failure paths
	 * below — confirm whether a matching remove_irq() is needed. */
	setup_irq(irq, &l4timer_irq);

	L4XV_L(f);
	thread = l4lx_thread_create
                  (timer_thread,                /* thread function */
                   smp_processor_id(),          /* cpu */
                   NULL,                        /* stack */
                   &timer_irq_cap, sizeof(timer_irq_cap), /* data */
                   l4x_cap_alloc(),             /* cap */
                   PRIO_TIMER,                  /* prio */
                   0,                           /* vcpup */
                   "timer",                     /* name */
                   NULL);
	L4XV_U(f);

	/* NOTE(review): get_cap is called before the validity check;
	 * presumably safe on an invalid thread handle — verify. */
	timer_srv = l4lx_thread_get_cap(thread);

	if (!l4lx_thread_is_valid(thread)) {
		printk(KERN_ERR "l4timer: Failed to create thread\n");
		r = -ENOMEM;
		goto out3;
	}

	/* 1 MHz virtual timer: mult/shift scale usec ticks to nanoseconds */
	l4timer_clockevent.irq = irq;
	l4timer_clockevent.mult =
		div_sc(1000000, NSEC_PER_SEC, l4timer_clockevent.shift);
	l4timer_clockevent.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &l4timer_clockevent);
	l4timer_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &l4timer_clockevent);
	l4timer_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&l4timer_clockevent);

	return 0;

out3:
	l4x_unregister_irq(irq);
out2:
	L4XV_FN_v(l4_task_delete_obj(L4RE_THIS_TASK_CAP, timer_irq_cap));
out1:
	l4x_cap_free(timer_irq_cap);
	return r;
}