Example #1
void __init register_current_timer_delay(const struct delay_timer *timer)
{
	u32 new_mult, new_shift;
	u64 res;

	clocks_calc_mult_shift(&new_mult, &new_shift, timer->freq,
			       NSEC_PER_SEC, 3600);
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	if (res > 1000) {
		pr_err("Ignoring delay timer %ps, which has insufficient resolution of %lluns\n",
			timer, res);
		return;
	}

	if (!delay_calibrated && (!delay_res || (res < delay_res))) {
		pr_info("Switching to timer-based delay loop, resolution %lluns\n", res);
		delay_timer			= timer;
		lpj_fine			= timer->freq / HZ;
		delay_res			= res;

		/* cpufreq may scale loops_per_jiffy, so keep a private copy */
		arm_delay_ops.ticks_per_jiffy	= lpj_fine;
		arm_delay_ops.delay		= __timer_delay;
		arm_delay_ops.const_udelay	= __timer_const_udelay;
		arm_delay_ops.udelay		= __timer_udelay;
	} else {
		pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
	}
}
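For context, the resolution test above is plain fixed-point arithmetic: the converted value is the cycle count multiplied by mult and shifted right by shift, so the check asks how many nanoseconds one counter tick represents. Below is a minimal standalone sketch (userspace C, illustrative names and values only, not the kernel's implementation; the mult/shift pair is one valid choice for a 24 MHz timer, not necessarily what clocks_calc_mult_shift would pick).

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: a cycles-to-ns conversion is a 64-bit multiply
 * followed by a right shift by the companion shift value. */
static uint64_t cyc_to_ns_sketch(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* One valid pair for a 24 MHz timer:
	 * mult = round((1e9 << shift) / rate) with shift = 24. */
	uint32_t mult = 699050667u, shift = 24;

	/* One tick is ~41.7 ns, comfortably under the 1000 ns cut-off
	 * enforced by register_current_timer_delay() above. */
	printf("1 tick ~= %llu ns\n",
	       (unsigned long long)cyc_to_ns_sketch(1, mult, shift));
	return 0;
}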
Example #2
static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}
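The clamp works because halving mult while dropping shift from 32 to 31 leaves the scaled product essentially unchanged: cyc * (mult / 2) >> 31 equals cyc * mult >> 32 exactly when mult is even, and has a relative error of at most 1/mult when it is odd. A tiny standalone check with made-up numbers (illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up values purely to illustrate the shift clamp above. */
	uint64_t cycles = 123456789ULL;
	uint32_t mult = 1288490188u;	/* pretend this came out with shift == 32 */

	uint64_t ns_shift32 = (cycles * mult) >> 32;
	uint64_t ns_shift31 = (cycles * (mult >> 1)) >> 31;

	/* Both lines print the same value for an even mult. */
	printf("shift 32: %llu ns, shift 31: %llu ns\n",
	       (unsigned long long)ns_shift32,
	       (unsigned long long)ns_shift31);
	return 0;
}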
Example #3
void __init edison_init_timer(void)
{
#if 1	/* calculate mult and shift for sched_clock */
	u32 shift, mult;
#endif

#ifdef CONFIG_MSTAR_EDISON_BD_FPGA
	/* PERIPHCLK = CPU clock / 2; divided by 2 later, when the
	 * CONFIG_GENERIC_CLOCKEVENTS clock event code handles this value. */
	GLB_TIMER_FREQ_KHZ = 24 * 1000;
#else
	/* PERIPHCLK = CPU clock / 2; divided by 2 later, when the
	 * CONFIG_GENERIC_CLOCKEVENTS clock event code handles this value. */
	GLB_TIMER_FREQ_KHZ = query_frequency() * 1000 / 2;
#endif

	printk("Global Timer Frequency = %d MHz\n", GLB_TIMER_FREQ_KHZ / 1000);
	printk("CPU Clock Frequency = %d MHz\n", query_frequency());

#ifdef CONFIG_HAVE_SCHED_CLOCK
#if 1	/* calculate mult and shift for sched_clock */
	clocks_calc_mult_shift(&mult, &shift, (u32)(GLB_TIMER_FREQ_KHZ * 1000),
			       NSEC_PER_SEC, 0);
	printk("freq = %d, mult = %u, shift = %u\n",
	       GLB_TIMER_FREQ_KHZ * 1000, mult, shift);
	SC_SHIFT = shift;
	SC_MULT = mult;
#endif
	mstar_sched_clock_init((void __iomem *)(PERI_VIRT + 0x200),
			       (unsigned long)(GLB_TIMER_FREQ_KHZ * 1000));
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS
	/* mstar_local_timer_init((void __iomem *)PERI_ADDRESS(PERI_PHYS + 0x600));  private timer base */
	edison_clocksource_init(EDISON_BASE_REG_TIMER1_PA);
	edison_clockevents_init(INT_WDT_IRQ);
#else
	setup_irq(E_FIQ_EXTIMER0, &edison_timer_irq);

	/* enable timer interrupt */
	SETREG16(EDISON_BASE_REG_TIMER0_PA, TIMER_INTERRUPT);

	/* set interval */
	interval = (12 * 1000 * 1000) / HZ;

	OUTREG16(EDISON_BASE_REG_TIMER0_PA + ADDR_TIMER_MAX_LOW, interval & 0xffff);
	OUTREG16(EDISON_BASE_REG_TIMER0_PA + ADDR_TIMER_MAX_HIGH, interval >> 16);

	/* trigger timer0 */
	SETREG16(EDISON_BASE_REG_TIMER0_PA, TIMER_TRIG);
#endif
}
Example #4
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1 << bits) - 1;
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
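The wraparound bookkeeping follows the same arithmetic: converting the counter's maximum value gives the wrap period, and the update timer is scheduled at roughly 90% of it (w - w/10) so the epoch is refreshed before the counter overflows. A rough standalone estimate with illustrative numbers, using direct division rather than the mult/shift path:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only: how long a 32-bit counter at 24 MHz
	 * runs before wrapping, and when the refresh timer would fire
	 * given the w - w/10 margin used above. */
	uint64_t rate = 24000000ULL;	/* Hz */
	uint64_t wrap_ms = ((1ULL << 32) - 1) * 1000ULL / rate;

	/* Prints roughly 178956 ms and 161061 ms. */
	printf("wraps every %llu ms, refresh after ~%llu ms\n",
	       (unsigned long long)wrap_ms,
	       (unsigned long long)(wrap_ms - wrap_ms / 10));
	return 0;
}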
Example #5
static int clocksource_init(void)
{
	/* reset time base */
	asm ("li 3,0 ; mttbu 3 ; mttbl 3 ;");

	clocks_calc_mult_shift(&cs.mult, &cs.shift,
				fsl_get_timebase_clock(), NSEC_PER_SEC, 10);

	return init_clock(&cs);
}
Example #6
int __init omap_init_clocksource_32k(void)
{
	static char err[] __initdata = KERN_ERR
			"%s: can't register clocksource!\n";

	if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
		u32 pbase;
		unsigned long size = SZ_4K;
		void __iomem *base;
		struct clk *sync_32k_ick;

		if (cpu_is_omap16xx()) {
			pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
			size = SZ_1K;
		} else if (cpu_is_omap2420())
			pbase = OMAP2420_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap2430())
			pbase = OMAP2430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap34xx())
			pbase = OMAP3430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap44xx())
			pbase = OMAP4430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap54xx())
			pbase = OMAP54XX_32KSYNCT_BASE + 0x30;
		else
			return -ENODEV;

		/* For this to work we must have a static mapping in io.c for this area */
		base = ioremap(pbase, size);
		if (!base)
			return -ENODEV;

		sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
		if (!IS_ERR(sync_32k_ick))
			clk_enable(sync_32k_ick);

		timer_32k_base = base;

		/*
		 * 120000 rough estimate from the calculations in
		 * __clocksource_updatefreq_scale.
		 */
		clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
				32768, NSEC_PER_SEC, 120000);

		if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
					  clocksource_mmio_readl_up))
			printk(err, "32k_counter");

		setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
	}
	return 0;
}
Example #7
static void __init vmware_sched_clock_setup(void)
{
	struct cyc2ns_data *d = &vmware_cyc2ns;
	unsigned long long tsc_now = rdtsc();

	clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
			       vmware_tsc_khz, NSEC_PER_MSEC, 0);
	d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
					   d->cyc2ns_shift);

	pv_time_ops.sched_clock = vmware_sched_clock;
	pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
}
Example #8
static int uemd_timer_probe(struct device_d *dev)
{
	int mode;
	struct clk *timer_clk;

	/* use only one timer */
	if (timer_base)
		return -EBUSY;

	timer_base = dev_request_mem_region(dev, 0);
	if (IS_ERR(timer_base)) {
		dev_err(dev, "could not get memory region\n");
		return PTR_ERR(timer_base);
	}

	timer_clk = clk_get(dev, NULL);
	if (IS_ERR(timer_clk)) {
		int ret = PTR_ERR(timer_clk);
		dev_err(dev, "clock not found: %d\n", ret);
		return ret;
	}

	/* Stop timer */
	__raw_writel(0, timer_base + TIMER_CONTROL);

	/* Setup */
	__raw_writel(0xffffffff, timer_base + TIMER_LOAD);
	__raw_writel(0xffffffff, timer_base + TIMER_VALUE);

	mode =	TIMER_CTRL_32BIT	|
		TIMER_CTRL_PERIODIC	|
		TIMER_CTRL_P1;
	__raw_writel(mode, timer_base + TIMER_CONTROL);

	/* Fire it up! */
	mode |= TIMER_CTRL_ENABLE;
	__raw_writel(mode, timer_base + TIMER_CONTROL);

	clocks_calc_mult_shift(&uemd_cs.mult, &uemd_cs.shift,
		clk_get_rate(timer_clk), NSEC_PER_SEC, 10);

	init_clock(&uemd_cs);

	return 0;
}
Example #9
void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
	unsigned int clock_bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	sched_clock_update_fn = update;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else {
		r /= 1000;
		r_unit = 'k';
	}

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd->mult, cd->shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		clock_bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd->epoch_ns = 0;
}
Example #10
static int clocksource_init(void)
{
	clocks_calc_mult_shift(&jz4750_cs.mult, &jz4750_cs.shift,
		JZ_TIMER_CLOCK, NSEC_PER_SEC, 10);

	init_clock(&jz4750_cs);

	__raw_writel(TCU_OSTCSR_PRESCALE1 | TCU_OSTCSR_EXT_EN,
		(void *)TCU_OSTCSR);
	__raw_writel(0, (void *)TCU_OSTCNT);
	__raw_writel(0xffffffff, (void *)TCU_OSTDR);

	/* enable timer clock */
	__raw_writel(TCU_TSCR_OSTSC, (void *)TCU_TSCR);
	/* start counting up */
	__raw_writel(TCU_TESR_OSTST, (void *)TCU_TESR);

	return 0;
}
Example #11
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
	read_sched_clock = read;
	sched_clock_mask = (1 << bits) - 1;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	cd.epoch_ns = 0;

	pr_debug("Registered %pF as sched_clock source\n", read);
}
Example #12
/**
 * omap_init_clocksource_32k - setup and register counter 32k as a
 * kernel clocksource
 * @vbase: mapped base address of the counter_32k module
 *
 * Returns 0 upon success or negative error code upon failure.
 */
int __init omap_init_clocksource_32k(void __iomem *vbase)
{
	int ret;

	/*
	 * 32k sync Counter IP register offsets vary between the
	 * highlander version and the legacy ones.
	 * The 'SCHEME' bits (30-31) of the revision register are used
	 * to identify the version.
	 */
	if (__raw_readl(vbase + OMAP2_32KSYNCNT_REV_OFF) &
						OMAP2_32KSYNCNT_REV_SCHEME)
		sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF_HIGH;
	else
		sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF_LOW;

	/*
	 * 120000 rough estimate from the calculations in
	 * __clocksource_updatefreq_scale.
	 */
	clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
			32768, NSEC_PER_SEC, 120000);

	ret = clocksource_mmio_init(sync32k_cnt_reg, "32k_counter", 32768,
				250, 32, clocksource_mmio_readl_up);
	if (ret) {
		pr_err("32k_counter: can't register clocksource\n");
		return ret;
	}

	setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
	register_persistent_clock(NULL, omap_read_persistent_clock);
	pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");

	return 0;
}