Example 1
static void mark_offset_hpet(void)
{
	unsigned long long this_offset, last_offset;
	unsigned long offset;

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	rdtsc(last_tsc_low, last_tsc_high);

	if (hpet_use_timer)
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick_real;
	else
		offset = hpet_readl(HPET_COUNTER);
	if (unlikely(((offset - hpet_last) >= (2*hpet_tick_real))
	    && (hpet_last != 0))) {
		int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1;
		jiffies_64 += lost_ticks;
	}
	hpet_last = offset;

	/* update the monotonic base value */
	this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	write_sequnlock(&monotonic_lock);
}
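Every one of these excerpts funnels its raw counter delta through cycles_2_ns(). For reference, here is a minimal sketch of what such a helper typically looks like, modelled on the old i386 fixed-point scheme; the CYC2NS_SCALE_FACTOR constant and the set_cyc2ns_scale() helper are illustrative, not taken from any excerpt in this listing.

/*
 * Sketch: cycles -> nanoseconds with a precomputed fixed-point scale.
 * ns per cycle = 10^6 / cpu_khz, kept with CYC2NS_SCALE_FACTOR
 * fractional bits so the division happens only once, at calibration.
 */
#define CYC2NS_SCALE_FACTOR 10		/* 2^10 fractional bits */

static unsigned long cyc2ns_scale;

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}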
Example 2
unsigned long long sched_clock(void)
{
	unsigned long long ticks64;
	unsigned long ticks2, ticks1;

	if (!iomapped)
		return jiffies_64*(1000000000UL/HZ);

	ticks2 = 0UL - (unsigned long) readl(clksrc_base + TIMER_VALUE);

	do {
		ticks1 = ticks2;
		ticks64 = free_timer_overflows;
		ticks2 = 0UL - (unsigned long) readl(clksrc_base + TIMER_VALUE);
	} while (ticks1 > ticks2);

	/*
	** If the interrupt is still pending, this function was called with
	** interrupts disabled (irq_save): the counter has wrapped, but
	** 'free_timer_overflows' has not been updated yet, so account for
	** the missed overflow here.
	*/
	if (readl(clksrc_base + TIMER_MIS)) {
		ticks64 += 1;
		ticks2 = 0UL - (unsigned long) readl(clksrc_base + TIMER_VALUE);
	}

	return cycles_2_ns((ticks64 << 32) | ticks2);
}
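Example 2 builds a 64-bit cycle count out of a software overflow counter (upper 32 bits) and the hardware down-counter (lower 32 bits, negated so it counts up); the do/while loop re-reads everything if the counter wrapped between the two reads, since the overflow count sampled in between could be stale. A generic sketch of that retry pattern, with hypothetical read_hw_counter()/hw_overflows names:

/*
 * Sketch: compose a monotonic 64-bit count from a free-running 32-bit
 * counter plus a software overflow count bumped by the timer interrupt.
 * read_hw_counter() and hw_overflows are hypothetical.
 */
extern unsigned long read_hw_counter(void);	/* 32-bit up-count */
extern unsigned long long hw_overflows;		/* incremented in the timer ISR */

static unsigned long long read_cycles64(void)
{
	unsigned long low_before, low_after;
	unsigned long long high;

	do {
		low_before = read_hw_counter();
		high = hw_overflows;
		low_after = read_hw_counter();
		/* wrapped between reads: 'high' may be stale, retry */
	} while (low_after < low_before);

	return (high << 32) | low_after;
}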
Example 3
static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 because the originally published
	 * conversion algorithm shifted a 32-bit value (it now specifies a
	 * 64-bit value); see the perf_event_mmap_page documentation in
	 * perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}
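Example 3 only produces the (cyc2ns_mul, cyc2ns_shift) pair; consumers apply it as ns = cycles * mul >> shift. A minimal illustration of that step (the 128-bit multiply via the GCC __int128 extension is purely to keep the sketch short; the kernel splits the multiplication instead):

/*
 * Sketch: apply a (mul, shift) pair from clocks_calc_mult_shift(),
 * where mul / 2^shift approximates nanoseconds per cycle for a
 * counter running at 'khz'.
 */
static inline unsigned long long cycles_to_ns(unsigned long long cycles,
					      unsigned int mul,
					      unsigned int shift)
{
	return (unsigned long long)(((unsigned __int128)cycles * mul) >> shift);
}

The clamp to a shift of 31 in the excerpt exists because the same pair is exported to user space via arch_perf_update_userpage(), whose documented conversion historically assumed a shift of at most 31.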
Example 4
unsigned long omap_mpu_timer_ticks_to_usecs(unsigned long nr_ticks)
{
	unsigned long long nsec;

	nsec = cycles_2_ns((unsigned long long)nr_ticks);
	return (unsigned long)nsec / 1000;
}
Example 5
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	unsigned long ticks = 0 - omap_mpu_timer_read(0);
	unsigned long long ticks64;

	ticks64 = omap_mpu_timer1_overflows;
	ticks64 <<= 32;
	ticks64 |= ticks;

	return cycles_2_ns(ticks64);
}
Example 6
static void mark_offset_tsc_hpet(void)
{
	unsigned long long this_offset, last_offset;
 	unsigned long offset, temp, hpet_current;

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	/*
	 * It is important that these two operations happen almost at
	 * the same time. We do the RDTSC stuff first, since it's
	 * faster. To avoid any inconsistencies, we need interrupts
	 * disabled locally.
	 */
	/*
	 * Interrupts are just disabled locally since the timer irq
	 * has the SA_INTERRUPT flag set. -arca
	 */
	/* read Pentium cycle counter */

	hpet_current = hpet_readl(HPET_COUNTER);
	rdtsc(last_tsc_low, last_tsc_high);

	/* lost tick compensation */
	offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
	if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
					&& detect_lost_ticks) {
		int lost_ticks = (offset - hpet_last) / hpet_tick;
		jiffies_64 += lost_ticks;
	}
	hpet_last = hpet_current;

	/* update the monotonic base value */
	this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	write_sequnlock(&monotonic_lock);

	/* calculate delay_at_last_interrupt */
	/*
	 * Time offset = (hpet delta) * ( usecs per HPET clock )
	 *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
	 *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
	 * Where,
	 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
	 */
	delay_at_last_interrupt = hpet_current - offset;
	ASM_MUL64_REG(temp, delay_at_last_interrupt,
			hpet_usec_quotient, delay_at_last_interrupt);
}
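The comment block above spells out the fixed-point math: usecs = hpet delta * hpet_usec_quotient / 2^32. ASM_MUL64_REG() implements that with an inline-asm 32x32->64 multiply, keeping only the high half; a portable sketch of the same step (the helper name is hypothetical):

/*
 * Sketch: portable equivalent of the ASM_MUL64_REG() step above,
 * i.e. a 32.32 fixed-point multiply keeping only the integer part.
 */
static inline unsigned long hpet_delta_to_usecs(unsigned long hpet_delta,
						unsigned long usec_quotient)
{
	return (unsigned long)(((unsigned long long)hpet_delta *
				usec_quotient) >> 32);
}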
Example 7
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 */
	if (tsc_unstable || unlikely(tsc_disable))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	get_scheduled_cycles(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
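As a quick sanity check on the fallback path: with HZ = 250, for instance, each jiffy contributes 1000000000 / 250 = 4,000,000 ns, so sched_clock() only advances in 4 ms steps until the TSC is usable. Subtracting INITIAL_JIFFIES (jiffies is deliberately initialised close to its wrap point) simply makes the fallback clock start near zero at boot.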
Example 8
static unsigned long long monotonic_clock_hpet(void)
{
	unsigned long long last_offset, this_offset, base;
	unsigned seq;

	/* atomically read monotonic base & last_offset */
	do {
		seq = read_seqbegin(&monotonic_lock);
		last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
		base = monotonic_base;
	} while (read_seqretry(&monotonic_lock, seq));

	/* Read the Time Stamp Counter */
	rdtscll(this_offset);

	/* return the value in ns */
	return base + cycles_2_ns(this_offset - last_offset);
}
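monotonic_clock_hpet() is the read side of the seqlock that the mark_offset_*hpet() writers in Examples 1 and 6 take: the retry loop guarantees that monotonic_base and the TSC snapshot come from the same update. A hypothetical usage sketch, only to show how such a reader is typically consumed:

/*
 * Hypothetical usage sketch: time a short interval with the monotonic
 * reader above.  Everything except monotonic_clock_hpet() is made up.
 */
extern void do_work(void);

static void time_do_work(void)
{
	unsigned long long t0 = monotonic_clock_hpet();

	do_work();

	printk(KERN_INFO "do_work took %llu ns\n", monotonic_clock_hpet() - t0);
}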
Example 9
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * In the NUMA case we dont use the TSC as they are not
	 * synchronized across all CPUs.
	 */
#ifndef CONFIG_NUMA
	if (!use_tsc)
#endif
		/* no locking but a rare wrong value is not a big deal */
		return jiffies_64 * (1000000000 / HZ);

	/* Read the Time Stamp Counter */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
Example 10
/* paravirt_ops.sched_clock = vmi_sched_clock */
unsigned long long vmi_sched_clock(void)
{
	return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
}
Example 11
unsigned long long sched_clock(void)
{
	return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles));
}
Example 12
unsigned long long sched_clock(void)
{
    return cycles_2_ns(read_cycles());
}