Example #1
static void mark_offset_hpet(void)
{
	unsigned long long this_offset, last_offset;
	unsigned long offset;

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	rdtsc(last_tsc_low, last_tsc_high);

	if (hpet_use_timer)
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick_real;
	else
		offset = hpet_readl(HPET_COUNTER);
	if (unlikely(((offset - hpet_last) >= (2*hpet_tick_real))
	    && (hpet_last != 0))) {
		int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1;
		jiffies_64 += lost_ticks;
	}
	hpet_last = offset;

	/* update the monotonic base value */
	this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	write_sequnlock(&monotonic_lock);
}
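mark_offset_hpet() folds the TSC delta into monotonic_base via cycles_2_ns(), which is not part of this example. A minimal user-space sketch of the usual fixed-point approach (multiply by a precomputed scale, then shift) is below; cyc2ns_scale, CYC2NS_SCALE_FACTOR and set_cyc2ns_scale() are illustrative names assumed for the sketch, not necessarily the kernel's.

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* number of fixed-point fraction bits */

static uint32_t cyc2ns_scale;	/* (ns per cycle) << CYC2NS_SCALE_FACTOR */

/* Precompute the scale from the CPU frequency in kHz. */
static void set_cyc2ns_scale(uint32_t cpu_khz)
{
	/* ns per cycle = 10^6 / cpu_khz, kept with 10 fractional bits */
	cyc2ns_scale = (uint32_t)((1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz);
}

/* Convert a TSC cycle count to nanoseconds: one multiply, one shift. */
static uint64_t cycles_2_ns(uint64_t cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	set_cyc2ns_scale(2000000);	/* a 2 GHz TSC, illustrative */
	/* 3e9 cycles at 2 GHz should come out as ~1.5e9 ns */
	printf("%llu ns\n", (unsigned long long)cycles_2_ns(3000000000ULL));
	return 0;
}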
Example #2
/*
 * Timer 1 is used for the RTC. We do not use HPET's periodic interrupt
 * feature on Timer 1, even though the hardware supports it: to set up a
 * periodic interrupt in the HPET we would have to stop the main counter,
 * and doing that every time someone disables/enables the RTC would
 * disturb the main kernel timer running on Timer 0. So, for the time
 * being, the periodic interrupt is simulated in software.
 *
 * hpet_rtc_timer_init() is called for the first setup; on subsequent
 * interrupts, re-initialization happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set up timer 1's comparator and enable its interrupt.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);

	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
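To get a feel for the comparator increment above (illustrative numbers only, assuming the common 14.31818 MHz HPET, HZ=1000 and the default 64 Hz RTC rate): hpet_tick is about 14318 HPET clocks per kernel tick, so hpet_tick*HZ/hpet_rtc_int_freq ≈ 14318 * 1000 / 64 ≈ 223718 HPET clocks, i.e. roughly the 15.6 ms spacing a real 64 Hz RTC interrupt would have.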
Example #3
static void delay_hpet(unsigned long loops)
{
	unsigned long hpet_start, hpet_end;
	unsigned long eax;

	/* loops is the number of cpu cycles. Convert it to hpet clocks */
	ASM_MUL64_REG(eax, loops, tsc_hpet_quotient, loops);

	hpet_start = hpet_readl(HPET_COUNTER);
	do {
		rep_nop();
		hpet_end = hpet_readl(HPET_COUNTER);
	} while ((hpet_end - hpet_start) < (loops));
}
static void mark_offset_tsc_hpet(void)
{
	unsigned long long this_offset, last_offset;
 	unsigned long offset, temp, hpet_current;

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	/*
	 * It is important that these two operations happen almost at
	 * the same time. We do the RDTSC stuff first, since it's
	 * faster. To avoid any inconsistencies, we need interrupts
	 * disabled locally.
	 */
	/*
	 * Interrupts are just disabled locally since the timer irq
	 * has the SA_INTERRUPT flag set. -arca
	 */
	/* read Pentium cycle counter */

	hpet_current = hpet_readl(HPET_COUNTER);
	rdtsc(last_tsc_low, last_tsc_high);

	/* lost tick compensation */
	offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
	if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
					&& detect_lost_ticks) {
		int lost_ticks = (offset - hpet_last) / hpet_tick;
		jiffies_64 += lost_ticks;
	}
	hpet_last = hpet_current;

	/* update the monotonic base value */
	this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	write_sequnlock(&monotonic_lock);

	/* calculate delay_at_last_interrupt */
	/*
	 * Time offset = (hpet delta) * ( usecs per HPET clock )
	 *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
	 *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
	 * Where,
	 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
	 */
	delay_at_last_interrupt = hpet_current - offset;
	ASM_MUL64_REG(temp, delay_at_last_interrupt,
			hpet_usec_quotient, delay_at_last_interrupt);
}
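Both functions above do their unit conversions with ASM_MUL64_REG, an x86 mull wrapper that yields the 64-bit product of two 32-bit operands; with a quotient precomputed as (target rate << 32) / source rate, keeping the high half of that product rescales a count from one clock domain to another without a division. A hedged, portable-C sketch of that operation (the macro's exact register layout is assumed here, not taken from the source):

#include <stdint.h>

/*
 * Return the high 32 bits of a 32x32-bit product:
 *	result = (count * quotient) >> 32
 * With quotient = (target_rate << 32) / source_rate, this rescales
 * 'count' from source-clock units to target-clock units with one
 * multiply instead of a divide, as delay_hpet() does for TSC cycles
 * to HPET clocks and mark_offset_tsc_hpet() does for HPET clocks
 * to microseconds.
 */
uint32_t mul_high32(uint32_t count, uint32_t quotient)
{
	return (uint32_t)(((uint64_t)count * quotient) >> 32);
}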
Example #5
int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */
	if (hpet_use_timer) {
		hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		    HPET_TN_32BIT, HPET_T0_CFG);
		hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
		hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
		cfg |= HPET_CFG_LEGACY;
	}
/*
 * Go!
 */

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}
Example #6
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt, ticks_per_int, lost_ints;

	if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than the current count. */
	ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_t1_cmp += ticks_per_int;
	hpet_writel(hpet_t1_cmp, HPET_T1_CMP);

	/*
	 * If the interrupt handler was delayed too long, the write above tries
	 * to schedule the next interrupt in the past and the hardware would
	 * not interrupt until the counter had wrapped around.
	 * So we have to check that the comparator wasn't set to a past time.
	 */
	cnt = hpet_readl(HPET_COUNTER);
	if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) {
		lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1;
		/* Make sure that, even with the time needed to execute
		 * this code, the next scheduled interrupt has been moved
		 * back to the future: */
		lost_ints++;

		hpet_t1_cmp += lost_ints * ticks_per_int;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);

		if (PIE_on)
			PIE_count += lost_ints;

		if (printk_ratelimit())
			printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
			       hpet_rtc_int_freq);
	}
}
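The (int)(cnt - hpet_t1_cmp) > 0 test above is the standard wrap-safe way to ask whether the free-running 32-bit counter has moved past the comparator: the unsigned subtraction is taken mod 2^32, and the signed cast reads the difference correctly as long as the two values are less than half the counter range apart. A small stand-alone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Nonzero if 'cnt' has moved past 'cmp', assuming the two values are
 * less than half the 32-bit range apart. */
static int counter_after(uint32_t cnt, uint32_t cmp)
{
	return (int32_t)(cnt - cmp) > 0;
}

int main(void)
{
	/* Comparator just below the wrap point, counter just after it. */
	uint32_t cmp = 0xFFFFFFF0u, cnt = 0x00000010u;

	printf("naive cnt > cmp:  %d\n", cnt > cmp);			/* 0: wrong */
	printf("counter_after():  %d\n", counter_after(cnt, cmp));	/* 1 */
	return 0;
}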
Example #7
int hpet_arch_init(void)
{
	unsigned int id;
	u64 tmp;

	if (!hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;

	hpet_use_timer = (id & HPET_ID_LEGSUP);

	/*
	 * The HPET period is in femtoseconds per cycle,
	 * so we need to convert this to ns/cyc units,
	 * approximated by mult/2^shift
	 *
	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
	 *  (fsec/cyc << shift)/1000000 = mult
	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
	 */
	tmp = (u64)hpet_period << HPET_SHIFT;
	do_div(tmp, FSEC_PER_NSEC);
	clocksource_hpet.mult = (u32)tmp;
	clocksource_register(&clocksource_hpet);

	return hpet_timer_stop_set_go(hpet_tick);
}
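To see that the mult computed above matches the derivation in the comment: the clocksource layer turns HPET cycles into nanoseconds as (cycles * mult) >> shift, and with mult = (hpet_period << HPET_SHIFT) / FSEC_PER_NSEC that collapses back to cycles * hpet_period / 10^6, i.e. femtoseconds scaled down to nanoseconds. A user-space check with illustrative values (the HPET_SHIFT of 22 and the common 14.31818 MHz HPET period are assumptions, not read from this source):

#include <stdint.h>
#include <stdio.h>

#define HPET_SHIFT	22		/* assumed fixed-point shift */
#define FSEC_PER_NSEC	1000000ULL	/* 10^6 femtoseconds per nanosecond */

int main(void)
{
	uint64_t hpet_period = 69841279;	/* fs per HPET cycle, ~14.318 MHz */
	uint32_t mult = (uint32_t)((hpet_period << HPET_SHIFT) / FSEC_PER_NSEC);
	uint64_t cycles = 14318180;		/* about one second of HPET cycles */

	/* clocksource-style conversion: ns = (cycles * mult) >> shift */
	uint64_t ns = (cycles * mult) >> HPET_SHIFT;

	/* should print a value very close to 10^9 ns */
	printf("mult = %u, ns = %llu\n", (unsigned)mult, (unsigned long long)ns);
	return 0;
}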
Example #8
/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
static void setup_APIC_timer(unsigned int clocks)
{
	unsigned long flags;

	local_irq_save(flags);

	/* wait for irq slice */
 	if (vxtime.hpet_address && hpet_use_timer) {
 		int trigger = hpet_readl(HPET_T0_CMP);
 		while (hpet_readl(HPET_COUNTER) >= trigger)
 			/* do nothing */ ;
 		while (hpet_readl(HPET_COUNTER) <  trigger)
 			/* do nothing */ ;
 	} else {
		int c1, c2;
		outb_p(0x00, 0x43);
		c2 = inb_p(0x40);
		c2 |= inb_p(0x40) << 8;
		do {
			c1 = c2;
			outb_p(0x00, 0x43);
			c2 = inb_p(0x40);
			c2 |= inb_p(0x40) << 8;
		} while (c2 - c1 < 300);
	}
	__setup_APIC_LVTT(clocks);
	/* Turn off the PIT interrupt if we use the APIC timer as the main
	   timer.  This only works with the PM timer right now;
	   TBD: fix it for HPET too. */
	if (vxtime.mode == VXTIME_PMTMR &&
		smp_processor_id() == boot_cpu_id &&
		apic_runs_main_timer == 1 &&
		!cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
		stop_timer_interrupt();
		apic_runs_main_timer++;
	}
	local_irq_restore(flags);
}
Example #10
/*
 * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none
 * occurs between the reads of the hpet & TSC.
 */
static void __init read_hpet_tsc(int *hpet, int *tsc)
{
	int tsc1, tsc2, hpet1, i;

	for (i = 0; i < MAX_TRIES; i++) {
		tsc1 = get_cycles_sync();
		hpet1 = hpet_readl(HPET_COUNTER);
		tsc2 = get_cycles_sync();
		if (tsc2 - tsc1 < TICK_MIN)
			break;
	}
	*hpet = hpet1;
	*tsc = tsc2;
}
Example #11
static void setup_APIC_timer(unsigned int clocks)
{
	unsigned long flags;

	local_irq_save(flags);

	/* For some reason this doesn't work on Simics, so fake it for now */
	if (!strstr(boot_cpu_data.x86_model_id, "Screwdriver")) {
		__setup_APIC_LVTT(clocks);
		local_irq_restore(flags);
		return;
	}

	/* wait for irq slice */
 	if (vxtime.hpet_address) {
 		int trigger = hpet_readl(HPET_T0_CMP);
 		while (hpet_readl(HPET_COUNTER) >= trigger)
 			/* do nothing */ ;
 		while (hpet_readl(HPET_COUNTER) <  trigger)
 			/* do nothing */ ;
 	} else {
		int c1, c2;
		outb_p(0x00, 0x43);
		c2 = inb_p(0x40);
		c2 |= inb_p(0x40) << 8;
		do {
			c1 = c2;
			outb_p(0x00, 0x43);
			c2 = inb_p(0x40);
			c2 |= inb_p(0x40) << 8;
		} while (c2 - c1 < 300);
	}

	__setup_APIC_LVTT(clocks);

	local_irq_restore(flags);
}
Example #12
/*
 * Read the TSC and the reference counter. Take care of SMI disturbance.
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}
Example #13
static __init int late_hpet_init(void)
{
	struct hpet_data	hd;
	unsigned int 		ntimer;

	if (!hpet_address)
		return 0;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with the driver.
	 * Timer 0 and Timer 1 are used by the platform.
	 */
	hd.hd_phys_address = hpet_address;
	hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef	CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet		*hpet;
		struct hpet_timer	*timer;
		int			i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
		timer = &hpet->hpet_timers[2];
		for (i = 2; i < ntimer; timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;

	}

	hpet_alloc(&hd);
	return 0;
}
Example #14
static unsigned long get_offset_hpet(void)
{
	register unsigned long eax, edx;

	eax = hpet_readl(HPET_COUNTER);
	eax -= hpet_last;	/* hpet delta */
	eax = min(hpet_tick, eax);
	/*
	 * Time offset = (hpet delta) * ( usecs per HPET clock )
	 *             = (hpet delta) * ( usecs per tick / HPET clocks per tick)
	 *             = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
	 *
	 * Where,
	 * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
	 *
	 * Using a mull instead of a divl saves some cycles in critical path.
	 */
	ASM_MUL64_REG(eax, edx, hpet_usec_quotient, eax);

	/* our adjusted time offset in microseconds */
	return edx;
}
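As a rough sanity check of the fixed-point math above (illustrative numbers, assuming the common 14.31818 MHz HPET): one HPET clock is about 0.0698 µs, so hpet_usec_quotient ≈ 0.0698 * 2^32 ≈ 3.0e8; a delta of 14318 HPET clocks (one tick at HZ=1000) then gives (14318 * 3.0e8) >> 32 ≈ 1000 µs, as expected.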
Example #15
void setup_APIC_timer(void * data)
{
	unsigned int clocks = (unsigned long) data, slice, t0, t1;
	int delta;

	/*
	 * OK, Intel has some smart code in their APIC that knows
	 * if a CPU was in 'hlt' low-power mode, and this increases
	 * its APIC arbitration priority. To avoid the external timer
	 * IRQ APIC event being synchronous with the APIC clock we
	 * introduce an interrupt skew to spread out timer events.
	 *
	 * The number of slices within a 'big' timeslice is smp_num_cpus+1.
	 */

	slice = clocks / (smp_num_cpus+1);
	printk(KERN_INFO "cpu: %d, clocks: %d, slice: %d\n",
		smp_processor_id(), clocks, slice);

	/*
	 * Wait for timer IRQ slice:
	 */

	if (hpet.address) {
		int trigger = hpet_readl(HPET_T0_CMP);
		while (hpet_readl(HPET_COUNTER) >= trigger);
		while (hpet_readl(HPET_COUNTER) <  trigger);
	} else {
		int c1, c2;
		outb_p(0x00, 0x43);
		c2 = inb_p(0x40);
		c2 |= inb_p(0x40) << 8;
		do {
			c1 = c2;
			outb_p(0x00, 0x43);
			c2 = inb_p(0x40);
			c2 |= inb_p(0x40) << 8;
		} while (c2 - c1 < 300);
	}

	__setup_APIC_LVTT(clocks);

	t0 = apic_read(APIC_TMICT)*APIC_DIVISOR;

	/* Wait till TMCCT gets reloaded from TMICT... */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta >= 0);

	/* Now wait for our slice for real. */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta < 0);

	__setup_APIC_LVTT(clocks);

	printk(KERN_INFO "CPU%d<T0:%u,T1:%u,D:%d,S:%u,C:%u>\n",
			smp_processor_id(), t0, t1, delta, slice, clocks);
}
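To make the skew concrete (illustrative numbers): with smp_num_cpus = 3 and clocks = 8,000,000 APIC-timer counts per tick, slice = 8,000,000 / 4 = 2,000,000, so CPU0 waits until the count has fallen 2,000,000 below t0, CPU1 until 4,000,000 and CPU2 until 6,000,000 before reprogramming LVTT, spreading the per-CPU local timer interrupts across the global timer period.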
Example #16
static cycle_t read_hpet(void)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}