/*
 * Handle a CP0 COUNT/COMPARE clock interrupt: schedule the next one,
 * recover from any missed ticks, and run hardclock().
 */
void
mips3_clockintr(struct clockframe *cfp)
{
	struct cpu_info * const ci = curcpu();
	uint32_t new_cnt;

	ci->ci_ev_count_compare.ev_count++;

	/*
	 * ci_cycles_per_hz must fit in the 32-bit COMPARE register.
	 * (The previous test, "& ~(0xffffffff)", was vacuous because
	 * ~0xffffffffu == 0, so it could never fire.)
	 */
	KASSERT(ci->ci_cycles_per_hz <= 0xffffffff);
	ci->ci_next_cp0_clk_intr += (uint32_t)(ci->ci_cycles_per_hz & 0xffffffff);
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);

	/* Check for lost clock interrupts */
	new_cnt = mips3_cp0_count_read();

	/*
	 * Missed one or more clock interrupts, so let's start
	 * counting again from the current value.  (The subtraction is
	 * negative — top bit set — when COUNT has already passed the
	 * COMPARE value we just programmed.)
	 */
	if ((ci->ci_next_cp0_clk_intr - new_cnt) & 0x80000000) {
		/* use the cached ci instead of re-evaluating curcpu() */
		ci->ci_next_cp0_clk_intr = new_cnt + ci->ci_cycles_per_hz;
		mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
		ci->ci_ev_count_compare_missed.ev_count++;
	}

	/*
	 * Since hardclock is at the end, we can invoke it by a tailcall.
	 */
	hardclock(cfp);
	/* caller should re-enable clock interrupts */
}
Exemplo n.º 2
0
/*
 * Acknowledge any pending PWM interrupt, then busy-wait for the next
 * one and return the CP0 COUNT value at that moment.
 */
static uint32_t
pwmclock_wait_edge(struct pwmclock_softc *sc)
{
	uint32_t reg;

	/* clear interrupt */
	bus_space_write_4(sc->sc_memt, sc->sc_regh, SM502_PWM1, sc->sc_reg);

	/* spin until the interrupt-pending bit comes on again */
	for (;;) {
		reg = bus_space_read_4(sc->sc_memt, sc->sc_regh, SM502_PWM1);
		if (reg & SM502_PWM_INTR_PENDING)
			break;
	}

	return mips3_cp0_count_read();
}
Exemplo n.º 3
0
/*
 * Timecounter read routine: the accumulated count plus the scaled
 * CP0 ticks elapsed since the last clock interrupt.
 */
static u_int
get_pwmclock_timecount(struct timecounter *tc)
{
	struct pwmclock_softc * const sc = pwmclock;
	const uint32_t elapsed = mips3_cp0_count_read() - sc->sc_last;

	return sc->sc_count + scale(elapsed, sc->sc_step);
}
Exemplo n.º 4
0
/*
 * Reset the software clock state, register the timecounter, and kick
 * the PWM so it starts generating interrupts.
 */
static void
pwmclock_start(void)
{
	struct pwmclock_softc * const sc = pwmclock;

	/* baseline: zero accumulated time, remember current CP0 count */
	sc->sc_count = 0;
	sc->sc_last = mips3_cp0_count_read();

	/* half the core clock — presumably the CP0 count rate; TODO confirm */
	pwmclock_timecounter.tc_frequency = curcpu()->ci_cpu_freq / 2;
	tc_init(&pwmclock_timecounter);

	/* start the PWM */
	bus_space_write_4(sc->sc_memt, sc->sc_regh, SM502_PWM1, sc->sc_reg);
}
Exemplo n.º 5
0
/*
 * Wait for at least "n" microseconds.
 */
void
mips3_delay(int n)
{
	u_long divisor_delay;
	uint32_t cur, last, delta, usecs;

	last = mips3_cp0_count_read();
	delta = usecs = 0;

	divisor_delay = curcpu()->ci_divisor_delay;
	if (divisor_delay == 0) {
		/*
		 * Frequency values in curcpu() are not initialized.
		 * Assume faster frequency since longer delays are harmless.
		 * Note CPU_MIPS_DOUBLE_COUNT is ignored here.
		 */
#define FAST_FREQ	(300 * 1000 * 1000)	/* fast enough? */
		divisor_delay = FAST_FREQ / (1000 * 1000);
	}

	while (n > usecs) {
		cur = mips3_cp0_count_read();

		/*
		 * The MIPS3 CP0 counter always counts upto UINT32_MAX,
		 * so no need to check wrapped around case.
		 */
		delta += (cur - last);

		last = cur;

		while (delta >= divisor_delay) {
			/*
			 * delta is not so larger than divisor_delay here,
			 * and using DIV/DIVU ops could be much slower.
			 * (though longer delay may be harmless)
			 */
			usecs++;
			delta -= divisor_delay;
		}
	}
}
/*
 * Start the real-time and statistics clocks. Leave stathz 0 since there
 * are no other timers available.
 */
static void
mips3_init_cp0_clocks(void)
{
	struct cpu_info * const ci = curcpu();

	ci->ci_next_cp0_clk_intr = mips3_cp0_count_read() + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);

	mips3_init_tc();

}
Exemplo n.º 7
0
/*
 * PWM clock interrupt handler.  Returns 1 if the interrupt was ours
 * (and was handled), 0 if it belongs to another device on the line.
 */
int
pwmclock_intr(void *cookie)
{
	struct clockframe *cf = cookie;
	struct pwmclock_softc *sc = pwmclock;
	uint32_t reg, now, diff;

	/* is it us? */
	reg = bus_space_read_4(sc->sc_memt, sc->sc_regh, SM502_PWM1);
	if ((reg & SM502_PWM_INTR_PENDING) == 0)
		return 0;

	/* yes, it's us, so clear the interrupt */
	bus_space_write_4(sc->sc_memt, sc->sc_regh, SM502_PWM1, sc->sc_reg);

	/*
	 * this looks kinda funny but what we want here is this:
	 * - reading the counter and changing the CPU clock should be as
	 *   close together as possible in order to remain halfway accurate
	 * - we need to use the previous sc_step in order to scale the
	 *   interval passed since the last clock interrupt correctly, so
	 *   we only change sc_step after doing that
	 */
	if (sc->sc_step_wanted != sc->sc_step) {
		/* program the requested frequency-scale step into the chip */
		REGVAL(LS2F_CHIPCFG0) =
		    (REGVAL(LS2F_CHIPCFG0) & ~LS2FCFG_FREQSCALE_MASK) |
		     sc->sc_step_wanted;
	}

	/* accumulate elapsed time, scaled by the step in effect until now */
	now = mips3_cp0_count_read();
	diff = now - sc->sc_last;
	sc->sc_count += scale(diff, sc->sc_step);
	sc->sc_last = now;
	if (sc->sc_step_wanted != sc->sc_step) {
		/* only now is it safe to record the new step */
		sc->sc_step = sc->sc_step_wanted;
	}

	hardclock(cf);

	return 1;
}
Exemplo n.º 8
0
/*
 * Release the hatched secondary CPUs: record the boot CPU's cycle
 * counter for skew correction, mark each CPU running, and add it to
 * the running set.
 */
void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *cpu;

	for (cpu = cpu_info_store.ci_next; cpu != NULL; cpu = cpu->ci_next) {
		KASSERT(!CPU_IS_PRIMARY(cpu));
		KASSERT(cpu->ci_data.cpu_idlelwp);

		/* Skip this CPU if it didn't successfully hatch. */
		if (!CPUSET_HAS_P(cpus_hatched, cpu_index(cpu)))
			continue;

		/* our current cycle count, so the secondary can sync up */
		cpu->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&cpu->ci_flags, CPUF_RUNNING);
		CPUSET_ADD(cpus_running, cpu_index(cpu));
	}
}
Exemplo n.º 9
0
/*
 * Per-CPU bring-up for a secondary processor.  Runs on the secondary
 * itself: initializes the TLB, COP0 registers, caches, and the
 * count/compare clock, then waits to be released by the boot CPU and
 * finally enters the idle loop.  Does not return.
 */
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB enties (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		/* global + valid mapping of this CPU's cpu_info page */
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
		const struct tlbmask tlbmask = {
			/* map the last page of the address space */
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};

		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}

	/*
	 * Flush the icache just be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	// Show this CPU as present.
	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

	/*
	 * Announce we are hatched
	 */
	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!  (The boot CPU sets our bit in
	 * cpus_running from cpu_boot_secondary_processors().)
	 */
	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * initialize the MIPS count/compare clock, using the cycle-count
	 * skew the boot CPU recorded for us so clocks line up
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts (and verify they are on).
	 */
	spl0();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	/* this CPU now runs the kernel pmap */
	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}

/*
 * Release the hatched secondary CPUs: record the boot CPU's cycle
 * counter for skew correction, mark each CPU running, and add it to
 * the running set.
 */
void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
			continue;

		/* our current cycle count, so the secondary can sync up */
		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));
		// Spin until the cpu calls idle_loop
		/*
		 * NOTE(review): this loop tests the same cpus_running bit
		 * that was set two lines above, so it exits on the first
		 * iteration and never actually waits.  It presumably should
		 * poll a flag that the hatched CPU sets when it reaches
		 * idle_loop — verify the intended condition.
		 */
		for (u_int i = 0; i < 100; i++) {
			if (kcpuset_isset(cpus_running, cpu_index(ci)))
				break;
			delay(1000);
		}
	}
}
Exemplo n.º 10
0
/*
 * Since it takes so long to read the complete time/date values from
 * the RTC over the SMBus, we only read the seconds value.
 * Later versions of the SWARM will hopefully have the RTC interrupt
 * attached so we can do the clock calibration much more quickly and
 * with a higher resolution.
 */
static void
rtc_cal_timer(void)
{
	uint32_t ctrdiff[NITERS], startctr, endctr;
	int sec, lastsec, i;

	if (rtcfound == 0) {
		printf("rtc_cal_timer before rtc attached\n");
		return;
	}
/*
 * The early return below deliberately disables the calibration code
 * that follows (everything past here is currently dead code).
 */
return;	/* XXX XXX */

	printf("%s: calibrating CPU clock", device_xname(the_rtc->sc_dev));

	/*
	 * Run the loop an extra time to wait for the second to tick over
	 * and to prime the cache.
	 */
	time_smbus_init(the_rtc->sc_smbus_chan);
	sec = RTC_SECONDS(the_rtc);
	endctr = mips3_cp0_count_read();

	for (i = 0; i < NITERS; i++) {
		int diff;

 again:
		lastsec = sec;
		startctr = endctr;

		/* Wait for the timer to tick over. */
		do {
			// time_smbus_init(the_rtc->sc_smbus_chan);
			sec = RTC_SECONDS(the_rtc);
		} while (lastsec == sec);
		endctr = mips3_cp0_count_read();

		/* seconds delta, adjusted for minute wrap (0..59) */
		diff = sec - lastsec;
		if (diff < 0)
			diff += 60;

		/* Sometimes we appear to skip a second.  Clock jitter? */
		if (diff > 1)
			goto again;

		/* handle CP0 COUNT wraparound between the two reads */
		if (endctr < startctr)
			ctrdiff[i] = 0xffffffff - startctr + endctr;
		else
			ctrdiff[i] = endctr - startctr;
	}
	printf("\n");

	/* Compute the number of cycles per second. */
	/* (averages iterations 1 and 2; iteration 0 primed the cache) */
	curcpu()->ci_cpu_freq = ((ctrdiff[1] + ctrdiff[2]) / 2);

	/* Compute the delay divisor. */
	curcpu()->ci_divisor_delay = curcpu()->ci_cpu_freq / 1000000;

	/* Compute clock cycles per hz */
	curcpu()->ci_cycles_per_hz = curcpu()->ci_cpu_freq / hz;

	printf("%s: timer calibration: %lu cycles/sec [(%u, %u)]\n",
	    device_xname(the_rtc->sc_dev), curcpu()->ci_cpu_freq,
	    ctrdiff[1], ctrdiff[2]);
}
Exemplo n.º 11
0
/*
 * Calibrate the CPU clock against programmable counter 1, and derive
 * ci_cycles_per_hz, ci_divisor_delay and ci_divisor_recip from it.
 */
void
au_cal_timers(bus_space_tag_t st, bus_space_handle_t sh)
{
	uint32_t ctrdiff[4], startctr, endctr;
	uint32_t ctl, ctr, octr;
	int i;

	/* Enable the programmable counter 1. */
	ctl = bus_space_read_4(st, sh, PC_COUNTER_CONTROL);
	/*
	 * BUG FIX: the original had a stray ';' after this if, which made
	 * the SET_PC_REG execute unconditionally.
	 */
	if ((ctl & (CC_EO | CC_EN1)) != (CC_EO | CC_EN1))
		SET_PC_REG(PC_COUNTER_CONTROL, 0, ctl | CC_EO | CC_EN1);

	/* Initialize for 16Hz. */
	SET_PC_REG(PC_TRIM1, CC_T1S, PC_RATE / 16 - 1);

	/* Run the loop an extra time to prime the cache. */
	for (i = 0; i < 4; i++) {
		/* Reset the counter. */
		SET_PC_REG(PC_COUNTER_WRITE1, CC_C1S, 0);

		/* Wait for 1/16th of a second. */
		//startctr = mips3_cp0_count_read();

		/* Wait for the PC to tick over. */
		ctr = bus_space_read_4(st, sh, PC_COUNTER_READ_1);
		do {
			octr = bus_space_read_4(st, sh, PC_COUNTER_READ_1);
		} while (ctr == octr);

		/* Count CP0 cycles across one PC tick. */
		startctr = mips3_cp0_count_read();
		do {
			ctr = bus_space_read_4(st, sh, PC_COUNTER_READ_1);
		} while (ctr == octr);	// while (ctr <= octr + 1);
		endctr = mips3_cp0_count_read();
		ctrdiff[i] = endctr - startctr;
	}

	/* Disable the counter (if it wasn't enabled already). */
	/* (same stray-';' bug fixed here as well) */
	if ((ctl & (CC_EO | CC_EN1)) != (CC_EO | CC_EN1))
		SET_PC_REG(PC_COUNTER_CONTROL, 0, ctl);

	/* Compute the number of cycles per second. */
	/* (average the last two iterations; earlier ones primed the cache) */
	curcpu()->ci_cpu_freq = ((ctrdiff[2] + ctrdiff[3]) / 2) * 16;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);

	/*
	 * To implement a more accurate microtime using the CP0 COUNT
	 * register we need to divide that register by the number of
	 * cycles per MHz.  But...
	 *
	 * DIV and DIVU are expensive on MIPS (eg 75 clocks on the
	 * R4000).  MULT and MULTU are only 12 clocks on the same CPU.
	 * On the SB1 these appear to be 40-72 clocks for DIV/DIVU and 3
	 * clocks for MUL/MULTU.
	 *
	 * The strategy we use is to calculate the reciprocal of cycles
	 * per MHz, scaled by 1<<32.  Then we can simply issue a MULTU
	 * and pluck off the HI register and have the results of the
	 * division.
	 */
	curcpu()->ci_divisor_recip =
	    0x100000000ULL / curcpu()->ci_divisor_delay;

	/*
	 * Get correct cpu frequency if the CPU runs at twice the
	 * external/cp0-count frequency.
	 */
	if (mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
		curcpu()->ci_cpu_freq *= 2;

#ifdef DEBUG
	printf("Timer calibration: %lu cycles/sec [(%u, %u) * 16]\n",
	    curcpu()->ci_cpu_freq, ctrdiff[2], ctrdiff[3]);
#endif
}
Exemplo n.º 12
0
/*
 * Calibrate the CPU clock against programmable counter 1, and derive
 * ci_cycles_per_hz and ci_divisor_delay from the measured frequency.
 */
void
au_cal_timers(bus_space_tag_t st, bus_space_handle_t sh)
{
	struct cpu_info * const ci = curcpu();
	uint32_t ctrdiff[4], startctr, endctr;
	uint32_t ctl, ctr, octr;
	int i;

	/* Enable the programmable counter 1. */
	ctl = bus_space_read_4(st, sh, PC_COUNTER_CONTROL);
	if ((ctl & (CC_EO | CC_EN1)) != (CC_EO | CC_EN1))
		SET_PC_REG(PC_COUNTER_CONTROL, 0, ctl | CC_EO | CC_EN1);

	/* Initialize for 16Hz. */
	SET_PC_REG(PC_TRIM1, CC_T1S, PC_RATE / 16 - 1);

	/* Run the loop an extra time to prime the cache. */
	for (i = 0; i < 4; i++) {
		/* Reset the counter. */
		SET_PC_REG(PC_COUNTER_WRITE1, CC_C1S, 0);

		/* Wait for 1/16th of a second. */
		//startctr = mips3_cp0_count_read();

		/* Wait for the PC to tick over. */
		ctr = bus_space_read_4(st, sh, PC_COUNTER_READ_1);
		do {
			octr = bus_space_read_4(st, sh, PC_COUNTER_READ_1);
		} while (ctr == octr);

		/* Measure CP0 cycles elapsed across exactly one PC tick. */
		startctr = mips3_cp0_count_read();
		do {
			ctr = bus_space_read_4(st, sh, PC_COUNTER_READ_1);
		} while (ctr == octr);	// while (ctr <= octr + 1);
		endctr = mips3_cp0_count_read();
		ctrdiff[i] = endctr - startctr;
	}

	/* Disable the counter (if it wasn't enabled already). */
	if ((ctl & (CC_EO | CC_EN1)) != (CC_EO | CC_EN1))
		SET_PC_REG(PC_COUNTER_CONTROL, 0, ctl);

	/* Compute the number of cycles per second. */
	/* (average the last two iterations; earlier ones primed the cache) */
	ci->ci_cpu_freq = ((ctrdiff[2] + ctrdiff[3]) / 2) * 16;
	ci->ci_cctr_freq = ci->ci_cpu_freq;

	/* Compute the number of ticks for hz. */
	ci->ci_cycles_per_hz = (ci->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor. */
	ci->ci_divisor_delay = (ci->ci_cpu_freq + 500000) / 1000000;

	/*
	 * Get correct cpu frequency if the CPU runs at twice the
	 * external/cp0-count frequency.
	 */
	if (mips_options.mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
		ci->ci_cpu_freq *= 2;

#ifdef DEBUG
	printf("Timer calibration: %lu cycles/sec [(%u, %u) * 16]\n",
	    ci->ci_cpu_freq, ctrdiff[2], ctrdiff[3]);
#endif
}