Example no. 1
0
/*
 * Start the real-time and statistics clocks. Leave stathz 0 since there
 * are no other timers available.
 */
void
cp0_startclock(struct cpu_info *ci)
{
	int s;

#ifdef MULTIPROCESSOR
	if (!CPU_IS_PRIMARY(ci)) {
		s = splhigh();
		nanouptime(&ci->ci_schedstate.spc_runtime);
		splx(s);

		/* try to avoid getting clock interrupts early */
		cp0_set_compare(cp0_get_count() - 1);

		cp0_calibrate(ci);
	}
#endif

	/* Start the clock. */
	s = splclock();
	ci->ci_cpu_counter_interval =
	    (ci->ci_hw.clock / CP0_CYCLE_DIVIDER) / hz;
	ci->ci_cpu_counter_last = cp0_get_count() + ci->ci_cpu_counter_interval;
	cp0_set_compare(ci->ci_cpu_counter_last);
	ci->ci_clock_started++;
	splx(s);
}
Example no. 2
0
File: clock.c  Project: MarginC/kame
/*
 *  Interrupt handler for targets using the internal count register
 *  as interval clock. Normally the system is run with the clock
 *  interrupt always enabled. Masking is done here and if the clock
 *  can not be run the tick is just counted and handled later when
 *  the clock is unmasked again.
 */
intrmask_t
clock_int5( intrmask_t mask, struct trap_frame *tf)
{
    u_int32_t clkdiff;

    /*
     * If clock is started count the tick, else just arm for a new.
     */
    if (clock_started && cpu_counter_interval != 0) {
        /*
         * Catch up on any ticks that were missed while interrupts
         * were masked; unsigned subtraction handles count register
         * wraparound.
         */
        clkdiff = cp0_get_count() - cpu_counter_last;
        while (clkdiff >= cpu_counter_interval) {
            cpu_counter_last += cpu_counter_interval;
            clkdiff = cp0_get_count() - cpu_counter_last;
            pendingticks++;
        }
        /* ...plus the tick that raised this interrupt. */
        cpu_counter_last += cpu_counter_interval;
        pendingticks++;
    } else {
        /* Clock not running yet: just rearm one interval ahead. */
        cpu_counter_last = cpu_counter_interval + cp0_get_count();
    }

    cp0_set_compare(cpu_counter_last);

    /* Deliver the queued ticks unless the clock is masked at this ipl. */
    if ((tf->cpl & SPL_CLOCKMASK) == 0) {
        while (pendingticks) {
            hardclock(tf);
            pendingticks--;
        }
    }

    return CR_INT_5;	/* Clock is always on 5 */
}
Example no. 3
0
/*
 * Busy-wait for "n" nanoseconds.
 *
 * The delay is converted into CP0 count register cycles; the count
 * register ticks at half the CPU pipeline clock, hence the final
 * division by two.
 */
void
nanodelay(int n)
{
	int dly;
	int p, c;

	p = cp0_get_count();
	/*
	 * Use a 64-bit intermediate: "clock * n" in 32-bit int
	 * arithmetic overflows (undefined behavior) for delays of more
	 * than a handful of nanoseconds on CPUs clocked in the
	 * hundreds of MHz.
	 */
	dly = (int)(((uint64_t)sys_config.cpu[0].clock * (uint64_t)n)
	    / 1000000000 / 2);
	while (dly > 0) {
		/* Consume elapsed cycles; wraparound-safe difference. */
		c = cp0_get_count();
		dly -= c - p;
		p = c;
	}
}
Example no. 4
0
File: clock.c  Project: MarginC/kame
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points. Successive calls are guaranteed to return
 * strictly increasing values.
 */
void
microtime(struct timeval *tvp)
{
    static struct timeval lasttime;  /* value handed out last time */
    u_int32_t cycles;
    int s = splclock();

    /* Start from the time of the last clock tick... */
    *tvp = time;
    /* ...and interpolate with the cycles elapsed since that tick. */
    cycles = (cp0_get_count() - cpu_counter_last) * 1000;
    tvp->tv_usec += cycles / ticktime;
    while (tvp->tv_usec >= 1000000) {
        tvp->tv_usec -= 1000000;
        tvp->tv_sec++;
    }

    /* Never repeat or go backwards relative to the previous call. */
    if (tvp->tv_sec == lasttime.tv_sec &&
            tvp->tv_usec <= lasttime.tv_usec) {
        tvp->tv_usec++;
        if (tvp->tv_usec >= 1000000) {
            tvp->tv_usec -= 1000000;
            tvp->tv_sec++;
        }
    }
    lasttime = *tvp;
    splx(s);
}
Example no. 5
0
/*
 *	Set up the on-chip count/compare registers as the interval
 *	clock. The count register advances at half the pipeline
 *	frequency, so the CPU clock rate must be known and the
 *	options register wired to permit its use.
 *
 *	Setting 'cpu_counter_interval' here enables the tick code.
 */
void
clock_int5_init(struct clock_softc *sc)
{
        int spl;

        spl = splclock();
        /* One tick per 1/hz second; counter runs at clock/2. */
        cpu_counter_interval = sys_config.cpu[0].clock / (hz * 2);
        /* Schedule the first interrupt generously far in the future. */
        cpu_counter_last = cp0_get_count() + cpu_counter_interval * 4;
        cp0_set_compare(cpu_counter_last);
        splx(spl);
}
Example no. 6
0
void
clockattach(struct device *parent, struct device *self, void *aux)
{
	printf(": int 5\n");

	/*
	 * Register the interrupt handler immediately so that
	 * idle_mask ends up computed correctly.
	 */
	set_intr(INTPRI_CLOCK, CR_INT_5, cp0_int5);
	evcount_attach(&cp0_clock_count, "clock", &cp0_clock_irq);

	/* Push compare far ahead to avoid early clock interrupts. */
	cp0_set_compare(cp0_get_count() - 1);

	/* The clock itself is started later via md_startclock(). */
	md_startclock = cp0_startclock;
}
Example no. 7
0
/*
 *  Interrupt handler for targets using the internal count register
 *  as interval clock. Normally the system is run with the clock
 *  interrupt always enabled. Masking is done here and if the clock
 *  can not be run the tick is just counted and handled later when
 *  the clock is logically unmasked again.
 */
uint32_t
cp0_int5(uint32_t mask, struct trapframe *tf)
{
	u_int32_t clkdiff;
	struct cpu_info *ci = curcpu();

	/*
	 * If we got an interrupt before we got ready to process it,
	 * retrigger it as far as possible. cpu_initclocks() will
	 * take care of retriggering it correctly.
	 */
	if (ci->ci_clock_started == 0) {
		cp0_set_compare(cp0_get_count() - 1);

		return CR_INT_5;
	}

	/*
	 * Count how many ticks have passed since the last clock interrupt;
	 * unsigned subtraction handles count register wraparound.
	 */
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	while (clkdiff >= ci->ci_cpu_counter_interval) {
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
		ci->ci_pendingticks++;
	}
	/* ...plus the tick this interrupt delivered. */
	ci->ci_pendingticks++;
	ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;

	/*
	 * Set up next tick, and check if it has just been hit; in this
	 * case count it and schedule one tick ahead.
	 */
	cp0_set_compare(ci->ci_cpu_counter_last);
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	if ((int)clkdiff >= 0) {
		/* Compare already passed: count it and push one interval out. */
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		ci->ci_pendingticks++;
		cp0_set_compare(ci->ci_cpu_counter_last);
	}

	/*
	 * Process clock interrupt unless it is currently masked.
	 */
	if (tf->ipl < IPL_CLOCK) {
#ifdef MULTIPROCESSOR
		register_t sr;

		/* Keep IPIs serviceable while hardclock() runs. */
		sr = getsr();
		ENABLEIPI();
#endif
		while (ci->ci_pendingticks) {
			cp0_clock_count.ec_count++;
			hardclock(tf);
			ci->ci_pendingticks--;
		}
#ifdef MULTIPROCESSOR
		setsr(sr);
#endif
	}

	return CR_INT_5;	/* Clock is always on 5 */
}