int
statclockhandler(void *aframe)
{
	struct clockframe *frame = aframe;
	int newint, r;
	int currentclock;

	/* start the clock off again */
	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
	    TIMER_2_CLEAR, 0);

	/*
	 * Pick a random, nonzero interval; statvar is a power of two,
	 * so the mask yields a uniform value in [0, statvar).
	 */
	do {
		r = random() & (statvar-1);
	} while (r == 0);
	newint = statmin + (r * statcountperusec);
	
	/* fetch the current count */
	currentclock = bus_space_read_4(clock_sc->sc_iot, clock_sc->sc_ioh,
		    TIMER_2_VALUE);

	/*
	 * Work out how much time has already run, and add another
	 * usec for the time spent in this handler.
	 */
	r = ((statprev - currentclock) + statcountperusec);

	if (r < newint) {
		newint -= r;
		r = 0;
	} else
		printf("statclockhandler: Statclock overrun\n");

	/*
	 * Update the clock with the new counter value; this reloads
	 * the existing timer.
	 */
	bus_space_write_4(clock_sc->sc_iot, clock_sc->sc_ioh,
	    TIMER_2_LOAD, newint);
	statprev = newint;
	statclock(frame);
	if (r)
		/*
		 * We've completely overrun the previous interval;
		 * make sure we report the correct number of ticks.
		 */
		statclock(frame);

	return 0;	/* Pass the interrupt on down the chain */
}
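Two of the later handlers (statintr_4m and statintr_4) call new_interval() without showing its body. Judging from the inline computation above, a minimal version could look like the following sketch; statvar and statmin are assumed globals mirroring the ones used above, and whether the result is in microseconds or raw counter ticks depends on the platform, so treat this as an illustration of the pattern rather than actual kernel source.

/*
 * Illustrative sketch only: assumes statvar is a power of two, so
 * the mask yields a uniform value in [0, statvar), and statmin is
 * the shortest permitted interval.
 */
static u_long
new_interval(void)
{
	u_long r;

	/* reject 0 so the timer is never reloaded with a zero count */
	do {
		r = random() & (statvar - 1);
	} while (r == 0);

	return (statmin + r);
}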
Example #2
/*
 * Maskable IPIs.
 *
 * These IPIs are received as non maskable, but are not processed in
 * the NMI handler; instead, they are processed from the soft interrupt
 * handler.
 *
 * XXX This is grossly suboptimal.
 */
void
m197_soft_ipi(void)
{
	struct cpu_info *ci = curcpu();
	struct trapframe faketf;
	int s;

	__mp_lock(&kernel_lock);
	s = splclock();

	/* replay a hardclock tick posted by the NMI handler, if any */
	if (ci->ci_h_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_h_sxip;
		faketf.tf_epsr = ci->ci_h_epsr;
		ci->ci_h_sxip = 0;
		hardclock((struct clockframe *)&faketf);
	}

	/* likewise, replay a posted statclock tick */
	if (ci->ci_s_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_s_sxip;
		faketf.tf_epsr = ci->ci_s_epsr;
		ci->ci_s_sxip = 0;
		statclock((struct clockframe *)&faketf);
	}

	splx(s);
	__mp_unlock(&kernel_lock);
}
int
statclockhandler(void *cookie)
{
	struct clockframe *frame = cookie;

	statclock(frame);
	return 0;	/* Pass the interrupt on down the chain */
}
int
statclockintr(struct trapframe *frame)
{

	profclockintr(frame);
	statclock(TRAPF_USERMODE(frame));
	return (FILTER_HANDLED);
}
Example #5
static int
statintr(void *arg)
{
	struct clockframe *frame = arg;

	statclock(frame);

	return (1);
}
Example #6
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();

	p = curproc;
	if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
		struct process *pr = p->p_p;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_ALRMPEND);
			need_proftick(p);
		}
		if (timerisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_PROF], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_PROFPEND);
			need_proftick(p);
		}
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/* force a reschedule when the round-robin quantum has expired */
	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;

	tc_ticktock();
	ticks++;

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (timeout_hardclock_update())
		softintr_schedule(softclock_si);
}
/*
 * Level 14 (stat clock) interrupts from processor counter.
 */
int
statintr_4m(void *cap)
{
	struct clockframe *frame = cap;
	u_long newint;

	kpreempt_disable();

	/* read the limit register to clear the interrupt */
	*((volatile int *)&counterreg4m->t_limit);

	statclock(frame);

	/*
	 * Compute new randomized interval.
	 */
	newint = new_interval();

	/*
	 * Use the `non-resetting' limit register, so we don't
	 * lose the counter ticks that happened since this
	 * interrupt was raised.
	 */
	counterreg4m->t_limit_nr = tmr_ustolim4m(newint);

	/*
	 * The factor 8 is only valid for stathz==100.
	 * See also clock.c
	 */
	if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0) {
		if (CLKF_LOPRI(frame, IPL_SCHED)) {
			/* No need to schedule a soft interrupt */
			spllowerschedclock();
			schedintr(cap);
		} else {
			/*
			 * We're interrupting a thread that may have the
			 * scheduler lock; run schedintr() on this CPU later.
			 */
			raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */
		}
	}
	kpreempt_enable();

	return (1);
}
Example #8
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct cpu_info *ci;

	ci = curcpu();
	l = ci->ci_data.cpu_onproc;

	timer_tick(l, CLKF_USERMODE(frame));

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	/*
	 * If no separate schedclock is provided, call it here
	 * at about 16 Hz.
	 */
	if (schedhz == 0) {
		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
			schedclock(l);
			ci->ci_schedstate.spc_schedticks = hardscheddiv;
		}
	}
	if ((--ci->ci_schedstate.spc_ticks) <= 0)
		sched_tick(ci);

	if (CPU_IS_PRIMARY(ci)) {
		hardclock_ticks++;
		tc_ticktock();
	}

	/*
	 * Update real-time timeout queue.
	 */
	callout_hardclock();

#ifdef KDTRACE_HOOKS
	cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)];
	if (func) {
		(*func)((struct clockframe *)frame);
	}
#endif
}
Example #9
/*
 * Level 14 (stat clock) interrupts from processor counter.
 */
int
statintr_4(void *cap)
{
	struct clockframe *frame = cap;
	u_long newint;

	/* read the limit register to clear the interrupt */
	*((volatile int *)&timerreg4->t_c14.t_limit);

	statclock(frame);

	/*
	 * Compute new randomized interval.
	 */
	newint = new_interval();

	/*
	 * The sun4/4c timer has no `non-resetting' register;
	 * use the current counter value to compensate the new
	 * limit value for the number of counter ticks elapsed.
	 */
	newint -= tmr_cnttous(timerreg4->t_c14.t_counter);
	timerreg4->t_c14.t_limit = tmr_ustolim(newint);

	/*
	 * The factor 8 is only valid for stathz==100.
	 * See also clock.c
	 */
	if (curlwp && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
		if (CLKF_LOPRI(frame, IPL_SCHED)) {
			/* No need to schedule a soft interrupt */
			spllowerschedclock();
			schedintr(cap);
		} else {
			/*
			 * We're interrupting a thread that may have the
			 * scheduler lock; run schedintr() later.
			 */
			sparc_softintr_schedule(sched_cookie);
		}
	}

	return (1);
}
static int
statintr(void *arg)
{
	struct saost_softc *sc = saost_sc;
	struct clockframe *frame = arg;
	uint32_t oscr, nextmatch, oldmatch;
	int s;

	/* clear the match-1 status bit to acknowledge the interrupt */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SAOST_SR, 2);

	/* schedule next clock intr */
	oldmatch = sc->sc_statclock_count;
	nextmatch = oldmatch + sc->sc_statclock_step;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SAOST_MR1, nextmatch);
	oscr = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SAOST_CR);

	if ((nextmatch > oldmatch &&
	     (oscr > nextmatch || oscr < oldmatch)) ||
	    (nextmatch < oldmatch && oscr > nextmatch && oscr < oldmatch)) {
		/*
		 * We couldn't set the match register in time.  Just set
		 * it to some value so that the next interrupt happens.
		 * XXX is it possible to compensate for lost interrupts?
		 */

		s = splhigh();
		oscr = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SAOST_CR);
		nextmatch = oscr + 10;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, SAOST_MR1, nextmatch);
		splx(s);
	}

	sc->sc_statclock_count = nextmatch;
	statclock(frame);

	return 1;
}
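The wraparound test in the middle of this handler is the subtle part: with a free-running 32-bit counter, "did we program the match register in time?" must be answered modulo 2^32. The same logic, restated as a standalone predicate with illustrative names:

/*
 * Illustrative restatement of the check above: returns nonzero when
 * the counter (oscr) is already outside the half-open window between
 * oldmatch and nextmatch, i.e. the match register was set too late.
 */
static int
missed_match(uint32_t oldmatch, uint32_t nextmatch, uint32_t oscr)
{
	if (nextmatch > oldmatch)
		/* window does not wrap */
		return (oscr > nextmatch || oscr < oldmatch);
	/* window wraps through zero */
	return (oscr > nextmatch && oscr < oldmatch);
}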
Example #11
int
amptimer_intr(void *frame)
{
	struct amptimer_softc	*sc = amptimer_cd.cd_devs[0];
	struct amptimer_pcpu_softc *pc = &sc->sc_pstat[CPU_INFO_UNIT(curcpu())];
	uint64_t		 now;
	uint64_t		 nextevent;
	uint32_t		 r, reg;
#if defined(USE_GTIMER_CMP)
	int			 skip = 1;
#else
	int64_t			 delay;
#endif
	int			 rc = 0;

	/*
	 * DSR - I know that the tick timer is 64 bits, but the following
	 * code deals with rollover, so there is no point in doing the
	 * 64-bit math; just let the 32-bit rollover do the right thing.
	 */

	now = amptimer_readcnt64(sc);

	/* fire hardclock() once for each full tick period that has elapsed */
	while (pc->pc_nexttickevent <= now) {
		pc->pc_nexttickevent += sc->sc_ticks_per_intr;
		pc->pc_ticks_err_sum += sc->sc_ticks_err_cnt;

		/* looping a few times is faster than divide */
		while (pc->pc_ticks_err_sum > hz) {
			pc->pc_nexttickevent += 1;
			pc->pc_ticks_err_sum -= hz;
		}

#ifdef AMPTIMER_DEBUG
		sc->sc_clk_count.ec_count++;
#endif
		rc = 1;
		hardclock(frame);
	}
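	/* likewise, run statclock() for each elapsed randomized stat interval */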
	while (pc->pc_nextstatevent <= now) {
		do {
			r = random() & (sc->sc_statvar - 1);
		} while (r == 0); /* random == 0 not allowed */
		pc->pc_nextstatevent += sc->sc_statmin + r;

		/* XXX - correct nextstatevent? */
#ifdef AMPTIMER_DEBUG
		sc->sc_stat_count.ec_count++;
#endif
		rc = 1;
		statclock(frame);
	}

	if (pc->pc_nexttickevent < pc->pc_nextstatevent)
		nextevent = pc->pc_nexttickevent;
	else
		nextevent = pc->pc_nextstatevent;

#if defined(USE_GTIMER_CMP)
again:
	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIMER_CTRL);
	reg &= ~GTIMER_CTRL_COMP;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIMER_CTRL, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIMER_CMP_LOW,
	    nextevent & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIMER_CMP_HIGH,
	    nextevent >> 32);
	reg |= GTIMER_CTRL_COMP;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIMER_CTRL, reg);

	now = amptimer_readcnt64(sc);
	if (now >= nextevent) {
		nextevent = now + skip;
		skip += 1;
		goto again;
	}
#else
	/* clear old status */
	bus_space_write_4(sc->sc_iot, sc->sc_pioh, PTIMER_STATUS,
	    PTIMER_STATUS_EVENT);

	delay = nextevent - now;
	if (delay < 0)
		delay = 1;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_pioh, PTIMER_CTRL);
	if ((reg & (PTIMER_CTRL_ENABLE | PTIMER_CTRL_IRQEN)) !=
	    (PTIMER_CTRL_ENABLE | PTIMER_CTRL_IRQEN))
		bus_space_write_4(sc->sc_iot, sc->sc_pioh, PTIMER_CTRL,
		    (PTIMER_CTRL_ENABLE | PTIMER_CTRL_IRQEN));

	bus_space_write_4(sc->sc_iot, sc->sc_pioh, PTIMER_LOAD, delay);
#endif

	return (rc);
}
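The pc_ticks_err_sum arithmetic above is a Bresenham-style error accumulator: the loop implies that sc_ticks_per_intr is the integer quotient of the timer frequency by hz and sc_ticks_err_cnt is the remainder (the setup code is not shown in this excerpt), so adding the remainder on every tick and paying out one extra count whenever the sum exceeds hz keeps the long-run rate exact without a division in the interrupt path. A standalone sketch with hypothetical names:

/* Illustrative sketch of the error-accumulation pattern above. */
struct tickstate {
	uint64_t next;		/* counter value of the next tick event */
	uint32_t err_sum;	/* accumulated fractional ticks */
};

static void
advance_tick(struct tickstate *ts, uint32_t ticks_per_intr,
    uint32_t err_cnt, int hz)
{
	ts->next += ticks_per_intr;	/* whole ticks: freq / hz */
	ts->err_sum += err_cnt;		/* remainder: freq % hz */

	/* err_cnt < hz, so this loop iterates at most once per call */
	while (ts->err_sum > hz) {
		ts->next++;
		ts->err_sum -= hz;
	}
}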
Example #12
void
interrupt(u_int64_t vector, struct trapframe *framep)
{
	struct thread *td;
	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;

	td = curthread;
	atomic_add_int(&td->td_intr_nesting_level, 1);

	/*
	 * Handle ExtINT interrupts by generating an INTA cycle to
	 * read the vector.
	 */
	if (vector == 0) {
		vector = ib->ib_inta;
		printf("ExtINT interrupt: vector=%ld\n", vector);
	}

	if (vector == 255) {	/* clock interrupt */
		/* CTR0(KTR_INTR, "clock interrupt"); */
			
		cnt.v_intr++;
#ifdef EVCNT_COUNTERS
		clock_intr_evcnt.ev_count++;
#else
		intrcnt[INTRCNT_CLOCK]++;
#endif
		critical_enter();
#ifdef SMP
		clks[PCPU_GET(cpuid)]++;
		/* Only the BSP runs the real clock */
		if (PCPU_GET(cpuid) == 0) {
#endif
			handleclock(framep);
			/* divide hz (1024) by 8 to get stathz (128) */
			if ((++schedclk2 & 0x7) == 0)
				statclock((struct clockframe *)framep);
#ifdef SMP
		} else {
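			/* reprime the interval timer match register for the next tick */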
			ia64_set_itm(ia64_get_itc() + itm_reload);
			mtx_lock_spin(&sched_lock);
			hardclock_process(curthread, TRAPF_USERMODE(framep));
			if ((schedclk2 & 0x7) == 0)
				statclock_process(curkse, TRAPF_PC(framep),
				    TRAPF_USERMODE(framep));
			mtx_unlock_spin(&sched_lock);
		}
#endif
		critical_exit();
#ifdef SMP
	} else if (vector == ipi_vector[IPI_AST]) {
		asts[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
		rdvs[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
		smp_rendezvous_action();
	} else if (vector == ipi_vector[IPI_STOP]) {
		u_int32_t mybit = PCPU_GET(cpumask);

		CTR1(KTR_SMP, "IPI_STOP, cpuid=%d", PCPU_GET(cpuid));
		savectx(PCPU_GET(pcb));
		stopped_cpus |= mybit;
		while ((started_cpus & mybit) == 0)
			/* spin */;
		started_cpus &= ~mybit;
		stopped_cpus &= ~mybit;
		if (PCPU_GET(cpuid) == 0 && cpustop_restartfunc != NULL) {
			void (*f)(void) = cpustop_restartfunc;
			cpustop_restartfunc = NULL;
			(*f)();
		}
	} else if (vector == ipi_vector[IPI_TEST]) {
		CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid));
		mp_ipi_test++;
#endif
	} else {
		ints[PCPU_GET(cpuid)]++;
		ia64_dispatch_intr(framep, vector);
	}

	atomic_subtract_int(&td->td_intr_nesting_level, 1);
}
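The (++schedclk2 & 0x7) == 0 test above, like the "& 7" tests in statintr_4m and statintr_4, is the usual power-of-two divider: run a slower periodic task on every 2^n-th firing of a faster one by masking a free-running counter, which avoids a modulo in the interrupt path. A trivial, illustrative sketch with hypothetical names:

/* illustrative only: invoke the hypothetical slow_tick() on every 8th fast tick */
static void slow_tick(void);
static unsigned int divct;

static void
fast_tick(void)
{
	if ((++divct & 0x7) == 0)
		slow_tick();
}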