Example #1
static int
clkintr(struct trapframe *frame)
{

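	/*
	 * If the i8254 is the active timecounter, account for this tick
	 * under clock_lock so i8254_get_timecount() stays monotonic
	 * across the counter reload.
	 */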
	if (timecounter->tc_get_timecount == i8254_get_timecount) {
		mtx_lock_spin(&clock_lock);
		if (i8254_ticked)
			i8254_ticked = 0;
		else {
			i8254_offset += i8254_max_count;
			i8254_lastcount = 0;
		}
		clkintr_pending = 0;
		mtx_unlock_spin(&clock_lock);
	}
	KASSERT(!using_lapic_timer, ("clk interrupt enabled with lapic timer"));

#ifdef KDTRACE_HOOKS
	/*
	 * If the DTrace hooks are configured and a callback function
	 * has been registered, then call it to process the high speed
	 * timers.
	 */
	int cpu = PCPU_GET(cpuid);
	if (lapic_cyclic_clock_func[cpu] != NULL)
		(*lapic_cyclic_clock_func[cpu])(frame);
#endif

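	/* Fan hardclock out to the other CPUs, then run it locally. */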
#ifdef SMP
	if (smp_started)
		ipi_all_but_self(IPI_HARDCLOCK);
#endif 
	hardclockintr(frame);
	return (FILTER_HANDLED);
}
Example #2
static int
clkintr(struct trapframe *frame)
{

	if (timecounter->tc_get_timecount == i8254_get_timecount) {
		mtx_lock_spin(&clock_lock);
		if (i8254_ticked)
			i8254_ticked = 0;
		else {
			i8254_offset += i8254_max_count;
			i8254_lastcount = 0;
		}
		clkintr_pending = 0;
		mtx_unlock_spin(&clock_lock);
	}
	KASSERT(!using_lapic_timer, ("clk interrupt enabled with lapic timer"));

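	/*
	 * With a working ATRTC the i8254 interrupt only drives
	 * hardclock; otherwise the pscnt divider dispatches statclock
	 * every psratio-th tick and profclock on the rest.
	 */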
	if (using_atrtc_timer) {
#ifdef SMP
		if (smp_started)
			ipi_all_but_self(IPI_HARDCLOCK);
#endif
		hardclockintr(frame);
	} else {
		if (--pscnt <= 0) {
			pscnt = psratio;
#ifdef SMP
			if (smp_started)
				ipi_all_but_self(IPI_STATCLOCK);
#endif
			statclockintr(frame);
		} else {
#ifdef SMP
			if (smp_started)
				ipi_all_but_self(IPI_PROFCLOCK);
#endif
			profclockintr(frame);
		}
	}

	return (FILTER_HANDLED);
}
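The pscnt/psratio pair in the else branch is a simple software divider: with profiling interrupts at 1024 Hz and the stat clock at 128 Hz (the rates quoted in Example #3's comment), psratio is 8, so every eighth fast tick dispatches statclock and the other seven dispatch profclock. Below is a minimal userland sketch of the pattern; the macro names and initialization are illustrative, not FreeBSD's actual setup.

#include <stdio.h>

/* Illustrative rates: profiling at 1024 Hz, statistics at 128 Hz. */
#define	PROFHZ	1024
#define	STATHZ	128

static int psratio = PROFHZ / STATHZ;	/* 8: statclock every 8th tick */
static int pscnt = PROFHZ / STATHZ;

/* Called once per fast timer tick; mirrors the divider in clkintr(). */
static void
divider_tick(int tick)
{

	if (--pscnt <= 0) {
		pscnt = psratio;
		printf("tick %2d: statclock\n", tick);
	} else
		printf("tick %2d: profclock\n", tick);
}

int
main(void)
{
	int i;

	for (i = 0; i < 16; i++)
		divider_tick(i);
	return (0);
}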
Example #3
/*
 * This routine receives statistical clock interrupts from the RTC.
 * As explained above, these occur at 128 interrupts per second.
 * When profiling, we receive interrupts at a rate of 1024 Hz.
 *
 * This does not actually add as much overhead as it sounds, because
 * when the statistical clock is active, the hardclock driver no longer
 * needs to keep (inaccurate) statistics on its own.  This decouples
 * statistics gathering from scheduling interrupts.
 *
 * The RTC chip requires that we read status register C (RTC_INTR)
 * to acknowledge an interrupt, before it will generate the next one.
 * Under high interrupt load, rtcintr() can be indefinitely delayed and
 * the clock can tick immediately after the read from RTC_INTR.  In this
 * case, the mc146818A interrupt signal will not drop for long enough
 * to register with the 8259 PIC.  If an interrupt is missed, the stat
 * clock will halt, considerably degrading system performance.  This is
 * why we use 'while' rather than a more straightforward 'if' below.
 * Stat clock ticks can still be lost, causing minor loss of accuracy
 * in the statistics, but the stat clock will no longer stop.
 */
static int
rtcintr(struct trapframe *frame)
{
	int flag = 0;

	while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
		flag = 1;
		if (--pscnt <= 0) {
			pscnt = psdiv;
#ifdef SMP
			if (smp_started)
				ipi_all_but_self(IPI_STATCLOCK);
#endif
			statclockintr(frame);
		} else {
#ifdef SMP
			if (smp_started)
				ipi_all_but_self(IPI_PROFCLOCK);
#endif
			profclockintr(frame);
		}
	}
	return (flag ? FILTER_HANDLED : FILTER_STRAY);
}
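The while-rather-than-if rule in the comment generalizes to any source whose interrupt output stays asserted until a status register is read: drain the status until it reads clear, and report a stray interrupt if nothing was pending. A device-neutral sketch of the pattern follows; read_status() and IR_PERIODIC are hypothetical stand-ins for rtcin(RTC_INTR) and RTCIR_PERIOD, while the FILTER_* values match FreeBSD's sys/bus.h.

#include <stdio.h>

#define	IR_PERIODIC	0x40	/* illustrative periodic-interrupt bit */
#define	FILTER_STRAY	0x01	/* values as in FreeBSD's sys/bus.h */
#define	FILTER_HANDLED	0x02

/*
 * Stand-in for rtcin(RTC_INTR): reading status also acknowledges the
 * device.  Here it reports two pending periods, then reads clear.
 */
static unsigned int
read_status(void)
{
	static int pending = 2;

	return (pending-- > 0 ? IR_PERIODIC : 0);
}

static int
drain_intr(void)
{
	int flag = 0;

	/*
	 * A plain 'if' could leave the interrupt line asserted too
	 * briefly for the PIC to see an edge; loop until the status
	 * bit reads clear.
	 */
	while (read_status() & IR_PERIODIC) {
		flag = 1;
		printf("dispatching periodic work\n");
	}
	return (flag ? FILTER_HANDLED : FILTER_STRAY);
}

int
main(void)
{

	printf("result: %d\n", drain_intr());
	return (0);
}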
Example #4
static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1,
    vm_offset_t addr2)
{
	int cpu, ncpu, othercpus;
	struct _call_data data;

	othercpus = mp_ncpus - 1;
	if (CPU_ISFULLSET(&mask)) {
		if (othercpus < 1)
			return;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;		
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
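	/* Full mask: one broadcast IPI; otherwise target each CPU in turn. */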
	if (CPU_ISFULLSET(&mask)) {
		ncpu = othercpus;
		ipi_all_but_self(vector);
	} else {
		ncpu = 0;
		while ((cpu = cpusetobj_ffs(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
			    vector);
			ipi_send_cpu(cpu, vector);
			ncpu++;
		}
	}
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}
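Callers supply the concrete IPI vector and addresses. A hedged sketch of a mask-targeted wrapper is below; the function name is hypothetical, while IPI_INVLTLB is FreeBSD's real full-flush vector.

/*
 * Illustrative wrapper: flush the whole TLB on every CPU in 'mask'.
 * FreeBSD's actual smp_masked_* wrappers in mp_machdep.c follow this
 * shape.
 */
static void
example_masked_invltlb(cpuset_t mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}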
Example #5
/* timer2 callback. */
static void
timer2cb(struct eventtimer *et, void *arg)
{

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers */
	if (smp_started && (et->et_flags & ET_FLAGS_PERCPU) == 0)
		ipi_all_but_self(IPI_STATCLOCK);
#endif
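	/* While testing, cross-check after ~2 s worth of timer2 ticks. */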
	if (timertest) {
		if ((et->et_flags & ET_FLAGS_PERCPU) == 0 || curcpu == 0) {
			timerticks[1]++;
			if (timerticks[1] >= timer2hz * 2) {
				ET_LOCK();
				timercheck();
				ET_UNLOCK();
			}
		}
	}
	statclockhandler(curthread->td_intr_frame);
}
Example #6
/*
 * Reconfigure the specified timer.
 * For per-CPU timers, an IPI makes the other CPUs reconfigure theirs.
 */
static void
configtimer(int i)
{
#ifdef SMP
	tc *conf;
	int cpu;

	critical_enter();
#endif
	/* Start/stop global timer or per-CPU timer of this CPU. */
	if (i == 0 ? timer1hz : timer2hz)
		et_start(timer[i], NULL, &timerperiod[i]);
	else
		et_stop(timer[i]);
#ifdef SMP
	if ((timer[i]->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		conf = DPCPU_ID_PTR(cpu, configtimer);
		atomic_store_rel_int(*conf + i, (cpu == curcpu) ? 0 : 1);
	}
	/* Send reconfigure IPI. */
	ipi_all_but_self(i == 0 ? IPI_HARDCLOCK : IPI_STATCLOCK);
	/* Wait for reconfiguration completed. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		conf = DPCPU_ID_PTR(cpu, configtimer);
		if (atomic_load_acq_int(*conf + i))
			goto restart;
	}
	critical_exit();
#endif
}
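The spin loop above waits for each remote CPU's IPI handler to clear its flag with a releasing store. How that handler side might look is sketched below; the function name is hypothetical, and the body assumes the same tc/DPCPU machinery this example uses.

/* Hypothetical remote-CPU side of the configtimer() handshake. */
static void
handle_reconfigure(int i)
{
	tc *conf;

	conf = DPCPU_PTR(configtimer);
	if (atomic_load_acq_int(*conf + i) == 0)
		return;		/* nothing requested for this timer */
	/* Start or stop this CPU's instance of the per-CPU timer. */
	if (i == 0 ? timer1hz : timer2hz)
		et_start(timer[i], NULL, &timerperiod[i]);
	else
		et_stop(timer[i]);
	/* This release store pairs with the acquire loads in configtimer(). */
	atomic_store_rel_int(*conf + i, 0);
}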
Example #7
/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;
	struct _call_data data;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}
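The broadcast wrappers built on this are one-liners. A sketch with illustrative names follows; IPI_INVLTLB and IPI_INVLPG are FreeBSD's real vectors, but the example_* functions are hypothetical.

/* Illustrative one-line wrappers over smp_tlb_shootdown(). */
static void
example_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

static void
example_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}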