Example 1
static int
clkintr(struct trapframe *frame)
{

	/*
	 * If the i8254 is also the active timecounter, account for this
	 * tick's rollover under clock_lock so the timecounter does not
	 * lose a full period it never observed.
	 */
	if (timecounter->tc_get_timecount == i8254_get_timecount) {
		mtx_lock_spin(&clock_lock);
		if (i8254_ticked)
			i8254_ticked = 0;
		else {
			i8254_offset += i8254_max_count;
			i8254_lastcount = 0;
		}
		clkintr_pending = 0;
		mtx_unlock_spin(&clock_lock);
	}
	KASSERT(!using_lapic_timer, ("clk interrupt enabled with lapic timer"));

#ifdef KDTRACE_HOOKS
	/*
	 * If the DTrace hooks are configured and a callback function
	 * has been registered, then call it to process the high speed
	 * timers.
	 */
	int cpu = PCPU_GET(cpuid);
	if (lapic_cyclic_clock_func[cpu] != NULL)
		(*lapic_cyclic_clock_func[cpu])(frame);
#endif

#ifdef SMP
	if (smp_started)
		ipi_all_but_self(IPI_HARDCLOCK);
#endif
	hardclockintr(frame);
	return (FILTER_HANDLED);
}
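
The KDTRACE_HOOKS block above is the consumer side of a simple pattern: a per-CPU table of callback pointers that the clock interrupt polls on every tick. Below is a minimal user-space sketch of that pattern; hook_table, hook_register, hook_fire and my_hook are hypothetical names, and a real kernel version would also have to publish the pointer with appropriate memory ordering.

#include <stddef.h>
#include <stdio.h>

#define	MAXCPU	4

/* Hypothetical per-CPU hook table: one callback pointer per CPU. */
typedef void (*hook_fn)(void *frame);
static hook_fn hook_table[MAXCPU];

static void
hook_register(int cpu, hook_fn fn)
{
	hook_table[cpu] = fn;
}

/* Called from the interrupt path: fire the hook only if one is set. */
static void
hook_fire(int cpu, void *frame)
{
	if (hook_table[cpu] != NULL)
		(*hook_table[cpu])(frame);
}

static void
my_hook(void *frame)
{
	printf("hook fired, frame=%p\n", frame);
}

int
main(void)
{
	hook_register(0, my_hook);
	hook_fire(0, NULL);	/* fires */
	hook_fire(1, NULL);	/* no hook registered: no-op */
	return (0);
}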
Example 2
int
profclockintr(struct trapframe *frame)
{

	if (!using_atrtc_timer)
		hardclockintr(frame);
	if (profprocs != 0)
		profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
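
profclock() needs only two facts about the interrupted context: whether it was user mode, and the interrupted program counter. On i386, for instance, the two accessors used above expand roughly as follows (paraphrased from the machine headers, not verbatim; other architectures define them against their own trap frame):

/* Illustrative i386 definitions of the accessors used above. */
#define	TRAPF_USERMODE(framep)	(ISPL((framep)->tf_cs) == SEL_UPL) /* came from ring 3? */
#define	TRAPF_PC(framep)	((framep)->tf_eip)		/* interrupted PC */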
Example 3
static int
clkintr(struct trapframe *frame)
{

	if (timecounter->tc_get_timecount == i8254_get_timecount) {
		mtx_lock_spin(&clock_lock);
		if (i8254_ticked)
			i8254_ticked = 0;
		else {
			i8254_offset += i8254_max_count;
			i8254_lastcount = 0;
		}
		clkintr_pending = 0;
		mtx_unlock_spin(&clock_lock);
	}
	KASSERT(!using_lapic_timer, ("clk interrupt enabled with lapic timer"));

	if (using_atrtc_timer) {
#ifdef SMP
		if (smp_started)
			ipi_all_but_self(IPI_HARDCLOCK);
#endif
		hardclockintr(frame);
	} else {
		/*
		 * Divide profhz down to stathz: every psratio-th tick is
		 * a statclock tick, the rest are profclock ticks.
		 */
		if (--pscnt <= 0) {
			pscnt = psratio;
#ifdef SMP
			if (smp_started)
				ipi_all_but_self(IPI_STATCLOCK);
#endif
			statclockintr(frame);
		} else {
#ifdef SMP
			if (smp_started)
				ipi_all_but_self(IPI_PROFCLOCK);
#endif
			profclockintr(frame);
		}
	}

	return (FILTER_HANDLED);
}
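
The pscnt/psratio divider in the else branch is plain integer arithmetic: the timer fires at profhz, psratio = profhz / stathz, and a countdown selects which path runs on each tick. A standalone demo of the same logic, with illustrative rates (stathz = 128, profhz = 1024):

#include <stdio.h>

#define	STATHZ	128
#define	PROFHZ	1024

int
main(void)
{
	int psratio = PROFHZ / STATHZ;	/* 8 */
	int pscnt = psratio;
	int stat = 0, prof = 0;

	/* One simulated second of timer ticks at profhz. */
	for (int tick = 0; tick < PROFHZ; tick++) {
		if (--pscnt <= 0) {
			pscnt = psratio;
			stat++;		/* statclockintr(frame) */
		} else
			prof++;		/* profclockintr(frame) */
	}
	printf("stat=%d prof=%d\n", stat, prof);	/* 128 and 896 */
	return (0);
}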
Example 4
static int
ipi_handler(void *arg)
{
	u_int	cpu, ipi;

	cpu = PCPU_GET(cpuid);

	ipi = pic_ipi_read((int)arg);

	/* The interrupt controller returns 0x3ff (spurious) when no IPI is pending. */
	while (ipi != 0x3ff) {
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			savectx(&stoppcbs[cpu]);

			/*
			 * CPUs are stopped when entering the debugger and at
			 * system shutdown, both events which can precede a
			 * panic dump.  For the dump to be correct, all caches
			 * must be flushed and invalidated, but on ARM there's
			 * no way to broadcast a wbinv_all to other cores.
			 * Instead, we have each core do the local wbinv_all as
			 * part of stopping the core.  The core requesting the
			 * stop will do the l2 cache flush after all other cores
			 * have done their l1 flushes and stopped.
			 */
			cpu_idcache_wbinv_all();

			/* Indicate we are stopped */
			CPU_SET_ATOMIC(cpu, &stopped_cpus);

			/* Wait for restart */
			while (!CPU_ISSET(cpu, &started_cpus))
				cpu_spinwait();

			CPU_CLR_ATOMIC(cpu, &started_cpus);
			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		case IPI_TLB:
			CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
			cpufuncs.cf_tlb_flushID();
			break;
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}

		pic_ipi_clear(ipi);
		ipi = pic_ipi_read(-1);
	}

	return (FILTER_HANDLED);
}
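
The IPI_STOP case is one half of a handshake: the stopped core flushes its L1, sets its bit in stopped_cpus, and spins until the requester sets the matching bit in started_cpus; per the comment, the requester flushes the shared L2 only after every other core has parked. A runnable user-space sketch of that handshake using C11 atomics follows; the bitmask names mirror stopped_cpus/started_cpus but everything here is illustrative, not the subr_smp.c code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint stopped_cpus;
static atomic_uint started_cpus;

static void *
worker(void *arg)
{
	unsigned bit = 1u << (unsigned)(uintptr_t)arg;

	/* ...imagine IPI_STOP arrived here; on ARM, flush the L1 first... */
	atomic_fetch_or(&stopped_cpus, bit);	/* CPU_SET_ATOMIC(cpu, &stopped_cpus) */
	while ((atomic_load(&started_cpus) & bit) == 0)
		;				/* cpu_spinwait() */
	atomic_fetch_and(&started_cpus, ~bit);
	atomic_fetch_and(&stopped_cpus, ~bit);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, (void *)1);
	/* Requester: wait for the other core to park. */
	while ((atomic_load(&stopped_cpus) & 2u) == 0)
		;
	/* Here the requester would flush the shared L2, then restart. */
	atomic_fetch_or(&started_cpus, 2u);
	pthread_join(t, NULL);
	printf("handshake complete\n");
	return (0);
}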