Example No. 1
void
dosoftint()
{
	struct cpu_info *ci = curcpu();
	int sir, q, mask;
#ifdef MULTIPROCESSOR
	register_t sr;

	/* Enable interrupts */
	sr = getsr();
	ENABLEIPI();
	__mp_lock(&kernel_lock);
#endif

	while ((sir = ci->ci_softpending) != 0) {
		atomic_clearbits_int(&ci->ci_softpending, sir);

		for (q = SI_NQUEUES - 1; q >= 0; q--) {
			mask = SINTMASK(q);
			if (sir & mask)
				softintr_dispatch(q);
		}
	}

#ifdef MULTIPROCESSOR
	__mp_unlock(&kernel_lock);
	setsr(sr);
#endif
}
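
The loop above latches ci_softpending and dispatches the queued soft interrupts, highest queue first. Below is a minimal, stand-alone user-space model of that post/drain protocol, using C11 atomics in place of atomic_setbits_int()/atomic_clearbits_int(); the names post_soft() and drain_soft() are made up for illustration and are not part of the kernel API.

#include <stdatomic.h>
#include <stdio.h>

#define SI_NQUEUES	4
#define SINTMASK(q)	(1U << (q))

static _Atomic unsigned int softpending;

static void
post_soft(int q)
{
	/* Stand-in for atomic_setbits_int(&ci->ci_softpending, ...). */
	atomic_fetch_or(&softpending, SINTMASK(q));
}

static void
drain_soft(void)
{
	unsigned int sir;
	int q;

	/*
	 * Latch and clear in a single atomic step; the kernel code
	 * reads, then clears exactly the bits it read, to the same
	 * effect.  Bits posted afterwards survive for the next pass.
	 */
	while ((sir = atomic_exchange(&softpending, 0)) != 0) {
		for (q = SI_NQUEUES - 1; q >= 0; q--)
			if (sir & SINTMASK(q))
				printf("dispatch queue %d\n", q);
	}
}

int
main(void)
{
	post_soft(1);
	post_soft(3);
	drain_soft();
	return 0;
}

Because posting and latching are both atomic on the same word, a bit posted while the drain loop is running is never lost; it is simply picked up on the next iteration of the while loop.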
Example No. 2
/*
 * Maskable IPIs.
 *
 * These IPIs are received as non-maskable, but are not processed in
 * the NMI handler; instead, they are processed from the soft interrupt
 * handler.
 *
 * XXX This is grossly suboptimal.
 */
void
m197_soft_ipi()
{
	struct cpu_info *ci = curcpu();
	struct trapframe faketf;
	int s;

	__mp_lock(&kernel_lock);
	s = splclock();

	if (ci->ci_h_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_h_sxip;
		faketf.tf_epsr = ci->ci_h_epsr;
		ci->ci_h_sxip = 0;
		hardclock((struct clockframe *)&faketf);
	}

	if (ci->ci_s_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_s_sxip;
		faketf.tf_epsr = ci->ci_s_epsr;
		ci->ci_s_sxip = 0;
		statclock((struct clockframe *)&faketf);
	}

	splx(s);
	__mp_unlock(&kernel_lock);
}
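
This handler rebuilds a clockframe from fields that the IPI handler saved earlier (see Example No. 6 below). The stand-alone sketch that follows only models that save/replay idea; every name in it (fake_frame, soft_clock_ipi, ...) is hypothetical and the values are placeholders.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_frame {
	uint32_t f_pc;
	uint32_t f_psr;
};

/* What the IPI handler would have recorded from the real trapframe. */
static uint32_t saved_pc, saved_psr;

static void
fake_hardclock(struct fake_frame *f)
{
	printf("hardclock at pc=%#" PRIx32 " psr=%#" PRIx32 "\n",
	    f->f_pc, f->f_psr);
}

static void
soft_clock_ipi(void)
{
	struct fake_frame f;

	if (saved_pc != 0) {
		/* Rebuild a frame good enough for the clock code. */
		f.f_pc = saved_pc;
		f.f_psr = saved_psr;
		saved_pc = 0;
		fake_hardclock(&f);
	}
}

int
main(void)
{
	saved_pc = 0x1000;		/* stand-in for tf_sxip */
	saved_psr = 0x800003f0;		/* stand-in for tf_epsr */
	soft_clock_ipi();
	return 0;
}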
Example No. 3
void
softintr_biglock_wrap(void *arg)
{
	struct i386_soft_intrhand	*sih = arg;

	__mp_lock(&kernel_lock);
	(*sih->sih_fnwrap)(sih->sih_argwrap);
	__mp_unlock(&kernel_lock);
}
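
The wrapper simply brackets the real handler with the big kernel lock. The sketch below is a user-space model of how such a wrapper is typically wired in at registration time: a pthread mutex stands in for kernel_lock, and the struct, the establish() helper and the mpsafe flag are all invented for the illustration, not the actual i386 softintr API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fake_kernel_lock = PTHREAD_MUTEX_INITIALIZER;

struct soft_intrhand {
	void	(*sih_fn)(void *);	/* what the dispatcher calls */
	void	*sih_arg;
	void	(*sih_fnwrap)(void *);	/* the real handler */
	void	*sih_argwrap;
};

static void
biglock_wrap(void *arg)
{
	struct soft_intrhand *sih = arg;

	pthread_mutex_lock(&fake_kernel_lock);
	(*sih->sih_fnwrap)(sih->sih_argwrap);
	pthread_mutex_unlock(&fake_kernel_lock);
}

static void
establish(struct soft_intrhand *sih, void (*fn)(void *), void *arg,
    int mpsafe)
{
	if (mpsafe) {
		sih->sih_fn = fn;
		sih->sih_arg = arg;
	} else {
		/* Route the handler through the big-lock wrapper. */
		sih->sih_fnwrap = fn;
		sih->sih_argwrap = arg;
		sih->sih_fn = biglock_wrap;
		sih->sih_arg = sih;
	}
}

static void
net_handler(void *arg)
{
	printf("handler runs with the big lock held: %s\n",
	    (const char *)arg);
}

int
main(void)
{
	struct soft_intrhand sih;

	establish(&sih, net_handler, "softnet", 0);
	(*sih.sih_fn)(sih.sih_arg);	/* what the dispatcher would do */
	return 0;
}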
Example No. 4
/*
 * softintr_dispatch:
 *
 *	Process pending software interrupts.
 */
void
softintr_dispatch()
{
	struct alpha_soft_intr *asi;
	struct alpha_soft_intrhand *sih;
	u_int64_t n, i;

#if defined(MULTIPROCESSOR)
	__mp_lock(&kernel_lock);
#endif

	while ((n = atomic_loadlatch_ulong(&ssir, 0)) != 0) {
		for (i = 0; i < SI_NSOFT; i++) {
			if ((n & (1 << i)) == 0)
				continue;
	
			asi = &alpha_soft_intrs[i];

			for (;;) {
				mtx_enter(&asi->softintr_mtx);

				sih = TAILQ_FIRST(&asi->softintr_q);
				if (sih == NULL) {
					mtx_leave(&asi->softintr_mtx);
					break;
				}
				TAILQ_REMOVE(&asi->softintr_q, sih, sih_q);
				sih->sih_pending = 0;

				atomic_add_int(&uvmexp.softs, 1);

				mtx_leave(&asi->softintr_mtx);

				(*sih->sih_fn)(sih->sih_arg);
			}
		}
	}

#if defined(MULTIPROCESSOR)
	__mp_unlock(&kernel_lock);
#endif
}
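
Note the queue discipline above: each handler is unlinked and marked not-pending while softintr_mtx is held, but the handler function itself runs with the mutex released. The following self-contained user-space model reproduces that discipline; the names are invented and a pthread mutex plays the role of softintr_mtx.

#include <pthread.h>
#include <stdio.h>
#include <sys/queue.h>

struct soft_handler {
	TAILQ_ENTRY(soft_handler) sh_q;
	void	(*sh_fn)(void *);
	void	*sh_arg;
	int	sh_pending;
};

static TAILQ_HEAD(, soft_handler) soft_q = TAILQ_HEAD_INITIALIZER(soft_q);
static pthread_mutex_t soft_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
soft_drain(void)
{
	struct soft_handler *sh;

	for (;;) {
		pthread_mutex_lock(&soft_mtx);
		sh = TAILQ_FIRST(&soft_q);
		if (sh == NULL) {
			pthread_mutex_unlock(&soft_mtx);
			break;
		}
		TAILQ_REMOVE(&soft_q, sh, sh_q);
		sh->sh_pending = 0;
		pthread_mutex_unlock(&soft_mtx);

		/* The handler runs without the queue mutex held. */
		(*sh->sh_fn)(sh->sh_arg);
	}
}

static void
hello(void *arg)
{
	printf("soft handler: %s\n", (const char *)arg);
}

int
main(void)
{
	struct soft_handler sh = { .sh_fn = hello, .sh_arg = "hi",
	    .sh_pending = 1 };

	pthread_mutex_lock(&soft_mtx);
	TAILQ_INSERT_TAIL(&soft_q, &sh, sh_q);
	pthread_mutex_unlock(&soft_mtx);

	soft_drain();
	return 0;
}

Running handlers outside the mutex means a handler is free to requeue itself (or others) without deadlocking against the drain loop.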
Example No. 5
/*
 * Interrupt dispatcher.
 */
uint32_t
obio_iointr(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	int cpuid = cpu_number();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc;
	uint64_t sum0 = CIU_IP2_SUM0(cpuid);
	uint64_t en0 = CIU_IP2_EN0(cpuid);

	isr = bus_space_read_8(&obio_tag, obio_h, sum0);
	imr = bus_space_read_8(&obio_tag, obio_h, en0);
	bit = 63;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(&obio_tag, obio_h, en0, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & obio_imask[cpuid][frame->ipl]) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			tmpisr = isr & (obio_imask[cpuid][lvl] ^ obio_imask[cpuid][lvl - 1]);
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = (struct intrhand *)obio_intrhand[bitno];
					ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					if ((*ih->ih_fun)(ih->ih_arg) != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");
				}
				if (rc == 0)
					printf("spurious crime interrupt %d\n", bitno);

				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		bus_space_write_8(&obio_tag, obio_h, en0, imr);
	}
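
The key trick above is the cumulative per-ipl mask table: obio_imask[cpuid][lvl] holds every source blocked at level lvl, so XORing adjacent levels isolates the sources registered at exactly that level, and pending sources already covered by the current ipl are left masked for splx() to replay later. The stand-alone model below reproduces that arithmetic with a made-up imask[] table; it is an illustration of the masking scheme, not the actual obio code.

#include <stdint.h>
#include <stdio.h>

#define NIPLS		4
#define IPL_NONE	0

/* Hypothetical cumulative mask table: bit i stands for source i. */
static const uint64_t imask[NIPLS] = {
	0x0,	/* IPL_NONE blocks nothing */
	0x1,	/* level 1 blocks source 0 */
	0x3,	/* level 2 blocks sources 0-1 */
	0x7,	/* level 3 blocks sources 0-2 */
};

static void
dispatch(uint64_t isr, int cur_ipl)
{
	uint64_t mask, tmpisr;
	int lvl, bitno;

	/* Sources blocked at the current ipl stay masked for splx(). */
	mask = isr & imask[cur_ipl];
	isr &= ~mask;

	/* Service higher levels first, one level per pass. */
	for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
		tmpisr = isr & (imask[lvl] ^ imask[lvl - 1]);
		if (tmpisr == 0)
			continue;
		for (bitno = 63, mask = 1ULL << 63; mask != 0;
		    bitno--, mask >>= 1) {
			if (tmpisr & mask)
				printf("run source %d at level %d\n",
				    bitno, lvl);
		}
	}
}

int
main(void)
{
	dispatch(0x7, 1);	/* sources 0-2 pending, spl currently at 1 */
	return 0;
}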
Example No. 6
int
m197_ipi_handler(struct trapframe *eframe)
{
	struct cpu_info *ci = curcpu();
	int ipi;
	u_int32_t arg1, arg2;

	if ((ipi = atomic_clear_int(&ci->ci_ipi)) == 0)
		return 1;

	/*
	 * Synchronous IPIs. Only one can be pending at a time; the
	 * sending processor will wait for us to have processed the
	 * current one before sending a new one.
	 * We process them ASAP, ignoring any other pending ipi - the
	 * sender will take care of resending an ipi if necessary.
	 */
	if (ipi & CI_IPI_SYNCHRONOUS) {
		/* no need to use atomic ops, the other cpu waits */
		/* leave asynchronous ipi pending */
		ci->ci_ipi = ipi & ~CI_IPI_SYNCHRONOUS;

		arg1 = ci->ci_ipi_arg1;
		arg2 = ci->ci_ipi_arg2;

		if (ipi & CI_IPI_TLB_FLUSH_KERNEL) {
			cmmu_tlbis(ci->ci_cpuid, arg1, arg2);
		}
		else if (ipi & CI_IPI_TLB_FLUSH_USER) {
			cmmu_tlbiu(ci->ci_cpuid, arg1, arg2);
		}
		else if (ipi & CI_IPI_CACHE_FLUSH) {
			cmmu_cache_wbinv(ci->ci_cpuid, arg1, arg2);
		}
		else if (ipi & CI_IPI_ICACHE_FLUSH) {
			cmmu_icache_inv(ci->ci_cpuid, arg1, arg2);
		}
		else if (ipi & CI_IPI_DMA_CACHECTL) {
			dma_cachectl_local(arg1, arg2, DMA_CACHE_INV);
		}

		return 0;
	}

	/*
	 * Asynchronous IPIs. Any number of them may be pending at once.
	 */

	if (ipi & CI_IPI_CLOCK) {
		/*
		 * Even if the current spl level would allow it, we
		 * cannot run the clock handlers from here because we
		 * would need to grab the kernel lock, which might
		 * already be held by the other processor.
		 *
		 * Instead, schedule a soft interrupt. But remember the
		 * important fields from the exception frame first, so
		 * that a valid clockframe can be reconstructed from the
		 * soft interrupt handler (which cannot get an exception
		 * frame).
		 */
		if (ipi & CI_IPI_HARDCLOCK) {
			ci->ci_h_sxip = eframe->tf_sxip;
			ci->ci_h_epsr = eframe->tf_epsr;
		}
		if (ipi & CI_IPI_STATCLOCK) {
			ci->ci_s_sxip = eframe->tf_sxip;
			ci->ci_s_epsr = eframe->tf_epsr;
		}

		/* inflict ourselves a soft ipi */
		ci->ci_softipi_cb = m197_soft_ipi;
	}

	if (ipi & CI_IPI_DDB) {
#ifdef DDB
		/*
		 * Another processor has entered DDB. Spin on the ddb lock
		 * until it is done.
		 */
		extern struct __mp_lock ddb_mp_lock;

		ci->ci_ddb_state = CI_DDB_PAUSE;

		__mp_lock(&ddb_mp_lock);
		__mp_unlock(&ddb_mp_lock);

		ci->ci_ddb_state = CI_DDB_RUNNING;

		/*
		 * If ddb is hopping to us, it's our turn to enter ddb now.
		 */
		if (ci->ci_cpuid == ddb_mp_nextcpu)
			Debugger();
#endif
	}
	if (ipi & CI_IPI_NOTIFY) {
		/* nothing to do! */
	}

	return 1;
}
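
For the synchronous IPIs handled at the top of this function, the sender publishes its arguments, sets the request bit, and spins until the receiver clears it, which is why only one can ever be outstanding. The threaded user-space model below demonstrates just that handshake; the names (IPI_SYNC, send_sync_ipi, ...) are hypothetical and it does not reproduce the real m88k send path.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define IPI_SYNC	0x01U

static _Atomic unsigned int ipi_bits;
static unsigned int ipi_arg1, ipi_arg2;

static void *
receiver(void *unused)
{
	(void)unused;
	/* Busy-wait keeps the model small; a real cpu would be doing work. */
	for (;;) {
		if ((atomic_load(&ipi_bits) & IPI_SYNC) == 0)
			continue;
		printf("receiver: sync ipi args %u %u\n", ipi_arg1, ipi_arg2);
		/* Acknowledging (clearing the bit) releases the sender. */
		atomic_fetch_and(&ipi_bits, ~IPI_SYNC);
		return NULL;
	}
}

static void
send_sync_ipi(unsigned int a1, unsigned int a2)
{
	/* Publish arguments before raising the request bit. */
	ipi_arg1 = a1;
	ipi_arg2 = a2;
	atomic_fetch_or(&ipi_bits, IPI_SYNC);
	/* Only one sync IPI may be outstanding: wait for completion. */
	while (atomic_load(&ipi_bits) & IPI_SYNC)
		;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, receiver, NULL);
	send_sync_ipi(1, 2);
	pthread_join(t, NULL);
	return 0;
}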
Example No. 7
/*
 * Device interrupt handler for MVME197
 */
void
m197_intr(struct trapframe *eframe)
{
	u_int32_t psr;
	int level;
	struct intrhand *intr;
	intrhand_t *list;
	int ret;
	vaddr_t ivec;
	u_int8_t vec;

#ifdef MULTIPROCESSOR
	if (eframe->tf_mask < IPL_SCHED)
		__mp_lock(&kernel_lock);
#endif

	uvmexp.intrs++;

	level = *(u_int8_t *)M197_ILEVEL & 0x07;
	/* generate IACK and get the vector */
	ivec = M197_IACK + (level << 2) + 0x03;
	vec = *(volatile u_int8_t *)ivec;

	/* block interrupts at level or lower */
	m197_setipl(level);
	psr = get_psr();
	set_psr(psr & ~PSR_IND);

	list = &intr_handlers[vec];
	if (SLIST_EMPTY(list))
		printf("Spurious interrupt (level %x and vec %x)\n",
		    level, vec);

	/*
	 * Walk through all interrupt handlers in the chain for the
 * given vector, calling each handler in turn, until some handler
	 * returns a value != 0.
	 */

	ret = 0;
	SLIST_FOREACH(intr, list, ih_link) {
		if (intr->ih_wantframe != 0)
			ret = (*intr->ih_fn)((void *)eframe);
		else
			ret = (*intr->ih_fn)(intr->ih_arg);
		if (ret != 0) {
			intr->ih_count.ec_count++;
			break;
		}
	}

	if (ret == 0) {
		printf("Unclaimed interrupt (level %x and vec %x)\n",
		    level, vec);
	}

#if 0
	/*
	 * Disable interrupts before returning to assembler,
	 * the spl will be restored later.
	 */
	set_psr(psr | PSR_IND);
#endif

#ifdef MULTIPROCESSOR
	if (eframe->tf_mask < IPL_SCHED)
		__mp_unlock(&kernel_lock);
#endif
}
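
Handlers sharing a vector hang off a singly-linked list and are polled until one of them claims the interrupt (returns non-zero). A compact user-space model of that claim loop is shown below, using the SLIST macros from <sys/queue.h>; the handler struct and function names are invented for the illustration.

#include <stdio.h>
#include <sys/queue.h>

struct handler {
	SLIST_ENTRY(handler) h_link;
	int	(*h_fn)(void *);
	void	*h_arg;
	int	h_count;
};

static SLIST_HEAD(, handler) vec_list = SLIST_HEAD_INITIALIZER(vec_list);

static int
dispatch_vector(void)
{
	struct handler *h;
	int ret = 0;

	SLIST_FOREACH(h, &vec_list, h_link) {
		ret = (*h->h_fn)(h->h_arg);
		if (ret != 0) {
			h->h_count++;	/* credit the claiming handler */
			break;
		}
	}
	if (ret == 0)
		printf("unclaimed interrupt\n");
	return ret;
}

static int
not_mine(void *arg)
{
	(void)arg;
	return 0;	/* not my device */
}

static int
mine(void *arg)
{
	printf("claimed by %s\n", (const char *)arg);
	return 1;
}

int
main(void)
{
	struct handler a = { .h_fn = not_mine };
	struct handler b = { .h_fn = mine, .h_arg = "device b" };

	SLIST_INSERT_HEAD(&vec_list, &b, h_link);
	SLIST_INSERT_HEAD(&vec_list, &a, h_link);	/* a is checked first */
	dispatch_vector();
	return 0;
}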
Example No. 8
/*
 * Interrupt dispatcher.
 */
uint32_t
INTR_FUNCTIONNAME(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc, ret;
	INTR_LOCAL_DECLS

	INTR_GETMASKS;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	INTR_MASKPENDING;

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & INTR_IMASK(frame->ipl)) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			tmpisr = isr & (INTR_IMASK(lvl) ^ INTR_IMASK(lvl - 1));
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = INTR_HANDLER(bitno); ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
#if defined(INTR_HANDLER_SKIP)
					if (INTR_HANDLER_SKIP(ih) != 0)
						continue;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					ret = (*ih->ih_fun)(ih->ih_arg);
					if (ret != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");
					if (ret == 1)
						break;
				}
				if (rc == 0)
					INTR_SPURIOUS(bitno);

				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		INTR_MASKRESTORE;
	}