Example #1
/*
 * Process pending interrupts.
 *
 * Drains the per-cpu IPI queue and any pending software/fast
 * interrupts that were deferred while this cpu could not service
 * them (e.g. while inside a critical section).
 */
void
splz(void)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;
	int irq;

	while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND)) {
		crit_enter_quick(td);
		if (gd->mi.gd_reqflags & RQF_IPIQ) {
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_IPIQ);
			lwkt_process_ipiq();
		}
		if (gd->mi.gd_reqflags & RQF_INTPEND) {
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_INTPEND);
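			/*
			 * Drain pending software interrupts lowest bit
			 * first.  ffs() returns a 1-based bit index, so
			 * convert to 0-based before clearing the bit.
			 */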
			while ((irq = ffs(gd->gd_spending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_spending, 1 << irq);
				irq += FIRST_SOFTINT;
				sched_ithd_soft(irq);
			}
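			/*
			 * Drain pending fast/hard interrupts the same
			 * way; these bits map directly to irq numbers.
			 */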
			while ((irq = ffs(gd->gd_fpending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_fpending, 1 << irq);
				sched_ithd_hard_virtual(irq);
			}
		}
		crit_exit_noyield(td);
	}
}
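
The core of splz() is the ffs()-based drain loop: every pending interrupt is one bit in a per-cpu mask, and ffs() repeatedly locates the lowest set bit so handlers are dispatched in ascending irq order. Below is a minimal user-space sketch of that pattern, assuming a single mask, a hypothetical handler() standing in for sched_ithd_*(), and C11 atomics in place of the kernel's atomic_*_int() routines; it is an illustration, not DragonFly code.

#include <stdio.h>
#include <strings.h>		/* ffs() */
#include <stdatomic.h>

/* Hypothetical per-cpu pending mask (illustrative only). */
static _Atomic unsigned int pending;

/* Hypothetical handler standing in for sched_ithd_soft() et al. */
static void
handler(int irq)
{
	printf("dispatch irq %d\n", irq);
}

/*
 * Drain the mask lowest-bit-first, mirroring the inner loops of
 * splz().  ffs() returns 0 once no bits remain.
 */
static void
drain(void)
{
	int irq;

	while ((irq = ffs(atomic_load(&pending))) != 0) {
		--irq;				/* ffs() is 1-based */
		atomic_fetch_and(&pending, ~(1U << irq));
		handler(irq);
	}
}

int
main(void)
{
	atomic_fetch_or(&pending, (1U << 3) | (1U << 0));
	drain();			/* irq 0, then irq 3 */
	return 0;
}

Clearing the bit before calling the handler mirrors splz(): a new instance of the same interrupt arriving mid-handler sets the bit again and is caught on the next loop iteration rather than being lost.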
Example #2
/*
 * Allows an unprotected signal handler or mailbox to signal an interrupt
 *
 * For sched_ithd_hard_virtual() to properly preempt via lwkt_schedule() we
 * cannot enter a critical section here.  We use td_nest_count instead.
 */
void
signalintr(int intr)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;

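	/*
	 * If we are in a critical section or already nested, we cannot
	 * dispatch now; mark the interrupt pending so splz() picks it
	 * up later.
	 */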
	if (td->td_critcount || td->td_nest_count) {
		atomic_set_int_nonlocked(&gd->gd_fpending, 1 << intr);
		atomic_set_int(&gd->mi.gd_reqflags, RQF_INTPEND);
	} else {
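		/*
		 * Dispatch directly.  td_nest_count prevents recursion
		 * without inhibiting the preemption that a critical
		 * section would block.
		 */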
		++td->td_nest_count;
		atomic_clear_int(&gd->gd_fpending, 1 << intr);
		sched_ithd_hard_virtual(intr);
		--td->td_nest_count;
	}
}
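
signalintr() shows the complementary defer-or-dispatch pattern: if the current context cannot safely run the handler (critical section or already nested), the request is parked as a pending bit for splz() to drain later; otherwise the handler runs inline under a nesting counter rather than a critical section, so lwkt_schedule() can still preempt. The following single-threaded sketch of that control flow uses fpending, crit_count, nest_count, and handle() as hypothetical stand-ins for the kernel state:

#include <stdio.h>

/* Hypothetical stand-ins for gd_fpending / td_critcount / td_nest_count. */
static unsigned int fpending;
static int crit_count;
static int nest_count;

static void
handle(int intr)
{
	printf("handle intr %d\n", intr);
}

/* Defer-or-dispatch, mirroring the shape of signalintr(). */
static void
signal_intr(int intr)
{
	if (crit_count || nest_count) {
		/* Unsafe to dispatch now; leave the bit for a later drain. */
		fpending |= 1U << intr;
	} else {
		/* Safe: guard against recursion with the nest count. */
		++nest_count;
		fpending &= ~(1U << intr);
		handle(intr);
		--nest_count;
	}
}

int
main(void)
{
	signal_intr(2);			/* dispatches immediately */
	++crit_count;
	signal_intr(5);			/* deferred: bit 5 stays pending */
	--crit_count;
	printf("pending mask 0x%x\n", fpending);
	return 0;
}

The nesting counter only stops signal_intr() from re-entering the handler recursively; unlike a critical section it does not suppress scheduling, which is exactly the property the function's header comment calls out.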