Example #1
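Both listings below appear to be successive revisions of at91aic_calculate_masks() from NetBSD's Atmel AT91 interrupt-controller (AIC) driver. The routine rebuilds the spl masks after the handler lists change: the first pass records, for each IRQ, the set of IPLs its handlers run at; the second pass inverts that into a per-IPL mask of IRQs; the masks are then chained so that a higher IPL blocks at least everything a lower one does; finally, every IRQ that still has a handler is re-enabled and the hardware mask for the current IPL is reprogrammed.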
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	aic_imask[IPL_NONE] = 0;

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	aic_imask[IPL_VM] |= aic_imask[IPL_NONE];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	aic_imask[IPL_CLOCK] |= aic_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	aic_imask[IPL_HIGH] |= aic_imask[IPL_CLOCK];

	/*
	 * Re-enable every IRQ that still has a handler attached; the
	 * rest remain masked (they were all disabled in the first pass).
	 */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}
	/*
	 * Update the hardware interrupt mask for the current IPL.
	 */
	at91_set_intrmask(aic_imask[curcpl()]);
}
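Example #2

This variant of the same routine drops the explicit IPL_NONE/IPL_VM/IPL_CLOCK chaining in favor of KASSERTs that IPL_NONE and the software-interrupt levels mask no hardware IRQs, and it builds the hierarchy by ORing IPL_VM into IPL_SCHED and IPL_SCHED into IPL_HIGH; the two mask-building passes and the final re-enable/reprogram steps are unchanged.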
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	/* IPL_NONE and the software IPLs must leave all interrupts unmasked. */
	KASSERT(aic_imask[IPL_NONE] == 0);
	KASSERT(aic_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(aic_imask[IPL_SOFTBIO] == 0);
	KASSERT(aic_imask[IPL_SOFTNET] == 0);
	KASSERT(aic_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space / "real-time" requirements) a better
	 * chance of not dropping data.
	 */
	aic_imask[IPL_SCHED] |= aic_imask[IPL_VM];
	aic_imask[IPL_HIGH] |= aic_imask[IPL_SCHED];

	/*
	 * Re-enable every IRQ that still has a handler attached; the
	 * rest remain masked (they were all disabled in the first pass).
	 */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}
	/*
	 * Update the hardware interrupt mask for the current IPL.
	 */
	at91_set_intrmask(aic_imask[curcpl()]);
}
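To make the two passes concrete, here is a minimal standalone sketch, not driver code: a hypothetical four-IRQ, four-IPL table with one handler per IRQ. The names mirror the driver's iq_levels and aic_imask[] fields, but the values and everything else are assumptions for illustration.

#include <stdio.h>

#define TOY_NIRQ	4	/* hypothetical IRQ count */
#define TOY_NIPL	4	/* hypothetical IPL count */

int
main(void)
{
	/* Assumed setup: the IPL of the single handler on each IRQ. */
	static const int handler_ipl[TOY_NIRQ] = { 1, 3, 1, 2 };
	unsigned int iq_levels[TOY_NIRQ], imask[TOY_NIPL];
	int irq, ipl;

	/* Pass 1: which IPLs does each IRQ have? (cf. iq->iq_levels) */
	for (irq = 0; irq < TOY_NIRQ; irq++)
		iq_levels[irq] = 1U << handler_ipl[irq];

	/* Pass 2: which IRQs are used at each IPL? (cf. aic_imask[]) */
	for (ipl = 0; ipl < TOY_NIPL; ipl++) {
		imask[ipl] = 0;
		for (irq = 0; irq < TOY_NIRQ; irq++) {
			if (iq_levels[irq] & (1U << ipl))
				imask[ipl] |= 1U << irq;
		}
	}

	/*
	 * Hierarchy: a higher IPL must mask at least what every lower
	 * one does.  The driver ORs only between named levels; a full
	 * accumulation gives the same result for this toy table.
	 */
	for (ipl = 1; ipl < TOY_NIPL; ipl++)
		imask[ipl] |= imask[ipl - 1];

	/* Prints 0x0, 0x5, 0xd, 0xf for the table above. */
	for (ipl = 0; ipl < TOY_NIPL; ipl++)
		printf("imask[%d] = 0x%x\n", ipl, imask[ipl]);
	return 0;
}

And a sketch of the calling convention the NOTE at the top of each listing demands. disable_interrupts() and restore_interrupts() are the usual NetBSD/ARM CPSR helpers from <arm/cpufunc.h>; the wrapper itself is a hypothetical name, not something taken from the driver.

/*
 * Hypothetical kernel-context caller: recompute the masks with IRQs
 * masked in the CPSR, as the NOTE above requires.
 */
static void
at91aic_update_masks(void)
{
	u_int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	at91aic_calculate_masks();
	restore_interrupts(oldirqstate);
}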