Example no. 1
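/* Raise the current IPL to 'ipl' if it is lower; this never lowers it. */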
static inline void
__raise(int ipl)
{
	if (curcpl() < ipl) {
		s3c2xx0_setipl(ipl);
	}
}
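
Since __raise() does not return the previous level, a caller that needs to restore it must save curcpl() first. A hedged usage sketch (the wrapper below is hypothetical; only curcpl(), __raise() and s3c2xx0_setipl() come from the example above):

static inline void
critical_section_example(int ipl)
{
	int old = curcpl();	/* remember the current level */

	__raise(ipl);		/* raises only if curcpl() < ipl */
	/* ... work that must run at or above 'ipl' ... */
	s3c2xx0_setipl(old);	/* drop back to the saved level */
}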
Example no. 2
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	aic_imask[IPL_NONE] = 0;

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	aic_imask[IPL_VM] |= aic_imask[IPL_NONE];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	aic_imask[IPL_CLOCK] |= aic_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	aic_imask[IPL_HIGH] |= aic_imask[IPL_CLOCK];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}
	/*
	 * Update the current interrupt mask.
	 */
	at91_set_intrmask(aic_imask[curcpl()]);
}
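
The routine above builds the masks in two phases: first it collects the IRQ bits registered at each IPL, then it makes the masks cumulative so that raising the IPL can only block more interrupts, never fewer. A minimal standalone sketch of that pattern (plain C; NIPL, NIRQ and every name below are illustrative stand-ins, not the NetBSD definitions):

#include <stdint.h>

#define NIPL	8		/* number of priority levels (assumed) */
#define NIRQ	32		/* number of IRQ lines (assumed) */

static uint32_t imask[NIPL];	/* imask[ipl] = IRQ bits blocked at ipl */

static void
build_imask(const int irq_ipl[NIRQ])	/* irq_ipl[i] = IPL of IRQ i */
{
	int irq, ipl;

	for (ipl = 0; ipl < NIPL; ipl++)
		imask[ipl] = 0;
	/* Phase 1: each IRQ contributes its bit to its own level. */
	for (irq = 0; irq < NIRQ; irq++)
		imask[irq_ipl[irq]] |= 1U << irq;
	/*
	 * Phase 2: make the hierarchy cumulative, as the |= chain in
	 * at91aic_calculate_masks() does: level n blocks everything
	 * that level n - 1 blocks.
	 */
	for (ipl = 1; ipl < NIPL; ipl++)
		imask[ipl] |= imask[ipl - 1];
}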
Example no. 3
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	/* IPL_NONE must open up all interrupts */
	KASSERT(aic_imask[IPL_NONE] == 0);
	KASSERT(aic_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(aic_imask[IPL_SOFTBIO] == 0);
	KASSERT(aic_imask[IPL_SOFTNET] == 0);
	KASSERT(aic_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" device (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	aic_imask[IPL_SCHED] |= aic_imask[IPL_VM];
	aic_imask[IPL_HIGH] |= aic_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}
	/*
	 * Update the current interrupt mask.
	 */
	at91_set_intrmask(aic_imask[curcpl()]);
}
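
Compared with the previous revision, this one asserts that the software-interrupt levels (IPL_SOFTCLOCK through IPL_SOFTSERIAL) carry no hardware IRQ bits, so IPL_NONE really does open up all interrupts, and it builds the cumulative chain as IPL_VM -> IPL_SCHED -> IPL_HIGH rather than spelling out splvm()/splclock()/splhigh() step by step.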
Example no. 4
static void
vidcvideo_queue_dc_change(struct fb_devconfig *dc, int dc_change)
{
	dc->_internal_dc_changed |= dc_change;

	if (curcpl() == IPL_HIGH) {
		/* running in ddb or without interrupts */
		dc->dc_writeback_delay = 1;
		flush_dc_changes_to_screen(dc);
	} else {
		/*
		 * Running with interrupts enabled, so apply the change
		 * on the next vsync.
		 */
		if (dc->dc_ih) {
			enable_irq(IRQ_FLYBACK);
		}
	}
}
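
Here curcpl() is read purely as a state probe, not changed: at IPL_HIGH the vsync interrupt can never be delivered (the code may be running in ddb), so the change is flushed to the screen synchronously; at any lower level the FLYBACK interrupt is enabled and the handler applies the change on the next vertical sync.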
Example no. 5
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}

inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	set_curcpl(new);
	if (new != hardware_spl_level) {
		hardware_spl_level = new;
		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
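
Note that splx() rewrites the hardware mask only when the requested level differs from hardware_spl_level, so repeated transitions to the same level never touch the VIC registers; pending soft interrupts are only run via cpu_dosoftints() after hardware interrupts have been re-enabled.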

int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	/*
	 * The example is truncated at this point; the body below is a
	 * reconstruction that mirrors splx() above: only the software
	 * level is raised (never lowered), and the hardware mask is
	 * updated lazily by the next splx().
	 */
	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (ipl > old)
		set_curcpl(ipl);
	restore_interrupts(oldirqstate);

	return (old);
}
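
A hedged usage sketch of the pair (example_critical_section() is hypothetical, not from the source); the value returned by _splraise() is exactly what a later splx() expects:

void
example_critical_section(void)
{
	int s;

	s = _splraise(IPL_VM);	/* block handlers up to IPL_VM */
	/* ... touch data shared with those interrupt handlers ... */
	splx(s);		/* restore the saved level */
}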