/*
 * Build the powerpc soft interrupt mask table: for each interrupt
 * priority level, record which software interrupt bits are still
 * unmasked at that level.  A soft interrupt bit is set for every
 * level strictly below its own IPL.
 *
 * Fix: the declarator used an old-style empty parameter list `()`
 * (obsolescent); declare it `(void)` for a proper prototype.
 */
void
ppc_smask_init(void)
{
	int ipl;

	for (ipl = IPL_NONE; ipl <= IPL_HIGH; ipl++) {
		ppc_smask[ipl] = 0;
		if (ipl < IPL_SOFTCLOCK)
			ppc_smask[ipl] |= SI_TO_IRQBIT(SI_SOFTCLOCK);
		if (ipl < IPL_SOFTNET)
			ppc_smask[ipl] |= SI_TO_IRQBIT(SI_SOFTNET);
		if (ipl < IPL_SOFTTTY)
			ppc_smask[ipl] |= SI_TO_IRQBIT(SI_SOFTTTY);
	}
}
void arm_init_smask(void) { static int inited = 0; int i; if (inited) return; inited = 1; for (i = IPL_NONE; i <= IPL_HIGH; i++) { arm_smask[i] = 0; if (i < IPL_SOFT) arm_smask[i] |= SI_TO_IRQBIT(SI_SOFT); if (i < IPL_SOFTCLOCK) arm_smask[i] |= SI_TO_IRQBIT(SI_SOFTCLOCK); if (i < IPL_SOFTNET) arm_smask[i] |= SI_TO_IRQBIT(SI_SOFTNET); if (i < IPL_SOFTTTY) arm_smask[i] |= SI_TO_IRQBIT(SI_SOFTTTY); } }
/* * Schedule a software interrupt. */ void softintr_schedule(void *arg) { struct soft_intrhand *sih = (struct soft_intrhand *)arg; struct soft_intrq *siq = sih->sih_siq; struct cpu_info *ci = curcpu(); mtx_enter(&siq->siq_mtx); if (sih->sih_pending == 0) { TAILQ_INSERT_TAIL(&siq->siq_list, sih, sih_list); sih->sih_pending = 1; atomic_setbits_int(&ci->ci_ipending, SI_TO_IRQBIT(siq->siq_si)); } mtx_leave(&siq->siq_mtx); }
/*
 * Post software interrupt `si` on the current CPU, then immediately
 * dispatch any pending soft interrupts that the current priority
 * level does not mask.
 */
void
arm_setsoftintr(int si)
{
	struct cpu_info *ci = curcpu();
	int saved;

	/* XXX atomic? */
	saved = disable_interrupts(I32_bit);
	ci->ci_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(saved);

	/* Process unmasked pending soft interrupts. */
	if (ci->ci_ipending & arm_smask[ci->ci_cpl])
		arm_do_pending_intr(ci->ci_cpl);
}
/*
 * Dispatch all soft interrupts that are pending on the current CPU
 * and unmasked at priority level `pcpl`.  Runs with hardware
 * interrupts enabled and the kernel lock held; queues are drained
 * highest-numbered first, and the loop repeats until no unmasked
 * bits remain.  Interrupts are disabled again before returning.
 */
void
dosoftint(int pcpl)
{
	struct cpu_info *ci = curcpu();
	int pending, q;

	ppc_intr_enable(1);
	KERNEL_LOCK();
	while ((pending = (ci->ci_ipending & ppc_smask[pcpl])) != 0) {
		atomic_clearbits_int(&ci->ci_ipending, pending);
		for (q = SI_NQUEUES - 1; q >= 0; q--) {
			if (pending & SI_TO_IRQBIT(q))
				softintr_dispatch(q);
		}
	}
	KERNEL_UNLOCK();
	(void)ppc_intr_disable();
}