void
cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe cf;
	uint32_t pending;
	int ipl;
#ifdef DIAGNOSTIC
	const int mtx_count = ci->ci_mtx_count;
	const u_int biglock_count = ci->ci_biglock_count;
	const u_int blcnt = curlwp->l_blcnt;
#endif

	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	ci->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		splx(ipl);	/* lower to interrupt level */
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

		KASSERTMSG(ci->ci_cpl == ipl,
		    "%s: cpl (%d) != ipl (%d)", __func__, ci->ci_cpl, ipl);

		KASSERT(pending != 0);

		cf.pc = pc;
		cf.sr = status;
		cf.intr = (ci->ci_idepth > 1);

#ifdef MIPS3_ENABLE_CLOCK_INTR
		if (pending & MIPS_INT_MASK_5) {
			KASSERTMSG(ipl == IPL_SCHED,
			    "%s: ipl (%d) != IPL_SCHED (%d)",
			    __func__, ipl, IPL_SCHED);
			/* call the common MIPS3 clock interrupt handler */
			mips3_clockintr(&cf);
			pending ^= MIPS_INT_MASK_5;
		}
#endif

		if (pending != 0) {
			/* Process I/O and error interrupts. */
			evbmips_iointr(ipl, pc, pending);
		}

		KASSERT(biglock_count == ci->ci_biglock_count);
		KASSERT(blcnt == curlwp->l_blcnt);
		KASSERT(mtx_count == ci->ci_mtx_count);

		/*
		 * If even our spl is higher now (due to interrupting while a
		 * spin-lock is held and a higher-IPL spin-lock is locked), it
		 * can no longer be locked, so it's safe to lower the IPL back
		 * to ppl.
		 */
		(void) splhigh();	/* disable interrupts */
	}

	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
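Each of these handlers passes mips3_clockintr() a clockframe built from the trap state. As a point of reference, the fields used above suggest a layout roughly like the following; this is a sketch inferred from the usage (cf.pc, cf.sr, cf.intr), not the port's authoritative definition, which lives in its machine-dependent headers.

/*
 * Sketch of the clockframe fields filled in above (inferred from usage;
 * the real definition is in the port's machine-dependent headers).
 */
struct clockframe {
	vaddr_t		pc;	/* program counter at the time of the interrupt */
	uint32_t	sr;	/* CP0 status register at the time of the interrupt */
	bool		intr;	/* true when this interrupt nested inside another */
};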
void
cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	uint32_t pending;
	int ipl;

	curcpu()->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		splx(ipl);

		if (pending & MIPS_INT_MASK_5) {
			struct clockframe cf;

			cf.pc = pc;
			cf.sr = status;
			cf.intr = (curcpu()->ci_idepth > 1);

			mips3_clockintr(&cf);
		}

		if (pending & (MIPS_INT_MASK_0|MIPS_INT_MASK_1|MIPS_INT_MASK_2|
			       MIPS_INT_MASK_3|MIPS_INT_MASK_4)) {
			/* Process I/O and error interrupts. */
			(*algor_iointr)(ipl, pc, pending);
		}

		(void)splhigh();
	}
}
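The I/O dispatch hook called above (algor_iointr here, evbmips_iointr in the first version) is board-specific. A minimal sketch of the common pattern, walking the pending hard interrupt lines from highest to lowest priority and running any registered handlers, might look like the following; the intrtab table, struct intrhand layout, and example_iointr name are hypothetical and only illustrate the shape of such a hook.

/* Hypothetical per-line handler chains; real ports keep their own tables. */
struct intrhand {
	int	(*ih_func)(void *);	/* handler to call */
	void	*ih_arg;		/* its argument */
	struct intrhand *ih_next;	/* next handler sharing this line */
};

static struct intrhand *intrtab[5];	/* one chain per hard interrupt line 0..4 */

static void
example_iointr(int ipl, vaddr_t pc, uint32_t pending)
{
	/* Scan hard interrupt lines 4..0, highest priority first. */
	for (int line = 4; line >= 0; line--) {
		if ((pending & (MIPS_INT_MASK_0 << line)) == 0)
			continue;
		for (struct intrhand *ih = intrtab[line]; ih != NULL;
		     ih = ih->ih_next)
			(*ih->ih_func)(ih->ih_arg);
	}
}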
void
mips3_clock_intr(vaddr_t pc, uint32_t status, uint32_t pending)
{
	struct clockframe cf;

	cf.pc = pc;
	cf.sr = status;
	cf.intr = (curcpu()->ci_idepth > 1);

	mips3_clockintr(&cf);
}
void
cpu_intr(u_int32_t status, u_int32_t cause, u_int32_t pc, u_int32_t ipending)
{
	struct clockframe cf;
	struct cpu_info *ci;

	ci = curcpu();
	ci->ci_idepth++;
	uvmexp.intrs++;

	if (ipending & MIPS_INT_MASK_5) {
		cf.pc = pc;
		cf.sr = status;

		mips3_clockintr(&cf);

		/* Re-enable clock interrupts. */
		cause &= ~MIPS_INT_MASK_5;
		_splset(MIPS_SR_INT_IE |
		    ((status & ~cause) & MIPS_HARD_INT_MASK));
	}

	if (ipending & (MIPS_INT_MASK_0|MIPS_INT_MASK_1|MIPS_INT_MASK_2|
			MIPS_INT_MASK_3|MIPS_INT_MASK_4)) {
		/* Process I/O and error interrupts. */
		(*algor_iointr)(status, cause, pc, ipending);
	}

	ci->ci_idepth--;

#ifdef __HAVE_FAST_SOFTINTS
	ipending &= (MIPS_SOFT_INT_MASK_1|MIPS_SOFT_INT_MASK_0);
	if (ipending == 0)
		return;
	_clrsoftintr(ipending);
	softintr_dispatch(ipending);
#endif
}