/*
 * Drain the low-level (pil <= LOCK_LEVEL) pending hardware interrupts
 * recorded for 'cpu', highest pil first.  Each one is handed off to an
 * interrupt thread via switch_sp_and_call().
 *
 * Returns 0 when nothing remains pending, or -1 as soon as the highest
 * pending pil is masked by the current priority or the base spl (it will
 * be delivered later when the priority drops).
 *
 * Called with interrupts disabled.
 */
static __inline__ int
apix_do_pending_hardint(struct cpu *cpu, struct regs *rp)
{
	volatile uint16_t pending;
	uint_t newipl, oldipl;
	caddr_t newsp;

	while ((pending = LOWLEVEL_PENDING(cpu)) != 0) {
		/* bsrw_insn() finds the highest set bit: the top pending pil */
		newipl = bsrw_insn(pending);
		ASSERT(newipl <= LOCK_LEVEL);
		oldipl = cpu->cpu_pri;
		/* Masked by current priority or base spl: defer delivery */
		if (newipl <= oldipl || newipl <= cpu->cpu_base_spl)
			return (-1);

		/*
		 * Run this interrupt in a separate thread.
		 */
		newsp = apix_intr_thread_prolog(cpu, newipl, (caddr_t)rp);
		ASSERT(newsp != NULL);
		switch_sp_and_call(newsp, apix_dispatch_pending_hardint,
		    oldipl, 0);
	}

	return (0);
}
/*
 * Drain the high-level (pil > LOCK_LEVEL) pending interrupts recorded for
 * 'cpu', highest pil first.  High-level interrupts run on the cpu's
 * dedicated interrupt stack rather than on an interrupt thread.
 *
 * Returns 0 when nothing remains pending, or -1 as soon as the highest
 * pending pil is at or below the current priority (still masked).
 *
 * Called with interrupts disabled.
 */
static __inline__ int
apix_do_pending_hilevel(struct cpu *cpu, struct regs *rp)
{
	volatile uint16_t pending;
	uint_t newipl, oldipl;
	caddr_t newsp;

	while ((pending = HILEVEL_PENDING(cpu)) != 0) {
		/* bsrw_insn() finds the highest set bit: the top pending pil */
		newipl = bsrw_insn(pending);
		ASSERT(newipl > LOCK_LEVEL && newipl > cpu->cpu_base_spl);
		oldipl = cpu->cpu_pri;
		if (newipl <= oldipl)
			return (-1);

		/*
		 * High priority interrupts run on this cpu's interrupt stack.
		 * The prolog returns 0 when we still need to switch stacks;
		 * non-zero means we are already on the interrupt stack.
		 */
		if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
			newsp = cpu->cpu_intr_stack;
			switch_sp_and_call(newsp, apix_dispatch_pending_hilevel,
			    newipl, 0);
		} else {
			/* already on the interrupt stack */
			apix_dispatch_pending_hilevel(newipl, 0);
		}
		/* Undo the prolog's priority/state changes before looping */
		(void) apix_hilevel_intr_epilog(cpu, oldipl);
	}

	return (0);
}
/*
 * Deliver any softints the current interrupt priority allows.
 * Called with interrupts disabled.
 *
 * Loops while softints are pending; dosoftint_prolog() picks one the
 * current priority permits and returns the stack to run it on, or NULL
 * when everything pending is still masked (in which case we stop and
 * leave the remainder for later).
 */
void
dosoftint(struct regs *regs)
{
	struct cpu *cpu = CPU;
	int oldipl;
	caddr_t newsp;

	while (cpu->cpu_softinfo.st_pending) {
		oldipl = cpu->cpu_pri;
		newsp = dosoftint_prolog(cpu, (caddr_t)regs,
		    cpu->cpu_softinfo.st_pending, oldipl);
		/*
		 * If returned stack pointer is NULL, priority is too high
		 * to run any of the pending softints now.
		 * Break out and they will be run later.
		 */
		if (newsp == NULL)
			break;
		switch_sp_and_call(newsp, dispatch_softint, oldipl, 0);
	}
}
/*
 * Deliver any softints the current interrupt priority allows (apix
 * variant of dosoftint()).  Called with interrupts disabled.
 *
 * Returns 0 when no softints remain pending, or -1 as soon as the
 * highest pending softint pil is masked by the current priority or the
 * base spl.
 */
int
apix_do_softint(struct regs *regs)
{
	struct cpu *cpu = CPU;
	int oldipl;
	int newipl;
	volatile uint16_t pending;
	caddr_t newsp;

	while ((pending = cpu->cpu_softinfo.st_pending) != 0) {
		/* bsrw_insn() finds the highest set bit: the top pending pil */
		newipl = bsrw_insn(pending);
		oldipl = cpu->cpu_pri;
		/* Masked by current priority or base spl: defer delivery */
		if (newipl <= oldipl || newipl <= cpu->cpu_base_spl)
			return (-1);

		newsp = apix_do_softint_prolog(cpu, newipl, oldipl,
		    (caddr_t)regs);
		ASSERT(newsp != NULL);
		switch_sp_and_call(newsp, apix_dispatch_softint, oldipl, 0);
	}

	return (0);
}
/*
 * Interrupt service routine, called with interrupts disabled.
 *
 * Top-level apix interrupt entry: handles faked T_SOFTINT traps, queries
 * the PIC/APIC via (*setlvl)() for the new ipl (which also resolves the
 * vector into rp->r_trapno), then either direct-dispatches the vector
 * (IPI/MSI/MSI-X at an unmasked priority) or queues it on the per-pil
 * pending list.  Before returning it drains any pending high-level,
 * low-level, and soft interrupts that the restored priority now allows.
 */
void
apix_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int vector = rp->r_trapno, newipl, oldipl = cpu->cpu_pri, ret;
	apix_vector_t *vecp = NULL;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_cpuid = cpu->cpu_id;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = (uchar_t)oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */

	cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);

	/* Bump the per-cpu interrupt stamp (16-bit wraparound counter) */
	++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;

	/*
	 * If it's a softint go do it now.
	 */
	if (rp->r_trapno == T_SOFTINT) {
		/*
		 * It might be the case that when an interrupt is triggered,
		 * the spl is raised to high by splhigh(). Later when do_splx()
		 * is called to restore the spl, both hardware and software
		 * interrupt pending flags are check and an SOFTINT is faked
		 * accordingly.
		 */
		(void) apix_do_pending_hilevel(cpu, rp);
		(void) apix_do_pending_hardint(cpu, rp);
		(void) apix_do_softint(rp);
		ASSERT(!interrupts_enabled());
#ifdef TRAPTRACE
		ttp->ttr_vector = T_SOFTINT;
#endif
		return;
	}

	/*
	 * Send EOI to local APIC
	 */
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = (uchar_t)newipl;
#endif	/* TRAPTRACE */

	/*
	 * Bail if it is a spurious interrupt
	 */
	if (newipl == -1)
		return;

	/* (*setlvl)() may have rewritten r_trapno to the resolved vector */
	vector = rp->r_trapno;
	vecp = xv_vector(cpu->cpu_id, vector);
#ifdef TRAPTRACE
	ttp->ttr_vector = (short)vector;
#endif	/* TRAPTRACE */

	/*
	 * Direct dispatch for IPI, MSI, MSI-X
	 */
	if (vecp && vecp->v_type != APIX_TYPE_FIXED &&
	    newipl > MAX(oldipl, cpu->cpu_base_spl)) {
		caddr_t newsp;

		if (newipl > LOCK_LEVEL) {
			/*
			 * High-level: run on the cpu's interrupt stack unless
			 * the prolog says we are already on it.
			 */
			if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp)
			    == 0) {
				newsp = cpu->cpu_intr_stack;
				switch_sp_and_call(newsp, apix_dispatch_hilevel,
				    vector, 0);
			} else {
				apix_dispatch_hilevel(vector, 0);
			}
			(void) apix_hilevel_intr_epilog(cpu, oldipl);
		} else {
			/* Low-level: hand off to an interrupt thread */
			newsp = apix_intr_thread_prolog(cpu, newipl,
			    (caddr_t)rp);
			switch_sp_and_call(newsp, apix_dispatch_lowlevel,
			    vector, oldipl);
		}
	} else {
		/* Add to per-pil pending queue */
		apix_add_pending_hardint(vector);

		/*
		 * Nothing more to do if the new ipl is masked, or if there
		 * is no pending work recorded for this cpu.
		 */
		if (newipl <= MAX(oldipl, cpu->cpu_base_spl) ||
		    !apixs[cpu->cpu_id]->x_intr_pending)
			return;
	}

	/* Drain pending work now deliverable; hilevel first */
	if (apix_do_pending_hilevel(cpu, rp) < 0)
		return;

	do {
		ret = apix_do_pending_hardint(cpu, rp);

		/*
		 * Deliver any pending soft interrupts.
		 */
		(void) apix_do_softint(rp);
	} while (!ret && LOWLEVEL_PENDING(cpu));
}
/*
 * Interrupt service routine (non-apix path), called with interrupts
 * disabled.
 *
 * Handles faked T_SOFTINT traps via dosoftint(), otherwise raises the
 * cpu priority to the level returned by (*setlvl)() and dispatches the
 * vector: high-level interrupts run on the cpu's interrupt stack,
 * low-level interrupts on a separate interrupt thread.  Any softints
 * pending afterwards are delivered before returning (except on xpv).
 */
/*ARGSUSED*/
void
do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int newipl, oldipl = cpu->cpu_pri;
	uint_t vector;
	caddr_t newsp;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */

	cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);

	/* Bump the per-cpu interrupt stamp (16-bit wraparound counter) */
	++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;

	/*
	 * If it's a softint go do it now.
	 */
	if (rp->r_trapno == T_SOFTINT) {
		dosoftint(rp);
		ASSERT(!interrupts_enabled());
		return;
	}

	/*
	 * Raise the interrupt priority.
	 */
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = newipl;
#endif	/* TRAPTRACE */

	/*
	 * Bail if it is a spurious interrupt
	 */
	if (newipl == -1)
		return;

	cpu->cpu_pri = newipl;
	/* (*setlvl)() may have rewritten r_trapno to the resolved vector */
	vector = rp->r_trapno;
#ifdef TRAPTRACE
	ttp->ttr_vector = vector;
#endif	/* TRAPTRACE */

	if (newipl > LOCK_LEVEL) {
		/*
		 * High priority interrupts run on this cpu's interrupt stack.
		 * The prolog returns 0 when we still need to switch stacks;
		 * non-zero means we are already on the interrupt stack.
		 */
		if (hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
			newsp = cpu->cpu_intr_stack;
			switch_sp_and_call(newsp, dispatch_hilevel, vector, 0);
		} else {
			/* already on the interrupt stack */
			dispatch_hilevel(vector, 0);
		}
		(void) hilevel_intr_epilog(cpu, newipl, oldipl, vector);
	} else {
		/*
		 * Run this interrupt in a separate thread.
		 */
		newsp = intr_thread_prolog(cpu, (caddr_t)rp, newipl);
		switch_sp_and_call(newsp, dispatch_hardint, vector, oldipl);
	}

#if !defined(__xpv)
	/*
	 * Deliver any pending soft interrupts.
	 */
	if (cpu->cpu_softinfo.st_pending)
		dosoftint(rp);
#endif	/* !__xpv */
}