/*
 * scb_alloc:
 *
 *	Find a free slot in the SCB I/O vector table, install the
 *	given handler/argument pair in it, and hand the corresponding
 *	vector number back to the caller.  Runs at splhigh() while
 *	the table is examined and modified.
 *
 *	Returns SCB_ALLOC_FAILED when every slot is taken.
 */
u_long
scb_alloc(void (*func)(void *, u_long), void *arg)
{
	u_long vector, slot;
	int saved_spl;

	saved_spl = splhigh();

	/*
	 * Scan from the top of the table down towards the bottom;
	 * the low-numbered vectors are the ones most likely to have
	 * been claimed by interrupts already.
	 */
	for (vector = SCB_SIZE - SCB_VECSIZE; vector >= SCB_IOVECBASE;
	    vector -= SCB_VECSIZE) {
		slot = SCB_VECTOIDX(vector - SCB_IOVECBASE);
		if (scb_iovectab[slot].scb_func != scb_stray)
			continue;
		/* Free slot: claim it. */
		scb_iovectab[slot].scb_func = func;
		scb_iovectab[slot].scb_arg = arg;
		splx(saved_spl);
		return (vector);
	}

	splx(saved_spl);
	return (SCB_ALLOC_FAILED);
}
/*
 * sio_iointr:
 *
 *	Interrupt dispatcher for ISA interrupts routed through the SIO.
 *	Converts the SCB vector (based at 0x800) into an ISA IRQ number,
 *	runs the shared handler chain for that IRQ, and issues the EOI
 *	the hardware requires.
 */
void
sio_iointr(void *arg, unsigned long vec)
{
	int irq;

	irq = SCB_VECTOIDX(vec - 0x800);

#ifdef DIAGNOSTIC
	/*
	 * Valid ISA IRQs are 0 .. ICU_LEN - 1.  The previous test used
	 * `irq > ICU_LEN', which let the out-of-range value ICU_LEN
	 * slip through and index one entry past the end of the sio_intr
	 * handler table.
	 */
	if (irq >= ICU_LEN || irq < 0)
		panic("sio_iointr: irq out of range (%d)", irq);
#endif

	if (!alpha_shared_intr_dispatch(sio_intr, irq))
		alpha_shared_intr_stray(sio_intr, irq, "isa irq");
	else
		alpha_shared_intr_reset_strays(sio_intr, irq);

	/*
	 * Some versions of the machines which use the SIO
	 * (or is it some PALcode revisions on those machines?)
	 * require the non-specific EOI to be fed to the PIC(s)
	 * by the interrupt handler.
	 */
	specific_eoi(irq);
}
/*
 * jensenio_iointr:
 *
 *	Dispatch an EISA interrupt on the Jensen: decode the IRQ
 *	number from the SCB vector (based at 0x800), run the shared
 *	handler list for it, and EOI the PIC.
 */
void
jensenio_iointr(void *framep, u_long vec)
{
	int eirq;

	eirq = SCB_VECTOIDX(vec - 0x800);

	if (alpha_shared_intr_dispatch(jensenio_eisa_intr, eirq) == 0)
		alpha_shared_intr_stray(jensenio_eisa_intr, eirq, "eisa irq");

	jensenio_specific_eoi(eirq);
}
/*
 * eb66_iointr:
 *
 *	Dispatch a PCI interrupt on the EB66: decode the interrupt
 *	line from the SCB vector (based at 0x900) and run the shared
 *	handler list.  If nobody claims the interrupt and the stray
 *	threshold is reached, the line is disabled at the hardware.
 */
void
eb66_iointr(void *arg, unsigned long vec)
{
	int line;

	line = SCB_VECTOIDX(vec - 0x900);

	if (alpha_shared_intr_dispatch(eb66_pci_intr, line)) {
		/* Handled: forget any previously recorded strays. */
		alpha_shared_intr_reset_strays(eb66_pci_intr, line);
	} else {
		alpha_shared_intr_stray(eb66_pci_intr, line, "eb66 irq");
		if (ALPHA_SHARED_INTR_DISABLE(eb66_pci_intr, line))
			eb66_intr_disable(line);
	}
}
/*
 * dec_1000a_iointr:
 *
 *	Dispatch a PCI interrupt on the DEC 1000A: decode the interrupt
 *	line from the SCB vector (based at 0x900) and run the shared
 *	handler list.  If nobody claims the interrupt and the stray
 *	threshold is reached, the line is disabled at the hardware.
 */
static void
dec_1000a_iointr(void *framep, unsigned long vec)
{
	int line;

	line = SCB_VECTOIDX(vec - 0x900);

	if (alpha_shared_intr_dispatch(dec_1000a_pci_intr, line)) {
		/* Handled: forget any previously recorded strays. */
		alpha_shared_intr_reset_strays(dec_1000a_pci_intr, line);
	} else {
		alpha_shared_intr_stray(dec_1000a_pci_intr, line,
		    "dec_1000a irq");
		if (ALPHA_SHARED_INTR_DISABLE(dec_1000a_pci_intr, line))
			dec_1000a_disable_intr(line);
	}
}
/*
 * dec_550_iointr:
 *
 *	Dispatch a PCI interrupt on the Miata (Personal Workstation
 *	550): decode the interrupt line from the SCB vector (based at
 *	0x900), bounds-check it, and run the shared handler list.  An
 *	unclaimed interrupt that reaches the stray threshold gets its
 *	line disabled at the hardware.
 */
void
dec_550_iointr(void *arg, unsigned long vec)
{
	int line;

	line = SCB_VECTOIDX(vec - 0x900);
	if (line >= DEC_550_MAX_IRQ)
		panic("550_iointr: vec 0x%lx out of range", vec);

	if (alpha_shared_intr_dispatch(dec_550_pci_intr, line)) {
		/* Handled: forget any previously recorded strays. */
		alpha_shared_intr_reset_strays(dec_550_pci_intr, line);
	} else {
		alpha_shared_intr_stray(dec_550_pci_intr, line,
		    "dec 550 irq");
		if (ALPHA_SHARED_INTR_DISABLE(dec_550_pci_intr, line))
			dec_550_intr_disable(line);
	}
}
/*
 * scb_free:
 *
 *	Return a previously allocated SCB vector to the free pool by
 *	re-installing the stray handler for it.  Panics on a malformed
 *	vector or on an attempt to free a slot that is already free.
 *	Runs at splhigh() while the table is modified.
 */
void
scb_free(u_long vec)
{
	u_long slot;
	int saved_spl;

	saved_spl = splhigh();

	/* The vector must lie in the I/O range and be properly aligned. */
	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
	    (vec & (SCB_VECSIZE - 1)) != 0)
		panic("scb_free: bad vector 0x%lx", vec);

	slot = SCB_VECTOIDX(vec - SCB_IOVECBASE);

	if (scb_iovectab[slot].scb_func == scb_stray)
		panic("scb_free: vector 0x%lx is empty", vec);

	/* The stray handler gets the vector number as its argument. */
	scb_iovectab[slot].scb_func = scb_stray;
	scb_iovectab[slot].scb_arg = (void *) vec;

	splx(saved_spl);
}
/*
 * scb_set:
 *
 *	Install a handler/argument pair at a specific SCB vector.
 *	Panics on a malformed vector or if the requested slot is
 *	already occupied.  Runs at splhigh() while the table is
 *	modified.
 */
void
scb_set(u_long vec, void (*func)(void *, u_long), void *arg)
{
	u_long slot;
	int saved_spl;

	saved_spl = splhigh();

	/* The vector must lie in the I/O range and be properly aligned. */
	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
	    (vec & (SCB_VECSIZE - 1)) != 0)
		panic("scb_set: bad vector 0x%lx", vec);

	slot = SCB_VECTOIDX(vec - SCB_IOVECBASE);

	if (scb_iovectab[slot].scb_func != scb_stray)
		panic("scb_set: vector 0x%lx already occupied", vec);

	scb_iovectab[slot].scb_func = func;
	scb_iovectab[slot].scb_arg = arg;

	splx(saved_spl);
}
/*
 * interrupt:
 *
 *	Trap-level dispatcher for all machine interrupts.  a0 is the
 *	PALcode interrupt type, a1/a2 are type-specific arguments
 *	(for device interrupts, a1 is the SCB vector), and framep is
 *	the trap frame pushed on entry.
 */
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	struct cpu_info *ci = curcpu();
	extern int schedhz;

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
		/* Account for the nested interrupt while IPIs run. */
		atomic_add_ulong(&ci->ci_intrdepth, 1);

		alpha_ipi_process(ci, framep);

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();

		atomic_sub_ulong(&ci->ci_intrdepth, 1);
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;

	case ALPHA_INTR_CLOCK:	/* clock interrupt */
		atomic_add_int(&uvmexp.intrs, 1);
		/* Only the primary CPU ticks the clock event counter. */
		if (CPU_IS_PRIMARY(ci))
			clk_count.ec_count++;
		if (platform.clockintr) {
			/*
			 * Call hardclock().  This will also call
			 * statclock().  On the primary CPU, it
			 * will also deal with time-of-day stuff.
			 */
			(*platform.clockintr)((struct clockframe *)framep);

			/*
			 * If it's time to call the scheduler clock,
			 * do so.  The 0x3f mask divides the tick rate
			 * by 64 for schedclock().
			 */
			if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 &&
			    schedhz != 0)
				schedclock(ci->ci_curproc);
		}
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		atomic_add_ulong(&ci->ci_intrdepth, 1);
		/* a0 is reused to hold the machine-check error summary. */
		a0 = alpha_pal_rdmces();
		if (platform.mcheck_handler)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		atomic_sub_ulong(&ci->ci_intrdepth, 1);
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		struct scbvec *scb;

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

		atomic_add_ulong(&ci->ci_intrdepth, 1);
		atomic_add_int(&uvmexp.intrs, 1);

		/* Vector through the SCB I/O table to the registered handler. */
		scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
		(*scb->scb_func)(scb->scb_arg, a1);
		atomic_sub_ulong(&ci->ci_intrdepth, 1);
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}
#include <machine/atomic.h> #include <machine/autoconf.h> #include <machine/cpu.h> #include <machine/reg.h> #include <machine/rpb.h> #include <machine/frame.h> #include <machine/cpuconf.h> #include "apecs.h" #include "cia.h" #include "lca.h" #include "tcasic.h" extern struct evcount clk_count; struct scbvec scb_iovectab[SCB_VECTOIDX(SCB_SIZE - SCB_IOVECBASE)]; void scb_stray(void *, u_long); /* * True if the system has any non-level interrupts which are shared * on the same pin. */ int intr_shared_edge; void scb_init(void) { u_long i; for (i = 0; i < SCB_NIOVECS; i++) {
/*
 * interrupt:
 *
 *	Trap-level dispatcher for all machine interrupts.  a0 is the
 *	PALcode interrupt type, a1/a2 are type-specific arguments
 *	(for device interrupts, a1 is the SCB vector), and framep is
 *	the trap frame pushed on entry.
 */
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();
	extern int schedhz;

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
	    {
		u_long pending_ipis, bit;

#if 0
		printf("CPU %lu got IPI\n", cpu_id);
#endif

#ifdef DIAGNOSTIC
		if (ci->ci_dev == NULL) {
			/* XXX panic? */
			printf("WARNING: no device for ID %lu\n",
			    ci->ci_cpuid);
			return;
		}
#endif

		/* Atomically fetch-and-clear the pending IPI bitmask. */
		pending_ipis = atomic_loadlatch_ulong(&ci->ci_ipis, 0);
		for (bit = 0; bit < ALPHA_NIPIS; bit++)
			if (pending_ipis & (1UL << bit))
				(*ipifuncs[bit])();

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();
	    }
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;

	case ALPHA_INTR_CLOCK:	/* clock interrupt */
#if defined(MULTIPROCESSOR)
		/* XXX XXX XXX */
		/* Secondary CPUs ignore the clock for now. */
		if (CPU_IS_PRIMARY(ci) == 0)
			return;
#endif
		uvmexp.intrs++;
		clk_count.ec_count++;
		if (platform.clockintr) {
			/*
			 * Call hardclock().  This will also call
			 * statclock().  On the primary CPU, it
			 * will also deal with time-of-day stuff.
			 */
			(*platform.clockintr)((struct clockframe *)framep);

			/*
			 * If it's time to call the scheduler clock,
			 * do so.  The 0x3f mask divides the tick rate
			 * by 64 for schedclock().
			 */
			if ((++schedclk2 & 0x3f) == 0 &&
			    (p = ci->ci_curproc) != NULL && schedhz != 0)
				schedclock(p);
		}
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		/* a0 is reused to hold the machine-check error summary. */
		a0 = alpha_pal_rdmces();
		if (platform.mcheck_handler)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		struct scbvec *scb;

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

#if defined(MULTIPROCESSOR)
		/* XXX XXX XXX */
		/* Secondary CPUs ignore device interrupts for now. */
		if (CPU_IS_PRIMARY(ci) == 0)
			return;
#endif
		uvmexp.intrs++;
		/* Vector through the SCB I/O table to the registered handler. */
		scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
		(*scb->scb_func)(scb->scb_arg, a1);
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}
/*
 * interrupt:
 *
 *	Trap-level dispatcher for all machine interrupts.  a0 is the
 *	PALcode interrupt type, a1/a2 are type-specific arguments
 *	(for device interrupts, a1 is the SCB vector), and framep is
 *	the trap frame pushed on entry.
 */
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	struct cpu_info *ci = curcpu();
	struct cpu_softc *sc = ci->ci_softc;

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
		/* Account for the nested interrupt while IPIs run. */
		atomic_inc_ulong(&ci->ci_intrdepth);

		alpha_ipi_process(ci, framep);

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();

		atomic_dec_ulong(&ci->ci_intrdepth);
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;

	case ALPHA_INTR_CLOCK:	/* clock interrupt */
		/*
		 * We don't increment the interrupt depth for the
		 * clock interrupt, since it is *sampled* from
		 * the clock interrupt, so if we did, all system
		 * time would be counted as interrupt time.
		 */
		sc->sc_evcnt_clock.ev_count++;
		ci->ci_data.cpu_nintr++;
		if (platform.clockintr) {
			/*
			 * Call hardclock().  This will also call
			 * statclock().  On the primary CPU, it
			 * will also deal with time-of-day stuff.
			 */
			(*platform.clockintr)((struct clockframe *)framep);

			/*
			 * If it's time to call the scheduler clock,
			 * do so.  The 0x3f mask divides the tick rate
			 * by 64 for schedclock().
			 */
			if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 &&
			    schedhz != 0)
				schedclock(ci->ci_curlwp);
		}
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		atomic_inc_ulong(&ci->ci_intrdepth);
		/* a0 is reused to hold the machine-check error summary. */
		a0 = alpha_pal_rdmces();
		/*
		 * A machine check that faulted in the arithmetic-trap
		 * entry path bypasses the platform handler and goes
		 * straight to the generic handler.
		 */
		if (platform.mcheck_handler != NULL &&
		    (void *)framep->tf_regs[FRAME_PC] != XentArith)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		atomic_dec_ulong(&ci->ci_intrdepth);
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		struct scbvec *scb;
		int idx = SCB_VECTOIDX(a1 - SCB_IOVECBASE);
		/*
		 * MP-safe handlers run without the big lock; others
		 * are bracketed by KERNEL_LOCK/KERNEL_UNLOCK below.
		 * NOTE(review): scb_mpsafe[idx] is read before the
		 * KDASSERT range check on a1 — confirm order is OK.
		 */
		bool mpsafe = scb_mpsafe[idx];

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

		atomic_inc_ulong(&sc->sc_evcnt_device.ev_count);

		atomic_inc_ulong(&ci->ci_intrdepth);

		if (!mpsafe) {
			KERNEL_LOCK(1, NULL);
		}
		ci->ci_data.cpu_nintr++;
		/* Vector through the SCB I/O table to the registered handler. */
		scb = &scb_iovectab[idx];
		(*scb->scb_func)(scb->scb_arg, a1);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(NULL);

		atomic_dec_ulong(&ci->ci_intrdepth);
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}
#include <sys/kernel.h> #include <sys/time.h> #include <sys/intr.h> #include <sys/device.h> #include <sys/cpu.h> #include <sys/atomic.h> #include <machine/cpuvar.h> #include <machine/autoconf.h> #include <machine/reg.h> #include <machine/rpb.h> #include <machine/frame.h> #include <machine/cpuconf.h> #include <machine/alpha.h> struct scbvec scb_iovectab[SCB_VECTOIDX(SCB_SIZE - SCB_IOVECBASE)]; static bool scb_mpsafe[SCB_VECTOIDX(SCB_SIZE - SCB_IOVECBASE)]; void netintr(void); void scb_stray(void *, u_long); void scb_init(void) { u_long i; for (i = 0; i < SCB_NIOVECS; i++) { scb_iovectab[i].scb_func = scb_stray; scb_iovectab[i].scb_arg = NULL; }