/*
 * nexus bus method: bind the interrupt backed by resource 'r' to 'cpu'.
 * The bus and child handles are unused; the IRQ number is taken directly
 * from the resource's start value and handed to the PIC layer.
 */
static int
nexus_bind_intr(device_t bus __unused, device_t child __unused,
    struct resource *r, int cpu)
{

	return (powerpc_bind_intr(rman_get_start(r), cpu));
}
/*
 * Install an interrupt handler for a DPAA interrupt.
 *
 * Note: despite its name, 'irq' does not arrive as a raw IRQ number --
 * the uintptr_t actually carries a struct resource pointer, which is
 * decoded below into the owning device and the true IRQ number.
 *
 * For interrupts marked XX_INTR_FLAG_PREALLOCATED only the handler and
 * argument are recorded in XX_IntrInfo[]; E_BUSY is returned if a handler
 * is already registered for that slot.  All other interrupts are attached
 * through bus_setup_intr().
 *
 * Returns E_OK on success, E_BUSY for an occupied preallocated slot, or
 * the error from bus_setup_intr()/powerpc_bind_intr().
 */
t_Error
XX_SetIntr(uintptr_t irq, t_Isr *f_Isr, t_Handle handle)
{
	device_t dev;
	struct resource *r;
	unsigned int flags;
	int err;

	/* Recover the resource pointer smuggled through the uintptr_t. */
	r = (struct resource *)irq;
	dev = rman_get_device(r);
	irq = rman_get_start(r);

	/* Handle preallocated interrupts */
	if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_PREALLOCATED) {
		if (XX_IntrInfo[irq].handler != NULL)
			return (E_BUSY);
		XX_IntrInfo[irq].handler = f_Isr;
		XX_IntrInfo[irq].arg = handle;
		return (E_OK);
	}

	flags = INTR_TYPE_NET | INTR_MPSAFE;
	/* BMAN/QMAN Portal interrupts must be exclusive */
	if (XX_IsPortalIntr(irq))
		flags |= INTR_EXCL;

	/* Filter is NULL: f_Isr runs as an ithread handler, not a filter. */
	err = bus_setup_intr(dev, r, flags, NULL, f_Isr, handle,
	    &XX_IntrInfo[irq].cookie);
	if (err)
		goto finish;

	/*
	 * XXX: Bind FMan IRQ to CPU0. Current interrupt subsystem directs each
	 * interrupt to all CPUs. Race between an interrupt assertion and
	 * masking may occur and interrupt handler may be called multiple times
	 * per one interrupt. FMan doesn't support such a situation. Workaround
	 * is to bind FMan interrupt to one CPU0 only.
	 */
#ifdef SMP
	if (XX_FmanNeedsIntrFix(irq))
		err = powerpc_bind_intr(irq, 0);
#endif

finish:
	return (err);
}
/*
 * nexus bus method: route the interrupt described by the 'irq' resource
 * to a single CPU.  Thin wrapper around the PowerPC PIC binding call;
 * the parent and child device arguments are not consulted.
 */
static int
nexus_bind_intr(device_t dev, device_t child, struct resource *irq,
    int cpu)
{

	return (powerpc_bind_intr(rman_get_start(irq), cpu));
}