int
ddi_intr_hilevel(dev_info_t *dip, uint_t inumber)
{
    ddi_intr_handle_t hdl;
    ddi_intr_handle_t *hdl_p;
    size_t hdl_sz = 0;
    int actual, ret;
    uint_t high_pri, pri;

    DDI_INTR_APIDBG((CE_CONT, "ddi_intr_hilevel: name=%s%d dip=0x%p "
        "inum=0x%x\n", ddi_driver_name(dip), ddi_get_instance(dip),
        (void *)dip, inumber));

    /*
     * The device driver may have already registered with the
     * framework. If so, first try to get the existing interrupt handle
     * for that given inumber and use that handle.
     */
    if ((hdl = i_ddi_get_intr_handle(dip, inumber)) == NULL) {
        hdl_sz = sizeof (ddi_intr_handle_t) * (inumber + 1);
        hdl_p = kmem_zalloc(hdl_sz, KM_SLEEP);
        if ((ret = ddi_intr_alloc(dip, hdl_p, DDI_INTR_TYPE_FIXED,
            inumber, 1, &actual, DDI_INTR_ALLOC_NORMAL)) != DDI_SUCCESS) {
            DDI_INTR_APIDBG((CE_CONT, "ddi_intr_hilevel: "
                "ddi_intr_alloc failed, ret 0x%x\n", ret));
            kmem_free(hdl_p, hdl_sz);
            return (0);
        }
        hdl = hdl_p[inumber];
    }

    if ((ret = ddi_intr_get_pri(hdl, &pri)) != DDI_SUCCESS) {
        DDI_INTR_APIDBG((CE_CONT, "ddi_intr_hilevel: "
            "ddi_intr_get_pri failed, ret 0x%x\n", ret));
        (void) ddi_intr_free(hdl);
        if (hdl_sz)
            kmem_free(hdl_p, hdl_sz);
        return (0);
    }

    high_pri = ddi_intr_get_hilevel_pri();

    DDI_INTR_APIDBG((CE_CONT, "ddi_intr_hilevel: pri = %x, "
        "high_pri = %x\n", pri, high_pri));

    /* Free the handle allocated here only if no existing handle exists */
    if (hdl_sz) {
        (void) ddi_intr_free(hdl);
        kmem_free(hdl_p, hdl_sz);
    }

    return (pri >= high_pri);
}
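/*
 * A minimal caller sketch (not from the original source): a driver that
 * cannot service a high-level interrupt can probe with ddi_intr_hilevel()
 * before wiring its handler. The xx_* names are hypothetical placeholders.
 */
static int
xx_check_intr_level(dev_info_t *dip)
{
    if (ddi_intr_hilevel(dip, 0) != 0) {
        /* Priority is above lock level; this driver bails out. */
        cmn_err(CE_WARN, "xx: high-level interrupt not supported");
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}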
static int
virtio_fixed_intr_setup(virtionet_state_t *sp, ddi_intr_handler_t inthandler)
{
    int rc;
    int nintr;
    uint_t pri;

    rc = ddi_intr_get_nintrs(sp->dip, DDI_INTR_TYPE_FIXED, &nintr);
    if (rc != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    ASSERT(nintr == 1);

    rc = ddi_intr_alloc(sp->dip, &sp->ihandle, DDI_INTR_TYPE_FIXED, 0, 1,
        &nintr, DDI_INTR_ALLOC_NORMAL);
    if (rc != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    ASSERT(nintr == 1);

    rc = ddi_intr_get_pri(sp->ihandle, &pri);
    if (rc != DDI_SUCCESS) {
        (void) ddi_intr_free(sp->ihandle);
        return (DDI_FAILURE);
    }

    /* Test for high level mutex */
    if (pri >= ddi_intr_get_hilevel_pri()) {
        cmn_err(CE_WARN, "Hi level interrupt not supported");
        (void) ddi_intr_free(sp->ihandle);
        return (DDI_FAILURE);
    }

    rc = ddi_intr_add_handler(sp->ihandle, inthandler, sp, NULL);
    if (rc != DDI_SUCCESS) {
        (void) ddi_intr_free(sp->ihandle);
        return (DDI_FAILURE);
    }

    rc = ddi_intr_enable(sp->ihandle);
    if (rc != DDI_SUCCESS) {
        (void) ddi_intr_remove_handler(sp->ihandle);
        (void) ddi_intr_free(sp->ihandle);
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}
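/*
 * A matching teardown sketch, assuming the setup above succeeded;
 * virtio_fixed_intr_teardown is a hypothetical helper name, not part of
 * the original driver.
 */
static void
virtio_fixed_intr_teardown(virtionet_state_t *sp)
{
    /* Undo ddi_intr_enable(), ddi_intr_add_handler() and ddi_intr_alloc(). */
    (void) ddi_intr_disable(sp->ihandle);
    (void) ddi_intr_remove_handler(sp->ihandle);
    (void) ddi_intr_free(sp->ihandle);
}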
/*
 * Autoconfiguration entry points.
 */
int
efe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    ddi_acc_handle_t pci;
    int types;
    int count;
    int actual;
    uint_t pri;
    efe_t *efep;
    mac_register_t *macp = NULL;

    switch (cmd) {
    case DDI_ATTACH:
        break;

    case DDI_RESUME:
        efep = ddi_get_driver_private(dip);
        return (efe_resume(efep));

    default:
        return (DDI_FAILURE);
    }

    /*
     * PCI configuration.
     */
    if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
        efe_error(dip, "unable to setup PCI configuration!");
        return (DDI_FAILURE);
    }

    pci_config_put16(pci, PCI_CONF_COMM,
        pci_config_get16(pci, PCI_CONF_COMM) | PCI_COMM_MAE | PCI_COMM_ME);

    pci_config_teardown(&pci);

    if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS ||
        !(types & DDI_INTR_TYPE_FIXED)) {
        efe_error(dip, "fixed interrupts not supported!");
        return (DDI_FAILURE);
    }

    if (ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &count) !=
        DDI_SUCCESS || count != 1) {
        efe_error(dip, "no fixed interrupts available!");
        return (DDI_FAILURE);
    }

    /*
     * Initialize soft state.
     */
    efep = kmem_zalloc(sizeof (efe_t), KM_SLEEP);
    ddi_set_driver_private(dip, efep);

    efep->efe_dip = dip;

    if (ddi_regs_map_setup(dip, 1, (caddr_t *)&efep->efe_regs, 0, 0,
        &efe_regs_acc_attr, &efep->efe_regs_acch) != DDI_SUCCESS) {
        efe_error(dip, "unable to setup register mapping!");
        goto failure;
    }

    efep->efe_rx_ring = efe_ring_alloc(efep->efe_dip, RXDESCL);
    if (efep->efe_rx_ring == NULL) {
        efe_error(efep->efe_dip, "unable to allocate rx ring!");
        goto failure;
    }

    efep->efe_tx_ring = efe_ring_alloc(efep->efe_dip, TXDESCL);
    if (efep->efe_tx_ring == NULL) {
        efe_error(efep->efe_dip, "unable to allocate tx ring!");
        goto failure;
    }

    if (ddi_intr_alloc(dip, &efep->efe_intrh, DDI_INTR_TYPE_FIXED, 0,
        count, &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS ||
        actual != count) {
        efe_error(dip, "unable to allocate fixed interrupt!");
        goto failure;
    }

    if (ddi_intr_get_pri(efep->efe_intrh, &pri) != DDI_SUCCESS ||
        pri >= ddi_intr_get_hilevel_pri()) {
        efe_error(dip, "unable to get valid interrupt priority!");
        goto failure;
    }

    mutex_init(&efep->efe_intrlock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(pri));
    mutex_init(&efep->efe_txlock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(pri));

    /*
     * Initialize device.
     */
    mutex_enter(&efep->efe_intrlock);
    mutex_enter(&efep->efe_txlock);

    efe_reset(efep);

    mutex_exit(&efep->efe_txlock);
    mutex_exit(&efep->efe_intrlock);

    /* Use factory address as default */
    efe_getaddr(efep, efep->efe_macaddr);

    /*
     * Enable the ISR.
     */
    if (ddi_intr_add_handler(efep->efe_intrh, efe_intr, efep, NULL) !=
        DDI_SUCCESS) {
        efe_error(dip, "unable to add interrupt handler!");
        goto failure;
    }

    if (ddi_intr_enable(efep->efe_intrh) != DDI_SUCCESS) {
        efe_error(dip, "unable to enable interrupt!");
        goto failure;
    }

    /*
     * Allocate MII resources.
     */
    if ((efep->efe_miih = mii_alloc(efep, dip, &efe_mii_ops)) == NULL) {
        efe_error(dip, "unable to allocate mii resources!");
        goto failure;
    }

    /*
     * Allocate MAC resources.
     */
    if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
        efe_error(dip, "unable to allocate mac resources!");
        goto failure;
    }

    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = efep;
    macp->m_dip = dip;
    macp->m_src_addr = efep->efe_macaddr;
    macp->m_callbacks = &efe_m_callbacks;
    macp->m_min_sdu = 0;
    macp->m_max_sdu = ETHERMTU;
    macp->m_margin = VLAN_TAGSZ;

    if (mac_register(macp, &efep->efe_mh) != 0) {
        efe_error(dip, "unable to register with mac!");
        goto failure;
    }
    mac_free(macp);

    ddi_report_dev(dip);

    return (DDI_SUCCESS);

failure:
    if (macp != NULL) {
        mac_free(macp);
    }

    if (efep->efe_miih != NULL) {
        mii_free(efep->efe_miih);
    }

    if (efep->efe_intrh != NULL) {
        (void) ddi_intr_disable(efep->efe_intrh);
        (void) ddi_intr_remove_handler(efep->efe_intrh);
        (void) ddi_intr_free(efep->efe_intrh);
    }

    mutex_destroy(&efep->efe_txlock);
    mutex_destroy(&efep->efe_intrlock);

    if (efep->efe_tx_ring != NULL) {
        efe_ring_free(&efep->efe_tx_ring);
    }
    if (efep->efe_rx_ring != NULL) {
        efe_ring_free(&efep->efe_rx_ring);
    }

    if (efep->efe_regs_acch != NULL) {
        ddi_regs_map_free(&efep->efe_regs_acch);
    }

    kmem_free(efep, sizeof (efe_t));

    return (DDI_FAILURE);
}
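/*
 * Generic handler sketch (not the actual efe_intr routine): it only
 * illustrates how the efe_intrlock initialized with DDI_INTR_PRI(pri)
 * above is taken from interrupt context. arg2 is unused here.
 */
static uint_t
efe_intr_sketch(caddr_t arg1, caddr_t arg2)
{
    efe_t *efep = (efe_t *)arg1;

    mutex_enter(&efep->efe_intrlock);
    /* Read and acknowledge the hardware interrupt status here. */
    mutex_exit(&efep->efe_intrlock);

    return (DDI_INTR_CLAIMED);
}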
/*
 * config_handler and vq_handlers may be allocated on stack.
 * Take precautions not to lose them.
 */
static int
virtio_register_intx(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
    int vq_handler_count;
    int actual;
    struct virtio_handler_container *vhc;
    int ret = DDI_FAILURE;

    /* Walk the handler table to get the number of handlers. */
    for (vq_handler_count = 0;
        vq_handlers && vq_handlers[vq_handler_count].vh_func;
        vq_handler_count++)
        ;

    vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
        sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);

    vhc->nhandlers = vq_handler_count;
    (void) memcpy(vhc->vq_handlers, vq_handlers,
        sizeof (struct virtio_int_handler) * vq_handler_count);

    if (config_handler != NULL) {
        (void) memcpy(&vhc->config_handler, config_handler,
            sizeof (struct virtio_int_handler));
    }

    /* Just a single entry for a single interrupt. */
    sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

    ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
        DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
    if (ret != DDI_SUCCESS) {
        dev_err(sc->sc_dev, CE_WARN,
            "Failed to allocate a fixed interrupt: %d", ret);
        goto out_int_alloc;
    }

    ASSERT(actual == 1);

    sc->sc_intr_num = 1;

    ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
    if (ret != DDI_SUCCESS) {
        dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
        goto out_prio;
    }

    ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
        virtio_intx_dispatch, sc, vhc);
    if (ret != DDI_SUCCESS) {
        dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
        goto out_add_handlers;
    }

    /* We know we are not using MSI, so set the config offset. */
    sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

    return (DDI_SUCCESS);

out_add_handlers:
out_prio:
    (void) ddi_intr_free(sc->sc_intr_htable[0]);
out_int_alloc:
    kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
    /* Free the container with the same size it was allocated with. */
    kmem_free(vhc, sizeof (struct virtio_handler_container) +
        sizeof (struct virtio_int_handler) * vq_handler_count);
    return (ret);
}
static int
virtio_register_msi(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[], int intr_types)
{
    int count, actual;
    int int_type;
    int i;
    int handler_count;
    int ret;

    /* If both MSI and MSI-x are reported, prefer MSI-x. */
    int_type = DDI_INTR_TYPE_MSI;
    if (intr_types & DDI_INTR_TYPE_MSIX)
        int_type = DDI_INTR_TYPE_MSIX;

    /* Walk the handler table to get the number of handlers. */
    for (handler_count = 0;
        vq_handlers && vq_handlers[handler_count].vh_func;
        handler_count++)
        ;

    /* +1 if there is a config change handler. */
    if (config_handler != NULL)
        handler_count++;

    /* Number of MSIs supported by the device. */
    ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
    if (ret != DDI_SUCCESS) {
        dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
        return (ret);
    }

    /*
     * Those who try to register more handlers than the device
     * supports shall suffer.
     */
    ASSERT(handler_count <= count);

    sc->sc_intr_htable = kmem_zalloc(
        sizeof (ddi_intr_handle_t) * handler_count, KM_SLEEP);

    ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
        handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
    if (ret != DDI_SUCCESS) {
        dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
        goto out_msi_alloc;
    }

    if (actual != handler_count) {
        dev_err(sc->sc_dev, CE_WARN,
            "Not enough MSI available: need %d, available %d",
            handler_count, actual);
        goto out_msi_available;
    }

    sc->sc_intr_num = handler_count;
    sc->sc_intr_config = B_FALSE;
    if (config_handler != NULL) {
        sc->sc_intr_config = B_TRUE;
    }

    /* Assume they are all same priority */
    ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
    if (ret != DDI_SUCCESS) {
        dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
        goto out_msi_prio;
    }

    /* Add the vq handlers */
    for (i = 0; vq_handlers[i].vh_func; i++) {
        ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
            vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
        if (ret != DDI_SUCCESS) {
            dev_err(sc->sc_dev, CE_WARN,
                "ddi_intr_add_handler failed");
            /* Remove the handlers that succeeded. */
            while (--i >= 0) {
                (void) ddi_intr_remove_handler(
                    sc->sc_intr_htable[i]);
            }
            goto out_add_handlers;
        }
    }

    /* Don't forget the config handler */
    if (config_handler != NULL) {
        ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
            config_handler->vh_func, sc, config_handler->vh_priv);
        if (ret != DDI_SUCCESS) {
            dev_err(sc->sc_dev, CE_WARN,
                "ddi_intr_add_handler failed");
            /* Remove the handlers that succeeded. */
            while (--i >= 0) {
                (void) ddi_intr_remove_handler(
                    sc->sc_intr_htable[i]);
            }
            goto out_add_handlers;
        }
    }

    /* We know we are using MSI, so set the config offset. */
    sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;

    ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
    /* Just in case. */
    if (ret != DDI_SUCCESS)
        sc->sc_intr_cap = 0;

    return (DDI_SUCCESS);

out_add_handlers:
out_msi_prio:
out_msi_available:
    for (i = 0; i < actual; i++)
        (void) ddi_intr_free(sc->sc_intr_htable[i]);
out_msi_alloc:
    /* Free the handle table with the same size it was allocated with. */
    kmem_free(sc->sc_intr_htable,
        sizeof (ddi_intr_handle_t) * handler_count);
    return (ret);
}
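/*
 * Sketch of how a caller might enable the vectors registered above,
 * honoring DDI_INTR_FLAG_BLOCK from sc_intr_cap. This is an assumed
 * helper (virtio_enable_msi_sketch), not code from the original file.
 */
static int
virtio_enable_msi_sketch(struct virtio_softc *sc)
{
    int i, ret;

    if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
        /* All vectors must be enabled (and disabled) as a block. */
        ret = ddi_intr_block_enable(sc->sc_intr_htable, sc->sc_intr_num);
        if (ret != DDI_SUCCESS)
            return (ret);
    } else {
        for (i = 0; i < sc->sc_intr_num; i++) {
            ret = ddi_intr_enable(sc->sc_intr_htable[i]);
            if (ret != DDI_SUCCESS) {
                /* Back out the vectors already enabled. */
                while (--i >= 0)
                    (void) ddi_intr_disable(sc->sc_intr_htable[i]);
                return (ret);
            }
        }
    }

    return (DDI_SUCCESS);
}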
/**
 * Sets IRQ for VMMDev.
 *
 * @returns Solaris error code.
 * @param   pDip    Pointer to the device info structure.
 */
static int VBoxGuestSolarisAddIRQ(dev_info_t *pDip)
{
    LogFlow((DEVICE_NAME "::AddIRQ: pDip=%p\n", pDip));

    int IntrType = 0;
    int rc = ddi_intr_get_supported_types(pDip, &IntrType);
    if (rc == DDI_SUCCESS)
    {
        /* We won't need to bother about MSIs. */
        if (IntrType & DDI_INTR_TYPE_FIXED)
        {
            int IntrCount = 0;
            rc = ddi_intr_get_nintrs(pDip, IntrType, &IntrCount);
            if (   rc == DDI_SUCCESS
                && IntrCount > 0)
            {
                int IntrAvail = 0;
                rc = ddi_intr_get_navail(pDip, IntrType, &IntrAvail);
                if (   rc == DDI_SUCCESS
                    && IntrAvail > 0)
                {
                    /* Allocate kernel memory for the interrupt handles. The allocation size is stored internally. */
                    g_pIntr = RTMemAlloc(IntrCount * sizeof(ddi_intr_handle_t));
                    if (g_pIntr)
                    {
                        int IntrAllocated;
                        rc = ddi_intr_alloc(pDip, g_pIntr, IntrType, 0, IntrCount, &IntrAllocated, DDI_INTR_ALLOC_NORMAL);
                        if (   rc == DDI_SUCCESS
                            && IntrAllocated > 0)
                        {
                            g_cIntrAllocated = IntrAllocated;
                            uint_t uIntrPriority;
                            rc = ddi_intr_get_pri(g_pIntr[0], &uIntrPriority);
                            if (rc == DDI_SUCCESS)
                            {
                                /* Initialize the mutex. */
                                mutex_init(&g_IrqMtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uIntrPriority));

                                /* Assign interrupt handler functions and enable interrupts. */
                                for (int i = 0; i < IntrAllocated; i++)
                                {
                                    rc = ddi_intr_add_handler(g_pIntr[i], (ddi_intr_handler_t *)VBoxGuestSolarisISR,
                                                              NULL /* No Private Data */, NULL);
                                    if (rc == DDI_SUCCESS)
                                        rc = ddi_intr_enable(g_pIntr[i]);
                                    if (rc != DDI_SUCCESS)
                                    {
                                        /* Changing local IntrAllocated to hold so-far allocated handles for freeing. */
                                        IntrAllocated = i;
                                        break;
                                    }
                                }
                                if (rc == DDI_SUCCESS)
                                    return rc;

                                /* Remove any assigned handlers */
                                LogRel((DEVICE_NAME ":failed to assign IRQs allocated=%d\n", IntrAllocated));
                                for (int x = 0; x < IntrAllocated; x++)
                                    ddi_intr_remove_handler(g_pIntr[x]);
                            }
                            else
                                LogRel((DEVICE_NAME "::AddIRQ: failed to get priority of interrupt. rc=%d\n", rc));

                            /* Remove allocated IRQs, too bad we can free only one handle at a time. */
                            for (int k = 0; k < g_cIntrAllocated; k++)
                                ddi_intr_free(g_pIntr[k]);
                        }
                        else
                            LogRel((DEVICE_NAME "::AddIRQ: failed to allocate IRQs. count=%d\n", IntrCount));
                        RTMemFree(g_pIntr);
                    }
                    else
                        LogRel((DEVICE_NAME "::AddIRQ: failed to allocate memory for IRQ handles. count=%d\n", IntrCount));
                }
                else
                    LogRel((DEVICE_NAME "::AddIRQ: failed to get or insufficient available IRQs. rc=%d IntrAvail=%d\n",
                            rc, IntrAvail));
            }
            else
                LogRel((DEVICE_NAME "::AddIRQ: failed to get or insufficient number of IRQs. rc=%d IntrCount=%d\n",
                        rc, IntrCount));
        }
        else
            LogRel((DEVICE_NAME "::AddIRQ: invalid irq type. IntrType=%#x\n", IntrType));
    }
    else
        LogRel((DEVICE_NAME "::AddIRQ: failed to get supported interrupt types\n"));
    return rc;
}
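/*
 * Hedged teardown sketch mirroring VBoxGuestSolarisAddIRQ() above; the
 * shipped RemoveIRQ code may differ. It disables, detaches and frees every
 * allocated handle before dropping the mutex and the handle array.
 */
static void VBoxGuestSolarisRemoveIRQSketch(dev_info_t *pDip)
{
    NOREF(pDip);
    for (int i = 0; i < g_cIntrAllocated; i++)
    {
        int rc = ddi_intr_disable(g_pIntr[i]);
        if (rc == DDI_SUCCESS)
            rc = ddi_intr_remove_handler(g_pIntr[i]);
        ddi_intr_free(g_pIntr[i]);
    }
    g_cIntrAllocated = 0;
    RTMemFree(g_pIntr);
    g_pIntr = NULL;
    mutex_destroy(&g_IrqMtx);
}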
/**
 * Sets IRQ for VMMDev.
 *
 * @returns Solaris error code.
 * @param   pDip    Pointer to the device info structure.
 */
static int vgdrvSolarisAddIRQ(dev_info_t *pDip)
{
    LogFlow(("vgdrvSolarisAddIRQ: pDip=%p\n", pDip));

    /* Get the types of interrupt supported for this hardware. */
    int fIntrType = 0;
    int rc = ddi_intr_get_supported_types(pDip, &fIntrType);
    if (rc == DDI_SUCCESS)
    {
        /* We only support fixed interrupts at this point, not MSIs. */
        if (fIntrType & DDI_INTR_TYPE_FIXED)
        {
            /* Verify the number of interrupts supported by this device. There can only be one fixed interrupt. */
            int cIntrCount = 0;
            rc = ddi_intr_get_nintrs(pDip, fIntrType, &cIntrCount);
            if (   rc == DDI_SUCCESS
                && cIntrCount == 1)
            {
                /* Allocate kernel memory for the interrupt handle. The allocation size is stored internally. */
                g_pahIntrs = RTMemAllocZ(cIntrCount * sizeof(ddi_intr_handle_t));
                if (g_pahIntrs)
                {
                    /* Allocate the interrupt for this device and verify the allocation. */
                    int cIntrAllocated;
                    rc = ddi_intr_alloc(pDip, g_pahIntrs, fIntrType, 0 /* interrupt number */, cIntrCount, &cIntrAllocated,
                                        DDI_INTR_ALLOC_NORMAL);
                    if (   rc == DDI_SUCCESS
                        && cIntrAllocated == 1)
                    {
                        /* Get the interrupt priority assigned by the system. */
                        uint_t uIntrPriority;
                        rc = ddi_intr_get_pri(g_pahIntrs[0], &uIntrPriority);
                        if (rc == DDI_SUCCESS)
                        {
                            /* Check if the interrupt priority is scheduler level or above; if so we need to use high-level
                               and low-level interrupt handlers with corresponding mutexes. */
                            cmn_err(CE_CONT, "!vboxguest: uIntrPriority=%d hilevel_pri=%d\n", uIntrPriority,
                                    ddi_intr_get_hilevel_pri());
                            if (uIntrPriority >= ddi_intr_get_hilevel_pri())
                            {
                                /* Initialize the high-level mutex. */
                                mutex_init(&g_HighLevelIrqMtx, NULL /* pszDesc */, MUTEX_DRIVER, DDI_INTR_PRI(uIntrPriority));

                                /* Assign interrupt handler function to the interrupt handle. */
                                rc = ddi_intr_add_handler(g_pahIntrs[0], (ddi_intr_handler_t *)&vgdrvSolarisHighLevelISR,
                                                          NULL /* pvArg1 */, NULL /* pvArg2 */);
                                if (rc == DDI_SUCCESS)
                                {
                                    /* Add the low-level interrupt handler. */
                                    rc = ddi_intr_add_softint(pDip, &g_hSoftIntr, DDI_INTR_SOFTPRI_MAX,
                                                              (ddi_intr_handler_t *)&vgdrvSolarisISR, NULL /* pvArg1 */);
                                    if (rc == DDI_SUCCESS)
                                    {
                                        /* Initialize the low-level mutex at the corresponding level. */
                                        mutex_init(&g_IrqMtx, NULL /* pszDesc */, MUTEX_DRIVER,
                                                   DDI_INTR_PRI(DDI_INTR_SOFTPRI_MAX));

                                        g_fSoftIntRegistered = true;
                                        /* Enable the high-level interrupt. */
                                        rc = ddi_intr_enable(g_pahIntrs[0]);
                                        if (rc == DDI_SUCCESS)
                                            return rc;

                                        LogRel((DEVICE_NAME "::AddIRQ: failed to enable interrupt. rc=%d\n", rc));
                                        mutex_destroy(&g_IrqMtx);
                                    }
                                    else
                                        LogRel((DEVICE_NAME "::AddIRQ: failed to add soft interrupt handler. rc=%d\n", rc));

                                    ddi_intr_remove_handler(g_pahIntrs[0]);
                                }
                                else
                                    LogRel((DEVICE_NAME "::AddIRQ: failed to add high-level interrupt handler. rc=%d\n", rc));

                                mutex_destroy(&g_HighLevelIrqMtx);
                            }
                            else
                            {
                                /* Interrupt handler runs at reschedulable level, initialize the mutex at the given priority. */
                                mutex_init(&g_IrqMtx, NULL /* pszDesc */, MUTEX_DRIVER, DDI_INTR_PRI(uIntrPriority));

                                /* Assign interrupt handler function to the interrupt handle. */
                                rc = ddi_intr_add_handler(g_pahIntrs[0], (ddi_intr_handler_t *)vgdrvSolarisISR,
                                                          NULL /* pvArg1 */, NULL /* pvArg2 */);
                                if (rc == DDI_SUCCESS)
                                {
                                    /* Enable the interrupt. */
                                    rc = ddi_intr_enable(g_pahIntrs[0]);
                                    if (rc == DDI_SUCCESS)
                                        return rc;

                                    LogRel((DEVICE_NAME "::AddIRQ: failed to enable interrupt. rc=%d\n", rc));
                                    mutex_destroy(&g_IrqMtx);
                                }
                            }
                        }
                        else
                            LogRel((DEVICE_NAME "::AddIRQ: failed to get priority of interrupt. rc=%d\n", rc));

                        Assert(cIntrAllocated == 1);
                        ddi_intr_free(g_pahIntrs[0]);
                    }
                    else
                        LogRel((DEVICE_NAME "::AddIRQ: failed to allocate IRQs. count=%d\n", cIntrCount));
                    RTMemFree(g_pahIntrs);
                }
                else
                    LogRel((DEVICE_NAME "::AddIRQ: failed to allocate memory for IRQ handles. count=%d\n", cIntrCount));
            }
            else
                LogRel((DEVICE_NAME "::AddIRQ: failed to get or insufficient number of IRQs. rc=%d cIntrCount=%d\n",
                        rc, cIntrCount));
        }
        else
            LogRel((DEVICE_NAME "::AddIRQ: fixed-type interrupts not supported. IntrType=%#x\n", fIntrType));
    }
    else
        LogRel((DEVICE_NAME "::AddIRQ: failed to get supported interrupt types. rc=%d\n", rc));
    return rc;
}
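/*
 * Hedged sketch (not the shipped VirtualBox code) of the split-level pattern
 * configured above: the high-level ISR only posts the soft interrupt, and the
 * real work runs in vgdrvSolarisISR at DDI_INTR_SOFTPRI_MAX.
 * vgdrvSolarisCheckAndClearIrqSketch is a hypothetical device-status check.
 */
static uint_t vgdrvSolarisHighLevelISRSketch(caddr_t pvArg1, caddr_t pvArg2)
{
    bool fOurIrq;

    /* Only touch the device under the high-level mutex. */
    mutex_enter(&g_HighLevelIrqMtx);
    fOurIrq = vgdrvSolarisCheckAndClearIrqSketch();
    mutex_exit(&g_HighLevelIrqMtx);
    if (!fOurIrq)
        return DDI_INTR_UNCLAIMED;

    /* Defer the real work to the soft interrupt registered in AddIRQ. */
    (void) ddi_intr_trigger_softint(g_hSoftIntr, NULL /* pvArg2 */);
    return DDI_INTR_CLAIMED;
}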
extern uint32_t
emlxs_event_queue_create(emlxs_hba_t *hba)
{
    emlxs_event_queue_t *eventq = &EVENTQ;
    char buf[40];
#ifdef MSI_SUPPORT
    ddi_intr_handle_t handle;
    uint32_t intr_pri;
    int32_t actual;
    uint32_t ret;
#endif /* MSI_SUPPORT */
    ddi_iblock_cookie_t iblock;

    /* Clear the queue */
    bzero(eventq, sizeof (emlxs_event_queue_t));

    /* Initialize */
    (void) sprintf(buf, "?%s%d_evt_lock control variable", DRIVER_NAME,
        hba->ddiinst);
    cv_init(&eventq->lock_cv, buf, CV_DRIVER, NULL);

    (void) sprintf(buf, "?%s%d_evt_lock mutex", DRIVER_NAME, hba->ddiinst);

    if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
        /* Get the current interrupt block cookie */
        (void) ddi_get_iblock_cookie(hba->dip, (uint_t)EMLXS_INUMBER,
            &iblock);

        /* Create the mutex lock */
        mutex_init(&eventq->lock, buf, MUTEX_DRIVER, (void *)iblock);
    }
#ifdef MSI_SUPPORT
    else {
        /* Allocate a temporary interrupt handle */
        actual = 0;
        ret = ddi_intr_alloc(hba->dip, &handle, DDI_INTR_TYPE_FIXED,
            EMLXS_MSI_INUMBER, 1, &actual, DDI_INTR_ALLOC_NORMAL);

        if (ret != DDI_SUCCESS || actual == 0) {
            cmn_err(CE_WARN,
                "?%s%d: Unable to allocate temporary interrupt "
                "handle. ret=%d actual=%d", DRIVER_NAME,
                hba->ddiinst, ret, actual);

            bzero(eventq, sizeof (emlxs_event_queue_t));
            return (0);
        }

        /* Get the current interrupt priority */
        ret = ddi_intr_get_pri(handle, &intr_pri);

        if (ret != DDI_SUCCESS) {
            cmn_err(CE_WARN,
                "?%s%d: Unable to get interrupt priority. ret=%d",
                DRIVER_NAME, hba->ddiinst, ret);

            /* Free the temporary handle before bailing out */
            (void) ddi_intr_free(handle);

            bzero(eventq, sizeof (emlxs_event_queue_t));
            return (0);
        }

        /* Create the log mutex lock */
        mutex_init(&eventq->lock, buf, MUTEX_DRIVER,
            (void *)((unsigned long)intr_pri));

        /* Free the temporary handle */
        (void) ddi_intr_free(handle);
    }
#endif

    return (1);

} /* emlxs_event_queue_create() */
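/*
 * Minimal matching teardown sketch (hypothetical helper name, not the
 * driver's actual destroy path): release the mutex and condition variable
 * created above.
 */
static void
emlxs_event_queue_destroy_sketch(emlxs_hba_t *hba)
{
    emlxs_event_queue_t *eventq = &EVENTQ;

    mutex_destroy(&eventq->lock);
    cv_destroy(&eventq->lock_cv);
}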
int
ddi_add_intr(dev_info_t *dip, uint_t inumber,
    ddi_iblock_cookie_t *iblock_cookiep,
    ddi_idevice_cookie_t *idevice_cookiep,
    uint_t (*int_handler)(caddr_t int_handler_arg),
    caddr_t int_handler_arg)
{
    ddi_intr_handle_t *hdl_p;
    size_t hdl_sz;
    int actual, ret;
    uint_t pri;

    DDI_INTR_APIDBG((CE_CONT, "ddi_add_intr: name=%s%d dip=0x%p "
        "inum=0x%x\n", ddi_driver_name(dip), ddi_get_instance(dip),
        (void *)dip, inumber));

    hdl_sz = sizeof (ddi_intr_handle_t) * (inumber + 1);
    hdl_p = kmem_zalloc(hdl_sz, KM_SLEEP);

    if ((ret = ddi_intr_alloc(dip, hdl_p, DDI_INTR_TYPE_FIXED,
        inumber, 1, &actual, DDI_INTR_ALLOC_NORMAL)) != DDI_SUCCESS) {
        DDI_INTR_APIDBG((CE_CONT, "ddi_add_intr: "
            "ddi_intr_alloc failed, ret 0x%x\n", ret));
        kmem_free(hdl_p, hdl_sz);
        return (DDI_INTR_NOTFOUND);
    }

    if ((ret = ddi_intr_get_pri(hdl_p[inumber], &pri)) != DDI_SUCCESS) {
        DDI_INTR_APIDBG((CE_CONT, "ddi_add_intr: "
            "ddi_intr_get_pri failed, ret 0x%x\n", ret));
        (void) ddi_intr_free(hdl_p[inumber]);
        kmem_free(hdl_p, hdl_sz);
        return (DDI_FAILURE);
    }

    if ((ret = ddi_intr_add_handler(hdl_p[inumber], (ddi_intr_handler_t *)
        int_handler, int_handler_arg, NULL)) != DDI_SUCCESS) {
        DDI_INTR_APIDBG((CE_CONT, "ddi_add_intr: "
            "ddi_intr_add_handler failed, ret 0x%x\n", ret));
        (void) ddi_intr_free(hdl_p[inumber]);
        kmem_free(hdl_p, hdl_sz);
        return (DDI_FAILURE);
    }

    if ((ret = ddi_intr_enable(hdl_p[inumber])) != DDI_SUCCESS) {
        DDI_INTR_APIDBG((CE_CONT, "ddi_add_intr: "
            "ddi_intr_enable failed, ret 0x%x\n", ret));
        (void) ddi_intr_remove_handler(hdl_p[inumber]);
        (void) ddi_intr_free(hdl_p[inumber]);
        kmem_free(hdl_p, hdl_sz);
        return (DDI_FAILURE);
    }

    if (iblock_cookiep)
        *iblock_cookiep = (ddi_iblock_cookie_t)(uintptr_t)pri;

    if (idevice_cookiep) {
        idevice_cookiep->idev_vector = 0;
        idevice_cookiep->idev_priority = pri;
    }

    kmem_free(hdl_p, hdl_sz);

    return (DDI_SUCCESS);
}
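/*
 * Caller sketch for the legacy interface above, assuming a fixed interrupt
 * at inumber 0; xx_intr, xx_state_t and xx_lock are hypothetical
 * placeholders.
 */
static int
xx_legacy_attach_intr(dev_info_t *dip, xx_state_t *xsp)
{
    ddi_iblock_cookie_t iblk;

    if (ddi_add_intr(dip, 0, &iblk, NULL, xx_intr,
        (caddr_t)xsp) != DDI_SUCCESS)
        return (DDI_FAILURE);

    /* The returned cookie carries the priority for mutex_init(). */
    mutex_init(&xsp->xx_lock, NULL, MUTEX_DRIVER, (void *)iblk);

    return (DDI_SUCCESS);
}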