/*
 * Try to allocate MSI-X vectors for "nvectors" virtqueues plus one
 * extra vector reserved for configuration-change interrupts.
 * Records the vector count in vtpci_nmsix_resources on success.
 * Returns 0 on success, 1 on any failure.
 */
static int
vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
{
	device_t dev = sc->vtpci_dev;
	int needed, avail, got;

	/* Allocate an additional vector for the config changes. */
	needed = nvectors + 1;

	avail = pci_msix_count(dev);
	if (avail < needed)
		return (1);

	got = needed;
	if (pci_alloc_msix(dev, &got) != 0 || got < needed) {
		/* Release whatever partial allocation may remain. */
		pci_release_msi(dev);
		return (1);
	}

	sc->vtpci_nmsix_resources = needed;
	return (0);
}
void isci_interrupt_setup(struct isci_softc *isci) { uint8_t max_msix_messages = SCI_MAX_MSIX_MESSAGES_PER_CONTROLLER * isci->controller_count; BOOL use_msix = FALSE; uint32_t force_legacy_interrupts = 0; TUNABLE_INT_FETCH("hw.isci.force_legacy_interrupts", &force_legacy_interrupts); if (!force_legacy_interrupts && pci_msix_count(isci->device) >= max_msix_messages) { isci->num_interrupts = max_msix_messages; pci_alloc_msix(isci->device, &isci->num_interrupts); if (isci->num_interrupts == max_msix_messages) use_msix = TRUE; } if (use_msix == TRUE) isci_interrupt_setup_msix(isci); else isci_interrupt_setup_legacy(isci); }
/*
 * Allocate and set up the controller interrupt.
 *
 * MSI-X support is currently compiled out (see the #if 0 below); the
 * driver instead allocates a single MSI or legacy interrupt via the
 * DragonFly pci_alloc_1intr() helper and wires mps_intr() to it.
 *
 * Returns 0 on success, ENXIO if no IRQ resource could be allocated,
 * or the bus_setup_intr() error.
 */
int
mps_pci_setup_interrupts(struct mps_softc *sc)
{
	device_t dev;
	int error;
	u_int irq_flags;

	dev = sc->mps_dev;
#if 0 /* XXX swildner */
	if ((sc->disable_msix == 0) &&
	    ((msgs = pci_msix_count(dev)) >= MPS_MSI_COUNT))
		error = mps_alloc_msix(sc, MPS_MSI_COUNT);
#endif

	/* Single MSI (if enabled by sc->enable_msi) or legacy INTx. */
	sc->mps_irq_rid[0] = 0;
	sc->mps_irq_type[0] = pci_alloc_1intr(dev, sc->enable_msi,
	    &sc->mps_irq_rid[0], &irq_flags);

	sc->mps_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->mps_irq_rid[0], irq_flags);
	if (sc->mps_irq[0] == NULL) {
		device_printf(dev, "Cannot allocate interrupt\n");
		return (ENXIO);
	}
	error = bus_setup_intr(dev, sc->mps_irq[0], INTR_MPSAFE, mps_intr,
	    sc, &sc->mps_intrhand[0], NULL);
	if (error)
		device_printf(dev, "Cannot setup interrupt\n");

	return (error);
}
/*
 * Probe for message-signalled interrupt support.
 *
 * CISS_MSI_COUNT vectors are required; MSI-X is preferred, with plain
 * MSI as a fallback if MSI-X allocation fails.  On success, the number
 * of allocated vectors is recorded in ciss_msi and the per-vector
 * resource IDs (which start at 1 for message interrupts) are stored in
 * ciss_irq_rid[].
 *
 * Returns 0 on success or EINVAL if neither interrupt type can supply
 * enough vectors, or if the board is quirked as unable to do MSI.
 */
static int
ciss_setup_msix(struct ciss_softc *sc)
{
	int i, count, error;

	/* Board-specific quirk: some controllers cannot use MSI at all. */
	i = ciss_lookup(sc->ciss_dev);
	if (ciss_vendor_data[i].flags & CISS_BOARD_NOMSI)
		return (EINVAL);

	/*
	 * If the MSI-X table is too small, make sure plain MSI can
	 * provide enough vectors before continuing.
	 */
	count = pci_msix_count(sc->ciss_dev);
	if (count < CISS_MSI_COUNT) {
		count = pci_msi_count(sc->ciss_dev);
		if (count < CISS_MSI_COUNT)
			return (EINVAL);
	}
	count = MIN(count, CISS_MSI_COUNT);

	/* Try MSI-X first; fall back to MSI if that fails. */
	error = pci_alloc_msix(sc->ciss_dev, &count);
	if (error) {
		error = pci_alloc_msi(sc->ciss_dev, &count);
		if (error)
			return (EINVAL);
	}

	sc->ciss_msi = count;
	/* Message-signalled interrupt resource IDs start at 1. */
	for (i = 0; i < count; i++)
		sc->ciss_irq_rid[i] = i + 1;

	return (0);
}
/*
 * Allocate and configure doorbell interrupts.
 *
 * Attempts to allocate one MSI-X vector per doorbell bit (capped by
 * the hardware MSI-X count).  On a partial allocation, MSI-X table
 * remapping is attempted via ntb_remap_msix(); if that also fails, the
 * driver degrades to a single vector.  On XEON, an ENOSPC result from
 * the MSI-X setup triggers a final fallback to a legacy interrupt.
 */
static int
ntb_setup_interrupts(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	uint64_t mask;
	int rc;

	ntb->allocated_interrupts = 0;

	/*
	 * On SOC, disable all interrupts.  On XEON, disable all but Link
	 * Interrupt.  The rest will be unmasked as callbacks are
	 * registered.
	 */
	mask = 0;
	if (ntb->type == NTB_XEON)
		mask = (1 << XEON_LINK_DB);
	db_iowrite(ntb, ntb->reg_ofs.ldb_mask, ~mask);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->limits.max_db_bits);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

		/* Debug knob: force the remap path even on full success. */
		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			/* Partial allocation: try remapping the table. */
			rc = ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	ntb_create_callbacks(ntb, num_vectors);

	if (ntb->type == NTB_XEON)
		rc = ntb_setup_xeon_msix(ntb, num_vectors);
	else
		rc = ntb_setup_soc_msix(ntb, num_vectors);
	if (rc != 0)
		device_printf(ntb->device,
		    "Error allocating MSI-X interrupts: %d\n", rc);

	/* XEON only: fall back to legacy INTx if MSI-X ran out. */
	if (ntb->type == NTB_XEON && rc == ENOSPC)
		rc = ntb_setup_legacy_interrupt(ntb);

	return (rc);
}
/*
 * Allocate and set up controller interrupts.
 *
 * Tries MSI-X first (unless disabled), then MSI, each requiring at
 * least MPR_MSI_COUNT messages.  If both fail, a shared legacy INTx
 * interrupt is used.  Message interrupts use resource IDs starting at
 * 1; each vector is wired to mpr_intr_msi(), while the INTx path uses
 * mpr_intr().
 *
 * Returns 0 on success, ENXIO on resource allocation failure, or the
 * bus_setup_intr() error.
 */
int
mpr_pci_setup_interrupts(struct mpr_softc *sc)
{
	device_t dev;
	int i, error, msgs;

	dev = sc->mpr_dev;
	error = ENXIO;

	/* Prefer MSI-X, fall back to MSI, each needing MPR_MSI_COUNT. */
	if ((sc->disable_msix == 0) &&
	    ((msgs = pci_msix_count(dev)) >= MPR_MSI_COUNT))
		error = mpr_alloc_msix(sc, MPR_MSI_COUNT);
	if ((error != 0) && (sc->disable_msi == 0) &&
	    ((msgs = pci_msi_count(dev)) >= MPR_MSI_COUNT))
		error = mpr_alloc_msi(sc, MPR_MSI_COUNT);

	if (error != 0) {
		/* Neither message type available: shared legacy INTx. */
		sc->mpr_flags |= MPR_FLAGS_INTX;
		sc->mpr_irq_rid[0] = 0;
		sc->mpr_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->mpr_irq_rid[0], RF_SHAREABLE | RF_ACTIVE);
		if (sc->mpr_irq[0] == NULL) {
			mpr_printf(sc, "Cannot allocate INTx interrupt\n");
			return (ENXIO);
		}
		error = bus_setup_intr(dev, sc->mpr_irq[0],
		    INTR_TYPE_BIO | INTR_MPSAFE, NULL, mpr_intr, sc,
		    &sc->mpr_intrhand[0]);
		if (error)
			mpr_printf(sc, "Cannot setup INTx interrupt\n");
	} else {
		/* Message interrupts: one handler per vector, rids 1..N. */
		sc->mpr_flags |= MPR_FLAGS_MSI;
		for (i = 0; i < MPR_MSI_COUNT; i++) {
			sc->mpr_irq_rid[i] = i + 1;
			sc->mpr_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &sc->mpr_irq_rid[i], RF_ACTIVE);
			if (sc->mpr_irq[i] == NULL) {
				mpr_printf(sc,
				    "Cannot allocate MSI interrupt\n");
				return (ENXIO);
			}
			error = bus_setup_intr(dev, sc->mpr_irq[i],
			    INTR_TYPE_BIO | INTR_MPSAFE, NULL, mpr_intr_msi,
			    sc, &sc->mpr_intrhand[i]);
			if (error) {
				mpr_printf(sc,
				    "Cannot setup MSI interrupt %d\n", i);
				break;
			}
		}
	}
	return (error);
}
/*
 * Allocate MSI-X vectors for the device.
 *
 * First try the ideal configuration: one vector per virtqueue plus one
 * for configuration changes.  If the device cannot supply that many,
 * fall back to a shared configuration: one vector for config changes
 * and one shared by all virtqueues (VIRTIO_PCI_FLAG_SHARED_MSIX).
 *
 * On success, records the vector count in vtpci_nintr_res, sets
 * VIRTIO_PCI_FLAG_MSIX and returns 0.  Returns 1 on failure.
 */
static int
vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors)
{
	device_t dev;
	int nmsix, cnt, required;

	dev = sc->vtpci_dev;

	nmsix = pci_msix_count(dev);
	if (nmsix < 1)
		return (1);

	/* An additional vector is needed for the config changes. */
	required = nvectors + 1;
	if (nmsix >= required) {
		cnt = required;
		if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required)
			goto out;
		/* Partial allocation is unusable; release it. */
		pci_release_msi(dev);
	}

	/* Attempt shared MSIX configuration. */
	required = 2;
	if (nmsix >= required) {
		cnt = required;
		if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
			sc->vtpci_flags |= VIRTIO_PCI_FLAG_SHARED_MSIX;
			goto out;
		}
		pci_release_msi(dev);
	}

	return (1);

out:
	/* "required" holds the vector count of whichever mode succeeded. */
	sc->vtpci_nintr_res = required;
	sc->vtpci_flags |= VIRTIO_PCI_FLAG_MSIX;

	if (bootverbose) {
		if (sc->vtpci_flags & VIRTIO_PCI_FLAG_SHARED_MSIX)
			device_printf(dev, "using shared virtqueue MSIX\n");
		else
			device_printf(dev, "using per virtqueue MSIX\n");
	}
	return (0);
}
static int sfxge_intr_setup_msix(struct sfxge_softc *sc) { struct sfxge_intr *intr; struct resource *resp; device_t dev; int count; int rid; dev = sc->dev; intr = &sc->intr; /* Check if MSI-X is available. */ count = pci_msix_count(dev); if (count == 0) return (EINVAL); /* Limit the number of interrupts to the number of CPUs. */ if (count > mp_ncpus) count = mp_ncpus; /* Not very likely these days... */ if (count > EFX_MAXRSS) count = EFX_MAXRSS; rid = PCIR_BAR(4); resp = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (resp == NULL) return (ENOMEM); if (pci_alloc_msix(dev, &count) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, rid, resp); return (ENOMEM); } /* Allocate interrupt handlers. */ if (sfxge_intr_alloc(sc, count) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, rid, resp); pci_release_msi(dev); return (ENOMEM); } intr->type = EFX_INTR_MESSAGE; intr->n_alloc = count; intr->msix_res = resp; return (0); }
/*
 * Try to switch the device to MSI-X operation.
 *
 * The vector count starts at the hardware maximum and is limited by
 * the previously estimated event-queue maximum (sc->evq_max).  BAR 4
 * (which holds the MSI-X table) is mapped before allocating vectors.
 *
 * Returns 0 on success, EINVAL when the device has no MSI-X
 * capability, or ENOMEM on allocation failure (all intermediate
 * resources are released on the error paths).
 */
static int
sfxge_intr_setup_msix(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	struct resource *resp;
	device_t dev;
	int count;
	int rid;

	dev = sc->dev;
	intr = &sc->intr;

	/* Check if MSI-X is available. */
	count = pci_msix_count(dev);
	if (count == 0)
		return (EINVAL);

	/* Do not try to allocate more than already estimated EVQ maximum */
	KASSERT(sc->evq_max > 0, ("evq_max is zero"));
	count = MIN(count, sc->evq_max);

	/* Map the BAR holding the MSI-X table. */
	rid = PCIR_BAR(4);
	resp = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (resp == NULL)
		return (ENOMEM);

	if (pci_alloc_msix(dev, &count) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
		return (ENOMEM);
	}

	/* Allocate interrupt handlers. */
	if (sfxge_intr_alloc(sc, count) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
		pci_release_msi(dev);
		return (ENOMEM);
	}

	intr->type = EFX_INTR_MESSAGE;
	intr->n_alloc = count;
	intr->msix_res = resp;

	return (0);
}
/*
 * Allocate exactly "count" MSI-X interrupt handles.
 *
 * Used by device drivers in the same way as pci_intr_map().  "ihps"
 * receives the array of vector handles used in place of IRQ numbers.
 * Unlike pci_msix_alloc(), the requested count is never reduced.
 *
 * Returns 0 on success, ENODEV when the device has no MSI-X
 * capability, or EINVAL when the request exceeds the hardware maximum
 * or the interrupt sources cannot be allocated.
 */
int
pci_msix_alloc_exact(const struct pci_attach_args *pa,
    pci_intr_handle_t **ihps, int count)
{
	int supported;

	KASSERT(count > 0);

	supported = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (supported == 0)
		return ENODEV;

	if (count > supported) {
		DPRINTF(("over hardware max MSI-X count %d\n", supported));
		return EINVAL;
	}

	return x86_pci_msix_alloc_exact(ihps, count, pa);
}
/*
 * Allocate up to *count MSI-X interrupt handles.
 *
 * Used by device drivers in the same way as pci_intr_map().  "ihps"
 * receives the array of vector handles used in place of IRQ numbers.
 * *count may be reduced to the hardware maximum (or to however many
 * interrupt sources could actually be allocated); it must be positive
 * on entry.
 *
 * Returns 0 on success, ENODEV when the device has no MSI-X
 * capability, or a non-zero error when nothing could be allocated.
 */
int
pci_msix_alloc(const struct pci_attach_args *pa, pci_intr_handle_t **ihps,
    int *count)
{
	int avail;

	KASSERT(*count > 0);

	avail = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (avail == 0)
		return ENODEV;

	if (*count > avail) {
		DPRINTF(("cut off MSI-X count to %d\n", avail));
		*count = avail; /* clamp to the hardware maximum */
	}

	return x86_pci_msix_alloc(ihps, count, pa);
}
/* * Interrupt setup and handlers */ static int ioat_setup_intr(struct ioat_softc *ioat) { uint32_t num_vectors; int error; boolean_t use_msix; boolean_t force_legacy_interrupts; use_msix = FALSE; force_legacy_interrupts = FALSE; if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) { num_vectors = 1; pci_alloc_msix(ioat->device, &num_vectors); if (num_vectors == 1) use_msix = TRUE; } if (use_msix) { ioat->rid = 1; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_ACTIVE); } else { ioat->rid = 0; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_SHAREABLE | RF_ACTIVE); } if (ioat->res == NULL) { ioat_log_message(0, "bus_alloc_resource failed\n"); return (ENOMEM); } ioat->tag = NULL; error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag); if (error != 0) { ioat_log_message(0, "bus_setup_intr failed\n"); return (error); } ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN); return (0); }
/*
 * This function is used by device drivers like pci_intr_map().
 * Furthermore, this function can map each handle to a MSI-X table index.
 *
 * "ihps" is the array of vector numbers which MSI-X used instead of
 * IRQ number.
 * "count" can not decrease.
 * "map" size must be equal to "count".
 * If "count" struct intrsource cannot be allocated, return non-zero
 * value.
 * e.g.
 *     If "map" = { 1, 4, 0 },
 *     1st handle is bound to MSI-X index 1
 *     2nd handle is bound to MSI-X index 4
 *     3rd handle is bound to MSI-X index 0
 */
int
pci_msix_alloc_map(const struct pci_attach_args *pa, pci_intr_handle_t **ihps,
    u_int *table_indexes, int count)
{
	int hw_max, i, j;

	KASSERT(count > 0);

	hw_max = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (hw_max == 0)
		return ENODEV;

	if (count > hw_max) {
		DPRINTF(("over hardware max MSI-X count %d\n", hw_max));
		return EINVAL;
	}

	/*
	 * Validate the requested table indexes: each must be within the
	 * hardware range and no index may appear twice (the O(n^2) scan
	 * is fine for the small vector counts involved).
	 */
	for (i = 0; i < count; i++) {
		u_int basing = table_indexes[i];

		KASSERT(table_indexes[i] < PCI_MSIX_MAX_VECTORS);
		if (basing >= hw_max) {
			DPRINTF(("table index is over hardware max MSI-X index %d\n",
			    hw_max - 1));
			return EINVAL;
		}

		for (j = i + 1; j < count; j++) {
			if (basing == table_indexes[j]) {
				DPRINTF(("MSI-X table index duplicated\n"));
				return EINVAL;
			}
		}
	}

	return x86_pci_msix_alloc_map(ihps, table_indexes, count, pa);
}
/*
 * PCI attach entry point for the ath10k glue driver.
 *
 * Initialises the core/OS state, DMA buffers, locks, register mapping,
 * interrupts and the CE pipes, then schedules the main attach work via
 * a config intrhook.  On any failure, all partially acquired resources
 * are torn down through the bad4/bad1/bad/bad0 label chain and the
 * errno is returned.
 *
 * Bug fixes versus the previous revision:
 *  - several failure paths jumped to cleanup with err still 0, so the
 *    attach reported success even though it had failed; err is now set
 *    before each such goto.
 *  - taskqueue_start_threads() was called before checking whether the
 *    M_NOWAIT taskqueue_create() returned NULL.
 */
static int
athp_pci_attach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;
	int rid, i;
	int err = 0;
	int ret;

	ar->sc_dev = dev;
	ar->sc_invalid = 1;

	/* XXX TODO: initialize sc_debug from TUNABLE */
#if 0
	ar->sc_debug = ATH10K_DBG_BOOT | ATH10K_DBG_PCI | ATH10K_DBG_HTC |
	    ATH10K_DBG_PCI_DUMP | ATH10K_DBG_WMI | ATH10K_DBG_BMI |
	    ATH10K_DBG_MAC | ATH10K_DBG_WMI_PRINT | ATH10K_DBG_MGMT |
	    ATH10K_DBG_DATA | ATH10K_DBG_HTT;
#endif
	ar->sc_psc = ar_pci;

	/* Load-time tunable/sysctl tree */
	athp_attach_sysctl(ar);

	/* Enable WMI/HTT RX for now */
	ar->sc_rx_wmi = 1;
	ar->sc_rx_htt = 1;

	/* Fetch pcie capability offset */
	ret = pci_find_cap(dev, PCIY_EXPRESS, &ar_pci->sc_cap_off);
	if (ret != 0) {
		device_printf(dev,
		    "%s: failed to find pci-express capability offset\n",
		    __func__);
		return (ret);
	}

	/*
	 * Initialise ath10k core bits.
	 */
	if (ath10k_core_init(ar) < 0) {
		/* Fix: report the failure instead of returning err == 0. */
		err = ENXIO;
		goto bad0;
	}

	/*
	 * Initialise ath10k freebsd bits.
	 */
	sprintf(ar->sc_mtx_buf, "%s:def", device_get_nameunit(dev));
	mtx_init(&ar->sc_mtx, ar->sc_mtx_buf, MTX_NETWORK_LOCK, MTX_DEF);

	sprintf(ar->sc_buf_mtx_buf, "%s:buf", device_get_nameunit(dev));
	mtx_init(&ar->sc_buf_mtx, ar->sc_buf_mtx_buf, "athp buf", MTX_DEF);

	sprintf(ar->sc_dma_mtx_buf, "%s:dma", device_get_nameunit(dev));
	mtx_init(&ar->sc_dma_mtx, ar->sc_dma_mtx_buf, "athp dma", MTX_DEF);

	sprintf(ar->sc_conf_mtx_buf, "%s:conf", device_get_nameunit(dev));
	mtx_init(&ar->sc_conf_mtx, ar->sc_conf_mtx_buf, "athp conf",
	    MTX_DEF | MTX_RECURSE);

	sprintf(ar_pci->ps_mtx_buf, "%s:ps", device_get_nameunit(dev));
	mtx_init(&ar_pci->ps_mtx, ar_pci->ps_mtx_buf, "athp ps", MTX_DEF);

	sprintf(ar_pci->ce_mtx_buf, "%s:ce", device_get_nameunit(dev));
	mtx_init(&ar_pci->ce_mtx, ar_pci->ce_mtx_buf, "athp ce", MTX_DEF);

	sprintf(ar->sc_data_mtx_buf, "%s:data", device_get_nameunit(dev));
	mtx_init(&ar->sc_data_mtx, ar->sc_data_mtx_buf, "athp data",
	    MTX_DEF);

	/*
	 * Initialise ath10k BMI/PCIDIAG bits.
	 */
	ret = athp_descdma_alloc(ar, &ar_pci->sc_bmi_txbuf, "bmi_msg_req",
	    4, 1024);
	ret |= athp_descdma_alloc(ar, &ar_pci->sc_bmi_rxbuf, "bmi_msg_resp",
	    4, 1024);
	if (ret != 0) {
		device_printf(dev, "%s: failed to allocate BMI TX/RX buffer\n",
		    __func__);
		/* Fix: ret here is an OR of results, not an errno. */
		err = ENXIO;
		goto bad0;
	}

	/*
	 * Initialise HTT descriptors/memory.
	 */
	ret = ath10k_htt_rx_alloc_desc(ar, &ar->htt);
	if (ret != 0) {
		device_printf(dev, "%s: failed to alloc HTT RX descriptors\n",
		    __func__);
		/* Fix: previously fell through to cleanup with err == 0. */
		err = ENXIO;
		goto bad;
	}

	/* XXX here instead of in core_init because we need the lock init'ed */
	callout_init_mtx(&ar->scan.timeout, &ar->sc_data_mtx, 0);

	ar_pci->pipe_taskq = taskqueue_create("athp pipe taskq", M_NOWAIT,
	    NULL, ar_pci);
	/*
	 * Fix: check for NULL *before* starting threads on the queue;
	 * taskqueue_create(..., M_NOWAIT, ...) may fail, and the old code
	 * passed the (NULL) queue to taskqueue_start_threads() first.
	 */
	if (ar_pci->pipe_taskq == NULL) {
		device_printf(dev, "%s: couldn't create pipe taskq\n",
		    __func__);
		err = ENXIO;
		goto bad;
	}
	(void) taskqueue_start_threads(&ar_pci->pipe_taskq, 1, PI_NET,
	    "%s pipe taskq", device_get_nameunit(dev));

	/*
	 * Look at the device/vendor ID and choose which register offset
	 * mapping to use.  This is used by a lot of the register access
	 * pieces to get the correct device-specific windows.
	 */
	ar_pci->sc_vendorid = pci_get_vendor(dev);
	ar_pci->sc_deviceid = pci_get_device(dev);
	if (athp_pci_hw_lookup(ar_pci) != 0) {
		device_printf(dev, "%s: hw lookup failed\n", __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	ar_pci->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (ar_pci->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		err = ENXIO;
		goto bad;
	}

	/* Driver copy; hopefully we can delete this */
	ar->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/* Local copy for bus operations */
	ar_pci->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar_pci->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	ar->sc_invalid = 1;

	printf("%s: msicount=%d, msixcount=%d\n",
	    __func__, pci_msi_count(dev), pci_msix_count(dev));

	/*
	 * Arrange interrupt line.
	 *
	 * XXX TODO: this is effictively ath10k_pci_init_irq().
	 * Refactor it out later.
	 *
	 * First - attempt MSI.  If we get it, then use it.
	 */
	i = MSI_NUM_REQUEST;
	if (pci_alloc_msi(dev, &i) == 0) {
		device_printf(dev, "%s: %d MSI interrupts\n", __func__, i);
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
	} else {
		i = 1;
		if (pci_alloc_msi(dev, &i) == 0) {
			device_printf(dev, "%s: 1 MSI interrupt\n",
			    __func__);
			ar_pci->num_msi_intrs = 1;
		} else {
			device_printf(dev, "%s: legacy interrupts\n",
			    __func__);
			ar_pci->num_msi_intrs = 0;
		}
	}
	err = ath10k_pci_request_irq(ar_pci);
	if (err != 0)
		goto bad1;

	/*
	 * Attach register ops - needed for the caller to do register IO.
	 */
	ar->sc_regio.reg_read = athp_pci_regio_read_reg;
	ar->sc_regio.reg_write = athp_pci_regio_write_reg;
	ar->sc_regio.reg_s_read = athp_pci_regio_s_read_reg;
	ar->sc_regio.reg_s_write = athp_pci_regio_s_write_reg;
	ar->sc_regio.reg_flush = athp_pci_regio_flush_reg;
	ar->sc_regio.reg_arg = ar_pci;

	/*
	 * TODO: abstract this out to be a bus/hif specific
	 * attach path.
	 *
	 * I'm not sure what USB/SDIO will look like here, but
	 * I'm pretty sure it won't involve PCI/CE setup.
	 * It'll still have WME/HIF/BMI, but it'll be done over
	 * USB endpoints.
	 */
	if (athp_pci_setup_bufs(ar_pci) != 0) {
		err = ENXIO;
		goto bad4;
	}

	/* HIF ops attach */
	ar->hif.ops = &ath10k_pci_hif_ops;
	ar->hif.bus = ATH10K_BUS_PCI;

	/* Alloc pipes */
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		device_printf(ar->sc_dev, "%s: pci_alloc_pipes failed: %d\n",
		    __func__, ret);
		/* XXX cleanup */
		err = ENXIO;
		goto bad4;
	}

	/* deinit ce */
	ath10k_pci_ce_deinit(ar);

	/* disable irq */
	ret = ath10k_pci_irq_disable(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: irq_disable failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* init IRQ */
	ret = ath10k_pci_init_irq(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: init_irq failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* Ok, gate open the interrupt handler */
	ar->sc_invalid = 0;

	/* pci_chip_reset */
	ret = ath10k_pci_chip_reset(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: chip_reset failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* read SoC/chip version */
	ar->sc_chipid = athp_pci_soc_read32(ar,
	    SOC_CHIP_ID_ADDRESS(ar->sc_regofs));

	/* Verify chip version is something we can use */
	device_printf(ar->sc_dev, "%s: chipid: 0x%08x\n", __func__,
	    ar->sc_chipid);
	if (!ath10k_pci_chip_is_supported(ar_pci->sc_deviceid,
	    ar->sc_chipid)) {
		device_printf(ar->sc_dev,
		    "%s: unsupported chip; chipid: 0x%08x\n", __func__,
		    ar->sc_chipid);
		err = ENXIO;
		goto bad4;
	}

	/* Call main attach method with given info */
	ar->sc_preinit_hook.ich_func = athp_attach_preinit;
	ar->sc_preinit_hook.ich_arg = ar;
	if (config_intrhook_establish(&ar->sc_preinit_hook) != 0) {
		device_printf(ar->sc_dev,
		    "%s: couldn't establish preinit hook\n", __func__);
		/*
		 * Fix: err was left at 0 here, so a failure to establish
		 * the preinit hook reported a successful attach.
		 */
		err = ENXIO;
		goto bad4;
	}

	return (0);

	/* Fallthrough for setup failure */
bad4:
	athp_pci_free_bufs(ar_pci);
	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);
bad:
	ath10k_htt_rx_free_desc(ar, &ar->htt);
	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);
	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}
	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);
	ath10k_core_destroy(ar);
bad0:
	return (err);
}
/*
 * Interrupt handler allocation utility.  This function calls each
 * allocation function as specified by arguments.
 * Currently callee functions are pci_intx_alloc(), pci_msi_alloc_exact(),
 * and pci_msix_alloc_exact().
 *     pa       : pci_attach_args
 *     ihps     : interrupt handlers
 *     counts   : The array of number of required interrupt handlers.
 *                It is overwritten by the number of allocated handlers.
 *                CAUTION: The size of counts[] must be
 *                PCI_INTR_TYPE_SIZE.
 *     max_type : "max" type of using interrupts.  See below.
 * e.g.
 *     If you want to use 5 MSI-X, 1 MSI, or INTx, you use "counts" as
 *         int counts[PCI_INTR_TYPE_SIZE];
 *         counts[PCI_INTR_TYPE_MSIX] = 5;
 *         counts[PCI_INTR_TYPE_MSI] = 1;
 *         counts[PCI_INTR_TYPE_INTX] = 1;
 *         error = pci_intr_alloc(pa, ihps, counts, PCI_INTR_TYPE_MSIX);
 *
 *     If you want to use hardware max number MSI-X or 1 MSI,
 *     and not to use INTx, you use "counts" as
 *         int counts[PCI_INTR_TYPE_SIZE];
 *         counts[PCI_INTR_TYPE_MSIX] = -1;
 *         counts[PCI_INTR_TYPE_MSI] = 1;
 *         counts[PCI_INTR_TYPE_INTX] = 0;
 *         error = pci_intr_alloc(pa, ihps, counts, PCI_INTR_TYPE_MSIX);
 *
 *     If you want to use 3 MSI or INTx, you can use "counts" as
 *         int counts[PCI_INTR_TYPE_SIZE];
 *         counts[PCI_INTR_TYPE_MSI] = 3;
 *         counts[PCI_INTR_TYPE_INTX] = 1;
 *         error = pci_intr_alloc(pa, ihps, counts, PCI_INTR_TYPE_MSI);
 *
 *     If you want to use 1 MSI or INTx (probably most general usage),
 *     you can simply use this API like below
 *         error = pci_intr_alloc(pa, ihps, NULL, 0);
 *                                               ^ ignored
 */
int
pci_intr_alloc(const struct pci_attach_args *pa, pci_intr_handle_t **ihps,
    int *counts, pci_intr_type_t max_type)
{
	int error;
	int intx_count, msi_count, msix_count;

	intx_count = msi_count = msix_count = 0;
	if (counts == NULL) { /* simple pattern */
		msi_count = 1;
		intx_count = 1;
	} else {
		/*
		 * The deliberate fallthroughs pick up every type at or
		 * below max_type; a count of -1 means "hardware max".
		 */
		switch(max_type) {
		case PCI_INTR_TYPE_MSIX:
			msix_count = counts[PCI_INTR_TYPE_MSIX];
			/* FALLTHROUGH */
		case PCI_INTR_TYPE_MSI:
			msi_count = counts[PCI_INTR_TYPE_MSI];
			/* FALLTHROUGH */
		case PCI_INTR_TYPE_INTX:
			intx_count = counts[PCI_INTR_TYPE_INTX];
			break;
		default:
			return EINVAL;
		}
	}

	/* Reset counts[]; the slot of the winning type is filled below. */
	if (counts != NULL)
		memset(counts, 0, sizeof(counts[0]) * PCI_INTR_TYPE_SIZE);
	error = EINVAL;

	/* try MSI-X */
	if (msix_count == -1) /* use hardware max */
		msix_count = pci_msix_count(pa->pa_pc, pa->pa_tag);
	if (msix_count > 0) {
		error = pci_msix_alloc_exact(pa, ihps, msix_count);
		if (error == 0) {
			KASSERTMSG(counts != NULL,
			    "If MSI-X is used, counts must not be NULL.");
			counts[PCI_INTR_TYPE_MSIX] = msix_count;
			goto out;
		}
	}

	/* try MSI */
	if (msi_count == -1) /* use hardware max */
		msi_count = pci_msi_count(pa->pa_pc, pa->pa_tag);
	if (msi_count > 0) {
		error = pci_msi_alloc_exact(pa, ihps, msi_count);
		if (error == 0) {
			if (counts != NULL)
				counts[PCI_INTR_TYPE_MSI] = msi_count;
			goto out;
		}
	}

	/* try INTx */
	if (intx_count != 0) { /* The number of INTx is always 1. */
		error = pci_intx_alloc(pa, ihps);
		if (error == 0) {
			if (counts != NULL)
				counts[PCI_INTR_TYPE_INTX] = 1;
		}
	}

out:
	return error;
}
/*
 * MSI-X count bus method for children of the VGA PCI device: the query
 * is answered on behalf of our own device ("child" is not consulted).
 */
static int
vga_pci_msix_count(device_t dev, device_t child)
{
	int count;

	count = pci_msix_count(dev);
	return (count);
}
static int ntb_setup_interrupts(struct ntb_softc *ntb) { void (*interrupt_handler)(void *); void *int_arg; bool use_msix = 0; uint32_t num_vectors; int i; ntb->allocated_interrupts = 0; /* * On SOC, disable all interrupts. On XEON, disable all but Link * Interrupt. The rest will be unmasked as callbacks are registered. */ if (ntb->type == NTB_SOC) ntb_reg_write(8, ntb->reg_ofs.pdb_mask, ~0); else ntb_reg_write(2, ntb->reg_ofs.pdb_mask, ~(1 << ntb->limits.max_db_bits)); num_vectors = MIN(pci_msix_count(ntb->device), ntb->limits.max_db_bits); if (num_vectors >= 1) { pci_alloc_msix(ntb->device, &num_vectors); if (num_vectors >= 4) use_msix = TRUE; } ntb_create_callbacks(ntb, num_vectors); if (use_msix == TRUE) { for (i = 0; i < num_vectors; i++) { ntb->int_info[i].rid = i + 1; ntb->int_info[i].res = bus_alloc_resource_any( ntb->device, SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); if (ntb->int_info[i].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (-1); } ntb->int_info[i].tag = NULL; ntb->allocated_interrupts++; if (ntb->type == NTB_SOC) { interrupt_handler = handle_soc_irq; int_arg = &ntb->db_cb[i]; } else { if (i == num_vectors - 1) { interrupt_handler = handle_xeon_event_irq; int_arg = ntb; } else { interrupt_handler = handle_xeon_irq; int_arg = &ntb->db_cb[i]; } } if (bus_setup_intr(ntb->device, ntb->int_info[i].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, interrupt_handler, int_arg, &ntb->int_info[i].tag) != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } } } else { ntb->int_info[0].rid = 0; ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); interrupt_handler = ntb_handle_legacy_interrupt; if (ntb->int_info[0].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (-1); } ntb->int_info[0].tag = NULL; ntb->allocated_interrupts = 1; if (bus_setup_intr(ntb->device, ntb->int_info[0].res, INTR_MPSAFE | 
INTR_TYPE_MISC, NULL, interrupt_handler, ntb, &ntb->int_info[0].tag) != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } } return (0); }
/*
 * Allocate and configure doorbell interrupts.
 *
 * Attempts one MSI-X vector per doorbell bit (capped by the hardware
 * MSI-X count).  A partial allocation is repaired via ntb_remap_msix()
 * if possible; otherwise the driver degrades to a single vector.  On
 * XEON, an ENOSPC from the MSI-X setup triggers a fallback to a
 * legacy interrupt.
 */
static int
ntb_setup_interrupts(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	uint64_t mask;
	int rc;

	ntb->allocated_interrupts = 0;

	/*
	 * On SOC, disable all interrupts.  On XEON, disable all but Link
	 * Interrupt.  The rest will be unmasked as callbacks are
	 * registered.
	 */
	mask = 0;
	if (ntb->type == NTB_XEON)
		mask = (1 << XEON_LINK_DB);
	db_iowrite(ntb, ntb->reg_ofs.ldb_mask, ~mask);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->limits.max_db_bits);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

		/* Debug knob: force the remap path even on full success. */
		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			rc = ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	/*
	 * If allocating MSI-X interrupts succeeds, limit callbacks to the
	 * number of MSI-X slots available.
	 */
	ntb_create_callbacks(ntb, num_vectors);

	if (ntb->type == NTB_XEON)
		rc = ntb_setup_xeon_msix(ntb, num_vectors);
	else
		rc = ntb_setup_soc_msix(ntb, num_vectors);
	if (rc != 0) {
		device_printf(ntb->device,
		    "Error allocating MSI-X interrupts: %d\n", rc);

		/*
		 * If allocating MSI-X interrupts failed and we're forced to
		 * use legacy INTx anyway, the only limit on individual
		 * callbacks is the number of doorbell bits.
		 *
		 * CEM: This seems odd to me but matches the behavior of the
		 * Linux driver ca. September 2013
		 */
		ntb_free_callbacks(ntb);
		ntb_create_callbacks(ntb, ntb->limits.max_db_bits);
	}

	if (ntb->type == NTB_XEON && rc == ENOSPC)
		rc = ntb_setup_legacy_interrupt(ntb);

	return (rc);
}
/*
 * Configure one MSI-X vector of a PCI passthrough device on behalf of
 * a guest.
 *
 * On the first call for a device, the full MSI-X state is created:
 * per-vector arrays, the MSI-X table BAR mapping, and the vectors
 * themselves (all-or-nothing allocation).  Then, for table entry
 * "idx": if the guest's vector control does not mask the entry, the
 * IRQ resource is (re)allocated and pptintr() is wired up with the
 * guest-supplied address/data pair; if masked, any existing setup for
 * the entry is torn down.
 *
 * Returns 0 on success or an errno (ENOENT, EBUSY, ENXIO, EINVAL,
 * ENOSPC, or a pci_alloc_msix() error).
 */
int
ppt_setup_msix(struct vm *vm, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct pptdev *ppt;
	struct pci_devinfo *dinfo;
	int numvec, alloced, rid, error;
	size_t res_size, cookie_size, arg_size;

	ppt = ppt_find(bus, slot, func);
	if (ppt == NULL)
		return (ENOENT);
	if (ppt->vm != vm)		/* Make sure we own this device */
		return (EBUSY);

	dinfo = device_get_ivars(ppt->dev);
	if (!dinfo)
		return (ENXIO);

	/*
	 * First-time configuration:
	 *	Allocate the MSI-X table
	 *	Allocate the IRQ resources
	 *	Set up some variables in ppt->msix
	 */
	if (ppt->msix.num_msgs == 0) {
		numvec = pci_msix_count(ppt->dev);
		if (numvec <= 0)
			return (EINVAL);

		/* MSI-X resource IDs start at 1. */
		ppt->msix.startrid = 1;
		ppt->msix.num_msgs = numvec;

		res_size = numvec * sizeof(ppt->msix.res[0]);
		cookie_size = numvec * sizeof(ppt->msix.cookie[0]);
		arg_size = numvec * sizeof(ppt->msix.arg[0]);

		ppt->msix.res = malloc(res_size, M_PPTMSIX,
		    M_WAITOK | M_ZERO);
		ppt->msix.cookie = malloc(cookie_size, M_PPTMSIX,
		    M_WAITOK | M_ZERO);
		ppt->msix.arg = malloc(arg_size, M_PPTMSIX,
		    M_WAITOK | M_ZERO);

		/* Map the BAR containing the device's MSI-X table. */
		rid = dinfo->cfg.msix.msix_table_bar;
		ppt->msix.msix_table_res = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
		if (ppt->msix.msix_table_res == NULL) {
			ppt_teardown_msix(ppt);
			return (ENOSPC);
		}
		ppt->msix.msix_table_rid = rid;

		/* All-or-nothing vector allocation. */
		alloced = numvec;
		error = pci_alloc_msix(ppt->dev, &alloced);
		if (error || alloced != numvec) {
			ppt_teardown_msix(ppt);
			return (error == 0 ? ENOSPC: error);
		}
	}

	if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		/* Tear down the IRQ if it's already set up */
		ppt_teardown_msix_intr(ppt, idx);

		/* Allocate the IRQ resource */
		ppt->msix.cookie[idx] = NULL;
		rid = ppt->msix.startrid + idx;
		ppt->msix.res[idx] = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_IRQ, &rid, RF_ACTIVE);
		if (ppt->msix.res[idx] == NULL)
			return (ENXIO);

		ppt->msix.arg[idx].pptdev = ppt;
		ppt->msix.arg[idx].addr = addr;
		ppt->msix.arg[idx].msg_data = msg;

		/* Setup the MSI-X interrupt */
		error = bus_setup_intr(ppt->dev, ppt->msix.res[idx],
		    INTR_TYPE_NET | INTR_MPSAFE, pptintr, NULL,
		    &ppt->msix.arg[idx], &ppt->msix.cookie[idx]);
		if (error != 0) {
			bus_teardown_intr(ppt->dev, ppt->msix.res[idx],
			    ppt->msix.cookie[idx]);
			bus_release_resource(ppt->dev, SYS_RES_IRQ, rid,
			    ppt->msix.res[idx]);
			ppt->msix.cookie[idx] = NULL;
			ppt->msix.res[idx] = NULL;
			return (ENXIO);
		}
	} else {
		/* Masked, tear it down if it's already been set up */
		ppt_teardown_msix_intr(ppt, idx);
	}

	return (0);
}
/*
 * PCI attach entry point for the LSI Fusion-MPT driver.
 *
 * Classifies the controller (FC / SAS / SPI) from its device ID,
 * applies tunables, enables the needed PCI command bits, maps the I/O
 * and memory BARs, allocates an interrupt (single MSI-X or MSI message
 * if enabled, otherwise shared INTx), sets up DMA memory and finally
 * calls mpt_attach().
 *
 * NB: the "bad" path deliberately returns 0 after tearing down, to
 * preserve unit numbering (see the comment at the end).
 */
static int
mpt_pci_attach(device_t dev)
{
	struct mpt_softc *mpt;
	int iqd;
	uint32_t data, cmd;
	int mpt_io_bar, mpt_mem_bar;

	mpt = (struct mpt_softc*)device_get_softc(dev);

	/* Classify the controller family from the PCI device ID. */
	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC909_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
	case MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		mpt->is_fc = 1;
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1078:
	case MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB:
		mpt->is_1078 = 1;
		/* FALLTHROUGH */
	case MPI_MANUFACTPAGE_DEVID_SAS1064:
	case MPI_MANUFACTPAGE_DEVID_SAS1064A:
	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
	case MPI_MANUFACTPAGE_DEVID_SAS1066:
	case MPI_MANUFACTPAGE_DEVID_SAS1066E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068:
	case MPI_MANUFACTPAGE_DEVID_SAS1068A_FB:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E_FB:
		mpt->is_sas = 1;
		break;
	default:
		mpt->is_spi = 1;
		break;
	}
	mpt->dev = dev;
	mpt->unit = device_get_unit(dev);
	mpt->raid_resync_rate = MPT_RAID_RESYNC_RATE_DEFAULT;
	mpt->raid_mwce_setting = MPT_RAID_MWCE_DEFAULT;
	mpt->raid_queue_depth = MPT_RAID_QUEUE_DEPTH_DEFAULT;
	mpt->verbose = MPT_PRT_NONE;
	mpt->role = MPT_ROLE_NONE;
	mpt->mpt_ini_id = MPT_INI_ID_NONE;
#ifdef __sparc64__
	if (mpt->is_spi)
		mpt->mpt_ini_id = OF_getscsinitid(dev);
#endif
	mpt_set_options(mpt);
	if (mpt->verbose == MPT_PRT_NONE) {
		mpt->verbose = MPT_PRT_WARN;
		/* Print INFO level (if any) if bootverbose is set */
		mpt->verbose += (bootverbose != 0)? 1 : 0;
	}

	/* Make sure memory access decoders are enabled */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "Memory accesses disabled");
		return (ENXIO);
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd |= PCIM_CMD_SERRESPEN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_BIOS, 4);
	data &= ~PCIM_BIOS_ENABLE;
	pci_write_config(dev, PCIR_BIOS, data, 4);

	/*
	 * Is this part a dual?
	 * If so, link with our partner (around yet)
	 */
	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
	case MPI_MANUFACTPAGE_DEVID_53C1030:
	case MPI_MANUFACTPAGE_DEVID_53C1030ZC:
		mpt_link_peer(mpt);
		break;
	default:
		break;
	}

	/*
	 * Figure out which are the I/O and MEM Bars
	 */
	data = pci_read_config(dev, PCIR_BAR(0), 4);
	if (PCI_BAR_IO(data)) {
		/* BAR0 is IO, BAR1 is memory */
		mpt_io_bar = 0;
		mpt_mem_bar = 1;
	} else {
		/* BAR0 is memory, BAR1 is IO */
		mpt_mem_bar = 0;
		mpt_io_bar = 1;
	}

	/*
	 * Set up register access.  PIO mode is required for
	 * certain reset operations (but must be disabled for
	 * some cards otherwise).
	 */
	mpt_io_bar = PCIR_BAR(mpt_io_bar);
	mpt->pci_pio_reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &mpt_io_bar, RF_ACTIVE);
	if (mpt->pci_pio_reg == NULL) {
		/* PIO is optional unless memory mapping also fails below. */
		if (bootverbose) {
			device_printf(dev,
			    "unable to map registers in PIO mode\n");
		}
	} else {
		mpt->pci_pio_st = rman_get_bustag(mpt->pci_pio_reg);
		mpt->pci_pio_sh = rman_get_bushandle(mpt->pci_pio_reg);
	}

	mpt_mem_bar = PCIR_BAR(mpt_mem_bar);
	mpt->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &mpt_mem_bar, RF_ACTIVE);
	if (mpt->pci_reg == NULL) {
		/* SAS parts require memory mapping; others can do PIO. */
		if (bootverbose || mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev,
			    "Unable to memory map registers.\n");
		}
		if (mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev, "Giving Up.\n");
			goto bad;
		}
		if (bootverbose) {
			device_printf(dev, "Falling back to PIO mode.\n");
		}
		mpt->pci_st = mpt->pci_pio_st;
		mpt->pci_sh = mpt->pci_pio_sh;
	} else {
		mpt->pci_st = rman_get_bustag(mpt->pci_reg);
		mpt->pci_sh = rman_get_bushandle(mpt->pci_reg);
	}

	/* Get a handle to the interrupt */
	iqd = 0;
	if (mpt->msi_enable) {
		/*
		 * First try to alloc an MSI-X message.  If that
		 * fails, then try to alloc an MSI message instead.
		 * Only a single message is ever used; iqd becomes 1
		 * (the message rid) on success, else stays 0 (INTx).
		 */
		if (pci_msix_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msix(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;
			} else {
				mpt->pci_msi_count = 0;
			}
		}
		if (iqd == 0 && pci_msi_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msi(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;
			} else {
				mpt->pci_msi_count = 0;
			}
		}
	}
	/* Legacy INTx (rid 0) must be shareable; message rids need not be. */
	mpt->pci_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | (mpt->pci_msi_count ? 0 : RF_SHAREABLE));
	if (mpt->pci_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	MPT_LOCK_SETUP(mpt);

	/* Disable interrupts at the part */
	mpt_disable_ints(mpt);

	/* Register the interrupt handler */
	if (mpt_setup_intr(dev, mpt->pci_irq, MPT_IFLAGS, NULL, mpt_pci_intr,
	    mpt, &mpt->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/* Allocate dma memory */
	if (mpt_dma_mem_alloc(mpt)) {
		mpt_prt(mpt, "Could not allocate DMA memory\n");
		goto bad;
	}

#if 0
	/*
	 * Save the PCI config register values
	 *
	 * Hard resets are known to screw up the BAR for diagnostic
	 * memory accesses (Mem1).
	 *
	 * Using Mem1 is known to make the chip stop responding to
	 * configuration space transfers, so we need to save it now
	 */
	mpt_read_config_regs(mpt);
#endif

	/*
	 * Disable PIO until we need it
	 */
	if (mpt->is_sas) {
		pci_disable_io(dev, SYS_RES_IOPORT);
	}

	/* Initialize the hardware */
	if (mpt->disabled == 0) {
		if (mpt_attach(mpt) != 0) {
			goto bad;
		}
	} else {
		mpt_prt(mpt, "device disabled at user request\n");
		goto bad;
	}

	mpt->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, mpt_pci_shutdown,
	    dev, SHUTDOWN_PRI_DEFAULT);

	if (mpt->eh == NULL) {
		mpt_prt(mpt, "shutdown event registration failed\n");
		(void) mpt_detach(mpt);
		goto bad;
	}
	return (0);

bad:
	mpt_dma_mem_free(mpt);
	mpt_free_bus_resources(mpt);
	mpt_unlink_peer(mpt);

	MPT_LOCK_DESTROY(mpt);

	/*
	 * but return zero to preserve unit numbering
	 */
	return (0);
}