/*
 * Allocate and hook up the controller interrupt, preferring a single
 * MSI vector when the per-device "msi" hint is set and non-zero.
 * Legacy-mode controllers are left untouched.  Returns 0 on success
 * or ENXIO on failure; no MSI vectors remain allocated on failure.
 */
int
ata_setup_interrupt(device_t dev, void *intr_func)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);
	int hint, use_msi;

	if (ctlr->legacy)
		return 0;

	/* The "msi" device hint must be present and non-zero to try MSI. */
	use_msi = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "msi", &hint) == 0 && hint != 0)
		use_msi = 1;

	if (use_msi && pci_msi_count(dev) > 0 &&
	    pci_alloc_msi(dev, &use_msi) == 0) {
		ctlr->r_irq_rid = 0x1;	/* MSI resource IDs start at 1 */
	} else {
		use_msi = 0;
		ctlr->r_irq_rid = ATA_IRQ_RID;
	}

	ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &ctlr->r_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (ctlr->r_irq == NULL) {
		device_printf(dev, "unable to map interrupt\n");
		if (use_msi)
			pci_release_msi(dev);
		return ENXIO;
	}

	if (bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, NULL,
	    intr_func, ctlr, &ctlr->handle) != 0) {
		device_printf(dev, "unable to setup interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ, ctlr->r_irq_rid,
		    ctlr->r_irq);
		if (use_msi)
			pci_release_msi(dev);
		return ENXIO;
	}

	return 0;
}
/** * Attempt to allocate MSI interrupts, returning the count in @p msi_count * on success. */ static int bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc, int *msi_count) { int error, count; /* Is MSI available? */ if (pci_msi_count(sc->parent) < BHNDB_PCI_MSI_COUNT) return (ENXIO); /* Allocate expected message count */ count = BHNDB_PCI_MSI_COUNT; if ((error = pci_alloc_msi(sc->parent, &count))) { device_printf(sc->dev, "failed to allocate MSI interrupts: " "%d\n", error); return (error); } if (count < BHNDB_PCI_MSI_COUNT) { pci_release_msi(sc->parent); return (ENXIO); } *msi_count = count; return (0); }
static int ciss_setup_msix(struct ciss_softc *sc) { int i, count, error; i = ciss_lookup(sc->ciss_dev); if (ciss_vendor_data[i].flags & CISS_BOARD_NOMSI) return (EINVAL); count = pci_msix_count(sc->ciss_dev); if (count < CISS_MSI_COUNT) { count = pci_msi_count(sc->ciss_dev); if (count < CISS_MSI_COUNT) return (EINVAL); } count = MIN(count, CISS_MSI_COUNT); error = pci_alloc_msix(sc->ciss_dev, &count); if (error) { error = pci_alloc_msi(sc->ciss_dev, &count); if (error) return (EINVAL); } sc->ciss_msi = count; for (i = 0; i < count; i++) sc->ciss_irq_rid[i] = i + 1; return (0); }
/*
 * Request `msgs` MSI vectors for the controller.  Returns the
 * pci_alloc_msi() status: 0 on success, an errno otherwise.
 */
static int
mpr_alloc_msi(struct mpr_softc *sc, int msgs)
{
	return (pci_alloc_msi(sc->mpr_dev, &msgs));
}
/*
 * Allocate MSI vectors on behalf of a single child device.  Only one
 * child may hold the allocation at a time; EBUSY is returned while
 * another child owns it.  On success the requesting child is recorded
 * as the owner.
 */
static int
siba_bwn_alloc_msi(device_t dev, device_t child, int *count)
{
	struct siba_bwn_softc *ssc = device_get_softc(dev);
	int error;

	/* Refuse while another child already holds the MSI allocation. */
	if (ssc->ssc_msi_child != NULL)
		return (EBUSY);

	error = pci_alloc_msi(dev, count);
	if (error == 0)
		ssc->ssc_msi_child = child;	/* record the new owner */
	return (error);
}
/*
 * MSI allocation proxy for the VGA PCI bus: forwards the request to
 * the parent device and tracks which child owns the vectors.  A
 * second child asking while an allocation is outstanding gets EBUSY.
 */
static int
vga_pci_alloc_msi(device_t dev, device_t child, int *count)
{
	struct vga_pci_softc *sc;
	int error;

	sc = device_get_softc(dev);
	if (sc->vga_msi_child != NULL)
		return (EBUSY);

	error = pci_alloc_msi(dev, count);
	if (error != 0)
		return (error);

	sc->vga_msi_child = child;	/* remember who owns the vectors */
	return (0);
}
static int tws_setup_irq(struct tws_softc *sc) { int messages; u_int16_t cmd; cmd = pci_read_config(sc->tws_dev, PCIR_COMMAND, 2); switch(sc->intr_type) { case TWS_INTx : cmd = cmd & ~0x0400; pci_write_config(sc->tws_dev, PCIR_COMMAND, cmd, 2); sc->irqs = 1; sc->irq_res_id[0] = 0; sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( ! sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using legacy INTx\n"); break; case TWS_MSI : cmd = cmd | 0x0400; pci_write_config(sc->tws_dev, PCIR_COMMAND, cmd, 2); sc->irqs = 1; sc->irq_res_id[0] = 1; messages = 1; if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) { TWS_TRACE(sc, "pci alloc msi fail", 0, messages); return(FAILURE); } sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ, &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE); if ( !sc->irq_res[0] ) return(FAILURE); if ( tws_setup_intr(sc, sc->irqs) == FAILURE ) return(FAILURE); device_printf(sc->tws_dev, "Using MSI\n"); break; } return(SUCCESS); }
/*
 * Try to switch the virtio PCI transport to a single MSI vector.
 * Returns 0 on success and 1 on failure (the caller then falls back
 * to the legacy interrupt).
 *
 * Fix: if pci_alloc_msi() succeeded but granted a count other than
 * the single vector required, the vectors were previously leaked.
 * Release them before reporting failure (matching the later revision
 * of this function).
 */
static int
vtpci_alloc_msi(struct vtpci_softc *sc)
{
	device_t dev;
	int nmsi, cnt;

	dev = sc->vtpci_dev;

	nmsi = pci_msi_count(dev);
	if (nmsi < 1)
		return (1);

	cnt = 1;
	if (pci_alloc_msi(dev, &cnt) == 0 && cnt == 1)
		return (0);

	/*
	 * Either the allocation failed or it granted the wrong count;
	 * ensure nothing stays allocated.  pci_release_msi() is a
	 * harmless no-op when no vectors are held.
	 */
	pci_release_msi(dev);

	return (1);
}
/*
 * Allocate exactly the single MSI vector this transport requires.
 * Returns 0 when the vector is available and 1 otherwise; on the
 * failure path any partially granted vectors are released so nothing
 * leaks before the legacy-interrupt fallback.
 */
static int
vtpci_alloc_msi(struct vtpci_softc *sc)
{
	device_t dev = sc->vtpci_dev;
	int navail, nalloc, required;

	required = 1;

	/* Nothing to do if the device cannot supply enough messages. */
	navail = pci_msi_count(dev);
	if (navail < required)
		return (1);

	nalloc = required;
	if (pci_alloc_msi(dev, &nalloc) == 0 && nalloc >= required)
		return (0);

	/* Allocation failed or came up short; release any vectors. */
	pci_release_msi(dev);

	return (1);
}
/*
 * PCI attach for uart(4): optionally switch the device to MSI before
 * handing off to the common bus attach code.  Even when a PCIe UART
 * advertises several MSI vectors, only the first is ever used.
 */
static int
uart_pci_attach(device_t dev)
{
	struct uart_softc *sc = device_get_softc(dev);
	int msgs;

	/* Prefer a single MSI message over the legacy IRQ when offered. */
	if (pci_msi_count(dev) > 0) {
		msgs = 1;
		if (pci_alloc_msi(dev, &msgs) == 0) {
			sc->sc_irid = 1;	/* MSI uses rid 1 */
			device_printf(dev, "Using %d MSI message\n", msgs);
		}
	}

	return (uart_bus_attach(dev));
}
static int sfxge_intr_setup_msi(struct sfxge_softc *sc) { struct sfxge_intr_hdl *table; struct sfxge_intr *intr; device_t dev; int count; int error; dev = sc->dev; intr = &sc->intr; table = intr->table; /* * Check if MSI is available. All messages must be written to * the same address and on x86 this means the IRQs have the * same CPU affinity. So we only ever allocate 1. */ count = pci_msi_count(dev) ? 1 : 0; if (count == 0) return (EINVAL); if ((error = pci_alloc_msi(dev, &count)) != 0) return (ENOMEM); /* Allocate interrupt handler. */ if (sfxge_intr_alloc(sc, count) != 0) { pci_release_msi(dev); return (ENOMEM); } intr->type = EFX_INTR_MESSAGE; intr->n_alloc = count; return (0); }
/*
 * Device attach for the Intel SMBus Message Transport (iSMT)
 * controller: maps BAR0, creates the smbus child, sets up DMA for the
 * descriptor ring and data buffer, programs the master registers, and
 * hooks the interrupt (MSI rid 1 when available, shared legacy INTx
 * rid 0 otherwise).  Returns 0 on success or an errno; partial state
 * is torn down via ismt_detach() on failure.
 */
static int
ismt_attach(device_t dev)
{
	struct ismt_softc *sc = device_get_softc(dev);
	int err, num_vectors, val;

	sc->pcidev = dev;
	pci_enable_busmaster(dev);

	if ((sc->smbdev = device_add_child(dev, "smbus", -1)) == NULL) {
		device_printf(dev, "no smbus child found\n");
		err = ENXIO;
		goto fail;
	}

	/* Map the register BAR. */
	sc->mmio_rid = PCIR_BAR(0);
	sc->mmio_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mmio_rid, RF_ACTIVE);
	if (sc->mmio_res == NULL) {
		device_printf(dev, "cannot allocate mmio region\n");
		err = ENOMEM;
		goto fail;
	}
	sc->mmio_tag = rman_get_bustag(sc->mmio_res);
	sc->mmio_handle = rman_get_bushandle(sc->mmio_res);

	/* Attach "smbus" child */
	if ((err = bus_generic_attach(dev)) != 0) {
		device_printf(dev, "failed to attach child: %d\n", err);
		err = ENXIO;
		goto fail;
	}

	/*
	 * DMA setup for the descriptor ring and the bounce buffer.
	 * NOTE(review): the bus_dma_tag_create()/bus_dmamap_create()/
	 * bus_dmamem_alloc()/bus_dmamap_load() return values are not
	 * checked; a failure here would be dereferenced below — confirm
	 * this is intentional (BUS_DMA_WAITOK cannot fail for the
	 * allocations, but the tag/map creation can).
	 */
	bus_dma_tag_create(bus_get_dma_tag(dev), 4, PAGE_SIZE,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, DESC_SIZE, 1, DESC_SIZE, 0, NULL, NULL,
	    &sc->desc_dma_tag);

	bus_dma_tag_create(bus_get_dma_tag(dev), 4, PAGE_SIZE,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, DMA_BUFFER_SIZE, 1, DMA_BUFFER_SIZE, 0, NULL, NULL,
	    &sc->dma_buffer_dma_tag);

	bus_dmamap_create(sc->desc_dma_tag, 0, &sc->desc_dma_map);
	bus_dmamap_create(sc->dma_buffer_dma_tag, 0, &sc->dma_buffer_dma_map);

	bus_dmamem_alloc(sc->desc_dma_tag, (void **)&sc->desc,
	    BUS_DMA_WAITOK, &sc->desc_dma_map);
	bus_dmamem_alloc(sc->dma_buffer_dma_tag, (void **)&sc->dma_buffer,
	    BUS_DMA_WAITOK, &sc->dma_buffer_dma_map);

	bus_dmamap_load(sc->desc_dma_tag, sc->desc_dma_map, sc->desc,
	    DESC_SIZE, ismt_single_map, &sc->desc_bus_addr, 0);
	bus_dmamap_load(sc->dma_buffer_dma_tag, sc->dma_buffer_dma_map,
	    sc->dma_buffer, DMA_BUFFER_SIZE, ismt_single_map,
	    &sc->dma_buffer_bus_addr, 0);

	/* Program the 64-bit descriptor ring base address (MDBA). */
	bus_write_4(sc->mmio_res, ISMT_MSTR_MDBA,
	    (sc->desc_bus_addr & 0xFFFFFFFFLL));
	bus_write_4(sc->mmio_res, ISMT_MSTR_MDBA + 4,
	    (sc->desc_bus_addr >> 32));

	/* initialize the Master Control Register (MCTRL) */
	bus_write_4(sc->mmio_res, ISMT_MSTR_MCTRL, ISMT_MCTRL_MEIE);

	/* initialize the Master Status Register (MSTS) */
	bus_write_4(sc->mmio_res, ISMT_MSTR_MSTS, 0);

	/* initialize the Master Descriptor Size (MDS) */
	val = bus_read_4(sc->mmio_res, ISMT_MSTR_MDS);
	val &= ~ISMT_MDS_MASK;
	val |= (ISMT_DESC_ENTRIES - 1);
	bus_write_4(sc->mmio_res, ISMT_MSTR_MDS, val);

	/* Prefer MSI; any failure below drops back to legacy INTx. */
	sc->using_msi = 1;

	if (pci_msi_count(dev) == 0) {
		sc->using_msi = 0;
		goto intx;
	}

	num_vectors = 1;
	if (pci_alloc_msi(dev, &num_vectors) != 0) {
		sc->using_msi = 0;
		goto intx;
	}

	sc->intr_rid = 1;	/* MSI resource IDs are 1-based */
	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->intr_rid, RF_ACTIVE);
	if (sc->intr_res == NULL) {
		sc->using_msi = 0;
		pci_release_msi(dev);
	}

intx:
	if (sc->using_msi == 0) {
		sc->intr_rid = 0;	/* shared legacy interrupt */
		sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->intr_rid, RF_SHAREABLE | RF_ACTIVE);
		if (sc->intr_res == NULL) {
			device_printf(dev, "cannot allocate irq\n");
			err = ENXIO;
			goto fail;
		}
	}

	ISMT_DEBUG(dev, "using_msi = %d\n", sc->using_msi);

	err = bus_setup_intr(dev, sc->intr_res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, ismt_intr, sc,
	    &sc->intr_handle);
	if (err != 0) {
		device_printf(dev, "cannot setup interrupt\n");
		err = ENXIO;
		goto fail;
	}

	return (0);

fail:
	/* ismt_detach() unwinds whatever was set up above. */
	ismt_detach(dev);
	return (err);
}
/*
 * Attach a Ralink PCI wireless adapter.  The PCI device ID selects
 * the RT2560/RT2661/RT2860 ops vector, BAR0 is mapped, and the
 * interrupt is allocated as a single MSI message (rid 1) unless
 * ral_msi_disable is set, falling back to a shared legacy IRQ
 * (rid 0).  The handler is hooked only after the chip-specific attach
 * completes.  Returns 0, ENXIO, or the chip attach error.
 */
static int
ral_pci_attach(device_t dev)
{
	struct ral_pci_softc *psc = device_get_softc(dev);
	struct rt2560_softc *sc = &psc->u.sc_rt2560;
	int count, error, rid;

	pci_enable_busmaster(dev);

	/* Pick the chip-specific operations from the PCI device ID. */
	switch (pci_get_device(dev)) {
	case 0x0201:
		psc->sc_opns = &ral_rt2560_opns;
		break;
	case 0x0301:
	case 0x0302:
	case 0x0401:
		psc->sc_opns = &ral_rt2661_opns;
		break;
	default:
		psc->sc_opns = &ral_rt2860_opns;
		break;
	}

	rid = PCIR_BAR(0);
	psc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (psc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return ENXIO;
	}

	sc->sc_st = rman_get_bustag(psc->mem);
	sc->sc_sh = rman_get_bushandle(psc->mem);
	/* Discard any interrupts until initialization is complete. */
	sc->sc_invalid = 1;

	rid = 0;
	if (ral_msi_disable == 0) {
		count = 1;
		if (pci_alloc_msi(dev, &count) == 0)
			rid = 1;	/* MSI granted; use rid 1 */
	}

	/* Legacy IRQ (rid 0) must be shareable; MSI must not be. */
	psc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
	if (psc->irq == NULL) {
		device_printf(dev, "could not allocate interrupt resource\n");
		/*
		 * NOTE(review): pci_release_msi() is called even when MSI
		 * was never allocated; presumably it fails harmlessly in
		 * that case — confirm.
		 */
		pci_release_msi(dev);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(psc->mem), psc->mem);
		return ENXIO;
	}

	error = (*psc->sc_opns->attach)(dev, pci_get_device(dev));
	if (error != 0) {
		(void)ral_pci_detach(dev);
		return error;
	}

	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, psc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, psc->sc_opns->intr, psc, &psc->sc_ih);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt\n");
		(void)ral_pci_detach(dev);
		return error;
	}
	sc->sc_invalid = 0;

	return 0;
}
/*
 * Attach a Marvell Libertas (malo) PCI wireless adapter: maps the
 * register BARs, arranges the interrupt (MALO_MSI_MESSAGES MSI
 * vectors when the device is PCIe, MSI is not disabled, and the full
 * count is granted; a legacy IRQ otherwise), creates the DMA tag and
 * calls the chip attach routine.  Returns 0 or ENXIO, unwinding all
 * partial state on failure.
 *
 * Fix: the third argument of pci_find_cap() had been corrupted to a
 * stray '®' character; it must be &reg (the declared capability
 * offset variable) for this to compile.
 */
static int
malo_pci_attach(device_t dev)
{
	int error = ENXIO, i, msic, reg;
	struct malo_pci_softc *psc = device_get_softc(dev);
	struct malo_softc *sc = &psc->malo_sc;

	sc->malo_dev = dev;

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	psc->malo_mem_spec = malo_res_spec_mem;
	error = bus_alloc_resources(dev, psc->malo_mem_spec,
	    psc->malo_res_mem);
	if (error) {
		device_printf(dev, "couldn't allocate memory resources\n");
		return (ENXIO);
	}

	/*
	 * Arrange and allocate interrupt line.
	 */
	sc->malo_invalid = 1;

	/* MSI is only considered on PCI Express parts. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		msic = pci_msi_count(dev);
		if (bootverbose)
			device_printf(dev, "MSI count : %d\n", msic);
	} else
		msic = 0;

	psc->malo_irq_spec = malo_res_spec_legacy;
	if (msic == MALO_MSI_MESSAGES && msi_disable == 0) {
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == MALO_MSI_MESSAGES) {
				device_printf(dev,
				    "Using %d MSI messages\n", msic);
				psc->malo_irq_spec = malo_res_spec_msi;
				psc->malo_msi = 1;
			} else
				pci_release_msi(dev);	/* short grant */
		}
	}

	error = bus_alloc_resources(dev, psc->malo_irq_spec,
	    psc->malo_res_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto bad;
	}

	/*
	 * NOTE(review): the bus_setup_intr() error below is not checked
	 * before continuing into the DMA setup — presumably matched by
	 * the original driver; confirm whether a failed hookup should
	 * goto bad1 instead.
	 */
	if (psc->malo_msi == 0)
		error = bus_setup_intr(dev, psc->malo_res_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, malo_intr, NULL, sc,
		    &psc->malo_intrhand[0]);
	else {
		for (i = 0; i < MALO_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, psc->malo_res_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, malo_intr, NULL,
			    sc, &psc->malo_intrhand[i]);
			if (error != 0)
				break;
		}
	}

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXADDR,			/* maxsize */
	    0,					/* nsegments */
	    BUS_SPACE_MAXADDR,			/* maxsegsize */
	    0,					/* flags */
	    NULL,				/* lockfunc */
	    NULL,				/* lockarg */
	    &sc->malo_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad1;
	}

	sc->malo_io0t = rman_get_bustag(psc->malo_res_mem[0]);
	sc->malo_io0h = rman_get_bushandle(psc->malo_res_mem[0]);
	sc->malo_io1t = rman_get_bustag(psc->malo_res_mem[1]);
	sc->malo_io1h = rman_get_bushandle(psc->malo_res_mem[1]);

	error = malo_attach(pci_get_device(dev), sc);

	if (error != 0)
		goto bad2;

	return (error);

bad2:
	bus_dma_tag_destroy(sc->malo_dmat);
bad1:
	if (psc->malo_msi == 0)
		bus_teardown_intr(dev, psc->malo_res_irq[0],
		    psc->malo_intrhand[0]);
	else {
		for (i = 0; i < MALO_MSI_MESSAGES; i++)
			bus_teardown_intr(dev, psc->malo_res_irq[i],
			    psc->malo_intrhand[i]);
	}
	bus_release_resources(dev, psc->malo_irq_spec, psc->malo_res_irq);
bad:
	if (psc->malo_msi != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, psc->malo_mem_spec, psc->malo_res_mem);

	return (error);
}
/*
 * PCI attach for the LSI MegaRAID SAS (mfi) controller family:
 * verifies bus mastering and the memory window, maps the register BAR
 * (BAR0 for 1064R/1078 boards, BAR1 for GEN2), creates the parent DMA
 * tag, allocates the interrupt (MSI rid 1 when the mfi_msi tunable is
 * set and allocation succeeds, shared legacy rid 0 otherwise) and
 * hands off to mfi_attach().  Returns 0 or an errno; mfi_free()/
 * mfi_pci_free() unwind partial state on failure.
 */
static int
mfi_pci_attach(device_t dev)
{
	struct mfi_softc *sc;
	struct mfi_ident *m;
	uint32_t command;
	int count, error;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->mfi_dev = dev;
	m = mfi_find_ident(dev);
	sc->mfi_flags = m->flags;

	/* Verify that the adapter can be set up in PCI space */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((command & PCIM_CMD_BUSMASTEREN) == 0) {
		device_printf(dev, "Can't enable PCI busmaster\n");
		return (ENXIO);
	}
	if ((command & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "PCI memory window not available\n");
		return (ENXIO);
	}

	/* Allocate PCI registers */
	if ((sc->mfi_flags & MFI_FLAGS_1064R) ||
	    (sc->mfi_flags & MFI_FLAGS_1078)) {
		/* 1068/1078: Memory mapped BAR is at offset 0x10 */
		sc->mfi_regs_rid = PCIR_BAR(0);
	}
	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		/* GEN2: Memory mapped BAR is at offset 0x14 */
		sc->mfi_regs_rid = PCIR_BAR(1);
	}
	/*
	 * NOTE(review): if the flags match neither family the rid stays 0
	 * from the bzero() above, which is not a BAR offset — presumably
	 * every board returned by mfi_find_ident() sets one of these
	 * flags; confirm.
	 */
	if ((sc->mfi_regs_resource = bus_alloc_resource_any(sc->mfi_dev,
	    SYS_RES_MEMORY, &sc->mfi_regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		return (ENXIO);
	}
	sc->mfi_btag = rman_get_bustag(sc->mfi_regs_resource);
	sc->mfi_bhandle = rman_get_bushandle(sc->mfi_regs_resource);

	error = ENOMEM;

	/* Allocate parent DMA tag */
	if (bus_dma_tag_create( NULL,		/* parent */
	    1, 0,				/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->mfi_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		goto out;
	}

	/* Allocate IRQ resource. */
	sc->mfi_irq_rid = 0;
	count = 1;
	/* Try a single MSI message; fall back to the shared legacy IRQ. */
	if (mfi_msi && pci_alloc_msi(sc->mfi_dev, &count) == 0) {
		device_printf(sc->mfi_dev, "Using MSI\n");
		sc->mfi_irq_rid = 1;
	}
	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
		error = EINVAL;
		goto out;
	}

	error = mfi_attach(sc);
out:
	if (error) {
		/* mfi_free()/mfi_pci_free() release everything above. */
		mfi_free(sc);
		mfi_pci_free(sc);
	}

	return (error);
}
/*
 * Attach an LSI Fusion-MPT controller (FC, SAS, or parallel SCSI,
 * classified from the PCI device ID).  Enables PCI command bits,
 * disables the expansion ROM, links dual-function parts to their
 * peer, maps the PIO and memory BARs (falling back to PIO if memory
 * mapping fails on non-SAS parts), allocates the interrupt (MSI-X
 * preferred, then MSI, then shared legacy), hooks the handler,
 * allocates DMA memory and runs mpt_attach().
 *
 * Note: this routine always returns 0, even on failure, to preserve
 * unit numbering (see the comment at the "bad" label).
 */
static int
mpt_pci_attach(device_t dev)
{
	struct mpt_softc *mpt;
	int		  iqd;
	uint32_t	  data, cmd;
	int		  mpt_io_bar, mpt_mem_bar;

	mpt = (struct mpt_softc*)device_get_softc(dev);

	/* Classify the part from its PCI device ID. */
	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC909_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
	case MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		mpt->is_fc = 1;
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1078:
	case MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB:
		mpt->is_1078 = 1;
		/* FALLTHROUGH */
	case MPI_MANUFACTPAGE_DEVID_SAS1064:
	case MPI_MANUFACTPAGE_DEVID_SAS1064A:
	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
	case MPI_MANUFACTPAGE_DEVID_SAS1066:
	case MPI_MANUFACTPAGE_DEVID_SAS1066E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068:
	case MPI_MANUFACTPAGE_DEVID_SAS1068A_FB:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E_FB:
		mpt->is_sas = 1;
		break;
	default:
		mpt->is_spi = 1;
		break;
	}
	mpt->dev = dev;
	mpt->unit = device_get_unit(dev);
	mpt->raid_resync_rate = MPT_RAID_RESYNC_RATE_DEFAULT;
	mpt->raid_mwce_setting = MPT_RAID_MWCE_DEFAULT;
	mpt->raid_queue_depth = MPT_RAID_QUEUE_DEPTH_DEFAULT;
	mpt->verbose = MPT_PRT_NONE;
	mpt->role = MPT_ROLE_NONE;
	mpt->mpt_ini_id = MPT_INI_ID_NONE;
#ifdef __sparc64__
	if (mpt->is_spi)
		mpt->mpt_ini_id = OF_getscsinitid(dev);
#endif
	mpt_set_options(mpt);
	if (mpt->verbose == MPT_PRT_NONE) {
		mpt->verbose = MPT_PRT_WARN;
		/* Print INFO level (if any) if bootverbose is set */
		mpt->verbose += (bootverbose != 0)? 1 : 0;
	}

	/* Make sure memory access decoders are enabled */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "Memory accesses disabled");
		return (ENXIO);
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd |= PCIM_CMD_SERRESPEN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_BIOS, 4);
	data &= ~PCIM_BIOS_ENABLE;
	pci_write_config(dev, PCIR_BIOS, data, 4);

	/*
	 * Is this part a dual?
	 * If so, link with our partner (around yet)
	 */
	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
	case MPI_MANUFACTPAGE_DEVID_53C1030:
	case MPI_MANUFACTPAGE_DEVID_53C1030ZC:
		mpt_link_peer(mpt);
		break;
	default:
		break;
	}

	/*
	 * Figure out which are the I/O and MEM Bars
	 */
	data = pci_read_config(dev, PCIR_BAR(0), 4);
	if (PCI_BAR_IO(data)) {
		/* BAR0 is IO, BAR1 is memory */
		mpt_io_bar = 0;
		mpt_mem_bar = 1;
	} else {
		/* BAR0 is memory, BAR1 is IO */
		mpt_mem_bar = 0;
		mpt_io_bar = 1;
	}

	/*
	 * Set up register access.  PIO mode is required for
	 * certain reset operations (but must be disabled for
	 * some cards otherwise).
	 */
	mpt_io_bar = PCIR_BAR(mpt_io_bar);
	mpt->pci_pio_reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &mpt_io_bar, RF_ACTIVE);
	if (mpt->pci_pio_reg == NULL) {
		if (bootverbose) {
			device_printf(dev,
			    "unable to map registers in PIO mode\n");
		}
	} else {
		mpt->pci_pio_st = rman_get_bustag(mpt->pci_pio_reg);
		mpt->pci_pio_sh = rman_get_bushandle(mpt->pci_pio_reg);
	}

	mpt_mem_bar = PCIR_BAR(mpt_mem_bar);
	mpt->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &mpt_mem_bar, RF_ACTIVE);
	if (mpt->pci_reg == NULL) {
		if (bootverbose || mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev,
			    "Unable to memory map registers.\n");
		}
		/* SAS parts cannot run in PIO mode; PIO is the fallback. */
		if (mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev, "Giving Up.\n");
			goto bad;
		}
		if (bootverbose) {
			device_printf(dev, "Falling back to PIO mode.\n");
		}
		mpt->pci_st = mpt->pci_pio_st;
		mpt->pci_sh = mpt->pci_pio_sh;
	} else {
		mpt->pci_st = rman_get_bustag(mpt->pci_reg);
		mpt->pci_sh = rman_get_bushandle(mpt->pci_reg);
	}

	/* Get a handle to the interrupt */
	iqd = 0;
	if (mpt->msi_enable) {
		/*
		 * First try to alloc an MSI-X message.  If that
		 * fails, then try to alloc an MSI message instead.
		 */
		if (pci_msix_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msix(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;	/* MSI rids are 1-based */
			} else {
				mpt->pci_msi_count = 0;
			}
		}
		if (iqd == 0 && pci_msi_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msi(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;
			} else {
				mpt->pci_msi_count = 0;
			}
		}
	}
	mpt->pci_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | (mpt->pci_msi_count ? 0 : RF_SHAREABLE));
	if (mpt->pci_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	MPT_LOCK_SETUP(mpt);

	/* Disable interrupts at the part */
	mpt_disable_ints(mpt);

	/* Register the interrupt handler */
	if (mpt_setup_intr(dev, mpt->pci_irq, MPT_IFLAGS, NULL, mpt_pci_intr,
	    mpt, &mpt->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/* Allocate dma memory */
	if (mpt_dma_mem_alloc(mpt)) {
		mpt_prt(mpt, "Could not allocate DMA memory\n");
		goto bad;
	}

#if 0
	/*
	 * Save the PCI config register values
 	 *
	 * Hard resets are known to screw up the BAR for diagnostic
	 * memory accesses (Mem1).
	 *
	 * Using Mem1 is known to make the chip stop responding to
	 * configuration space transfers, so we need to save it now
	 */
	mpt_read_config_regs(mpt);
#endif

	/*
	 * Disable PIO until we need it
	 */
	if (mpt->is_sas) {
		pci_disable_io(dev, SYS_RES_IOPORT);
	}

	/* Initialize the hardware */
	if (mpt->disabled == 0) {
		if (mpt_attach(mpt) != 0) {
			goto bad;
		}
	} else {
		mpt_prt(mpt, "device disabled at user request\n");
		goto bad;
	}

	mpt->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, mpt_pci_shutdown,
	    dev, SHUTDOWN_PRI_DEFAULT);

	if (mpt->eh == NULL) {
		mpt_prt(mpt, "shutdown event registration failed\n");
		(void) mpt_detach(mpt);
		goto bad;
	}
	return (0);

bad:
	mpt_dma_mem_free(mpt);
	mpt_free_bus_resources(mpt);
	mpt_unlink_peer(mpt);

	MPT_LOCK_DESTROY(mpt);

	/*
	 * but return zero to preserve unit numbering
	 */
	return (0);
}
/*
 * PCI attach for the XHCI (USB 3.0) host controller: maps the
 * capability/operational register BAR, applies per-chipset quirks
 * (32-bit DMA on NEC uPD720200; EHCI-to-XHCI port routing and a
 * different interrupt-moderation default on the listed Intel parts),
 * initializes the softc, allocates the interrupt (single MSI rid 1
 * when xhci_use_msi is set, shared legacy rid 0 otherwise — with a
 * polling fallback if neither can be hooked), then takes over, halts,
 * starts and probes the controller.
 */
static int
xhci_pci_attach(device_t self)
{
	struct xhci_softc *sc = device_get_softc(self);
	int count, err, rid;
	uint8_t usedma32;

	rid = PCI_XHCI_CBMEM;
	sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_io_res) {
		device_printf(self, "Could not map memory\n");
		return (ENOMEM);
	}
	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
	sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
	sc->sc_io_size = rman_get_size(sc->sc_io_res);

	/* Per-chipset quirks, keyed on the PCI device+vendor ID. */
	switch (pci_get_devid(self)) {
	case 0x01941033:	/* NEC uPD720200 USB 3.0 controller */
		/* Don't use 64-bit DMA on these controllers. */
		usedma32 = 1;
		break;
	case 0x0f358086:	/* BayTrail */
	case 0x9c318086:	/* Panther Point */
	case 0x1e318086:	/* Panther Point */
	case 0x8c318086:	/* Lynx Point */
	case 0x8cb18086:	/* Wildcat Point */
		/*
		 * On Intel chipsets, reroute ports from EHCI to XHCI
		 * controller and use a different IMOD value.
		 */
		sc->sc_port_route = &xhci_pci_port_route;
		sc->sc_imod_default = XHCI_IMOD_DEFAULT_LP;
		/* FALLTHROUGH */
	default:
		usedma32 = 0;
		break;
	}

	if (xhci_init(sc, self, usedma32)) {
		device_printf(self, "Could not initialize softc\n");
		bus_release_resource(self, SYS_RES_MEMORY, PCI_XHCI_CBMEM,
		    sc->sc_io_res);
		return (ENXIO);
	}

	pci_enable_busmaster(self);

	usb_callout_init_mtx(&sc->sc_callout, &sc->sc_bus.bus_mtx, 0);

	rid = 0;
	if (xhci_use_msi) {
		count = 1;
		if (pci_alloc_msi(self, &count) == 0) {
			if (bootverbose)
				device_printf(self, "MSI enabled\n");
			rid = 1;	/* MSI resource IDs are 1-based */
		}
	}
	/* Legacy IRQ (rid 0) must be shareable; MSI must not be. */
	sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq_res == NULL) {
		pci_release_msi(self);
		device_printf(self, "Could not allocate IRQ\n");
		/* goto error; FALLTHROUGH - use polling */
	}
	sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
	if (sc->sc_bus.bdev == NULL) {
		device_printf(self, "Could not add USB device\n");
		goto error;
	}
	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);

	sprintf(sc->sc_vendor, "0x%04x", pci_get_vendor(self));

	if (sc->sc_irq_res != NULL) {
		err = bus_setup_intr(self, sc->sc_irq_res,
		    INTR_TYPE_BIO | INTR_MPSAFE, NULL,
		    (driver_intr_t *)xhci_interrupt, sc, &sc->sc_intr_hdl);
		if (err != 0) {
			/* Interrupt hookup failed; drop back to polling. */
			bus_release_resource(self, SYS_RES_IRQ,
			    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
			sc->sc_irq_res = NULL;
			pci_release_msi(self);
			device_printf(self, "Could not setup IRQ, err=%d\n",
			    err);
			sc->sc_intr_hdl = NULL;
		}
	}
	if (sc->sc_irq_res == NULL || sc->sc_intr_hdl == NULL) {
		if (xhci_use_polling() != 0) {
			device_printf(self, "Interrupt polling at %dHz\n",
			    hz);
			USB_BUS_LOCK(&sc->sc_bus);
			xhci_interrupt_poll(sc);
			USB_BUS_UNLOCK(&sc->sc_bus);
		} else
			goto error;
	}

	xhci_pci_take_controller(self);

	err = xhci_halt_controller(sc);

	if (err == 0)
		err = xhci_start_controller(sc);

	if (err == 0)
		err = device_probe_and_attach(sc->sc_bus.bdev);

	if (err) {
		device_printf(self, "XHCI halt/start/probe failed err=%d\n",
		    err);
		goto error;
	}
	return (0);

error:
	xhci_pci_detach(self);
	return (ENXIO);
}
/*
 * PCI attach for the XHCI (USB 3.0) host controller (earlier
 * revision of this routine): initializes the softc, maps the register
 * BAR, allocates the interrupt (single MSI rid 1 when xhci_use_msi is
 * set, shared legacy rid 0 otherwise, with a polling fallback),
 * applies Intel EHCI-to-XHCI port rerouting quirks, then takes over,
 * halts, starts and probes the controller.
 */
static int
xhci_pci_attach(device_t self)
{
	struct xhci_softc *sc = device_get_softc(self);
	int count, err, rid;

	/* XXX check for 64-bit capability */

	if (xhci_init(sc, self)) {
		device_printf(self, "Could not initialize softc\n");
		goto error;
	}

	pci_enable_busmaster(self);

	rid = PCI_XHCI_CBMEM;
	sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_io_res) {
		device_printf(self, "Could not map memory\n");
		goto error;
	}
	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
	sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
	sc->sc_io_size = rman_get_size(sc->sc_io_res);

	usb_callout_init_mtx(&sc->sc_callout, &sc->sc_bus.bus_mtx, 0);

	sc->sc_irq_rid = 0;
	if (xhci_use_msi) {
		/* Prefer a single MSI message when the device offers one. */
		count = pci_msi_count(self);
		if (count >= 1) {
			count = 1;
			if (pci_alloc_msi(self, &count) == 0) {
				if (bootverbose)
					device_printf(self, "MSI enabled\n");
				sc->sc_irq_rid = 1;
			}
		}
	}
	sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ,
	    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(self, "Could not allocate IRQ\n");
		/* goto error; FALLTHROUGH - use polling */
	}
	sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
	if (sc->sc_bus.bdev == NULL) {
		device_printf(self, "Could not add USB device\n");
		goto error;
	}
	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);

	sprintf(sc->sc_vendor, "0x%04x", pci_get_vendor(self));

	if (sc->sc_irq_res != NULL) {
		err = bus_setup_intr(self, sc->sc_irq_res,
		    INTR_TYPE_BIO | INTR_MPSAFE, NULL,
		    (driver_intr_t *)xhci_interrupt, sc, &sc->sc_intr_hdl);
		if (err != 0) {
			device_printf(self, "Could not setup IRQ, err=%d\n",
			    err);
			sc->sc_intr_hdl = NULL;
		}
	}
	if (sc->sc_irq_res == NULL || sc->sc_intr_hdl == NULL ||
	    xhci_use_polling() != 0) {
		device_printf(self, "Interrupt polling at %dHz\n", hz);
		USB_BUS_LOCK(&sc->sc_bus);
		xhci_interrupt_poll(sc);
		USB_BUS_UNLOCK(&sc->sc_bus);
	}

	/* On Intel chipsets reroute ports from EHCI to XHCI controller. */
	switch (pci_get_devid(self)) {
	case 0x9c318086:	/* Panther Point */
	case 0x1e318086:	/* Panther Point */
	case 0x8c318086:	/* Lynx Point */
		sc->sc_port_route = &xhci_pci_port_route;
		sc->sc_imod_default = XHCI_IMOD_DEFAULT_LP;
		break;
	default:
		break;
	}

	xhci_pci_take_controller(self);

	err = xhci_halt_controller(sc);

	if (err == 0)
		err = xhci_start_controller(sc);

	if (err == 0)
		err = device_probe_and_attach(sc->sc_bus.bdev);

	if (err) {
		device_printf(self, "XHCI halt/start/probe failed err=%d\n",
		    err);
		goto error;
	}
	return (0);

error:
	xhci_pci_detach(self);
	return (ENXIO);
}
/*
 * Configure MSI passthrough for a PCI device assigned to a guest VM:
 * tears down any previous MSI state, allocates `numvec` host vectors
 * (or falls back to the shared legacy interrupt when the device has
 * no MSI capability), and hooks pptintr() on each so host interrupts
 * are forwarded to the guest at `addr`/`msg + i`.
 *
 * Returns 0 on success; EINVAL for a bad vector count or insufficient
 * device capability, ENOENT if the device is unknown, EBUSY if the VM
 * does not own the device, ENOSPC when the host grants fewer vectors
 * than requested, ENXIO if resource/handler setup fails (everything
 * is torn down again in that case).
 */
int
ppt_setup_msi(struct vm *vm, int vcpu, int bus, int slot, int func,
	      uint64_t addr, uint64_t msg, int numvec)
{
	int i, rid, flags;
	int msi_count, startrid, error, tmp;
	struct pptdev *ppt;

	if (numvec < 0 || numvec > MAX_MSIMSGS)
		return (EINVAL);

	ppt = ppt_find(bus, slot, func);
	if (ppt == NULL)
		return (ENOENT);
	if (ppt->vm != vm)		/* Make sure we own this device */
		return (EBUSY);

	/* Free any allocated resources */
	ppt_teardown_msi(ppt);

	if (numvec == 0)		/* nothing more to do */
		return (0);

	flags = RF_ACTIVE;
	msi_count = pci_msi_count(ppt->dev);
	if (msi_count == 0) {
		startrid = 0;		/* legacy interrupt */
		msi_count = 1;
		flags |= RF_SHAREABLE;
	} else
		startrid = 1;		/* MSI */

	/*
	 * The device must be capable of supporting the number of vectors
	 * the guest wants to allocate.
	 */
	if (numvec > msi_count)
		return (EINVAL);

	/*
	 * Make sure that we can allocate all the MSI vectors that are needed
	 * by the guest.
	 */
	if (startrid == 1) {
		tmp = numvec;
		error = pci_alloc_msi(ppt->dev, &tmp);
		if (error)
			return (error);
		else if (tmp != numvec) {
			/* Short grant: release and report no space. */
			pci_release_msi(ppt->dev);
			return (ENOSPC);
		} else {
			/* success */
		}
	}

	ppt->msi.startrid = startrid;

	/*
	 * Allocate the irq resource and attach it to the interrupt handler.
	 */
	for (i = 0; i < numvec; i++) {
		/* num_msgs tracks progress so teardown knows how far we got. */
		ppt->msi.num_msgs = i + 1;
		ppt->msi.cookie[i] = NULL;

		rid = startrid + i;
		ppt->msi.res[i] = bus_alloc_resource_any(ppt->dev,
		    SYS_RES_IRQ, &rid, flags);
		if (ppt->msi.res[i] == NULL)
			break;

		ppt->msi.arg[i].pptdev = ppt;
		ppt->msi.arg[i].addr = addr;
		ppt->msi.arg[i].msg_data = msg + i;

		error = bus_setup_intr(ppt->dev, ppt->msi.res[i],
		    INTR_TYPE_NET | INTR_MPSAFE, pptintr, NULL,
		    &ppt->msi.arg[i], &ppt->msi.cookie[i]);
		if (error != 0)
			break;
	}

	if (i < numvec) {
		/* Partial setup: unwind everything allocated above. */
		ppt_teardown_msi(ppt);
		return (ENXIO);
	}

	return (0);
}
/*
 * Attach a Realtek rtwn(4) PCIe wireless adapter: identifies the
 * chip, maps BAR2, allocates the interrupt (single MSI rid 1 when
 * granted, shared legacy rid 0 otherwise), disables PCIe ASPM, sets
 * up locks/sysctls and per-chip methods, allocates the Tx/Rx DMA
 * rings, runs the generic attach, and finally hooks the interrupt
 * handler.  Returns 0, an errno, or ENXIO after full teardown.
 */
static int
rtwn_pci_attach(device_t dev)
{
	const struct rtwn_pci_ident *ident;
	struct rtwn_pci_softc *pc = device_get_softc(dev);
	struct rtwn_softc *sc = &pc->pc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t lcsr;
	int cap_off, i, error, rid;

	ident = rtwn_pci_probe_sub(dev);
	if (ident == NULL)
		return (ENXIO);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_cap(dev, PCIY_EXPRESS, &cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return (error);
	}

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(2);
	pc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (pc->mem == NULL) {
		device_printf(dev, "can't map mem space\n");
		return (ENOMEM);
	}
	pc->pc_st = rman_get_bustag(pc->mem);
	pc->pc_sh = rman_get_bushandle(pc->mem);

	/*
	 * Install interrupt handler.  `rid` doubles as the requested MSI
	 * message count for pci_alloc_msi(), then becomes the IRQ
	 * resource ID: 1 when MSI was granted, 0 for the legacy IRQ.
	 */
	rid = 1;
	if (pci_alloc_msi(dev, &rid) == 0)
		rid = 1;
	else
		rid = 0;
	pc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
	if (pc->irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		/*
		 * NOTE(review): this jumps to rtwn_pci_detach() before
		 * sc_mtx is initialized below — confirm the detach path
		 * tolerates that.
		 */
		goto detach;
	}

	/* Disable PCIe Active State Power Management (ASPM). */
	lcsr = pci_read_config(dev, cap_off + PCIER_LINK_CTL, 4);
	lcsr &= ~PCIEM_LINK_CTL_ASPMC;
	pci_write_config(dev, cap_off + PCIER_LINK_CTL, lcsr, 4);

	sc->sc_dev = dev;
	ic->ic_name = device_get_nameunit(dev);

	/* Need to be initialized early. */
	rtwn_sysctlattach(sc);
	mtx_init(&sc->sc_mtx, ic->ic_name, MTX_NETWORK_LOCK, MTX_DEF);

	rtwn_pci_attach_methods(sc);
	rtwn_pci_attach_private(pc, ident->chip);

	/* Allocate Tx/Rx buffers. */
	error = rtwn_pci_alloc_rx_list(sc);
	if (error != 0) {
		device_printf(dev,
		    "could not allocate Rx buffers, error %d\n", error);
		goto detach;
	}
	for (i = 0; i < RTWN_PCI_NTXQUEUES; i++) {
		error = rtwn_pci_alloc_tx_list(sc, i);
		if (error != 0) {
			device_printf(dev,
			    "could not allocate Tx buffers, error %d\n",
			    error);
			goto detach;
		}
	}

	/* Generic attach. */
	error = rtwn_attach(sc);
	if (error != 0)
		goto detach;

	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, pc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rtwn_pci_intr, sc, &pc->pc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt, error %d\n",
		    error);
		goto detach;
	}

	return (0);

detach:
	rtwn_pci_detach(dev);		/* failure */
	return (ENXIO);
}
/*
 * Device attach for the athp (ath10k) PCI front-end.
 *
 * Ordering matters throughout: core init, mutexes, BMI/HTT DMA memory,
 * taskqueue, BAR mapping, interrupt setup, CE/pipe setup, chip reset,
 * chip-id validation, and finally a config_intrhook to defer the main
 * attach until interrupts are running.
 *
 * Error handling uses layered labels that unwind in reverse order of
 * setup: bad4 (bufs/irq) -> bad1 (BAR) -> bad (HTT/BMI/mutexes/taskq)
 * -> bad0 (return).  Returns 0 on success or an errno in 'err'.
 */
static int
athp_pci_attach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;
	int rid, i;
	int err = 0;
	int ret;

	ar->sc_dev = dev;
	/* Gate interrupts off until the hardware is fully set up. */
	ar->sc_invalid = 1;

	/* XXX TODO: initialize sc_debug from TUNABLE */
#if 0
	ar->sc_debug = ATH10K_DBG_BOOT | ATH10K_DBG_PCI | ATH10K_DBG_HTC |
	    ATH10K_DBG_PCI_DUMP | ATH10K_DBG_WMI | ATH10K_DBG_BMI |
	    ATH10K_DBG_MAC | ATH10K_DBG_WMI_PRINT | ATH10K_DBG_MGMT |
	    ATH10K_DBG_DATA | ATH10K_DBG_HTT;
#endif
	ar->sc_psc = ar_pci;

	/* Load-time tunable/sysctl tree */
	athp_attach_sysctl(ar);

	/* Enable WMI/HTT RX for now */
	ar->sc_rx_wmi = 1;
	ar->sc_rx_htt = 1;

	/* Fetch pcie capability offset */
	ret = pci_find_cap(dev, PCIY_EXPRESS, &ar_pci->sc_cap_off);
	if (ret != 0) {
		device_printf(dev,
		    "%s: failed to find pci-express capability offset\n",
		    __func__);
		return (ret);
	}

	/*
	 * Initialise ath10k core bits.
	 */
	if (ath10k_core_init(ar) < 0)
		goto bad0;

	/*
	 * Initialise ath10k freebsd bits: one named mutex per subsystem.
	 * The name buffers live in the softc and must outlive the mutex.
	 */
	sprintf(ar->sc_mtx_buf, "%s:def", device_get_nameunit(dev));
	mtx_init(&ar->sc_mtx, ar->sc_mtx_buf, MTX_NETWORK_LOCK, MTX_DEF);
	sprintf(ar->sc_buf_mtx_buf, "%s:buf", device_get_nameunit(dev));
	mtx_init(&ar->sc_buf_mtx, ar->sc_buf_mtx_buf, "athp buf", MTX_DEF);
	sprintf(ar->sc_dma_mtx_buf, "%s:dma", device_get_nameunit(dev));
	mtx_init(&ar->sc_dma_mtx, ar->sc_dma_mtx_buf, "athp dma", MTX_DEF);
	sprintf(ar->sc_conf_mtx_buf, "%s:conf", device_get_nameunit(dev));
	mtx_init(&ar->sc_conf_mtx, ar->sc_conf_mtx_buf, "athp conf",
	    MTX_DEF | MTX_RECURSE);
	sprintf(ar_pci->ps_mtx_buf, "%s:ps", device_get_nameunit(dev));
	mtx_init(&ar_pci->ps_mtx, ar_pci->ps_mtx_buf, "athp ps", MTX_DEF);
	sprintf(ar_pci->ce_mtx_buf, "%s:ce", device_get_nameunit(dev));
	mtx_init(&ar_pci->ce_mtx, ar_pci->ce_mtx_buf, "athp ce", MTX_DEF);
	sprintf(ar->sc_data_mtx_buf, "%s:data", device_get_nameunit(dev));
	mtx_init(&ar->sc_data_mtx, ar->sc_data_mtx_buf, "athp data",
	    MTX_DEF);

	/*
	 * Initialise ath10k BMI/PCIDIAG bits.
	 */
	ret = athp_descdma_alloc(ar, &ar_pci->sc_bmi_txbuf, "bmi_msg_req",
	    4, 1024);
	ret |= athp_descdma_alloc(ar, &ar_pci->sc_bmi_rxbuf, "bmi_msg_resp",
	    4, 1024);
	if (ret != 0) {
		device_printf(dev, "%s: failed to allocate BMI TX/RX buffer\n",
		    __func__);
		goto bad0;
	}

	/*
	 * Initialise HTT descriptors/memory.
	 */
	ret = ath10k_htt_rx_alloc_desc(ar, &ar->htt);
	if (ret != 0) {
		device_printf(dev, "%s: failed to alloc HTT RX descriptors\n",
		    __func__);
		goto bad;
	}

	/* XXX here instead of in core_init because we need the lock init'ed */
	callout_init_mtx(&ar->scan.timeout, &ar->sc_data_mtx, 0);

	ar_pci->pipe_taskq = taskqueue_create("athp pipe taskq", M_NOWAIT,
	    NULL, ar_pci);
	/*
	 * NOTE(review): taskqueue_start_threads() is called before the
	 * NULL check on pipe_taskq; if taskqueue_create() failed this
	 * dereferences NULL before the check can catch it — verify.
	 */
	(void) taskqueue_start_threads(&ar_pci->pipe_taskq, 1, PI_NET,
	    "%s pipe taskq", device_get_nameunit(dev));
	if (ar_pci->pipe_taskq == NULL) {
		device_printf(dev, "%s: couldn't create pipe taskq\n",
		    __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Look at the device/vendor ID and choose which register offset
	 * mapping to use.  This is used by a lot of the register access
	 * pieces to get the correct device-specific windows.
	 */
	ar_pci->sc_vendorid = pci_get_vendor(dev);
	ar_pci->sc_deviceid = pci_get_device(dev);
	if (athp_pci_hw_lookup(ar_pci) != 0) {
		device_printf(dev, "%s: hw lookup failed\n", __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	ar_pci->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (ar_pci->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		err = ENXIO;
		goto bad;
	}

	/* Driver copy; hopefully we can delete this */
	ar->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/* Local copy for bus operations */
	ar_pci->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar_pci->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	ar->sc_invalid = 1;

	printf("%s: msicount=%d, msixcount=%d\n",
	    __func__, pci_msi_count(dev), pci_msix_count(dev));

	/*
	 * Arrange interrupt line.
	 *
	 * XXX TODO: this is effictively ath10k_pci_init_irq().
	 * Refactor it out later.
	 *
	 * First - attempt MSI.  If we get it, then use it.
	 * Fall back to a single MSI vector, then to legacy INTx.
	 */
	i = MSI_NUM_REQUEST;
	if (pci_alloc_msi(dev, &i) == 0) {
		device_printf(dev, "%s: %d MSI interrupts\n", __func__, i);
		/*
		 * NOTE(review): num_msi_intrs records the requested count,
		 * not the granted count in 'i'; pci_alloc_msi() may grant
		 * fewer than requested — confirm this is intentional.
		 */
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
	} else {
		i = 1;
		if (pci_alloc_msi(dev, &i) == 0) {
			device_printf(dev, "%s: 1 MSI interrupt\n",
			    __func__);
			ar_pci->num_msi_intrs = 1;
		} else {
			device_printf(dev, "%s: legacy interrupts\n",
			    __func__);
			ar_pci->num_msi_intrs = 0;
		}
	}
	err = ath10k_pci_request_irq(ar_pci);
	if (err != 0)
		goto bad1;

	/*
	 * Attach register ops - needed for the caller to do register IO.
	 */
	ar->sc_regio.reg_read = athp_pci_regio_read_reg;
	ar->sc_regio.reg_write = athp_pci_regio_write_reg;
	ar->sc_regio.reg_s_read = athp_pci_regio_s_read_reg;
	ar->sc_regio.reg_s_write = athp_pci_regio_s_write_reg;
	ar->sc_regio.reg_flush = athp_pci_regio_flush_reg;
	ar->sc_regio.reg_arg = ar_pci;

	/*
	 * TODO: abstract this out to be a bus/hif specific
	 * attach path.
	 *
	 * I'm not sure what USB/SDIO will look like here, but
	 * I'm pretty sure it won't involve PCI/CE setup.
	 * It'll still have WME/HIF/BMI, but it'll be done over
	 * USB endpoints.
	 */
	if (athp_pci_setup_bufs(ar_pci) != 0) {
		err = ENXIO;
		goto bad4;
	}

	/* HIF ops attach */
	ar->hif.ops = &ath10k_pci_hif_ops;
	ar->hif.bus = ATH10K_BUS_PCI;

	/* Alloc pipes */
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		device_printf(ar->sc_dev, "%s: pci_alloc_pipes failed: %d\n",
		    __func__, ret);
		/* XXX cleanup */
		err = ENXIO;
		goto bad4;
	}

	/* deinit ce */
	ath10k_pci_ce_deinit(ar);

	/* disable irq */
	ret = ath10k_pci_irq_disable(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: irq_disable failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* init IRQ */
	ret = ath10k_pci_init_irq(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: init_irq failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* Ok, gate open the interrupt handler */
	ar->sc_invalid = 0;

	/* pci_chip_reset */
	ret = ath10k_pci_chip_reset(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: chip_reset failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* read SoC/chip version */
	ar->sc_chipid = athp_pci_soc_read32(ar,
	    SOC_CHIP_ID_ADDRESS(ar->sc_regofs));

	/* Verify chip version is something we can use */
	device_printf(ar->sc_dev, "%s: chipid: 0x%08x\n", __func__,
	    ar->sc_chipid);
	if (! ath10k_pci_chip_is_supported(ar_pci->sc_deviceid,
	    ar->sc_chipid)) {
		device_printf(ar->sc_dev,
		    "%s: unsupported chip; chipid: 0x%08x\n", __func__,
		    ar->sc_chipid);
		err = ENXIO;
		goto bad4;
	}

	/*
	 * Call main attach method with given info.  Deferred via an
	 * intrhook so it runs once interrupts are enabled system-wide.
	 */
	ar->sc_preinit_hook.ich_func = athp_attach_preinit;
	ar->sc_preinit_hook.ich_arg = ar;
	if (config_intrhook_establish(&ar->sc_preinit_hook) != 0) {
		device_printf(ar->sc_dev,
		    "%s: couldn't establish preinit hook\n", __func__);
		goto bad4;
	}

	return (0);

	/* Fallthrough for setup failure: unwind in reverse setup order. */
bad4:
	athp_pci_free_bufs(ar_pci);
	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);
bad:
	ath10k_htt_rx_free_desc(ar, &ar->htt);
	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);
	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}
	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);
	ath10k_core_destroy(ar);
bad0:
	return (err);
}
/*
 * Allocate resources for our device, set up the bus interface.
 *
 * Attach for aac(4): verifies bus-mastering took effect, selects the
 * hardware interface vector from the probe ident table, maps the
 * register BAR(s) (NARK parts use a second window), allocates one
 * interrupt (MSI rid 1 when enabled and not quirked off, else legacy
 * shareable rid 0), creates the parent DMA tag, and hands off to the
 * bus-independent aac_attach().
 *
 * Returns 0 on success or an errno; 'error' defaults to ENXIO so any
 * early 'goto out' reports "not configured" and aac_free() releases
 * whatever was allocated.
 */
static int
aac_pci_attach(device_t dev)
{
	struct aac_softc *sc;
	const struct aac_ident *id;
	int count, error, rid;

	fwprintf(NULL, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialise softc.
	 */
	sc = device_get_softc(dev);
	sc->aac_dev = dev;

	/* assume failure is 'not configured' */
	error = ENXIO;

	/*
	 * Verify that the adapter is correctly set up in PCI space.
	 * Some systems refuse to let us enable bus-mastering; read the
	 * command register back to confirm it stuck.
	 */
	pci_enable_busmaster(dev);
	if (!(pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)) {
		device_printf(dev, "can't enable bus-master feature\n");
		goto out;
	}

	/*
	 * Detect the hardware interface version, set up the bus interface
	 * indirection.
	 */
	id = aac_find_ident(dev);
	sc->aac_hwif = id->hwif;
	switch(sc->aac_hwif) {
	case AAC_HWIF_I960RX:
	case AAC_HWIF_NARK:
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B,
		    "set hardware up for i960Rx/NARK");
		sc->aac_if = &aac_rx_interface;
		break;
	case AAC_HWIF_STRONGARM:
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B,
		    "set hardware up for StrongARM");
		sc->aac_if = &aac_sa_interface;
		break;
	case AAC_HWIF_RKT:
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B,
		    "set hardware up for Rocket/MIPS");
		sc->aac_if = &aac_rkt_interface;
		break;
	default:
		sc->aac_hwif = AAC_HWIF_UNKNOWN;
		device_printf(dev, "unknown hardware type\n");
		goto out;
	}

	/* Set up quirks */
	sc->flags = id->quirks;

	/*
	 * Allocate the PCI register window(s).
	 */
	rid = PCIR_BAR(0);
	if ((sc->aac_regs_res0 = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "can't allocate register window 0\n");
		goto out;
	}
	sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
	sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);

	/* NARK parts expose a second register window; others alias BAR0. */
	if (sc->aac_hwif == AAC_HWIF_NARK) {
		rid = PCIR_BAR(1);
		if ((sc->aac_regs_res1 = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE)) == NULL) {
			device_printf(dev,
			    "can't allocate register window 1\n");
			goto out;
		}
		sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
		sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);
	} else {
		sc->aac_regs_res1 = sc->aac_regs_res0;
		sc->aac_btag1 = sc->aac_btag0;
		sc->aac_bhandle1 = sc->aac_bhandle0;
	}

	/*
	 * Allocate the interrupt.  MSI (rid 1) when enabled by tunable and
	 * not quirked off; otherwise legacy INTx (rid 0, shareable).
	 */
	rid = 0;
	if (aac_enable_msi != 0 && (sc->flags & AAC_FLAGS_NOMSI) == 0) {
		count = 1;
		if (pci_alloc_msi(dev, &count) == 0)
			rid = 1;
	}
	if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ,
	    &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE))) == NULL) {
		device_printf(dev, "can't allocate interrupt\n");
		goto out;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for our PCI
	 * interface.
	 *
	 * Note that some of these controllers are 64-bit capable.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    PAGE_SIZE, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* No locking needed */
	    &sc->aac_parent_dmat)) {
		device_printf(dev, "can't allocate parent DMA tag\n");
		goto out;
	}

	/*
	 * Do bus-independent initialisation.
	 */
	error = aac_attach(sc);

out:
	if (error)
		aac_free(sc);
	return(error);
}
/*
 * Device attach for mfi(4).
 *
 * Zeroes the softc, picks the register BAR by controller generation
 * (1064R/1078 use BAR0; Gen2/Skinny/ThunderBolt use BAR1), maps it,
 * creates the parent DMA tag, allocates the interrupt (MSI rid 1 when
 * the mfi_msi tunable is set and allocation succeeds, else legacy
 * shareable rid 0), then runs the bus-independent mfi_attach().
 *
 * Returns 0 on success or an errno; failures unwind via mfi_free() and
 * mfi_pci_free().
 */
static int
mfi_pci_attach(device_t dev)
{
	struct mfi_softc *sc;
	struct mfi_ident *m;
	int count, error;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->mfi_dev = dev;
	m = mfi_find_ident(dev);
	sc->mfi_flags = m->flags;

	/* Ensure busmastering is enabled */
	pci_enable_busmaster(dev);

	/* Allocate PCI registers */
	if ((sc->mfi_flags & MFI_FLAGS_1064R) ||
	    (sc->mfi_flags & MFI_FLAGS_1078)) {
		/* 1068/1078: Memory mapped BAR is at offset 0x10 */
		sc->mfi_regs_rid = PCIR_BAR(0);
	}
	else if ((sc->mfi_flags & MFI_FLAGS_GEN2) ||
		 (sc->mfi_flags & MFI_FLAGS_SKINNY) ||
		 (sc->mfi_flags & MFI_FLAGS_TBOLT)) {
		/* Gen2/Skinny: Memory mapped BAR is at offset 0x14 */
		sc->mfi_regs_rid = PCIR_BAR(1);
	}
	/*
	 * NOTE(review): if none of the generation flags match,
	 * mfi_regs_rid stays 0 from the bzero above rather than a BAR
	 * rid — presumably every ident carries one of these flags;
	 * verify against the ident table.
	 */
	if ((sc->mfi_regs_resource = bus_alloc_resource_any(sc->mfi_dev,
	    SYS_RES_MEMORY, &sc->mfi_regs_rid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		return (ENXIO);
	}
	sc->mfi_btag = rman_get_bustag(sc->mfi_regs_resource);
	sc->mfi_bhandle = rman_get_bushandle(sc->mfi_regs_resource);

	error = ENOMEM;

	/* Allocate parent DMA tag */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
	    1, 0,				/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->mfi_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		goto out;
	}

	/* Allocate IRQ resource. */
	sc->mfi_irq_rid = 0;
	count = 1;
	if (mfi_msi && pci_alloc_msi(sc->mfi_dev, &count) == 0) {
		device_printf(sc->mfi_dev, "Using MSI\n");
		sc->mfi_irq_rid = 1;
	}
	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
		error = EINVAL;
		goto out;
	}

	error = mfi_attach(sc);
out:
	if (error) {
		mfi_free(sc);
		mfi_pci_free(sc);
	}

	return (error);
}
/*
 * Device attach for the XHCI PCI controller (DragonFly-style bus API:
 * 4-arg pci_alloc_msi(), bus_setup_intr() with serializer argument).
 *
 * Maps the controller memory BAR, works around devices known to lack
 * 64-bit DMA support, initializes the softc, optionally allocates a
 * single MSI vector, and attaches the USB bus child.  If no interrupt
 * can be allocated or hooked, the driver falls back to timer-driven
 * polling when hw.usb.xhci.use_polling is set; otherwise attach fails.
 *
 * Returns 0 on success or an errno; failures after softc init unwind
 * through xhci_pci_detach().
 */
static int
xhci_pci_attach(device_t self)
{
	struct xhci_softc *sc = device_get_softc(self);
	int count, err, rid;
	uint8_t usedma32;

	rid = PCI_XHCI_CBMEM;
	sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_io_res) {
		device_printf(self, "Could not map memory\n");
		return (ENOMEM);
	}
	sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
	sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
	sc->sc_io_size = rman_get_size(sc->sc_io_res);

	/* check for USB 3.0 controllers which don't support 64-bit DMA */
	switch (pci_get_devid(self)) {
	case 0x01941033:	/* NEC uPD720200 USB 3.0 controller */
	case 0x00141912:	/* NEC uPD720201 USB 3.0 controller */
	case 0x78141022:	/* AMD A10-7300, tested does not work w/64-bit DMA */
		usedma32 = 1;
		break;
	default:
		usedma32 = 0;
		break;
	}

	if (xhci_init(sc, self, usedma32)) {
		device_printf(self, "Could not initialize softc\n");
		bus_release_resource(self, SYS_RES_MEMORY, PCI_XHCI_CBMEM,
		    sc->sc_io_res);
		return (ENXIO);
	}

	pci_enable_busmaster(self);

	usb_callout_init_mtx(&sc->sc_callout, &sc->sc_bus.bus_lock, 0);

	/*
	 * Try a single MSI vector when the tunable allows it; on success
	 * the IRQ rid becomes 1, otherwise legacy INTx rid 0 is used.
	 */
	rid = 0;
	if (xhci_use_msi) {
		count = pci_msi_count(self);
		if (count >= 1) {
			count = 1;
			if (pci_alloc_msi(self, &rid, 1, count) == 0) {
				if (bootverbose)
					device_printf(self, "MSI enabled\n");
				sc->sc_irq_rid = 1;
			}
		}
	}

	/*
	 * hw.usb.xhci.use_polling=1 to force polling.
	 */
	if (xhci_use_polling() == 0) {
		sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ,
		    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
		if (sc->sc_irq_res == NULL) {
			pci_release_msi(self);
			device_printf(self, "Could not allocate IRQ\n");
			/* goto error; FALLTHROUGH - use polling */
		}
	}
	sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
	if (sc->sc_bus.bdev == NULL) {
		device_printf(self, "Could not add USB device\n");
		goto error;
	}
	device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);

	ksprintf(sc->sc_vendor, "0x%04x", pci_get_vendor(self));

	if (sc->sc_irq_res != NULL) {
		err = bus_setup_intr(self, sc->sc_irq_res, INTR_MPSAFE,
		    (driver_intr_t *)xhci_interrupt, sc, &sc->sc_intr_hdl,
		    NULL);
		if (err != 0) {
			bus_release_resource(self, SYS_RES_IRQ,
			    rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
			sc->sc_irq_res = NULL;
			pci_release_msi(self);
			device_printf(self, "Could not setup IRQ, err=%d\n",
			    err);
			sc->sc_intr_hdl = NULL;
		}
	}
	/* No working interrupt: poll if allowed, otherwise fail attach. */
	if (sc->sc_irq_res == NULL || sc->sc_intr_hdl == NULL) {
		if (xhci_use_polling() != 0) {
			device_printf(self, "Interrupt polling at %dHz\n",
			    hz);
			USB_BUS_LOCK(&sc->sc_bus);
			xhci_interrupt_poll(sc);
			USB_BUS_UNLOCK(&sc->sc_bus);
		} else
			goto error;
	}

	/* On Intel chipsets reroute ports from EHCI to XHCI controller. */
	switch (pci_get_devid(self)) {
	case 0x0f358086:	/* BayTrail */
	case 0x9c318086:	/* Panther Point */
	case 0x1e318086:	/* Panther Point */
	case 0x8c318086:	/* Lynx Point */
	case 0x8cb18086:	/* Wildcat Point */
	case 0x9cb18086:	/* Wildcat Point-LP */
		sc->sc_port_route = &xhci_pci_port_route;
		sc->sc_imod_default = XHCI_IMOD_DEFAULT_LP;
		break;
	default:
		break;
	}

	xhci_pci_take_controller(self);
	err = xhci_halt_controller(sc);

	if (err == 0)
		err = xhci_start_controller(sc);

	if (err == 0)
		err = device_probe_and_attach(sc->sc_bus.bdev);

	if (err) {
		device_printf(self, "XHCI halt/start/probe failed err=%d\n",
		    err);
		goto error;
	}

	return (0);

error:
	xhci_pci_detach(self);
	return (ENXIO);
}